Diffstat (limited to 'include/llvm/CodeGen')
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h                   406
-rw-r--r--  include/llvm/CodeGen/BreakCriticalMachineEdge.h     108
-rw-r--r--  include/llvm/CodeGen/CallingConvLower.h             274
-rw-r--r--  include/llvm/CodeGen/DAGISelHeader.h                136
-rw-r--r--  include/llvm/CodeGen/DebugLoc.h                     101
-rw-r--r--  include/llvm/CodeGen/DwarfWriter.h                  123
-rw-r--r--  include/llvm/CodeGen/ELFRelocation.h                 51
-rw-r--r--  include/llvm/CodeGen/FastISel.h                     315
-rw-r--r--  include/llvm/CodeGen/FileWriters.h                   31
-rw-r--r--  include/llvm/CodeGen/GCMetadata.h                   192
-rw-r--r--  include/llvm/CodeGen/GCMetadataPrinter.h             76
-rw-r--r--  include/llvm/CodeGen/GCStrategy.h                   142
-rw-r--r--  include/llvm/CodeGen/GCs.h                           35
-rw-r--r--  include/llvm/CodeGen/IntrinsicLowering.h             50
-rw-r--r--  include/llvm/CodeGen/JITCodeEmitter.h               322
-rw-r--r--  include/llvm/CodeGen/LatencyPriorityQueue.h         112
-rw-r--r--  include/llvm/CodeGen/LinkAllAsmWriterComponents.h    36
-rw-r--r--  include/llvm/CodeGen/LinkAllCodegenComponents.h      56
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h                 468
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h         537
-rw-r--r--  include/llvm/CodeGen/LiveStackAnalysis.h            112
-rw-r--r--  include/llvm/CodeGen/LiveVariables.h                269
-rw-r--r--  include/llvm/CodeGen/MachORelocation.h               54
-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h            414
-rw-r--r--  include/llvm/CodeGen/MachineCodeEmitter.h           330
-rw-r--r--  include/llvm/CodeGen/MachineCodeInfo.h               51
-rw-r--r--  include/llvm/CodeGen/MachineConstantPool.h          147
-rw-r--r--  include/llvm/CodeGen/MachineDominators.h            199
-rw-r--r--  include/llvm/CodeGen/MachineFrameInfo.h             411
-rw-r--r--  include/llvm/CodeGen/MachineFunction.h              407
-rw-r--r--  include/llvm/CodeGen/MachineFunctionPass.h           45
-rw-r--r--  include/llvm/CodeGen/MachineInstr.h                 375
-rw-r--r--  include/llvm/CodeGen/MachineInstrBuilder.h          225
-rw-r--r--  include/llvm/CodeGen/MachineJumpTableInfo.h          92
-rw-r--r--  include/llvm/CodeGen/MachineLocation.h              106
-rw-r--r--  include/llvm/CodeGen/MachineLoopInfo.h              188
-rw-r--r--  include/llvm/CodeGen/MachineMemOperand.h             86
-rw-r--r--  include/llvm/CodeGen/MachineModuleInfo.h            300
-rw-r--r--  include/llvm/CodeGen/MachineOperand.h               447
-rw-r--r--  include/llvm/CodeGen/MachinePassRegistry.h          156
-rw-r--r--  include/llvm/CodeGen/MachineRegisterInfo.h          305
-rw-r--r--  include/llvm/CodeGen/MachineRelocation.h            339
-rw-r--r--  include/llvm/CodeGen/Passes.h                       212
-rw-r--r--  include/llvm/CodeGen/PseudoSourceValue.h             71
-rw-r--r--  include/llvm/CodeGen/RegAllocRegistry.h              64
-rw-r--r--  include/llvm/CodeGen/RegisterCoalescer.h            154
-rw-r--r--  include/llvm/CodeGen/RegisterScavenging.h           178
-rw-r--r--  include/llvm/CodeGen/RuntimeLibcalls.h              255
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h                  666
-rw-r--r--  include/llvm/CodeGen/ScheduleHazardRecognizer.h      66
-rw-r--r--  include/llvm/CodeGen/SchedulerRegistry.h             93
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h                 880
-rw-r--r--  include/llvm/CodeGen/SelectionDAGISel.h             140
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h           2568
-rw-r--r--  include/llvm/CodeGen/ValueTypes.h                   481
-rw-r--r--  include/llvm/CodeGen/ValueTypes.td                   66
56 files changed, 14523 insertions, 0 deletions
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
new file mode 100644
index 0000000..a004632
--- /dev/null
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -0,0 +1,406 @@
+//===-- llvm/CodeGen/AsmPrinter.h - AsmPrinter Framework --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a class to be used as the base class for target specific
+// asm writers. This class primarily handles common functionality used by
+// all asm writers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ASMPRINTER_H
+#define LLVM_CODEGEN_ASMPRINTER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Target/TargetMachine.h"
+#include <set>
+
+namespace llvm {
+ class GCStrategy;
+ class Constant;
+ class ConstantArray;
+ class ConstantInt;
+ class ConstantStruct;
+ class ConstantVector;
+ class GCMetadataPrinter;
+ class GlobalVariable;
+ class MachineConstantPoolEntry;
+ class MachineConstantPoolValue;
+ class DwarfWriter;
+ class Mangler;
+ class Section;
+ class TargetAsmInfo;
+ class Type;
+ class raw_ostream;
+
+ /// AsmPrinter - This class is intended to be used as a driving class for all
+ /// asm writers.
+ class AsmPrinter : public MachineFunctionPass {
+ static char ID;
+
+ /// FunctionNumber - This provides a unique ID for each function emitted in
+ /// this translation unit. It is autoincremented by SetupMachineFunction,
+ /// and can be accessed with getFunctionNumber() and
+ /// IncrementFunctionNumber().
+ ///
+ unsigned FunctionNumber;
+
+ // GCMetadataPrinters - The garbage collection metadata printer table.
+ typedef DenseMap<GCStrategy*,GCMetadataPrinter*> gcp_map_type;
+ typedef gcp_map_type::iterator gcp_iterator;
+ gcp_map_type GCMetadataPrinters;
+
+ protected:
+ /// DW - This is needed because printDeclare() has to insert
+ /// DbgVariable entries into the dwarf table. This is a short-term hack
+ /// that ought to be fixed soon.
+ DwarfWriter *DW;
+
+ // Necessary for external weak linkage support
+ std::set<const GlobalValue*> ExtWeakSymbols;
+
+ /// OptLevel - Generating code at a specific optimization level.
+ CodeGenOpt::Level OptLevel;
+ public:
+ /// Output stream on which we're printing assembly code.
+ ///
+ raw_ostream &O;
+
+ /// Target machine description.
+ ///
+ TargetMachine &TM;
+
+ /// Target Asm Printer information.
+ ///
+ const TargetAsmInfo *TAI;
+
+ /// Target Register Information.
+ ///
+ const TargetRegisterInfo *TRI;
+
+ /// The current machine function.
+ const MachineFunction *MF;
+
+ /// Name-mangler for global names.
+ ///
+ Mangler *Mang;
+
+ /// Cache of mangled name for current function. This is recalculated at the
+ /// beginning of each call to runOnMachineFunction().
+ ///
+ std::string CurrentFnName;
+
+ /// CurrentSection - The current section we are emitting to. This is
+ /// controlled and used by the SwitchSection method.
+ std::string CurrentSection;
+ const Section* CurrentSection_;
+
+ /// IsInTextSection - True if the current section we are emitting to is a
+ /// text section.
+ bool IsInTextSection;
+
+ /// VerboseAsm - Emit comments in assembly output if this is true.
+ ///
+ bool VerboseAsm;
+
+ protected:
+ explicit AsmPrinter(raw_ostream &o, TargetMachine &TM,
+ const TargetAsmInfo *T, CodeGenOpt::Level OL, bool V);
+
+ public:
+ virtual ~AsmPrinter();
+
+ /// isVerbose - Return true if assembly output should contain comments.
+ ///
+ bool isVerbose() const { return VerboseAsm; }
+
+ /// SwitchToTextSection - Switch to the specified section of the executable
+ /// if we are not already in it! If GV is non-null and if the global has an
+ /// explicitly requested section, we switch to the section indicated for the
+ /// global instead of NewSection.
+ ///
+ /// If the new section is an empty string, this method forgets what the
+ /// current section is, but does not emit a .section directive.
+ ///
+ /// This method is used when about to emit executable code.
+ ///
+ void SwitchToTextSection(const char *NewSection, const GlobalValue *GV = NULL);
+
+ /// SwitchToDataSection - Switch to the specified section of the executable
+ /// if we are not already in it! If GV is non-null and if the global has an
+ /// explicitly requested section, we switch to the section indicated for the
+ /// global instead of NewSection.
+ ///
+ /// If the new section is an empty string, this method forgets what the
+ /// current section is, but does not emit a .section directive.
+ ///
+ /// This method is used when about to emit data. For most assemblers, this
+ /// is the same as the SwitchToTextSection method, but not all assemblers
+ /// are the same.
+ ///
+ void SwitchToDataSection(const char *NewSection, const GlobalValue *GV = NULL);
+
+ /// SwitchToSection - Switch to the specified section of the executable if
+ /// we are not already in it!
+ void SwitchToSection(const Section* NS);
+
+ /// getGlobalLinkName - Returns the asm/link name of the specified
+ /// global variable. Should be overridden by each target asm printer to
+ /// generate the appropriate value.
+ virtual const std::string &getGlobalLinkName(const GlobalVariable *GV,
+ std::string &LinkName) const;
+
+ /// EmitExternalGlobal - Emit the external reference to a global variable.
+ /// Should be overridden if an indirect reference should be used.
+ virtual void EmitExternalGlobal(const GlobalVariable *GV);
+
+ /// getCurrentFunctionEHName - Called to return (and cache) the
+ /// CurrentFnEHName.
+ ///
+ const std::string &getCurrentFunctionEHName(const MachineFunction *MF,
+ std::string &FuncEHName) const;
+
+ protected:
+ /// getAnalysisUsage - Record analysis usage.
+ ///
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ /// doInitialization - Set up the AsmPrinter when we are working on a new
+ /// module. If your pass overrides this, it must make sure to explicitly
+ /// call this implementation.
+ bool doInitialization(Module &M);
+
+ /// doFinalization - Shut down the asmprinter. If you override this in your
+ /// pass, you must make sure to call it explicitly.
+ bool doFinalization(Module &M);
+
+ /// PrintSpecial - Print information related to the specified machine instr
+ /// that is independent of the operand, and may be independent of the instr
+ /// itself. This can be useful for portably encoding the comment character
+ /// or other bits of target-specific knowledge into the asmstrings. The
+ /// syntax used is ${:comment}. Targets can override this to add support
+ /// for their own strange codes.
+ virtual void PrintSpecial(const MachineInstr *MI, const char *Code) const;
+
+ /// PrintAsmOperand - Print the specified operand of MI, an INLINEASM
+ /// instruction, using the specified assembler variant. Targets should
+ /// override this to format as appropriate. This method can return true if
+ /// the operand is erroneous.
+ virtual bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode);
+
+ /// PrintAsmMemoryOperand - Print the specified operand of MI, an INLINEASM
+ /// instruction, using the specified assembler variant as an address.
+ /// Targets should override this to format as appropriate. This method can
+ /// return true if the operand is erroneous.
+ virtual bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode);
+
+ /// SetupMachineFunction - This should be called when a new MachineFunction
+ /// is being processed from runOnMachineFunction.
+ void SetupMachineFunction(MachineFunction &MF);
+
+ /// getFunctionNumber - Return a unique ID for the current function.
+ ///
+ unsigned getFunctionNumber() const { return FunctionNumber; }
+
+ /// IncrementFunctionNumber - Increase Function Number. AsmPrinters should
+ /// not normally call this, as the counter is automatically bumped by
+ /// SetupMachineFunction.
+ void IncrementFunctionNumber() { FunctionNumber++; }
+
+ /// EmitConstantPool - Print to the current output stream assembly
+ /// representations of the constants in the constant pool MCP. This is
+ /// used to print out constants which have been "spilled to memory" by
+ /// the code generator.
+ ///
+ void EmitConstantPool(MachineConstantPool *MCP);
+
+ /// EmitJumpTableInfo - Print assembly representations of the jump tables
+ /// used by the current function to the current output stream.
+ ///
+ void EmitJumpTableInfo(MachineJumpTableInfo *MJTI, MachineFunction &MF);
+
+ /// EmitSpecialLLVMGlobal - Check to see if the specified global is a
+ /// special global used by LLVM. If so, emit it and return true, otherwise
+ /// do nothing and return false.
+ bool EmitSpecialLLVMGlobal(const GlobalVariable *GV);
+
+ public:
+ //===------------------------------------------------------------------===//
+ /// LEB 128 number encoding.
+
+ /// PrintULEB128 - Print a series of hexadecimal values (separated by commas)
+ /// representing an unsigned leb128 value.
+ void PrintULEB128(unsigned Value) const;
+
+ /// PrintSLEB128 - Print a series of hexadecimal values (separated by commas)
+ /// representing a signed leb128 value.
+ void PrintSLEB128(int Value) const;
+
+ //===------------------------------------------------------------------===//
+ // Emission and print routines
+ //
+
+ /// PrintHex - Print a value as a hexadecimal value.
+ ///
+ void PrintHex(int Value) const;
+
+ /// EOL - Print a newline character to asm stream. If a comment is present
+ /// then it will be printed first. Comments should not contain '\n'.
+ void EOL() const;
+ void EOL(const std::string &Comment) const;
+ void EOL(const char* Comment) const;
+
+ /// EmitULEB128Bytes - Emit an assembler byte data directive to compose an
+ /// unsigned leb128 value.
+ void EmitULEB128Bytes(unsigned Value) const;
+
+ /// EmitSLEB128Bytes - print an assembler byte data directive to compose a
+ /// signed leb128 value.
+ void EmitSLEB128Bytes(int Value) const;
+
+ /// EmitInt8 - Emit a byte directive and value.
+ ///
+ void EmitInt8(int Value) const;
+
+ /// EmitInt16 - Emit a short directive and value.
+ ///
+ void EmitInt16(int Value) const;
+
+ /// EmitInt32 - Emit a long directive and value.
+ ///
+ void EmitInt32(int Value) const;
+
+ /// EmitInt64 - Emit a long long directive and value.
+ ///
+ void EmitInt64(uint64_t Value) const;
+
+ /// EmitString - Emit a string with quotes and a null terminator.
+ /// Special characters are emitted properly.
+ /// @verbatim (Eg. '\t') @endverbatim
+ void EmitString(const std::string &String) const;
+ void EmitString(const char *String, unsigned Size) const;
+
+ /// EmitFile - Emit a .file directive.
+ void EmitFile(unsigned Number, const std::string &Name) const;
+
+ //===------------------------------------------------------------------===//
+
+ /// EmitAlignment - Emit an alignment directive to the specified power of
+ /// two boundary. For example, if you pass in 3 here, you will get an 8
+ /// byte alignment. If a global value is specified, and if that global has
+ /// an explicit alignment requested, it will unconditionally override the
+ /// alignment request. However, if ForcedAlignBits is specified, this value
+ /// has final say: the ultimate alignment will be the max of ForcedAlignBits
+ /// and the alignment computed with NumBits and the global. If UseFillExpr
+ /// is true, it also emits an optional second value FillValue which the
+ /// assembler uses to fill gaps to match alignment for text sections if the
+ /// target has specified a non-zero fill value.
+ ///
+ /// The algorithm is:
+ /// Align = NumBits;
+ /// if (GV && GV->hasalignment) Align = GV->getalignment();
+ /// Align = std::max(Align, ForcedAlignBits);
+ ///
+ void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0,
+ unsigned ForcedAlignBits = 0,
+ bool UseFillExpr = true) const;
+
+ /// printLabel - This method prints a local label used by debug and
+ /// exception handling tables.
+ void printLabel(const MachineInstr *MI) const;
+ void printLabel(unsigned Id) const;
+
+ /// printDeclare - This method prints a local variable declaration used by
+ /// debug tables.
+ void printDeclare(const MachineInstr *MI) const;
+
+ protected:
+ /// EmitZeros - Emit a block of zeros.
+ ///
+ void EmitZeros(uint64_t NumZeros, unsigned AddrSpace = 0) const;
+
+ /// EmitString - Emit a zero-byte-terminated string constant.
+ ///
+ virtual void EmitString(const ConstantArray *CVA) const;
+
+ /// EmitConstantValueOnly - Print out the specified constant, without a
+ /// storage class. Only constants of first-class type are allowed here.
+ void EmitConstantValueOnly(const Constant *CV);
+
+ /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
+ void EmitGlobalConstant(const Constant* CV, unsigned AddrSpace = 0);
+
+ virtual void EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV);
+
+ /// processDebugLoc - Processes the debug information of each machine
+ /// instruction's DebugLoc.
+ void processDebugLoc(DebugLoc DL);
+
+ /// printInlineAsm - This method formats and prints the specified machine
+ /// instruction that is an inline asm.
+ void printInlineAsm(const MachineInstr *MI) const;
+
+ /// printImplicitDef - This method prints the specified machine instruction
+ /// that is an implicit def.
+ virtual void printImplicitDef(const MachineInstr *MI) const;
+
+ /// printBasicBlockLabel - This method prints the label for the specified
+ /// MachineBasicBlock
+ virtual void printBasicBlockLabel(const MachineBasicBlock *MBB,
+ bool printAlign = false,
+ bool printColon = false,
+ bool printComment = true) const;
+
+ /// printPICJumpTableSetLabel - This method prints a set label for the
+ /// specified MachineBasicBlock for a jumptable entry.
+ virtual void printPICJumpTableSetLabel(unsigned uid,
+ const MachineBasicBlock *MBB) const;
+ virtual void printPICJumpTableSetLabel(unsigned uid, unsigned uid2,
+ const MachineBasicBlock *MBB) const;
+ virtual void printPICJumpTableEntry(const MachineJumpTableInfo *MJTI,
+ const MachineBasicBlock *MBB,
+ unsigned uid) const;
+
+ /// printDataDirective - This method prints the asm directive for the
+ /// specified type.
+ void printDataDirective(const Type *type, unsigned AddrSpace = 0);
+
+ /// printSuffixedName - This prints a name with preceding
+ /// getPrivateGlobalPrefix and the specified suffix, handling quoted names
+ /// correctly.
+ void printSuffixedName(const char *Name, const char *Suffix,
+ const char *Prefix = 0);
+ void printSuffixedName(const std::string &Name, const char* Suffix);
+
+ /// printVisibility - This prints visibility information about the symbol,
+ /// if this is supported by the target.
+ void printVisibility(const std::string& Name, unsigned Visibility) const;
+
+ /// printOffset - This is a convenience handler for printing offsets.
+ void printOffset(int64_t Offset) const;
+
+ private:
+ const GlobalValue *findGlobalValue(const Constant* CV);
+ void EmitLLVMUsedList(Constant *List);
+ void EmitXXStructorList(Constant *List);
+ void EmitGlobalConstantStruct(const ConstantStruct* CVS,
+ unsigned AddrSpace);
+ void EmitGlobalConstantArray(const ConstantArray* CVA, unsigned AddrSpace);
+ void EmitGlobalConstantVector(const ConstantVector* CP);
+ void EmitGlobalConstantFP(const ConstantFP* CFP, unsigned AddrSpace);
+ void EmitGlobalConstantLargeInt(const ConstantInt* CI, unsigned AddrSpace);
+ GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C);
+ };
+}
+
+#endif
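
The EmitAlignment comment above spells out the alignment computation in pseudocode. The following is a minimal standalone sketch of that rule, assuming the global's requested alignment has already been converted to a power-of-two bit count; it is illustrative only, not the actual AsmPrinter implementation:

#include <algorithm>

// Sketch of the documented rule: start from NumBits, let an explicit global
// alignment override it, then give ForcedAlignBits the final say.
unsigned computeEmitAlignment(unsigned NumBits, bool GVHasAlign,
                              unsigned GVAlignBits, unsigned ForcedAlignBits) {
  unsigned Align = NumBits;
  if (GVHasAlign)
    Align = GVAlignBits;                      // the global's explicit request wins
  Align = std::max(Align, ForcedAlignBits);   // ForcedAlignBits has final say
  return Align;                               // e.g. 3 means an 8-byte boundary
}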
diff --git a/include/llvm/CodeGen/BreakCriticalMachineEdge.h b/include/llvm/CodeGen/BreakCriticalMachineEdge.h
new file mode 100644
index 0000000..4861297
--- /dev/null
+++ b/include/llvm/CodeGen/BreakCriticalMachineEdge.h
@@ -0,0 +1,108 @@
+//===--------- BreakCriticalMachineEdge.h - Break critical edges ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===---------------------------------------------------------------------===//
+//
+// Helper function to break a critical machine edge.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_BREAKCRITICALMACHINEEDGE_H
+#define LLVM_CODEGEN_BREAKCRITICALMACHINEEDGE_H
+
+#include "llvm/CodeGen/MachineJumpTableInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+MachineBasicBlock* SplitCriticalMachineEdge(MachineBasicBlock* src,
+ MachineBasicBlock* dst) {
+ MachineFunction &MF = *src->getParent();
+ const BasicBlock* srcBB = src->getBasicBlock();
+
+ MachineBasicBlock* crit_mbb = MF.CreateMachineBasicBlock(srcBB);
+
+ // modify the machine control flow graph
+ src->removeSuccessor(dst);
+ src->addSuccessor(crit_mbb);
+ crit_mbb->addSuccessor(dst);
+
+ // insert the new block into the machine function.
+ MF.push_back(crit_mbb);
+
+ // insert an unconditional branch linking the new block to dst
+ const TargetMachine& TM = MF.getTarget();
+ const TargetInstrInfo* TII = TM.getInstrInfo();
+ std::vector<MachineOperand> emptyConditions;
+ TII->InsertBranch(*crit_mbb, dst, (MachineBasicBlock*)0,
+ emptyConditions);
+
+ // modify every branch in src that points to dst to point to the new
+ // machine basic block instead:
+ MachineBasicBlock::iterator mii = src->end();
+ bool found_branch = false;
+ while (mii != src->begin()) {
+ mii--;
+ // if there are no more branches, finish the loop
+ if (!mii->getDesc().isTerminator()) {
+ break;
+ }
+
+ // Scan the operands of this branch, replacing any uses of dst with
+ // crit_mbb.
+ for (unsigned i = 0, e = mii->getNumOperands(); i != e; ++i) {
+ MachineOperand & mo = mii->getOperand(i);
+ if (mo.isMBB() && mo.getMBB() == dst) {
+ found_branch = true;
+ mo.setMBB(crit_mbb);
+ }
+ }
+ }
+
+ // TODO: This is tentative. It may be necessary to fix this code. Maybe
+ // I am inserting too many gotos, but I am trusting that the asm printer
+ // will optimize the unnecessary gotos.
+ if(!found_branch) {
+ TII->InsertBranch(*src, crit_mbb, (MachineBasicBlock*)0,
+ emptyConditions);
+ }
+
+ /// Change all the phi functions in dst so that the incoming block is
+ /// crit_mbb instead of src.
+ for(mii = dst->begin(); mii != dst->end(); mii++) {
+ /// the first instructions are always phi functions.
+ if(mii->getOpcode() != TargetInstrInfo::PHI)
+ break;
+
+ // Find the operands corresponding to the source block
+ std::vector<unsigned> toRemove;
+ unsigned reg = 0;
+ for (unsigned u = 0; u != mii->getNumOperands(); ++u)
+ if (mii->getOperand(u).isMBB() &&
+ mii->getOperand(u).getMBB() == src) {
+ reg = mii->getOperand(u-1).getReg();
+ toRemove.push_back(u-1);
+ }
+ // Remove all uses of this MBB
+ for (std::vector<unsigned>::reverse_iterator I = toRemove.rbegin(),
+ E = toRemove.rend(); I != E; ++I) {
+ mii->RemoveOperand(*I+1);
+ mii->RemoveOperand(*I);
+ }
+
+ // Add a single use corresponding to the new MBB
+ mii->addOperand(MachineOperand::CreateReg(reg, false));
+ mii->addOperand(MachineOperand::CreateMBB(crit_mbb));
+ }
+
+ return crit_mbb;
+}
+
+}
+
+#endif
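
A hedged sketch of how a pass might drive the helper above: critical edges are collected first so that splitting (which rewrites successor lists) does not invalidate the iterators. The function name splitAllCriticalEdges is invented for illustration.

#include "llvm/CodeGen/MachineFunction.h"
#include <utility>
#include <vector>

// Collect (src, dst) pairs where src has several successors and dst has
// several predecessors, then split each one with the helper above.
static void splitAllCriticalEdges(MachineFunction &MF) {
  std::vector<std::pair<MachineBasicBlock*, MachineBasicBlock*> > Edges;
  for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB)
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      if (BB->succ_size() > 1 && (*SI)->pred_size() > 1)
        Edges.push_back(std::make_pair(&*BB, *SI));

  for (unsigned i = 0, e = Edges.size(); i != e; ++i)
    SplitCriticalMachineEdge(Edges[i].first, Edges[i].second);
}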
diff --git a/include/llvm/CodeGen/CallingConvLower.h b/include/llvm/CodeGen/CallingConvLower.h
new file mode 100644
index 0000000..7c83e24
--- /dev/null
+++ b/include/llvm/CodeGen/CallingConvLower.h
@@ -0,0 +1,274 @@
+//===-- llvm/CallingConvLower.h - Calling Conventions -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CCState and CCValAssign classes, used for lowering
+// and implementing calling conventions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_CALLINGCONVLOWER_H
+#define LLVM_CODEGEN_CALLINGCONVLOWER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+
+namespace llvm {
+ class TargetRegisterInfo;
+ class TargetMachine;
+ class CCState;
+ class SDNode;
+
+/// CCValAssign - Represent assignment of one arg/retval to a location.
+class CCValAssign {
+public:
+ enum LocInfo {
+ Full, // The value fills the full location.
+ SExt, // The value is sign extended in the location.
+ ZExt, // The value is zero extended in the location.
+ AExt, // The value is extended with undefined upper bits.
+ BCvt // The value is bit-converted in the location.
+ // TODO: a subset of the value is in the location.
+ };
+private:
+ /// ValNo - This is the value number being assigned (e.g. an argument number).
+ unsigned ValNo;
+
+ /// Loc is either a stack offset or a register number.
+ unsigned Loc;
+
+ /// isMem - True if this is a memory loc, false if it is a register loc.
+ bool isMem : 1;
+
+ /// isCustom - True if this arg/retval requires special handling.
+ bool isCustom : 1;
+
+ /// Information about how the value is assigned.
+ LocInfo HTP : 6;
+
+ /// ValVT - The type of the value being assigned.
+ MVT ValVT;
+
+ /// LocVT - The type of the location being assigned to.
+ MVT LocVT;
+public:
+
+ static CCValAssign getReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret.ValNo = ValNo;
+ Ret.Loc = RegNo;
+ Ret.isMem = false;
+ Ret.isCustom = false;
+ Ret.HTP = HTP;
+ Ret.ValVT = ValVT;
+ Ret.LocVT = LocVT;
+ return Ret;
+ }
+
+ static CCValAssign getCustomReg(unsigned ValNo, MVT ValVT,
+ unsigned RegNo, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret = getReg(ValNo, ValVT, RegNo, LocVT, HTP);
+ Ret.isCustom = true;
+ return Ret;
+ }
+
+ static CCValAssign getMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret.ValNo = ValNo;
+ Ret.Loc = Offset;
+ Ret.isMem = true;
+ Ret.isCustom = false;
+ Ret.HTP = HTP;
+ Ret.ValVT = ValVT;
+ Ret.LocVT = LocVT;
+ return Ret;
+ }
+
+ static CCValAssign getCustomMem(unsigned ValNo, MVT ValVT,
+ unsigned Offset, MVT LocVT,
+ LocInfo HTP) {
+ CCValAssign Ret;
+ Ret = getMem(ValNo, ValVT, Offset, LocVT, HTP);
+ Ret.isCustom = true;
+ return Ret;
+ }
+
+ unsigned getValNo() const { return ValNo; }
+ MVT getValVT() const { return ValVT; }
+
+ bool isRegLoc() const { return !isMem; }
+ bool isMemLoc() const { return isMem; }
+
+ bool needsCustom() const { return isCustom; }
+
+ unsigned getLocReg() const { assert(isRegLoc()); return Loc; }
+ unsigned getLocMemOffset() const { assert(isMemLoc()); return Loc; }
+ MVT getLocVT() const { return LocVT; }
+
+ LocInfo getLocInfo() const { return HTP; }
+};
+
+/// CCAssignFn - This function assigns a location for Val, updating State to
+/// reflect the change.
+typedef bool CCAssignFn(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ ISD::ArgFlagsTy ArgFlags, CCState &State);
+
+/// CCCustomFn - This function assigns a location for Val, possibly updating
+/// all args to reflect changes and indicates if it handled it. It must set
+/// isCustom if it handles the arg and returns true.
+typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State);
+
+/// CCState - This class holds information needed while lowering arguments and
+/// return values. It captures which registers are already assigned and which
+/// stack slots are used. It provides accessors to allocate these values.
+class CCState {
+ unsigned CallingConv;
+ bool IsVarArg;
+ const TargetMachine &TM;
+ const TargetRegisterInfo &TRI;
+ SmallVector<CCValAssign, 16> &Locs;
+
+ unsigned StackOffset;
+ SmallVector<uint32_t, 16> UsedRegs;
+public:
+ CCState(unsigned CC, bool isVarArg, const TargetMachine &TM,
+ SmallVector<CCValAssign, 16> &locs);
+
+ void addLoc(const CCValAssign &V) {
+ Locs.push_back(V);
+ }
+
+ const TargetMachine &getTarget() const { return TM; }
+ unsigned getCallingConv() const { return CallingConv; }
+ bool isVarArg() const { return IsVarArg; }
+
+ unsigned getNextStackOffset() const { return StackOffset; }
+
+ /// isAllocated - Return true if the specified register (or an alias) is
+ /// allocated.
+ bool isAllocated(unsigned Reg) const {
+ return UsedRegs[Reg/32] & (1 << (Reg&31));
+ }
+
+ /// AnalyzeFormalArguments - Analyze an ISD::FORMAL_ARGUMENTS node,
+ /// incorporating info about the formals into this state.
+ void AnalyzeFormalArguments(SDNode *TheArgs, CCAssignFn Fn);
+
+ /// AnalyzeReturn - Analyze the returned values of an ISD::RET node,
+ /// incorporating info about the result values into this state.
+ void AnalyzeReturn(SDNode *TheRet, CCAssignFn Fn);
+
+ /// AnalyzeCallOperands - Analyze an ISD::CALL node, incorporating info
+ /// about the passed values into this state.
+ void AnalyzeCallOperands(CallSDNode *TheCall, CCAssignFn Fn);
+
+ /// AnalyzeCallOperands - Same as above except it takes vectors of types
+ /// and argument flags.
+ void AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
+ SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
+ CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Analyze the return values of an ISD::CALL node,
+ /// incorporating info about the passed values into this state.
+ void AnalyzeCallResult(CallSDNode *TheCall, CCAssignFn Fn);
+
+ /// AnalyzeCallResult - Same as above except it's specialized for calls which
+ /// produce a single value.
+ void AnalyzeCallResult(MVT VT, CCAssignFn Fn);
+
+ /// getFirstUnallocated - Return the first unallocated register in the set, or
+ /// NumRegs if they are all allocated.
+ unsigned getFirstUnallocated(const unsigned *Regs, unsigned NumRegs) const {
+ for (unsigned i = 0; i != NumRegs; ++i)
+ if (!isAllocated(Regs[i]))
+ return i;
+ return NumRegs;
+ }
+
+ /// AllocateReg - Attempt to allocate one register. If it is not available,
+ /// return zero. Otherwise, return the register, marking it and any aliases
+ /// as allocated.
+ unsigned AllocateReg(unsigned Reg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with extra register to be shadowed.
+ unsigned AllocateReg(unsigned Reg, unsigned ShadowReg) {
+ if (isAllocated(Reg)) return 0;
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateReg - Attempt to allocate one of the specified registers. If none
+ /// are available, return zero. Otherwise, return the first one available,
+ /// marking it and any aliases as allocated.
+ unsigned AllocateReg(const unsigned *Regs, unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc];
+ MarkAllocated(Reg);
+ return Reg;
+ }
+
+ /// Version of AllocateReg with list of registers to be shadowed.
+ unsigned AllocateReg(const unsigned *Regs, const unsigned *ShadowRegs,
+ unsigned NumRegs) {
+ unsigned FirstUnalloc = getFirstUnallocated(Regs, NumRegs);
+ if (FirstUnalloc == NumRegs)
+ return 0; // Didn't find the reg.
+
+ // Mark the register and any aliases as allocated.
+ unsigned Reg = Regs[FirstUnalloc], ShadowReg = ShadowRegs[FirstUnalloc];
+ MarkAllocated(Reg);
+ MarkAllocated(ShadowReg);
+ return Reg;
+ }
+
+ /// AllocateStack - Allocate a chunk of stack space with the specified size
+ /// and alignment.
+ unsigned AllocateStack(unsigned Size, unsigned Align) {
+ assert(Align && ((Align-1) & Align) == 0); // Align is power of 2.
+ StackOffset = ((StackOffset + Align-1) & ~(Align-1));
+ unsigned Result = StackOffset;
+ StackOffset += Size;
+ return Result;
+ }
+
+ // HandleByVal - Allocate a stack slot large enough to pass an argument by
+ // value. The size and alignment information of the argument is encoded in its
+ // parameter attribute.
+ void HandleByVal(unsigned ValNo, MVT ValVT,
+ MVT LocVT, CCValAssign::LocInfo LocInfo,
+ int MinSize, int MinAlign, ISD::ArgFlagsTy ArgFlags);
+
+private:
+ /// MarkAllocated - Mark a register and all of its aliases as allocated.
+ void MarkAllocated(unsigned Reg);
+};
+
+
+
+} // end namespace llvm
+
+#endif
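
To make the CCAssignFn contract above concrete, here is a hypothetical assignment function for a toy convention; the register numbers are placeholders, not a real target. It puts the first four values in registers and spills the rest to 4-byte stack slots, and it returns false once the value has been handled.

// Hypothetical calling-convention routine; register numbers are made up.
static bool CC_Toy(unsigned ValNo, MVT ValVT, MVT LocVT,
                   CCValAssign::LocInfo LocInfo,
                   ISD::ArgFlagsTy ArgFlags, CCState &State) {
  static const unsigned ArgRegs[] = { 10, 11, 12, 13 };   // placeholder GPRs
  if (unsigned Reg = State.AllocateReg(ArgRegs, 4)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;                                         // handled in a register
  }
  unsigned Offset = State.AllocateStack(4, 4);            // 4 bytes, 4-byte aligned
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;                                           // handled in memory
}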
diff --git a/include/llvm/CodeGen/DAGISelHeader.h b/include/llvm/CodeGen/DAGISelHeader.h
new file mode 100644
index 0000000..b2acbc1
--- /dev/null
+++ b/include/llvm/CodeGen/DAGISelHeader.h
@@ -0,0 +1,136 @@
+//==-llvm/CodeGen/DAGISelHeader.h - Common DAG ISel definitions -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+ // This file provides definitions of the common, target-independent methods and
+ // data that are used by SelectionDAG-based instruction selectors.
+//
+// *** NOTE: This file is #included into the middle of the target
+// instruction selector class. These functions are really methods.
+// This is a little awkward, but it allows this code to be shared
+// by all the targets while still being able to call into
+// target-specific code without using a virtual function call.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DAGISEL_HEADER_H
+#define LLVM_CODEGEN_DAGISEL_HEADER_H
+
+/// ISelPosition - Node iterator marking the current position of
+ /// instruction selection as it proceeds through the topologically-sorted
+/// node list.
+SelectionDAG::allnodes_iterator ISelPosition;
+
+/// IsChainCompatible - Returns true if Chain is Op or Chain does
+/// not reach Op.
+static bool IsChainCompatible(SDNode *Chain, SDNode *Op) {
+ if (Chain->getOpcode() == ISD::EntryToken)
+ return true;
+ if (Chain->getOpcode() == ISD::TokenFactor)
+ return false;
+ if (Chain->getNumOperands() > 0) {
+ SDValue C0 = Chain->getOperand(0);
+ if (C0.getValueType() == MVT::Other)
+ return C0.getNode() != Op && IsChainCompatible(C0.getNode(), Op);
+ }
+ return true;
+}
+
+ /// ISelUpdater - Helper class to handle updates of the
+ /// instruction selection graph.
+class VISIBILITY_HIDDEN ISelUpdater : public SelectionDAG::DAGUpdateListener {
+ SelectionDAG::allnodes_iterator &ISelPosition;
+public:
+ explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp)
+ : ISelPosition(isp) {}
+
+ /// NodeDeleted - Handle nodes deleted from the graph. If the
+ /// node being deleted is the current ISelPosition node, update
+ /// ISelPosition.
+ ///
+ virtual void NodeDeleted(SDNode *N, SDNode *E) {
+ if (ISelPosition == SelectionDAG::allnodes_iterator(N))
+ ++ISelPosition;
+ }
+
+ /// NodeUpdated - Ignore updates for now.
+ virtual void NodeUpdated(SDNode *N) {}
+};
+
+/// ReplaceUses - replace all uses of the old node F with the use
+/// of the new node T.
+void ReplaceUses(SDValue F, SDValue T) DISABLE_INLINE {
+ ISelUpdater ISU(ISelPosition);
+ CurDAG->ReplaceAllUsesOfValueWith(F, T, &ISU);
+}
+
+/// ReplaceUses - replace all uses of the old nodes F with the use
+/// of the new nodes T.
+void ReplaceUses(const SDValue *F, const SDValue *T,
+ unsigned Num) DISABLE_INLINE {
+ ISelUpdater ISU(ISelPosition);
+ CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num, &ISU);
+}
+
+/// ReplaceUses - replace all uses of the old node F with the use
+/// of the new node T.
+void ReplaceUses(SDNode *F, SDNode *T) DISABLE_INLINE {
+ ISelUpdater ISU(ISelPosition);
+ CurDAG->ReplaceAllUsesWith(F, T, &ISU);
+}
+
+/// SelectRoot - Top level entry to DAG instruction selector.
+/// Selects instructions starting at the root of the current DAG.
+void SelectRoot(SelectionDAG &DAG) {
+ SelectRootInit();
+
+ // Create a dummy node (which is not added to allnodes), that adds
+ // a reference to the root node, preventing it from being deleted,
+ // and tracking any changes of the root.
+ HandleSDNode Dummy(CurDAG->getRoot());
+ ISelPosition = next(SelectionDAG::allnodes_iterator(CurDAG->getRoot().getNode()));
+
+ // The AllNodes list is now topologically sorted. Visit the
+ // nodes by starting at the end of the list (the root of the
+ // graph) and proceeding back toward the beginning (the entry
+ // node).
+ while (ISelPosition != CurDAG->allnodes_begin()) {
+ SDNode *Node = --ISelPosition;
+ // Skip dead nodes. DAGCombiner is expected to eliminate all dead nodes,
+ // but there are currently some corner cases that it misses. Also, this
+ // makes it theoretically possible to disable the DAGCombiner.
+ if (Node->use_empty())
+ continue;
+#if 0
+ DAG.setSubgraphColor(Node, "red");
+#endif
+ SDNode *ResNode = Select(SDValue(Node, 0));
+ // If node should not be replaced,
+ // continue with the next one.
+ if (ResNode == Node)
+ continue;
+ // Replace node.
+ if (ResNode) {
+#if 0
+ DAG.setSubgraphColor(ResNode, "yellow");
+ DAG.setSubgraphColor(ResNode, "black");
+#endif
+ ReplaceUses(Node, ResNode);
+ }
+ // If after the replacement this node is not used any more,
+ // remove this dead node.
+ if (Node->use_empty()) { // Don't delete EntryToken, etc.
+ ISelUpdater ISU(ISelPosition);
+ CurDAG->RemoveDeadNode(Node, &ISU);
+ }
+ }
+
+ CurDAG->setRoot(Dummy.getValue());
+}
+
+#endif /* LLVM_CODEGEN_DAGISEL_HEADER_H */
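
Since this header is meant to be #included into the body of a target's instruction selector class (see the note at the top of the file), a rough sketch of the expected shape follows; the Foo* names are invented for illustration and the constructor details are elided.

// Invented target names; only the inclusion point is the interesting part.
class FooDAGToDAGISel : public SelectionDAGISel {
  // ... target-specific state and constructor elided ...
public:
  // Target-specific per-node selection routine invoked from SelectRoot().
  SDNode *Select(SDValue Op);

  // Pulls SelectRoot(), ReplaceUses(), IsChainCompatible(), ... into the
  // class as member functions.
#include "llvm/CodeGen/DAGISelHeader.h"
};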
diff --git a/include/llvm/CodeGen/DebugLoc.h b/include/llvm/CodeGen/DebugLoc.h
new file mode 100644
index 0000000..77e6733
--- /dev/null
+++ b/include/llvm/CodeGen/DebugLoc.h
@@ -0,0 +1,101 @@
+//===---- llvm/CodeGen/DebugLoc.h - Debug Location Information --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a number of light weight data structures used by the code
+// generator to describe and track debug location information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DEBUGLOC_H
+#define LLVM_CODEGEN_DEBUGLOC_H
+
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace llvm {
+ class GlobalVariable;
+
+ /// DebugLocTuple - Debug location tuple of a compile unit, line and column.
+ ///
+ struct DebugLocTuple {
+ GlobalVariable *CompileUnit;
+ unsigned Line, Col;
+
+ DebugLocTuple(GlobalVariable *v, unsigned l, unsigned c)
+ : CompileUnit(v), Line(l), Col(c) {};
+
+ bool operator==(const DebugLocTuple &DLT) const {
+ return CompileUnit == DLT.CompileUnit &&
+ Line == DLT.Line && Col == DLT.Col;
+ }
+ bool operator!=(const DebugLocTuple &DLT) const {
+ return !(*this == DLT);
+ }
+ };
+
+ /// DebugLoc - Debug location id. This is carried by SDNode and MachineInstr
+ /// to index into a vector of unique debug location tuples.
+ class DebugLoc {
+ unsigned Idx;
+
+ public:
+ DebugLoc() : Idx(~0U) {} // Defaults to invalid.
+
+ static DebugLoc getUnknownLoc() { DebugLoc L; L.Idx = ~0U; return L; }
+ static DebugLoc get(unsigned idx) { DebugLoc L; L.Idx = idx; return L; }
+
+ unsigned getIndex() const { return Idx; }
+
+ /// isUnknown - Return true if there is no debug info for the SDNode /
+ /// MachineInstr.
+ bool isUnknown() const { return Idx == ~0U; }
+
+ bool operator==(const DebugLoc &DL) const { return Idx == DL.Idx; }
+ bool operator!=(const DebugLoc &DL) const { return !(*this == DL); }
+ };
+
+ // Specialize DenseMapInfo for DebugLocTuple.
+ template<> struct DenseMapInfo<DebugLocTuple> {
+ static inline DebugLocTuple getEmptyKey() {
+ return DebugLocTuple(0, ~0U, ~0U);
+ }
+ static inline DebugLocTuple getTombstoneKey() {
+ return DebugLocTuple((GlobalVariable*)~1U, ~1U, ~1U);
+ }
+ static unsigned getHashValue(const DebugLocTuple &Val) {
+ return DenseMapInfo<GlobalVariable*>::getHashValue(Val.CompileUnit) ^
+ DenseMapInfo<unsigned>::getHashValue(Val.Line) ^
+ DenseMapInfo<unsigned>::getHashValue(Val.Col);
+ }
+ static bool isEqual(const DebugLocTuple &LHS, const DebugLocTuple &RHS) {
+ return LHS.CompileUnit == RHS.CompileUnit &&
+ LHS.Line == RHS.Line &&
+ LHS.Col == RHS.Col;
+ }
+
+ static bool isPod() { return true; }
+ };
+
+ /// DebugLocTracker - This class tracks debug location information.
+ ///
+ struct DebugLocTracker {
+ /// DebugLocations - A vector of unique DebugLocTuple's.
+ ///
+ std::vector<DebugLocTuple> DebugLocations;
+
+ /// DebugIdMap - This maps DebugLocTuple's to indices into the
+ /// DebugLocations vector.
+ DenseMap<DebugLocTuple, unsigned> DebugIdMap;
+
+ DebugLocTracker() {}
+ };
+
+} // end namespace llvm
+
+#endif /* LLVM_CODEGEN_DEBUGLOC_H */
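
As an illustration of how DebugLocTracker and DebugLoc are meant to work together, the sketch below interns a DebugLocTuple and hands back its compact index; the helper name is hypothetical, not part of this header.

// Hypothetical helper: reuse an existing index if the tuple was seen before,
// otherwise append it and record the new index in DebugIdMap.
DebugLoc getOrCreateDebugLoc(DebugLocTracker &Tracker,
                             GlobalVariable *CompileUnit,
                             unsigned Line, unsigned Col) {
  DebugLocTuple Tuple(CompileUnit, Line, Col);
  DenseMap<DebugLocTuple, unsigned>::iterator I =
      Tracker.DebugIdMap.find(Tuple);
  if (I != Tracker.DebugIdMap.end())
    return DebugLoc::get(I->second);       // already interned

  unsigned Idx = Tracker.DebugLocations.size();
  Tracker.DebugLocations.push_back(Tuple);
  Tracker.DebugIdMap[Tuple] = Idx;
  return DebugLoc::get(Idx);
}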
diff --git a/include/llvm/CodeGen/DwarfWriter.h b/include/llvm/CodeGen/DwarfWriter.h
new file mode 100644
index 0000000..facd5f6
--- /dev/null
+++ b/include/llvm/CodeGen/DwarfWriter.h
@@ -0,0 +1,123 @@
+//===-- llvm/CodeGen/DwarfWriter.h - Dwarf Framework ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains support for writing Dwarf debug and exception info into
+ // asm files. For details on the DWARF 3 specification, see the DWARF Debugging
+ // Information Format V.3 reference manual (http://dwarf.freestandards.org).
+//
+// The role of the Dwarf Writer class is to extract information from the
+ // MachineModuleInfo object, organize it in Dwarf form and then emit it into
+ // the current asm file using data and high-level Dwarf directives.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_DWARFWRITER_H
+#define LLVM_CODEGEN_DWARFWRITER_H
+
+#include "llvm/Pass.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class AsmPrinter;
+class DwarfDebug;
+class DwarfException;
+class MachineModuleInfo;
+class MachineFunction;
+class MachineInstr;
+class Value;
+class Module;
+class GlobalVariable;
+class TargetAsmInfo;
+class raw_ostream;
+class Instruction;
+class DICompileUnit;
+class DISubprogram;
+class DIVariable;
+
+//===----------------------------------------------------------------------===//
+// DwarfWriter - Emits Dwarf debug and exception handling directives.
+//
+
+class DwarfWriter : public ImmutablePass {
+private:
+ /// DD - Provides the DwarfWriter debug implementation.
+ ///
+ DwarfDebug *DD;
+
+ /// DE - Provides the DwarfWriter exception implementation.
+ ///
+ DwarfException *DE;
+
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ DwarfWriter();
+ virtual ~DwarfWriter();
+
+ //===--------------------------------------------------------------------===//
+ // Main entry points.
+ //
+
+ /// BeginModule - Emit all Dwarf sections that should come prior to the
+ /// content.
+ void BeginModule(Module *M, MachineModuleInfo *MMI, raw_ostream &OS,
+ AsmPrinter *A, const TargetAsmInfo *T);
+
+ /// EndModule - Emit all Dwarf sections that should come after the content.
+ ///
+ void EndModule();
+
+ /// BeginFunction - Gather pre-function debug information. Assumes it is
+ /// emitted immediately after the function entry point.
+ void BeginFunction(MachineFunction *MF);
+
+ /// EndFunction - Gather and emit post-function debug information.
+ ///
+ void EndFunction(MachineFunction *MF);
+
+ /// RecordSourceLine - Register a source line with debug info. Returns a
+ /// unique label ID used to generate a label and provide correspondence to
+ /// the source line list.
+ unsigned RecordSourceLine(unsigned Line, unsigned Col, DICompileUnit CU);
+
+ /// RecordRegionStart - Indicate the start of a region.
+ unsigned RecordRegionStart(GlobalVariable *V);
+
+ /// RecordRegionEnd - Indicate the end of a region.
+ unsigned RecordRegionEnd(GlobalVariable *V);
+
+ /// getRecordSourceLineCount - Count source lines.
+ unsigned getRecordSourceLineCount();
+
+ /// RecordVariable - Indicate the declaration of a local variable.
+ ///
+ void RecordVariable(GlobalVariable *GV, unsigned FrameIndex,
+ const MachineInstr *MI);
+
+ /// ShouldEmitDwarfDebug - Returns true if Dwarf debugging declarations should
+ /// be emitted.
+ bool ShouldEmitDwarfDebug() const;
+
+ /// RecordInlinedFnStart - Indicate the start of an inlined function.
+ unsigned RecordInlinedFnStart(DISubprogram SP, DICompileUnit CU,
+ unsigned Line, unsigned Col);
+
+ /// RecordInlinedFnEnd - Indicate the end of an inlined subroutine.
+ unsigned RecordInlinedFnEnd(DISubprogram SP);
+
+ /// RecordVariableScope - Record scope for the variable declared by
+ /// DeclareMI. DeclareMI must describe TargetInstrInfo::DECLARE.
+ void RecordVariableScope(DIVariable &DV, const MachineInstr *DeclareMI);
+};
+
+
+} // end llvm namespace
+
+#endif
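
The entry points above are meant to bracket the assembly output. Below is a hedged sketch of the call order; the driver function itself is hypothetical, and the real sequencing lives in the AsmPrinter/code generator driver.

// Hypothetical driver showing the intended bracketing of output.
void emitModuleWithDebugInfo(DwarfWriter &DW, Module *M,
                             MachineModuleInfo *MMI, raw_ostream &OS,
                             AsmPrinter *AP, const TargetAsmInfo *TAI,
                             MachineFunction &MF) {
  DW.BeginModule(M, MMI, OS, AP, TAI);   // sections that precede the content
  DW.BeginFunction(&MF);                 // right after the function entry point
  // ... the assembly for MF is printed here ...
  DW.EndFunction(&MF);                   // post-function debug information
  DW.EndModule();                        // sections that follow the content
}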
diff --git a/include/llvm/CodeGen/ELFRelocation.h b/include/llvm/CodeGen/ELFRelocation.h
new file mode 100644
index 0000000..c3f88f1
--- /dev/null
+++ b/include/llvm/CodeGen/ELFRelocation.h
@@ -0,0 +1,51 @@
+//=== ELFRelocation.h - ELF Relocation Info ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ELFRelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_ELF_RELOCATION_H
+#define LLVM_CODEGEN_ELF_RELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+ /// ELFRelocation - This class contains all the information necessary to
+ /// generate any 32-bit or 64-bit ELF relocation entry.
+ class ELFRelocation {
+ uint64_t r_offset; // offset in the section of the object this applies to
+ uint32_t r_symidx; // symbol table index of the symbol to use
+ uint32_t r_type; // machine specific relocation type
+ int64_t r_add; // explicit relocation addend
+ bool r_rela; // if true then the addend is part of the entry
+ // otherwise the addend is at the location specified
+ // by r_offset
+ public:
+
+ uint64_t getInfo(bool is64Bit = false) const {
+ if (is64Bit)
+ return ((uint64_t)r_symidx << 32) + ((uint64_t)r_type & 0xFFFFFFFFL);
+ else
+ return (r_symidx << 8) + (r_type & 0xFFL);
+ }
+
+ uint64_t getOffset() const { return r_offset; }
+ uint64_t getAddress() const { return r_add; }
+
+ ELFRelocation(uint64_t off, uint32_t sym, uint32_t type,
+ bool rela = true, int64_t addend = 0) :
+ r_offset(off), r_symidx(sym), r_type(type),
+ r_add(addend), r_rela(rela) {}
+ };
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_ELF_RELOCATION_H
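
A short usage sketch of the class above; the offset, symbol index, type and addend values are placeholders. As the code shows, getInfo() packs the symbol index and type as (sym << 8) + type for 32-bit ELF and (sym << 32) + type for 64-bit ELF.

#include <cassert>

void elfRelocationInfoExample() {
  // Placeholder values; a real writer would take these from the object layout.
  ELFRelocation Rel(/*offset=*/0x40, /*symidx=*/7, /*type=*/2,
                    /*rela=*/true, /*addend=*/-4);

  assert(Rel.getInfo(false) == ((7u << 8) + 2));            // 32-bit packing
  assert(Rel.getInfo(true)  == (((uint64_t)7 << 32) + 2));  // 64-bit packing
  assert(Rel.getOffset() == 0x40);
}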
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
new file mode 100644
index 0000000..38c1710
--- /dev/null
+++ b/include/llvm/CodeGen/FastISel.h
@@ -0,0 +1,315 @@
+//===-- FastISel.h - Definition of the FastISel class ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FastISel class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FASTISEL_H
+#define LLVM_CODEGEN_FASTISEL_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/CodeGen/DebugLoc.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+
+namespace llvm {
+
+class AllocaInst;
+class ConstantFP;
+class Instruction;
+class MachineBasicBlock;
+class MachineConstantPool;
+class MachineFunction;
+class MachineFrameInfo;
+class MachineModuleInfo;
+class DwarfWriter;
+class MachineRegisterInfo;
+class TargetData;
+class TargetInstrInfo;
+class TargetLowering;
+class TargetMachine;
+class TargetRegisterClass;
+
+/// FastISel - This is a fast-path instruction selection class that
+/// generates poor code and doesn't support illegal types or non-trivial
+/// lowering, but runs quickly.
+class FastISel {
+protected:
+ MachineBasicBlock *MBB;
+ DenseMap<const Value *, unsigned> LocalValueMap;
+ DenseMap<const Value *, unsigned> &ValueMap;
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &MBBMap;
+ DenseMap<const AllocaInst *, int> &StaticAllocaMap;
+#ifndef NDEBUG
+ SmallSet<Instruction*, 8> &CatchInfoLost;
+#endif
+ MachineFunction &MF;
+ MachineModuleInfo *MMI;
+ DwarfWriter *DW;
+ MachineRegisterInfo &MRI;
+ MachineFrameInfo &MFI;
+ MachineConstantPool &MCP;
+ DebugLoc DL;
+ const TargetMachine &TM;
+ const TargetData &TD;
+ const TargetInstrInfo &TII;
+ const TargetLowering &TLI;
+
+public:
+ /// startNewBlock - Set the current block to which generated machine
+ /// instructions will be appended, and clear the local CSE map.
+ ///
+ void startNewBlock(MachineBasicBlock *mbb) {
+ setCurrentBlock(mbb);
+ LocalValueMap.clear();
+ }
+
+ /// setCurrentBlock - Set the current block to which generated machine
+ /// instructions will be appended.
+ ///
+ void setCurrentBlock(MachineBasicBlock *mbb) {
+ MBB = mbb;
+ }
+
+ /// setCurDebugLoc - Set the current debug location information, which is used
+ /// when creating a machine instruction.
+ ///
+ void setCurDebugLoc(DebugLoc dl) { DL = dl; }
+
+ /// getCurDebugLoc() - Return current debug location information.
+ DebugLoc getCurDebugLoc() const { return DL; }
+
+ /// SelectInstruction - Do "fast" instruction selection for the given
+ /// LLVM IR instruction, and append generated machine instructions to
+ /// the current block. Return true if selection was successful.
+ ///
+ bool SelectInstruction(Instruction *I);
+
+ /// SelectOperator - Do "fast" instruction selection for the given
+ /// LLVM IR operator (Instruction or ConstantExpr), and append
+ /// generated machine instructions to the current block. Return true
+ /// if selection was successful.
+ ///
+ bool SelectOperator(User *I, unsigned Opcode);
+
+ /// TargetSelectInstruction - This method is called by target-independent
+ /// code when the normal FastISel process fails to select an instruction.
+ /// This gives targets a chance to emit code for anything that doesn't
+ /// fit into FastISel's framework. It returns true if it was successful.
+ ///
+ virtual bool
+ TargetSelectInstruction(Instruction *I) = 0;
+
+ /// getRegForValue - Create a virtual register and arrange for it to
+ /// be assigned the value for the given LLVM value.
+ unsigned getRegForValue(Value *V);
+
+ /// lookUpRegForValue - Look up the value to see if its value is already
+ /// cached in a register. It may be defined by instructions across blocks or
+ /// defined locally.
+ unsigned lookUpRegForValue(Value *V);
+
+ /// getRegForGEPIndex - This is a wrapper around getRegForValue that also
+ /// takes care of truncating or sign-extending the given getelementptr
+ /// index value.
+ unsigned getRegForGEPIndex(Value *V);
+
+ virtual ~FastISel();
+
+protected:
+ FastISel(MachineFunction &mf,
+ MachineModuleInfo *mmi,
+ DwarfWriter *dw,
+ DenseMap<const Value *, unsigned> &vm,
+ DenseMap<const BasicBlock *, MachineBasicBlock *> &bm,
+ DenseMap<const AllocaInst *, int> &am
+#ifndef NDEBUG
+ , SmallSet<Instruction*, 8> &cil
+#endif
+ );
+
+ /// FastEmit_ - This method is called by target-independent code
+ /// to request that an instruction with the given type and opcode
+ /// be emitted.
+ virtual unsigned FastEmit_(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode);
+
+ /// FastEmit_r - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// register operand be emitted.
+ ///
+ virtual unsigned FastEmit_r(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode, unsigned Op0);
+
+ /// FastEmit_rr - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// register operands be emitted.
+ ///
+ virtual unsigned FastEmit_rr(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ unsigned Op0, unsigned Op1);
+
+ /// FastEmit_ri - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// register and immediate operands be emitted.
+ ///
+ virtual unsigned FastEmit_ri(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ unsigned Op0, uint64_t Imm);
+
+ /// FastEmit_rf - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// register and floating-point immediate operands be emitted.
+ ///
+ virtual unsigned FastEmit_rf(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ unsigned Op0, ConstantFP *FPImm);
+
+ /// FastEmit_rri - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// register and immediate operands be emitted.
+ ///
+ virtual unsigned FastEmit_rri(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ unsigned Op0, unsigned Op1, uint64_t Imm);
+
+ /// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
+ /// to emit an instruction with an immediate operand using FastEmit_ri.
+ /// If that fails, it materializes the immediate into a register and tries
+ /// FastEmit_rr instead.
+ unsigned FastEmit_ri_(MVT::SimpleValueType VT,
+ ISD::NodeType Opcode,
+ unsigned Op0, uint64_t Imm,
+ MVT::SimpleValueType ImmType);
+
+ /// FastEmit_rf_ - This method is a wrapper of FastEmit_rf. It first tries
+ /// to emit an instruction with an immediate operand using FastEmit_rf.
+ /// If that fails, it materializes the immediate into a register and tries
+ /// FastEmit_rr instead.
+ unsigned FastEmit_rf_(MVT::SimpleValueType VT,
+ ISD::NodeType Opcode,
+ unsigned Op0, ConstantFP *FPImm,
+ MVT::SimpleValueType ImmType);
+
+ /// FastEmit_i - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// immediate operand be emitted.
+ virtual unsigned FastEmit_i(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ uint64_t Imm);
+
+ /// FastEmit_f - This method is called by target-independent code
+ /// to request that an instruction with the given type, opcode, and
+ /// floating-point immediate operand be emitted.
+ virtual unsigned FastEmit_f(MVT::SimpleValueType VT,
+ MVT::SimpleValueType RetVT,
+ ISD::NodeType Opcode,
+ ConstantFP *FPImm);
+
+ /// FastEmitInst_ - Emit a MachineInstr with no operands and a
+ /// result register in the given register class.
+ ///
+ unsigned FastEmitInst_(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC);
+
+ /// FastEmitInst_r - Emit a MachineInstr with one register operand
+ /// and a result register in the given register class.
+ ///
+ unsigned FastEmitInst_r(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0);
+
+ /// FastEmitInst_rr - Emit a MachineInstr with two register operands
+ /// and a result register in the given register class.
+ ///
+ unsigned FastEmitInst_rr(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, unsigned Op1);
+
+ /// FastEmitInst_ri - Emit a MachineInstr with a register operand, an
+ /// immediate, and a result register in the given register class.
+ ///
+ unsigned FastEmitInst_ri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, uint64_t Imm);
+
+ /// FastEmitInst_rf - Emit a MachineInstr with a register operand, a
+ /// floating-point immediate, and a result register in the given register
+ /// class.
+ ///
+ unsigned FastEmitInst_rf(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, ConstantFP *FPImm);
+
+ /// FastEmitInst_rri - Emit a MachineInstr with two register operands,
+ /// an immediate, and a result register in the given register class.
+ ///
+ unsigned FastEmitInst_rri(unsigned MachineInstOpcode,
+ const TargetRegisterClass *RC,
+ unsigned Op0, unsigned Op1, uint64_t Imm);
+
+ /// FastEmitInst_i - Emit a MachineInstr with a single immediate
+ /// operand, and a result register in the given register class.
+ unsigned FastEmitInst_i(unsigned MachineInstrOpcode,
+ const TargetRegisterClass *RC,
+ uint64_t Imm);
+
+ /// FastEmitInst_extractsubreg - Emit a MachineInstr for an extract_subreg
+ /// from a specified index of a superregister to a specified type.
+ unsigned FastEmitInst_extractsubreg(MVT::SimpleValueType RetVT,
+ unsigned Op0, uint32_t Idx);
+
+ /// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
+ /// with all but the least significant bit set to zero.
+ unsigned FastEmitZExtFromI1(MVT::SimpleValueType VT,
+ unsigned Op);
+
+ /// FastEmitBranch - Emit an unconditional branch to the given block,
+ /// unless it is the immediate (fall-through) successor, and update
+ /// the CFG.
+ void FastEmitBranch(MachineBasicBlock *MBB);
+
+ unsigned UpdateValueMap(Value* I, unsigned Reg);
+
+ unsigned createResultReg(const TargetRegisterClass *RC);
+
+ /// TargetMaterializeConstant - Emit a constant in a register using
+ /// target-specific logic, such as constant pool loads.
+ virtual unsigned TargetMaterializeConstant(Constant* C) {
+ return 0;
+ }
+
+ /// TargetMaterializeAlloca - Emit an alloca address in a register using
+ /// target-specific logic.
+ virtual unsigned TargetMaterializeAlloca(AllocaInst* C) {
+ return 0;
+ }
+
+private:
+ bool SelectBinaryOp(User *I, ISD::NodeType ISDOpcode);
+
+ bool SelectGetElementPtr(User *I);
+
+ bool SelectCall(User *I);
+
+ bool SelectBitCast(User *I);
+
+ bool SelectCast(User *I, ISD::NodeType Opcode);
+};
+
+}
+
+#endif
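
A minimal sketch of how a target's FastISel subclass might implement one of the FastEmit_* hooks above by delegating to the FastEmitInst_* helpers; the target opcode and register class names are placeholders, not part of this patch:

  // Hypothetical target hook: materialize a 32-bit integer constant.
  unsigned HypoTargetFastISel::FastEmit_i(MVT::SimpleValueType VT,
                                          MVT::SimpleValueType RetVT,
                                          ISD::NodeType Opcode,
                                          uint64_t Imm) {
    // Returning 0 means "not handled"; the caller then falls back to the
    // regular SelectionDAG instruction selector.
    if (Opcode != ISD::Constant || VT != MVT::i32)
      return 0;
    // MOV32ri and GR32RegClass stand in for whatever the target defines.
    return FastEmitInst_i(HypoTarget::MOV32ri, &HypoTarget::GR32RegClass, Imm);
  }
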
diff --git a/include/llvm/CodeGen/FileWriters.h b/include/llvm/CodeGen/FileWriters.h
new file mode 100644
index 0000000..b3781e0
--- /dev/null
+++ b/include/llvm/CodeGen/FileWriters.h
@@ -0,0 +1,31 @@
+//===-- FileWriters.h - File Writers Creation Functions ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Functions to add the various file writer passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_FILEWRITERS_H
+#define LLVM_CODEGEN_FILEWRITERS_H
+
+namespace llvm {
+
+ class PassManagerBase;
+ class MachineCodeEmitter;
+ class TargetMachine;
+ class raw_ostream;
+
+ MachineCodeEmitter *AddELFWriter(PassManagerBase &FPM, raw_ostream &O,
+ TargetMachine &TM);
+ MachineCodeEmitter *AddMachOWriter(PassManagerBase &FPM, raw_ostream &O,
+ TargetMachine &TM);
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_FILEWRITERS_H
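
An assumed usage sketch, not part of the patch: a tool requesting ELF output adds the writer first and then hands the returned emitter to the target's code emission passes (PM, Out and Target are the tool's PassManager, output stream and TargetMachine).

  MachineCodeEmitter *MCE = AddELFWriter(PM, Out, Target);
  // The target's file-emission passes are then configured to write through
  // MCE; the exact TargetMachine call for that varies between revisions.
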
diff --git a/include/llvm/CodeGen/GCMetadata.h b/include/llvm/CodeGen/GCMetadata.h
new file mode 100644
index 0000000..e94aba3
--- /dev/null
+++ b/include/llvm/CodeGen/GCMetadata.h
@@ -0,0 +1,192 @@
+//===-- GCMetadata.h - Garbage collector metadata -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the GCFunctionInfo and GCModuleInfo classes, which are
+// used as a communication channel from the target code generator to the target
+// garbage collectors. This interface allows code generators and garbage
+// collectors to be developed independently.
+//
+// The GCFunctionInfo class logs the data necessary to build a type accurate
+// stack map. The code generator outputs:
+//
+// - Safe points as specified by the GCStrategy's NeededSafePoints.
+// - Stack offsets for GC roots, as specified by calls to llvm.gcroot
+//
+// As a refinement, liveness analysis calculates the set of live roots at each
+// safe point. Liveness analysis is not presently performed by the code
+// generator, so all roots are assumed live.
+//
+// GCModuleInfo simply collects GCFunctionInfo instances for each Function as
+// they are compiled. This accretion is necessary for collectors which must emit
+// a stack map for the compilation unit as a whole. Therefore, GCFunctionInfo
+// outlives the MachineFunction from which it is derived and must not refer to
+// any code generator data structures.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATA_H
+#define LLVM_CODEGEN_GCMETADATA_H
+
+#include "llvm/Pass.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+namespace llvm {
+
+ class AsmPrinter;
+ class GCStrategy;
+ class Constant;
+ class TargetAsmInfo;
+
+
+ namespace GC {
+ /// PointKind - The type of a collector-safe point.
+ ///
+ enum PointKind {
+ Loop, //< Instr is a loop (backwards branch).
+ Return, //< Instr is a return instruction.
+ PreCall, //< Instr is a call instruction.
+ PostCall //< Instr is the return address of a call.
+ };
+ }
+
+ /// GCPoint - Metadata for a collector-safe point in machine code.
+ ///
+ struct GCPoint {
+ GC::PointKind Kind; //< The kind of the safe point.
+ unsigned Num; //< Usually a label.
+
+ GCPoint(GC::PointKind K, unsigned N) : Kind(K), Num(N) {}
+ };
+
+ /// GCRoot - Metadata for a pointer to an object managed by the garbage
+ /// collector.
+ struct GCRoot {
+ int Num; //< Usually a frame index.
+ int StackOffset; //< Offset from the stack pointer.
+ Constant *Metadata; //< Metadata straight from the call to llvm.gcroot.
+
+ GCRoot(int N, Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
+ };
+
+
+ /// GCFunctionInfo - Garbage collection metadata for a single function.
+ ///
+ class GCFunctionInfo {
+ public:
+ typedef std::vector<GCPoint>::iterator iterator;
+ typedef std::vector<GCRoot>::iterator roots_iterator;
+ typedef std::vector<GCRoot>::const_iterator live_iterator;
+
+ private:
+ const Function &F;
+ GCStrategy &S;
+ uint64_t FrameSize;
+ std::vector<GCRoot> Roots;
+ std::vector<GCPoint> SafePoints;
+
+ // FIXME: Liveness. A 2D BitVector, perhaps?
+ //
+ // BitVector Liveness;
+ //
+ // bool islive(int point, int root) =
+ // Liveness[point * SafePoints.size() + root]
+ //
+ // The bit vector is the more compact representation where >3.2% of roots
+ // are live per safe point (1.5% on 64-bit hosts).
+
+ public:
+ GCFunctionInfo(const Function &F, GCStrategy &S);
+ ~GCFunctionInfo();
+
+ /// getFunction - Return the function to which this metadata applies.
+ ///
+ const Function &getFunction() const { return F; }
+
+ /// getStrategy - Return the GC strategy for the function.
+ ///
+ GCStrategy &getStrategy() { return S; }
+
+ /// addStackRoot - Registers a root that lives on the stack. Num is the
+ /// stack object ID for the alloca (if the code generator is
+    /// using MachineFrameInfo).
+ void addStackRoot(int Num, Constant *Metadata) {
+ Roots.push_back(GCRoot(Num, Metadata));
+ }
+
+ /// addSafePoint - Notes the existence of a safe point. Num is the ID of the
+ /// label just prior to the safe point (if the code generator is using
+ /// MachineModuleInfo).
+ void addSafePoint(GC::PointKind Kind, unsigned Num) {
+ SafePoints.push_back(GCPoint(Kind, Num));
+ }
+
+ /// getFrameSize/setFrameSize - Records the function's frame size.
+ ///
+ uint64_t getFrameSize() const { return FrameSize; }
+ void setFrameSize(uint64_t S) { FrameSize = S; }
+
+ /// begin/end - Iterators for safe points.
+ ///
+ iterator begin() { return SafePoints.begin(); }
+ iterator end() { return SafePoints.end(); }
+ size_t size() const { return SafePoints.size(); }
+
+ /// roots_begin/roots_end - Iterators for all roots in the function.
+ ///
+ roots_iterator roots_begin() { return Roots.begin(); }
+ roots_iterator roots_end () { return Roots.end(); }
+ size_t roots_size() const { return Roots.size(); }
+
+ /// live_begin/live_end - Iterators for live roots at a given safe point.
+ ///
+ live_iterator live_begin(const iterator &p) { return roots_begin(); }
+ live_iterator live_end (const iterator &p) { return roots_end(); }
+ size_t live_size(const iterator &p) const { return roots_size(); }
+ };
+
+
+ /// GCModuleInfo - Garbage collection metadata for a whole module.
+ ///
+ class GCModuleInfo : public ImmutablePass {
+ typedef StringMap<GCStrategy*> strategy_map_type;
+ typedef std::vector<GCStrategy*> list_type;
+ typedef DenseMap<const Function*,GCFunctionInfo*> finfo_map_type;
+
+ strategy_map_type StrategyMap;
+ list_type StrategyList;
+ finfo_map_type FInfoMap;
+
+ GCStrategy *getOrCreateStrategy(const Module *M, const std::string &Name);
+
+ public:
+ typedef list_type::const_iterator iterator;
+
+ static char ID;
+
+ GCModuleInfo();
+ ~GCModuleInfo();
+
+ /// clear - Resets the pass. The metadata deleter pass calls this.
+ ///
+ void clear();
+
+ /// begin/end - Iterators for used strategies.
+ ///
+ iterator begin() const { return StrategyList.begin(); }
+ iterator end() const { return StrategyList.end(); }
+
+ /// get - Look up function metadata.
+ ///
+ GCFunctionInfo &getFunctionInfo(const Function &F);
+ };
+
+}
+
+#endif
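
A sketch of the producer side of this interface, i.e. what a code generator conceptually records for a function F that uses a collector; GMI, FrameIdx, RootMetadata, LabelID and FrameSizeInBytes are illustrative names, not part of the patch.

  GCFunctionInfo &FI = GMI.getFunctionInfo(F);  // GMI is the GCModuleInfo pass
  FI.addStackRoot(FrameIdx, RootMetadata);      // one call per llvm.gcroot alloca
  FI.addSafePoint(GC::PostCall, LabelID);       // label just after a call site
  FI.setFrameSize(FrameSizeInBytes);            // once frame layout is final
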
diff --git a/include/llvm/CodeGen/GCMetadataPrinter.h b/include/llvm/CodeGen/GCMetadataPrinter.h
new file mode 100644
index 0000000..b693b1b
--- /dev/null
+++ b/include/llvm/CodeGen/GCMetadataPrinter.h
@@ -0,0 +1,76 @@
+//===-- llvm/CodeGen/GCMetadataPrinter.h - Prints asm GC tables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The abstract base class GCMetadataPrinter supports writing GC metadata tables
+// as assembly code. This is a separate class from GCStrategy in order to allow
+// users of the LLVM JIT to avoid linking with the AsmWriter.
+//
+// Subclasses of GCMetadataPrinter must be registered using the
+// GCMetadataPrinterRegistry. This is separate from the GCStrategy itself
+// because these subclasses are logically plugins for the AsmWriter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCMETADATAPRINTER_H
+#define LLVM_CODEGEN_GCMETADATAPRINTER_H
+
+#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/CodeGen/GCStrategy.h"
+#include "llvm/Support/Registry.h"
+
+namespace llvm {
+
+ class GCMetadataPrinter;
+ class raw_ostream;
+
+ /// GCMetadataPrinterRegistry - The GC assembly printer registry uses all the
+ /// defaults from Registry.
+ typedef Registry<GCMetadataPrinter> GCMetadataPrinterRegistry;
+
+ /// GCMetadataPrinter - Emits GC metadata as assembly code.
+ ///
+ class GCMetadataPrinter {
+ public:
+ typedef GCStrategy::list_type list_type;
+ typedef GCStrategy::iterator iterator;
+
+ private:
+ GCStrategy *S;
+
+ friend class AsmPrinter;
+
+ protected:
+ // May only be subclassed.
+ GCMetadataPrinter();
+
+ // Do not implement.
+ GCMetadataPrinter(const GCMetadataPrinter &);
+ GCMetadataPrinter &operator=(const GCMetadataPrinter &);
+
+ public:
+ GCStrategy &getStrategy() { return *S; }
+ const Module &getModule() const { return S->getModule(); }
+
+ /// begin/end - Iterate over the collected function metadata.
+ iterator begin() { return S->begin(); }
+ iterator end() { return S->end(); }
+
+ /// beginAssembly/finishAssembly - Emit module metadata as assembly code.
+ virtual void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
+ const TargetAsmInfo &TAI);
+
+ virtual void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
+ const TargetAsmInfo &TAI);
+
+ virtual ~GCMetadataPrinter();
+ };
+
+}
+
+#endif
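
A sketch of a printer plugin under the registry pattern this header assumes; the collector name and the table layout emitted from finishAssembly are placeholders.

  class HypoGCPrinter : public GCMetadataPrinter {
  public:
    virtual void beginAssembly(raw_ostream &OS, AsmPrinter &AP,
                               const TargetAsmInfo &TAI) {}
    virtual void finishAssembly(raw_ostream &OS, AsmPrinter &AP,
                                const TargetAsmInfo &TAI) {
      // Walk the per-function metadata collected by the strategy and emit
      // one table entry per safe point and per root.
      for (iterator I = begin(), E = end(); I != E; ++I) {
        GCFunctionInfo &FI = **I;
        (void)FI; // ... emit FI.getFrameSize(), the safe points, the roots ...
      }
    }
  };
  static GCMetadataPrinterRegistry::Add<HypoGCPrinter>
  X("hypo-gc", "hypothetical collector metadata printer");
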
diff --git a/include/llvm/CodeGen/GCStrategy.h b/include/llvm/CodeGen/GCStrategy.h
new file mode 100644
index 0000000..cd760db
--- /dev/null
+++ b/include/llvm/CodeGen/GCStrategy.h
@@ -0,0 +1,142 @@
+//===-- llvm/CodeGen/GCStrategy.h - Garbage collection ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GCStrategy coordinates code generation algorithms and implements some itself
+// in order to generate code compatible with a target code generator as
+// specified in a function's 'gc' attribute. Algorithms are enabled by setting
+// flags in a subclass's constructor, and some virtual methods can be
+// overridden.
+//
+// When requested, the GCStrategy will be populated with data about each
+// function which uses it. Specifically:
+//
+// - Safe points
+// Garbage collection is generally only possible at certain points in code.
+//    GCStrategy can request that the code generator insert such points:
+//
+// - At and after any call to a subroutine
+// - Before returning from the current function
+// - Before backwards branches (loops)
+//
+// - Roots
+// When a reference to a GC-allocated object exists on the stack, it must be
+//    stored in an alloca registered with llvm.gcroot.
+//
+// This information can be used to emit the metadata tables which are required
+// by the target garbage collector runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCSTRATEGY_H
+#define LLVM_CODEGEN_GCSTRATEGY_H
+
+#include "llvm/CodeGen/GCMetadata.h"
+#include "llvm/Support/Registry.h"
+#include <string>
+
+namespace llvm {
+
+ class GCStrategy;
+
+ /// The GC strategy registry uses all the defaults from Registry.
+ ///
+ typedef Registry<GCStrategy> GCRegistry;
+
+ /// GCStrategy describes a garbage collector algorithm's code generation
+ /// requirements, and provides overridable hooks for those needs which cannot
+ /// be abstractly described.
+ class GCStrategy {
+ public:
+ typedef std::vector<GCFunctionInfo*> list_type;
+ typedef list_type::iterator iterator;
+
+ private:
+ friend class GCModuleInfo;
+ const Module *M;
+ std::string Name;
+
+ list_type Functions;
+
+ protected:
+ unsigned NeededSafePoints; //< Bitmask of required safe points.
+ bool CustomReadBarriers; //< Default is to insert loads.
+ bool CustomWriteBarriers; //< Default is to insert stores.
+ bool CustomRoots; //< Default is to pass through to backend.
+ bool InitRoots; //< If set, roots are nulled during lowering.
+ bool UsesMetadata; //< If set, backend must emit metadata tables.
+
+ public:
+ GCStrategy();
+
+ virtual ~GCStrategy();
+
+
+ /// getName - The name of the GC strategy, for debugging.
+ ///
+ const std::string &getName() const { return Name; }
+
+ /// getModule - The module within which the GC strategy is operating.
+ ///
+ const Module &getModule() const { return *M; }
+
+    /// needsSafePoints - True if safe points of any kind are required. By
+    /// default, none are recorded.
+ bool needsSafePoints() const { return NeededSafePoints != 0; }
+
+    /// needsSafePoint(Kind) - True if the given kind of safe point is
+    /// required. By default, none are recorded.
+ bool needsSafePoint(GC::PointKind Kind) const {
+ return (NeededSafePoints & 1 << Kind) != 0;
+ }
+
+ /// customWriteBarrier - By default, write barriers are replaced with simple
+ /// store instructions. If true, then
+ /// performCustomLowering must instead lower them.
+ bool customWriteBarrier() const { return CustomWriteBarriers; }
+
+ /// customReadBarrier - By default, read barriers are replaced with simple
+ /// load instructions. If true, then
+ /// performCustomLowering must instead lower them.
+ bool customReadBarrier() const { return CustomReadBarriers; }
+
+ /// customRoots - By default, roots are left for the code generator so it
+ /// can generate a stack map. If true, then
+    /// performCustomLowering must delete them.
+ bool customRoots() const { return CustomRoots; }
+
+ /// initializeRoots - If set, gcroot intrinsics should initialize their
+    /// allocas to null before the first use. This is
+    /// necessary for most GCs and is enabled by default.
+ bool initializeRoots() const { return InitRoots; }
+
+ /// usesMetadata - If set, appropriate metadata tables must be emitted by
+ /// the back-end (assembler, JIT, or otherwise).
+ bool usesMetadata() const { return UsesMetadata; }
+
+ /// begin/end - Iterators for function metadata.
+ ///
+ iterator begin() { return Functions.begin(); }
+ iterator end() { return Functions.end(); }
+
+ /// insertFunctionMetadata - Creates metadata for a function.
+ ///
+ GCFunctionInfo *insertFunctionInfo(const Function &F);
+
+ /// initializeCustomLowering/performCustomLowering - If any of the actions
+    /// are set to custom, performCustomLowering must be overridden to transform
+ /// the corresponding actions to LLVM IR. initializeCustomLowering is
+ /// optional to override. These are the only GCStrategy methods through
+ /// which the LLVM IR can be modified.
+ virtual bool initializeCustomLowering(Module &F);
+ virtual bool performCustomLowering(Function &F);
+ };
+
+}
+
+#endif
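
A sketch of a collector description in terms of the flags above; the name and safe point choices are illustrative, not a strategy added by this patch.

  class HypoGC : public GCStrategy {
  public:
    HypoGC() {
      InitRoots = true;                      // null out gcroot allocas on entry
      UsesMetadata = true;                   // backend must emit stack map tables
      NeededSafePoints = 1 << GC::PostCall;  // safe point after every call
    }
  };
  // Registration follows the Registry-based pattern used by GCRegistry:
  static GCRegistry::Add<HypoGC> X("hypo-gc", "hypothetical collector");
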
diff --git a/include/llvm/CodeGen/GCs.h b/include/llvm/CodeGen/GCs.h
new file mode 100644
index 0000000..c407b61
--- /dev/null
+++ b/include/llvm/CodeGen/GCs.h
@@ -0,0 +1,35 @@
+//===-- GCs.h - Garbage collector linkage hacks ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains hack functions to force linking in the GC components.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_GCS_H
+#define LLVM_CODEGEN_GCS_H
+
+namespace llvm {
+ class GCStrategy;
+ class GCMetadataPrinter;
+
+ /// FIXME: Collector instances are not useful on their own. These no longer
+ /// serve any purpose except to link in the plugins.
+
+ /// Creates an ocaml-compatible garbage collector.
+ void linkOcamlGC();
+
+ /// Creates an ocaml-compatible metadata printer.
+ void linkOcamlGCPrinter();
+
+ /// Creates a shadow stack garbage collector. This collector requires no code
+ /// generator support.
+ void linkShadowStackGC();
+}
+
+#endif
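
An assumed usage sketch: a tool pulls a collector plugin in by referencing it once, typically near its entry point, so that the plugin's static registration survives dead code elimination.

  int main(int argc, char **argv) {
    llvm::linkShadowStackGC();  // forces the shadow stack GC plugin to link in
    // ... build or load a module whose functions carry a matching 'gc' name ...
    return 0;
  }
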
diff --git a/include/llvm/CodeGen/IntrinsicLowering.h b/include/llvm/CodeGen/IntrinsicLowering.h
new file mode 100644
index 0000000..6628329
--- /dev/null
+++ b/include/llvm/CodeGen/IntrinsicLowering.h
@@ -0,0 +1,50 @@
+//===-- IntrinsicLowering.h - Intrinsic Function Lowering -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IntrinsicLowering interface. This interface allows
+// addition of domain-specific or front-end specific intrinsics to LLVM without
+// having to modify all of the C backend or interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_INTRINSICLOWERING_H
+#define LLVM_CODEGEN_INTRINSICLOWERING_H
+
+#include "llvm/Intrinsics.h"
+
+namespace llvm {
+ class CallInst;
+ class Module;
+ class TargetData;
+
+ class IntrinsicLowering {
+ const TargetData& TD;
+ public:
+ explicit IntrinsicLowering(const TargetData &td) : TD(td) {}
+
+ /// AddPrototypes - This method, if called, causes all of the prototypes
+ /// that might be needed by an intrinsic lowering implementation to be
+ /// inserted into the module specified.
+ void AddPrototypes(Module &M);
+
+ /// LowerIntrinsicCall - This method replaces a call with the LLVM function
+ /// which should be used to implement the specified intrinsic function call.
+ /// If an intrinsic function must be implemented by the code generator
+ /// (such as va_start), this function should print a message and abort.
+ ///
+ /// Otherwise, if an intrinsic function call can be lowered, the code to
+ /// implement it (often a call to a non-intrinsic function) is inserted
+ /// _after_ the call instruction and the call is deleted. The caller must
+ /// be capable of handling this kind of change.
+ ///
+ void LowerIntrinsicCall(CallInst *CI);
+ };
+}
+
+#endif
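
A sketch of the consumer side, assuming a simple backend that lowers any intrinsic calls it has not handled itself; the filtering of which intrinsics are safe to hand to LowerIntrinsicCall is elided.

  IntrinsicLowering IL(TD);   // TD: the module's TargetData
  IL.AddPrototypes(M);        // add any prototypes lowering may need
  for (Module::iterator F = M.begin(), FE = M.end(); F != FE; ++F)
    for (Function::iterator BB = F->begin(), BE = F->end(); BB != BE; ++BB)
      for (BasicBlock::iterator I = BB->begin(); I != BB->end(); ) {
        Instruction *Inst = &*I++;         // advance first: Inst may be erased
        if (CallInst *CI = dyn_cast<CallInst>(Inst))
          if (CI->getCalledFunction() && CI->getCalledFunction()->isIntrinsic())
            IL.LowerIntrinsicCall(CI);     // inserts replacement code, deletes CI
      }
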
diff --git a/include/llvm/CodeGen/JITCodeEmitter.h b/include/llvm/CodeGen/JITCodeEmitter.h
new file mode 100644
index 0000000..bf6b76e
--- /dev/null
+++ b/include/llvm/CodeGen/JITCodeEmitter.h
@@ -0,0 +1,322 @@
+//===-- llvm/CodeGen/JITCodeEmitter.h - Code emission ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an abstract interface that is used by the machine code
+// emission framework to output the code. This allows machine code emission to
+// be separated from concerns such as resolution of call targets, and where the
+// machine code will be written (e.g. memory or disk).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_JITCODEEMITTER_H
+#define LLVM_CODEGEN_JITCODEEMITTER_H
+
+#include <string>
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Streams.h"
+#include "llvm/CodeGen/MachineCodeEmitter.h"
+
+using namespace std;
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineConstantPool;
+class MachineJumpTableInfo;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineRelocation;
+class Value;
+class GlobalValue;
+class Function;
+
+/// JITCodeEmitter - This class defines two sorts of methods: those for
+/// emitting the actual bytes of machine code, and those for emitting auxiliary
+/// structures, such as jump tables, relocations, etc.
+///
+/// Emission of machine code is complicated by the fact that we don't (in
+/// general) know the size of the machine code that we're about to emit before
+/// we emit it. As such, we preallocate a certain amount of memory, and set the
+/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we
+/// emit machine instructions, we advance the CurBufferPtr to indicate the
+/// location of the next byte to emit. In the case of a buffer overflow (we
+/// need to emit more machine code than we have allocated space for), the
+/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire
+/// function has been emitted, the overflow condition is checked, and if it has
+/// occurred, more memory is allocated, and we reemit the code into it.
+///
+class JITCodeEmitter : public MachineCodeEmitter {
+public:
+ virtual ~JITCodeEmitter() {}
+
+ /// startFunction - This callback is invoked when the specified function is
+ /// about to be code generated. This initializes the BufferBegin/End/Ptr
+ /// fields.
+ ///
+ virtual void startFunction(MachineFunction &F) = 0;
+
+ /// finishFunction - This callback is invoked when the specified function has
+ /// finished code generation. If a buffer overflow has occurred, this method
+ /// returns true (the callee is required to try again), otherwise it returns
+ /// false.
+ ///
+ virtual bool finishFunction(MachineFunction &F) = 0;
+
+ /// startGVStub - This callback is invoked when the JIT needs the
+ /// address of a GV (e.g. function) that has not been code generated yet.
+ /// The StubSize specifies the total size required by the stub.
+ ///
+ virtual void startGVStub(const GlobalValue* GV, unsigned StubSize,
+ unsigned Alignment = 1) = 0;
+
+ /// startGVStub - This callback is invoked when the JIT needs the address of a
+ /// GV (e.g. function) that has not been code generated yet. Buffer points to
+ /// memory already allocated for this stub.
+ ///
+ virtual void startGVStub(const GlobalValue* GV, void *Buffer,
+ unsigned StubSize) = 0;
+
+ /// finishGVStub - This callback is invoked to terminate a GV stub.
+ ///
+ virtual void *finishGVStub(const GlobalValue* F) = 0;
+
+ /// emitByte - This callback is invoked when a byte needs to be written to the
+ /// output stream.
+ ///
+ void emitByte(uint8_t B) {
+ if (CurBufferPtr != BufferEnd)
+ *CurBufferPtr++ = B;
+ }
+
+ /// emitWordLE - This callback is invoked when a 32-bit word needs to be
+ /// written to the output stream in little-endian format.
+ ///
+ void emitWordLE(unsigned W) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitWordBE - This callback is invoked when a 32-bit word needs to be
+ /// written to the output stream in big-endian format.
+ ///
+ void emitWordBE(unsigned W) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitDWordLE - This callback is invoked when a 64-bit word needs to be
+ /// written to the output stream in little-endian format.
+ ///
+ void emitDWordLE(uint64_t W) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 32);
+ *CurBufferPtr++ = (uint8_t)(W >> 40);
+ *CurBufferPtr++ = (uint8_t)(W >> 48);
+ *CurBufferPtr++ = (uint8_t)(W >> 56);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitDWordBE - This callback is invoked when a 64-bit word needs to be
+ /// written to the output stream in big-endian format.
+ ///
+ void emitDWordBE(uint64_t W) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 56);
+ *CurBufferPtr++ = (uint8_t)(W >> 48);
+ *CurBufferPtr++ = (uint8_t)(W >> 40);
+ *CurBufferPtr++ = (uint8_t)(W >> 32);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+  /// emitAlignment - Move CurBufferPtr up to the specified
+ /// alignment (saturated to BufferEnd of course).
+ void emitAlignment(unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+
+    if (Alignment <= (uintptr_t)(BufferEnd-CurBufferPtr)) {
+ // Move the current buffer ptr up to the specified alignment.
+ CurBufferPtr =
+ (uint8_t*)(((uintptr_t)CurBufferPtr+Alignment-1) &
+ ~(uintptr_t)(Alignment-1));
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+
+ /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
+ /// written to the output stream.
+ void emitULEB128Bytes(unsigned Value) {
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value) Byte |= 0x80;
+ emitByte(Byte);
+ } while (Value);
+ }
+
+ /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
+ /// written to the output stream.
+ void emitSLEB128Bytes(int32_t Value) {
+ int32_t Sign = Value >> (8 * sizeof(Value) - 1);
+ bool IsMore;
+
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
+ if (IsMore) Byte |= 0x80;
+ emitByte(Byte);
+ } while (IsMore);
+ }
+
+ /// emitString - This callback is invoked when a String needs to be
+ /// written to the output stream.
+ void emitString(const std::string &String) {
+ for (unsigned i = 0, N = static_cast<unsigned>(String.size());
+ i < N; ++i) {
+ uint8_t C = String[i];
+ emitByte(C);
+ }
+ emitByte(0);
+ }
+
+  /// emitInt32 - Emit an int32 directive.
+ void emitInt32(int32_t Value) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *((uint32_t*)CurBufferPtr) = Value;
+ CurBufferPtr += 4;
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+  /// emitInt64 - Emit an int64 directive.
+ void emitInt64(uint64_t Value) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *((uint64_t*)CurBufferPtr) = Value;
+ CurBufferPtr += 8;
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitInt32At - Emit the Int32 Value in Addr.
+ void emitInt32At(uintptr_t *Addr, uintptr_t Value) {
+ if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
+ (*(uint32_t*)Addr) = (uint32_t)Value;
+ }
+
+ /// emitInt64At - Emit the Int64 Value in Addr.
+ void emitInt64At(uintptr_t *Addr, uintptr_t Value) {
+ if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
+ (*(uint64_t*)Addr) = (uint64_t)Value;
+ }
+
+
+ /// emitLabel - Emits a label
+ virtual void emitLabel(uint64_t LabelID) = 0;
+
+ /// allocateSpace - Allocate a block of space in the current output buffer,
+ /// returning null (and setting conditions to indicate buffer overflow) on
+  /// failure. Alignment is the desired alignment, in bytes, of the allocated block.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
+ emitAlignment(Alignment);
+ void *Result;
+
+ // Check for buffer overflow.
+ if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
+ CurBufferPtr = BufferEnd;
+ Result = 0;
+ } else {
+ // Allocate the space.
+ Result = CurBufferPtr;
+ CurBufferPtr += Size;
+ }
+
+ return Result;
+ }
+
+ /// StartMachineBasicBlock - This should be called by the target when a new
+ /// basic block is about to be emitted. This way the MCE knows where the
+ /// start of the block is, and can implement getMachineBasicBlockAddress.
+ virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0;
+
+ /// getCurrentPCValue - This returns the address that the next emitted byte
+ /// will be output to.
+ ///
+ virtual uintptr_t getCurrentPCValue() const {
+ return (uintptr_t)CurBufferPtr;
+ }
+
+ /// getCurrentPCOffset - Return the offset from the start of the emitted
+ /// buffer that we are currently writing to.
+ uintptr_t getCurrentPCOffset() const {
+ return CurBufferPtr-BufferBegin;
+ }
+
+ /// addRelocation - Whenever a relocatable address is needed, it should be
+ /// noted with this interface.
+ virtual void addRelocation(const MachineRelocation &MR) = 0;
+
+ /// FIXME: These should all be handled with relocations!
+
+ /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
+ /// the constant pool that was last emitted with the emitConstantPool method.
+ ///
+ virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0;
+
+ /// getJumpTableEntryAddress - Return the address of the jump table with index
+ /// 'Index' in the function that last called initJumpTableInfo.
+ ///
+ virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0;
+
+ /// getMachineBasicBlockAddress - Return the address of the specified
+ /// MachineBasicBlock, only usable after the label for the MBB has been
+ /// emitted.
+ ///
+ virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const= 0;
+
+ /// getLabelAddress - Return the address of the specified LabelID, only usable
+ /// after the LabelID has been emitted.
+ ///
+ virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0;
+
+ /// Specifies the MachineModuleInfo object. This is used for exception handling
+ /// purposes.
+ virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
+};
+
+} // End llvm namespace
+
+#endif
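
The emitULEB128Bytes loop above packs seven data bits per byte, setting the high bit on every byte except the last. A standalone illustration of that encoding, with no LLVM dependencies:

  #include <cassert>
  #include <vector>

  static std::vector<unsigned char> encodeULEB128(unsigned Value) {
    std::vector<unsigned char> Out;
    do {
      unsigned char Byte = Value & 0x7f;
      Value >>= 7;
      if (Value) Byte |= 0x80;   // more bytes follow
      Out.push_back(Byte);
    } while (Value);
    return Out;
  }

  int main() {
    // 300 = 0b1_0010_1100 encodes as 0xAC 0x02 (low group first).
    std::vector<unsigned char> E = encodeULEB128(300);
    assert(E.size() == 2 && E[0] == 0xAC && E[1] == 0x02);
    return 0;
  }
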
diff --git a/include/llvm/CodeGen/LatencyPriorityQueue.h b/include/llvm/CodeGen/LatencyPriorityQueue.h
new file mode 100644
index 0000000..71fae2a
--- /dev/null
+++ b/include/llvm/CodeGen/LatencyPriorityQueue.h
@@ -0,0 +1,112 @@
+//===---- LatencyPriorityQueue.h - A latency-oriented priority queue ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the LatencyPriorityQueue class, which is a
+// SchedulingPriorityQueue that schedules using latency information to
+// reduce the length of the critical path through the basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LATENCY_PRIORITY_QUEUE_H
+#define LATENCY_PRIORITY_QUEUE_H
+
+#include "llvm/CodeGen/ScheduleDAG.h"
+#include "llvm/ADT/PriorityQueue.h"
+
+namespace llvm {
+ class LatencyPriorityQueue;
+
+ /// Sorting functions for the Available queue.
+ struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
+ LatencyPriorityQueue *PQ;
+ explicit latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
+
+ bool operator()(const SUnit* left, const SUnit* right) const;
+ };
+
+ class LatencyPriorityQueue : public SchedulingPriorityQueue {
+ // SUnits - The SUnits for the current graph.
+ std::vector<SUnit> *SUnits;
+
+ /// NumNodesSolelyBlocking - This vector contains, for every node in the
+ /// Queue, the number of nodes that the node is the sole unscheduled
+ /// predecessor for. This is used as a tie-breaker heuristic for better
+ /// mobility.
+ std::vector<unsigned> NumNodesSolelyBlocking;
+
+ PriorityQueue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
+public:
+ LatencyPriorityQueue() : Queue(latency_sort(this)) {
+ }
+
+ void initNodes(std::vector<SUnit> &sunits) {
+ SUnits = &sunits;
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void addNode(const SUnit *SU) {
+ NumNodesSolelyBlocking.resize(SUnits->size(), 0);
+ }
+
+ void updateNode(const SUnit *SU) {
+ }
+
+ void releaseState() {
+ SUnits = 0;
+ }
+
+ unsigned getLatency(unsigned NodeNum) const {
+ assert(NodeNum < (*SUnits).size());
+ return (*SUnits)[NodeNum].getHeight();
+ }
+
+ unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
+ assert(NodeNum < NumNodesSolelyBlocking.size());
+ return NumNodesSolelyBlocking[NodeNum];
+ }
+
+ unsigned size() const { return Queue.size(); }
+
+ bool empty() const { return Queue.empty(); }
+
+ virtual void push(SUnit *U) {
+ push_impl(U);
+ }
+ void push_impl(SUnit *U);
+
+ void push_all(const std::vector<SUnit *> &Nodes) {
+ for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
+ push_impl(Nodes[i]);
+ }
+
+ SUnit *pop() {
+ if (empty()) return NULL;
+ SUnit *V = Queue.top();
+ Queue.pop();
+ return V;
+ }
+
+ void remove(SUnit *SU) {
+ assert(!Queue.empty() && "Not in queue!");
+ Queue.erase_one(SU);
+ }
+
+ // ScheduledNode - As nodes are scheduled, we look to see if there are any
+ // successor nodes that have a single unscheduled predecessor. If so, that
+ // single predecessor has a higher priority, since scheduling it will make
+ // the node available.
+ void ScheduledNode(SUnit *Node);
+
+private:
+ void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
+ SUnit *getSingleUnscheduledPred(SUnit *SU);
+ };
+}
+
+#endif
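
A sketch of the driver protocol a list scheduler is assumed to follow with this queue; Schedule() is a placeholder and the step that releases newly available successors is elided.

  LatencyPriorityQueue Q;
  Q.initNodes(SUnits);        // SUnits: the std::vector<SUnit> for the region
  // push every SUnit with no unscheduled predecessors, then drain the queue:
  while (!Q.empty()) {
    SUnit *SU = Q.pop();      // node on the longest remaining path first
    Schedule(SU);             // placeholder: record/emit the node
    Q.ScheduledNode(SU);      // update the solely-blocking tie-breaker counts
    // ... push successors whose predecessors are now all scheduled ...
  }
  Q.releaseState();
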
diff --git a/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
new file mode 100644
index 0000000..1673c89
--- /dev/null
+++ b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -0,0 +1,36 @@
+//===- llvm/Codegen/LinkAllAsmWriterComponents.h ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all assembler writer related passes for tools like
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
+
+#include "llvm/CodeGen/GCs.h"
+
+namespace {
+ struct ForceAsmWriterLinking {
+ ForceAsmWriterLinking() {
+      // We must reference the plug-ins in such a way that compilers will not
+      // delete them as dead code, even with whole-program optimization, yet
+      // the reference is effectively a no-op. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ llvm::linkOcamlGCPrinter();
+
+ }
+ } ForceAsmWriterLinking; // Force link by creating a global definition.
+}
+
+#endif // LLVM_CODEGEN_LINKALLASMWRITERCOMPONENTS_H
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h
new file mode 100644
index 0000000..a231f49
--- /dev/null
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -0,0 +1,56 @@
+//===- llvm/Codegen/LinkAllCodegenComponents.h ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file pulls in all codegen related passes for tools like lli and
+// llc that need this functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+#define LLVM_CODEGEN_LINKALLCODEGENCOMPONENTS_H
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/CodeGen/GCs.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace {
+ struct ForceCodegenLinking {
+ ForceCodegenLinking() {
+      // We must reference the passes in such a way that compilers will not
+      // delete them as dead code, even with whole-program optimization, yet
+      // the reference is effectively a no-op. As the compiler isn't smart
+      // enough to know that getenv() never returns -1, this will do the job.
+ if (std::getenv("bar") != (char*) -1)
+ return;
+
+ (void) llvm::createDeadMachineInstructionElimPass();
+
+ (void) llvm::createSimpleRegisterAllocator();
+ (void) llvm::createLocalRegisterAllocator();
+ (void) llvm::createBigBlockRegisterAllocator();
+ (void) llvm::createLinearScanRegisterAllocator();
+ (void) llvm::createPBQPRegisterAllocator();
+
+ (void) llvm::createSimpleRegisterCoalescer();
+
+ llvm::linkOcamlGC();
+ llvm::linkShadowStackGC();
+
+ (void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createTDRRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createTDListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createFastDAGScheduler(NULL, llvm::CodeGenOpt::Default);
+ (void) llvm::createDefaultScheduler(NULL, llvm::CodeGenOpt::Default);
+
+ }
+ } ForceCodegenLinking; // Force link by creating a global definition.
+}
+
+#endif
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
new file mode 100644
index 0000000..f1ae587
--- /dev/null
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -0,0 +1,468 @@
+//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveRange and LiveInterval classes. Given some
+// numbering of each of the machine instructions, an interval [i, j) is said to be a
+// live interval for register v if there is no instruction with number j' >= j
+// such that v is live at j' and there is no instruction with number i' < i such
+// that v is live at i'. In this implementation intervals can have holes,
+// i.e. an interval might look like [1,20), [50,65), [1000,1001). Each
+// individual range is represented as an instance of LiveRange, and the whole
+// interval is represented as an instance of LiveInterval.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
+#define LLVM_CODEGEN_LIVEINTERVAL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include <iosfwd>
+#include <cassert>
+#include <climits>
+
+namespace llvm {
+ class MachineInstr;
+ class TargetRegisterInfo;
+ struct LiveInterval;
+
+ /// VNInfo - If the value number definition is undefined (e.g. phi
+ /// merge point), it contains ~0u,x. If the value number is not in use, it
+ /// contains ~1u,x to indicate that the value # is not used.
+ /// def - Instruction # of the definition.
+ /// - or reg # of the definition if it's a stack slot liveinterval.
+ /// copy - Copy iff val# is defined by a copy; zero otherwise.
+ /// hasPHIKill - One or more of the kills are PHI nodes.
+ /// redefByEC - Re-defined by early clobber somewhere during the live range.
+ /// kills - Instruction # of the kills.
+ struct VNInfo {
+ unsigned id;
+ unsigned def;
+ MachineInstr *copy;
+ bool hasPHIKill : 1;
+ bool redefByEC : 1;
+ SmallVector<unsigned, 4> kills;
+ VNInfo()
+ : id(~1U), def(~1U), copy(0), hasPHIKill(false), redefByEC(false) {}
+ VNInfo(unsigned i, unsigned d, MachineInstr *c)
+ : id(i), def(d), copy(c), hasPHIKill(false), redefByEC(false) {}
+ };
+
+ /// LiveRange structure - This represents a simple register range in the
+ /// program, with an inclusive start point and an exclusive end point.
+ /// These ranges are rendered as [start,end).
+ struct LiveRange {
+ unsigned start; // Start point of the interval (inclusive)
+ unsigned end; // End point of the interval (exclusive)
+ VNInfo *valno; // identifier for the value contained in this interval.
+
+ LiveRange(unsigned S, unsigned E, VNInfo *V) : start(S), end(E), valno(V) {
+ assert(S < E && "Cannot create empty or backwards range");
+ }
+
+ /// contains - Return true if the index is covered by this range.
+ ///
+ bool contains(unsigned I) const {
+ return start <= I && I < end;
+ }
+
+ bool operator<(const LiveRange &LR) const {
+ return start < LR.start || (start == LR.start && end < LR.end);
+ }
+ bool operator==(const LiveRange &LR) const {
+ return start == LR.start && end == LR.end;
+ }
+
+ void dump() const;
+ void print(std::ostream &os) const;
+ void print(std::ostream *os) const { if (os) print(*os); }
+
+ private:
+ LiveRange(); // DO NOT IMPLEMENT
+ };
+
+ std::ostream& operator<<(std::ostream& os, const LiveRange &LR);
+
+
+ inline bool operator<(unsigned V, const LiveRange &LR) {
+ return V < LR.start;
+ }
+
+ inline bool operator<(const LiveRange &LR, unsigned V) {
+ return LR.start < V;
+ }
+
+ /// LiveInterval - This class represents some number of live ranges for a
+ /// register or value. This class also contains a bit of register allocator
+ /// state.
+ struct LiveInterval {
+ typedef SmallVector<LiveRange,4> Ranges;
+ typedef SmallVector<VNInfo*,4> VNInfoList;
+
+ unsigned reg; // the register or stack slot of this interval
+                         // if the top bit is set, it represents a stack slot.
+ float weight; // weight of this interval
+ unsigned short preference; // preferred register for this interval
+ Ranges ranges; // the ranges in which this register is live
+ VNInfoList valnos; // value#'s
+
+ public:
+
+ struct InstrSlots {
+ enum {
+ LOAD = 0,
+ USE = 1,
+ DEF = 2,
+ STORE = 3,
+ NUM = 4
+ };
+
+ static unsigned scale(unsigned slot, unsigned factor) {
+ unsigned index = slot / NUM,
+ offset = slot % NUM;
+ assert(index <= ~0U / (factor * NUM) &&
+ "Rescaled interval would overflow");
+ return index * NUM * factor + offset;
+ }
+
+ };
+
+ LiveInterval(unsigned Reg, float Weight, bool IsSS = false)
+ : reg(Reg), weight(Weight), preference(0) {
+ if (IsSS)
+ reg = reg | (1U << (sizeof(unsigned)*CHAR_BIT-1));
+ }
+
+ typedef Ranges::iterator iterator;
+ iterator begin() { return ranges.begin(); }
+ iterator end() { return ranges.end(); }
+
+ typedef Ranges::const_iterator const_iterator;
+ const_iterator begin() const { return ranges.begin(); }
+ const_iterator end() const { return ranges.end(); }
+
+ typedef VNInfoList::iterator vni_iterator;
+ vni_iterator vni_begin() { return valnos.begin(); }
+ vni_iterator vni_end() { return valnos.end(); }
+
+ typedef VNInfoList::const_iterator const_vni_iterator;
+ const_vni_iterator vni_begin() const { return valnos.begin(); }
+ const_vni_iterator vni_end() const { return valnos.end(); }
+
+ /// advanceTo - Advance the specified iterator to point to the LiveRange
+ /// containing the specified position, or end() if the position is past the
+ /// end of the interval. If no LiveRange contains this position, but the
+    /// position is in a hole, this method returns an iterator pointing to the
+ /// LiveRange immediately after the hole.
+ iterator advanceTo(iterator I, unsigned Pos) {
+ if (Pos >= endNumber())
+ return end();
+ while (I->end <= Pos) ++I;
+ return I;
+ }
+
+ void clear() {
+ while (!valnos.empty()) {
+ VNInfo *VNI = valnos.back();
+ valnos.pop_back();
+ VNI->~VNInfo();
+ }
+
+ ranges.clear();
+ }
+
+ /// isStackSlot - Return true if this is a stack slot interval.
+ ///
+ bool isStackSlot() const {
+ return reg & (1U << (sizeof(unsigned)*CHAR_BIT-1));
+ }
+
+ /// getStackSlotIndex - Return stack slot index if this is a stack slot
+ /// interval.
+ int getStackSlotIndex() const {
+ assert(isStackSlot() && "Interval is not a stack slot interval!");
+ return reg & ~(1U << (sizeof(unsigned)*CHAR_BIT-1));
+ }
+
+ bool hasAtLeastOneValue() const { return !valnos.empty(); }
+
+ bool containsOneValue() const { return valnos.size() == 1; }
+
+ unsigned getNumValNums() const { return (unsigned)valnos.size(); }
+
+ /// getValNumInfo - Returns pointer to the specified val#.
+ ///
+ inline VNInfo *getValNumInfo(unsigned ValNo) {
+ return valnos[ValNo];
+ }
+ inline const VNInfo *getValNumInfo(unsigned ValNo) const {
+ return valnos[ValNo];
+ }
+
+ /// copyValNumInfo - Copy the value number info for one value number to
+ /// another.
+ void copyValNumInfo(VNInfo *DstValNo, const VNInfo *SrcValNo) {
+ DstValNo->def = SrcValNo->def;
+ DstValNo->copy = SrcValNo->copy;
+ DstValNo->hasPHIKill = SrcValNo->hasPHIKill;
+ DstValNo->redefByEC = SrcValNo->redefByEC;
+ DstValNo->kills = SrcValNo->kills;
+ }
+
+ /// getNextValue - Create a new value number and return it. MIIdx specifies
+ /// the instruction that defines the value number.
+ VNInfo *getNextValue(unsigned MIIdx, MachineInstr *CopyMI,
+ BumpPtrAllocator &VNInfoAllocator) {
+#ifdef __GNUC__
+ unsigned Alignment = (unsigned)__alignof__(VNInfo);
+#else
+ // FIXME: ugly.
+ unsigned Alignment = 8;
+#endif
+ VNInfo *VNI =
+ static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
+ Alignment));
+ new (VNI) VNInfo((unsigned)valnos.size(), MIIdx, CopyMI);
+ valnos.push_back(VNI);
+ return VNI;
+ }
+
+ /// addKill - Add a kill instruction index to the specified value
+ /// number.
+ static void addKill(VNInfo *VNI, unsigned KillIdx) {
+ SmallVector<unsigned, 4> &kills = VNI->kills;
+ if (kills.empty()) {
+ kills.push_back(KillIdx);
+ } else {
+ SmallVector<unsigned, 4>::iterator
+ I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
+ kills.insert(I, KillIdx);
+ }
+ }
+
+ /// addKills - Add a number of kills into the VNInfo kill vector. If this
+ /// interval is live at a kill point, then the kill is not added.
+ void addKills(VNInfo *VNI, const SmallVector<unsigned, 4> &kills) {
+ for (unsigned i = 0, e = static_cast<unsigned>(kills.size());
+ i != e; ++i) {
+ unsigned KillIdx = kills[i];
+ if (!liveBeforeAndAt(KillIdx)) {
+ SmallVector<unsigned, 4>::iterator
+ I = std::lower_bound(VNI->kills.begin(), VNI->kills.end(), KillIdx);
+ VNI->kills.insert(I, KillIdx);
+ }
+ }
+ }
+
+ /// removeKill - Remove the specified kill from the list of kills of
+ /// the specified val#.
+ static bool removeKill(VNInfo *VNI, unsigned KillIdx) {
+ SmallVector<unsigned, 4> &kills = VNI->kills;
+ SmallVector<unsigned, 4>::iterator
+ I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
+ if (I != kills.end() && *I == KillIdx) {
+ kills.erase(I);
+ return true;
+ }
+ return false;
+ }
+
+ /// removeKills - Remove all the kills in specified range
+ /// [Start, End] of the specified val#.
+ static void removeKills(VNInfo *VNI, unsigned Start, unsigned End) {
+ SmallVector<unsigned, 4> &kills = VNI->kills;
+ SmallVector<unsigned, 4>::iterator
+ I = std::lower_bound(kills.begin(), kills.end(), Start);
+ SmallVector<unsigned, 4>::iterator
+ E = std::upper_bound(kills.begin(), kills.end(), End);
+ kills.erase(I, E);
+ }
+
+ /// isKill - Return true if the specified index is a kill of the
+ /// specified val#.
+ static bool isKill(const VNInfo *VNI, unsigned KillIdx) {
+ const SmallVector<unsigned, 4> &kills = VNI->kills;
+ SmallVector<unsigned, 4>::const_iterator
+ I = std::lower_bound(kills.begin(), kills.end(), KillIdx);
+ return I != kills.end() && *I == KillIdx;
+ }
+
+ /// isOnlyLROfValNo - Return true if the specified live range is the only
+    /// one defined by its val#.
+    bool isOnlyLROfValNo(const LiveRange *LR) {
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ const LiveRange *Tmp = I;
+ if (Tmp != LR && Tmp->valno == LR->valno)
+ return false;
+ }
+ return true;
+ }
+
+    /// MergeValueNumberInto - This method is called when two value numbers
+    /// are found to be equivalent. This eliminates V1, replacing all
+    /// LiveRanges with the V1 value number with the V2 value number. This can
+    /// cause merging of V1/V2 value numbers and compaction of the value space.
+ VNInfo* MergeValueNumberInto(VNInfo *V1, VNInfo *V2);
+
+ /// MergeInClobberRanges - For any live ranges that are not defined in the
+ /// current interval, but are defined in the Clobbers interval, mark them
+ /// used with an unknown definition value. Caller must pass in reference to
+ /// VNInfoAllocator since it will create a new val#.
+ void MergeInClobberRanges(const LiveInterval &Clobbers,
+ BumpPtrAllocator &VNInfoAllocator);
+
+    /// MergeInClobberRange - Same as MergeInClobberRanges except it merges in
+    /// a single LiveRange only.
+ void MergeInClobberRange(unsigned Start, unsigned End,
+ BumpPtrAllocator &VNInfoAllocator);
+
+    /// MergeRangesInAsValue - Merge all of the live ranges in RHS into this
+    /// live interval as the specified value number. The LiveRanges in RHS are
+    /// allowed to overlap with LiveRanges in the current interval; in that
+    /// case the value numbers of the overlapped live ranges are replaced with
+    /// the specified value number.
+ void MergeRangesInAsValue(const LiveInterval &RHS, VNInfo *LHSValNo);
+
+ /// MergeValueInAsValue - Merge all of the live ranges of a specific val#
+ /// in RHS into this live interval as the specified value number.
+ /// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
+ /// current interval, but only if the overlapping LiveRanges have the
+ /// specified value number.
+ void MergeValueInAsValue(const LiveInterval &RHS,
+ const VNInfo *RHSValNo, VNInfo *LHSValNo);
+
+ /// Copy - Copy the specified live interval. This copies all the fields
+ /// except for the register of the interval.
+ void Copy(const LiveInterval &RHS, BumpPtrAllocator &VNInfoAllocator);
+
+ bool empty() const { return ranges.empty(); }
+
+ /// beginNumber - Return the lowest numbered slot covered by interval.
+ unsigned beginNumber() const {
+ if (empty())
+ return 0;
+ return ranges.front().start;
+ }
+
+    /// endNumber - Return the maximum point of the interval as a whole,
+    /// exclusive.
+ unsigned endNumber() const {
+ if (empty())
+ return 0;
+ return ranges.back().end;
+ }
+
+ bool expiredAt(unsigned index) const {
+ return index >= endNumber();
+ }
+
+ bool liveAt(unsigned index) const;
+
+ // liveBeforeAndAt - Check if the interval is live at the index and the
+ // index just before it. If index is liveAt, check if it starts a new live
+ // range.If it does, then check if the previous live range ends at index-1.
+ bool liveBeforeAndAt(unsigned index) const;
+
+ /// getLiveRangeContaining - Return the live range that contains the
+ /// specified index, or null if there is none.
+ const LiveRange *getLiveRangeContaining(unsigned Idx) const {
+ const_iterator I = FindLiveRangeContaining(Idx);
+ return I == end() ? 0 : &*I;
+ }
+
+ /// FindLiveRangeContaining - Return an iterator to the live range that
+ /// contains the specified index, or end() if there is none.
+ const_iterator FindLiveRangeContaining(unsigned Idx) const;
+
+ /// FindLiveRangeContaining - Return an iterator to the live range that
+ /// contains the specified index, or end() if there is none.
+ iterator FindLiveRangeContaining(unsigned Idx);
+
+ /// findDefinedVNInfo - Find the VNInfo that's defined at the specified
+ /// index (register interval) or defined by the specified register (stack
+    /// interval).
+ VNInfo *findDefinedVNInfo(unsigned DefIdxOrReg) const;
+
+ /// overlaps - Return true if the intersection of the two live intervals is
+ /// not empty.
+ bool overlaps(const LiveInterval& other) const {
+ return overlapsFrom(other, other.begin());
+ }
+
+ /// overlaps - Return true if the live interval overlaps a range specified
+ /// by [Start, End).
+ bool overlaps(unsigned Start, unsigned End) const;
+
+ /// overlapsFrom - Return true if the intersection of the two live intervals
+ /// is not empty. The specified iterator is a hint that we can begin
+ /// scanning the Other interval starting at I.
+ bool overlapsFrom(const LiveInterval& other, const_iterator I) const;
+
+    /// addRange - Add the specified LiveRange to this interval, merging
+    /// ranges as appropriate. The inserted range may be merged into
+    /// neighboring ranges (and so may grow) as it is added.
+ void addRange(LiveRange LR) {
+ addRangeFrom(LR, ranges.begin());
+ }
+
+ /// join - Join two live intervals (this, and other) together. This applies
+ /// mappings to the value numbers in the LHS/RHS intervals as specified. If
+ /// the intervals are not joinable, this aborts.
+ void join(LiveInterval &Other, const int *ValNoAssignments,
+ const int *RHSValNoAssignments,
+ SmallVector<VNInfo*, 16> &NewVNInfo);
+
+    /// isInOneLiveRange - Return true if the range specified is entirely in
+    /// a single LiveRange of the live interval.
+ bool isInOneLiveRange(unsigned Start, unsigned End);
+
+ /// removeRange - Remove the specified range from this interval. Note that
+ /// the range must be a single LiveRange in its entirety.
+ void removeRange(unsigned Start, unsigned End, bool RemoveDeadValNo = false);
+
+ void removeRange(LiveRange LR, bool RemoveDeadValNo = false) {
+ removeRange(LR.start, LR.end, RemoveDeadValNo);
+ }
+
+ /// removeValNo - Remove all the ranges defined by the specified value#.
+ /// Also remove the value# from value# list.
+ void removeValNo(VNInfo *ValNo);
+
+ /// scaleNumbering - Renumber VNI and ranges to provide gaps for new
+ /// instructions.
+ void scaleNumbering(unsigned factor);
+
+ /// getSize - Returns the sum of sizes of all the LiveRange's.
+ ///
+ unsigned getSize() const;
+
+ bool operator<(const LiveInterval& other) const {
+ return beginNumber() < other.beginNumber();
+ }
+
+ void print(std::ostream &OS, const TargetRegisterInfo *TRI = 0) const;
+ void print(std::ostream *OS, const TargetRegisterInfo *TRI = 0) const {
+ if (OS) print(*OS, TRI);
+ }
+ void dump() const;
+
+ private:
+ Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
+ void extendIntervalEndTo(Ranges::iterator I, unsigned NewEnd);
+ Ranges::iterator extendIntervalStartTo(Ranges::iterator I, unsigned NewStr);
+ LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
+ };
+
+ inline std::ostream &operator<<(std::ostream &OS, const LiveInterval &LI) {
+ LI.print(OS);
+ return OS;
+ }
+}
+
+#endif
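
These classes assume the four-slot instruction numbering used throughout register allocation (LOAD, USE, DEF, STORE per instruction, InstrSlots::NUM == 4); the InstrSlots::scale helper above and the static index helpers in LiveIntervalAnalysis below are thin wrappers around this arithmetic. A standalone illustration:

  #include <cassert>

  enum { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };

  static unsigned getBaseIndex(unsigned index) { return index - index % NUM; }
  static unsigned getDefIndex(unsigned index)  { return getBaseIndex(index) + DEF; }

  int main() {
    // Instruction #5 owns indices [20, 24); its def slot is 22.
    unsigned AnyIndexInInstr5 = 5 * NUM + USE;        // 21
    assert(getBaseIndex(AnyIndexInInstr5) == 20);
    assert(getDefIndex(AnyIndexInInstr5) == 22);
    return 0;
  }
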
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
new file mode 100644
index 0000000..7c44cc7
--- /dev/null
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -0,0 +1,537 @@
+//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveInterval analysis pass. Given some numbering of
+// each of the machine instructions (in this implementation, depth-first order), an
+// interval [i, j) is said to be a live interval for register v if there is no
+// instruction with number j' > j such that v is live at j' and there is no
+// instruction with number i' < i such that v is live at i'. In this
+// implementation intervals can have holes, i.e. an interval might look like
+// [1,20), [50,65), [1000,1001).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
+#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include <cmath>
+
+namespace llvm {
+
+ class AliasAnalysis;
+ class LiveVariables;
+ class MachineLoopInfo;
+ class TargetRegisterInfo;
+ class MachineRegisterInfo;
+ class TargetInstrInfo;
+ class TargetRegisterClass;
+ class VirtRegMap;
+ typedef std::pair<unsigned, MachineBasicBlock*> IdxMBBPair;
+
+ inline bool operator<(unsigned V, const IdxMBBPair &IM) {
+ return V < IM.first;
+ }
+
+ inline bool operator<(const IdxMBBPair &IM, unsigned V) {
+ return IM.first < V;
+ }
+
+ struct Idx2MBBCompare {
+ bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
+ return LHS.first < RHS.first;
+ }
+ };
+
+ class LiveIntervals : public MachineFunctionPass {
+ MachineFunction* mf_;
+ MachineRegisterInfo* mri_;
+ const TargetMachine* tm_;
+ const TargetRegisterInfo* tri_;
+ const TargetInstrInfo* tii_;
+ AliasAnalysis *aa_;
+ LiveVariables* lv_;
+
+ /// Special pool allocator for VNInfo's (LiveInterval val#).
+ ///
+ BumpPtrAllocator VNInfoAllocator;
+
+ /// MBB2IdxMap - The indexes of the first and last instructions in the
+ /// specified basic block.
+ std::vector<std::pair<unsigned, unsigned> > MBB2IdxMap;
+
+ /// Idx2MBBMap - Sorted list of pairs of index of first instruction
+ /// and MBB id.
+ std::vector<IdxMBBPair> Idx2MBBMap;
+
+ /// FunctionSize - The number of instructions present in the function
+ uint64_t FunctionSize;
+
+ typedef DenseMap<MachineInstr*, unsigned> Mi2IndexMap;
+ Mi2IndexMap mi2iMap_;
+
+ typedef std::vector<MachineInstr*> Index2MiMap;
+ Index2MiMap i2miMap_;
+
+ typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
+ Reg2IntervalMap r2iMap_;
+
+ BitVector allocatableRegs_;
+
+ std::vector<MachineInstr*> ClonedMIs;
+
+ typedef LiveInterval::InstrSlots InstrSlots;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveIntervals() : MachineFunctionPass(&ID) {}
+
+ static unsigned getBaseIndex(unsigned index) {
+ return index - (index % InstrSlots::NUM);
+ }
+ static unsigned getBoundaryIndex(unsigned index) {
+ return getBaseIndex(index + InstrSlots::NUM - 1);
+ }
+ static unsigned getLoadIndex(unsigned index) {
+ return getBaseIndex(index) + InstrSlots::LOAD;
+ }
+ static unsigned getUseIndex(unsigned index) {
+ return getBaseIndex(index) + InstrSlots::USE;
+ }
+ static unsigned getDefIndex(unsigned index) {
+ return getBaseIndex(index) + InstrSlots::DEF;
+ }
+ static unsigned getStoreIndex(unsigned index) {
+ return getBaseIndex(index) + InstrSlots::STORE;
+ }
+
+ static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
+ return (isDef + isUse) * powf(10.0F, (float)loopDepth);
+ }
+
+ typedef Reg2IntervalMap::iterator iterator;
+ typedef Reg2IntervalMap::const_iterator const_iterator;
+ const_iterator begin() const { return r2iMap_.begin(); }
+ const_iterator end() const { return r2iMap_.end(); }
+ iterator begin() { return r2iMap_.begin(); }
+ iterator end() { return r2iMap_.end(); }
+ unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); }
+
+ LiveInterval &getInterval(unsigned reg) {
+ Reg2IntervalMap::iterator I = r2iMap_.find(reg);
+ assert(I != r2iMap_.end() && "Interval does not exist for register");
+ return *I->second;
+ }
+
+ const LiveInterval &getInterval(unsigned reg) const {
+ Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
+ assert(I != r2iMap_.end() && "Interval does not exist for register");
+ return *I->second;
+ }
+
+ bool hasInterval(unsigned reg) const {
+ return r2iMap_.count(reg);
+ }
+
+ /// getMBBStartIdx - Return the base index of the first instruction in the
+ /// specified MachineBasicBlock.
+ unsigned getMBBStartIdx(MachineBasicBlock *MBB) const {
+ return getMBBStartIdx(MBB->getNumber());
+ }
+ unsigned getMBBStartIdx(unsigned MBBNo) const {
+ assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
+ return MBB2IdxMap[MBBNo].first;
+ }
+
+ /// getMBBEndIdx - Return the store index of the last instruction in the
+ /// specified MachineBasicBlock.
+ unsigned getMBBEndIdx(MachineBasicBlock *MBB) const {
+ return getMBBEndIdx(MBB->getNumber());
+ }
+ unsigned getMBBEndIdx(unsigned MBBNo) const {
+ assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
+ return MBB2IdxMap[MBBNo].second;
+ }
+
+ /// getScaledIntervalSize - get the size of an interval in "units,"
+ /// where every function is composed of one thousand units. This
+ /// measure scales properly with empty index slots in the function.
+ double getScaledIntervalSize(LiveInterval& I) {
+ return (1000.0 / InstrSlots::NUM * I.getSize()) / i2miMap_.size();
+ }
+
+ /// getApproximateInstructionCount - computes an estimate of the number
+ /// of instructions in a given LiveInterval.
+ unsigned getApproximateInstructionCount(LiveInterval& I) {
+ double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
+ return (unsigned)(IntervalPercentage * FunctionSize);
+ }
+
+    /// getMBBFromIndex - given an index in any instruction of an
+    /// MBB, return a pointer to the MBB.
+ MachineBasicBlock* getMBBFromIndex(unsigned index) const {
+ std::vector<IdxMBBPair>::const_iterator I =
+ std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index);
+ // Take the pair containing the index
+ std::vector<IdxMBBPair>::const_iterator J =
+ ((I != Idx2MBBMap.end() && I->first > index) ||
+ (I == Idx2MBBMap.end() && Idx2MBBMap.size()>0)) ? (I-1): I;
+
+ assert(J != Idx2MBBMap.end() && J->first < index+1 &&
+ index <= getMBBEndIdx(J->second) &&
+ "index does not correspond to an MBB");
+ return J->second;
+ }
+
+ /// getInstructionIndex - returns the base index of instr
+ unsigned getInstructionIndex(MachineInstr* instr) const {
+ Mi2IndexMap::const_iterator it = mi2iMap_.find(instr);
+ assert(it != mi2iMap_.end() && "Invalid instruction!");
+ return it->second;
+ }
+
+ /// getInstructionFromIndex - given an index in any slot of an
+    /// instruction, return a pointer to the instruction.
+ MachineInstr* getInstructionFromIndex(unsigned index) const {
+ index /= InstrSlots::NUM; // convert index to vector index
+ assert(index < i2miMap_.size() &&
+ "index does not correspond to an instruction");
+ return i2miMap_[index];
+ }
+
+ /// hasGapBeforeInstr - Return true if the previous instruction slot,
+ /// i.e. Index - InstrSlots::NUM, is not occupied.
+ bool hasGapBeforeInstr(unsigned Index) {
+ Index = getBaseIndex(Index - InstrSlots::NUM);
+ return getInstructionFromIndex(Index) == 0;
+ }
+
+ /// hasGapAfterInstr - Return true if the successive instruction slot,
+    /// i.e. Index + InstrSlots::NUM, is not occupied.
+ bool hasGapAfterInstr(unsigned Index) {
+ Index = getBaseIndex(Index + InstrSlots::NUM);
+ return getInstructionFromIndex(Index) == 0;
+ }
+
+ /// findGapBeforeInstr - Find an empty instruction slot before the
+ /// specified index. If "Furthest" is true, find one that's furthest
+ /// away from the index (but before any index that's occupied).
+ unsigned findGapBeforeInstr(unsigned Index, bool Furthest = false) {
+ Index = getBaseIndex(Index - InstrSlots::NUM);
+ if (getInstructionFromIndex(Index))
+ return 0; // No gap!
+ if (!Furthest)
+ return Index;
+ unsigned PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
+ while (getInstructionFromIndex(Index)) {
+ Index = PrevIndex;
+ PrevIndex = getBaseIndex(Index - InstrSlots::NUM);
+ }
+ return Index;
+ }
+
+ /// InsertMachineInstrInMaps - Insert the specified machine instruction
+ /// into the instruction index map at the given index.
+ void InsertMachineInstrInMaps(MachineInstr *MI, unsigned Index) {
+ i2miMap_[Index / InstrSlots::NUM] = MI;
+ Mi2IndexMap::iterator it = mi2iMap_.find(MI);
+ assert(it == mi2iMap_.end() && "Already in map!");
+ mi2iMap_[MI] = Index;
+ }
+
+ /// conflictsWithPhysRegDef - Returns true if the specified register
+ /// is defined during the duration of the specified interval.
+ bool conflictsWithPhysRegDef(const LiveInterval &li, VirtRegMap &vrm,
+ unsigned reg);
+
+    /// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
+    /// it can check uses as well.
+ bool conflictsWithPhysRegRef(LiveInterval &li, unsigned Reg,
+ bool CheckUse,
+ SmallPtrSet<MachineInstr*,32> &JoinedCopies);
+
+    /// findLiveInMBBs - Given a live range, return true if the value of the
+    /// range is live into any MBB, and fill MBBs with the list of basic
+    /// blocks in which the value is live.
+ bool findLiveInMBBs(unsigned Start, unsigned End,
+ SmallVectorImpl<MachineBasicBlock*> &MBBs) const;
+
+    /// findReachableMBBs - Return the list of MBBs that can be reached via
+    /// any branch or fall-through. Return true if the list is not empty.
+ bool findReachableMBBs(unsigned Start, unsigned End,
+ SmallVectorImpl<MachineBasicBlock*> &MBBs) const;
+
+ // Interval creation
+
+ LiveInterval &getOrCreateInterval(unsigned reg) {
+ Reg2IntervalMap::iterator I = r2iMap_.find(reg);
+ if (I == r2iMap_.end())
+ I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first;
+ return *I->second;
+ }
+
+ /// dupInterval - Duplicate a live interval. The caller is responsible for
+ /// managing the allocated memory.
+ LiveInterval *dupInterval(LiveInterval *li);
+
+ /// addLiveRangeToEndOfBlock - Given a register and an instruction,
+ /// adds a live range from that instruction to the end of its MBB.
+ LiveRange addLiveRangeToEndOfBlock(unsigned reg,
+ MachineInstr* startInst);
+
+ // Interval removal
+
+ void removeInterval(unsigned Reg) {
+ DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg);
+ delete I->second;
+ r2iMap_.erase(I);
+ }
+
+ /// isNotInMIMap - returns true if the specified machine instr has been
+ /// removed or was never entered in the map.
+ bool isNotInMIMap(MachineInstr* instr) const {
+ return !mi2iMap_.count(instr);
+ }
+
+ /// RemoveMachineInstrFromMaps - This marks the specified machine instr as
+ /// deleted.
+ void RemoveMachineInstrFromMaps(MachineInstr *MI) {
+ // remove index -> MachineInstr and
+ // MachineInstr -> index mappings
+ Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
+ if (mi2i != mi2iMap_.end()) {
+ i2miMap_[mi2i->second/InstrSlots::NUM] = 0;
+ mi2iMap_.erase(mi2i);
+ }
+ }
+
+    /// ReplaceMachineInstrInMaps - Replace a machine instr with a new one in
+    /// the maps used by the register allocator.
+ void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
+ Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
+ if (mi2i == mi2iMap_.end())
+ return;
+ i2miMap_[mi2i->second/InstrSlots::NUM] = NewMI;
+ Mi2IndexMap::iterator it = mi2iMap_.find(MI);
+ assert(it != mi2iMap_.end() && "Invalid instruction!");
+ unsigned Index = it->second;
+ mi2iMap_.erase(it);
+ mi2iMap_[NewMI] = Index;
+ }
+
+ BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }
+
+ /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
+ /// copy field and returns the source register that defines it.
+ unsigned getVNInfoSourceReg(const VNInfo *VNI) const;
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual void releaseMemory();
+
+ /// runOnMachineFunction - pass entry point
+ virtual bool runOnMachineFunction(MachineFunction&);
+
+ /// print - Implement the dump method.
+ virtual void print(std::ostream &O, const Module* = 0) const;
+ void print(std::ostream *O, const Module* M = 0) const {
+ if (O) print(*O, M);
+ }
+
+ /// addIntervalsForSpills - Create new intervals for spilled defs / uses of
+ /// the given interval. FIXME: It also returns the weight of the spill slot
+ /// (if any is created) by reference. This is temporary.
+ std::vector<LiveInterval*>
+ addIntervalsForSpills(const LiveInterval& i,
+ SmallVectorImpl<LiveInterval*> &SpillIs,
+ const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
+
+ /// addIntervalsForSpillsFast - Quickly create new intervals for spilled
+ /// defs / uses without remat or splitting.
+ std::vector<LiveInterval*>
+ addIntervalsForSpillsFast(const LiveInterval &li,
+ const MachineLoopInfo *loopInfo, VirtRegMap &vrm);
+
+ /// spillPhysRegAroundRegDefsUses - Spill the specified physical register
+ /// around all defs and uses of the specified interval. Return true if it
+ /// was able to cut its interval.
+ bool spillPhysRegAroundRegDefsUses(const LiveInterval &li,
+ unsigned PhysReg, VirtRegMap &vrm);
+
+    /// isReMaterializable - Returns true if the definition MI of every
+ /// val# of the specified interval is re-materializable. Also returns true
+ /// by reference if all of the defs are load instructions.
+ bool isReMaterializable(const LiveInterval &li,
+ SmallVectorImpl<LiveInterval*> &SpillIs,
+ bool &isLoad);
+
+ /// isReMaterializable - Returns true if the definition MI of the specified
+ /// val# of the specified interval is re-materializable.
+ bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
+ MachineInstr *MI);
+
+ /// getRepresentativeReg - Find the largest super register of the specified
+ /// physical register.
+ unsigned getRepresentativeReg(unsigned Reg) const;
+
+ /// getNumConflictsWithPhysReg - Return the number of uses and defs of the
+    /// specified interval that conflict with the specified physical register.
+ unsigned getNumConflictsWithPhysReg(const LiveInterval &li,
+ unsigned PhysReg) const;
+
+ /// computeNumbering - Compute the index numbering.
+ void computeNumbering();
+
+ /// scaleNumbering - Rescale interval numbers to introduce gaps for new
+ /// instructions
+ void scaleNumbering(int factor);
+
+ /// intervalIsInOneMBB - Returns true if the specified interval is entirely
+ /// within a single basic block.
+ bool intervalIsInOneMBB(const LiveInterval &li) const;
+
+ private:
+ /// computeIntervals - Compute live intervals.
+ void computeIntervals();
+
+ /// handleRegisterDef - update intervals for a register def
+ /// (calls handlePhysicalRegisterDef and
+ /// handleVirtualRegisterDef)
+ void handleRegisterDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MI, unsigned MIIdx,
+ MachineOperand& MO, unsigned MOIdx);
+
+ /// handleVirtualRegisterDef - update intervals for a virtual
+ /// register def
+ void handleVirtualRegisterDef(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator MI,
+ unsigned MIIdx, MachineOperand& MO,
+ unsigned MOIdx, LiveInterval& interval);
+
+ /// handlePhysicalRegisterDef - update intervals for a physical register
+ /// def.
+ void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
+ MachineBasicBlock::iterator mi,
+ unsigned MIIdx, MachineOperand& MO,
+ LiveInterval &interval,
+ MachineInstr *CopyMI);
+
+ /// handleLiveInRegister - Create interval for a livein register.
+ void handleLiveInRegister(MachineBasicBlock* mbb,
+ unsigned MIIdx,
+ LiveInterval &interval, bool isAlias = false);
+
+ /// getReMatImplicitUse - If the remat definition MI has one (for now, we
+ /// only allow one) virtual register operand, then its uses are implicitly
+ /// using the register. Returns the virtual register.
+ unsigned getReMatImplicitUse(const LiveInterval &li,
+ MachineInstr *MI) const;
+
+ /// isValNoAvailableAt - Return true if the val# of the specified interval
+ /// which reaches the given instruction also reaches the specified use
+ /// index.
+ bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
+ unsigned UseIdx) const;
+
+ /// isReMaterializable - Returns true if the definition MI of the specified
+ /// val# of the specified interval is re-materializable. Also returns true
+ /// by reference if the def is a load.
+ bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
+ MachineInstr *MI,
+ SmallVectorImpl<LiveInterval*> &SpillIs,
+ bool &isLoad);
+
+    /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
+    /// slot / to reg, or any rematerialized load, into the ith operand of the
+    /// specified MI. If it is successful, MI is updated with the newly created
+    /// MI and returns true.
+ bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
+ MachineInstr *DefMI, unsigned InstrIdx,
+ SmallVector<unsigned, 2> &Ops,
+ bool isSS, int Slot, unsigned Reg);
+
+ /// canFoldMemoryOperand - Return true if the specified load / store
+ /// folding is possible.
+ bool canFoldMemoryOperand(MachineInstr *MI,
+ SmallVector<unsigned, 2> &Ops,
+ bool ReMatLoadSS) const;
+
+ /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
+ /// VNInfo that's after the specified index but is within the basic block.
+ bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
+ MachineBasicBlock *MBB, unsigned Idx) const;
+
+ /// hasAllocatableSuperReg - Return true if the specified physical register
+ /// has any super register that's allocatable.
+ bool hasAllocatableSuperReg(unsigned Reg) const;
+
+ /// SRInfo - Spill / restore info.
+ struct SRInfo {
+ int index;
+ unsigned vreg;
+ bool canFold;
+ SRInfo(int i, unsigned vr, bool f) : index(i), vreg(vr), canFold(f) {};
+ };
+
+ bool alsoFoldARestore(int Id, int index, unsigned vr,
+ BitVector &RestoreMBBs,
+ DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
+ void eraseRestoreInfo(int Id, int index, unsigned vr,
+ BitVector &RestoreMBBs,
+ DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
+
+ /// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
+ /// spilled and create empty intervals for their uses.
+ void handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
+ const TargetRegisterClass* rc,
+ std::vector<LiveInterval*> &NewLIs);
+
+ /// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
+ /// interval on to-be re-materialized operands of MI) with new register.
+ void rewriteImplicitOps(const LiveInterval &li,
+ MachineInstr *MI, unsigned NewVReg, VirtRegMap &vrm);
+
+ /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper
+ /// functions for addIntervalsForSpills to rewrite uses / defs for the given
+ /// live range.
+ bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
+ bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
+ MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
+ bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
+ VirtRegMap &vrm, const TargetRegisterClass* rc,
+ SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
+ unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
+ DenseMap<unsigned,unsigned> &MBBVRegsMap,
+ std::vector<LiveInterval*> &NewLIs);
+ void rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
+ LiveInterval::Ranges::const_iterator &I,
+ MachineInstr *OrigDefMI, MachineInstr *DefMI, unsigned Slot, int LdSlot,
+ bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
+ VirtRegMap &vrm, const TargetRegisterClass* rc,
+ SmallVector<int, 4> &ReMatIds, const MachineLoopInfo *loopInfo,
+ BitVector &SpillMBBs,
+ DenseMap<unsigned,std::vector<SRInfo> > &SpillIdxes,
+ BitVector &RestoreMBBs,
+ DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes,
+ DenseMap<unsigned,unsigned> &MBBVRegsMap,
+ std::vector<LiveInterval*> &NewLIs);
+
+ static LiveInterval* createInterval(unsigned Reg);
+
+ void printRegName(unsigned reg) const;
+ };
+
+} // End llvm namespace
+
+#endif
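
The static index helpers above assume each machine instruction owns a block of InstrSlots::NUM consecutive numbers, one per slot kind (LOAD, USE, DEF, STORE, declared in LiveInterval.h; NUM = 4 is assumed here since that enum is not shown in this diff). Below is a standalone mirror of the arithmetic, together with the getSpillWeight formula, purely for illustration:

  #include <cmath>
  #include <cstdio>

  // Assumed mirror of LiveInterval::InstrSlots: four slots per instruction.
  enum InstrSlots { LOAD = 0, USE = 1, DEF = 2, STORE = 3, NUM = 4 };

  static unsigned getBaseIndex(unsigned Index)     { return Index - (Index % NUM); }
  static unsigned getBoundaryIndex(unsigned Index) { return getBaseIndex(Index + NUM - 1); }
  static unsigned getUseIndex(unsigned Index)      { return getBaseIndex(Index) + USE; }
  static unsigned getDefIndex(unsigned Index)      { return getBaseIndex(Index) + DEF; }

  // Same formula as LiveIntervals::getSpillWeight: each level of loop depth
  // multiplies the weight by 10.
  static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
    return (isDef + isUse) * powf(10.0F, (float)loopDepth);
  }

  int main() {
    unsigned Idx = 10;                          // a slot owned by instruction #2
    std::printf("base %u use %u def %u boundary %u\n",
                getBaseIndex(Idx), getUseIndex(Idx), getDefIndex(Idx),
                getBoundaryIndex(Idx));         // base 8 use 9 def 10 boundary 12
    std::printf("weight %g\n", getSpillWeight(true, true, 2)); // 2 * 10^2 = 200
    return 0;
  }
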
diff --git a/include/llvm/CodeGen/LiveStackAnalysis.h b/include/llvm/CodeGen/LiveStackAnalysis.h
new file mode 100644
index 0000000..27ae1be
--- /dev/null
+++ b/include/llvm/CodeGen/LiveStackAnalysis.h
@@ -0,0 +1,112 @@
+//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the live stack slot analysis pass. It is analogous to
+// live interval analysis except it's analyzing liveness of stack slots rather
+// than registers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVESTACK_ANALYSIS_H
+#define LLVM_CODEGEN_LIVESTACK_ANALYSIS_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Support/Allocator.h"
+#include <map>
+
+namespace llvm {
+
+ class LiveStacks : public MachineFunctionPass {
+ /// Special pool allocator for VNInfo's (LiveInterval val#).
+ ///
+ BumpPtrAllocator VNInfoAllocator;
+
+ /// S2IMap - Stack slot indices to live interval mapping.
+ ///
+ typedef std::map<int, LiveInterval> SS2IntervalMap;
+ SS2IntervalMap S2IMap;
+
+ /// S2RCMap - Stack slot indices to register class mapping.
+ std::map<int, const TargetRegisterClass*> S2RCMap;
+
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveStacks() : MachineFunctionPass(&ID) {}
+
+ typedef SS2IntervalMap::iterator iterator;
+ typedef SS2IntervalMap::const_iterator const_iterator;
+ const_iterator begin() const { return S2IMap.begin(); }
+ const_iterator end() const { return S2IMap.end(); }
+ iterator begin() { return S2IMap.begin(); }
+ iterator end() { return S2IMap.end(); }
+
+ void scaleNumbering(int factor);
+
+ unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
+
+ LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
+      assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::iterator I = S2IMap.find(Slot);
+ if (I == S2IMap.end()) {
+ I = S2IMap.insert(I,std::make_pair(Slot, LiveInterval(Slot,0.0F,true)));
+ S2RCMap.insert(std::make_pair(Slot, RC));
+ } else {
+ // Use the largest common subclass register class.
+ const TargetRegisterClass *OldRC = S2RCMap[Slot];
+ S2RCMap[Slot] = getCommonSubClass(OldRC, RC);
+ }
+ return I->second;
+ }
+
+ LiveInterval &getInterval(int Slot) {
+      assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::iterator I = S2IMap.find(Slot);
+ assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+ return I->second;
+ }
+
+ const LiveInterval &getInterval(int Slot) const {
+      assert(Slot >= 0 && "Spill slot index must be >= 0");
+ SS2IntervalMap::const_iterator I = S2IMap.find(Slot);
+ assert(I != S2IMap.end() && "Interval does not exist for stack slot");
+ return I->second;
+ }
+
+ bool hasInterval(int Slot) const {
+ return S2IMap.count(Slot);
+ }
+
+ const TargetRegisterClass *getIntervalRegClass(int Slot) const {
+      assert(Slot >= 0 && "Spill slot index must be >= 0");
+ std::map<int, const TargetRegisterClass*>::const_iterator
+ I = S2RCMap.find(Slot);
+ assert(I != S2RCMap.end() &&
+ "Register class info does not exist for stack slot");
+ return I->second;
+ }
+
+ BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+ virtual void releaseMemory();
+
+ /// runOnMachineFunction - pass entry point
+ virtual bool runOnMachineFunction(MachineFunction&);
+
+ /// print - Implement the dump method.
+ virtual void print(std::ostream &O, const Module* = 0) const;
+ void print(std::ostream *O, const Module* M = 0) const {
+ if (O) print(*O, M);
+ }
+ };
+}
+
+#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */
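
A hedged sketch of how a pass of this vintage might consume the analysis: require LiveStacks in getAnalysisUsage and walk the slot-to-interval map through begin()/end(). The pass itself (its name, ID and lack of registration) is hypothetical; the LiveStacks API and llvm::cerr from Support/Streams.h are taken from headers in this tree.

  #include "llvm/CodeGen/LiveStackAnalysis.h"
  #include "llvm/CodeGen/MachineFunctionPass.h"
  #include "llvm/Support/Streams.h"

  using namespace llvm;

  namespace {
    // Hypothetical pass that just dumps every stack-slot interval.
    struct DumpStackIntervals : public MachineFunctionPass {
      static char ID;
      DumpStackIntervals() : MachineFunctionPass(&ID) {}

      virtual void getAnalysisUsage(AnalysisUsage &AU) const {
        AU.setPreservesAll();
        AU.addRequired<LiveStacks>();
      }

      virtual bool runOnMachineFunction(MachineFunction &MF) {
        LiveStacks &LS = getAnalysis<LiveStacks>();
        for (LiveStacks::const_iterator I = LS.begin(), E = LS.end();
             I != E; ++I)
          cerr << "slot " << I->first << ": " << I->second << "\n";
        return false;                       // analysis only, nothing changed
      }
    };
    char DumpStackIntervals::ID = 0;
  }
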
diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h
new file mode 100644
index 0000000..26c0362
--- /dev/null
+++ b/include/llvm/CodeGen/LiveVariables.h
@@ -0,0 +1,269 @@
+//===-- llvm/CodeGen/LiveVariables.h - Live Variable Analysis ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LiveVariables analysis pass. For each machine
+// instruction in the function, this pass calculates the set of registers that
+// are immediately dead after the instruction (i.e., the instruction calculates
+// the value, but it is never used) and the set of registers that are used by
+// the instruction, but are never used after the instruction (i.e., they are
+// killed).
+//
+// This class computes live variables using a sparse implementation based on
+// the machine code SSA form. This class computes live variable information for
+// each virtual and _register allocatable_ physical register in a function. It
+// uses the dominance properties of SSA form to efficiently compute live
+// variables for virtual registers, and assumes that physical registers are only
+// live within a single basic block (allowing it to do a single local analysis
+// to resolve physical register lifetimes in each basic block). If a physical
+// register is not register allocatable, it is not tracked. This is useful for
+// things like the stack pointer and condition codes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
+#define LLVM_CODEGEN_LIVEVARIABLES_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SparseBitVector.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+
+class LiveVariables : public MachineFunctionPass {
+public:
+ static char ID; // Pass identification, replacement for typeid
+ LiveVariables() : MachineFunctionPass(&ID) {}
+
+ /// VarInfo - This represents the regions where a virtual register is live in
+ /// the program. We represent this with three different pieces of
+  /// information: the set of blocks in which the value is live
+  /// throughout, the set of blocks in which the value is actually used,
+ /// and the set of non-phi instructions that are the last users of the value.
+ ///
+ /// In the common case where a value is defined and killed in the same block,
+  /// there is one killing instruction, and AliveBlocks is empty.
+ ///
+ /// Otherwise, the value is live out of the block. If the value is live
+ /// throughout any blocks, these blocks are listed in AliveBlocks. Blocks
+ /// where the liveness range ends are not included in AliveBlocks, instead
+ /// being captured by the Kills set. In these blocks, the value is live into
+ /// the block (unless the value is defined and killed in the same block) and
+ /// lives until the specified instruction. Note that there cannot ever be a
+ /// value whose Kills set contains two instructions from the same basic block.
+ ///
+ /// PHI nodes complicate things a bit. If a PHI node is the last user of a
+ /// value in one of its predecessor blocks, it is not listed in the kills set,
+ /// but does include the predecessor block in the AliveBlocks set (unless that
+  /// block also defines the value). This leads to the (perfectly sensible)
+ /// situation where a value is defined in a block, and the last use is a phi
+ /// node in the successor. In this case, AliveBlocks is empty (the value is
+ /// not live across any blocks) and Kills is empty (phi nodes are not
+  /// included). This is sensible because the value must be live to the end of
+ /// the block, but is not live in any successor blocks.
+ struct VarInfo {
+ /// AliveBlocks - Set of blocks in which this value is alive completely
+ /// through. This is a bit set which uses the basic block number as an
+ /// index.
+ ///
+ SparseBitVector<> AliveBlocks;
+
+ /// NumUses - Number of uses of this register across the entire function.
+ ///
+ unsigned NumUses;
+
+ /// Kills - List of MachineInstruction's which are the last use of this
+ /// virtual register (kill it) in their basic block.
+ ///
+ std::vector<MachineInstr*> Kills;
+
+ VarInfo() : NumUses(0) {}
+
+ /// removeKill - Delete a kill corresponding to the specified
+ /// machine instruction. Returns true if there was a kill
+ /// corresponding to this instruction, false otherwise.
+ bool removeKill(MachineInstr *MI) {
+ std::vector<MachineInstr*>::iterator
+ I = std::find(Kills.begin(), Kills.end(), MI);
+ if (I == Kills.end())
+ return false;
+ Kills.erase(I);
+ return true;
+ }
+
+ void dump() const;
+ };
+
+private:
+ /// VirtRegInfo - This list is a mapping from virtual register number to
+ /// variable information. FirstVirtualRegister is subtracted from the virtual
+ /// register number before indexing into this list.
+ ///
+ std::vector<VarInfo> VirtRegInfo;
+
+ /// ReservedRegisters - This vector keeps track of which registers
+  /// are reserved registers, i.e. registers that are not allocatable by the
+  /// target machine. We cannot track liveness for values that are in this set.
+ ///
+ BitVector ReservedRegisters;
+
+private: // Intermediate data structures
+ MachineFunction *MF;
+
+ MachineRegisterInfo* MRI;
+
+ const TargetRegisterInfo *TRI;
+
+  // PhysRegDef - Keep track of which instruction was the last def of a
+ // physical register. This is a purely local property, because all physical
+ // register references are presumed dead across basic blocks.
+ MachineInstr **PhysRegDef;
+
+  // PhysRegUse - Keep track of which instruction was the last use of a
+ // physical register. This is a purely local property, because all physical
+ // register references are presumed dead across basic blocks.
+ MachineInstr **PhysRegUse;
+
+ SmallVector<unsigned, 4> *PHIVarInfo;
+
+  // DistanceMap - Keep track of the distance of an MI from the start of the
+ // current basic block.
+ DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+ /// HandlePhysRegKill - Add kills of Reg and its sub-registers to the
+ /// uses. Pay special attention to the sub-register uses which may come below
+ /// the last use of the whole register.
+ bool HandlePhysRegKill(unsigned Reg, MachineInstr *MI);
+
+ void HandlePhysRegUse(unsigned Reg, MachineInstr *MI);
+ void HandlePhysRegDef(unsigned Reg, MachineInstr *MI);
+
+ /// FindLastPartialDef - Return the last partial def of the specified register.
+ /// Also returns the sub-register that's defined.
+ MachineInstr *FindLastPartialDef(unsigned Reg, unsigned &PartDefReg);
+
+ /// hasRegisterUseBelow - Return true if the specified register is used after
+  /// the current instruction and before its next definition.
+ bool hasRegisterUseBelow(unsigned Reg, MachineBasicBlock::iterator I,
+ MachineBasicBlock *MBB);
+
+ /// analyzePHINodes - Gather information about the PHI nodes in here. In
+ /// particular, we want to map the variable information of a virtual
+ /// register which is used in a PHI node. We map that to the BB the vreg
+ /// is coming from.
+ void analyzePHINodes(const MachineFunction& Fn);
+public:
+
+ virtual bool runOnMachineFunction(MachineFunction &MF);
+
+ /// RegisterDefIsDead - Return true if the specified instruction defines the
+ /// specified register, but that definition is dead.
+ bool RegisterDefIsDead(MachineInstr *MI, unsigned Reg) const;
+
+ //===--------------------------------------------------------------------===//
+ // API to update live variable information
+
+ /// replaceKillInstruction - Update register kill info by replacing a kill
+ /// instruction with a new one.
+ void replaceKillInstruction(unsigned Reg, MachineInstr *OldMI,
+ MachineInstr *NewMI);
+
+ /// addVirtualRegisterKilled - Add information about the fact that the
+ /// specified register is killed after being used by the specified
+  /// instruction. If AddIfNotFound is true, add an implicit operand if it's
+ /// not found.
+ void addVirtualRegisterKilled(unsigned IncomingReg, MachineInstr *MI,
+ bool AddIfNotFound = false) {
+ if (MI->addRegisterKilled(IncomingReg, TRI, AddIfNotFound))
+ getVarInfo(IncomingReg).Kills.push_back(MI);
+ }
+
+ /// removeVirtualRegisterKilled - Remove the specified kill of the virtual
+ /// register from the live variable information. Returns true if the
+ /// variable was marked as killed by the specified instruction,
+ /// false otherwise.
+ bool removeVirtualRegisterKilled(unsigned reg, MachineInstr *MI) {
+ if (!getVarInfo(reg).removeKill(MI))
+ return false;
+
+ bool Removed = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isKill() && MO.getReg() == reg) {
+ MO.setIsKill(false);
+ Removed = true;
+ break;
+ }
+ }
+
+ assert(Removed && "Register is not used by this instruction!");
+ return true;
+ }
+
+ /// removeVirtualRegistersKilled - Remove all killed info for the specified
+ /// instruction.
+ void removeVirtualRegistersKilled(MachineInstr *MI);
+
+ /// addVirtualRegisterDead - Add information about the fact that the specified
+ /// register is dead after being used by the specified instruction. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found.
+ void addVirtualRegisterDead(unsigned IncomingReg, MachineInstr *MI,
+ bool AddIfNotFound = false) {
+ if (MI->addRegisterDead(IncomingReg, TRI, AddIfNotFound))
+ getVarInfo(IncomingReg).Kills.push_back(MI);
+ }
+
+ /// removeVirtualRegisterDead - Remove the specified kill of the virtual
+ /// register from the live variable information. Returns true if the
+ /// variable was marked dead at the specified instruction, false
+ /// otherwise.
+ bool removeVirtualRegisterDead(unsigned reg, MachineInstr *MI) {
+ if (!getVarInfo(reg).removeKill(MI))
+ return false;
+
+ bool Removed = false;
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (MO.isReg() && MO.isDef() && MO.getReg() == reg) {
+ MO.setIsDead(false);
+ Removed = true;
+ break;
+ }
+ }
+ assert(Removed && "Register is not defined by this instruction!");
+ return true;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual void releaseMemory() {
+ VirtRegInfo.clear();
+ }
+
+ /// getVarInfo - Return the VarInfo structure for the specified VIRTUAL
+ /// register.
+ VarInfo &getVarInfo(unsigned RegIdx);
+
+ void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+ MachineBasicBlock *BB);
+ void MarkVirtRegAliveInBlock(VarInfo& VRInfo, MachineBasicBlock* DefBlock,
+ MachineBasicBlock *BB,
+ std::vector<MachineBasicBlock*> &WorkList);
+ void HandleVirtRegDef(unsigned reg, MachineInstr *MI);
+ void HandleVirtRegUse(unsigned reg, MachineBasicBlock *MBB,
+ MachineInstr *MI);
+};
+
+} // End llvm namespace
+
+#endif
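
As a usage sketch, code running inside a pass that required LiveVariables can pull out the VarInfo of a virtual register and inspect where it is live and how often it is used. The helper below is hypothetical; getVarInfo, NumUses, Kills and AliveBlocks (indexed by basic block number) are as declared above.

  #include "llvm/CodeGen/LiveVariables.h"
  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/Support/Streams.h"

  using namespace llvm;

  // Sketch: report liveness facts for the virtual register Reg. Assumes the
  // caller sits in a MachineFunctionPass that did AU.addRequired<LiveVariables>().
  static void reportVReg(LiveVariables &LV, unsigned Reg, MachineFunction &MF) {
    LiveVariables::VarInfo &VI = LV.getVarInfo(Reg);
    cerr << "vreg " << Reg << ": " << VI.NumUses << " use(s), "
         << VI.Kills.size() << " kill(s)\n";

    // AliveBlocks holds the blocks the value is live all the way through.
    for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
         MBB != E; ++MBB)
      if (VI.AliveBlocks.test(MBB->getNumber()))
        cerr << "  live through BB#" << MBB->getNumber() << "\n";
  }
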
diff --git a/include/llvm/CodeGen/MachORelocation.h b/include/llvm/CodeGen/MachORelocation.h
new file mode 100644
index 0000000..d4027cc
--- /dev/null
+++ b/include/llvm/CodeGen/MachORelocation.h
@@ -0,0 +1,54 @@
+//=== MachORelocation.h - Mach-O Relocation Info ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachORelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHO_RELOCATION_H
+#define LLVM_CODEGEN_MACHO_RELOCATION_H
+
+namespace llvm {
+
+ /// MachORelocation - This struct contains information about each relocation
+ /// that needs to be emitted to the file.
+ /// see <mach-o/reloc.h>
+ class MachORelocation {
+ uint32_t r_address; // offset in the section to what is being relocated
+ uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
+ bool r_pcrel; // was relocated pc-relative already
+ uint8_t r_length; // length = 2 ^ r_length
+ bool r_extern; //
+ uint8_t r_type; // if not 0, machine-specific relocation type.
+ bool r_scattered; // 1 = scattered, 0 = non-scattered
+ int32_t r_value; // the value the item to be relocated is referring
+ // to.
+ public:
+ uint32_t getPackedFields() const {
+ if (r_scattered)
+ return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
+ ((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
+ else
+ return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
+ (r_extern << 4) | (r_type & 15);
+ }
+ uint32_t getAddress() const { return r_scattered ? r_value : r_address; }
+ uint32_t getRawAddress() const { return r_address; }
+
+ MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
+ bool ext, uint8_t type, bool scattered = false,
+ int32_t value = 0) :
+ r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
+ r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
+ };
+
+} // end llvm namespace
+
+#endif // LLVM_CODEGEN_MACHO_RELOCATION_H
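
The word layout produced by getPackedFields can be reproduced in isolation. The helpers below mirror the same bit packing outside of the class, with made-up example values; they are illustrations, not part of the header.

  #include <cstdio>
  #include <stdint.h>

  // Mirror of the non-scattered case: symbol index, pcrel flag, log2(length),
  // extern flag and relocation type packed into one 32-bit word.
  static uint32_t packNonScattered(uint32_t symbolnum, bool pcrel,
                                   uint8_t length, bool isExtern, uint8_t type) {
    return (symbolnum << 8) | ((uint32_t)pcrel << 7) | ((length & 3) << 5) |
           ((uint32_t)isExtern << 4) | (type & 15);
  }

  // Mirror of the scattered case: high bit set, then pcrel, length, type and
  // the low 24 bits of the section offset. 1u keeps the shift well defined.
  static uint32_t packScattered(uint32_t address, bool pcrel,
                                uint8_t length, uint8_t type) {
    return (1u << 31) | ((uint32_t)pcrel << 30) | ((uint32_t)(length & 3) << 28) |
           ((uint32_t)(type & 15) << 24) | (address & 0x00FFFFFF);
  }

  int main() {
    // e.g. symbol #7, pc-relative, 4-byte (2^2) target, external, type 0
    std::printf("%#010x\n", (unsigned)packNonScattered(7, true, 2, true, 0));
    std::printf("%#010x\n", (unsigned)packScattered(0x1234, false, 2, 0));
    return 0;
  }
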
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
new file mode 100644
index 0000000..134d226
--- /dev/null
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -0,0 +1,414 @@
+//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect the sequence of machine instructions for a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEBASICBLOCK_H
+#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
+
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/Support/Streams.h"
+
+namespace llvm {
+
+class BasicBlock;
+class MachineFunction;
+
+template <>
+struct ilist_traits<MachineInstr> : public ilist_default_traits<MachineInstr> {
+private:
+ mutable ilist_node<MachineInstr> Sentinel;
+
+  // this is only set by the MachineBasicBlock owning the instruction list
+ friend class MachineBasicBlock;
+ MachineBasicBlock* Parent;
+
+public:
+ MachineInstr *createSentinel() const {
+ return static_cast<MachineInstr*>(&Sentinel);
+ }
+ void destroySentinel(MachineInstr *) const {}
+
+ MachineInstr *provideInitialHead() const { return createSentinel(); }
+ MachineInstr *ensureHead(MachineInstr*) const { return createSentinel(); }
+ static void noteHead(MachineInstr*, MachineInstr*) {}
+
+ void addNodeToList(MachineInstr* N);
+ void removeNodeFromList(MachineInstr* N);
+ void transferNodesFromList(ilist_traits &SrcTraits,
+ ilist_iterator<MachineInstr> first,
+ ilist_iterator<MachineInstr> last);
+ void deleteNode(MachineInstr *N);
+private:
+ void createNode(const MachineInstr &);
+};
+
+class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
+ typedef ilist<MachineInstr> Instructions;
+ Instructions Insts;
+ const BasicBlock *BB;
+ int Number;
+ MachineFunction *xParent;
+
+ /// Predecessors/Successors - Keep track of the predecessor / successor
+ /// basicblocks.
+ std::vector<MachineBasicBlock *> Predecessors;
+ std::vector<MachineBasicBlock *> Successors;
+
+ /// LiveIns - Keep track of the physical registers that are livein of
+ /// the basicblock.
+ std::vector<unsigned> LiveIns;
+
+ /// Alignment - Alignment of the basic block. Zero if the basic block does
+ /// not need to be aligned.
+ unsigned Alignment;
+
+ /// IsLandingPad - Indicate that this basic block is entered via an
+ /// exception handler.
+ bool IsLandingPad;
+
+ // Intrusive list support
+ MachineBasicBlock() {}
+
+ explicit MachineBasicBlock(MachineFunction &mf, const BasicBlock *bb);
+
+ ~MachineBasicBlock();
+
+ // MachineBasicBlocks are allocated and owned by MachineFunction.
+ friend class MachineFunction;
+
+public:
+ /// getBasicBlock - Return the LLVM basic block that this instance
+ /// corresponded to originally.
+ ///
+ const BasicBlock *getBasicBlock() const { return BB; }
+
+ /// getParent - Return the MachineFunction containing this basic block.
+ ///
+ const MachineFunction *getParent() const { return xParent; }
+ MachineFunction *getParent() { return xParent; }
+
+ typedef Instructions::iterator iterator;
+ typedef Instructions::const_iterator const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ unsigned size() const { return (unsigned)Insts.size(); }
+ bool empty() const { return Insts.empty(); }
+
+ MachineInstr& front() { return Insts.front(); }
+ MachineInstr& back() { return Insts.back(); }
+ const MachineInstr& front() const { return Insts.front(); }
+ const MachineInstr& back() const { return Insts.back(); }
+
+ iterator begin() { return Insts.begin(); }
+ const_iterator begin() const { return Insts.begin(); }
+ iterator end() { return Insts.end(); }
+ const_iterator end() const { return Insts.end(); }
+ reverse_iterator rbegin() { return Insts.rbegin(); }
+ const_reverse_iterator rbegin() const { return Insts.rbegin(); }
+ reverse_iterator rend () { return Insts.rend(); }
+ const_reverse_iterator rend () const { return Insts.rend(); }
+
+ // Machine-CFG iterators
+ typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
+ typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
+ typedef std::vector<MachineBasicBlock *>::reverse_iterator
+ pred_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
+ const_pred_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::reverse_iterator
+ succ_reverse_iterator;
+ typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
+ const_succ_reverse_iterator;
+
+ pred_iterator pred_begin() { return Predecessors.begin(); }
+ const_pred_iterator pred_begin() const { return Predecessors.begin(); }
+ pred_iterator pred_end() { return Predecessors.end(); }
+ const_pred_iterator pred_end() const { return Predecessors.end(); }
+ pred_reverse_iterator pred_rbegin()
+ { return Predecessors.rbegin();}
+ const_pred_reverse_iterator pred_rbegin() const
+ { return Predecessors.rbegin();}
+ pred_reverse_iterator pred_rend()
+ { return Predecessors.rend(); }
+ const_pred_reverse_iterator pred_rend() const
+ { return Predecessors.rend(); }
+ unsigned pred_size() const {
+ return (unsigned)Predecessors.size();
+ }
+ bool pred_empty() const { return Predecessors.empty(); }
+ succ_iterator succ_begin() { return Successors.begin(); }
+ const_succ_iterator succ_begin() const { return Successors.begin(); }
+ succ_iterator succ_end() { return Successors.end(); }
+ const_succ_iterator succ_end() const { return Successors.end(); }
+ succ_reverse_iterator succ_rbegin()
+ { return Successors.rbegin(); }
+ const_succ_reverse_iterator succ_rbegin() const
+ { return Successors.rbegin(); }
+ succ_reverse_iterator succ_rend()
+ { return Successors.rend(); }
+ const_succ_reverse_iterator succ_rend() const
+ { return Successors.rend(); }
+ unsigned succ_size() const {
+ return (unsigned)Successors.size();
+ }
+ bool succ_empty() const { return Successors.empty(); }
+
+ // LiveIn management methods.
+
+ /// addLiveIn - Add the specified register as a live in. Note that it
+ /// is an error to add the same register to the same set more than once.
+ void addLiveIn(unsigned Reg) { LiveIns.push_back(Reg); }
+
+ /// removeLiveIn - Remove the specified register from the live in set.
+ ///
+ void removeLiveIn(unsigned Reg);
+
+ /// isLiveIn - Return true if the specified register is in the live in set.
+ ///
+ bool isLiveIn(unsigned Reg) const;
+
+ // Iteration support for live in sets. These sets are kept in sorted
+ // order by their register number.
+ typedef std::vector<unsigned>::iterator livein_iterator;
+ typedef std::vector<unsigned>::const_iterator const_livein_iterator;
+ livein_iterator livein_begin() { return LiveIns.begin(); }
+ const_livein_iterator livein_begin() const { return LiveIns.begin(); }
+ livein_iterator livein_end() { return LiveIns.end(); }
+ const_livein_iterator livein_end() const { return LiveIns.end(); }
+ bool livein_empty() const { return LiveIns.empty(); }
+
+ /// getAlignment - Return alignment of the basic block.
+ ///
+ unsigned getAlignment() const { return Alignment; }
+
+ /// setAlignment - Set alignment of the basic block.
+ ///
+ void setAlignment(unsigned Align) { Alignment = Align; }
+
+ /// isLandingPad - Returns true if the block is a landing pad. That is
+ /// this basic block is entered via an exception handler.
+ bool isLandingPad() const { return IsLandingPad; }
+
+  /// setIsLandingPad - Indicates the block is a landing pad. That is,
+ /// this basic block is entered via an exception handler.
+ void setIsLandingPad() { IsLandingPad = true; }
+
+ // Code Layout methods.
+
+ /// moveBefore/moveAfter - move 'this' block before or after the specified
+ /// block. This only moves the block, it does not modify the CFG or adjust
+ /// potential fall-throughs at the end of the block.
+ void moveBefore(MachineBasicBlock *NewAfter);
+ void moveAfter(MachineBasicBlock *NewBefore);
+
+ // Machine-CFG mutators
+
+ /// addSuccessor - Add succ as a successor of this MachineBasicBlock.
+ /// The Predecessors list of succ is automatically updated.
+ ///
+ void addSuccessor(MachineBasicBlock *succ);
+
+ /// removeSuccessor - Remove successor from the successors list of this
+ /// MachineBasicBlock. The Predecessors list of succ is automatically updated.
+ ///
+ void removeSuccessor(MachineBasicBlock *succ);
+
+ /// removeSuccessor - Remove specified successor from the successors list of
+ /// this MachineBasicBlock. The Predecessors list of succ is automatically
+ /// updated. Return the iterator to the element after the one removed.
+ ///
+ succ_iterator removeSuccessor(succ_iterator I);
+
+  /// transferSuccessors - Transfers all the successors from fromMBB to this
+  /// machine basic block (i.e., copies all the successors of fromMBB and
+  /// removes all the successors from fromMBB).
+ void transferSuccessors(MachineBasicBlock *fromMBB);
+
+ /// isSuccessor - Return true if the specified MBB is a successor of this
+ /// block.
+ bool isSuccessor(const MachineBasicBlock *MBB) const;
+
+ /// isLayoutSuccessor - Return true if the specified MBB will be emitted
+ /// immediately after this block, such that if this block exits by
+ /// falling through, control will transfer to the specified MBB. Note
+ /// that MBB need not be a successor at all, for example if this block
+ /// ends with an unconditional branch to some other block.
+ bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
+
+ /// getFirstTerminator - returns an iterator to the first terminator
+ /// instruction of this basic block. If a terminator does not exist,
+ /// it returns end()
+ iterator getFirstTerminator();
+
+  /// isOnlyReachableByFallthrough - Return true if this basic block has
+ /// exactly one predecessor and the control transfer mechanism between
+ /// the predecessor and this block is a fall-through.
+ bool isOnlyReachableByFallthrough() const;
+
+ void pop_front() { Insts.pop_front(); }
+ void pop_back() { Insts.pop_back(); }
+ void push_back(MachineInstr *MI) { Insts.push_back(MI); }
+ template<typename IT>
+ void insert(iterator I, IT S, IT E) { Insts.insert(I, S, E); }
+ iterator insert(iterator I, MachineInstr *M) { return Insts.insert(I, M); }
+
+ // erase - Remove the specified element or range from the instruction list.
+ // These functions delete any instructions removed.
+ //
+ iterator erase(iterator I) { return Insts.erase(I); }
+ iterator erase(iterator I, iterator E) { return Insts.erase(I, E); }
+ MachineInstr *remove(MachineInstr *I) { return Insts.remove(I); }
+ void clear() { Insts.clear(); }
+
+ /// splice - Take an instruction from MBB 'Other' at the position From,
+ /// and insert it into this MBB right before 'where'.
+ void splice(iterator where, MachineBasicBlock *Other, iterator From) {
+ Insts.splice(where, Other->Insts, From);
+ }
+
+ /// splice - Take a block of instructions from MBB 'Other' in the range [From,
+ /// To), and insert them into this MBB right before 'where'.
+ void splice(iterator where, MachineBasicBlock *Other, iterator From,
+ iterator To) {
+ Insts.splice(where, Other->Insts, From, To);
+ }
+
+ /// removeFromParent - This method unlinks 'this' from the containing
+ /// function, and returns it, but does not delete it.
+ MachineBasicBlock *removeFromParent();
+
+ /// eraseFromParent - This method unlinks 'this' from the containing
+ /// function and deletes it.
+ void eraseFromParent();
+
+ /// ReplaceUsesOfBlockWith - Given a machine basic block that branched to
+ /// 'Old', change the code and CFG so that it branches to 'New' instead.
+ void ReplaceUsesOfBlockWith(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+ /// CorrectExtraCFGEdges - Various pieces of code can cause excess edges in
+ /// the CFG to be inserted. If we have proven that MBB can only branch to
+ /// DestA and DestB, remove any other MBB successors from the CFG. DestA and
+ /// DestB can be null. Besides DestA and DestB, retain other edges leading
+ /// to LandingPads (currently there can be only one; we don't check or require
+ /// that here). Note it is possible that DestA and/or DestB are LandingPads.
+ bool CorrectExtraCFGEdges(MachineBasicBlock *DestA,
+ MachineBasicBlock *DestB,
+ bool isCond);
+
+ // Debugging methods.
+ void dump() const;
+ void print(std::ostream &OS) const;
+ void print(std::ostream *OS) const { if (OS) print(*OS); }
+
+ /// getNumber - MachineBasicBlocks are uniquely numbered at the function
+ /// level, unless they're not in a MachineFunction yet, in which case this
+ /// will return -1.
+ ///
+ int getNumber() const { return Number; }
+ void setNumber(int N) { Number = N; }
+
+private: // Methods used to maintain doubly linked list of blocks...
+ friend struct ilist_traits<MachineBasicBlock>;
+
+ // Machine-CFG mutators
+
+  /// addPredecessor - Add pred as a predecessor of this MachineBasicBlock.
+ /// Don't do this unless you know what you're doing, because it doesn't
+ /// update pred's successors list. Use pred->addSuccessor instead.
+ ///
+ void addPredecessor(MachineBasicBlock *pred);
+
+ /// removePredecessor - Remove pred as a predecessor of this
+ /// MachineBasicBlock. Don't do this unless you know what you're
+ /// doing, because it doesn't update pred's successors list. Use
+ /// pred->removeSuccessor instead.
+ ///
+ void removePredecessor(MachineBasicBlock *pred);
+};
+
+std::ostream& operator<<(std::ostream &OS, const MachineBasicBlock &MBB);
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for machine basic block graphs (machine-CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks...
+//
+
+template <> struct GraphTraits<MachineBasicBlock *> {
+ typedef MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(MachineBasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->succ_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->succ_end();
+ }
+};
+
+template <> struct GraphTraits<const MachineBasicBlock *> {
+ typedef const MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const MachineBasicBlock *BB) { return BB; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->succ_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->succ_end();
+ }
+};
+
+// Provide specializations of GraphTraits to be able to treat a
+// MachineFunction as a graph of MachineBasicBlocks... and to walk it
+// in inverse order. Inverse order for a function is considered
+// to be when traversing the predecessor edges of a MBB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
+ typedef MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<MachineBasicBlock *> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->pred_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->pred_end();
+ }
+};
+
+template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
+ typedef const MachineBasicBlock NodeType;
+ typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
+ static NodeType *getEntryNode(Inverse<const MachineBasicBlock*> G) {
+ return G.Graph;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->pred_begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->pred_end();
+ }
+};
+
+} // End llvm namespace
+
+#endif
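
A small sketch of walking the machine CFG using nothing beyond the successor iterators declared above: a work-list depth-first visit starting from some entry block. The helper and its use of llvm::cerr are illustrative; one could equally drive the GraphTraits specializations above with the generic iterators in llvm/ADT/DepthFirstIterator.h.

  #include "llvm/CodeGen/MachineBasicBlock.h"
  #include "llvm/Support/Streams.h"
  #include <set>
  #include <vector>

  using namespace llvm;

  // Visit every block reachable from Entry, depth first, using only
  // succ_begin()/succ_end() from MachineBasicBlock.
  static void visitReachable(MachineBasicBlock *Entry) {
    std::vector<MachineBasicBlock*> Worklist(1, Entry);
    std::set<MachineBasicBlock*> Visited;

    while (!Worklist.empty()) {
      MachineBasicBlock *MBB = Worklist.back();
      Worklist.pop_back();
      if (!Visited.insert(MBB).second)
        continue;                                   // already seen

      cerr << "visiting BB#" << MBB->getNumber()
           << " (" << MBB->succ_size() << " successors)\n";

      for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
             SE = MBB->succ_end(); SI != SE; ++SI)
        Worklist.push_back(*SI);
    }
  }
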
diff --git a/include/llvm/CodeGen/MachineCodeEmitter.h b/include/llvm/CodeGen/MachineCodeEmitter.h
new file mode 100644
index 0000000..aaa41a4
--- /dev/null
+++ b/include/llvm/CodeGen/MachineCodeEmitter.h
@@ -0,0 +1,330 @@
+//===-- llvm/CodeGen/MachineCodeEmitter.h - Code emission -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an abstract interface that is used by the machine code
+// emission framework to output the code. This allows machine code emission to
+// be separated from concerns such as resolution of call targets, and where the
+// machine code will be written (memory or disk, f.e.).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECODEEMITTER_H
+#define LLVM_CODEGEN_MACHINECODEEMITTER_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace llvm {
+
+class MachineBasicBlock;
+class MachineConstantPool;
+class MachineJumpTableInfo;
+class MachineFunction;
+class MachineModuleInfo;
+class MachineRelocation;
+class Value;
+class GlobalValue;
+class Function;
+
+/// MachineCodeEmitter - This class defines two sorts of methods: those for
+/// emitting the actual bytes of machine code, and those for emitting auxiliary
+/// structures, such as jump tables, relocations, etc.
+///
+/// Emission of machine code is complicated by the fact that we don't (in
+/// general) know the size of the machine code that we're about to emit before
+/// we emit it. As such, we preallocate a certain amount of memory, and set the
+/// BufferBegin/BufferEnd pointers to the start and end of the buffer. As we
+/// emit machine instructions, we advance the CurBufferPtr to indicate the
+/// location of the next byte to emit. In the case of a buffer overflow (we
+/// need to emit more machine code than we have allocated space for), the
+/// CurBufferPtr will saturate to BufferEnd and ignore stores. Once the entire
+/// function has been emitted, the overflow condition is checked, and if it has
+/// occurred, more memory is allocated, and we reemit the code into it.
+///
+class MachineCodeEmitter {
+protected:
+ /// BufferBegin/BufferEnd - Pointers to the start and end of the memory
+ /// allocated for this code buffer.
+ uint8_t *BufferBegin, *BufferEnd;
+
+ /// CurBufferPtr - Pointer to the next byte of memory to fill when emitting
+  /// code. This is guaranteed to be in the range [BufferBegin,BufferEnd]. If
+ /// this pointer is at BufferEnd, it will never move due to code emission, and
+ /// all code emission requests will be ignored (this is the buffer overflow
+ /// condition).
+ uint8_t *CurBufferPtr;
+
+public:
+ virtual ~MachineCodeEmitter() {}
+
+ /// startFunction - This callback is invoked when the specified function is
+ /// about to be code generated. This initializes the BufferBegin/End/Ptr
+ /// fields.
+ ///
+ virtual void startFunction(MachineFunction &F) = 0;
+
+ /// finishFunction - This callback is invoked when the specified function has
+ /// finished code generation. If a buffer overflow has occurred, this method
+ /// returns true (the callee is required to try again), otherwise it returns
+ /// false.
+ ///
+ virtual bool finishFunction(MachineFunction &F) = 0;
+
+ /// startGVStub - This callback is invoked when the JIT needs the
+ /// address of a GV (e.g. function) that has not been code generated yet.
+ /// The StubSize specifies the total size required by the stub.
+ ///
+ virtual void startGVStub(const GlobalValue* GV, unsigned StubSize,
+ unsigned Alignment = 1) = 0;
+
+ /// startGVStub - This callback is invoked when the JIT needs the address of a
+ /// GV (e.g. function) that has not been code generated yet. Buffer points to
+ /// memory already allocated for this stub.
+ ///
+ virtual void startGVStub(const GlobalValue* GV, void *Buffer,
+ unsigned StubSize) = 0;
+
+ /// finishGVStub - This callback is invoked to terminate a GV stub.
+ ///
+ virtual void *finishGVStub(const GlobalValue* F) = 0;
+
+ /// emitByte - This callback is invoked when a byte needs to be written to the
+ /// output stream.
+ ///
+ void emitByte(uint8_t B) {
+ if (CurBufferPtr != BufferEnd)
+ *CurBufferPtr++ = B;
+ }
+
+ /// emitWordLE - This callback is invoked when a 32-bit word needs to be
+ /// written to the output stream in little-endian format.
+ ///
+ void emitWordLE(unsigned W) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitWordBE - This callback is invoked when a 32-bit word needs to be
+ /// written to the output stream in big-endian format.
+ ///
+ void emitWordBE(unsigned W) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitDWordLE - This callback is invoked when a 64-bit word needs to be
+ /// written to the output stream in little-endian format.
+ ///
+ void emitDWordLE(uint64_t W) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 32);
+ *CurBufferPtr++ = (uint8_t)(W >> 40);
+ *CurBufferPtr++ = (uint8_t)(W >> 48);
+ *CurBufferPtr++ = (uint8_t)(W >> 56);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+ /// emitDWordBE - This callback is invoked when a 64-bit word needs to be
+ /// written to the output stream in big-endian format.
+ ///
+ void emitDWordBE(uint64_t W) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *CurBufferPtr++ = (uint8_t)(W >> 56);
+ *CurBufferPtr++ = (uint8_t)(W >> 48);
+ *CurBufferPtr++ = (uint8_t)(W >> 40);
+ *CurBufferPtr++ = (uint8_t)(W >> 32);
+ *CurBufferPtr++ = (uint8_t)(W >> 24);
+ *CurBufferPtr++ = (uint8_t)(W >> 16);
+ *CurBufferPtr++ = (uint8_t)(W >> 8);
+ *CurBufferPtr++ = (uint8_t)(W >> 0);
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+  /// emitAlignment - Move the CurBufferPtr pointer up to the specified
+ /// alignment (saturated to BufferEnd of course).
+ void emitAlignment(unsigned Alignment) {
+ if (Alignment == 0) Alignment = 1;
+
+ if(Alignment <= (uintptr_t)(BufferEnd-CurBufferPtr)) {
+ // Move the current buffer ptr up to the specified alignment.
+ CurBufferPtr =
+ (uint8_t*)(((uintptr_t)CurBufferPtr+Alignment-1) &
+ ~(uintptr_t)(Alignment-1));
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
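+  // Worked example (illustrative): if CurBufferPtr is at address 0x1003 and
+  // Alignment is 4, the expression above rounds it up to (0x1003 + 3) & ~3 =
+  // 0x1004; an already-aligned pointer such as 0x1004 is left unchanged.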
+
+
+ /// emitULEB128Bytes - This callback is invoked when a ULEB128 needs to be
+ /// written to the output stream.
+ void emitULEB128Bytes(unsigned Value) {
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ if (Value) Byte |= 0x80;
+ emitByte(Byte);
+ } while (Value);
+ }
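+  // Worked example (illustrative): emitULEB128Bytes(624485) emits the bytes
+  // 0xE5 0x8E 0x26 -- the low seven bits are emitted first, and the high bit
+  // of each byte marks that more bytes follow.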
+
+ /// emitSLEB128Bytes - This callback is invoked when a SLEB128 needs to be
+ /// written to the output stream.
+ void emitSLEB128Bytes(int32_t Value) {
+ int32_t Sign = Value >> (8 * sizeof(Value) - 1);
+ bool IsMore;
+
+ do {
+ uint8_t Byte = Value & 0x7f;
+ Value >>= 7;
+ IsMore = Value != Sign || ((Byte ^ Sign) & 0x40) != 0;
+ if (IsMore) Byte |= 0x80;
+ emitByte(Byte);
+ } while (IsMore);
+ }
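+  // Worked example (illustrative): emitSLEB128Bytes(2) emits the single byte
+  // 0x02, while emitSLEB128Bytes(-2) emits 0x7E; bit 6 of the final byte
+  // carries the sign, so neither value needs a continuation byte.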
+
+ /// emitString - This callback is invoked when a String needs to be
+ /// written to the output stream.
+ void emitString(const std::string &String) {
+ for (unsigned i = 0, N = static_cast<unsigned>(String.size());
+ i < N; ++i) {
+ uint8_t C = String[i];
+ emitByte(C);
+ }
+ emitByte(0);
+ }
+
+  /// emitInt32 - Emit an int32 directive.
+ void emitInt32(int32_t Value) {
+ if (4 <= BufferEnd-CurBufferPtr) {
+ *((uint32_t*)CurBufferPtr) = Value;
+ CurBufferPtr += 4;
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+  /// emitInt64 - Emit an int64 directive.
+ void emitInt64(uint64_t Value) {
+ if (8 <= BufferEnd-CurBufferPtr) {
+ *((uint64_t*)CurBufferPtr) = Value;
+ CurBufferPtr += 8;
+ } else {
+ CurBufferPtr = BufferEnd;
+ }
+ }
+
+  /// emitInt32At - Store the 32-bit Value at Addr if it lies within the buffer.
+ void emitInt32At(uintptr_t *Addr, uintptr_t Value) {
+ if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
+ (*(uint32_t*)Addr) = (uint32_t)Value;
+ }
+
+  /// emitInt64At - Store the 64-bit Value at Addr if it lies within the buffer.
+ void emitInt64At(uintptr_t *Addr, uintptr_t Value) {
+ if (Addr >= (uintptr_t*)BufferBegin && Addr < (uintptr_t*)BufferEnd)
+ (*(uint64_t*)Addr) = (uint64_t)Value;
+ }
+
+
+ /// emitLabel - Emits a label
+ virtual void emitLabel(uint64_t LabelID) = 0;
+
+ /// allocateSpace - Allocate a block of space in the current output buffer,
+ /// returning null (and setting conditions to indicate buffer overflow) on
+ /// failure. Alignment is the alignment in bytes of the buffer desired.
+ virtual void *allocateSpace(uintptr_t Size, unsigned Alignment) {
+ emitAlignment(Alignment);
+ void *Result;
+
+ // Check for buffer overflow.
+ if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
+ CurBufferPtr = BufferEnd;
+ Result = 0;
+ } else {
+ // Allocate the space.
+ Result = CurBufferPtr;
+ CurBufferPtr += Size;
+ }
+
+ return Result;
+ }
+
+ /// StartMachineBasicBlock - This should be called by the target when a new
+ /// basic block is about to be emitted. This way the MCE knows where the
+ /// start of the block is, and can implement getMachineBasicBlockAddress.
+ virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) = 0;
+
+ /// getCurrentPCValue - This returns the address that the next emitted byte
+ /// will be output to.
+ ///
+ virtual uintptr_t getCurrentPCValue() const {
+ return (uintptr_t)CurBufferPtr;
+ }
+
+ /// getCurrentPCOffset - Return the offset from the start of the emitted
+ /// buffer that we are currently writing to.
+ uintptr_t getCurrentPCOffset() const {
+ return CurBufferPtr-BufferBegin;
+ }
+
+ /// addRelocation - Whenever a relocatable address is needed, it should be
+ /// noted with this interface.
+ virtual void addRelocation(const MachineRelocation &MR) = 0;
+
+
+ /// FIXME: These should all be handled with relocations!
+
+ /// getConstantPoolEntryAddress - Return the address of the 'Index' entry in
+ /// the constant pool that was last emitted with the emitConstantPool method.
+ ///
+ virtual uintptr_t getConstantPoolEntryAddress(unsigned Index) const = 0;
+
+ /// getJumpTableEntryAddress - Return the address of the jump table with index
+ /// 'Index' in the function that last called initJumpTableInfo.
+ ///
+ virtual uintptr_t getJumpTableEntryAddress(unsigned Index) const = 0;
+
+ /// getMachineBasicBlockAddress - Return the address of the specified
+ /// MachineBasicBlock, only usable after the label for the MBB has been
+ /// emitted.
+ ///
+  virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const = 0;
+
+ /// getLabelAddress - Return the address of the specified LabelID, only usable
+ /// after the LabelID has been emitted.
+ ///
+ virtual uintptr_t getLabelAddress(uint64_t LabelID) const = 0;
+
+ /// Specifies the MachineModuleInfo object. This is used for exception handling
+ /// purposes.
+ virtual void setModuleInfo(MachineModuleInfo* Info) = 0;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineCodeInfo.h b/include/llvm/CodeGen/MachineCodeInfo.h
new file mode 100644
index 0000000..024e602
--- /dev/null
+++ b/include/llvm/CodeGen/MachineCodeInfo.h
@@ -0,0 +1,51 @@
+//===-- MachineCodeInfo.h - Class used to report JIT info -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MachineCodeInfo, a class used by the JIT ExecutionEngine
+// to report information about the generated machine code.
+//
+// See JIT::runJITOnFunction for usage.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef EE_MACHINE_CODE_INFO_H
+#define EE_MACHINE_CODE_INFO_H
+
+namespace llvm {
+
+class MachineCodeInfo {
+private:
+ size_t Size; // Number of bytes in memory used
+ void *Address; // The address of the function in memory
+
+public:
+ MachineCodeInfo() : Size(0), Address(0) {}
+
+ void setSize(size_t s) {
+ Size = s;
+ }
+
+ void setAddress(void *a) {
+ Address = a;
+ }
+
+ size_t size() const {
+ return Size;
+ }
+
+ void *address() const {
+ return Address;
+ }
+
+};
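+
+// Illustrative usage sketch (not part of this header): a JIT client that wants
+// to know where a function was emitted can pass a MachineCodeInfo to the JIT's
+// runJITOnFunction entry point (see JIT::runJITOnFunction), roughly as follows.
+// `TheJIT' and `F' are assumed to exist elsewhere.
+//
+//   MachineCodeInfo MCI;
+//   TheJIT->runJITOnFunction(F, &MCI);
+//   void  *Code  = MCI.address();   // start of the emitted machine code
+//   size_t Bytes = MCI.size();      // number of bytes emitted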
+
+}
+
+#endif
+
diff --git a/include/llvm/CodeGen/MachineConstantPool.h b/include/llvm/CodeGen/MachineConstantPool.h
new file mode 100644
index 0000000..99996cf
--- /dev/null
+++ b/include/llvm/CodeGen/MachineConstantPool.h
@@ -0,0 +1,147 @@
+//===-- CodeGen/MachineConstantPool.h - Abstract Constant Pool --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file
+/// This file declares the MachineConstantPool class which is an abstract
+/// constant pool to keep track of constants referenced by a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+#define LLVM_CODEGEN_MACHINECONSTANTPOOL_H
+
+#include <cassert>
+#include <climits>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class FoldingSetNodeID;
+class TargetData;
+class TargetMachine;
+class Type;
+class MachineConstantPool;
+class raw_ostream;
+
+/// Abstract base class for all machine-specific constant pool value subclasses.
+///
+class MachineConstantPoolValue {
+ const Type *Ty;
+
+public:
+ explicit MachineConstantPoolValue(const Type *ty) : Ty(ty) {}
+ virtual ~MachineConstantPoolValue() {}
+
+ /// getType - get type of this MachineConstantPoolValue.
+ ///
+ inline const Type *getType() const { return Ty; }
+
+ virtual int getExistingMachineCPValue(MachineConstantPool *CP,
+ unsigned Alignment) = 0;
+
+ virtual void AddSelectionDAGCSEId(FoldingSetNodeID &ID) = 0;
+
+ /// print - Implement operator<<
+ virtual void print(raw_ostream &O) const = 0;
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ const MachineConstantPoolValue &V) {
+ V.print(OS);
+ return OS;
+}
+
+
+/// This class is a data container for one entry in a MachineConstantPool.
+/// It contains a pointer to the value and an offset from the start of
+/// the constant pool.
+/// @brief An entry in a MachineConstantPool
+class MachineConstantPoolEntry {
+public:
+ /// The constant itself.
+ union {
+ Constant *ConstVal;
+ MachineConstantPoolValue *MachineCPVal;
+ } Val;
+
+ /// The required alignment for this entry. The top bit is set when Val is
+ /// a MachineConstantPoolValue.
+ unsigned Alignment;
+
+ MachineConstantPoolEntry(Constant *V, unsigned A)
+ : Alignment(A) {
+ Val.ConstVal = V;
+ }
+ MachineConstantPoolEntry(MachineConstantPoolValue *V, unsigned A)
+ : Alignment(A) {
+ Val.MachineCPVal = V;
+ Alignment |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+ }
+
+ bool isMachineConstantPoolEntry() const {
+ return (int)Alignment < 0;
+ }
+
+ int getAlignment() const {
+ return Alignment & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
+ }
+
+ const Type *getType() const;
+};
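+
+// Illustrative note: the top bit of Alignment doubles as the "this is a
+// MachineConstantPoolValue" flag.  For example (sketch, 32-bit unsigned,
+// `SomeMachineCPVal' is a hypothetical MachineConstantPoolValue*):
+//
+//   MachineConstantPoolEntry E(SomeMachineCPVal, 8);
+//   // E.Alignment                    == 0x80000008
+//   // E.isMachineConstantPoolEntry() == true
+//   // E.getAlignment()               == 8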
+
+/// The MachineConstantPool class keeps track of constants referenced by a
+/// function which must be spilled to memory. This is used for constants which
+/// are unable to be used directly as operands to instructions, which typically
+/// include floating point and large integer constants.
+///
+/// Instructions reference the address of these constant pool constants through
+/// the use of MO_ConstantPoolIndex values. When emitting assembly or machine
+/// code, these virtual address references are converted to refer to the
+/// address of the function constant pool values.
+/// @brief The machine constant pool.
+class MachineConstantPool {
+ const TargetData *TD; ///< The machine's TargetData.
+ unsigned PoolAlignment; ///< The alignment for the pool.
+ std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
+public:
+ /// @brief The only constructor.
+ explicit MachineConstantPool(const TargetData *td)
+ : TD(td), PoolAlignment(1) {}
+ ~MachineConstantPool();
+
+  /// getConstantPoolAlignment - Return the alignment required by
+  /// the whole constant pool, to which the first element must be aligned.
+ unsigned getConstantPoolAlignment() const { return PoolAlignment; }
+
+ /// getConstantPoolIndex - Create a new entry in the constant pool or return
+ /// an existing one. User must specify the minimum required alignment for
+ /// the object.
+ unsigned getConstantPoolIndex(Constant *C, unsigned Alignment);
+ unsigned getConstantPoolIndex(MachineConstantPoolValue *V,unsigned Alignment);
+
+ /// isEmpty - Return true if this constant pool contains no constants.
+ bool isEmpty() const { return Constants.empty(); }
+
+ const std::vector<MachineConstantPoolEntry> &getConstants() const {
+ return Constants;
+ }
+
+ /// print - Used by the MachineFunction printer to print information about
+ /// constant pool objects. Implemented in MachineFunction.cpp
+ ///
+ void print(raw_ostream &OS) const;
+
+ /// dump - Call print(cerr) to be called from the debugger.
+ void dump() const;
+};
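+
+// Illustrative usage sketch (names `MF', `TD' and `C' are assumptions, not part
+// of this header): a target's lowering code typically spills an FP or large
+// integer constant and then references the returned index through a
+// constant-pool operand:
+//
+//   MachineConstantPool *MCP = MF.getConstantPool();
+//   unsigned Align = TD->getPrefTypeAlignment(C->getType()); // C: Constant*
+//   unsigned Idx   = MCP->getConstantPoolIndex(C, Align);
+//   // ... e.g. build an operand with MachineOperand::CreateCPI(Idx, 0)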
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineDominators.h b/include/llvm/CodeGen/MachineDominators.h
new file mode 100644
index 0000000..5981e5a
--- /dev/null
+++ b/include/llvm/CodeGen/MachineDominators.h
@@ -0,0 +1,199 @@
+//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes mirroring those in llvm/Analysis/Dominators.h,
+// but for target-specific code rather than target-independent IR.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEDOMINATORS_H
+#define LLVM_CODEGEN_MACHINEDOMINATORS_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
+
+namespace llvm {
+
+inline void WriteAsOperand(std::ostream &, const MachineBasicBlock*, bool t) { }
+
+template<>
+inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) {
+ this->Roots.push_back(MBB);
+}
+
+EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>);
+EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<MachineBasicBlock>);
+
+typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
+
+//===-------------------------------------
+/// MachineDominatorTree Class - Concrete subclass of DominatorTreeBase that is
+/// used to compute a normal dominator tree over machine basic blocks.
+///
+class MachineDominatorTree : public MachineFunctionPass {
+public:
+ static char ID; // Pass ID, replacement for typeid
+ DominatorTreeBase<MachineBasicBlock>* DT;
+
+ MachineDominatorTree();
+
+ ~MachineDominatorTree();
+
+ DominatorTreeBase<MachineBasicBlock>& getBase() { return *DT; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ /// getRoots - Return the root blocks of the current CFG. This may include
+ /// multiple blocks if we are computing post dominators. For forward
+ /// dominators, this will always be a single block (the entry node).
+ ///
+ inline const std::vector<MachineBasicBlock*> &getRoots() const {
+ return DT->getRoots();
+ }
+
+ inline MachineBasicBlock *getRoot() const {
+ return DT->getRoot();
+ }
+
+ inline MachineDomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ virtual bool runOnMachineFunction(MachineFunction &F);
+
+ inline bool dominates(MachineDomTreeNode* A, MachineDomTreeNode* B) const {
+ return DT->dominates(A, B);
+ }
+
+ inline bool dominates(MachineBasicBlock* A, MachineBasicBlock* B) const {
+ return DT->dominates(A, B);
+ }
+
+ // dominates - Return true if A dominates B. This performs the
+ // special checks necessary if A and B are in the same basic block.
+ bool dominates(MachineInstr *A, MachineInstr *B) const {
+ MachineBasicBlock *BBA = A->getParent(), *BBB = B->getParent();
+ if (BBA != BBB) return DT->dominates(BBA, BBB);
+
+ // Loop through the basic block until we find A or B.
+ MachineBasicBlock::iterator I = BBA->begin();
+ for (; &*I != A && &*I != B; ++I) /*empty*/;
+
+ //if(!DT.IsPostDominators) {
+ // A dominates B if it is found first in the basic block.
+ return &*I == A;
+ //} else {
+ // // A post-dominates B if B is found first in the basic block.
+ // return &*I == B;
+ //}
+ }
+
+ inline bool properlyDominates(const MachineDomTreeNode* A,
+ MachineDomTreeNode* B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ inline bool properlyDominates(MachineBasicBlock* A,
+ MachineBasicBlock* B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ /// findNearestCommonDominator - Find nearest common dominator basic block
+ /// for basic block A and B. If there is no such block then return NULL.
+ inline MachineBasicBlock *findNearestCommonDominator(MachineBasicBlock *A,
+ MachineBasicBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ inline MachineDomTreeNode *operator[](MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ /// getNode - return the (Post)DominatorTree node for the specified basic
+ /// block. This is the same as using operator[] on this class.
+ ///
+ inline MachineDomTreeNode *getNode(MachineBasicBlock *BB) const {
+ return DT->getNode(BB);
+ }
+
+ /// addNewBlock - Add a new node to the dominator tree information. This
+  /// creates a new node as a child of DomBB dominator node, linking it into
+ /// the children list of the immediate dominator.
+ inline MachineDomTreeNode *addNewBlock(MachineBasicBlock *BB,
+ MachineBasicBlock *DomBB) {
+ return DT->addNewBlock(BB, DomBB);
+ }
+
+ /// changeImmediateDominator - This method is used to update the dominator
+ /// tree information when a node's immediate dominator changes.
+ ///
+ inline void changeImmediateDominator(MachineBasicBlock *N,
+ MachineBasicBlock* NewIDom) {
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ inline void changeImmediateDominator(MachineDomTreeNode *N,
+ MachineDomTreeNode* NewIDom) {
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ /// eraseNode - Removes a node from the dominator tree. Block must not
+  /// dominate any other blocks. Removes node from its immediate dominator's
+ /// children list. Deletes dominator node associated with basic block BB.
+ inline void eraseNode(MachineBasicBlock *BB) {
+ DT->eraseNode(BB);
+ }
+
+ /// splitBlock - BB is split and now it has one successor. Update dominator
+ /// tree to reflect this change.
+ inline void splitBlock(MachineBasicBlock* NewBB) {
+ DT->splitBlock(NewBB);
+ }
+
+
+ virtual void releaseMemory();
+
+ virtual void print(std::ostream &OS, const Module* M= 0) const {
+ DT->print(OS, M);
+ }
+};
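+
+// Illustrative usage sketch (`MyPass' is a hypothetical pass, not part of this
+// header): a MachineFunctionPass requests and queries the analysis like so:
+//
+//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
+//     AU.addRequired<MachineDominatorTree>();
+//   }
+//   bool MyPass::runOnMachineFunction(MachineFunction &MF) {
+//     MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
+//     bool Dom = MDT.dominates(&MF.front(), &MF.back());
+//     (void)Dom;
+//     return false; // analysis only, nothing modified
+//   }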
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterable by generic graph iterators.
+///
+
+template<class T> struct GraphTraits;
+
+template <> struct GraphTraits<MachineDomTreeNode *> {
+ typedef MachineDomTreeNode NodeType;
+ typedef NodeType::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) {
+ return N;
+ }
+ static inline ChildIteratorType child_begin(NodeType* N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType* N) {
+ return N->end();
+ }
+};
+
+template <> struct GraphTraits<MachineDominatorTree*>
+ : public GraphTraits<MachineDomTreeNode *> {
+ static NodeType *getEntryNode(MachineDominatorTree *DT) {
+ return DT->getRootNode();
+ }
+};
+
+}
+
+#endif
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
new file mode 100644
index 0000000..4c981f7
--- /dev/null
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -0,0 +1,411 @@
+//===-- CodeGen/MachineFrameInfo.h - Abstract Stack Frame Rep. --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The file defines the MachineFrameInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFRAMEINFO_H
+#define LLVM_CODEGEN_MACHINEFRAMEINFO_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <iosfwd>
+#include <vector>
+
+namespace llvm {
+class TargetData;
+class TargetRegisterClass;
+class Type;
+class MachineModuleInfo;
+class MachineFunction;
+class TargetFrameInfo;
+
+/// The CalleeSavedInfo class tracks the information needed to locate where a
+/// callee saved register is spilled in the current frame.
+class CalleeSavedInfo {
+
+private:
+ unsigned Reg;
+ const TargetRegisterClass *RegClass;
+ int FrameIdx;
+
+public:
+ CalleeSavedInfo(unsigned R, const TargetRegisterClass *RC, int FI = 0)
+ : Reg(R)
+ , RegClass(RC)
+ , FrameIdx(FI)
+ {}
+
+ // Accessors.
+ unsigned getReg() const { return Reg; }
+ const TargetRegisterClass *getRegClass() const { return RegClass; }
+ int getFrameIdx() const { return FrameIdx; }
+ void setFrameIdx(int FI) { FrameIdx = FI; }
+};
+
+/// The MachineFrameInfo class represents an abstract stack frame until
+/// prolog/epilog code is inserted. This class is key to allowing stack frame
+/// representation optimizations, such as frame pointer elimination. It also
+/// allows more mundane (but still important) optimizations, such as reordering
+/// of abstract objects on the stack frame.
+///
+/// To support this, the class assigns unique integer identifiers to stack
+/// objects requested by clients. These identifiers are negative integers for
+/// fixed stack objects (such as arguments passed on the stack) or nonnegative
+/// for objects that may be reordered. Instructions which refer to stack
+/// objects use a special MO_FrameIndex operand to represent these frame
+/// indexes.
+///
+/// Because this class keeps track of all references to the stack frame, it
+/// knows when a variable sized object is allocated on the stack. This is the
+/// sole condition which prevents frame pointer elimination, which is an
+/// important optimization on register-poor architectures. Because original
+/// variable sized alloca's in the source program are the only source of
+/// variable sized stack objects, it is safe to decide whether there will be
+/// any variable sized objects before all stack objects are known (for
+/// example, register allocator spill code never needs variable sized
+/// objects).
+///
+/// When prolog/epilog code emission is performed, the final stack frame is
+/// built and the machine instructions are modified to refer to the actual
+/// stack offsets of the object, eliminating all MO_FrameIndex operands from
+/// the program.
+///
+/// @brief Abstract Stack Frame Information
+class MachineFrameInfo {
+
+ // StackObject - Represent a single object allocated on the stack.
+ struct StackObject {
+ // The size of this object on the stack. 0 means a variable sized object,
+ // ~0ULL means a dead object.
+ uint64_t Size;
+
+ // Alignment - The required alignment of this stack slot.
+ unsigned Alignment;
+
+ // isImmutable - If true, the value of the stack object is set before
+ // entering the function and is not modified inside the function. By
+ // default, fixed objects are immutable unless marked otherwise.
+ bool isImmutable;
+
+ // SPOffset - The offset of this object from the stack pointer on entry to
+ // the function. This field has no meaning for a variable sized element.
+ int64_t SPOffset;
+
+ StackObject(uint64_t Sz, unsigned Al, int64_t SP = 0, bool IM = false)
+ : Size(Sz), Alignment(Al), isImmutable(IM), SPOffset(SP) {}
+ };
+
+ /// Objects - The list of stack objects allocated...
+ ///
+ std::vector<StackObject> Objects;
+
+ /// NumFixedObjects - This contains the number of fixed objects contained on
+ /// the stack. Because fixed objects are stored at a negative index in the
+ /// Objects list, this is also the index to the 0th object in the list.
+ ///
+ unsigned NumFixedObjects;
+
+ /// HasVarSizedObjects - This boolean keeps track of whether any variable
+ /// sized objects have been allocated yet.
+ ///
+ bool HasVarSizedObjects;
+
+ /// FrameAddressTaken - This boolean keeps track of whether there is a call
+ /// to builtin \@llvm.frameaddress.
+ bool FrameAddressTaken;
+
+ /// StackSize - The prolog/epilog code inserter calculates the final stack
+ /// offsets for all of the fixed size objects, updating the Objects list
+ /// above. It then updates StackSize to contain the number of bytes that need
+ /// to be allocated on entry to the function.
+ ///
+ uint64_t StackSize;
+
+ /// OffsetAdjustment - The amount that a frame offset needs to be adjusted to
+ /// have the actual offset from the stack/frame pointer. The calculation is
+ /// MFI->getObjectOffset(Index) + StackSize - TFI.getOffsetOfLocalArea() +
+ /// OffsetAdjustment. If OffsetAdjustment is zero (default) then offsets are
+ /// away from TOS. If OffsetAdjustment == StackSize then offsets are toward
+ /// TOS.
+ int OffsetAdjustment;
+
+ /// MaxAlignment - The prolog/epilog code inserter may process objects
+ /// that require greater alignment than the default alignment the target
+ /// provides. To handle this, MaxAlignment is set to the maximum alignment
+ /// needed by the objects on the current frame. If this is greater than the
+ /// native alignment maintained by the compiler, dynamic alignment code will
+ /// be needed.
+ ///
+ unsigned MaxAlignment;
+
+ /// HasCalls - Set to true if this function has any function calls. This is
+ /// only valid during and after prolog/epilog code insertion.
+ bool HasCalls;
+
+ /// StackProtectorIdx - The frame index for the stack protector.
+ int StackProtectorIdx;
+
+ /// MaxCallFrameSize - This contains the size of the largest call frame if the
+ /// target uses frame setup/destroy pseudo instructions (as defined in the
+ /// TargetFrameInfo class). This information is important for frame pointer
+  /// elimination. It is only valid during and after prolog/epilog code
+ /// insertion.
+ ///
+ unsigned MaxCallFrameSize;
+
+ /// CSInfo - The prolog/epilog code inserter fills in this vector with each
+ /// callee saved register saved in the frame. Beyond its use by the prolog/
+ /// epilog code inserter, this data used for debug info and exception
+ /// handling.
+ std::vector<CalleeSavedInfo> CSInfo;
+
+ /// MMI - This field is set (via setMachineModuleInfo) by a module info
+ /// consumer (ex. DwarfWriter) to indicate that frame layout information
+ /// should be acquired. Typically, it's the responsibility of the target's
+ /// TargetRegisterInfo prologue/epilogue emitting code to inform
+ /// MachineModuleInfo of frame layouts.
+ MachineModuleInfo *MMI;
+
+ /// TargetFrameInfo - Target information about frame layout.
+ ///
+ const TargetFrameInfo &TFI;
+public:
+ explicit MachineFrameInfo(const TargetFrameInfo &tfi) : TFI(tfi) {
+ StackSize = NumFixedObjects = OffsetAdjustment = MaxAlignment = 0;
+ HasVarSizedObjects = false;
+ FrameAddressTaken = false;
+ HasCalls = false;
+ StackProtectorIdx = -1;
+ MaxCallFrameSize = 0;
+ MMI = 0;
+ }
+
+ /// hasStackObjects - Return true if there are any stack objects in this
+ /// function.
+ ///
+ bool hasStackObjects() const { return !Objects.empty(); }
+
+ /// hasVarSizedObjects - This method may be called any time after instruction
+ /// selection is complete to determine if the stack frame for this function
+ /// contains any variable sized objects.
+ ///
+ bool hasVarSizedObjects() const { return HasVarSizedObjects; }
+
+ /// getStackProtectorIndex/setStackProtectorIndex - Return the index for the
+ /// stack protector object.
+ ///
+ int getStackProtectorIndex() const { return StackProtectorIdx; }
+ void setStackProtectorIndex(int I) { StackProtectorIdx = I; }
+
+ /// isFrameAddressTaken - This method may be called any time after instruction
+ /// selection is complete to determine if there is a call to
+ /// \@llvm.frameaddress in this function.
+ bool isFrameAddressTaken() const { return FrameAddressTaken; }
+ void setFrameAddressIsTaken(bool T) { FrameAddressTaken = T; }
+
+ /// getObjectIndexBegin - Return the minimum frame object index.
+ ///
+ int getObjectIndexBegin() const { return -NumFixedObjects; }
+
+ /// getObjectIndexEnd - Return one past the maximum frame object index.
+ ///
+ int getObjectIndexEnd() const { return (int)Objects.size()-NumFixedObjects; }
+
+ /// getNumFixedObjects() - Return the number of fixed objects.
+ unsigned getNumFixedObjects() const { return NumFixedObjects; }
+
+ /// getNumObjects() - Return the number of objects.
+ ///
+ unsigned getNumObjects() const { return Objects.size(); }
+
+ /// getObjectSize - Return the size of the specified object.
+ ///
+ int64_t getObjectSize(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Size;
+ }
+
+ /// setObjectSize - Change the size of the specified stack object.
+ void setObjectSize(int ObjectIdx, int64_t Size) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].Size = Size;
+ }
+
+ /// getObjectAlignment - Return the alignment of the specified stack object.
+ unsigned getObjectAlignment(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Alignment;
+ }
+
+ /// setObjectAlignment - Change the alignment of the specified stack object.
+ void setObjectAlignment(int ObjectIdx, unsigned Align) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
+ }
+
+ /// getObjectOffset - Return the assigned stack offset of the specified object
+ /// from the incoming stack pointer.
+ ///
+ int64_t getObjectOffset(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ assert(!isDeadObjectIndex(ObjectIdx) &&
+ "Getting frame offset for a dead object?");
+ return Objects[ObjectIdx+NumFixedObjects].SPOffset;
+ }
+
+ /// setObjectOffset - Set the stack frame offset of the specified object. The
+ /// offset is relative to the stack pointer on entry to the function.
+ ///
+ void setObjectOffset(int ObjectIdx, int64_t SPOffset) {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ assert(!isDeadObjectIndex(ObjectIdx) &&
+ "Setting frame offset for a dead object?");
+ Objects[ObjectIdx+NumFixedObjects].SPOffset = SPOffset;
+ }
+
+ /// getStackSize - Return the number of bytes that must be allocated to hold
+ /// all of the fixed size frame objects. This is only valid after
+ /// Prolog/Epilog code insertion has finalized the stack frame layout.
+ ///
+ uint64_t getStackSize() const { return StackSize; }
+
+ /// setStackSize - Set the size of the stack...
+ ///
+ void setStackSize(uint64_t Size) { StackSize = Size; }
+
+ /// getOffsetAdjustment - Return the correction for frame offsets.
+ ///
+ int getOffsetAdjustment() const { return OffsetAdjustment; }
+
+ /// setOffsetAdjustment - Set the correction for frame offsets.
+ ///
+ void setOffsetAdjustment(int Adj) { OffsetAdjustment = Adj; }
+
+ /// getMaxAlignment - Return the alignment in bytes that this function must be
+  /// aligned to when this exceeds the default stack alignment provided by
+ /// the target.
+ ///
+ unsigned getMaxAlignment() const { return MaxAlignment; }
+
+  /// setMaxAlignment - Set the maximum alignment required by the frame.
+ ///
+ void setMaxAlignment(unsigned Align) { MaxAlignment = Align; }
+
+  /// hasCalls - Return true if the current function has any function calls.
+ /// This is only valid during or after prolog/epilog code emission.
+ ///
+ bool hasCalls() const { return HasCalls; }
+ void setHasCalls(bool V) { HasCalls = V; }
+
+ /// getMaxCallFrameSize - Return the maximum size of a call frame that must be
+ /// allocated for an outgoing function call. This is only available if
+ /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
+ /// then only during or after prolog/epilog code insertion.
+ ///
+ unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
+ void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
+
+ /// CreateFixedObject - Create a new object at a fixed location on the stack.
+ /// All fixed objects should be created before other objects are created for
+ /// efficiency. By default, fixed objects are immutable. This returns an
+ /// index with a negative value.
+ ///
+ int CreateFixedObject(uint64_t Size, int64_t SPOffset,
+ bool Immutable = true);
+
+
+ /// isFixedObjectIndex - Returns true if the specified index corresponds to a
+ /// fixed stack object.
+ bool isFixedObjectIndex(int ObjectIdx) const {
+ return ObjectIdx < 0 && (ObjectIdx >= -(int)NumFixedObjects);
+ }
+
+ /// isImmutableObjectIndex - Returns true if the specified index corresponds
+ /// to an immutable object.
+ bool isImmutableObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].isImmutable;
+ }
+
+ /// isDeadObjectIndex - Returns true if the specified index corresponds to
+ /// a dead object.
+ bool isDeadObjectIndex(int ObjectIdx) const {
+ assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
+ "Invalid Object Idx!");
+ return Objects[ObjectIdx+NumFixedObjects].Size == ~0ULL;
+ }
+
+ /// CreateStackObject - Create a new statically sized stack object, returning
+ /// a nonnegative identifier to represent it.
+ ///
+ int CreateStackObject(uint64_t Size, unsigned Alignment) {
+ assert(Size != 0 && "Cannot allocate zero size stack objects!");
+ Objects.push_back(StackObject(Size, Alignment));
+ return (int)Objects.size()-NumFixedObjects-1;
+ }
+
+  /// RemoveStackObject - Remove (i.e. mark as dead) a statically sized stack
+  /// object.
+ ///
+ void RemoveStackObject(int ObjectIdx) {
+ // Mark it dead.
+ Objects[ObjectIdx+NumFixedObjects].Size = ~0ULL;
+ }
+
+ /// CreateVariableSizedObject - Notify the MachineFrameInfo object that a
+  /// variable sized object has been created. This must be called whenever a
+ /// variable sized object is created, whether or not the index returned is
+ /// actually used.
+ ///
+ int CreateVariableSizedObject() {
+ HasVarSizedObjects = true;
+ Objects.push_back(StackObject(0, 1));
+ return (int)Objects.size()-NumFixedObjects-1;
+ }
+
+  /// getCalleeSavedInfo - Returns a reference to the callee saved info vector
+  /// for the current function.
+ const std::vector<CalleeSavedInfo> &getCalleeSavedInfo() const {
+ return CSInfo;
+ }
+
+ /// setCalleeSavedInfo - Used by prolog/epilog inserter to set the function's
+ /// callee saved information.
+ void setCalleeSavedInfo(const std::vector<CalleeSavedInfo> &CSI) {
+ CSInfo = CSI;
+ }
+
+ /// getMachineModuleInfo - Used by a prologue/epilogue
+ /// emitter (TargetRegisterInfo) to provide frame layout information.
+ MachineModuleInfo *getMachineModuleInfo() const { return MMI; }
+
+ /// setMachineModuleInfo - Used by a meta info consumer (DwarfWriter) to
+ /// indicate that frame layout information should be gathered.
+ void setMachineModuleInfo(MachineModuleInfo *mmi) { MMI = mmi; }
+
+ /// print - Used by the MachineFunction printer to print information about
+ /// stack objects. Implemented in MachineFunction.cpp
+ ///
+ void print(const MachineFunction &MF, std::ostream &OS) const;
+
+ /// dump - Call print(MF, std::cerr) to be called from the debugger.
+ void dump(const MachineFunction &MF) const;
+};
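+
+// Illustrative usage sketch (not part of this header): fixed objects get
+// negative indices, ordinary stack objects nonnegative ones.  For example,
+// assuming `MF' is the current MachineFunction:
+//
+//   MachineFrameInfo *MFI = MF.getFrameInfo();
+//   int ArgFI   = MFI->CreateFixedObject(4, /*SPOffset=*/0);  // e.g. -1
+//   int SpillFI = MFI->CreateStackObject(8, /*Alignment=*/8); // e.g. 0
+//   // MFI->isFixedObjectIndex(ArgFI) == true
+//   // MFI->getObjectSize(SpillFI)    == 8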
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
new file mode 100644
index 0000000..a110e58
--- /dev/null
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -0,0 +1,407 @@
+//===-- llvm/CodeGen/MachineFunction.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect native machine code for a function. This class contains a list of
+// MachineBasicBlock instances that make up the current compiled function.
+//
+// This class also contains pointers to various classes which hold
+// target-specific information about the generated code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEFUNCTION_H
+#define LLVM_CODEGEN_MACHINEFUNCTION_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/CodeGen/DebugLoc.h"
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/Support/Annotation.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Recycler.h"
+
+namespace llvm {
+
+class Function;
+class MachineRegisterInfo;
+class MachineFrameInfo;
+class MachineConstantPool;
+class MachineJumpTableInfo;
+class TargetMachine;
+class TargetRegisterClass;
+
+template <>
+struct ilist_traits<MachineBasicBlock>
+ : public ilist_default_traits<MachineBasicBlock> {
+ mutable ilist_node<MachineBasicBlock> Sentinel;
+public:
+ MachineBasicBlock *createSentinel() const {
+ return static_cast<MachineBasicBlock*>(&Sentinel);
+ }
+ void destroySentinel(MachineBasicBlock *) const {}
+
+ MachineBasicBlock *provideInitialHead() const { return createSentinel(); }
+ MachineBasicBlock *ensureHead(MachineBasicBlock*) const {
+ return createSentinel();
+ }
+ static void noteHead(MachineBasicBlock*, MachineBasicBlock*) {}
+
+ void addNodeToList(MachineBasicBlock* MBB);
+ void removeNodeFromList(MachineBasicBlock* MBB);
+ void deleteNode(MachineBasicBlock *MBB);
+private:
+ void createNode(const MachineBasicBlock &);
+};
+
+/// MachineFunctionInfo - This class can be derived from and used by targets to
+/// hold private target-specific information for each MachineFunction. Objects
+/// of type are accessed/created with MF::getInfo and destroyed when the
+/// MachineFunction is destroyed.
+struct MachineFunctionInfo {
+ virtual ~MachineFunctionInfo() {}
+};
+
+class MachineFunction : private Annotation {
+ const Function *Fn;
+ const TargetMachine &Target;
+
+ // RegInfo - Information about each register in use in the function.
+ MachineRegisterInfo *RegInfo;
+
+ // Used to keep track of target-specific per-machine function information for
+ // the target implementation.
+ MachineFunctionInfo *MFInfo;
+
+ // Keep track of objects allocated on the stack.
+ MachineFrameInfo *FrameInfo;
+
+ // Keep track of constants which are spilled to memory
+ MachineConstantPool *ConstantPool;
+
+ // Keep track of jump tables for switch instructions
+ MachineJumpTableInfo *JumpTableInfo;
+
+ // Function-level unique numbering for MachineBasicBlocks. When a
+  // MachineBasicBlock is inserted into a MachineFunction it is automatically
+ // numbered and this vector keeps track of the mapping from ID's to MBB's.
+ std::vector<MachineBasicBlock*> MBBNumbering;
+
+ // Pool-allocate MachineFunction-lifetime and IR objects.
+ BumpPtrAllocator Allocator;
+
+ // Allocation management for instructions in function.
+ Recycler<MachineInstr> InstructionRecycler;
+
+ // Allocation management for basic blocks in function.
+ Recycler<MachineBasicBlock> BasicBlockRecycler;
+
+ // List of machine basic blocks in function
+ typedef ilist<MachineBasicBlock> BasicBlockListType;
+ BasicBlockListType BasicBlocks;
+
+ // Default debug location. Used to print out the debug label at the beginning
+ // of a function.
+ DebugLoc DefaultDebugLoc;
+
+ // Tracks debug locations.
+ DebugLocTracker DebugLocInfo;
+
+public:
+ MachineFunction(const Function *Fn, const TargetMachine &TM);
+ ~MachineFunction();
+
+ /// getFunction - Return the LLVM function that this machine code represents
+ ///
+ const Function *getFunction() const { return Fn; }
+
+ /// getTarget - Return the target machine this machine code is compiled with
+ ///
+ const TargetMachine &getTarget() const { return Target; }
+
+ /// getRegInfo - Return information about the registers currently in use.
+ ///
+ MachineRegisterInfo &getRegInfo() { return *RegInfo; }
+ const MachineRegisterInfo &getRegInfo() const { return *RegInfo; }
+
+ /// getFrameInfo - Return the frame info object for the current function.
+ /// This object contains information about objects allocated on the stack
+ /// frame of the current function in an abstract way.
+ ///
+ MachineFrameInfo *getFrameInfo() { return FrameInfo; }
+ const MachineFrameInfo *getFrameInfo() const { return FrameInfo; }
+
+ /// getJumpTableInfo - Return the jump table info object for the current
+ /// function. This object contains information about jump tables for switch
+ /// instructions in the current function.
+ ///
+ MachineJumpTableInfo *getJumpTableInfo() { return JumpTableInfo; }
+ const MachineJumpTableInfo *getJumpTableInfo() const { return JumpTableInfo; }
+
+ /// getConstantPool - Return the constant pool object for the current
+ /// function.
+ ///
+ MachineConstantPool *getConstantPool() { return ConstantPool; }
+ const MachineConstantPool *getConstantPool() const { return ConstantPool; }
+
+ /// MachineFunctionInfo - Keep track of various per-function pieces of
+ /// information for backends that would like to do so.
+ ///
+ template<typename Ty>
+ Ty *getInfo() {
+ if (!MFInfo) {
+ // This should be just `new (Allocator.Allocate<Ty>()) Ty(*this)', but
+ // that apparently breaks GCC 3.3.
+ Ty *Loc = static_cast<Ty*>(Allocator.Allocate(sizeof(Ty),
+ AlignOf<Ty>::Alignment));
+ MFInfo = new (Loc) Ty(*this);
+ }
+
+ assert((void*)dynamic_cast<Ty*>(MFInfo) == (void*)MFInfo &&
+           "Invalid concrete type or multiple inheritance for getInfo");
+ return static_cast<Ty*>(MFInfo);
+ }
+
+ template<typename Ty>
+ const Ty *getInfo() const {
+ return const_cast<MachineFunction*>(this)->getInfo<Ty>();
+ }
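+
+  // Illustrative sketch (`FooMachineFunctionInfo' is hypothetical, not part of
+  // this header): a target defines its own subclass and retrieves it through
+  // getInfo, e.g.
+  //
+  //   struct FooMachineFunctionInfo : MachineFunctionInfo {
+  //     int VarArgsFrameIndex;
+  //     explicit FooMachineFunctionInfo(MachineFunction &MF)
+  //       : VarArgsFrameIndex(0) {}
+  //   };
+  //   FooMachineFunctionInfo *FI = MF.getInfo<FooMachineFunctionInfo>();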
+
+ /// getBlockNumbered - MachineBasicBlocks are automatically numbered when they
+ /// are inserted into the machine function. The block number for a machine
+  /// basic block can be found by using the MBB::getNumber method; this
+ /// method provides the inverse mapping.
+ ///
+ MachineBasicBlock *getBlockNumbered(unsigned N) const {
+ assert(N < MBBNumbering.size() && "Illegal block number");
+ assert(MBBNumbering[N] && "Block was removed from the machine function!");
+ return MBBNumbering[N];
+ }
+
+ /// getNumBlockIDs - Return the number of MBB ID's allocated.
+ ///
+ unsigned getNumBlockIDs() const { return (unsigned)MBBNumbering.size(); }
+
+ /// RenumberBlocks - This discards all of the MachineBasicBlock numbers and
+ /// recomputes them. This guarantees that the MBB numbers are sequential,
+ /// dense, and match the ordering of the blocks within the function. If a
+ /// specific MachineBasicBlock is specified, only that block and those after
+ /// it are renumbered.
+ void RenumberBlocks(MachineBasicBlock *MBBFrom = 0);
+
+ /// print - Print out the MachineFunction in a format suitable for debugging
+ /// to the specified stream.
+ ///
+ void print(std::ostream &OS) const;
+ void print(std::ostream *OS) const { if (OS) print(*OS); }
+
+ /// viewCFG - This function is meant for use from the debugger. You can just
+ /// say 'call F->viewCFG()' and a ghostview window should pop up from the
+ /// program, displaying the CFG of the current function with the code for each
+ /// basic block inside. This depends on there being a 'dot' and 'gv' program
+ /// in your path.
+ ///
+ void viewCFG() const;
+
+ /// viewCFGOnly - This function is meant for use from the debugger. It works
+ /// just like viewCFG, but it does not include the contents of basic blocks
+ /// into the nodes, just the label. If you are only interested in the CFG
+ /// this can make the graph smaller.
+ ///
+ void viewCFGOnly() const;
+
+ /// dump - Print the current MachineFunction to cerr, useful for debugger use.
+ ///
+ void dump() const;
+
+ /// construct - Allocate and initialize a MachineFunction for a given Function
+ /// and Target
+ ///
+ static MachineFunction& construct(const Function *F, const TargetMachine &TM);
+
+ /// destruct - Destroy the MachineFunction corresponding to a given Function
+ ///
+ static void destruct(const Function *F);
+
+ /// get - Return a handle to a MachineFunction corresponding to the given
+ /// Function. This should not be called before "construct()" for a given
+ /// Function.
+ ///
+ static MachineFunction& get(const Function *F);
+
+ // Provide accessors for the MachineBasicBlock list...
+ typedef BasicBlockListType::iterator iterator;
+ typedef BasicBlockListType::const_iterator const_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ /// addLiveIn - Add the specified physical register as a live-in value and
+ /// create a corresponding virtual register for it.
+ unsigned addLiveIn(unsigned PReg, const TargetRegisterClass *RC);
+
+ //===--------------------------------------------------------------------===//
+ // BasicBlock accessor functions.
+ //
+ iterator begin() { return BasicBlocks.begin(); }
+ const_iterator begin() const { return BasicBlocks.begin(); }
+ iterator end () { return BasicBlocks.end(); }
+ const_iterator end () const { return BasicBlocks.end(); }
+
+ reverse_iterator rbegin() { return BasicBlocks.rbegin(); }
+ const_reverse_iterator rbegin() const { return BasicBlocks.rbegin(); }
+ reverse_iterator rend () { return BasicBlocks.rend(); }
+ const_reverse_iterator rend () const { return BasicBlocks.rend(); }
+
+ unsigned size() const { return (unsigned)BasicBlocks.size();}
+ bool empty() const { return BasicBlocks.empty(); }
+ const MachineBasicBlock &front() const { return BasicBlocks.front(); }
+ MachineBasicBlock &front() { return BasicBlocks.front(); }
+ const MachineBasicBlock & back() const { return BasicBlocks.back(); }
+ MachineBasicBlock & back() { return BasicBlocks.back(); }
+
+ void push_back (MachineBasicBlock *MBB) { BasicBlocks.push_back (MBB); }
+ void push_front(MachineBasicBlock *MBB) { BasicBlocks.push_front(MBB); }
+ void insert(iterator MBBI, MachineBasicBlock *MBB) {
+ BasicBlocks.insert(MBBI, MBB);
+ }
+ void splice(iterator InsertPt, iterator MBBI) {
+ BasicBlocks.splice(InsertPt, BasicBlocks, MBBI);
+ }
+
+ void remove(iterator MBBI) {
+ BasicBlocks.remove(MBBI);
+ }
+ void erase(iterator MBBI) {
+ BasicBlocks.erase(MBBI);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Internal functions used to automatically number MachineBasicBlocks
+ //
+
+  /// addToMBBNumbering - Adds the given MachineBasicBlock to the numbering of
+  /// this MachineFunction and returns the unique number assigned to it.
+ ///
+ unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
+ MBBNumbering.push_back(MBB);
+ return (unsigned)MBBNumbering.size()-1;
+ }
+
+ /// removeFromMBBNumbering - Remove the specific machine basic block from our
+ /// tracker, this is only really to be used by the MachineBasicBlock
+ /// implementation.
+ void removeFromMBBNumbering(unsigned N) {
+ assert(N < MBBNumbering.size() && "Illegal basic block #");
+ MBBNumbering[N] = 0;
+ }
+
+ /// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
+ /// of `new MachineInstr'.
+ ///
+ MachineInstr *CreateMachineInstr(const TargetInstrDesc &TID,
+ DebugLoc DL,
+ bool NoImp = false);
+
+ /// CloneMachineInstr - Create a new MachineInstr which is a copy of the
+  /// 'Orig' instruction, identical in all ways except that the instruction
+ /// has no parent, prev, or next.
+ ///
+ MachineInstr *CloneMachineInstr(const MachineInstr *Orig);
+
+ /// DeleteMachineInstr - Delete the given MachineInstr.
+ ///
+ void DeleteMachineInstr(MachineInstr *MI);
+
+ /// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
+ /// instead of `new MachineBasicBlock'.
+ ///
+ MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = 0);
+
+ /// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
+ ///
+ void DeleteMachineBasicBlock(MachineBasicBlock *MBB);
+
+ //===--------------------------------------------------------------------===//
+ // Debug location.
+ //
+
+ /// getOrCreateDebugLocID - Look up the DebugLocTuple index with the given
+ /// source file, line, and column. If none currently exists, create a new
+ /// DebugLocTuple, and insert it into the DebugIdMap.
+ unsigned getOrCreateDebugLocID(GlobalVariable *CompileUnit,
+ unsigned Line, unsigned Col);
+
+ /// getDebugLocTuple - Get the DebugLocTuple for a given DebugLoc object.
+ DebugLocTuple getDebugLocTuple(DebugLoc DL) const;
+
+ /// getDefaultDebugLoc - Get the default debug location for the machine
+ /// function.
+ DebugLoc getDefaultDebugLoc() const { return DefaultDebugLoc; }
+
+  /// setDefaultDebugLoc - Set the default debug location for the machine
+ /// function.
+ void setDefaultDebugLoc(DebugLoc DL) { DefaultDebugLoc = DL; }
+};
+
+//===--------------------------------------------------------------------===//
+// GraphTraits specializations for function basic block graphs (CFGs)
+//===--------------------------------------------------------------------===//
+
+// Provide specializations of GraphTraits to be able to treat a
+// machine function as a graph of machine basic blocks... these are
+// the same as the machine basic block iterators, except that the root
+// node is implicitly the first node of the function.
+//
+template <> struct GraphTraits<MachineFunction*> :
+ public GraphTraits<MachineBasicBlock*> {
+ static NodeType *getEntryNode(MachineFunction *F) {
+ return &F->front();
+ }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef MachineFunction::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(MachineFunction *F) { return F->begin(); }
+ static nodes_iterator nodes_end (MachineFunction *F) { return F->end(); }
+};
+template <> struct GraphTraits<const MachineFunction*> :
+ public GraphTraits<const MachineBasicBlock*> {
+ static NodeType *getEntryNode(const MachineFunction *F) {
+ return &F->front();
+ }
+
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef MachineFunction::const_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(const MachineFunction *F) {
+ return F->begin();
+ }
+ static nodes_iterator nodes_end (const MachineFunction *F) {
+ return F->end();
+ }
+};
+
+
+// Provide specializations of GraphTraits to be able to treat a function as a
+// graph of basic blocks... and to walk it in inverse order. Inverse order for
+// a function is considered to be when traversing the predecessor edges of a BB
+// instead of the successor edges.
+//
+template <> struct GraphTraits<Inverse<MachineFunction*> > :
+ public GraphTraits<Inverse<MachineBasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<MachineFunction*> G) {
+ return &G.Graph->front();
+ }
+};
+template <> struct GraphTraits<Inverse<const MachineFunction*> > :
+ public GraphTraits<Inverse<const MachineBasicBlock*> > {
+ static NodeType *getEntryNode(Inverse<const MachineFunction *> G) {
+ return &G.Graph->front();
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineFunctionPass.h b/include/llvm/CodeGen/MachineFunctionPass.h
new file mode 100644
index 0000000..6b5e64a
--- /dev/null
+++ b/include/llvm/CodeGen/MachineFunctionPass.h
@@ -0,0 +1,45 @@
+//===-- MachineFunctionPass.h - Pass for MachineFunctions --------*-C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineFunctionPass class. MachineFunctionPass's are
+// just FunctionPass's, except they operate on machine code as part of a code
+// generator. Because they operate on machine code, not the LLVM
+// representation, MachineFunctionPass's are not allowed to modify the LLVM
+// representation. Due to this limitation, the MachineFunctionPass class takes
+// care of declaring that no LLVM passes are invalidated.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINE_FUNCTION_PASS_H
+#define LLVM_CODEGEN_MACHINE_FUNCTION_PASS_H
+
+#include "llvm/Pass.h"
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+ // FIXME: This pass should declare that the pass does not invalidate any LLVM
+ // passes.
+struct MachineFunctionPass : public FunctionPass {
+ explicit MachineFunctionPass(intptr_t ID) : FunctionPass(ID) {}
+ explicit MachineFunctionPass(void *ID) : FunctionPass(ID) {}
+
+protected:
+  /// runOnMachineFunction - This method must be overridden to perform the
+ /// desired machine code transformation or analysis.
+ ///
+ virtual bool runOnMachineFunction(MachineFunction &MF) = 0;
+
+public:
+ bool runOnFunction(Function &F);
+};
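+
+// Illustrative sketch of a minimal MachineFunctionPass (`CountInstrs' is a
+// hypothetical pass, not part of this header):
+//
+//   namespace {
+//     struct CountInstrs : public MachineFunctionPass {
+//       static char ID;
+//       CountInstrs() : MachineFunctionPass(&ID) {}
+//       virtual bool runOnMachineFunction(MachineFunction &MF) {
+//         unsigned N = 0;
+//         for (MachineFunction::iterator BB = MF.begin(), E = MF.end();
+//              BB != E; ++BB)
+//           N += BB->size();
+//         return false; // analysis only, nothing modified
+//       }
+//     };
+//     char CountInstrs::ID = 0;
+//   }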
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineInstr.h b/include/llvm/CodeGen/MachineInstr.h
new file mode 100644
index 0000000..d61e5d8
--- /dev/null
+++ b/include/llvm/CodeGen/MachineInstr.h
@@ -0,0 +1,375 @@
+//===-- llvm/CodeGen/MachineInstr.h - MachineInstr class --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineInstr class, which is the
+// basic representation for all target dependent machine instructions used by
+// the back end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTR_H
+#define LLVM_CODEGEN_MACHINEINSTR_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/Target/TargetInstrDesc.h"
+#include "llvm/CodeGen/DebugLoc.h"
+#include <list>
+#include <vector>
+
+namespace llvm {
+
+class TargetInstrDesc;
+class TargetInstrInfo;
+class TargetRegisterInfo;
+class MachineFunction;
+
+//===----------------------------------------------------------------------===//
+/// MachineInstr - Representation of each machine instruction.
+///
+class MachineInstr : public ilist_node<MachineInstr> {
+ const TargetInstrDesc *TID; // Instruction descriptor.
+ unsigned short NumImplicitOps; // Number of implicit operands (which
+ // are determined at construction time).
+
+ std::vector<MachineOperand> Operands; // the operands
+ std::list<MachineMemOperand> MemOperands; // information on memory references
+ MachineBasicBlock *Parent; // Pointer to the owning basic block.
+ DebugLoc debugLoc; // Source line information.
+
+  // OperandsComplete - Return true if it's illegal to add a new operand
+ bool OperandsComplete() const;
+
+ MachineInstr(const MachineInstr&); // DO NOT IMPLEMENT
+ void operator=(const MachineInstr&); // DO NOT IMPLEMENT
+
+ // Intrusive list support
+ friend struct ilist_traits<MachineInstr>;
+ friend struct ilist_traits<MachineBasicBlock>;
+ void setParent(MachineBasicBlock *P) { Parent = P; }
+
+ /// MachineInstr ctor - This constructor creates a copy of the given
+ /// MachineInstr in the given MachineFunction.
+ MachineInstr(MachineFunction &, const MachineInstr &);
+
+ /// MachineInstr ctor - This constructor creates a dummy MachineInstr with
+ /// TID NULL and no operands.
+ MachineInstr();
+
+ // The next two constructors have DebugLoc and non-DebugLoc versions;
+ // over time, the non-DebugLoc versions should be phased out and eventually
+ // removed.
+
+  /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
+ /// implicit operands. It reserves space for number of operands specified by
+ /// TargetInstrDesc. The version with a DebugLoc should be preferred.
+ explicit MachineInstr(const TargetInstrDesc &TID, bool NoImp = false);
+
+  /// MachineInstr ctor - Works exactly the same as the ctor above, except that
+ /// the MachineInstr is created and added to the end of the specified basic
+ /// block. The version with a DebugLoc should be preferred.
+ ///
+ MachineInstr(MachineBasicBlock *MBB, const TargetInstrDesc &TID);
+
+  /// MachineInstr ctor - This constructor creates a MachineInstr and adds the
+ /// implicit operands. It reserves space for number of operands specified by
+ /// TargetInstrDesc. An explicit DebugLoc is supplied.
+ explicit MachineInstr(const TargetInstrDesc &TID, const DebugLoc dl,
+ bool NoImp = false);
+
+  /// MachineInstr ctor - Works exactly the same as the ctor above, except that
+ /// the MachineInstr is created and added to the end of the specified basic
+ /// block.
+ ///
+ MachineInstr(MachineBasicBlock *MBB, const DebugLoc dl,
+ const TargetInstrDesc &TID);
+
+ ~MachineInstr();
+
+ // MachineInstrs are pool-allocated and owned by MachineFunction.
+ friend class MachineFunction;
+
+public:
+ const MachineBasicBlock* getParent() const { return Parent; }
+ MachineBasicBlock* getParent() { return Parent; }
+
+  /// getDebugLoc - Returns the debug location of this MachineInstr.
+ ///
+ const DebugLoc getDebugLoc() const { return debugLoc; }
+
+ /// getDesc - Returns the target instruction descriptor of this
+ /// MachineInstr.
+ const TargetInstrDesc &getDesc() const { return *TID; }
+
+ /// getOpcode - Returns the opcode of this MachineInstr.
+ ///
+ int getOpcode() const { return TID->Opcode; }
+
+ /// Access to explicit operands of the instruction.
+ ///
+ unsigned getNumOperands() const { return (unsigned)Operands.size(); }
+
+ const MachineOperand& getOperand(unsigned i) const {
+ assert(i < getNumOperands() && "getOperand() out of range!");
+ return Operands[i];
+ }
+ MachineOperand& getOperand(unsigned i) {
+ assert(i < getNumOperands() && "getOperand() out of range!");
+ return Operands[i];
+ }
+
+ /// getNumExplicitOperands - Returns the number of non-implicit operands.
+ ///
+ unsigned getNumExplicitOperands() const;
+
+ /// Access to memory operands of the instruction
+ std::list<MachineMemOperand>::iterator memoperands_begin()
+ { return MemOperands.begin(); }
+ std::list<MachineMemOperand>::iterator memoperands_end()
+ { return MemOperands.end(); }
+ std::list<MachineMemOperand>::const_iterator memoperands_begin() const
+ { return MemOperands.begin(); }
+ std::list<MachineMemOperand>::const_iterator memoperands_end() const
+ { return MemOperands.end(); }
+ bool memoperands_empty() const { return MemOperands.empty(); }
+
+ /// hasOneMemOperand - Return true if this instruction has exactly one
+ /// MachineMemOperand.
+ bool hasOneMemOperand() const {
+ return !memoperands_empty() &&
+ next(memoperands_begin()) == memoperands_end();
+ }
+
+ /// isIdenticalTo - Return true if this instruction is identical to (same
+ /// opcode and same operands as) the specified instruction.
+ bool isIdenticalTo(const MachineInstr *Other) const {
+ if (Other->getOpcode() != getOpcode() ||
+ Other->getNumOperands() != getNumOperands())
+ return false;
+ for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
+ if (!getOperand(i).isIdenticalTo(Other->getOperand(i)))
+ return false;
+ return true;
+ }
+
+ /// removeFromParent - This method unlinks 'this' from the containing basic
+ /// block, and returns it, but does not delete it.
+ MachineInstr *removeFromParent();
+
+ /// eraseFromParent - This method unlinks 'this' from the containing basic
+ /// block and deletes it.
+ void eraseFromParent();
+
+ /// isLabel - Returns true if the MachineInstr represents a label.
+ ///
+ bool isLabel() const;
+
+ /// isDebugLabel - Returns true if the MachineInstr represents a debug label.
+ ///
+ bool isDebugLabel() const;
+
+ /// readsRegister - Return true if the MachineInstr reads the specified
+ /// register. If TargetRegisterInfo is passed, then it also checks if there
+ /// is a read of a super-register.
+ bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
+ return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
+ }
+
+ /// killsRegister - Return true if the MachineInstr kills the specified
+ /// register. If TargetRegisterInfo is passed, then it also checks if there is
+ /// a kill of a super-register.
+ bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
+ return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
+ }
+
+ /// modifiesRegister - Return true if the MachineInstr modifies the
+ /// specified register. If TargetRegisterInfo is passed, then it also checks
+ /// if there is a def of a super-register.
+ bool modifiesRegister(unsigned Reg,
+ const TargetRegisterInfo *TRI = NULL) const {
+ return findRegisterDefOperandIdx(Reg, false, TRI) != -1;
+ }
+
+ /// registerDefIsDead - Returns true if the register is dead in this machine
+ /// instruction. If TargetRegisterInfo is passed, then it also checks
+ /// if there is a dead def of a super-register.
+ bool registerDefIsDead(unsigned Reg,
+ const TargetRegisterInfo *TRI = NULL) const {
+ return findRegisterDefOperandIdx(Reg, true, TRI) != -1;
+ }
+
+ /// findRegisterUseOperandIdx() - Returns the operand index that is a use of
+  /// the specified register, or -1 if it is not found. If isKill is true, the
+  /// search is further restricted to uses that kill the register.
+ int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
+ const TargetRegisterInfo *TRI = NULL) const;
+
+ /// findRegisterUseOperand - Wrapper for findRegisterUseOperandIdx, it returns
+ /// a pointer to the MachineOperand rather than an index.
+ MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
+ const TargetRegisterInfo *TRI = NULL) {
+ int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
+ return (Idx == -1) ? NULL : &getOperand(Idx);
+ }
+
+ /// findRegisterDefOperandIdx() - Returns the operand index that is a def of
+ /// the specified register or -1 if it is not found. If isDead is true, defs
+ /// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
+ /// also checks if there is a def of a super-register.
+ int findRegisterDefOperandIdx(unsigned Reg, bool isDead = false,
+ const TargetRegisterInfo *TRI = NULL) const;
+
+ /// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns
+ /// a pointer to the MachineOperand rather than an index.
+ MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
+ const TargetRegisterInfo *TRI = NULL) {
+ int Idx = findRegisterDefOperandIdx(Reg, isDead, TRI);
+ return (Idx == -1) ? NULL : &getOperand(Idx);
+ }
+
+ /// findFirstPredOperandIdx() - Find the index of the first operand in the
+ /// operand list that is used to represent the predicate. It returns -1 if
+ /// none is found.
+ int findFirstPredOperandIdx() const;
+
+ /// isRegTiedToUseOperand - Given the index of a register def operand,
+ /// check if the register def is tied to a source operand, due to either
+ /// two-address elimination or inline assembly constraints. Returns the
+  /// first tied use operand index by reference if UseOpIdx is not null.
+ bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const;
+
+ /// isRegTiedToDefOperand - Return true if the use operand of the specified
+  /// index is tied to a def operand. It also returns the def operand index by
+ /// reference if DefOpIdx is not null.
+ bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const;
+
+ /// copyKillDeadInfo - Copies kill / dead operand properties from MI.
+ ///
+ void copyKillDeadInfo(const MachineInstr *MI);
+
+ /// copyPredicates - Copies predicate operand(s) from MI.
+ void copyPredicates(const MachineInstr *MI);
+
+ /// addRegisterKilled - We have determined MI kills a register. Look for the
+ /// operand that uses it and mark it as IsKill. If AddIfNotFound is true,
+  /// add an implicit operand if it's not found. Returns true if the operand
+ /// exists / is added.
+ bool addRegisterKilled(unsigned IncomingReg,
+ const TargetRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
+ /// addRegisterDead - We have determined MI defined a register without a use.
+ /// Look for the operand that defines it and mark it as IsDead. If
+  /// AddIfNotFound is true, add an implicit operand if it's not found. Returns
+ /// true if the operand exists / is added.
+ bool addRegisterDead(unsigned IncomingReg, const TargetRegisterInfo *RegInfo,
+ bool AddIfNotFound = false);
+
+ /// isSafeToMove - Return true if it is safe to move this instruction. If
+ /// SawStore is set to true, it means that there is a store (or call) between
+ /// the instruction's location and its intended destination.
+ bool isSafeToMove(const TargetInstrInfo *TII, bool &SawStore) const;
+
+ /// isSafeToReMat - Return true if it's safe to rematerialize the specified
+ /// instruction which defined the specified register instead of copying it.
+ bool isSafeToReMat(const TargetInstrInfo *TII, unsigned DstReg) const;
+
+ /// hasVolatileMemoryRef - Return true if this instruction may have a
+ /// volatile memory reference, or if the information describing the
+ /// memory reference is not available. Return false if it is known to
+ /// have no volatile memory references.
+ bool hasVolatileMemoryRef() const;
+
+ //
+ // Debugging support
+ //
+ void print(std::ostream *OS, const TargetMachine *TM) const {
+ if (OS) print(*OS, TM);
+ }
+ void print(std::ostream &OS, const TargetMachine *TM = 0) const;
+ void print(std::ostream *OS) const { if (OS) print(*OS); }
+ void print(raw_ostream *OS, const TargetMachine *TM) const {
+ if (OS) print(*OS, TM);
+ }
+ void print(raw_ostream &OS, const TargetMachine *TM = 0) const;
+ void print(raw_ostream *OS) const { if (OS) print(*OS); }
+ void dump() const;
+
+ //===--------------------------------------------------------------------===//
+ // Accessors used to build up machine instructions.
+
+ /// addOperand - Add the specified operand to the instruction. If it is an
+ /// implicit operand, it is added to the end of the operand list. If it is
+ /// an explicit operand it is added at the end of the explicit operand list
+ /// (before the first implicit operand).
+ void addOperand(const MachineOperand &Op);
+
+ /// setDesc - Replace the instruction descriptor (thus opcode) of
+ /// the current instruction with a new one.
+ ///
+ void setDesc(const TargetInstrDesc &tid) { TID = &tid; }
+
+  /// setDebugLoc - Replace the current source line information with new
+  /// information. Avoid using this; the constructor argument is preferable.
+ ///
+ void setDebugLoc(const DebugLoc dl) { debugLoc = dl; }
+
+ /// RemoveOperand - Erase an operand from an instruction, leaving it with one
+ /// fewer operand than it started with.
+ ///
+ void RemoveOperand(unsigned i);
+
+ /// addMemOperand - Add a MachineMemOperand to the machine instruction,
+ /// referencing arbitrary storage.
+ void addMemOperand(MachineFunction &MF,
+ const MachineMemOperand &MO);
+
+ /// clearMemOperands - Erase all of this MachineInstr's MachineMemOperands.
+ void clearMemOperands(MachineFunction &MF);
+
+private:
+ /// getRegInfo - If this instruction is embedded into a MachineFunction,
+ /// return the MachineRegisterInfo object for the current function, otherwise
+ /// return null.
+ MachineRegisterInfo *getRegInfo();
+
+ /// addImplicitDefUseOperands - Add all implicit def and use operands to
+ /// this instruction.
+ void addImplicitDefUseOperands();
+
+ /// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
+ /// this instruction from their respective use lists. This requires that the
+ /// operands already be on their use lists.
+ void RemoveRegOperandsFromUseLists();
+
+ /// AddRegOperandsToUseLists - Add all of the register operands in
+  /// this instruction to their respective use lists. This requires that the
+ /// operands not be on their use lists yet.
+ void AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo);
+};
+
+//===----------------------------------------------------------------------===//
+// Debugging Support
+
+inline std::ostream& operator<<(std::ostream &OS, const MachineInstr &MI) {
+ MI.print(OS);
+ return OS;
+}
+
+inline raw_ostream& operator<<(raw_ostream &OS, const MachineInstr &MI) {
+ MI.print(OS);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineInstrBuilder.h b/include/llvm/CodeGen/MachineInstrBuilder.h
new file mode 100644
index 0000000..d3a0995
--- /dev/null
+++ b/include/llvm/CodeGen/MachineInstrBuilder.h
@@ -0,0 +1,225 @@
+//===-- CodeGen/MachineInstBuilder.h - Simplify creation of MIs -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes a function named BuildMI, which is useful for dramatically
+// simplifying how MachineInstr's are created. It allows use of code like this:
+//
+// M = BuildMI(X86::ADDrr8, 2).addReg(argVal1).addReg(argVal2);
+//
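+// With the overloads declared below, the equivalent form looks roughly like
+// this (TII, DL and the register names are illustrative assumptions, not
+// definitions from this header):
+//
+//   BuildMI(&MBB, DL, TII.get(X86::ADDrr8), DestReg)
+//     .addReg(ArgVal1)
+//     .addReg(ArgVal2, RegState::Kill);
+//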
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+#define LLVM_CODEGEN_MACHINEINSTRBUILDER_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+class TargetInstrDesc;
+
+namespace RegState {
+ enum {
+ Define = 0x2,
+ Implicit = 0x4,
+ Kill = 0x8,
+ Dead = 0x10,
+ EarlyClobber = 0x20,
+ ImplicitDefine = Implicit | Define,
+ ImplicitKill = Implicit | Kill
+ };
+}
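+
+// The RegState flags may be or'd together and passed to addReg() below. For
+// example (illustrative), addReg(Reg, RegState::Implicit | RegState::Kill)
+// adds an implicit use of Reg that is also its last use on this path.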
+
+class MachineInstrBuilder {
+ MachineInstr *MI;
+public:
+ explicit MachineInstrBuilder(MachineInstr *mi) : MI(mi) {}
+
+ /// Allow automatic conversion to the machine instruction we are working on.
+ ///
+ operator MachineInstr*() const { return MI; }
+ operator MachineBasicBlock::iterator() const { return MI; }
+
+ /// addReg - Add a new virtual register operand...
+ ///
+ const
+ MachineInstrBuilder &addReg(unsigned RegNo, unsigned flags = 0,
+ unsigned SubReg = 0) const {
+ assert((flags & 0x1) == 0 &&
+ "Passing in 'true' to addReg is forbidden! Use enums instead.");
+ MI->addOperand(MachineOperand::CreateReg(RegNo,
+ flags & RegState::Define,
+ flags & RegState::Implicit,
+ flags & RegState::Kill,
+ flags & RegState::Dead,
+ SubReg,
+ flags & RegState::EarlyClobber));
+ return *this;
+ }
+
+ /// addImm - Add a new immediate operand.
+ ///
+ const MachineInstrBuilder &addImm(int64_t Val) const {
+ MI->addOperand(MachineOperand::CreateImm(Val));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addFPImm(const ConstantFP *Val) const {
+ MI->addOperand(MachineOperand::CreateFPImm(Val));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addMBB(MachineBasicBlock *MBB) const {
+ MI->addOperand(MachineOperand::CreateMBB(MBB));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addFrameIndex(unsigned Idx) const {
+ MI->addOperand(MachineOperand::CreateFI(Idx));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addConstantPoolIndex(unsigned Idx,
+ int Offset = 0) const {
+ MI->addOperand(MachineOperand::CreateCPI(Idx, Offset));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addJumpTableIndex(unsigned Idx) const {
+ MI->addOperand(MachineOperand::CreateJTI(Idx));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addGlobalAddress(GlobalValue *GV,
+ int64_t Offset = 0) const {
+ MI->addOperand(MachineOperand::CreateGA(GV, Offset));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addExternalSymbol(const char *FnName,
+ int64_t Offset = 0) const {
+ MI->addOperand(MachineOperand::CreateES(FnName, Offset));
+ return *this;
+ }
+
+ const MachineInstrBuilder &addMemOperand(const MachineMemOperand &MMO) const {
+ MI->addMemOperand(*MI->getParent()->getParent(), MMO);
+ return *this;
+ }
+
+ const MachineInstrBuilder &addOperand(const MachineOperand &MO) const {
+ if (MO.isReg())
+ return addReg(MO.getReg(),
+ (MO.isDef() ? RegState::Define : 0) |
+ (MO.isImplicit() ? RegState::Implicit : 0) |
+ (MO.isKill() ? RegState::Kill : 0) |
+ (MO.isDead() ? RegState::Dead : 0) |
+ (MO.isEarlyClobber() ? RegState::EarlyClobber : 0),
+ MO.getSubReg());
+ if (MO.isImm())
+ return addImm(MO.getImm());
+ if (MO.isFI())
+ return addFrameIndex(MO.getIndex());
+ if (MO.isGlobal())
+ return addGlobalAddress(MO.getGlobal(), MO.getOffset());
+ if (MO.isCPI())
+ return addConstantPoolIndex(MO.getIndex(), MO.getOffset());
+ if (MO.isSymbol())
+ return addExternalSymbol(MO.getSymbolName());
+ if (MO.isJTI())
+ return addJumpTableIndex(MO.getIndex());
+
+ assert(0 && "Unknown operand for MachineInstrBuilder::AddOperand!");
+ return *this;
+ }
+};
+
+/// BuildMI - Builder interface. Specify how to create the initial instruction
+/// itself.
+///
+inline MachineInstrBuilder BuildMI(MachineFunction &MF,
+ DebugLoc DL,
+ const TargetInstrDesc &TID) {
+ return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL));
+}
+
+/// BuildMI - This version of the builder sets up the first operand as a
+/// destination virtual register.
+///
+inline MachineInstrBuilder BuildMI(MachineFunction &MF,
+ DebugLoc DL,
+ const TargetInstrDesc &TID,
+ unsigned DestReg) {
+ return MachineInstrBuilder(MF.CreateMachineInstr(TID, DL))
+ .addReg(DestReg, RegState::Define);
+}
+
+/// BuildMI - This version of the builder inserts the newly-built
+/// instruction before the given position in the given MachineBasicBlock, and
+/// sets up the first operand as a destination virtual register.
+///
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ DebugLoc DL,
+ const TargetInstrDesc &TID,
+ unsigned DestReg) {
+ MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MI).addReg(DestReg, RegState::Define);
+}
+
+/// BuildMI - This version of the builder inserts the newly-built
+/// instruction before the given position in the given MachineBasicBlock, and
+/// does NOT take a destination register.
+///
+inline MachineInstrBuilder BuildMI(MachineBasicBlock &BB,
+ MachineBasicBlock::iterator I,
+ DebugLoc DL,
+ const TargetInstrDesc &TID) {
+ MachineInstr *MI = BB.getParent()->CreateMachineInstr(TID, DL);
+ BB.insert(I, MI);
+ return MachineInstrBuilder(MI);
+}
+
+/// BuildMI - This version of the builder inserts the newly-built
+/// instruction at the end of the given MachineBasicBlock, and does NOT take a
+/// destination register.
+///
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
+ DebugLoc DL,
+ const TargetInstrDesc &TID) {
+ return BuildMI(*BB, BB->end(), DL, TID);
+}
+
+/// BuildMI - This version of the builder inserts the newly-built
+/// instruction at the end of the given MachineBasicBlock, and sets up the first
+/// operand as a destination virtual register.
+///
+inline MachineInstrBuilder BuildMI(MachineBasicBlock *BB,
+ DebugLoc DL,
+ const TargetInstrDesc &TID,
+ unsigned DestReg) {
+ return BuildMI(*BB, BB->end(), DL, TID, DestReg);
+}
+
+inline unsigned getDefRegState(bool B) {
+ return B ? RegState::Define : 0;
+}
+inline unsigned getImplRegState(bool B) {
+ return B ? RegState::Implicit : 0;
+}
+inline unsigned getKillRegState(bool B) {
+ return B ? RegState::Kill : 0;
+}
+inline unsigned getDeadRegState(bool B) {
+ return B ? RegState::Dead : 0;
+}
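+
+// These helpers make it convenient to set a flag conditionally, e.g.
+// (illustrative): MIB.addReg(SrcReg, getKillRegState(IsLastUse)).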
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineJumpTableInfo.h b/include/llvm/CodeGen/MachineJumpTableInfo.h
new file mode 100644
index 0000000..56e2e54
--- /dev/null
+++ b/include/llvm/CodeGen/MachineJumpTableInfo.h
@@ -0,0 +1,92 @@
+//===-- CodeGen/MachineJumpTableInfo.h - Abstract Jump Tables --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// The MachineJumpTableInfo class keeps track of jump tables referenced by
+// lowered switch instructions in the MachineFunction.
+//
+// Instructions reference the address of these jump tables through the use of
+// MO_JumpTableIndex values. When emitting assembly or machine code, these
+// virtual address references are converted to refer to the address of the
+// function jump tables.
+//
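+// A rough usage sketch (MJTI and DestBBs are assumed names from the
+// surrounding lowering code, not definitions in this header):
+//
+//   unsigned JTI = MJTI->getJumpTableIndex(DestBBs);
+//   ...emit an instruction carrying an MO_JumpTableIndex operand for JTI...
+//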
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+#define LLVM_CODEGEN_MACHINEJUMPTABLEINFO_H
+
+#include <vector>
+#include <iosfwd>
+#include <cassert>
+
+namespace llvm {
+
+class MachineBasicBlock;
+class TargetData;
+
+/// MachineJumpTableEntry - One jump table in the jump table info.
+///
+struct MachineJumpTableEntry {
+ /// MBBs - The vector of basic blocks from which to create the jump table.
+ std::vector<MachineBasicBlock*> MBBs;
+
+ explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
+ : MBBs(M) {}
+};
+
+class MachineJumpTableInfo {
+ unsigned EntrySize;
+ unsigned Alignment;
+ std::vector<MachineJumpTableEntry> JumpTables;
+public:
+ MachineJumpTableInfo(unsigned Size, unsigned Align)
+ : EntrySize(Size), Alignment(Align) {}
+
+ /// getJumpTableIndex - Create a new jump table or return an existing one.
+ ///
+ unsigned getJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
+
+ /// isEmpty - Return true if there are no jump tables.
+ ///
+ bool isEmpty() const { return JumpTables.empty(); }
+
+ const std::vector<MachineJumpTableEntry> &getJumpTables() const {
+ return JumpTables;
+ }
+
+  /// RemoveJumpTable - Mark the specified index as being dead. This will cause
+ /// it to not be emitted.
+ void RemoveJumpTable(unsigned Idx) {
+ JumpTables[Idx].MBBs.clear();
+ }
+
+ /// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
+ /// the jump tables to branch to New instead.
+ bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
+
+ /// getEntrySize - Returns the size of an individual field in a jump table.
+ ///
+ unsigned getEntrySize() const { return EntrySize; }
+
+  /// getAlignment - Returns the target's preferred alignment for jump tables.
+ unsigned getAlignment() const { return Alignment; }
+
+ /// print - Used by the MachineFunction printer to print information about
+ /// jump tables. Implemented in MachineFunction.cpp
+ ///
+ void print(std::ostream &OS) const;
+ void print(std::ostream *OS) const { if (OS) print(*OS); }
+
+ /// dump - Call print(std::cerr) to be called from the debugger.
+ ///
+ void dump() const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineLocation.h b/include/llvm/CodeGen/MachineLocation.h
new file mode 100644
index 0000000..2db4e55
--- /dev/null
+++ b/include/llvm/CodeGen/MachineLocation.h
@@ -0,0 +1,106 @@
+//===-- llvm/CodeGen/MachineLocation.h --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// The MachineLocation class is used to represent a simple location in a machine
+// frame. Locations take one of two forms: a register, or an address formed
+// from a base address plus an offset. Register indirection can be specified by
+// using an offset of zero.
+//
+// The MachineMove class is used to represent abstract move operations in the
+// prolog/epilog of a compiled function. A collection of these objects can be
+// used by a debug consumer to track the location of values when unwinding stack
+// frames.
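+//
+// For example (the register number and offset are purely illustrative):
+//
+//   MachineLocation InReg(FramePtrReg);        // value lives in a register
+//   MachineLocation Spilled(FramePtrReg, -8);  // value at FramePtrReg plus -8
+//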
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CODEGEN_MACHINELOCATION_H
+#define LLVM_CODEGEN_MACHINELOCATION_H
+
+namespace llvm {
+
+class MachineLocation {
+private:
+ bool IsRegister; // True if location is a register.
+ unsigned Register; // gcc/gdb register number.
+ int Offset; // Displacement if not register.
+
+public:
+ enum {
+ // The target register number for an abstract frame pointer. The value is
+ // an arbitrary value greater than TargetRegisterInfo::FirstVirtualRegister.
+ VirtualFP = ~0U
+ };
+ MachineLocation()
+ : IsRegister(false)
+ , Register(0)
+ , Offset(0)
+ {}
+ explicit MachineLocation(unsigned R)
+ : IsRegister(true)
+ , Register(R)
+ , Offset(0)
+ {}
+ MachineLocation(unsigned R, int O)
+ : IsRegister(false)
+ , Register(R)
+ , Offset(O)
+ {}
+
+ // Accessors
+ bool isReg() const { return IsRegister; }
+ unsigned getReg() const { return Register; }
+ int getOffset() const { return Offset; }
+ void setIsRegister(bool Is) { IsRegister = Is; }
+ void setRegister(unsigned R) { Register = R; }
+ void setOffset(int O) { Offset = O; }
+ void set(unsigned R) {
+ IsRegister = true;
+ Register = R;
+ Offset = 0;
+ }
+ void set(unsigned R, int O) {
+ IsRegister = false;
+ Register = R;
+ Offset = O;
+ }
+
+#ifndef NDEBUG
+ void dump();
+#endif
+};
+
+class MachineMove {
+private:
+ unsigned LabelID; // Label ID number for post-instruction
+ // address when result of move takes
+ // effect.
+ MachineLocation Destination; // Move to location.
+ MachineLocation Source; // Move from location.
+
+public:
+ MachineMove()
+ : LabelID(0)
+ , Destination()
+ , Source()
+ {}
+
+ MachineMove(unsigned ID, MachineLocation &D, MachineLocation &S)
+ : LabelID(ID)
+ , Destination(D)
+ , Source(S)
+ {}
+
+ // Accessors
+ unsigned getLabelID() const { return LabelID; }
+ const MachineLocation &getDestination() const { return Destination; }
+ const MachineLocation &getSource() const { return Source; }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineLoopInfo.h b/include/llvm/CodeGen/MachineLoopInfo.h
new file mode 100644
index 0000000..8c96308
--- /dev/null
+++ b/include/llvm/CodeGen/MachineLoopInfo.h
@@ -0,0 +1,188 @@
+//===- llvm/CodeGen/MachineLoopInfo.h - Natural Loop Calculator -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineLoopInfo class that is used to identify natural
+// loops and determine the loop depth of various nodes of the CFG. Note that
+// natural loops may actually be several loops that share the same header node.
+//
+// This analysis calculates the nesting structure of loops in a function. For
+// each natural loop identified, this analysis identifies natural loops
+// contained entirely within the loop and the basic blocks that make up the loop.
+//
+// It can calculate on the fly various bits of information, for example:
+//
+// * whether there is a preheader for the loop
+// * the number of back edges to the header
+// * whether or not a particular block branches out of the loop
+// * the successor blocks of the loop
+// * the loop depth
+// * the trip count
+// * etc...
+//
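+// A minimal usage sketch from inside another machine function pass (the pass
+// boilerplate and the MBB variable are assumed, not defined in this header):
+//
+//   MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
+//   bool InLoop = MLI.getLoopFor(MBB) != 0;
+//   unsigned Depth = MLI.getLoopDepth(MBB);
+//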
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINE_LOOP_INFO_H
+#define LLVM_CODEGEN_MACHINE_LOOP_INFO_H
+
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/Analysis/LoopInfo.h"
+
+namespace llvm {
+
+// Provide overrides for Loop methods that don't make sense for machine loops.
+template<> inline
+PHINode *LoopBase<MachineBasicBlock>::getCanonicalInductionVariable() const {
+ assert(0 && "getCanonicalInductionVariable not supported for machine loops!");
+ return 0;
+}
+
+template<> inline Instruction*
+LoopBase<MachineBasicBlock>::getCanonicalInductionVariableIncrement() const {
+ assert(0 &&
+ "getCanonicalInductionVariableIncrement not supported for machine loops!");
+ return 0;
+}
+
+template<>
+inline bool LoopBase<MachineBasicBlock>::isLoopInvariant(Value *V) const {
+ assert(0 && "isLoopInvariant not supported for machine loops!");
+ return false;
+}
+
+template<>
+inline Value *LoopBase<MachineBasicBlock>::getTripCount() const {
+ assert(0 && "getTripCount not supported for machine loops!");
+ return 0;
+}
+
+template<>
+inline bool LoopBase<MachineBasicBlock>::isLCSSAForm() const {
+ assert(0 && "isLCSSAForm not supported for machine loops");
+ return false;
+}
+
+typedef LoopBase<MachineBasicBlock> MachineLoop;
+
+class MachineLoopInfo : public MachineFunctionPass {
+ LoopInfoBase<MachineBasicBlock>* LI;
+ friend class LoopBase<MachineBasicBlock>;
+
+ LoopInfoBase<MachineBasicBlock>& getBase() { return *LI; }
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ MachineLoopInfo() : MachineFunctionPass(&ID) {
+ LI = new LoopInfoBase<MachineBasicBlock>();
+ }
+
+ ~MachineLoopInfo() { delete LI; }
+
+ /// iterator/begin/end - The interface to the top-level loops in the current
+ /// function.
+ ///
+ typedef std::vector<MachineLoop*>::const_iterator iterator;
+ inline iterator begin() const { return LI->begin(); }
+ inline iterator end() const { return LI->end(); }
+ bool empty() const { return LI->empty(); }
+
+  /// getLoopFor - Return the innermost loop that BB lives in. If a basic
+ /// block is in no loop (for example the entry node), null is returned.
+ ///
+ inline MachineLoop *getLoopFor(const MachineBasicBlock *BB) const {
+ return LI->getLoopFor(BB);
+ }
+
+ /// operator[] - same as getLoopFor...
+ ///
+ inline const MachineLoop *operator[](const MachineBasicBlock *BB) const {
+ return LI->getLoopFor(BB);
+ }
+
+ /// getLoopDepth - Return the loop nesting level of the specified block...
+ ///
+ inline unsigned getLoopDepth(const MachineBasicBlock *BB) const {
+ return LI->getLoopDepth(BB);
+ }
+
+  /// isLoopHeader - True if the block is a loop header node.
+ inline bool isLoopHeader(MachineBasicBlock *BB) const {
+ return LI->isLoopHeader(BB);
+ }
+
+  /// runOnMachineFunction - Calculate the natural loop information.
+ ///
+ virtual bool runOnMachineFunction(MachineFunction &F);
+
+ virtual void releaseMemory() { LI->releaseMemory(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ /// removeLoop - This removes the specified top-level loop from this loop info
+ /// object. The loop is not deleted, as it will presumably be inserted into
+ /// another loop.
+ inline MachineLoop *removeLoop(iterator I) { return LI->removeLoop(I); }
+
+ /// changeLoopFor - Change the top-level loop that contains BB to the
+ /// specified loop. This should be used by transformations that restructure
+ /// the loop hierarchy tree.
+ inline void changeLoopFor(MachineBasicBlock *BB, MachineLoop *L) {
+ LI->changeLoopFor(BB, L);
+ }
+
+ /// changeTopLevelLoop - Replace the specified loop in the top-level loops
+ /// list with the indicated loop.
+ inline void changeTopLevelLoop(MachineLoop *OldLoop, MachineLoop *NewLoop) {
+ LI->changeTopLevelLoop(OldLoop, NewLoop);
+ }
+
+ /// addTopLevelLoop - This adds the specified loop to the collection of
+ /// top-level loops.
+ inline void addTopLevelLoop(MachineLoop *New) {
+ LI->addTopLevelLoop(New);
+ }
+
+ /// removeBlock - This method completely removes BB from all data structures,
+ /// including all of the Loop objects it is nested in and our mapping from
+ /// MachineBasicBlocks to loops.
+ void removeBlock(MachineBasicBlock *BB) {
+ LI->removeBlock(BB);
+ }
+};
+
+
+// Allow clients to walk the list of nested loops...
+template <> struct GraphTraits<const MachineLoop*> {
+ typedef const MachineLoop NodeType;
+ typedef std::vector<MachineLoop*>::const_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(const MachineLoop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
+
+template <> struct GraphTraits<MachineLoop*> {
+ typedef MachineLoop NodeType;
+ typedef std::vector<MachineLoop*>::const_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(MachineLoop *L) { return L; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
new file mode 100644
index 0000000..4388c0a
--- /dev/null
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -0,0 +1,86 @@
+//==- llvm/CodeGen/MachineMemOperand.h - MachineMemOperand class -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineMemOperand class, which is a
+// description of a memory reference. It is used to help track dependencies
+// in the backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMEMOPERAND_H
+#define LLVM_CODEGEN_MACHINEMEMOPERAND_H
+
+namespace llvm {
+
+class Value;
+class FoldingSetNodeID;
+
+//===----------------------------------------------------------------------===//
+/// MachineMemOperand - A description of a memory reference used in the backend.
+/// Instead of holding a StoreInst or LoadInst, this class holds the address
+/// Value of the reference along with a byte size and offset. This allows it
+/// to describe lowered loads and stores. Also, the special PseudoSourceValue
+/// objects can be used to represent loads and stores to memory locations
+/// that aren't explicit in the regular LLVM IR.
+///
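+/// A construction sketch for a 4-byte, 4-byte-aligned store (Ptr is an assumed
+/// IR Value describing the address, taken from the surrounding code):
+///
+///   MachineMemOperand MMO(Ptr, MachineMemOperand::MOStore,
+///                         /*Offset=*/0, /*Size=*/4, /*Alignment=*/4);
+///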
+class MachineMemOperand {
+ int64_t Offset;
+ uint64_t Size;
+ const Value *V;
+ unsigned int Flags;
+
+public:
+ /// Flags values. These may be or'd together.
+ enum MemOperandFlags {
+ /// The memory access reads data.
+ MOLoad = 1,
+ /// The memory access writes data.
+ MOStore = 2,
+ /// The memory access is volatile.
+ MOVolatile = 4
+ };
+
+  /// MachineMemOperand - Construct a MachineMemOperand object with the
+ /// specified address Value, flags, offset, size, and alignment.
+ MachineMemOperand(const Value *v, unsigned int f, int64_t o, uint64_t s,
+ unsigned int a);
+
+ /// getValue - Return the base address of the memory access.
+ /// Special values are PseudoSourceValue::FPRel, PseudoSourceValue::SPRel,
+ /// and the other PseudoSourceValue members which indicate references to
+ /// frame/stack pointer relative references and other special references.
+ const Value *getValue() const { return V; }
+
+ /// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
+ unsigned int getFlags() const { return Flags & 7; }
+
+ /// getOffset - For normal values, this is a byte offset added to the base
+ /// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
+ /// number.
+ int64_t getOffset() const { return Offset; }
+
+ /// getSize - Return the size in bytes of the memory reference.
+ uint64_t getSize() const { return Size; }
+
+ /// getAlignment - Return the minimum known alignment in bytes of the
+ /// memory reference.
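+  /// The alignment is stored in the high bits of Flags as log2(alignment)+1,
+  /// so the decode below yields 0 when no alignment has been recorded.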
+ unsigned int getAlignment() const { return (1u << (Flags >> 3)) >> 1; }
+
+ bool isLoad() const { return Flags & MOLoad; }
+ bool isStore() const { return Flags & MOStore; }
+ bool isVolatile() const { return Flags & MOVolatile; }
+
+ /// Profile - Gather unique data for the object.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h
new file mode 100644
index 0000000..1872bd2
--- /dev/null
+++ b/include/llvm/CodeGen/MachineModuleInfo.h
@@ -0,0 +1,300 @@
+//===-- llvm/CodeGen/MachineModuleInfo.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Collect meta information for a module. This information should be in a
+// neutral form that can be used by different debugging and exception handling
+// schemes.
+//
+// The organization of information is primarily clustered around the source
+// compile units. The main exception is source line correspondence where
+// inlining may interleave code from various compile units.
+//
+// The following information can be retrieved from the MachineModuleInfo.
+//
+// -- Source directories - Directories are uniqued based on their canonical
+// string and assigned a sequential numeric ID (base 1).
+// -- Source files - Files are also uniqued based on their name and directory
+// ID. A file ID is a sequential number (base 1).
+// -- Source line correspondence - A vector of file ID, line#, column# triples.
+// A DEBUG_LOCATION instruction is generated by the DAG Legalizer
+// corresponding to each entry in the source line list. This allows a debug
+// emitter to generate labels referenced by debug information tables.
+//
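+// A small sketch of how a consumer typically handles label IDs (MMI is an
+// assumed MachineModuleInfo pointer from the surrounding code):
+//
+//   unsigned ID = MMI->NextLabelID();         // reserve a fresh label ID
+//   ...
+//   if (unsigned Mapped = MMI->MappedLabel(ID))
+//     ...emit a reference to Mapped...        // zero means the label was deleted
+//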
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEMODULEINFO_H
+#define LLVM_CODEGEN_MACHINEMODULEINFO_H
+
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/MachineLocation.h"
+#include "llvm/GlobalValue.h"
+#include "llvm/Pass.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Forward declarations.
+class Constant;
+class GlobalVariable;
+class MachineBasicBlock;
+class MachineFunction;
+class Module;
+class PointerType;
+class StructType;
+
+//===----------------------------------------------------------------------===//
+/// LandingPadInfo - This structure is used to retain landing pad info for
+/// the current function.
+///
+struct LandingPadInfo {
+ MachineBasicBlock *LandingPadBlock; // Landing pad block.
+ SmallVector<unsigned, 1> BeginLabels; // Labels prior to invoke.
+ SmallVector<unsigned, 1> EndLabels; // Labels after invoke.
+ unsigned LandingPadLabel; // Label at beginning of landing pad.
+ Function *Personality; // Personality function.
+ std::vector<int> TypeIds; // List of type ids (filters negative)
+
+ explicit LandingPadInfo(MachineBasicBlock *MBB)
+ : LandingPadBlock(MBB)
+ , LandingPadLabel(0)
+ , Personality(NULL)
+ {}
+};
+
+//===----------------------------------------------------------------------===//
+/// MachineModuleInfo - This class contains meta information specific to a
+/// module. Queries can be made by different debugging and exception handling
+/// schemes and reformatted for specific use.
+///
+class MachineModuleInfo : public ImmutablePass {
+private:
+ // LabelIDList - One entry per assigned label. Normally the entry is equal to
+ // the list index(+1). If the entry is zero then the label has been deleted.
+  // Any other value indicates the label has been deleted but is mapped to
+ // another label.
+ std::vector<unsigned> LabelIDList;
+
+ // FrameMoves - List of moves done by a function's prolog. Used to construct
+ // frame maps by debug and exception handling consumers.
+ std::vector<MachineMove> FrameMoves;
+
+ // LandingPads - List of LandingPadInfo describing the landing pad information
+ // in the current function.
+ std::vector<LandingPadInfo> LandingPads;
+
+ // TypeInfos - List of C++ TypeInfo used in the current function.
+ //
+ std::vector<GlobalVariable *> TypeInfos;
+
+ // FilterIds - List of typeids encoding filters used in the current function.
+ //
+ std::vector<unsigned> FilterIds;
+
+ // FilterEnds - List of the indices in FilterIds corresponding to filter
+ // terminators.
+ //
+ std::vector<unsigned> FilterEnds;
+
+ // Personalities - Vector of all personality functions ever seen. Used to emit
+ // common EH frames.
+ std::vector<Function *> Personalities;
+
+  // UsedFunctions - The functions in the llvm.used list in a more easily
+ // searchable format.
+ SmallPtrSet<const Function *, 32> UsedFunctions;
+
+  /// UsedDbgLabels - Labels that are used by debug info entries.
+ SmallSet<unsigned, 8> UsedDbgLabels;
+
+ bool CallsEHReturn;
+ bool CallsUnwindInit;
+
+ /// DbgInfoAvailable - True if debugging information is available
+ /// in this module.
+ bool DbgInfoAvailable;
+public:
+ static char ID; // Pass identification, replacement for typeid
+
+ MachineModuleInfo();
+ ~MachineModuleInfo();
+
+ /// doInitialization - Initialize the state for a new module.
+ ///
+ bool doInitialization();
+
+ /// doFinalization - Tear down the state after completion of a module.
+ ///
+ bool doFinalization();
+
+ /// BeginFunction - Begin gathering function meta information.
+ ///
+ void BeginFunction(MachineFunction *MF);
+
+ /// EndFunction - Discard function meta information.
+ ///
+ void EndFunction();
+
+ /// AnalyzeModule - Scan the module for global debug information.
+ ///
+ void AnalyzeModule(Module &M);
+
+ /// hasDebugInfo - Returns true if valid debug info is present.
+ ///
+ bool hasDebugInfo() const { return DbgInfoAvailable; }
+  void setDebugInfoAvailability(bool avail) { DbgInfoAvailable = avail; }
+
+ bool callsEHReturn() const { return CallsEHReturn; }
+ void setCallsEHReturn(bool b) { CallsEHReturn = b; }
+
+ bool callsUnwindInit() const { return CallsUnwindInit; }
+ void setCallsUnwindInit(bool b) { CallsUnwindInit = b; }
+
+ /// NextLabelID - Return the next unique label id.
+ ///
+ unsigned NextLabelID() {
+ unsigned ID = (unsigned)LabelIDList.size() + 1;
+ LabelIDList.push_back(ID);
+ return ID;
+ }
+
+ /// InvalidateLabel - Inhibit use of the specified label # from
+ /// MachineModuleInfo, for example because the code was deleted.
+ void InvalidateLabel(unsigned LabelID) {
+ // Remap to zero to indicate deletion.
+ RemapLabel(LabelID, 0);
+ }
+
+ /// RemapLabel - Indicate that a label has been merged into another.
+ ///
+ void RemapLabel(unsigned OldLabelID, unsigned NewLabelID) {
+ assert(0 < OldLabelID && OldLabelID <= LabelIDList.size() &&
+ "Old label ID out of range.");
+ assert(NewLabelID <= LabelIDList.size() &&
+ "New label ID out of range.");
+ LabelIDList[OldLabelID - 1] = NewLabelID;
+ }
+
+ /// MappedLabel - Find out the label's final ID. Zero indicates deletion.
+ /// ID != Mapped ID indicates that the label was folded into another label.
+ unsigned MappedLabel(unsigned LabelID) const {
+ assert(LabelID <= LabelIDList.size() && "Debug label ID out of range.");
+ return LabelID ? LabelIDList[LabelID - 1] : 0;
+ }
+
+ /// isDbgLabelUsed - Return true if label with LabelID is used by
+ /// DwarfWriter.
+ bool isDbgLabelUsed(unsigned LabelID) {
+ return UsedDbgLabels.count(LabelID);
+ }
+
+ /// RecordUsedDbgLabel - Mark label with LabelID as used. This is used
+ /// by DwarfWriter to inform DebugLabelFolder that certain labels are
+ /// not to be deleted.
+ void RecordUsedDbgLabel(unsigned LabelID) {
+ UsedDbgLabels.insert(LabelID);
+ }
+
+ /// getFrameMoves - Returns a reference to a list of moves done in the current
+ /// function's prologue. Used to construct frame maps for debug and exception
+  /// handling consumers.
+ std::vector<MachineMove> &getFrameMoves() { return FrameMoves; }
+
+ //===-EH-----------------------------------------------------------------===//
+
+  /// getOrCreateLandingPadInfo - Find or create a LandingPadInfo for the
+ /// specified MachineBasicBlock.
+ LandingPadInfo &getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad);
+
+ /// addInvoke - Provide the begin and end labels of an invoke style call and
+ /// associate it with a try landing pad block.
+ void addInvoke(MachineBasicBlock *LandingPad, unsigned BeginLabel,
+ unsigned EndLabel);
+
+  /// addLandingPad - Add a new landing pad. Returns the label ID for the
+ /// landing pad entry.
+ unsigned addLandingPad(MachineBasicBlock *LandingPad);
+
+ /// addPersonality - Provide the personality function for the exception
+ /// information.
+ void addPersonality(MachineBasicBlock *LandingPad, Function *Personality);
+
+  /// getPersonalityIndex - Get the index of the current personality function
+  /// inside the Personalities array.
+ unsigned getPersonalityIndex() const;
+
+ /// getPersonalities - Return array of personality functions ever seen.
+ const std::vector<Function *>& getPersonalities() const {
+ return Personalities;
+ }
+
+  /// getUsedFunctions - Return the set of functions in the llvm.used list.
+ const SmallPtrSet<const Function *, 32>& getUsedFunctions() const {
+ return UsedFunctions;
+ }
+
+ /// addCatchTypeInfo - Provide the catch typeinfo for a landing pad.
+ ///
+ void addCatchTypeInfo(MachineBasicBlock *LandingPad,
+ std::vector<GlobalVariable *> &TyInfo);
+
+ /// addFilterTypeInfo - Provide the filter typeinfo for a landing pad.
+ ///
+ void addFilterTypeInfo(MachineBasicBlock *LandingPad,
+ std::vector<GlobalVariable *> &TyInfo);
+
+ /// addCleanup - Add a cleanup action for a landing pad.
+ ///
+ void addCleanup(MachineBasicBlock *LandingPad);
+
+ /// getTypeIDFor - Return the type id for the specified typeinfo. This is
+ /// function wide.
+ unsigned getTypeIDFor(GlobalVariable *TI);
+
+ /// getFilterIDFor - Return the id of the filter encoded by TyIds. This is
+ /// function wide.
+ int getFilterIDFor(std::vector<unsigned> &TyIds);
+
+ /// TidyLandingPads - Remap landing pad labels and remove any deleted landing
+ /// pads.
+ void TidyLandingPads();
+
+ /// getLandingPads - Return a reference to the landing pad info for the
+ /// current function.
+ const std::vector<LandingPadInfo> &getLandingPads() const {
+ return LandingPads;
+ }
+
+ /// getTypeInfos - Return a reference to the C++ typeinfo for the current
+ /// function.
+ const std::vector<GlobalVariable *> &getTypeInfos() const {
+ return TypeInfos;
+ }
+
+ /// getFilterIds - Return a reference to the typeids encoding filters used in
+ /// the current function.
+ const std::vector<unsigned> &getFilterIds() const {
+ return FilterIds;
+ }
+
+ /// getPersonality - Return a personality function if available. The presence
+ /// of one is required to emit exception handling info.
+ Function *getPersonality() const;
+
+}; // End class MachineModuleInfo
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineOperand.h b/include/llvm/CodeGen/MachineOperand.h
new file mode 100644
index 0000000..7a41684
--- /dev/null
+++ b/include/llvm/CodeGen/MachineOperand.h
@@ -0,0 +1,447 @@
+//===-- llvm/CodeGen/MachineOperand.h - MachineOperand class ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the MachineOperand class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
+#define LLVM_CODEGEN_MACHINEOPERAND_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <iosfwd>
+
+namespace llvm {
+
+class ConstantFP;
+class MachineBasicBlock;
+class GlobalValue;
+class MachineInstr;
+class TargetMachine;
+class MachineRegisterInfo;
+class raw_ostream;
+
+/// MachineOperand class - Representation of each machine instruction operand.
+///
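+/// Operands are normally built with the static Create* helpers below and
+/// attached with MachineInstr::addOperand. A tiny illustrative sketch (MI is
+/// an assumed MachineInstr pointer):
+///
+///   MI->addOperand(MachineOperand::CreateImm(42));
+///   MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/false));
+///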
+class MachineOperand {
+public:
+ enum MachineOperandType {
+ MO_Register, ///< Register operand.
+ MO_Immediate, ///< Immediate operand
+ MO_FPImmediate, ///< Floating-point immediate operand
+ MO_MachineBasicBlock, ///< MachineBasicBlock reference
+ MO_FrameIndex, ///< Abstract Stack Frame Index
+ MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
+ MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
+ MO_ExternalSymbol, ///< Name of external global symbol
+ MO_GlobalAddress ///< Address of a global value
+ };
+
+private:
+ /// OpKind - Specify what kind of operand this is. This discriminates the
+ /// union.
+ MachineOperandType OpKind : 8;
+
+ /// IsDef/IsImp/IsKill/IsDead flags - These are only valid for MO_Register
+ /// operands.
+
+ /// IsDef - True if this is a def, false if this is a use of the register.
+ ///
+ bool IsDef : 1;
+
+ /// IsImp - True if this is an implicit def or use, false if it is explicit.
+ ///
+ bool IsImp : 1;
+
+ /// IsKill - True if this instruction is the last use of the register on this
+ /// path through the function. This is only valid on uses of registers.
+ bool IsKill : 1;
+
+ /// IsDead - True if this register is never used by a subsequent instruction.
+ /// This is only valid on definitions of registers.
+ bool IsDead : 1;
+
+ /// IsEarlyClobber - True if this MO_Register 'def' operand is written to
+ /// by the MachineInstr before all input registers are read. This is used to
+ /// model the GCC inline asm '&' constraint modifier.
+ bool IsEarlyClobber : 1;
+
+ /// SubReg - Subregister number, only valid for MO_Register. A value of 0
+ /// indicates the MO_Register has no subReg.
+ unsigned char SubReg;
+
+ /// ParentMI - This is the instruction that this operand is embedded into.
+ /// This is valid for all operand types, when the operand is in an instr.
+ MachineInstr *ParentMI;
+
+ /// Contents union - This contains the payload for the various operand types.
+ union {
+ MachineBasicBlock *MBB; // For MO_MachineBasicBlock.
+ const ConstantFP *CFP; // For MO_FPImmediate.
+ int64_t ImmVal; // For MO_Immediate.
+
+ struct { // For MO_Register.
+ unsigned RegNo;
+ MachineOperand **Prev; // Access list for register.
+ MachineOperand *Next;
+ } Reg;
+
+ /// OffsetedInfo - This struct contains the offset and an object identifier.
+    /// It represents the object together with an optional offset from it.
+ struct {
+ union {
+ int Index; // For MO_*Index - The index itself.
+ const char *SymbolName; // For MO_ExternalSymbol.
+ GlobalValue *GV; // For MO_GlobalAddress.
+ } Val;
+ int64_t Offset; // An offset from the object.
+ } OffsetedInfo;
+ } Contents;
+
+ explicit MachineOperand(MachineOperandType K) : OpKind(K), ParentMI(0) {}
+public:
+ MachineOperand(const MachineOperand &M) {
+ *this = M;
+ }
+
+ ~MachineOperand() {}
+
+ /// getType - Returns the MachineOperandType for this operand.
+ ///
+ MachineOperandType getType() const { return OpKind; }
+
+ /// getParent - Return the instruction that this operand belongs to.
+ ///
+ MachineInstr *getParent() { return ParentMI; }
+ const MachineInstr *getParent() const { return ParentMI; }
+
+ void print(std::ostream &os, const TargetMachine *TM = 0) const;
+ void print(raw_ostream &os, const TargetMachine *TM = 0) const;
+
+ //===--------------------------------------------------------------------===//
+ // Accessors that tell you what kind of MachineOperand you're looking at.
+ //===--------------------------------------------------------------------===//
+
+ /// isReg - Tests if this is a MO_Register operand.
+ bool isReg() const { return OpKind == MO_Register; }
+ /// isImm - Tests if this is a MO_Immediate operand.
+ bool isImm() const { return OpKind == MO_Immediate; }
+ /// isFPImm - Tests if this is a MO_FPImmediate operand.
+ bool isFPImm() const { return OpKind == MO_FPImmediate; }
+ /// isMBB - Tests if this is a MO_MachineBasicBlock operand.
+ bool isMBB() const { return OpKind == MO_MachineBasicBlock; }
+ /// isFI - Tests if this is a MO_FrameIndex operand.
+ bool isFI() const { return OpKind == MO_FrameIndex; }
+ /// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
+ bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
+ /// isJTI - Tests if this is a MO_JumpTableIndex operand.
+ bool isJTI() const { return OpKind == MO_JumpTableIndex; }
+ /// isGlobal - Tests if this is a MO_GlobalAddress operand.
+ bool isGlobal() const { return OpKind == MO_GlobalAddress; }
+ /// isSymbol - Tests if this is a MO_ExternalSymbol operand.
+ bool isSymbol() const { return OpKind == MO_ExternalSymbol; }
+
+ //===--------------------------------------------------------------------===//
+ // Accessors for Register Operands
+ //===--------------------------------------------------------------------===//
+
+ /// getReg - Returns the register number.
+ unsigned getReg() const {
+ assert(isReg() && "This is not a register operand!");
+ return Contents.Reg.RegNo;
+ }
+
+ unsigned getSubReg() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return (unsigned)SubReg;
+ }
+
+ bool isUse() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return !IsDef;
+ }
+
+ bool isDef() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsDef;
+ }
+
+ bool isImplicit() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsImp;
+ }
+
+ bool isDead() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsDead;
+ }
+
+ bool isKill() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsKill;
+ }
+
+ bool isEarlyClobber() const {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ return IsEarlyClobber;
+ }
+
+ /// getNextOperandForReg - Return the next MachineOperand in the function that
+ /// uses or defines this register.
+ MachineOperand *getNextOperandForReg() const {
+ assert(isReg() && "This is not a register operand!");
+ return Contents.Reg.Next;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Mutators for Register Operands
+ //===--------------------------------------------------------------------===//
+
+ /// Change the register this operand corresponds to.
+ ///
+ void setReg(unsigned Reg);
+
+ void setSubReg(unsigned subReg) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ SubReg = (unsigned char)subReg;
+ }
+
+ void setIsUse(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsDef = !Val;
+ }
+
+ void setIsDef(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsDef = Val;
+ }
+
+ void setImplicit(bool Val = true) {
+ assert(isReg() && "Wrong MachineOperand accessor");
+ IsImp = Val;
+ }
+
+ void setIsKill(bool Val = true) {
+ assert(isReg() && !IsDef && "Wrong MachineOperand accessor");
+ IsKill = Val;
+ }
+
+ void setIsDead(bool Val = true) {
+ assert(isReg() && IsDef && "Wrong MachineOperand accessor");
+ IsDead = Val;
+ }
+
+ void setIsEarlyClobber(bool Val = true) {
+ assert(isReg() && IsDef && "Wrong MachineOperand accessor");
+ IsEarlyClobber = Val;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Accessors for various operand types.
+ //===--------------------------------------------------------------------===//
+
+ int64_t getImm() const {
+ assert(isImm() && "Wrong MachineOperand accessor");
+ return Contents.ImmVal;
+ }
+
+ const ConstantFP *getFPImm() const {
+ assert(isFPImm() && "Wrong MachineOperand accessor");
+ return Contents.CFP;
+ }
+
+ MachineBasicBlock *getMBB() const {
+ assert(isMBB() && "Wrong MachineOperand accessor");
+ return Contents.MBB;
+ }
+
+ int getIndex() const {
+ assert((isFI() || isCPI() || isJTI()) &&
+ "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.Index;
+ }
+
+ GlobalValue *getGlobal() const {
+ assert(isGlobal() && "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.GV;
+ }
+
+ int64_t getOffset() const {
+ assert((isGlobal() || isSymbol() || isCPI()) &&
+ "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Offset;
+ }
+
+ const char *getSymbolName() const {
+ assert(isSymbol() && "Wrong MachineOperand accessor");
+ return Contents.OffsetedInfo.Val.SymbolName;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Mutators for various operand types.
+ //===--------------------------------------------------------------------===//
+
+ void setImm(int64_t immVal) {
+ assert(isImm() && "Wrong MachineOperand mutator");
+ Contents.ImmVal = immVal;
+ }
+
+ void setOffset(int64_t Offset) {
+ assert((isGlobal() || isSymbol() || isCPI()) &&
+ "Wrong MachineOperand accessor");
+ Contents.OffsetedInfo.Offset = Offset;
+ }
+
+ void setIndex(int Idx) {
+ assert((isFI() || isCPI() || isJTI()) &&
+ "Wrong MachineOperand accessor");
+ Contents.OffsetedInfo.Val.Index = Idx;
+ }
+
+ void setMBB(MachineBasicBlock *MBB) {
+ assert(isMBB() && "Wrong MachineOperand accessor");
+ Contents.MBB = MBB;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Other methods.
+ //===--------------------------------------------------------------------===//
+
+ /// isIdenticalTo - Return true if this operand is identical to the specified
+ /// operand. Note: This method ignores isKill and isDead properties.
+ bool isIdenticalTo(const MachineOperand &Other) const;
+
+ /// ChangeToImmediate - Replace this operand with a new immediate operand of
+ /// the specified value. If an operand is known to be an immediate already,
+ /// the setImm method should be used.
+ void ChangeToImmediate(int64_t ImmVal);
+
+ /// ChangeToRegister - Replace this operand with a new register operand of
+  /// the specified value. If an operand is known to be a register already,
+ /// the setReg method should be used.
+ void ChangeToRegister(unsigned Reg, bool isDef, bool isImp = false,
+ bool isKill = false, bool isDead = false);
+
+ //===--------------------------------------------------------------------===//
+ // Construction methods.
+ //===--------------------------------------------------------------------===//
+
+ static MachineOperand CreateImm(int64_t Val) {
+ MachineOperand Op(MachineOperand::MO_Immediate);
+ Op.setImm(Val);
+ return Op;
+ }
+
+ static MachineOperand CreateFPImm(const ConstantFP *CFP) {
+ MachineOperand Op(MachineOperand::MO_FPImmediate);
+ Op.Contents.CFP = CFP;
+ return Op;
+ }
+
+ static MachineOperand CreateReg(unsigned Reg, bool isDef, bool isImp = false,
+ bool isKill = false, bool isDead = false,
+ unsigned SubReg = 0,
+ bool isEarlyClobber = false) {
+ MachineOperand Op(MachineOperand::MO_Register);
+ Op.IsDef = isDef;
+ Op.IsImp = isImp;
+ Op.IsKill = isKill;
+ Op.IsDead = isDead;
+ Op.IsEarlyClobber = isEarlyClobber;
+ Op.Contents.Reg.RegNo = Reg;
+ Op.Contents.Reg.Prev = 0;
+ Op.Contents.Reg.Next = 0;
+ Op.SubReg = SubReg;
+ return Op;
+ }
+ static MachineOperand CreateMBB(MachineBasicBlock *MBB) {
+ MachineOperand Op(MachineOperand::MO_MachineBasicBlock);
+ Op.setMBB(MBB);
+ return Op;
+ }
+ static MachineOperand CreateFI(unsigned Idx) {
+ MachineOperand Op(MachineOperand::MO_FrameIndex);
+ Op.setIndex(Idx);
+ return Op;
+ }
+ static MachineOperand CreateCPI(unsigned Idx, int Offset) {
+ MachineOperand Op(MachineOperand::MO_ConstantPoolIndex);
+ Op.setIndex(Idx);
+ Op.setOffset(Offset);
+ return Op;
+ }
+ static MachineOperand CreateJTI(unsigned Idx) {
+ MachineOperand Op(MachineOperand::MO_JumpTableIndex);
+ Op.setIndex(Idx);
+ return Op;
+ }
+ static MachineOperand CreateGA(GlobalValue *GV, int64_t Offset) {
+ MachineOperand Op(MachineOperand::MO_GlobalAddress);
+ Op.Contents.OffsetedInfo.Val.GV = GV;
+ Op.setOffset(Offset);
+ return Op;
+ }
+ static MachineOperand CreateES(const char *SymName, int64_t Offset = 0) {
+ MachineOperand Op(MachineOperand::MO_ExternalSymbol);
+ Op.Contents.OffsetedInfo.Val.SymbolName = SymName;
+ Op.setOffset(Offset);
+ return Op;
+ }
+ const MachineOperand &operator=(const MachineOperand &MO) {
+ OpKind = MO.OpKind;
+ IsDef = MO.IsDef;
+ IsImp = MO.IsImp;
+ IsKill = MO.IsKill;
+ IsDead = MO.IsDead;
+ IsEarlyClobber = MO.IsEarlyClobber;
+ SubReg = MO.SubReg;
+ ParentMI = MO.ParentMI;
+ Contents = MO.Contents;
+ return *this;
+ }
+
+ friend class MachineInstr;
+ friend class MachineRegisterInfo;
+private:
+ //===--------------------------------------------------------------------===//
+ // Methods for handling register use/def lists.
+ //===--------------------------------------------------------------------===//
+
+ /// isOnRegUseList - Return true if this operand is on a register use/def list
+ /// or false if not. This can only be called for register operands that are
+ /// part of a machine instruction.
+ bool isOnRegUseList() const {
+ assert(isReg() && "Can only add reg operand to use lists");
+ return Contents.Reg.Prev != 0;
+ }
+
+ /// AddRegOperandToRegInfo - Add this register operand to the specified
+ /// MachineRegisterInfo. If it is null, then the next/prev fields should be
+ /// explicitly nulled out.
+ void AddRegOperandToRegInfo(MachineRegisterInfo *RegInfo);
+
+ /// RemoveRegOperandFromRegInfo - Remove this register operand from the
+ /// MachineRegisterInfo it is linked with.
+ void RemoveRegOperandFromRegInfo();
+};
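+
+// A minimal usage sketch (editorial; not part of the original header). Client
+// code typically builds operands with the static Create* methods above and
+// attaches them to an instruction, or rewrites an operand in place. MI and
+// Reg below are assumed to be a MachineInstr* and a register supplied by the
+// caller; MachineInstr::addOperand is declared in MachineInstr.h.
+//
+//   MI->addOperand(MachineOperand::CreateReg(Reg, /*isDef=*/true));
+//   MI->addOperand(MachineOperand::CreateImm(42));
+//
+//   // Later, fold the second operand into a known constant:
+//   MachineOperand &MO = MI->getOperand(1);
+//   if (MO.isImm())
+//     MO.setImm(MO.getImm() + 1);      // already an immediate: use setImm
+//   else
+//     MO.ChangeToImmediate(0);         // otherwise rewrite the operand kind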
+
+inline std::ostream &operator<<(std::ostream &OS, const MachineOperand &MO) {
+ MO.print(OS, 0);
+ return OS;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
+ MO.print(OS, 0);
+ return OS;
+}
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachinePassRegistry.h b/include/llvm/CodeGen/MachinePassRegistry.h
new file mode 100644
index 0000000..680d2b8
--- /dev/null
+++ b/include/llvm/CodeGen/MachinePassRegistry.h
@@ -0,0 +1,156 @@
+//===-- llvm/CodeGen/MachinePassRegistry.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the mechanics for machine function pass registries. A
+// function pass registry (MachinePassRegistry) is auto-filled by the static
+// constructors of MachinePassRegistryNode. In addition, a command-line
+// parser (RegisterPassParser) listens to each registry for additions and
+// removals, so that the appropriate command option is kept up to date.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+#define LLVM_CODEGEN_MACHINEPASSREGISTRY_H
+
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/Support/CommandLine.h"
+
+namespace llvm {
+
+typedef void *(*MachinePassCtor)();
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryListener - Listener for additions and removals of nodes
+/// in the registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryListener {
+public:
+ MachinePassRegistryListener() {}
+ virtual ~MachinePassRegistryListener() {}
+ virtual void NotifyAdd(const char *N, MachinePassCtor C, const char *D) = 0;
+ virtual void NotifyRemove(const char *N) = 0;
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistryNode - Machine pass node stored in registration list.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistryNode {
+
+private:
+
+ MachinePassRegistryNode *Next; // Next function pass in list.
+ const char *Name; // Name of function pass.
+ const char *Description; // Description string.
+ MachinePassCtor Ctor; // Function pass creator.
+
+public:
+
+ MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
+ : Next(NULL)
+ , Name(N)
+ , Description(D)
+ , Ctor(C)
+ {}
+
+ // Accessors
+ MachinePassRegistryNode *getNext() const { return Next; }
+ MachinePassRegistryNode **getNextAddress() { return &Next; }
+ const char *getName() const { return Name; }
+ const char *getDescription() const { return Description; }
+ MachinePassCtor getCtor() const { return Ctor; }
+ void setNext(MachinePassRegistryNode *N) { Next = N; }
+
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// MachinePassRegistry - Track the registration of machine passes.
+///
+//===----------------------------------------------------------------------===//
+class MachinePassRegistry {
+
+private:
+
+ MachinePassRegistryNode *List; // List of registry nodes.
+ MachinePassCtor Default; // Default function pass creator.
+  MachinePassRegistryListener* Listener;// Listener for list adds and removes.
+
+public:
+
+ // NO CONSTRUCTOR - we don't want static constructor ordering to mess
+ // with the registry.
+
+ // Accessors.
+ //
+ MachinePassRegistryNode *getList() { return List; }
+ MachinePassCtor getDefault() { return Default; }
+ void setDefault(MachinePassCtor C) { Default = C; }
+ void setListener(MachinePassRegistryListener *L) { Listener = L; }
+
+ /// Add - Adds a function pass to the registration list.
+ ///
+ void Add(MachinePassRegistryNode *Node);
+
+ /// Remove - Removes a function pass from the registration list.
+ ///
+ void Remove(MachinePassRegistryNode *Node);
+
+};
+
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterPassParser class - Handle the addition of new machine passes.
+///
+//===----------------------------------------------------------------------===//
+template<class RegistryClass>
+class RegisterPassParser : public MachinePassRegistryListener,
+ public cl::parser<typename RegistryClass::FunctionPassCtor> {
+public:
+ RegisterPassParser() {}
+ ~RegisterPassParser() { RegistryClass::setListener(NULL); }
+
+ void initialize(cl::Option &O) {
+ cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);
+
+ // Add existing passes to option.
+ for (RegistryClass *Node = RegistryClass::getList();
+ Node; Node = Node->getNext()) {
+ addLiteralOption(Node->getName(),
+ (typename RegistryClass::FunctionPassCtor)Node->getCtor(),
+ Node->getDescription());
+ }
+
+ // Make sure we listen for list changes.
+ RegistryClass::setListener(this);
+ }
+
+ // Implement the MachinePassRegistryListener callbacks.
+ //
+ virtual void NotifyAdd(const char *N,
+ MachinePassCtor C,
+ const char *D) {
+ this->addLiteralOption(N, (typename RegistryClass::FunctionPassCtor)C, D);
+ }
+ virtual void NotifyRemove(const char *N) {
+ this->removeLiteralOption(N);
+ }
+};
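+
+// A minimal wiring sketch (editorial; not part of the original header): a
+// registry is typically exposed on the command line by combining cl::opt with
+// RegisterPassParser. RegisterRegAlloc (llvm/CodeGen/RegAllocRegistry.h) and
+// createLinearScanRegisterAllocator (llvm/CodeGen/Passes.h) are used here as
+// the concrete registry and default creator.
+//
+//   static cl::opt<RegisterRegAlloc::FunctionPassCtor, false,
+//                  RegisterPassParser<RegisterRegAlloc> >
+//   RegAlloc("regalloc",
+//            cl::init(&createLinearScanRegisterAllocator),
+//            cl::desc("Register allocator to use"));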
+
+
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
new file mode 100644
index 0000000..02f9b7c
--- /dev/null
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -0,0 +1,305 @@
+//===-- llvm/CodeGen/MachineRegisterInfo.h ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINEREGISTERINFO_H
+#define LLVM_CODEGEN_MACHINEREGISTERINFO_H
+
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/iterator.h"
+#include <vector>
+
+namespace llvm {
+
+/// MachineRegisterInfo - Keep track of information for virtual and physical
+/// registers, including vreg register classes, use/def chains for registers,
+/// etc.
+class MachineRegisterInfo {
+ /// VRegInfo - Information we keep for each virtual register. The entries in
+ /// this vector are actually converted to vreg numbers by adding the
+ /// TargetRegisterInfo::FirstVirtualRegister delta to their index.
+ ///
+ /// Each element in this list contains the register class of the vreg and the
+ /// start of the use/def list for the register.
+ std::vector<std::pair<const TargetRegisterClass*, MachineOperand*> > VRegInfo;
+
+  /// RegClass2VRegMap - This vector acts as a map from TargetRegisterClass to
+ /// virtual registers. For each target register class, it keeps a list of
+ /// virtual registers belonging to the class.
+ std::vector<std::vector<unsigned> > RegClass2VRegMap;
+
+ /// PhysRegUseDefLists - This is an array of the head of the use/def list for
+ /// physical registers.
+ MachineOperand **PhysRegUseDefLists;
+
+ /// UsedPhysRegs - This is a bit vector that is computed and set by the
+ /// register allocator, and must be kept up to date by passes that run after
+ /// register allocation (though most don't modify this). This is used
+ /// so that the code generator knows which callee save registers to save and
+ /// for other target specific uses.
+ BitVector UsedPhysRegs;
+
+ /// LiveIns/LiveOuts - Keep track of the physical registers that are
+ /// livein/liveout of the function. Live in values are typically arguments in
+ /// registers, live out values are typically return values in registers.
+ /// LiveIn values are allowed to have virtual registers associated with them,
+ /// stored in the second element.
+ std::vector<std::pair<unsigned, unsigned> > LiveIns;
+ std::vector<unsigned> LiveOuts;
+
+ MachineRegisterInfo(const MachineRegisterInfo&); // DO NOT IMPLEMENT
+ void operator=(const MachineRegisterInfo&); // DO NOT IMPLEMENT
+public:
+ explicit MachineRegisterInfo(const TargetRegisterInfo &TRI);
+ ~MachineRegisterInfo();
+
+ //===--------------------------------------------------------------------===//
+ // Register Info
+ //===--------------------------------------------------------------------===//
+
+ /// reg_begin/reg_end - Provide iteration support to walk over all definitions
+ /// and uses of a register within the MachineFunction that corresponds to this
+ /// MachineRegisterInfo object.
+ template<bool Uses, bool Defs>
+ class defusechain_iterator;
+
+ /// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
+ /// register.
+ typedef defusechain_iterator<true,true> reg_iterator;
+ reg_iterator reg_begin(unsigned RegNo) const {
+ return reg_iterator(getRegUseDefListHead(RegNo));
+ }
+ static reg_iterator reg_end() { return reg_iterator(0); }
+
+ /// reg_empty - Return true if there are no instructions using or defining the
+ /// specified register (it may be live-in).
+ bool reg_empty(unsigned RegNo) const { return reg_begin(RegNo) == reg_end(); }
+
+ /// def_iterator/def_begin/def_end - Walk all defs of the specified register.
+ typedef defusechain_iterator<false,true> def_iterator;
+ def_iterator def_begin(unsigned RegNo) const {
+ return def_iterator(getRegUseDefListHead(RegNo));
+ }
+ static def_iterator def_end() { return def_iterator(0); }
+
+ /// def_empty - Return true if there are no instructions defining the
+ /// specified register (it may be live-in).
+ bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
+
+ /// use_iterator/use_begin/use_end - Walk all uses of the specified register.
+ typedef defusechain_iterator<true,false> use_iterator;
+ use_iterator use_begin(unsigned RegNo) const {
+ return use_iterator(getRegUseDefListHead(RegNo));
+ }
+ static use_iterator use_end() { return use_iterator(0); }
+
+ /// use_empty - Return true if there are no instructions using the specified
+ /// register.
+ bool use_empty(unsigned RegNo) const { return use_begin(RegNo) == use_end(); }
+
+
+ /// replaceRegWith - Replace all instances of FromReg with ToReg in the
+ /// machine function. This is like llvm-level X->replaceAllUsesWith(Y),
+  /// except that it also changes any definitions of the register.
+ void replaceRegWith(unsigned FromReg, unsigned ToReg);
+
+ /// getRegUseDefListHead - Return the head pointer for the register use/def
+ /// list for the specified virtual or physical register.
+ MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
+ if (RegNo < TargetRegisterInfo::FirstVirtualRegister)
+ return PhysRegUseDefLists[RegNo];
+ RegNo -= TargetRegisterInfo::FirstVirtualRegister;
+ return VRegInfo[RegNo].second;
+ }
+
+ MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
+ if (RegNo < TargetRegisterInfo::FirstVirtualRegister)
+ return PhysRegUseDefLists[RegNo];
+ RegNo -= TargetRegisterInfo::FirstVirtualRegister;
+ return VRegInfo[RegNo].second;
+ }
+
+ /// getVRegDef - Return the machine instr that defines the specified virtual
+ /// register or null if none is found. This assumes that the code is in SSA
+ /// form, so there should only be one definition.
+ MachineInstr *getVRegDef(unsigned Reg) const;
+
+#ifndef NDEBUG
+ void dumpUses(unsigned RegNo) const;
+#endif
+
+ //===--------------------------------------------------------------------===//
+ // Virtual Register Info
+ //===--------------------------------------------------------------------===//
+
+ /// getRegClass - Return the register class of the specified virtual register.
+ ///
+ const TargetRegisterClass *getRegClass(unsigned Reg) const {
+ Reg -= TargetRegisterInfo::FirstVirtualRegister;
+ assert(Reg < VRegInfo.size() && "Invalid vreg!");
+ return VRegInfo[Reg].first;
+ }
+
+ /// setRegClass - Set the register class of the specified virtual register.
+ ///
+ void setRegClass(unsigned Reg, const TargetRegisterClass *RC);
+
+ /// createVirtualRegister - Create and return a new virtual register in the
+ /// function with the specified register class.
+ ///
+ unsigned createVirtualRegister(const TargetRegisterClass *RegClass);
+
+ /// getLastVirtReg - Return the highest currently assigned virtual register.
+ ///
+ unsigned getLastVirtReg() const {
+ return (unsigned)VRegInfo.size()+TargetRegisterInfo::FirstVirtualRegister-1;
+ }
+
+ /// getRegClassVirtRegs - Return the list of virtual registers of the given
+ /// target register class.
+ std::vector<unsigned> &getRegClassVirtRegs(const TargetRegisterClass *RC) {
+ return RegClass2VRegMap[RC->getID()];
+ }
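+
+  // A brief sketch (editorial): creating a fresh virtual register during
+  // instruction selection, where MRI is this MachineRegisterInfo and RC is
+  // some target register class obtained from TargetRegisterInfo.
+  //
+  //   unsigned NewVReg = MRI.createVirtualRegister(RC);
+  //   assert(MRI.getRegClass(NewVReg) == RC && "class is recorded per vreg");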
+
+ //===--------------------------------------------------------------------===//
+ // Physical Register Use Info
+ //===--------------------------------------------------------------------===//
+
+ /// isPhysRegUsed - Return true if the specified register is used in this
+ /// function. This only works after register allocation.
+ bool isPhysRegUsed(unsigned Reg) const { return UsedPhysRegs[Reg]; }
+
+ /// setPhysRegUsed - Mark the specified register used in this function.
+ /// This should only be called during and after register allocation.
+ void setPhysRegUsed(unsigned Reg) { UsedPhysRegs[Reg] = true; }
+
+ /// setPhysRegUnused - Mark the specified register unused in this function.
+ /// This should only be called during and after register allocation.
+ void setPhysRegUnused(unsigned Reg) { UsedPhysRegs[Reg] = false; }
+
+
+ //===--------------------------------------------------------------------===//
+ // LiveIn/LiveOut Management
+ //===--------------------------------------------------------------------===//
+
+ /// addLiveIn/Out - Add the specified register as a live in/out. Note that it
+ /// is an error to add the same register to the same set more than once.
+ void addLiveIn(unsigned Reg, unsigned vreg = 0) {
+ LiveIns.push_back(std::make_pair(Reg, vreg));
+ }
+ void addLiveOut(unsigned Reg) { LiveOuts.push_back(Reg); }
+
+ // Iteration support for live in/out sets. These sets are kept in sorted
+ // order by their register number.
+ typedef std::vector<std::pair<unsigned,unsigned> >::const_iterator
+ livein_iterator;
+ typedef std::vector<unsigned>::const_iterator liveout_iterator;
+ livein_iterator livein_begin() const { return LiveIns.begin(); }
+ livein_iterator livein_end() const { return LiveIns.end(); }
+ bool livein_empty() const { return LiveIns.empty(); }
+ liveout_iterator liveout_begin() const { return LiveOuts.begin(); }
+ liveout_iterator liveout_end() const { return LiveOuts.end(); }
+ bool liveout_empty() const { return LiveOuts.empty(); }
+
+ bool isLiveIn(unsigned Reg) const {
+ for (livein_iterator I = livein_begin(), E = livein_end(); I != E; ++I)
+ if (I->first == Reg || I->second == Reg)
+ return true;
+ return false;
+ }
+
+private:
+ void HandleVRegListReallocation();
+
+public:
+ /// defusechain_iterator - This class provides iterator support for machine
+ /// operands in the function that use or define a specific register. If
+ /// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
+  /// returns defs. If neither is true then you are silly and it always
+ /// returns end().
+ template<bool ReturnUses, bool ReturnDefs>
+ class defusechain_iterator
+ : public forward_iterator<MachineInstr, ptrdiff_t> {
+ MachineOperand *Op;
+ explicit defusechain_iterator(MachineOperand *op) : Op(op) {
+ // If the first node isn't one we're interested in, advance to one that
+ // we are interested in.
+ if (op) {
+ if ((!ReturnUses && op->isUse()) ||
+ (!ReturnDefs && op->isDef()))
+ ++*this;
+ }
+ }
+ friend class MachineRegisterInfo;
+ public:
+ typedef forward_iterator<MachineInstr, ptrdiff_t>::reference reference;
+ typedef forward_iterator<MachineInstr, ptrdiff_t>::pointer pointer;
+
+ defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {}
+ defusechain_iterator() : Op(0) {}
+
+ bool operator==(const defusechain_iterator &x) const {
+ return Op == x.Op;
+ }
+ bool operator!=(const defusechain_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// atEnd - return true if this iterator is equal to reg_end() on the value.
+ bool atEnd() const { return Op == 0; }
+
+ // Iterator traversal: forward iteration only
+ defusechain_iterator &operator++() { // Preincrement
+ assert(Op && "Cannot increment end iterator!");
+ Op = Op->getNextOperandForReg();
+
+ // If this is an operand we don't care about, skip it.
+ while (Op && ((!ReturnUses && Op->isUse()) ||
+ (!ReturnDefs && Op->isDef())))
+ Op = Op->getNextOperandForReg();
+
+ return *this;
+ }
+ defusechain_iterator operator++(int) { // Postincrement
+ defusechain_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ MachineOperand &getOperand() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return *Op;
+ }
+
+ /// getOperandNo - Return the operand # of this MachineOperand in its
+ /// MachineInstr.
+ unsigned getOperandNo() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op - &Op->getParent()->getOperand(0);
+ }
+
+    // Retrieve the machine instruction containing the current operand.
+ MachineInstr &operator*() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return *Op->getParent();
+ }
+
+ MachineInstr *operator->() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op->getParent();
+ }
+ };
+
+};
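+
+// A minimal iteration sketch (editorial; not part of the original header):
+// walking every use of a register with the iterators declared above. MRI and
+// Reg are assumed to be supplied by the surrounding pass; dereferencing the
+// iterator yields the MachineInstr that contains the use.
+//
+//   for (MachineRegisterInfo::use_iterator UI = MRI.use_begin(Reg),
+//        UE = MachineRegisterInfo::use_end(); UI != UE; ++UI) {
+//     MachineInstr &UseMI = *UI;
+//     unsigned OpNo = UI.getOperandNo();
+//     // ... inspect UseMI.getOperand(OpNo) ...
+//   }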
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/MachineRelocation.h b/include/llvm/CodeGen/MachineRelocation.h
new file mode 100644
index 0000000..c539781
--- /dev/null
+++ b/include/llvm/CodeGen/MachineRelocation.h
@@ -0,0 +1,339 @@
+//===-- llvm/CodeGen/MachineRelocation.h - Target Relocation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MachineRelocation class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_MACHINERELOCATION_H
+#define LLVM_CODEGEN_MACHINERELOCATION_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace llvm {
+class GlobalValue;
+class MachineBasicBlock;
+
+/// MachineRelocation - This represents a target-specific relocation value,
+/// produced by the code emitter. This relocation is resolved after the code has
+/// been emitted, either to an object file or to memory, when the target of the
+/// relocation can be resolved.
+///
+/// A relocation is made up of the following logical portions:
+/// 1. An offset in the machine code buffer, the location to modify.
+/// 2. A target specific relocation type (a number from 0 to 63).
+/// 3. A symbol being referenced, either as a GlobalValue* or as a string.
+/// 4. An optional constant value to be added to the reference.
+/// 5. A bit, CanRewrite, which indicates to the JIT that a function stub is
+/// not needed for the relocation.
+/// 6. An index into the GOT, if the target uses a GOT
+///
+class MachineRelocation {
+ enum AddressType {
+    isResult,         // Relocation has been transformed into its result pointer.
+ isGV, // The Target.GV field is valid.
+ isIndirectSym, // Relocation of an indirect symbol.
+ isBB, // Relocation of BB address.
+ isExtSym, // The Target.ExtSym field is valid.
+ isConstPool, // Relocation of constant pool address.
+ isJumpTable, // Relocation of jump table address.
+ isGOTIndex // The Target.GOTIndex field is valid.
+ };
+
+ /// Offset - This is the offset from the start of the code buffer of the
+ /// relocation to perform.
+ uintptr_t Offset;
+
+ /// ConstantVal - A field that may be used by the target relocation type.
+ intptr_t ConstantVal;
+
+ union {
+    void *Result;           // If this has been resolved to an actual address.
+ GlobalValue *GV; // If this is a pointer to a GV or an indirect ref.
+ MachineBasicBlock *MBB; // If this is a pointer to a LLVM BB
+ const char *ExtSym; // If this is a pointer to a named symbol
+ unsigned Index; // Constant pool / jump table index
+ unsigned GOTIndex; // Index in the GOT of this symbol/global
+ } Target;
+
+ unsigned TargetReloType : 6; // The target relocation ID
+ AddressType AddrType : 4; // The field of Target to use
+ bool NeedStub : 1; // True if this relocation requires a stub
+ bool GOTRelative : 1; // Should this relocation be relative to the GOT?
+ bool TargetResolve : 1; // True if target should resolve the address
+
+public:
+ // Relocation types used in a generic implementation. Currently, relocation
+ // entries for all things use the generic VANILLA type until they are refined
+ // into target relocation types.
+ enum RelocationType {
+ VANILLA
+ };
+
+ /// MachineRelocation::getGV - Return a relocation entry for a GlobalValue.
+ ///
+ static MachineRelocation getGV(uintptr_t offset, unsigned RelocationType,
+ GlobalValue *GV, intptr_t cst = 0,
+ bool NeedStub = 0,
+ bool GOTrelative = 0) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isGV;
+ Result.NeedStub = NeedStub;
+ Result.GOTRelative = GOTrelative;
+ Result.TargetResolve = false;
+ Result.Target.GV = GV;
+ return Result;
+ }
+
+ /// MachineRelocation::getIndirectSymbol - Return a relocation entry for an
+ /// indirect symbol.
+ static MachineRelocation getIndirectSymbol(uintptr_t offset,
+ unsigned RelocationType,
+ GlobalValue *GV, intptr_t cst = 0,
+ bool NeedStub = 0,
+ bool GOTrelative = 0) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isIndirectSym;
+ Result.NeedStub = NeedStub;
+ Result.GOTRelative = GOTrelative;
+ Result.TargetResolve = false;
+ Result.Target.GV = GV;
+ return Result;
+ }
+
+ /// MachineRelocation::getBB - Return a relocation entry for a BB.
+ ///
+ static MachineRelocation getBB(uintptr_t offset,unsigned RelocationType,
+ MachineBasicBlock *MBB, intptr_t cst = 0) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isBB;
+ Result.NeedStub = false;
+ Result.GOTRelative = false;
+ Result.TargetResolve = false;
+ Result.Target.MBB = MBB;
+ return Result;
+ }
+
+ /// MachineRelocation::getExtSym - Return a relocation entry for an external
+ /// symbol, like "free".
+ ///
+ static MachineRelocation getExtSym(uintptr_t offset, unsigned RelocationType,
+ const char *ES, intptr_t cst = 0,
+ bool GOTrelative = 0) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isExtSym;
+ Result.NeedStub = true;
+ Result.GOTRelative = GOTrelative;
+ Result.TargetResolve = false;
+ Result.Target.ExtSym = ES;
+ return Result;
+ }
+
+ /// MachineRelocation::getConstPool - Return a relocation entry for a constant
+ /// pool entry.
+ ///
+ static MachineRelocation getConstPool(uintptr_t offset,unsigned RelocationType,
+ unsigned CPI, intptr_t cst = 0,
+ bool letTargetResolve = false) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isConstPool;
+ Result.NeedStub = false;
+ Result.GOTRelative = false;
+ Result.TargetResolve = letTargetResolve;
+ Result.Target.Index = CPI;
+ return Result;
+ }
+
+ /// MachineRelocation::getJumpTable - Return a relocation entry for a jump
+ /// table entry.
+ ///
+ static MachineRelocation getJumpTable(uintptr_t offset,unsigned RelocationType,
+ unsigned JTI, intptr_t cst = 0,
+ bool letTargetResolve = false) {
+ assert((RelocationType & ~63) == 0 && "Relocation type too large!");
+ MachineRelocation Result;
+ Result.Offset = offset;
+ Result.ConstantVal = cst;
+ Result.TargetReloType = RelocationType;
+ Result.AddrType = isJumpTable;
+ Result.NeedStub = false;
+ Result.GOTRelative = false;
+ Result.TargetResolve = letTargetResolve;
+ Result.Target.Index = JTI;
+ return Result;
+ }
+
+  /// getMachineCodeOffset - Return the offset into the code buffer at which
+  /// the relocation should be performed.
+ intptr_t getMachineCodeOffset() const {
+ return Offset;
+ }
+
+ /// getRelocationType - Return the target-specific relocation ID for this
+ /// relocation.
+ unsigned getRelocationType() const {
+ return TargetReloType;
+ }
+
+ /// getConstantVal - Get the constant value associated with this relocation.
+ /// This is often an offset from the symbol.
+ ///
+ intptr_t getConstantVal() const {
+ return ConstantVal;
+ }
+
+ /// setConstantVal - Set the constant value associated with this relocation.
+ /// This is often an offset from the symbol.
+ ///
+ void setConstantVal(intptr_t val) {
+ ConstantVal = val;
+ }
+
+ /// isGlobalValue - Return true if this relocation is a GlobalValue, as
+ /// opposed to a constant string.
+ bool isGlobalValue() const {
+ return AddrType == isGV;
+ }
+
+  /// isIndirectSymbol - Return true if this relocation is the address of an
+  /// indirect symbol.
+ bool isIndirectSymbol() const {
+ return AddrType == isIndirectSym;
+ }
+
+ /// isBasicBlock - Return true if this relocation is a basic block reference.
+ ///
+ bool isBasicBlock() const {
+ return AddrType == isBB;
+ }
+
+ /// isExternalSymbol - Return true if this is a constant string.
+ ///
+ bool isExternalSymbol() const {
+ return AddrType == isExtSym;
+ }
+
+ /// isConstantPoolIndex - Return true if this is a constant pool reference.
+ ///
+ bool isConstantPoolIndex() const {
+ return AddrType == isConstPool;
+ }
+
+ /// isJumpTableIndex - Return true if this is a jump table reference.
+ ///
+ bool isJumpTableIndex() const {
+ return AddrType == isJumpTable;
+ }
+
+  /// isGOTRelative - Return true if the target wants the index into the GOT of
+ /// the symbol rather than the address of the symbol.
+ bool isGOTRelative() const {
+ return GOTRelative;
+ }
+
+ /// doesntNeedStub - This function returns true if the JIT for this target
+  /// is capable of directly handling the relocated GlobalValue reference
+ /// without using either a stub function or issuing an extra load to get the
+ /// GV address.
+ bool doesntNeedStub() const {
+ return !NeedStub;
+ }
+
+ /// letTargetResolve - Return true if the target JITInfo is usually
+ /// responsible for resolving the address of this relocation.
+ bool letTargetResolve() const {
+ return TargetResolve;
+ }
+
+ /// getGlobalValue - If this is a global value reference, return the
+ /// referenced global.
+ GlobalValue *getGlobalValue() const {
+ assert((isGlobalValue() || isIndirectSymbol()) &&
+ "This is not a global value reference!");
+ return Target.GV;
+ }
+
+ MachineBasicBlock *getBasicBlock() const {
+ assert(isBasicBlock() && "This is not a basic block reference!");
+ return Target.MBB;
+ }
+
+  /// getExternalSymbol - If this is an external symbol reference, return the
+  /// referenced symbol name.
+ const char *getExternalSymbol() const {
+ assert(isExternalSymbol() && "This is not an external symbol reference!");
+ return Target.ExtSym;
+ }
+
+ /// getConstantPoolIndex - If this is a const pool reference, return
+ /// the index into the constant pool.
+ unsigned getConstantPoolIndex() const {
+ assert(isConstantPoolIndex() && "This is not a constant pool reference!");
+ return Target.Index;
+ }
+
+ /// getJumpTableIndex - If this is a jump table reference, return
+ /// the index into the jump table.
+ unsigned getJumpTableIndex() const {
+ assert(isJumpTableIndex() && "This is not a jump table reference!");
+ return Target.Index;
+ }
+
+ /// getResultPointer - Once this has been resolved to point to an actual
+ /// address, this returns the pointer.
+ void *getResultPointer() const {
+ assert(AddrType == isResult && "Result pointer isn't set yet!");
+ return Target.Result;
+ }
+
+ /// setResultPointer - Set the result to the specified pointer value.
+ ///
+ void setResultPointer(void *Ptr) {
+ Target.Result = Ptr;
+ AddrType = isResult;
+ }
+
+ /// setGOTIndex - Set the GOT index to a specific value.
+ void setGOTIndex(unsigned idx) {
+ AddrType = isGOTIndex;
+ Target.GOTIndex = idx;
+ }
+
+ /// getGOTIndex - Once this has been resolved to an entry in the GOT,
+ /// this returns that index. The index is from the lowest address entry
+ /// in the GOT.
+ unsigned getGOTIndex() const {
+ assert(AddrType == isGOTIndex);
+ return Target.GOTIndex;
+ }
+};
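+
+// A rough lifecycle sketch (editorial; not part of the original header): the
+// code emitter records a relocation at the current buffer offset and the JIT
+// later resolves it once the referenced address is known. CurOffset, GV,
+// ResolvedAddr, and the target relocation type RelocTy are assumed to be
+// provided by the surrounding emitter/JIT code.
+//
+//   MachineRelocation MR = MachineRelocation::getGV(CurOffset, RelocTy, GV);
+//   // ... emit remaining code, collect MR in the emitter's relocation list ...
+//   if (MR.isGlobalValue())
+//     MR.setResultPointer(ResolvedAddr);  // getResultPointer() is now valid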
+}
+
+#endif
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
new file mode 100644
index 0000000..7f1c16f
--- /dev/null
+++ b/include/llvm/CodeGen/Passes.h
@@ -0,0 +1,212 @@
+//===-- Passes.h - Target independent code generation passes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces to access the target independent code generation
+// passes provided by the LLVM backend.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PASSES_H
+#define LLVM_CODEGEN_PASSES_H
+
+#include <iosfwd>
+#include <string>
+
+namespace llvm {
+
+ class FunctionPass;
+ class PassInfo;
+ class TargetMachine;
+ class TargetLowering;
+ class RegisterCoalescer;
+
+ /// createUnreachableBlockEliminationPass - The LLVM code generator does not
+ /// work well with unreachable basic blocks (what live ranges make sense for a
+ /// block that cannot be reached?). As such, a code generator should either
+  /// not instruction select unreachable blocks, or it can run this pass as its
+ /// last LLVM modifying pass to clean up blocks that are not reachable from
+ /// the entry block.
+ FunctionPass *createUnreachableBlockEliminationPass();
+
+ /// MachineFunctionPrinter pass - This pass prints out the machine function to
+ /// standard error, as a debugging tool.
+ FunctionPass *createMachineFunctionPrinterPass(std::ostream *OS,
+ const std::string &Banner ="");
+
+ /// MachineLoopInfo pass - This pass is a loop analysis pass.
+ ///
+ extern const PassInfo *const MachineLoopInfoID;
+
+ /// MachineDominators pass - This pass is a machine dominators analysis pass.
+ ///
+ extern const PassInfo *const MachineDominatorsID;
+
+ /// PHIElimination pass - This pass eliminates machine instruction PHI nodes
+ /// by inserting copy instructions. This destroys SSA information, but is the
+ /// desired input for some register allocators. This pass is "required" by
+ /// these register allocator like this: AU.addRequiredID(PHIEliminationID);
+ ///
+ extern const PassInfo *const PHIEliminationID;
+
+ /// StrongPHIElimination pass - This pass eliminates machine instruction PHI
+ /// nodes by inserting copy instructions. This destroys SSA information, but
+ /// is the desired input for some register allocators. This pass is
+ /// "required" by these register allocator like this:
+ /// AU.addRequiredID(PHIEliminationID);
+ /// This pass is still in development
+ extern const PassInfo *const StrongPHIEliminationID;
+
+ extern const PassInfo *const PreAllocSplittingID;
+
+ /// SimpleRegisterCoalescing pass. Aggressively coalesces every register
+ /// copy it can.
+ ///
+ extern const PassInfo *const SimpleRegisterCoalescingID;
+
+ /// TwoAddressInstruction pass - This pass reduces two-address instructions to
+ /// use two operands. This destroys SSA information but it is desired by
+ /// register allocators.
+ extern const PassInfo *const TwoAddressInstructionPassID;
+
+ /// UnreachableMachineBlockElimination pass - This pass removes unreachable
+ /// machine basic blocks.
+ extern const PassInfo *const UnreachableMachineBlockElimID;
+
+ /// DeadMachineInstructionElim pass - This pass removes dead machine
+ /// instructions.
+ ///
+ FunctionPass *createDeadMachineInstructionElimPass();
+
+ /// Creates a register allocator as the user specified on the command line.
+ ///
+ FunctionPass *createRegisterAllocator();
+
+ /// SimpleRegisterAllocation Pass - This pass converts the input machine code
+ /// from SSA form to use explicit registers by spilling every register. Wow,
+ /// great policy huh?
+ ///
+ FunctionPass *createSimpleRegisterAllocator();
+
+ /// LocalRegisterAllocation Pass - This pass register allocates the input code
+ /// a basic block at a time, yielding code better than the simple register
+ /// allocator, but not as good as a global allocator.
+ ///
+ FunctionPass *createLocalRegisterAllocator();
+
+ /// BigBlockRegisterAllocation Pass - The BigBlock register allocator
+ /// munches single basic blocks at a time, like the local register
+ /// allocator. While the BigBlock allocator is a little slower, and uses
+ /// somewhat more memory than the local register allocator, it tends to
+ /// yield the best allocations (of any of the allocators) for blocks that
+ /// have hundreds or thousands of instructions in sequence.
+ ///
+ FunctionPass *createBigBlockRegisterAllocator();
+
+ /// LinearScanRegisterAllocation Pass - This pass implements the linear scan
+ /// register allocation algorithm, a global register allocator.
+ ///
+ FunctionPass *createLinearScanRegisterAllocator();
+
+ /// PBQPRegisterAllocation Pass - This pass implements the Partitioned Boolean
+  /// Quadratic Programming (PBQP) based register allocator.
+ ///
+ FunctionPass *createPBQPRegisterAllocator();
+
+ /// SimpleRegisterCoalescing Pass - Coalesce all copies possible. Can run
+ /// independently of the register allocator.
+ ///
+ RegisterCoalescer *createSimpleRegisterCoalescer();
+
+ /// PrologEpilogCodeInserter Pass - This pass inserts prolog and epilog code,
+ /// and eliminates abstract frame references.
+ ///
+ FunctionPass *createPrologEpilogCodeInserter();
+
+ /// LowerSubregs Pass - This pass lowers subregs to register-register copies
+ /// which yields suboptimal, but correct code if the register allocator
+ /// cannot coalesce all subreg operations during allocation.
+ ///
+ FunctionPass *createLowerSubregsPass();
+
+ /// createPostRAScheduler - under development.
+ FunctionPass *createPostRAScheduler();
+
+ /// BranchFolding Pass - This pass performs machine code CFG based
+ /// optimizations to delete branches to branches, eliminate branches to
+  /// successor blocks (creating fall throughs), and eliminate branches over
+ /// branches.
+ FunctionPass *createBranchFoldingPass(bool DefaultEnableTailMerge);
+
+ /// IfConverter Pass - This pass performs machine code if conversion.
+ FunctionPass *createIfConverterPass();
+
+  /// Code Placement Pass - This pass optimizes code placement and aligns loop
+  /// headers to a target-specific alignment boundary.
+ FunctionPass *createCodePlacementOptPass();
+
+ /// DebugLabelFoldingPass - This pass prunes out redundant debug labels. This
+ /// allows a debug emitter to determine if the range of two labels is empty,
+ /// by seeing if the labels map to the same reduced label.
+ FunctionPass *createDebugLabelFoldingPass();
+
+ /// MachineCodeDeletion Pass - This pass deletes all of the machine code for
+ /// the current function, which should happen after the function has been
+ /// emitted to a .s file or to memory.
+ FunctionPass *createMachineCodeDeleter();
+
+ /// getRegisterAllocator - This creates an instance of the register allocator
+ /// for the Sparc.
+ FunctionPass *getRegisterAllocator(TargetMachine &T);
+
+  /// GCLowering Pass - Performs target-independent LLVM IR transformations
+  /// for highly portable garbage collection strategies.
+ FunctionPass *createGCLoweringPass();
+
+ /// MachineCodeAnalysis Pass - Target-independent pass to mark safe points in
+ /// machine code. Must be added very late during code generation, just prior
+ /// to output, and importantly after all CFG transformations (such as branch
+ /// folding).
+ FunctionPass *createGCMachineCodeAnalysisPass();
+
+ /// Deleter Pass - Releases GC metadata.
+ ///
+ FunctionPass *createGCInfoDeleter();
+
+ /// Creates a pass to print GC metadata.
+ ///
+ FunctionPass *createGCInfoPrinter(std::ostream &OS);
+
+ /// createMachineLICMPass - This pass performs LICM on machine instructions.
+ ///
+ FunctionPass *createMachineLICMPass();
+
+ /// createMachineSinkingPass - This pass performs sinking on machine
+ /// instructions.
+ FunctionPass *createMachineSinkingPass();
+
+ /// createStackSlotColoringPass - This pass performs stack slot coloring.
+ FunctionPass *createStackSlotColoringPass(bool);
+
+ /// createStackProtectorPass - This pass adds stack protectors to functions.
+ FunctionPass *createStackProtectorPass(const TargetLowering *tli);
+
+  /// createMachineVerifierPass - This pass verifies generated machine code
+ /// instructions for correctness.
+ ///
+  /// @param allowDoubleDefs ignore double definitions of
+ /// registers. Useful before LiveVariables has run.
+ FunctionPass *createMachineVerifierPass(bool allowDoubleDefs);
+
+ /// createDwarfEHPass - This pass mulches exception handling code into a form
+ /// adapted to code generation. Required if using dwarf exception handling.
+ FunctionPass *createDwarfEHPass(const TargetLowering *tli, bool fast);
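+
+  // A minimal driver sketch (editorial; not part of the original header):
+  // running a couple of the IR-level passes declared above over a Module M.
+  // PassManager comes from llvm/PassManager.h and TLI from the TargetMachine;
+  // both are assumed to be set up by the surrounding tool.
+  //
+  //   PassManager PM;
+  //   PM.add(createUnreachableBlockEliminationPass());
+  //   PM.add(createStackProtectorPass(TLI));
+  //   PM.run(M);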
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/PseudoSourceValue.h b/include/llvm/CodeGen/PseudoSourceValue.h
new file mode 100644
index 0000000..3ad2502
--- /dev/null
+++ b/include/llvm/CodeGen/PseudoSourceValue.h
@@ -0,0 +1,71 @@
+//===-- llvm/CodeGen/PseudoSourceValue.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the PseudoSourceValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+#define LLVM_CODEGEN_PSEUDOSOURCEVALUE_H
+
+#include "llvm/Value.h"
+
+namespace llvm {
+ class MachineFrameInfo;
+ class raw_ostream;
+
+ /// PseudoSourceValue - Special value supplied for machine level alias
+  /// analysis. It indicates that a memory access references the function's
+  /// stack frame (e.g., a spill slot), the area below the stack frame (e.g.,
+  /// argument space), or the constant pool.
+ class PseudoSourceValue : public Value {
+ public:
+ PseudoSourceValue();
+
+ /// dump - Support for debugging, callable in GDB: V->dump()
+ //
+ virtual void dump() const;
+
+ /// print - Implement operator<< on PseudoSourceValue.
+ ///
+ virtual void print(raw_ostream &OS) const;
+
+ /// isConstant - Test whether this PseudoSourceValue has a constant value.
+ ///
+ virtual bool isConstant(const MachineFrameInfo *) const;
+
+ /// classof - Methods for support type inquiry through isa, cast, and
+ /// dyn_cast:
+ ///
+ static inline bool classof(const PseudoSourceValue *) { return true; }
+ static inline bool classof(const Value *V) {
+ return V->getValueID() == PseudoSourceValueVal;
+ }
+
+ /// A pseudo source value referencing a fixed stack frame entry,
+ /// e.g., a spill slot.
+ static const PseudoSourceValue *getFixedStack(int FI);
+
+ /// A source value referencing the area below the stack frame of a function,
+ /// e.g., the argument space.
+ static const PseudoSourceValue *getStack();
+
+    /// A source value referencing the global offset table (or something
+    /// like it).
+ static const PseudoSourceValue *getGOT();
+
+    /// A pseudo source value referencing the constant pool.
+ static const PseudoSourceValue *getConstantPool();
+
+    /// A pseudo source value referencing the jump table.
+ static const PseudoSourceValue *getJumpTable();
+ };
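+
+  // A short sketch (editorial): tagging a spill-slot access for machine-level
+  // alias analysis. FI is a frame index from MachineFrameInfo (MFI); the
+  // returned value is typically attached to a MachineMemOperand (declared in
+  // llvm/CodeGen/MachineMemOperand.h, not shown here).
+  //
+  //   const PseudoSourceValue *PSV = PseudoSourceValue::getFixedStack(FI);
+  //   if (PSV->isConstant(MFI))
+  //     ; // the slot is never modified, so loads from it may be reordered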
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/RegAllocRegistry.h b/include/llvm/CodeGen/RegAllocRegistry.h
new file mode 100644
index 0000000..a08e42a
--- /dev/null
+++ b/include/llvm/CodeGen/RegAllocRegistry.h
@@ -0,0 +1,64 @@
+//===-- llvm/CodeGen/RegAllocRegistry.h -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the register allocator function
+// pass registry (RegisterRegAlloc).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGENREGALLOCREGISTRY_H
+#define LLVM_CODEGENREGALLOCREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterRegAlloc class - Track the registration of register allocators.
+///
+//===----------------------------------------------------------------------===//
+class RegisterRegAlloc : public MachinePassRegistryNode {
+
+public:
+
+ typedef FunctionPass *(*FunctionPassCtor)();
+
+ static MachinePassRegistry Registry;
+
+ RegisterRegAlloc(const char *N, const char *D, FunctionPassCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
+ { Registry.Add(this); }
+ ~RegisterRegAlloc() { Registry.Remove(this); }
+
+
+ // Accessors.
+ //
+ RegisterRegAlloc *getNext() const {
+ return (RegisterRegAlloc *)MachinePassRegistryNode::getNext();
+ }
+ static RegisterRegAlloc *getList() {
+ return (RegisterRegAlloc *)Registry.getList();
+ }
+ static FunctionPassCtor getDefault() {
+ return (FunctionPassCtor)Registry.getDefault();
+ }
+ static void setDefault(FunctionPassCtor C) {
+ Registry.setDefault((MachinePassCtor)C);
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+
+};
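+
+// A registration sketch (editorial; not part of the original header): an
+// allocator makes itself selectable via -regalloc=... by defining a static
+// RegisterRegAlloc object. createLinearScanRegisterAllocator is the factory
+// declared in llvm/CodeGen/Passes.h.
+//
+//   static RegisterRegAlloc
+//     linearScanRegAlloc("linearscan", "linear scan register allocator",
+//                        createLinearScanRegisterAllocator);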
+
+} // end namespace llvm
+
+
+#endif
diff --git a/include/llvm/CodeGen/RegisterCoalescer.h b/include/llvm/CodeGen/RegisterCoalescer.h
new file mode 100644
index 0000000..79dd9db
--- /dev/null
+++ b/include/llvm/CodeGen/RegisterCoalescer.h
@@ -0,0 +1,154 @@
+//===-- RegisterCoalescer.h - Register Coalescing Interface ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the abstract interface for register coalescers,
+// allowing them to interact with and query register allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/System/IncludeFile.h"
+#include "llvm/CodeGen/LiveInterval.h"
+#include "llvm/ADT/SmallPtrSet.h"
+
+#ifndef LLVM_CODEGEN_REGISTER_COALESCER_H
+#define LLVM_CODEGEN_REGISTER_COALESCER_H
+
+namespace llvm {
+
+ class MachineFunction;
+ class RegallocQuery;
+ class AnalysisUsage;
+ class MachineInstr;
+
+ /// An abstract interface for register coalescers. Coalescers must
+ /// implement this interface to be part of the coalescer analysis
+ /// group.
+ class RegisterCoalescer {
+ public:
+ static char ID; // Class identification, replacement for typeinfo
+ RegisterCoalescer() {}
+ virtual ~RegisterCoalescer(); // We want to be subclassed
+
+ /// Run the coalescer on this function, providing interference
+ /// data to query. Return whether we removed any copies.
+ virtual bool coalesceFunction(MachineFunction &mf,
+ RegallocQuery &ifd) = 0;
+
+ /// Reset state. Can be used to allow a coalescer run by
+ /// PassManager to be run again by the register allocator.
+ virtual void reset(MachineFunction &mf) {};
+
+ /// Register allocators must call this from their own
+ /// getAnalysisUsage to cover the case where the coalescer is not
+ /// a Pass in the proper sense and isn't managed by PassManager.
+ /// PassManager needs to know which analyses to make available and
+ /// which to invalidate when running the register allocator or any
+ /// pass that might call coalescing. The long-term solution is to
+ /// allow hierarchies of PassManagers.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {};
+ };
+
+ /// An abstract interface for register allocators to interact with
+ /// coalescers
+ ///
+ /// Example:
+ ///
+ /// This is simply an example of how to use the RegallocQuery
+ /// interface. It is not meant to be used in production.
+ ///
+ /// class LinearScanRegallocQuery : public RegallocQuery {
+ /// private:
+ /// const LiveIntervals \&li;
+ ///
+ /// public:
+ /// LinearScanRegallocQuery(LiveIntervals &intervals)
+ /// : li(intervals) {};
+ ///
+ /// /// This is pretty slow and conservative, but since linear scan
+ /// /// allocation doesn't pre-compute interference information it's
+ /// /// the best we can do. Coalescers are always free to ignore this
+ /// /// and implement their own discovery strategy. See
+ /// /// SimpleRegisterCoalescing for an example.
+ /// void getInterferences(IntervalSet &interferences,
+ /// const LiveInterval &a) const {
+ /// for(LiveIntervals::const_iterator iv = li.begin(),
+ /// ivend = li.end();
+ /// iv != ivend;
+ /// ++iv) {
+ /// if (interfere(a, iv->second)) {
+ /// interferences.insert(&iv->second);
+ /// }
+ /// }
+ /// };
+ ///
+ /// /// This is *really* slow and stupid. See above.
+ /// int getNumberOfInterferences(const LiveInterval &a) const {
+ /// IntervalSet intervals;
+ /// getInterferences(intervals, a);
+ /// return intervals.size();
+ /// };
+ /// };
+ ///
+ /// In the allocator:
+ ///
+ /// RegisterCoalescer &coalescer = getAnalysis<RegisterCoalescer>();
+ ///
+ /// // We don't reset the coalescer so if it's already been run this
+ /// // takes almost no time.
+ /// LinearScanRegallocQuery ifd(*li_);
+ /// coalescer.coalesceFunction(fn, ifd);
+ ///
+ class RegallocQuery {
+ public:
+ typedef SmallPtrSet<const LiveInterval *, 8> IntervalSet;
+
+ virtual ~RegallocQuery() {};
+
+ /// Return whether two live ranges interfere.
+ virtual bool interfere(const LiveInterval &a,
+ const LiveInterval &b) const {
+ // A naive test
+ return a.overlaps(b);
+ };
+
+ /// Return the set of intervals that interfere with this one.
+ virtual void getInterferences(IntervalSet &interferences,
+ const LiveInterval &a) const = 0;
+
+ /// This can often be cheaper than actually returning the
+ /// interferences.
+ virtual int getNumberOfInterferences(const LiveInterval &a) const = 0;
+
+ /// Make any data structure updates necessary to reflect
+ /// coalescing or other modifications.
+ virtual void updateDataForMerge(const LiveInterval &a,
+ const LiveInterval &b,
+ const MachineInstr &copy) {};
+
+ /// Allow the register allocator to communicate when it doesn't
+ /// want a copy coalesced. This may be due to assumptions made by
+ /// the allocator about various invariants and so this question is
+ /// a matter of legality, not performance. Performance decisions
+ /// about which copies to coalesce should be made by the
+ /// coalescer.
+ virtual bool isLegalToCoalesce(const MachineInstr &inst) const {
+ return true;
+ }
+ };
+}
+
+// Because of the way .a files work, we must force the SimpleRC
+// implementation to be pulled in if the RegisterCoalescing header is
+// included. Otherwise we run the risk of RegisterCoalescing being
+// used, but the default implementation not being linked into the tool
+// that uses it.
+FORCE_DEFINING_FILE_TO_BE_LINKED(RegisterCoalescer)
+FORCE_DEFINING_FILE_TO_BE_LINKED(SimpleRegisterCoalescing)
+
+#endif
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h
new file mode 100644
index 0000000..a4ed012
--- /dev/null
+++ b/include/llvm/CodeGen/RegisterScavenging.h
@@ -0,0 +1,178 @@
+//===-- RegisterScavenging.h - Machine register scavenging ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the machine register scavenger class. It can provide
+// information such as unused registers at any point in a machine basic block.
+// It also provides a mechanism to make registers available by evicting them
+// to spill slots.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_REGISTER_SCAVENGING_H
+#define LLVM_CODEGEN_REGISTER_SCAVENGING_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+
+class MachineRegisterInfo;
+class TargetRegisterInfo;
+class TargetInstrInfo;
+class TargetRegisterClass;
+
+class RegScavenger {
+ const TargetRegisterInfo *TRI;
+ const TargetInstrInfo *TII;
+ MachineRegisterInfo* MRI;
+ MachineBasicBlock *MBB;
+ MachineBasicBlock::iterator MBBI;
+ unsigned NumPhysRegs;
+
+ /// Tracking - True if RegScavenger is currently tracking the liveness of
+ /// registers.
+ bool Tracking;
+
+ /// ScavengingFrameIndex - Special spill slot used for scavenging a register
+ /// post register allocation.
+ int ScavengingFrameIndex;
+
+  /// ScavengedReg - If non-zero, the specific register is currently being
+ /// scavenged. That is, it is spilled to the special scavenging stack slot.
+ unsigned ScavengedReg;
+
+ /// ScavengedRC - Register class of the scavenged register.
+ ///
+ const TargetRegisterClass *ScavengedRC;
+
+ /// ScavengeRestore - Instruction that restores the scavenged register from
+ /// stack.
+ const MachineInstr *ScavengeRestore;
+
+  /// CalleeSavedRegs - A bitvector of callee saved registers for the target.
+ ///
+ BitVector CalleeSavedRegs;
+
+ /// ReservedRegs - A bitvector of reserved registers.
+ ///
+ BitVector ReservedRegs;
+
+ /// RegsAvailable - The current state of all the physical registers immediately
+  /// before MBBI. One bit per physical register. If a bit is set, the register
+  /// is available; if unset, the register is currently in use.
+ BitVector RegsAvailable;
+
+  /// ImplicitDefed - If a bit is set, the register is defined by an
+  /// implicit_def instruction and can therefore be clobbered at will.
+ BitVector ImplicitDefed;
+
+ /// CurrDist - Distance from MBB entry to the current instruction MBBI.
+ ///
+ unsigned CurrDist;
+
+  /// DistanceMap - Keep track of the distance of an MI from the start of the
+ /// current basic block.
+ DenseMap<MachineInstr*, unsigned> DistanceMap;
+
+public:
+ RegScavenger()
+ : MBB(NULL), NumPhysRegs(0), Tracking(false),
+ ScavengingFrameIndex(-1), ScavengedReg(0), ScavengedRC(NULL) {}
+
+  /// enterBasicBlock - Start tracking liveness from the beginning of the
+  /// specified basic block.
+ void enterBasicBlock(MachineBasicBlock *mbb);
+
+ /// forward / backward - Move the internal MBB iterator and update register
+ /// states.
+ void forward();
+ void backward();
+
+ /// forward / backward - Move the internal MBB iterator and update register
+  /// states until it has processed the specified iterator.
+ void forward(MachineBasicBlock::iterator I) {
+ if (!Tracking && MBB->begin() != I) forward();
+ while (MBBI != I) forward();
+ }
+ void backward(MachineBasicBlock::iterator I) {
+ while (MBBI != I) backward();
+ }
+
+ /// skipTo - Move the internal MBB iterator but do not update register states.
+ ///
+ void skipTo(MachineBasicBlock::iterator I) { MBBI = I; }
+
+ /// isReserved - Returns true if a register is reserved. It is never "unused".
+ bool isReserved(unsigned Reg) const { return ReservedRegs[Reg]; }
+
+  /// isUsed / isUnused - Test whether a register is currently being used.
+ ///
+ bool isUsed(unsigned Reg) const { return !RegsAvailable[Reg]; }
+ bool isUnused(unsigned Reg) const { return RegsAvailable[Reg]; }
+
+ bool isImplicitlyDefined(unsigned Reg) const { return ImplicitDefed[Reg]; }
+
+  /// getRegsUsed - Fill 'used' with all registers currently in use.
+ void getRegsUsed(BitVector &used, bool includeReserved);
+
+ /// setUsed / setUnused - Mark the state of one or a number of registers.
+ ///
+ void setUsed(unsigned Reg, bool ImpDef = false);
+ void setUsed(BitVector &Regs, bool ImpDef = false) {
+ RegsAvailable &= ~Regs;
+ if (ImpDef)
+ ImplicitDefed |= Regs;
+ else
+ ImplicitDefed &= ~Regs;
+ }
+ void setUnused(unsigned Reg, const MachineInstr *MI);
+ void setUnused(BitVector &Regs) {
+ RegsAvailable |= Regs;
+ ImplicitDefed &= ~Regs;
+ }
+
+  /// FindUnusedReg - Find an unused register of the specified register class
+  /// from the specified set of registers. It returns 0 if none is found.
+ unsigned FindUnusedReg(const TargetRegisterClass *RegClass,
+ const BitVector &Candidates) const;
+
+  /// FindUnusedReg - Find an unused register of the specified register class.
+  /// Exclude callee saved registers if directed. It returns 0 if none is found.
+ unsigned FindUnusedReg(const TargetRegisterClass *RegClass,
+ bool ExCalleeSaved = false) const;
+
+  /// setScavengingFrameIndex / getScavengingFrameIndex - setter and getter for
+ /// ScavengingFrameIndex.
+ void setScavengingFrameIndex(int FI) { ScavengingFrameIndex = FI; }
+ int getScavengingFrameIndex() const { return ScavengingFrameIndex; }
+
+  /// scavengeRegister - Make a register of the specified register class
+  /// available and do the appropriate bookkeeping. SPAdj is the stack
+  /// adjustment due to the call frame; it is passed along to
+  /// eliminateFrameIndex().
+ /// Returns the scavenged register.
+ unsigned scavengeRegister(const TargetRegisterClass *RegClass,
+ MachineBasicBlock::iterator I, int SPAdj);
+ unsigned scavengeRegister(const TargetRegisterClass *RegClass, int SPAdj) {
+ return scavengeRegister(RegClass, MBBI, SPAdj);
+ }
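+
+  // Illustrative use (a sketch, not part of this interface): a target's
+  // eliminateFrameIndex() implementation might draft a temporary register
+  // like this, assuming RS is the scavenger it was handed, RC is the needed
+  // register class, I is the current instruction, and SPAdj is the current
+  // stack adjustment:
+  //
+  //   unsigned Tmp = RS->FindUnusedReg(&RC, /*ExCalleeSaved=*/true);
+  //   if (Tmp == 0)                                // nothing free: spill one
+  //     Tmp = RS->scavengeRegister(&RC, I, SPAdj);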
+
+private:
+  /// restoreScavengedReg - Restore the scavenged register by loading it back
+  /// from the emergency spill slot. Mark it used.
+ void restoreScavengedReg();
+
+ MachineInstr *findFirstUse(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator I, unsigned Reg,
+ unsigned &Dist);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
new file mode 100644
index 0000000..dd76fcc
--- /dev/null
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -0,0 +1,255 @@
+//===-- CodeGen/RuntimeLibcall.h - Runtime Library Calls --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enum representing the list of runtime library calls
+// the backend may emit during code generation, and also some helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_RUNTIMELIBCALLS_H
+#define LLVM_CODEGEN_RUNTIMELIBCALLS_H
+
+#include "llvm/CodeGen/ValueTypes.h"
+
+namespace llvm {
+namespace RTLIB {
+ /// RTLIB::Libcall enum - This enum defines all of the runtime library calls
+ /// the backend can emit. The various long double types cannot be merged,
+ /// because 80-bit library functions use "xf" and 128-bit use "tf".
+ ///
+ /// When adding PPCF128 functions here, note that their names generally need
+ /// to be overridden for Darwin with the xxx$LDBL128 form. See
+ /// PPCISelLowering.cpp.
+ ///
+ enum Libcall {
+ // Integer
+ SHL_I16,
+ SHL_I32,
+ SHL_I64,
+ SHL_I128,
+ SRL_I16,
+ SRL_I32,
+ SRL_I64,
+ SRL_I128,
+ SRA_I16,
+ SRA_I32,
+ SRA_I64,
+ SRA_I128,
+ MUL_I16,
+ MUL_I32,
+ MUL_I64,
+ MUL_I128,
+ SDIV_I16,
+ SDIV_I32,
+ SDIV_I64,
+ SDIV_I128,
+ UDIV_I16,
+ UDIV_I32,
+ UDIV_I64,
+ UDIV_I128,
+ SREM_I16,
+ SREM_I32,
+ SREM_I64,
+ SREM_I128,
+ UREM_I16,
+ UREM_I32,
+ UREM_I64,
+ UREM_I128,
+ NEG_I32,
+ NEG_I64,
+
+ // FLOATING POINT
+ ADD_F32,
+ ADD_F64,
+ ADD_F80,
+ ADD_PPCF128,
+ SUB_F32,
+ SUB_F64,
+ SUB_F80,
+ SUB_PPCF128,
+ MUL_F32,
+ MUL_F64,
+ MUL_F80,
+ MUL_PPCF128,
+ DIV_F32,
+ DIV_F64,
+ DIV_F80,
+ DIV_PPCF128,
+ REM_F32,
+ REM_F64,
+ REM_F80,
+ REM_PPCF128,
+ POWI_F32,
+ POWI_F64,
+ POWI_F80,
+ POWI_PPCF128,
+ SQRT_F32,
+ SQRT_F64,
+ SQRT_F80,
+ SQRT_PPCF128,
+ LOG_F32,
+ LOG_F64,
+ LOG_F80,
+ LOG_PPCF128,
+ LOG2_F32,
+ LOG2_F64,
+ LOG2_F80,
+ LOG2_PPCF128,
+ LOG10_F32,
+ LOG10_F64,
+ LOG10_F80,
+ LOG10_PPCF128,
+ EXP_F32,
+ EXP_F64,
+ EXP_F80,
+ EXP_PPCF128,
+ EXP2_F32,
+ EXP2_F64,
+ EXP2_F80,
+ EXP2_PPCF128,
+ SIN_F32,
+ SIN_F64,
+ SIN_F80,
+ SIN_PPCF128,
+ COS_F32,
+ COS_F64,
+ COS_F80,
+ COS_PPCF128,
+ POW_F32,
+ POW_F64,
+ POW_F80,
+ POW_PPCF128,
+ CEIL_F32,
+ CEIL_F64,
+ CEIL_F80,
+ CEIL_PPCF128,
+ TRUNC_F32,
+ TRUNC_F64,
+ TRUNC_F80,
+ TRUNC_PPCF128,
+ RINT_F32,
+ RINT_F64,
+ RINT_F80,
+ RINT_PPCF128,
+ NEARBYINT_F32,
+ NEARBYINT_F64,
+ NEARBYINT_F80,
+ NEARBYINT_PPCF128,
+ FLOOR_F32,
+ FLOOR_F64,
+ FLOOR_F80,
+ FLOOR_PPCF128,
+
+ // CONVERSION
+ FPEXT_F32_F64,
+ FPROUND_F64_F32,
+ FPROUND_F80_F32,
+ FPROUND_PPCF128_F32,
+ FPROUND_F80_F64,
+ FPROUND_PPCF128_F64,
+ FPTOSINT_F32_I32,
+ FPTOSINT_F32_I64,
+ FPTOSINT_F32_I128,
+ FPTOSINT_F64_I32,
+ FPTOSINT_F64_I64,
+ FPTOSINT_F64_I128,
+ FPTOSINT_F80_I32,
+ FPTOSINT_F80_I64,
+ FPTOSINT_F80_I128,
+ FPTOSINT_PPCF128_I32,
+ FPTOSINT_PPCF128_I64,
+ FPTOSINT_PPCF128_I128,
+ FPTOUINT_F32_I32,
+ FPTOUINT_F32_I64,
+ FPTOUINT_F32_I128,
+ FPTOUINT_F64_I32,
+ FPTOUINT_F64_I64,
+ FPTOUINT_F64_I128,
+ FPTOUINT_F80_I32,
+ FPTOUINT_F80_I64,
+ FPTOUINT_F80_I128,
+ FPTOUINT_PPCF128_I32,
+ FPTOUINT_PPCF128_I64,
+ FPTOUINT_PPCF128_I128,
+ SINTTOFP_I32_F32,
+ SINTTOFP_I32_F64,
+ SINTTOFP_I32_F80,
+ SINTTOFP_I32_PPCF128,
+ SINTTOFP_I64_F32,
+ SINTTOFP_I64_F64,
+ SINTTOFP_I64_F80,
+ SINTTOFP_I64_PPCF128,
+ SINTTOFP_I128_F32,
+ SINTTOFP_I128_F64,
+ SINTTOFP_I128_F80,
+ SINTTOFP_I128_PPCF128,
+ UINTTOFP_I32_F32,
+ UINTTOFP_I32_F64,
+ UINTTOFP_I32_F80,
+ UINTTOFP_I32_PPCF128,
+ UINTTOFP_I64_F32,
+ UINTTOFP_I64_F64,
+ UINTTOFP_I64_F80,
+ UINTTOFP_I64_PPCF128,
+ UINTTOFP_I128_F32,
+ UINTTOFP_I128_F64,
+ UINTTOFP_I128_F80,
+ UINTTOFP_I128_PPCF128,
+
+ // COMPARISON
+ OEQ_F32,
+ OEQ_F64,
+ UNE_F32,
+ UNE_F64,
+ OGE_F32,
+ OGE_F64,
+ OLT_F32,
+ OLT_F64,
+ OLE_F32,
+ OLE_F64,
+ OGT_F32,
+ OGT_F64,
+ UO_F32,
+ UO_F64,
+ O_F32,
+ O_F64,
+
+ // EXCEPTION HANDLING
+ UNWIND_RESUME,
+
+ UNKNOWN_LIBCALL
+ };
+
+ /// getFPEXT - Return the FPEXT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPEXT(MVT OpVT, MVT RetVT);
+
+ /// getFPROUND - Return the FPROUND_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPROUND(MVT OpVT, MVT RetVT);
+
+ /// getFPTOSINT - Return the FPTOSINT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPTOSINT(MVT OpVT, MVT RetVT);
+
+ /// getFPTOUINT - Return the FPTOUINT_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getFPTOUINT(MVT OpVT, MVT RetVT);
+
+ /// getSINTTOFP - Return the SINTTOFP_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getSINTTOFP(MVT OpVT, MVT RetVT);
+
+ /// getUINTTOFP - Return the UINTTOFP_*_* value for the given types, or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getUINTTOFP(MVT OpVT, MVT RetVT);
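+
+  // Illustrative query (a sketch): a target's lowering code could map an
+  // fp-round from f64 to f32 onto the corresponding library call, assuming a
+  // TargetLowering object TLI is in scope:
+  //
+  //   RTLIB::Libcall LC = RTLIB::getFPROUND(MVT::f64, MVT::f32);
+  //   if (LC != RTLIB::UNKNOWN_LIBCALL) {
+  //     const char *Name = TLI.getLibcallName(LC);   // e.g. "__truncdfsf2"
+  //     // ... emit a call to Name ...
+  //   }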
+}
+}
+
+#endif
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
new file mode 100644
index 0000000..237d491
--- /dev/null
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -0,0 +1,666 @@
+//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleDAG class, which is used as the common
+// base class for instruction schedulers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEDAG_H
+#define LLVM_CODEGEN_SCHEDULEDAG_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+namespace llvm {
+ class SUnit;
+ class MachineConstantPool;
+ class MachineFunction;
+ class MachineModuleInfo;
+ class MachineRegisterInfo;
+ class MachineInstr;
+ class TargetRegisterInfo;
+ class ScheduleDAG;
+ class SDNode;
+ class TargetInstrInfo;
+ class TargetInstrDesc;
+ class TargetLowering;
+ class TargetMachine;
+ class TargetRegisterClass;
+ template<class Graph> class GraphWriter;
+
+ /// SDep - Scheduling dependency. This represents one direction of an
+ /// edge in the scheduling DAG.
+ class SDep {
+ public:
+ /// Kind - These are the different kinds of scheduling dependencies.
+ enum Kind {
+ Data, ///< Regular data dependence (aka true-dependence).
+      Anti,         ///< A register anti-dependence (aka WAR).
+ Output, ///< A register output-dependence (aka WAW).
+ Order ///< Any other ordering dependency.
+ };
+
+ private:
+ /// Dep - A pointer to the depending/depended-on SUnit, and an enum
+ /// indicating the kind of the dependency.
+ PointerIntPair<SUnit *, 2, Kind> Dep;
+
+ /// Contents - A union discriminated by the dependence kind.
+ union {
+ /// Reg - For Data, Anti, and Output dependencies, the associated
+ /// register. For Data dependencies that don't currently have a register
+ /// assigned, this is set to zero.
+ unsigned Reg;
+
+ /// Order - Additional information about Order dependencies.
+ struct {
+ /// isNormalMemory - True if both sides of the dependence
+ /// access memory in non-volatile and fully modeled ways.
+ bool isNormalMemory : 1;
+
+ /// isMustAlias - True if both sides of the dependence are known to
+ /// access the same memory.
+ bool isMustAlias : 1;
+
+ /// isArtificial - True if this is an artificial dependency, meaning
+ /// it is not necessary for program correctness, and may be safely
+ /// deleted if necessary.
+ bool isArtificial : 1;
+ } Order;
+ } Contents;
+
+ /// Latency - The time associated with this edge. Often this is just
+    /// the value of the Latency field of the predecessor; however, advanced
+ /// models may provide additional information about specific edges.
+ unsigned Latency;
+
+ public:
+ /// SDep - Construct a null SDep. This is only for use by container
+ /// classes which require default constructors. SUnits may not
+ /// have null SDep edges.
+ SDep() : Dep(0, Data) {}
+
+ /// SDep - Construct an SDep with the specified values.
+ SDep(SUnit *S, Kind kind, unsigned latency = 1, unsigned Reg = 0,
+ bool isNormalMemory = false, bool isMustAlias = false,
+ bool isArtificial = false)
+ : Dep(S, kind), Contents(), Latency(latency) {
+ switch (kind) {
+ case Anti:
+ case Output:
+ assert(Reg != 0 &&
+ "SDep::Anti and SDep::Output must use a non-zero Reg!");
+ // fall through
+ case Data:
+ assert(!isMustAlias && "isMustAlias only applies with SDep::Order!");
+ assert(!isArtificial && "isArtificial only applies with SDep::Order!");
+ Contents.Reg = Reg;
+ break;
+ case Order:
+ assert(Reg == 0 && "Reg given for non-register dependence!");
+ Contents.Order.isNormalMemory = isNormalMemory;
+ Contents.Order.isMustAlias = isMustAlias;
+ Contents.Order.isArtificial = isArtificial;
+ break;
+ }
+ }
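+
+    // Illustrative construction (a sketch): typical edges a scheduler might
+    // build, assuming PredSU is the predecessor SUnit and Reg is a physical
+    // register number:
+    //
+    //   SDep(PredSU, SDep::Data, /*latency=*/1, Reg);    // register data dep
+    //   SDep(PredSU, SDep::Order, /*latency=*/0, /*Reg=*/0,
+    //        /*isNormalMemory=*/true);                   // memory ordering edge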
+
+ bool operator==(const SDep &Other) const {
+ if (Dep != Other.Dep || Latency != Other.Latency) return false;
+ switch (Dep.getInt()) {
+ case Data:
+ case Anti:
+ case Output:
+ return Contents.Reg == Other.Contents.Reg;
+ case Order:
+ return Contents.Order.isNormalMemory ==
+ Other.Contents.Order.isNormalMemory &&
+ Contents.Order.isMustAlias == Other.Contents.Order.isMustAlias &&
+ Contents.Order.isArtificial == Other.Contents.Order.isArtificial;
+ }
+ assert(0 && "Invalid dependency kind!");
+ return false;
+ }
+
+ bool operator!=(const SDep &Other) const {
+ return !operator==(Other);
+ }
+
+ /// getLatency - Return the latency value for this edge, which roughly
+ /// means the minimum number of cycles that must elapse between the
+ /// predecessor and the successor, given that they have this edge
+ /// between them.
+ unsigned getLatency() const {
+ return Latency;
+ }
+
+    /// getSUnit - Return the SUnit to which this edge points.
+ SUnit *getSUnit() const {
+ return Dep.getPointer();
+ }
+
+    /// setSUnit - Assign the SUnit to which this edge points.
+ void setSUnit(SUnit *SU) {
+ Dep.setPointer(SU);
+ }
+
+ /// getKind - Return an enum value representing the kind of the dependence.
+ Kind getKind() const {
+ return Dep.getInt();
+ }
+
+ /// isCtrl - Shorthand for getKind() != SDep::Data.
+ bool isCtrl() const {
+ return getKind() != Data;
+ }
+
+ /// isNormalMemory - Test if this is an Order dependence between two
+ /// memory accesses where both sides of the dependence access memory
+ /// in non-volatile and fully modeled ways.
+ bool isNormalMemory() const {
+ return getKind() == Order && Contents.Order.isNormalMemory;
+ }
+
+ /// isMustAlias - Test if this is an Order dependence that is marked
+ /// as "must alias", meaning that the SUnits at either end of the edge
+ /// have a memory dependence on a known memory location.
+ bool isMustAlias() const {
+ return getKind() == Order && Contents.Order.isMustAlias;
+ }
+
+ /// isArtificial - Test if this is an Order dependence that is marked
+ /// as "artificial", meaning it isn't necessary for correctness.
+ bool isArtificial() const {
+ return getKind() == Order && Contents.Order.isArtificial;
+ }
+
+ /// isAssignedRegDep - Test if this is a Data dependence that is
+ /// associated with a register.
+ bool isAssignedRegDep() const {
+ return getKind() == Data && Contents.Reg != 0;
+ }
+
+ /// getReg - Return the register associated with this edge. This is
+ /// only valid on Data, Anti, and Output edges. On Data edges, this
+ /// value may be zero, meaning there is no associated register.
+ unsigned getReg() const {
+ assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+ "getReg called on non-register dependence edge!");
+ return Contents.Reg;
+ }
+
+ /// setReg - Assign the associated register for this edge. This is
+ /// only valid on Data, Anti, and Output edges. On Anti and Output
+ /// edges, this value must not be zero. On Data edges, the value may
+ /// be zero, which would mean that no specific register is associated
+ /// with this edge.
+ void setReg(unsigned Reg) {
+ assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
+ "setReg called on non-register dependence edge!");
+ assert((getKind() != Anti || Reg != 0) &&
+ "SDep::Anti edge cannot use the zero register!");
+ assert((getKind() != Output || Reg != 0) &&
+ "SDep::Output edge cannot use the zero register!");
+ Contents.Reg = Reg;
+ }
+ };
+
+ /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
+ class SUnit {
+ private:
+ SDNode *Node; // Representative node.
+ MachineInstr *Instr; // Alternatively, a MachineInstr.
+ public:
+ SUnit *OrigNode; // If not this, the node from which
+ // this node was cloned.
+
+    // Preds/Succs - The SUnits before/after us in the graph. Each edge is an
+    // SDep, which records the kind of dependence (data, anti, output, or
+    // order) along with any associated register and latency.
+ SmallVector<SDep, 4> Preds; // All sunit predecessors.
+ SmallVector<SDep, 4> Succs; // All sunit successors.
+
+ typedef SmallVector<SDep, 4>::iterator pred_iterator;
+ typedef SmallVector<SDep, 4>::iterator succ_iterator;
+ typedef SmallVector<SDep, 4>::const_iterator const_pred_iterator;
+ typedef SmallVector<SDep, 4>::const_iterator const_succ_iterator;
+
+ unsigned NodeNum; // Entry # of node in the node vector.
+ unsigned NodeQueueId; // Queue id of node.
+ unsigned short Latency; // Node latency.
+ short NumPreds; // # of SDep::Data preds.
+    short NumSuccs;                     // # of SDep::Data succs.
+ short NumPredsLeft; // # of preds not scheduled.
+ short NumSuccsLeft; // # of succs not scheduled.
+ bool isTwoAddress : 1; // Is a two-address instruction.
+ bool isCommutable : 1; // Is a commutable instruction.
+ bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
+ bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
+ bool isPending : 1; // True once pending.
+ bool isAvailable : 1; // True once available.
+ bool isScheduled : 1; // True once scheduled.
+ bool isScheduleHigh : 1; // True if preferable to schedule high.
+ bool isCloned : 1; // True if this node has been cloned.
+ private:
+ bool isDepthCurrent : 1; // True if Depth is current.
+ bool isHeightCurrent : 1; // True if Height is current.
+ unsigned Depth; // Node depth.
+ unsigned Height; // Node height.
+ public:
+    const TargetRegisterClass *CopyDstRC; // Non-null if this is a special copy node.
+ const TargetRegisterClass *CopySrcRC;
+
+ /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
+ /// an SDNode and any nodes flagged to it.
+ SUnit(SDNode *node, unsigned nodenum)
+ : Node(node), Instr(0), OrigNode(0), NodeNum(nodenum), NodeQueueId(0),
+ Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
+ isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
+ hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isCloned(false),
+ isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
+ CopyDstRC(NULL), CopySrcRC(NULL) {}
+
+ /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
+ /// a MachineInstr.
+ SUnit(MachineInstr *instr, unsigned nodenum)
+ : Node(0), Instr(instr), OrigNode(0), NodeNum(nodenum), NodeQueueId(0),
+ Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
+ isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
+ hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isCloned(false),
+ isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
+ CopyDstRC(NULL), CopySrcRC(NULL) {}
+
+ /// SUnit - Construct a placeholder SUnit.
+ SUnit()
+ : Node(0), Instr(0), OrigNode(0), NodeNum(~0u), NodeQueueId(0),
+ Latency(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0), NumSuccsLeft(0),
+ isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
+ hasPhysRegClobbers(false),
+ isPending(false), isAvailable(false), isScheduled(false),
+ isScheduleHigh(false), isCloned(false),
+ isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
+ CopyDstRC(NULL), CopySrcRC(NULL) {}
+
+ /// setNode - Assign the representative SDNode for this SUnit.
+ /// This may be used during pre-regalloc scheduling.
+ void setNode(SDNode *N) {
+ assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
+ Node = N;
+ }
+
+ /// getNode - Return the representative SDNode for this SUnit.
+ /// This may be used during pre-regalloc scheduling.
+ SDNode *getNode() const {
+ assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
+ return Node;
+ }
+
+ /// setInstr - Assign the instruction for the SUnit.
+ /// This may be used during post-regalloc scheduling.
+ void setInstr(MachineInstr *MI) {
+ assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
+ Instr = MI;
+ }
+
+ /// getInstr - Return the representative MachineInstr for this SUnit.
+ /// This may be used during post-regalloc scheduling.
+ MachineInstr *getInstr() const {
+ assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
+ return Instr;
+ }
+
+ /// addPred - This adds the specified edge as a pred of the current node if
+ /// not already. It also adds the current node as a successor of the
+ /// specified node.
+ void addPred(const SDep &D);
+
+ /// removePred - This removes the specified edge as a pred of the current
+ /// node if it exists. It also removes the current node as a successor of
+ /// the specified node.
+ void removePred(const SDep &D);
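+
+    // For example (a sketch): assuming SU and PredSU are SUnits in the same
+    // DAG, an ordering edge from PredSU to SU could be added with
+    //   SU->addPred(SDep(PredSU, SDep::Order, /*latency=*/1));
+    // and later dropped again by passing an equal SDep to removePred().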
+
+ /// getDepth - Return the depth of this node, which is the length of the
+    /// maximum path up to any node which has no predecessors.
+ unsigned getDepth() const {
+ if (!isDepthCurrent) const_cast<SUnit *>(this)->ComputeDepth();
+ return Depth;
+ }
+
+ /// getHeight - Return the height of this node, which is the length of the
+    /// maximum path down to any node which has no successors.
+ unsigned getHeight() const {
+ if (!isHeightCurrent) const_cast<SUnit *>(this)->ComputeHeight();
+ return Height;
+ }
+
+ /// setDepthToAtLeast - If NewDepth is greater than this node's depth
+ /// value, set it to be the new depth value. This also recursively
+ /// marks successor nodes dirty.
+ void setDepthToAtLeast(unsigned NewDepth);
+
+    /// setHeightToAtLeast - If NewHeight is greater than this node's height
+    /// value, set it to be the new height value. This also recursively
+    /// marks predecessor nodes dirty.
+ void setHeightToAtLeast(unsigned NewHeight);
+
+ /// setDepthDirty - Set a flag in this node to indicate that its
+ /// stored Depth value will require recomputation the next time
+ /// getDepth() is called.
+ void setDepthDirty();
+
+ /// setHeightDirty - Set a flag in this node to indicate that its
+ /// stored Height value will require recomputation the next time
+ /// getHeight() is called.
+ void setHeightDirty();
+
+ /// isPred - Test if node N is a predecessor of this node.
+ bool isPred(SUnit *N) {
+ for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
+ if (Preds[i].getSUnit() == N)
+ return true;
+ return false;
+ }
+
+ /// isSucc - Test if node N is a successor of this node.
+ bool isSucc(SUnit *N) {
+ for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
+ if (Succs[i].getSUnit() == N)
+ return true;
+ return false;
+ }
+
+ void dump(const ScheduleDAG *G) const;
+ void dumpAll(const ScheduleDAG *G) const;
+ void print(raw_ostream &O, const ScheduleDAG *G) const;
+
+ private:
+ void ComputeDepth();
+ void ComputeHeight();
+ };
+
+ //===--------------------------------------------------------------------===//
+ /// SchedulingPriorityQueue - This interface is used to plug different
+  /// priority computation algorithms into the list scheduler. It implements
+ /// the interface of a standard priority queue, where nodes are inserted in
+ /// arbitrary order and returned in priority order. The computation of the
+ /// priority and the representation of the queue are totally up to the
+ /// implementation to decide.
+ ///
+ class SchedulingPriorityQueue {
+ public:
+ virtual ~SchedulingPriorityQueue() {}
+
+ virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
+ virtual void addNode(const SUnit *SU) = 0;
+ virtual void updateNode(const SUnit *SU) = 0;
+ virtual void releaseState() = 0;
+
+ virtual unsigned size() const = 0;
+ virtual bool empty() const = 0;
+ virtual void push(SUnit *U) = 0;
+
+ virtual void push_all(const std::vector<SUnit *> &Nodes) = 0;
+ virtual SUnit *pop() = 0;
+
+ virtual void remove(SUnit *SU) = 0;
+
+ /// ScheduledNode - As each node is scheduled, this method is invoked. This
+ /// allows the priority function to adjust the priority of related
+ /// unscheduled nodes, for example.
+ ///
+ virtual void ScheduledNode(SUnit *) {}
+
+ virtual void UnscheduledNode(SUnit *) {}
+ };
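+
+  // A minimal FIFO sketch of this interface (illustrative only; the in-tree
+  // schedulers supply real priority functions). It shows the shape of a
+  // conforming implementation:
+  //
+  //   struct FIFOQueue : public SchedulingPriorityQueue {
+  //     std::vector<SUnit*> Queue;                 // needs <algorithm> too
+  //     void initNodes(std::vector<SUnit> &) {}
+  //     void addNode(const SUnit *) {}
+  //     void updateNode(const SUnit *) {}
+  //     void releaseState() { Queue.clear(); }
+  //     unsigned size() const { return Queue.size(); }
+  //     bool empty() const { return Queue.empty(); }
+  //     void push(SUnit *U) { Queue.push_back(U); }
+  //     void push_all(const std::vector<SUnit *> &Nodes) {
+  //       Queue.insert(Queue.end(), Nodes.begin(), Nodes.end());
+  //     }
+  //     SUnit *pop() {
+  //       SUnit *SU = Queue.front();
+  //       Queue.erase(Queue.begin());
+  //       return SU;
+  //     }
+  //     void remove(SUnit *SU) {
+  //       Queue.erase(std::find(Queue.begin(), Queue.end(), SU));
+  //     }
+  //   };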
+
+ class ScheduleDAG {
+ public:
+ MachineBasicBlock *BB; // The block in which to insert instructions.
+ MachineBasicBlock::iterator InsertPos;// The position to insert instructions.
+ const TargetMachine &TM; // Target processor
+ const TargetInstrInfo *TII; // Target instruction information
+ const TargetRegisterInfo *TRI; // Target processor register info
+ const TargetLowering *TLI; // Target lowering info
+ MachineFunction &MF; // Machine function
+ MachineRegisterInfo &MRI; // Virtual/real register map
+ MachineConstantPool *ConstPool; // Target constant pool
+ std::vector<SUnit*> Sequence; // The schedule. Null SUnit*'s
+ // represent noop instructions.
+ std::vector<SUnit> SUnits; // The scheduling units.
+ SUnit EntrySU; // Special node for the region entry.
+ SUnit ExitSU; // Special node for the region exit.
+
+ explicit ScheduleDAG(MachineFunction &mf);
+
+ virtual ~ScheduleDAG();
+
+ /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
+ /// using 'dot'.
+ ///
+ void viewGraph();
+
+ /// EmitSchedule - Insert MachineInstrs into the MachineBasicBlock
+ /// according to the order specified in Sequence.
+ ///
+ virtual MachineBasicBlock *EmitSchedule() = 0;
+
+ void dumpSchedule() const;
+
+ virtual void dumpNode(const SUnit *SU) const = 0;
+
+ /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
+ /// of the ScheduleDAG.
+ virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
+
+ /// addCustomGraphFeatures - Add custom features for a visualization of
+ /// the ScheduleDAG.
+ virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
+
+#ifndef NDEBUG
+ /// VerifySchedule - Verify that all SUnits were scheduled and that
+ /// their state is consistent.
+ void VerifySchedule(bool isBottomUp);
+#endif
+
+ protected:
+ /// Run - perform scheduling.
+ ///
+ void Run(MachineBasicBlock *bb, MachineBasicBlock::iterator insertPos);
+
+ /// BuildSchedGraph - Build SUnits and set up their Preds and Succs
+ /// to form the scheduling dependency graph.
+ ///
+ virtual void BuildSchedGraph() = 0;
+
+ /// ComputeLatency - Compute node latency.
+ ///
+ virtual void ComputeLatency(SUnit *SU) = 0;
+
+ /// Schedule - Order nodes according to selected style, filling
+ /// in the Sequence member.
+ ///
+ virtual void Schedule() = 0;
+
+ /// ForceUnitLatencies - Return true if all scheduling edges should be given a
+ /// latency value of one. The default is to return false; schedulers may
+ /// override this as needed.
+ virtual bool ForceUnitLatencies() const { return false; }
+
+ /// EmitNoop - Emit a noop instruction.
+ ///
+ void EmitNoop();
+
+ void AddMemOperand(MachineInstr *MI, const MachineMemOperand &MO);
+
+ void EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap);
+
+ private:
+ /// EmitLiveInCopy - Emit a copy for a live in physical register. If the
+    /// physical register has only a single copy use, then coalesce the copy
+ /// if possible.
+ void EmitLiveInCopy(MachineBasicBlock *MBB,
+ MachineBasicBlock::iterator &InsertPos,
+ unsigned VirtReg, unsigned PhysReg,
+ const TargetRegisterClass *RC,
+ DenseMap<MachineInstr*, unsigned> &CopyRegMap);
+
+ /// EmitLiveInCopies - If this is the first basic block in the function,
+ /// and if it has live ins that need to be copied into vregs, emit the
+ /// copies into the top of the block.
+ void EmitLiveInCopies(MachineBasicBlock *MBB);
+ };
+
+ class SUnitIterator : public forward_iterator<SUnit, ptrdiff_t> {
+ SUnit *Node;
+ unsigned Operand;
+
+ SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
+ public:
+ bool operator==(const SUnitIterator& x) const {
+ return Operand == x.Operand;
+ }
+ bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
+
+ const SUnitIterator &operator=(const SUnitIterator &I) {
+ assert(I.Node == Node && "Cannot assign iterators to two different nodes!");
+ Operand = I.Operand;
+ return *this;
+ }
+
+ pointer operator*() const {
+ return Node->Preds[Operand].getSUnit();
+ }
+ pointer operator->() const { return operator*(); }
+
+ SUnitIterator& operator++() { // Preincrement
+ ++Operand;
+ return *this;
+ }
+ SUnitIterator operator++(int) { // Postincrement
+ SUnitIterator tmp = *this; ++*this; return tmp;
+ }
+
+ static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
+ static SUnitIterator end (SUnit *N) {
+ return SUnitIterator(N, (unsigned)N->Preds.size());
+ }
+
+ unsigned getOperand() const { return Operand; }
+ const SUnit *getNode() const { return Node; }
+ /// isCtrlDep - Test if this is not an SDep::Data dependence.
+ bool isCtrlDep() const {
+ return getSDep().isCtrl();
+ }
+ bool isArtificialDep() const {
+ return getSDep().isArtificial();
+ }
+ const SDep &getSDep() const {
+ return Node->Preds[Operand];
+ }
+ };
+
+ template <> struct GraphTraits<SUnit*> {
+ typedef SUnit NodeType;
+ typedef SUnitIterator ChildIteratorType;
+ static inline NodeType *getEntryNode(SUnit *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return SUnitIterator::begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return SUnitIterator::end(N);
+ }
+ };
+
+ template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
+ typedef std::vector<SUnit>::iterator nodes_iterator;
+ static nodes_iterator nodes_begin(ScheduleDAG *G) {
+ return G->SUnits.begin();
+ }
+ static nodes_iterator nodes_end(ScheduleDAG *G) {
+ return G->SUnits.end();
+ }
+ };
+
+ /// ScheduleDAGTopologicalSort is a class that computes a topological
+ /// ordering for SUnits and provides methods for dynamically updating
+ /// the ordering as new edges are added.
+ ///
+ /// This allows a very fast implementation of IsReachable, for example.
+ ///
+ class ScheduleDAGTopologicalSort {
+ /// SUnits - A reference to the ScheduleDAG's SUnits.
+ std::vector<SUnit> &SUnits;
+
+ /// Index2Node - Maps topological index to the node number.
+ std::vector<int> Index2Node;
+ /// Node2Index - Maps the node number to its topological index.
+ std::vector<int> Node2Index;
+ /// Visited - a set of nodes visited during a DFS traversal.
+ BitVector Visited;
+
+ /// DFS - make a DFS traversal and mark all nodes affected by the
+ /// edge insertion. These nodes will later get new topological indexes
+ /// by means of the Shift method.
+ void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
+
+ /// Shift - reassign topological indexes for the nodes in the DAG
+ /// to preserve the topological ordering.
+ void Shift(BitVector& Visited, int LowerBound, int UpperBound);
+
+ /// Allocate - assign the topological index to the node n.
+ void Allocate(int n, int index);
+
+ public:
+ explicit ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits);
+
+ /// InitDAGTopologicalSorting - create the initial topological
+ /// ordering from the DAG to be scheduled.
+ void InitDAGTopologicalSorting();
+
+ /// IsReachable - Checks if SU is reachable from TargetSU.
+ bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
+
+ /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
+ /// will create a cycle.
+ bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
+
+    /// AddPred - Updates the topological ordering to accommodate an edge
+ /// to be added from SUnit X to SUnit Y.
+ void AddPred(SUnit *Y, SUnit *X);
+
+    /// RemovePred - Updates the topological ordering to accommodate the
+    /// removal of an edge from the specified node N, a predecessor of the
+    /// current node M.
+ void RemovePred(SUnit *M, SUnit *N);
+
+ typedef std::vector<int>::iterator iterator;
+ typedef std::vector<int>::const_iterator const_iterator;
+ iterator begin() { return Index2Node.begin(); }
+ const_iterator begin() const { return Index2Node.begin(); }
+ iterator end() { return Index2Node.end(); }
+ const_iterator end() const { return Index2Node.end(); }
+
+ typedef std::vector<int>::reverse_iterator reverse_iterator;
+ typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
+ reverse_iterator rbegin() { return Index2Node.rbegin(); }
+ const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
+ reverse_iterator rend() { return Index2Node.rend(); }
+ const_reverse_iterator rend() const { return Index2Node.rend(); }
+ };
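+
+  // Illustrative use (a sketch): before adding an artificial edge that makes
+  // X a predecessor of Y, a scheduler can keep the ordering consistent with
+  //
+  //   if (!Topo.WillCreateCycle(Y, X)) {
+  //     Y->addPred(SDep(X, SDep::Order, /*latency=*/1, /*Reg=*/0,
+  //                     /*isNormalMemory=*/false, /*isMustAlias=*/false,
+  //                     /*isArtificial=*/true));
+  //     Topo.AddPred(Y, X);
+  //   }
+  //
+  // assuming Topo was constructed over the same SUnits vector as Y and X.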
+}
+
+#endif
diff --git a/include/llvm/CodeGen/ScheduleHazardRecognizer.h b/include/llvm/CodeGen/ScheduleHazardRecognizer.h
new file mode 100644
index 0000000..369882d
--- /dev/null
+++ b/include/llvm/CodeGen/ScheduleHazardRecognizer.h
@@ -0,0 +1,66 @@
+//=- llvm/CodeGen/ScheduleHazardRecognizer.h - Scheduling Support -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScheduleHazardRecognizer class, which implements
+// hazard-avoidance heuristics for scheduling.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+#define LLVM_CODEGEN_SCHEDULEHAZARDRECOGNIZER_H
+
+namespace llvm {
+
+class SUnit;
+
+/// ScheduleHazardRecognizer - This determines whether or not an instruction
+/// can be issued this cycle, and whether or not a noop needs to be inserted
+/// to handle the hazard.
+class ScheduleHazardRecognizer {
+public:
+ virtual ~ScheduleHazardRecognizer();
+
+ enum HazardType {
+ NoHazard, // This instruction can be emitted at this cycle.
+ Hazard, // This instruction can't be emitted at this cycle.
+ NoopHazard // This instruction can't be emitted, and needs noops.
+ };
+
+ /// getHazardType - Return the hazard type of emitting this node. There are
+ /// three possible results. Either:
+ /// * NoHazard: it is legal to issue this instruction on this cycle.
+ /// * Hazard: issuing this instruction would stall the machine. If some
+ /// other instruction is available, issue it first.
+ /// * NoopHazard: issuing this instruction would break the program. If
+ /// some other instruction can be issued, do so, otherwise issue a noop.
+ virtual HazardType getHazardType(SUnit *) {
+ return NoHazard;
+ }
+
+ /// EmitInstruction - This callback is invoked when an instruction is
+ /// emitted, to advance the hazard state.
+ virtual void EmitInstruction(SUnit *) {}
+
+ /// AdvanceCycle - This callback is invoked when no instructions can be
+ /// issued on this cycle without a hazard. This should increment the
+ /// internal state of the hazard recognizer so that previously "Hazard"
+ /// instructions will now not be hazards.
+ virtual void AdvanceCycle() {}
+
+ /// EmitNoop - This callback is invoked when a noop was added to the
+ /// instruction stream.
+ virtual void EmitNoop() {
+ // Default implementation: count it as a cycle.
+ AdvanceCycle();
+ }
+};
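+
+// Illustrative use (a sketch): a cycle-driven list scheduler might drive a
+// recognizer roughly as follows, assuming HazardRec points at a recognizer
+// and SU is the candidate returned by its priority queue:
+//
+//   switch (HazardRec->getHazardType(SU)) {
+//   case ScheduleHazardRecognizer::NoHazard:
+//     HazardRec->EmitInstruction(SU);   // issue SU in the current cycle
+//     break;
+//   case ScheduleHazardRecognizer::Hazard:
+//     break;                            // try another SUnit or advance cycle
+//   case ScheduleHazardRecognizer::NoopHazard:
+//     HazardRec->EmitNoop();            // counts as a cycle by default
+//     break;
+//   }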
+
+}
+
+#endif
diff --git a/include/llvm/CodeGen/SchedulerRegistry.h b/include/llvm/CodeGen/SchedulerRegistry.h
new file mode 100644
index 0000000..1cf64a0
--- /dev/null
+++ b/include/llvm/CodeGen/SchedulerRegistry.h
@@ -0,0 +1,93 @@
+//===-- llvm/CodeGen/SchedulerRegistry.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the instruction scheduler function
+// pass registry (RegisterScheduler).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SCHEDULERREGISTRY_H
+#define LLVM_CODEGEN_SCHEDULERREGISTRY_H
+
+#include "llvm/CodeGen/MachinePassRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+///
+/// RegisterScheduler class - Track the registration of instruction schedulers.
+///
+//===----------------------------------------------------------------------===//
+
+class SelectionDAGISel;
+class ScheduleDAGSDNodes;
+class SelectionDAG;
+class MachineBasicBlock;
+
+class RegisterScheduler : public MachinePassRegistryNode {
+public:
+ typedef ScheduleDAGSDNodes *(*FunctionPassCtor)(SelectionDAGISel*,
+ CodeGenOpt::Level);
+
+ static MachinePassRegistry Registry;
+
+ RegisterScheduler(const char *N, const char *D, FunctionPassCtor C)
+ : MachinePassRegistryNode(N, D, (MachinePassCtor)C)
+ { Registry.Add(this); }
+ ~RegisterScheduler() { Registry.Remove(this); }
+
+
+ // Accessors.
+ //
+ RegisterScheduler *getNext() const {
+ return (RegisterScheduler *)MachinePassRegistryNode::getNext();
+ }
+ static RegisterScheduler *getList() {
+ return (RegisterScheduler *)Registry.getList();
+ }
+ static FunctionPassCtor getDefault() {
+ return (FunctionPassCtor)Registry.getDefault();
+ }
+ static void setDefault(FunctionPassCtor C) {
+ Registry.setDefault((MachinePassCtor)C);
+ }
+ static void setListener(MachinePassRegistryListener *L) {
+ Registry.setListener(L);
+ }
+};
+
+/// createBURRListDAGScheduler - This creates a bottom up register usage
+/// reduction list scheduler.
+ScheduleDAGSDNodes *createBURRListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
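+
+// A scheduler is typically made selectable (e.g. via -pre-RA-sched) by
+// defining a static registry entry in its implementation file, for example
+// (illustrative):
+//
+//   static RegisterScheduler
+//     burrListDAGScheduler("list-burr",
+//                          "Bottom-up register reduction list scheduling",
+//                          createBURRListDAGScheduler);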
+
+/// createTDRRListDAGScheduler - This creates a top down register usage
+/// reduction list scheduler.
+ScheduleDAGSDNodes *createTDRRListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createTDListDAGScheduler - This creates a top-down list scheduler with
+/// a hazard recognizer.
+ScheduleDAGSDNodes *createTDListDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createFastDAGScheduler - This creates a "fast" scheduler.
+///
+ScheduleDAGSDNodes *createFastDAGScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+/// createDefaultScheduler - This creates an instruction scheduler appropriate
+/// for the target.
+ScheduleDAGSDNodes *createDefaultScheduler(SelectionDAGISel *IS,
+ CodeGenOpt::Level OptLevel);
+
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
new file mode 100644
index 0000000..ec2d1d7
--- /dev/null
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -0,0 +1,880 @@
+//===-- llvm/CodeGen/SelectionDAG.h - InstSelection DAG ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SelectionDAG class, and transitively defines the
+// SDNode class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAG_H
+#define LLVM_CODEGEN_SELECTIONDAG_H
+
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/CodeGen/SelectionDAGNodes.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cassert>
+#include <vector>
+#include <map>
+#include <string>
+
+namespace llvm {
+
+class AliasAnalysis;
+class TargetLowering;
+class MachineModuleInfo;
+class DwarfWriter;
+class MachineFunction;
+class MachineConstantPoolValue;
+class FunctionLoweringInfo;
+
+template<> struct ilist_traits<SDNode> : public ilist_default_traits<SDNode> {
+private:
+ mutable ilist_node<SDNode> Sentinel;
+public:
+ SDNode *createSentinel() const {
+ return static_cast<SDNode*>(&Sentinel);
+ }
+ static void destroySentinel(SDNode *) {}
+
+ SDNode *provideInitialHead() const { return createSentinel(); }
+ SDNode *ensureHead(SDNode*) const { return createSentinel(); }
+ static void noteHead(SDNode*, SDNode*) {}
+
+ static void deleteNode(SDNode *) {
+ assert(0 && "ilist_traits<SDNode> shouldn't see a deleteNode call!");
+ }
+private:
+ static void createNode(const SDNode &);
+};
+
+enum CombineLevel {
+ Unrestricted, // Combine may create illegal operations and illegal types.
+ NoIllegalTypes, // Combine may create illegal operations but no illegal types.
+ NoIllegalOperations // Combine may only create legal operations and types.
+};
+
+/// SelectionDAG class - This is used to represent a portion of an LLVM function
+/// in a low-level Data Dependence DAG representation suitable for instruction
+/// selection. This DAG is constructed as the first step of instruction
+/// selection in order to allow implementation of machine specific optimizations
+/// and code simplifications.
+///
+/// The representation used by the SelectionDAG is a target-independent
+/// representation, which has some similarities to the GCC RTL representation,
+/// but is significantly simpler and more powerful, and is a graph form rather
+/// than a linear form.
+///
+class SelectionDAG {
+ TargetLowering &TLI;
+ MachineFunction *MF;
+ FunctionLoweringInfo &FLI;
+ MachineModuleInfo *MMI;
+ DwarfWriter *DW;
+
+ /// EntryNode - The starting token.
+ SDNode EntryNode;
+
+ /// Root - The root of the entire DAG.
+ SDValue Root;
+
+ /// AllNodes - A linked list of nodes in the current DAG.
+ ilist<SDNode> AllNodes;
+
+ /// NodeAllocatorType - The AllocatorType for allocating SDNodes. We use
+ /// pool allocation with recycling.
+ typedef RecyclingAllocator<BumpPtrAllocator, SDNode, sizeof(LargestSDNode),
+ AlignOf<MostAlignedSDNode>::Alignment>
+ NodeAllocatorType;
+
+ /// NodeAllocator - Pool allocation for nodes.
+ NodeAllocatorType NodeAllocator;
+
+  /// CSEMap - This structure is used to memoize nodes, automatically
+  /// performing CSE with existing nodes when a duplicate is requested.
+ FoldingSet<SDNode> CSEMap;
+
+ /// OperandAllocator - Pool allocation for machine-opcode SDNode operands.
+ BumpPtrAllocator OperandAllocator;
+
+ /// Allocator - Pool allocation for misc. objects that are created once per
+ /// SelectionDAG.
+ BumpPtrAllocator Allocator;
+
+ /// VerifyNode - Sanity check the given node. Aborts if it is invalid.
+ void VerifyNode(SDNode *N);
+
+  /// setSubgraphColorHelper - Implementation of setSubgraphColor.
+ /// Return whether we had to truncate the search.
+ ///
+ bool setSubgraphColorHelper(SDNode *N, const char *Color,
+ DenseSet<SDNode *> &visited,
+ int level, bool &printed);
+
+public:
+ SelectionDAG(TargetLowering &tli, FunctionLoweringInfo &fli);
+ ~SelectionDAG();
+
+ /// init - Prepare this SelectionDAG to process code in the given
+ /// MachineFunction.
+ ///
+ void init(MachineFunction &mf, MachineModuleInfo *mmi, DwarfWriter *dw);
+
+ /// clear - Clear state and free memory necessary to make this
+ /// SelectionDAG ready to process a new block.
+ ///
+ void clear();
+
+ MachineFunction &getMachineFunction() const { return *MF; }
+ const TargetMachine &getTarget() const;
+ TargetLowering &getTargetLoweringInfo() const { return TLI; }
+ FunctionLoweringInfo &getFunctionLoweringInfo() const { return FLI; }
+ MachineModuleInfo *getMachineModuleInfo() const { return MMI; }
+ DwarfWriter *getDwarfWriter() const { return DW; }
+
+ /// viewGraph - Pop up a GraphViz/gv window with the DAG rendered using 'dot'.
+ ///
+ void viewGraph(const std::string &Title);
+ void viewGraph();
+
+#ifndef NDEBUG
+ std::map<const SDNode *, std::string> NodeGraphAttrs;
+#endif
+
+ /// clearGraphAttrs - Clear all previously defined node graph attributes.
+ /// Intended to be used from a debugging tool (eg. gdb).
+ void clearGraphAttrs();
+
+ /// setGraphAttrs - Set graph attributes for a node. (eg. "color=red".)
+ ///
+ void setGraphAttrs(const SDNode *N, const char *Attrs);
+
+ /// getGraphAttrs - Get graph attributes for a node. (eg. "color=red".)
+ /// Used from getNodeAttributes.
+ const std::string getGraphAttrs(const SDNode *N) const;
+
+ /// setGraphColor - Convenience for setting node color attribute.
+ ///
+ void setGraphColor(const SDNode *N, const char *Color);
+
+  /// setSubgraphColor - Convenience for setting subgraph color attribute.
+ ///
+ void setSubgraphColor(SDNode *N, const char *Color);
+
+ typedef ilist<SDNode>::const_iterator allnodes_const_iterator;
+ allnodes_const_iterator allnodes_begin() const { return AllNodes.begin(); }
+ allnodes_const_iterator allnodes_end() const { return AllNodes.end(); }
+ typedef ilist<SDNode>::iterator allnodes_iterator;
+ allnodes_iterator allnodes_begin() { return AllNodes.begin(); }
+ allnodes_iterator allnodes_end() { return AllNodes.end(); }
+ ilist<SDNode>::size_type allnodes_size() const {
+ return AllNodes.size();
+ }
+
+ /// getRoot - Return the root tag of the SelectionDAG.
+ ///
+ const SDValue &getRoot() const { return Root; }
+
+ /// getEntryNode - Return the token chain corresponding to the entry of the
+ /// function.
+ SDValue getEntryNode() const {
+ return SDValue(const_cast<SDNode *>(&EntryNode), 0);
+ }
+
+ /// setRoot - Set the current root tag of the SelectionDAG.
+ ///
+ const SDValue &setRoot(SDValue N) {
+ assert((!N.getNode() || N.getValueType() == MVT::Other) &&
+ "DAG root value is not a chain!");
+ return Root = N;
+ }
+
+ /// Combine - This iterates over the nodes in the SelectionDAG, folding
+ /// certain types of nodes together, or eliminating superfluous nodes. The
+ /// Level argument controls whether Combine is allowed to produce nodes and
+ /// types that are illegal on the target.
+ void Combine(CombineLevel Level, AliasAnalysis &AA,
+ CodeGenOpt::Level OptLevel);
+
+ /// LegalizeTypes - This transforms the SelectionDAG into a SelectionDAG that
+ /// only uses types natively supported by the target. Returns "true" if it
+ /// made any changes.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ bool LegalizeTypes();
+
+ /// Legalize - This transforms the SelectionDAG into a SelectionDAG that is
+ /// compatible with the target instruction selector, as indicated by the
+ /// TargetLowering object.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ void Legalize(bool TypesNeedLegalizing, CodeGenOpt::Level OptLevel);
+
+ /// LegalizeVectors - This transforms the SelectionDAG into a SelectionDAG
+ /// that only uses vector math operations supported by the target. This is
+ /// necessary as a separate step from Legalize because unrolling a vector
+ /// operation can introduce illegal types, which requires running
+ /// LegalizeTypes again.
+ ///
+ /// This returns true if it made any changes; in that case, LegalizeTypes
+ /// is called again before Legalize.
+ ///
+ /// Note that this is an involved process that may invalidate pointers into
+ /// the graph.
+ bool LegalizeVectors();
+
+ /// RemoveDeadNodes - This method deletes all unreachable nodes in the
+ /// SelectionDAG.
+ void RemoveDeadNodes();
+
+ /// DeleteNode - Remove the specified node from the system. This node must
+ /// have no referrers.
+ void DeleteNode(SDNode *N);
+
+ /// getVTList - Return an SDVTList that represents the list of values
+ /// specified.
+ SDVTList getVTList(MVT VT);
+ SDVTList getVTList(MVT VT1, MVT VT2);
+ SDVTList getVTList(MVT VT1, MVT VT2, MVT VT3);
+ SDVTList getVTList(MVT VT1, MVT VT2, MVT VT3, MVT VT4);
+ SDVTList getVTList(const MVT *VTs, unsigned NumVTs);
+
+ //===--------------------------------------------------------------------===//
+ // Node creation methods.
+ //
+ SDValue getConstant(uint64_t Val, MVT VT, bool isTarget = false);
+ SDValue getConstant(const APInt &Val, MVT VT, bool isTarget = false);
+ SDValue getConstant(const ConstantInt &Val, MVT VT, bool isTarget = false);
+ SDValue getIntPtrConstant(uint64_t Val, bool isTarget = false);
+ SDValue getTargetConstant(uint64_t Val, MVT VT) {
+ return getConstant(Val, VT, true);
+ }
+ SDValue getTargetConstant(const APInt &Val, MVT VT) {
+ return getConstant(Val, VT, true);
+ }
+ SDValue getTargetConstant(const ConstantInt &Val, MVT VT) {
+ return getConstant(Val, VT, true);
+ }
+ SDValue getConstantFP(double Val, MVT VT, bool isTarget = false);
+ SDValue getConstantFP(const APFloat& Val, MVT VT, bool isTarget = false);
+ SDValue getConstantFP(const ConstantFP &CF, MVT VT, bool isTarget = false);
+ SDValue getTargetConstantFP(double Val, MVT VT) {
+ return getConstantFP(Val, VT, true);
+ }
+ SDValue getTargetConstantFP(const APFloat& Val, MVT VT) {
+ return getConstantFP(Val, VT, true);
+ }
+ SDValue getTargetConstantFP(const ConstantFP &Val, MVT VT) {
+ return getConstantFP(Val, VT, true);
+ }
+ SDValue getGlobalAddress(const GlobalValue *GV, MVT VT,
+ int64_t offset = 0, bool isTargetGA = false);
+ SDValue getTargetGlobalAddress(const GlobalValue *GV, MVT VT,
+ int64_t offset = 0) {
+ return getGlobalAddress(GV, VT, offset, true);
+ }
+ SDValue getFrameIndex(int FI, MVT VT, bool isTarget = false);
+ SDValue getTargetFrameIndex(int FI, MVT VT) {
+ return getFrameIndex(FI, VT, true);
+ }
+ SDValue getJumpTable(int JTI, MVT VT, bool isTarget = false);
+ SDValue getTargetJumpTable(int JTI, MVT VT) {
+ return getJumpTable(JTI, VT, true);
+ }
+ SDValue getConstantPool(Constant *C, MVT VT,
+ unsigned Align = 0, int Offs = 0, bool isT=false);
+ SDValue getTargetConstantPool(Constant *C, MVT VT,
+ unsigned Align = 0, int Offset = 0) {
+ return getConstantPool(C, VT, Align, Offset, true);
+ }
+ SDValue getConstantPool(MachineConstantPoolValue *C, MVT VT,
+ unsigned Align = 0, int Offs = 0, bool isT=false);
+ SDValue getTargetConstantPool(MachineConstantPoolValue *C,
+ MVT VT, unsigned Align = 0,
+ int Offset = 0) {
+ return getConstantPool(C, VT, Align, Offset, true);
+ }
+ // When generating a branch to a BB, we don't in general know enough
+ // to provide debug info for the BB at that time, so keep this one around.
+ SDValue getBasicBlock(MachineBasicBlock *MBB);
+ SDValue getBasicBlock(MachineBasicBlock *MBB, DebugLoc dl);
+ SDValue getExternalSymbol(const char *Sym, MVT VT);
+ SDValue getExternalSymbol(const char *Sym, DebugLoc dl, MVT VT);
+ SDValue getTargetExternalSymbol(const char *Sym, MVT VT);
+ SDValue getTargetExternalSymbol(const char *Sym, DebugLoc dl, MVT VT);
+ SDValue getArgFlags(ISD::ArgFlagsTy Flags);
+ SDValue getValueType(MVT);
+ SDValue getRegister(unsigned Reg, MVT VT);
+ SDValue getDbgStopPoint(DebugLoc DL, SDValue Root,
+ unsigned Line, unsigned Col, Value *CU);
+ SDValue getLabel(unsigned Opcode, DebugLoc dl, SDValue Root,
+ unsigned LabelID);
+
+ SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N) {
+ return getNode(ISD::CopyToReg, dl, MVT::Other, Chain,
+ getRegister(Reg, N.getValueType()), N);
+ }
+
+ // This version of the getCopyToReg method takes an extra operand, which
+ // indicates that there is potentially an incoming flag value (if Flag is not
+ // null) and that there should be a flag result.
+ SDValue getCopyToReg(SDValue Chain, DebugLoc dl, unsigned Reg, SDValue N,
+ SDValue Flag) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, getRegister(Reg, N.getValueType()), N, Flag };
+ return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
+ }
+
+  // Similar to the last getCopyToReg() except the Reg parameter is an SDValue.
+ SDValue getCopyToReg(SDValue Chain, DebugLoc dl, SDValue Reg, SDValue N,
+ SDValue Flag) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, Reg, N, Flag };
+ return getNode(ISD::CopyToReg, dl, VTs, Ops, Flag.getNode() ? 4 : 3);
+ }
+
+ SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, MVT VT) {
+ SDVTList VTs = getVTList(VT, MVT::Other);
+ SDValue Ops[] = { Chain, getRegister(Reg, VT) };
+ return getNode(ISD::CopyFromReg, dl, VTs, Ops, 2);
+ }
+
+ // This version of the getCopyFromReg method takes an extra operand, which
+ // indicates that there is potentially an incoming flag value (if Flag is not
+ // null) and that there should be a flag result.
+ SDValue getCopyFromReg(SDValue Chain, DebugLoc dl, unsigned Reg, MVT VT,
+ SDValue Flag) {
+ SDVTList VTs = getVTList(VT, MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, getRegister(Reg, VT), Flag };
+ return getNode(ISD::CopyFromReg, dl, VTs, Ops, Flag.getNode() ? 3 : 2);
+ }
+
+ SDValue getCondCode(ISD::CondCode Cond);
+
+  /// getConvertRndSat - Return a ConvertRndSat node. Note: Avoid using this
+  /// node because it may disappear in the future and most targets don't
+  /// support it.
+ SDValue getConvertRndSat(MVT VT, DebugLoc dl, SDValue Val, SDValue DTy,
+ SDValue STy,
+ SDValue Rnd, SDValue Sat, ISD::CvtCode Code);
+
+  /// getVectorShuffle - Return an ISD::VECTOR_SHUFFLE node. The number of
+  /// elements in VT, which must be a vector type, must match the number of
+  /// elements in the MaskElts array. An integer mask element equal to -1 is
+  /// treated as undefined.
+ SDValue getVectorShuffle(MVT VT, DebugLoc dl, SDValue N1, SDValue N2,
+ const int *MaskElts);
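+
+  // For example (a sketch): with VT = MVT::v4i32, a mask of {0, 4, -1, 5}
+  // selects element 0 of N1, element 0 of N2, an undefined element, and
+  // element 1 of N2, following the convention that mask indices of
+  // VT.getVectorNumElements() or more refer to N2.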
+
+ /// getZeroExtendInReg - Return the expression required to zero extend the Op
+ /// value assuming it was the smaller SrcTy value.
+ SDValue getZeroExtendInReg(SDValue Op, DebugLoc DL, MVT SrcTy);
+
+ /// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
+ SDValue getNOT(DebugLoc DL, SDValue Val, MVT VT);
+
+ /// getCALLSEQ_START - Return a new CALLSEQ_START node, which always must have
+ /// a flag result (to ensure it's not CSE'd). CALLSEQ_START does not have a
+ /// useful DebugLoc.
+ SDValue getCALLSEQ_START(SDValue Chain, SDValue Op) {
+ SDVTList VTs = getVTList(MVT::Other, MVT::Flag);
+ SDValue Ops[] = { Chain, Op };
+ return getNode(ISD::CALLSEQ_START, DebugLoc::getUnknownLoc(),
+ VTs, Ops, 2);
+ }
+
+ /// getCALLSEQ_END - Return a new CALLSEQ_END node, which always must have a
+ /// flag result (to ensure it's not CSE'd). CALLSEQ_END does not have
+ /// a useful DebugLoc.
+ SDValue getCALLSEQ_END(SDValue Chain, SDValue Op1, SDValue Op2,
+ SDValue InFlag) {
+ SDVTList NodeTys = getVTList(MVT::Other, MVT::Flag);
+ SmallVector<SDValue, 4> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Op1);
+ Ops.push_back(Op2);
+ Ops.push_back(InFlag);
+ return getNode(ISD::CALLSEQ_END, DebugLoc::getUnknownLoc(), NodeTys,
+ &Ops[0],
+ (unsigned)Ops.size() - (InFlag.getNode() == 0 ? 1 : 0));
+ }
+
+ /// getUNDEF - Return an UNDEF node. UNDEF does not have a useful DebugLoc.
+ SDValue getUNDEF(MVT VT) {
+ return getNode(ISD::UNDEF, DebugLoc::getUnknownLoc(), VT);
+ }
+
+ /// getGLOBAL_OFFSET_TABLE - Return a GLOBAL_OFFSET_TABLE node. This does
+ /// not have a useful DebugLoc.
+ SDValue getGLOBAL_OFFSET_TABLE(MVT VT) {
+ return getNode(ISD::GLOBAL_OFFSET_TABLE, DebugLoc::getUnknownLoc(), VT);
+ }
+
+ /// getNode - Gets or creates the specified node.
+ ///
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, SDValue N);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT, SDValue N1, SDValue N2);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+ SDValue N1, SDValue N2, SDValue N3);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+ SDValue N1, SDValue N2, SDValue N3, SDValue N4);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+ SDValue N1, SDValue N2, SDValue N3, SDValue N4,
+ SDValue N5);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+ const SDUse *Ops, unsigned NumOps);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, MVT VT,
+ const SDValue *Ops, unsigned NumOps);
+ SDValue getNode(unsigned Opcode, DebugLoc DL,
+ const std::vector<MVT> &ResultTys,
+ const SDValue *Ops, unsigned NumOps);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, const MVT *VTs, unsigned NumVTs,
+ const SDValue *Ops, unsigned NumOps);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs, SDValue N);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ SDValue N1, SDValue N2);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ SDValue N1, SDValue N2, SDValue N3);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ SDValue N1, SDValue N2, SDValue N3, SDValue N4);
+ SDValue getNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
+ SDValue N1, SDValue N2, SDValue N3, SDValue N4,
+ SDValue N5);
+
+ SDValue getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align, bool AlwaysInline,
+ const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff);
+
+ SDValue getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+                     const Value *DstSV, uint64_t DstSVOff,
+ const Value *SrcSV, uint64_t SrcSVOff);
+
+ SDValue getMemset(SDValue Chain, DebugLoc dl, SDValue Dst, SDValue Src,
+ SDValue Size, unsigned Align,
+ const Value *DstSV, uint64_t DstSVOff);
+
+ /// getSetCC - Helper function to make it easier to build SetCC's if you just
+ /// have an ISD::CondCode instead of an SDValue.
+ ///
+ SDValue getSetCC(DebugLoc DL, MVT VT, SDValue LHS, SDValue RHS,
+ ISD::CondCode Cond) {
+ return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+ }
+
+ /// getVSetCC - Helper function to make it easier to build VSetCC's nodes
+ /// if you just have an ISD::CondCode instead of an SDValue.
+ ///
+ SDValue getVSetCC(DebugLoc DL, MVT VT, SDValue LHS, SDValue RHS,
+ ISD::CondCode Cond) {
+ return getNode(ISD::VSETCC, DL, VT, LHS, RHS, getCondCode(Cond));
+ }
+
+ /// getSelectCC - Helper function to make it easier to build SelectCC's if you
+ /// just have an ISD::CondCode instead of an SDValue.
+ ///
+ SDValue getSelectCC(DebugLoc DL, SDValue LHS, SDValue RHS,
+ SDValue True, SDValue False, ISD::CondCode Cond) {
+ return getNode(ISD::SELECT_CC, DL, True.getValueType(),
+ LHS, RHS, True, False, getCondCode(Cond));
+ }
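+
+  // Illustrative use (a sketch): comparing two values and selecting between
+  // two others, assuming DAG is this SelectionDAG, dl is a DebugLoc, and the
+  // i1 result type is acceptable before legalization:
+  //
+  //   SDValue IsEq = DAG.getSetCC(dl, MVT::i1, LHS, RHS, ISD::SETEQ);
+  //   SDValue Min  = DAG.getSelectCC(dl, LHS, RHS, LHS, RHS, ISD::SETLT);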
+
+ /// getVAArg - VAArg produces a result and token chain, and takes a pointer
+ /// and a source value as input.
+ SDValue getVAArg(MVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
+ SDValue SV);
+
+ /// getAtomic - Gets a node for an atomic op, produces result and chain and
+  /// takes 3 operands.
+ SDValue getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT, SDValue Chain,
+ SDValue Ptr, SDValue Cmp, SDValue Swp, const Value* PtrVal,
+ unsigned Alignment=0);
+
+ /// getAtomic - Gets a node for an atomic op, produces result and chain and
+ /// takes 2 operands.
+ SDValue getAtomic(unsigned Opcode, DebugLoc dl, MVT MemVT, SDValue Chain,
+ SDValue Ptr, SDValue Val, const Value* PtrVal,
+ unsigned Alignment = 0);
+
+ /// getMemIntrinsicNode - Creates a MemIntrinsicNode that may produce a
+ /// result and takes a list of operands.
+ SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
+ const MVT *VTs, unsigned NumVTs,
+ const SDValue *Ops, unsigned NumOps,
+ MVT MemVT, const Value *srcValue, int SVOff,
+ unsigned Align = 0, bool Vol = false,
+ bool ReadMem = true, bool WriteMem = true);
+
+ SDValue getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
+ const SDValue *Ops, unsigned NumOps,
+ MVT MemVT, const Value *srcValue, int SVOff,
+ unsigned Align = 0, bool Vol = false,
+ bool ReadMem = true, bool WriteMem = true);
+
+ /// getMergeValues - Create a MERGE_VALUES node from the given operands.
+ SDValue getMergeValues(const SDValue *Ops, unsigned NumOps, DebugLoc dl);
+
+ /// getCall - Create a CALL node from the given information.
+ ///
+ SDValue getCall(unsigned CallingConv, DebugLoc dl, bool IsVarArgs,
+ bool IsTailCall, bool isInreg, SDVTList VTs,
+ const SDValue *Operands, unsigned NumOperands);
+
+ /// getLoad - Loads are not normal binary operators: their result type is not
+ /// determined by their operands, and they produce a value AND a token chain.
+ ///
+ SDValue getLoad(MVT VT, DebugLoc dl, SDValue Chain, SDValue Ptr,
+ const Value *SV, int SVOffset, bool isVolatile=false,
+ unsigned Alignment=0);
+ SDValue getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, MVT VT,
+ SDValue Chain, SDValue Ptr, const Value *SV,
+ int SVOffset, MVT EVT, bool isVolatile=false,
+ unsigned Alignment=0);
+ SDValue getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
+ SDValue Offset, ISD::MemIndexedMode AM);
+ SDValue getLoad(ISD::MemIndexedMode AM, DebugLoc dl, ISD::LoadExtType ExtType,
+ MVT VT, SDValue Chain,
+ SDValue Ptr, SDValue Offset,
+ const Value *SV, int SVOffset, MVT EVT,
+ bool isVolatile=false, unsigned Alignment=0);
+
+ /// getStore - Helper function to build ISD::STORE nodes.
+ ///
+ SDValue getStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
+ const Value *SV, int SVOffset, bool isVolatile=false,
+ unsigned Alignment=0);
+ SDValue getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val, SDValue Ptr,
+ const Value *SV, int SVOffset, MVT TVT,
+ bool isVolatile=false, unsigned Alignment=0);
+  SDValue getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
+ SDValue Offset, ISD::MemIndexedMode AM);
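+
+  // Example sketch, assuming 'DAG', a DebugLoc 'dl', a token chain 'Ch', a
+  // pointer SDValue 'Ptr', and an IR Value 'V' describing the memory
+  // location: a simple load/increment/store sequence could look like
+  //   SDValue Ld  = DAG.getLoad(MVT::i32, dl, Ch, Ptr, V, 0);
+  //   SDValue Inc = DAG.getNode(ISD::ADD, dl, MVT::i32, Ld,
+  //                             DAG.getConstant(1, MVT::i32));
+  //   SDValue St  = DAG.getStore(Ld.getValue(1), dl, Inc, Ptr, V, 0);
+  // where Ld.getValue(1) is the output token chain produced by the load.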
+
+ /// getSrcValue - Construct a node to track a Value* through the backend.
+ SDValue getSrcValue(const Value *v);
+
+ /// getMemOperand - Construct a node to track a memory reference
+ /// through the backend.
+ SDValue getMemOperand(const MachineMemOperand &MO);
+
+ /// getShiftAmountOperand - Return the specified value casted to
+ /// the target's desired shift amount type.
+ SDValue getShiftAmountOperand(SDValue Op);
+
+ /// UpdateNodeOperands - *Mutate* the specified node in-place to have the
+ /// specified operands. If the resultant node already exists in the DAG,
+ /// this does not modify the specified node, instead it returns the node that
+ /// already exists. If the resultant node does not exist in the DAG, the
+ /// input node is returned. As a degenerate case, if you specify the same
+ /// input operands as the node already has, the input node is returned.
+ SDValue UpdateNodeOperands(SDValue N, SDValue Op);
+ SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2);
+ SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDValue Op3);
+ SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDValue Op3, SDValue Op4);
+ SDValue UpdateNodeOperands(SDValue N, SDValue Op1, SDValue Op2,
+ SDValue Op3, SDValue Op4, SDValue Op5);
+ SDValue UpdateNodeOperands(SDValue N,
+ const SDValue *Ops, unsigned NumOps);
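+
+  // Example sketch: assuming 'N' is an existing (add X, Y) SDValue in 'DAG'
+  // and 'Z' has the same type as Y, replacing the second operand is
+  //   SDValue M = DAG.UpdateNodeOperands(N, N.getOperand(0), Z);
+  // If an (add X, Z) node already exists in the DAG, M is that node and N is
+  // left untouched; otherwise N is mutated in place and M == N.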
+
+ /// SelectNodeTo - These are used for target selectors to *mutate* the
+ /// specified node to have the specified return type, Target opcode, and
+ /// operands. Note that target opcodes are stored as
+ /// ~TargetOpcode in the node opcode field. The resultant node is returned.
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT, SDValue Op1);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT,
+ SDValue Op1, SDValue Op2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT,
+ const SDValue *Ops, unsigned NumOps);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1, MVT VT2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, const SDValue *Ops, unsigned NumOps);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, MVT VT3, const SDValue *Ops, unsigned NumOps);
+ SDNode *SelectNodeTo(SDNode *N, unsigned MachineOpc, MVT VT1,
+ MVT VT2, MVT VT3, MVT VT4, const SDValue *Ops,
+ unsigned NumOps);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, SDValue Op1);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, MVT VT1,
+ MVT VT2, MVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *SelectNodeTo(SDNode *N, unsigned TargetOpc, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps);
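+
+  // Example sketch: a target's instruction selector typically mutates the
+  // node it matched into a machine node, e.g. (with a hypothetical machine
+  // opcode 'MyADD32rr' and 'CurDAG' the selector's SelectionDAG):
+  //   return CurDAG->SelectNodeTo(N, MyADD32rr, MVT::i32,
+  //                               N->getOperand(0), N->getOperand(1));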
+
+ /// MorphNodeTo - These *mutate* the specified node to have the specified
+ /// return type, opcode, and operands.
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT, SDValue Op1);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT,
+ SDValue Op1, SDValue Op2);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT,
+ const SDValue *Ops, unsigned NumOps);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1, MVT VT2);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1,
+ MVT VT2, const SDValue *Ops, unsigned NumOps);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1,
+ MVT VT2, MVT VT3, const SDValue *Ops, unsigned NumOps);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1,
+ MVT VT2, SDValue Op1);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps);
+
+ /// getTargetNode - These are used for target selectors to create a new node
+ /// with specified return type(s), target opcode, and operands.
+ ///
+ /// Note that getTargetNode returns the resultant node. If there is already a
+ /// node of the specified opcode and operands, it returns that node instead of
+ /// the current one.
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, SDValue Op1);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT, SDValue Op1,
+ SDValue Op2);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT,
+ const SDValue *Ops, unsigned NumOps);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2,
+ SDValue Op1);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1,
+ MVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2,
+ const SDValue *Ops, unsigned NumOps);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3,
+ SDValue Op1, SDValue Op2);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3,
+ SDValue Op1, SDValue Op2, SDValue Op3);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3,
+ const SDValue *Ops, unsigned NumOps);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl, MVT VT1, MVT VT2, MVT VT3,
+ MVT VT4, const SDValue *Ops, unsigned NumOps);
+ SDNode *getTargetNode(unsigned Opcode, DebugLoc dl,
+ const std::vector<MVT> &ResultTys, const SDValue *Ops,
+ unsigned NumOps);
+
+ /// getNodeIfExists - Get the specified node if it's already available, or
+ /// else return NULL.
+ SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps);
+
+ /// DAGUpdateListener - Clients of various APIs that cause global effects on
+ /// the DAG can optionally implement this interface. This allows the clients
+ /// to handle the various sorts of updates that happen.
+ class DAGUpdateListener {
+ public:
+ virtual ~DAGUpdateListener();
+
+ /// NodeDeleted - The node N that was deleted and, if E is not null, an
+ /// equivalent node E that replaced it.
+ virtual void NodeDeleted(SDNode *N, SDNode *E) = 0;
+
+ /// NodeUpdated - The node N that was updated.
+ virtual void NodeUpdated(SDNode *N) = 0;
+ };
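+
+  // Example sketch: a minimal listener that just counts deletions, usable
+  // with the replacement/removal APIs below:
+  //   struct CountingListener : SelectionDAG::DAGUpdateListener {
+  //     unsigned NumDeleted;
+  //     CountingListener() : NumDeleted(0) {}
+  //     virtual void NodeDeleted(SDNode *N, SDNode *E) { ++NumDeleted; }
+  //     virtual void NodeUpdated(SDNode *N) {}
+  //   };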
+
+ /// RemoveDeadNode - Remove the specified node from the system. If any of its
+  /// operands then become dead, remove them as well. Inform UpdateListener
+ /// for each node deleted.
+ void RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener = 0);
+
+ /// RemoveDeadNodes - This method deletes the unreachable nodes in the
+ /// given list, and any nodes that become unreachable as a result.
+ void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
+ DAGUpdateListener *UpdateListener = 0);
+
+ /// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
+ /// This can cause recursive merging of nodes in the DAG. Use the first
+ /// version if 'From' is known to have a single result, use the second
+ /// if you have two nodes with identical results (or if 'To' has a superset
+ /// of the results of 'From'), use the third otherwise.
+ ///
+ /// These methods all take an optional UpdateListener, which (if not null) is
+ /// informed about nodes that are deleted and modified due to recursive
+ /// changes in the dag.
+ ///
+ /// These functions only replace all existing uses. It's possible that as
+ /// these replacements are being performed, CSE may cause the From node
+ /// to be given new uses. These new uses of From are left in place, and
+  /// not automatically transferred to To.
+ ///
+ void ReplaceAllUsesWith(SDValue From, SDValue Op,
+ DAGUpdateListener *UpdateListener = 0);
+ void ReplaceAllUsesWith(SDNode *From, SDNode *To,
+ DAGUpdateListener *UpdateListener = 0);
+ void ReplaceAllUsesWith(SDNode *From, const SDValue *To,
+ DAGUpdateListener *UpdateListener = 0);
+
+ /// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
+  /// uses of other values produced by From.getNode() alone.
+ void ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
+ DAGUpdateListener *UpdateListener = 0);
+
+ /// ReplaceAllUsesOfValuesWith - Like ReplaceAllUsesOfValueWith, but
+ /// for multiple values at once. This correctly handles the case where
+ /// there is an overlap between the From values and the To values.
+ void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
+ unsigned Num,
+ DAGUpdateListener *UpdateListener = 0);
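+
+  // Example sketch: to rewrite only the chain result of a two-result node
+  // (e.g. a load), assuming 'Old' is the load SDValue and 'NewChain' is the
+  // replacement chain:
+  //   DAG.ReplaceAllUsesOfValueWith(Old.getValue(1), NewChain);
+  // Uses of the load's value result (result 0) are left untouched.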
+
+  /// AssignTopologicalOrder - Topological-sort the AllNodes list and assign
+  /// a unique node id for each node in the DAG based on their
+ /// topological order. Returns the number of nodes.
+ unsigned AssignTopologicalOrder();
+
+ /// RepositionNode - Move node N in the AllNodes list to be immediately
+ /// before the given iterator Position. This may be used to update the
+ /// topological ordering when the list of nodes is modified.
+ void RepositionNode(allnodes_iterator Position, SDNode *N) {
+ AllNodes.insert(Position, AllNodes.remove(N));
+ }
+
+ /// isCommutativeBinOp - Returns true if the opcode is a commutative binary
+ /// operation.
+ static bool isCommutativeBinOp(unsigned Opcode) {
+ // FIXME: This should get its info from the td file, so that we can include
+ // target info.
+ switch (Opcode) {
+ case ISD::ADD:
+ case ISD::MUL:
+ case ISD::MULHU:
+ case ISD::MULHS:
+ case ISD::SMUL_LOHI:
+ case ISD::UMUL_LOHI:
+ case ISD::FADD:
+ case ISD::FMUL:
+ case ISD::AND:
+ case ISD::OR:
+ case ISD::XOR:
+ case ISD::SADDO:
+ case ISD::UADDO:
+ case ISD::ADDC:
+ case ISD::ADDE: return true;
+ default: return false;
+ }
+ }
+
+ void dump() const;
+
+ /// CreateStackTemporary - Create a stack temporary, suitable for holding the
+ /// specified value type. If minAlign is specified, the slot size will have
+ /// at least that alignment.
+ SDValue CreateStackTemporary(MVT VT, unsigned minAlign = 1);
+
+ /// CreateStackTemporary - Create a stack temporary suitable for holding
+ /// either of the specified value types.
+ SDValue CreateStackTemporary(MVT VT1, MVT VT2);
+
+  /// FoldConstantArithmetic - Attempt to constant fold the given opcode on
+  /// the two constant operands, returning the result, or a null SDValue if
+  /// the operation cannot be folded.
+ SDValue FoldConstantArithmetic(unsigned Opcode,
+ MVT VT,
+ ConstantSDNode *Cst1,
+ ConstantSDNode *Cst2);
+
+ /// FoldSetCC - Constant fold a setcc to true or false.
+ SDValue FoldSetCC(MVT VT, SDValue N1,
+ SDValue N2, ISD::CondCode Cond, DebugLoc dl);
+
+ /// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
+ /// use this predicate to simplify operations downstream.
+ bool SignBitIsZero(SDValue Op, unsigned Depth = 0) const;
+
+ /// MaskedValueIsZero - Return true if 'Op & Mask' is known to be zero. We
+ /// use this predicate to simplify operations downstream. Op and Mask are
+ /// known to be the same type.
+ bool MaskedValueIsZero(SDValue Op, const APInt &Mask, unsigned Depth = 0)
+ const;
+
+ /// ComputeMaskedBits - Determine which of the bits specified in Mask are
+ /// known to be either zero or one and return them in the KnownZero/KnownOne
+ /// bitsets. This code only analyzes bits in Mask, in order to short-circuit
+ /// processing. Targets can implement the computeMaskedBitsForTargetNode
+ /// method in the TargetLowering class to allow target nodes to be understood.
+ void ComputeMaskedBits(SDValue Op, const APInt &Mask, APInt &KnownZero,
+ APInt &KnownOne, unsigned Depth = 0) const;
+
+ /// ComputeNumSignBits - Return the number of times the sign bit of the
+ /// register is replicated into the other bits. We know that at least 1 bit
+ /// is always equal to the sign bit (itself), but other cases can give us
+ /// information. For example, immediately after an "SRA X, 2", we know that
+ /// the top 3 bits are all equal to each other, so we return 3. Targets can
+  /// implement the ComputeNumSignBitsForTargetNode method in the TargetLowering
+ /// class to allow target nodes to be understood.
+ unsigned ComputeNumSignBits(SDValue Op, unsigned Depth = 0) const;
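+
+  // Example sketch of the known-bits queries, assuming 'DAG' and an i32
+  // SDValue 'Op':
+  //   APInt HighMask = APInt::getHighBitsSet(32, 16);
+  //   if (DAG.MaskedValueIsZero(Op, HighMask))
+  //     ...;  // the top 16 bits of Op are known to be zero
+  //   unsigned SignBits = DAG.ComputeNumSignBits(Op);  // always >= 1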
+
+ /// isVerifiedDebugInfoDesc - Returns true if the specified SDValue has
+ /// been verified as a debug information descriptor.
+ bool isVerifiedDebugInfoDesc(SDValue Op) const;
+
+ /// getShuffleScalarElt - Returns the scalar element that will make up the ith
+ /// element of the result of the vector shuffle.
+ SDValue getShuffleScalarElt(const ShuffleVectorSDNode *N, unsigned Idx);
+
+private:
+ bool RemoveNodeFromCSEMaps(SDNode *N);
+ void AddModifiedNodeToCSEMaps(SDNode *N, DAGUpdateListener *UpdateListener);
+ SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
+ SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
+ void *&InsertPos);
+ SDNode *FindModifiedNodeSlot(SDNode *N, const SDValue *Ops, unsigned NumOps,
+ void *&InsertPos);
+
+ void DeleteNodeNotInCSEMaps(SDNode *N);
+ void DeallocateNode(SDNode *N);
+
+ unsigned getMVTAlignment(MVT MemoryVT) const;
+
+ void allnodes_clear();
+
+ /// VTList - List of non-single value types.
+ std::vector<SDVTList> VTList;
+
+ /// CondCodeNodes - Maps to auto-CSE operations.
+ std::vector<CondCodeSDNode*> CondCodeNodes;
+
+ std::vector<SDNode*> ValueTypeNodes;
+ std::map<MVT, SDNode*, MVT::compareRawBits> ExtendedValueTypeNodes;
+ StringMap<SDNode*> ExternalSymbols;
+ StringMap<SDNode*> TargetExternalSymbols;
+};
+
+template <> struct GraphTraits<SelectionDAG*> : public GraphTraits<SDNode*> {
+ typedef SelectionDAG::allnodes_iterator nodes_iterator;
+ static nodes_iterator nodes_begin(SelectionDAG *G) {
+ return G->allnodes_begin();
+ }
+ static nodes_iterator nodes_end(SelectionDAG *G) {
+ return G->allnodes_end();
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
new file mode 100644
index 0000000..d2c0dc4
--- /dev/null
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -0,0 +1,140 @@
+//===-- llvm/CodeGen/SelectionDAGISel.h - Common Base Class -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SelectionDAGISel class, which is used as the common
+// base class for SelectionDAG-based instruction selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAG_ISEL_H
+#define LLVM_CODEGEN_SELECTIONDAG_ISEL_H
+
+#include "llvm/BasicBlock.h"
+#include "llvm/Pass.h"
+#include "llvm/Constant.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+
+namespace llvm {
+ class FastISel;
+ class SelectionDAGLowering;
+ class SDValue;
+ class MachineRegisterInfo;
+ class MachineBasicBlock;
+ class MachineFunction;
+ class MachineInstr;
+ class MachineModuleInfo;
+ class DwarfWriter;
+ class TargetLowering;
+ class TargetInstrInfo;
+ class FunctionLoweringInfo;
+ class ScheduleHazardRecognizer;
+ class GCFunctionInfo;
+ class ScheduleDAGSDNodes;
+
+/// SelectionDAGISel - This is the common base class used for SelectionDAG-based
+/// pattern-matching instruction selectors.
+class SelectionDAGISel : public FunctionPass {
+public:
+ const TargetMachine &TM;
+ TargetLowering &TLI;
+ FunctionLoweringInfo *FuncInfo;
+ MachineFunction *MF;
+ MachineRegisterInfo *RegInfo;
+ SelectionDAG *CurDAG;
+ SelectionDAGLowering *SDL;
+ MachineBasicBlock *BB;
+ AliasAnalysis *AA;
+ GCFunctionInfo *GFI;
+ CodeGenOpt::Level OptLevel;
+ static char ID;
+
+ explicit SelectionDAGISel(TargetMachine &tm,
+ CodeGenOpt::Level OL = CodeGenOpt::Default);
+ virtual ~SelectionDAGISel();
+
+ TargetLowering &getTargetLowering() { return TLI; }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+
+ virtual bool runOnFunction(Function &Fn);
+
+ unsigned MakeReg(MVT VT);
+
+ virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {}
+ virtual void InstructionSelect() = 0;
+
+ void SelectRootInit() {
+ DAGSize = CurDAG->AssignTopologicalOrder();
+ }
+
+ /// SelectInlineAsmMemoryOperand - Select the specified address as a target
+ /// addressing mode, according to the specified constraint code. If this does
+ /// not match or is not implemented, return true. The resultant operands
+ /// (which will appear in the machine instruction) should be added to the
+ /// OutOps vector.
+ virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ char ConstraintCode,
+ std::vector<SDValue> &OutOps) {
+ return true;
+ }
+
+  /// IsLegalAndProfitableToFold - Returns true if the specific operand node N
+  /// of U can be folded during instruction selection that starts at Root, and
+  /// folding N is profitable.
+ virtual
+ bool IsLegalAndProfitableToFold(SDNode *N, SDNode *U, SDNode *Root) const;
+
+ /// CreateTargetHazardRecognizer - Return a newly allocated hazard recognizer
+ /// to use for this target when scheduling the DAG.
+ virtual ScheduleHazardRecognizer *CreateTargetHazardRecognizer();
+
+protected:
+ /// DAGSize - Size of DAG being instruction selected.
+ ///
+ unsigned DAGSize;
+
+ /// SelectInlineAsmMemoryOperands - Calls to this are automatically generated
+ /// by tblgen. Others should not call it.
+ void SelectInlineAsmMemoryOperands(std::vector<SDValue> &Ops);
+
+ // Calls to these predicates are generated by tblgen.
+ bool CheckAndMask(SDValue LHS, ConstantSDNode *RHS,
+ int64_t DesiredMaskS) const;
+ bool CheckOrMask(SDValue LHS, ConstantSDNode *RHS,
+ int64_t DesiredMaskS) const;
+
+private:
+ void SelectAllBasicBlocks(Function &Fn, MachineFunction &MF,
+ MachineModuleInfo *MMI,
+ DwarfWriter *DW,
+ const TargetInstrInfo &TII);
+ void FinishBasicBlock();
+
+ void SelectBasicBlock(BasicBlock *LLVMBB,
+ BasicBlock::iterator Begin,
+ BasicBlock::iterator End);
+ void CodeGenAndEmitDAG();
+ void LowerArguments(BasicBlock *BB);
+
+ void ComputeLiveOutVRegInfo();
+
+ void HandlePHINodesInSuccessorBlocks(BasicBlock *LLVMBB);
+
+ bool HandlePHINodesInSuccessorBlocksFast(BasicBlock *LLVMBB, FastISel *F);
+
+ /// Create the scheduler. If a specific scheduler was specified
+  /// via the SchedulerRegistry, use it; otherwise select the
+ /// one preferred by the target.
+ ///
+ ScheduleDAGSDNodes *CreateScheduler();
+};
+
+}
+
+#endif /* LLVM_CODEGEN_SELECTIONDAG_ISEL_H */
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
new file mode 100644
index 0000000..ad48510
--- /dev/null
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -0,0 +1,2568 @@
+//===-- llvm/CodeGen/SelectionDAGNodes.h - SelectionDAG Nodes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SDNode class and derived classes, which are used to
+// represent the nodes and operations present in a SelectionDAG. These nodes
+// and operations are machine code level operations, with some similarities to
+// the GCC RTL representation.
+//
+// Clients should include the SelectionDAG.h file instead of this file directly.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_SELECTIONDAGNODES_H
+#define LLVM_CODEGEN_SELECTIONDAGNODES_H
+
+#include "llvm/Constants.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/iterator.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/ValueTypes.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/RecyclingAllocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/CodeGen/DebugLoc.h"
+#include <cassert>
+#include <climits>
+
+namespace llvm {
+
+class SelectionDAG;
+class GlobalValue;
+class MachineBasicBlock;
+class MachineConstantPoolValue;
+class SDNode;
+class Value;
+template <typename T> struct DenseMapInfo;
+template <typename T> struct simplify_type;
+template <typename T> struct ilist_traits;
+
+/// SDVTList - This represents a list of ValueType's that has been intern'd by
+/// a SelectionDAG. Instances of this simple value class are returned by
+/// SelectionDAG::getVTList(...).
+///
+struct SDVTList {
+ const MVT *VTs;
+ unsigned int NumVTs;
+};
+
+/// ISD namespace - This namespace contains an enum which represents all of the
+/// SelectionDAG node types and value types.
+///
+namespace ISD {
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::NodeType enum - This enum defines the target-independent operators
+ /// for a SelectionDAG.
+ ///
+ /// Targets may also define target-dependent operator codes for SDNodes. For
+ /// example, on x86, these are the enum values in the X86ISD namespace.
+ /// Targets should aim to use target-independent operators to model their
+ /// instruction sets as much as possible, and only use target-dependent
+ /// operators when they have special requirements.
+ ///
+  /// Finally, during and after selection proper, SDNodes may use special
+ /// operator codes that correspond directly with MachineInstr opcodes. These
+ /// are used to represent selected instructions. See the isMachineOpcode()
+ /// and getMachineOpcode() member functions of SDNode.
+ ///
+ enum NodeType {
+ // DELETED_NODE - This is an illegal value that is used to catch
+ // errors. This opcode is not a legal opcode for any node.
+ DELETED_NODE,
+
+ // EntryToken - This is the marker used to indicate the start of the region.
+ EntryToken,
+
+ // TokenFactor - This node takes multiple tokens as input and produces a
+ // single token result. This is used to represent the fact that the operand
+ // operators are independent of each other.
+ TokenFactor,
+
+ // AssertSext, AssertZext - These nodes record if a register contains a
+ // value that has already been zero or sign extended from a narrower type.
+ // These nodes take two operands. The first is the node that has already
+ // been extended, and the second is a value type node indicating the width
+    // of the extension.
+ AssertSext, AssertZext,
+
+ // Various leaf nodes.
+ BasicBlock, VALUETYPE, ARG_FLAGS, CONDCODE, Register,
+ Constant, ConstantFP,
+ GlobalAddress, GlobalTLSAddress, FrameIndex,
+ JumpTable, ConstantPool, ExternalSymbol,
+
+ // The address of the GOT
+ GLOBAL_OFFSET_TABLE,
+
+ // FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
+ // llvm.returnaddress on the DAG. These nodes take one operand, the index
+ // of the frame or return address to return. An index of zero corresponds
+ // to the current function's frame or return address, an index of one to the
+ // parent's frame or return address, and so on.
+ FRAMEADDR, RETURNADDR,
+
+ // FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
+ // first (possible) on-stack argument. This is needed for correct stack
+ // adjustment during unwind.
+ FRAME_TO_ARGS_OFFSET,
+
+ // RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
+    // address of the exception block on entry to a landing pad block.
+ EXCEPTIONADDR,
+
+ // RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents
+ // the selection index of the exception thrown.
+ EHSELECTION,
+
+ // OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
+ // 'eh_return' gcc dwarf builtin, which is used to return from
+ // exception. The general meaning is: adjust stack by OFFSET and pass
+ // execution to HANDLER. Many platform-related details also :)
+ EH_RETURN,
+
+ // TargetConstant* - Like Constant*, but the DAG does not do any folding or
+ // simplification of the constant.
+ TargetConstant,
+ TargetConstantFP,
+
+ // TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
+ // anything else with this node, and this is valid in the target-specific
+ // dag, turning into a GlobalAddress operand.
+ TargetGlobalAddress,
+ TargetGlobalTLSAddress,
+ TargetFrameIndex,
+ TargetJumpTable,
+ TargetConstantPool,
+ TargetExternalSymbol,
+
+ /// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with no side effects.
+ /// The first operand is the ID number of the intrinsic from the
+ /// llvm::Intrinsic namespace. The operands to the intrinsic follow. The
+    /// node returns the result of the intrinsic.
+ INTRINSIC_WO_CHAIN,
+
+ /// RESULT,OUTCHAIN = INTRINSIC_W_CHAIN(INCHAIN, INTRINSICID, arg1, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// returns a result. The first operand is a chain pointer. The second is
+ /// the ID number of the intrinsic from the llvm::Intrinsic namespace. The
+ /// operands to the intrinsic follow. The node has two results, the result
+ /// of the intrinsic and an output chain.
+ INTRINSIC_W_CHAIN,
+
+ /// OUTCHAIN = INTRINSIC_VOID(INCHAIN, INTRINSICID, arg1, arg2, ...)
+ /// This node represents a target intrinsic function with side effects that
+ /// does not return a result. The first operand is a chain pointer. The
+ /// second is the ID number of the intrinsic from the llvm::Intrinsic
+ /// namespace. The operands to the intrinsic follow.
+ INTRINSIC_VOID,
+
+ // CopyToReg - This node has three operands: a chain, a register number to
+ // set to this value, and a value.
+ CopyToReg,
+
+ // CopyFromReg - This node indicates that the input value is a virtual or
+ // physical register that is defined outside of the scope of this
+ // SelectionDAG. The register is available from the RegisterSDNode object.
+ CopyFromReg,
+
+ // UNDEF - An undefined node
+ UNDEF,
+
+ /// FORMAL_ARGUMENTS(CHAIN, CC#, ISVARARG, FLAG0, ..., FLAGn) - This node
+ /// represents the formal arguments for a function. CC# is a Constant value
+ /// indicating the calling convention of the function, and ISVARARG is a
+ /// flag that indicates whether the function is varargs or not. This node
+ /// has one result value for each incoming argument, plus one for the output
+ /// chain. It must be custom legalized. See description of CALL node for
+ /// FLAG argument contents explanation.
+ ///
+ FORMAL_ARGUMENTS,
+
+ /// RV1, RV2...RVn, CHAIN = CALL(CHAIN, CALLEE,
+ /// ARG0, FLAG0, ARG1, FLAG1, ... ARGn, FLAGn)
+ /// This node represents a fully general function call, before the legalizer
+ /// runs. This has one result value for each argument / flag pair, plus
+ /// a chain result. It must be custom legalized. Flag argument indicates
+ /// misc. argument attributes. Currently:
+    /// Bit 0 - signedness
+ /// Bit 1 - 'inreg' attribute
+ /// Bit 2 - 'sret' attribute
+ /// Bit 4 - 'byval' attribute
+ /// Bit 5 - 'nest' attribute
+ /// Bit 6-9 - alignment of byval structures
+ /// Bit 10-26 - size of byval structures
+ /// Bits 31:27 - argument ABI alignment in the first argument piece and
+ /// alignment '1' in other argument pieces.
+ ///
+ /// CALL nodes use the CallSDNode subclass of SDNode, which
+ /// additionally carries information about the calling convention,
+ /// whether the call is varargs, and if it's marked as a tail call.
+ ///
+ CALL,
+
+ // EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
+ // a Constant, which is required to be operand #1) half of the integer or
+ // float value specified as operand #0. This is only for use before
+ // legalization, for values that will be broken into multiple registers.
+ EXTRACT_ELEMENT,
+
+ // BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given
+ // two values of the same integer value type, this produces a value twice as
+ // big. Like EXTRACT_ELEMENT, this can only be used before legalization.
+ BUILD_PAIR,
+
+ // MERGE_VALUES - This node takes multiple discrete operands and returns
+    // them all as its individual results. This node has exactly the same
+ // number of inputs and outputs, and is only valid before legalization.
+ // This node is useful for some pieces of the code generator that want to
+ // think about a single node with multiple results, not multiple nodes.
+ MERGE_VALUES,
+
+ // Simple integer binary arithmetic operators.
+ ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
+
+ // SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
+ // a signed/unsigned value of type i[2*N], and return the full value as
+ // two results, each of type iN.
+ SMUL_LOHI, UMUL_LOHI,
+
+ // SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
+ // remainder result.
+ SDIVREM, UDIVREM,
+
+ // CARRY_FALSE - This node is used when folding other nodes,
+ // like ADDC/SUBC, which indicate the carry result is always false.
+ CARRY_FALSE,
+
+ // Carry-setting nodes for multiple precision addition and subtraction.
+ // These nodes take two operands of the same value type, and produce two
+ // results. The first result is the normal add or sub result, the second
+ // result is the carry flag result.
+ ADDC, SUBC,
+
+ // Carry-using nodes for multiple precision addition and subtraction. These
+ // nodes take three operands: The first two are the normal lhs and rhs to
+ // the add or sub, and the third is the input carry flag. These nodes
+ // produce two results; the normal result of the add or sub, and the output
+ // carry flag. These nodes both read and write a carry flag to allow them
+    // to be chained together for add and sub of arbitrarily large
+ // values.
+ ADDE, SUBE,
+
+ // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
+ // These nodes take two operands: the normal LHS and RHS to the add. They
+ // produce two results: the normal result of the add, and a boolean that
+    // indicates if an overflow occurred (*not* a flag, because it may be stored
+ // to memory, etc.). If the type of the boolean is not i1 then the high
+ // bits conform to getBooleanContents.
+ // These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
+ SADDO, UADDO,
+
+ // Same for subtraction
+ SSUBO, USUBO,
+
+ // Same for multiplication
+ SMULO, UMULO,
+
+ // Simple binary floating point operators.
+ FADD, FSUB, FMUL, FDIV, FREM,
+
+ // FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
+ // DAG node does not require that X and Y have the same type, just that they
+ // are both floating point. X and the result must have the same type.
+ // FCOPYSIGN(f32, f64) is allowed.
+ FCOPYSIGN,
+
+ // INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
+ // value as an integer 0/1 value.
+ FGETSIGN,
+
+ /// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
+ /// specified, possibly variable, elements. The number of elements is
+ /// required to be a power of two. The types of the operands must all be
+ /// the same and must match the vector element type, except that integer
+ /// types are allowed to be larger than the element type, in which case
+ /// the operands are implicitly truncated.
+ BUILD_VECTOR,
+
+ /// INSERT_VECTOR_ELT(VECTOR, VAL, IDX) - Returns VECTOR with the element
+ /// at IDX replaced with VAL. If the type of VAL is larger than the vector
+ /// element type then VAL is truncated before replacement.
+ INSERT_VECTOR_ELT,
+
+ /// EXTRACT_VECTOR_ELT(VECTOR, IDX) - Returns a single element from VECTOR
+ /// identified by the (potentially variable) element number IDX.
+ EXTRACT_VECTOR_ELT,
+
+ /// CONCAT_VECTORS(VECTOR0, VECTOR1, ...) - Given a number of values of
+ /// vector type with the same length and element type, this produces a
+ /// concatenated vector result value, with length equal to the sum of the
+ /// lengths of the input vectors.
+ CONCAT_VECTORS,
+
+    /// EXTRACT_SUBVECTOR(VECTOR, IDX) - Returns a subvector from VECTOR (a
+    /// vector value) starting with the (potentially variable) element number
+ /// IDX, which must be a multiple of the result vector length.
+ EXTRACT_SUBVECTOR,
+
+ /// VECTOR_SHUFFLE(VEC1, VEC2) - Returns a vector, of the same type as
+ /// VEC1/VEC2. A VECTOR_SHUFFLE node also contains an array of constant int
+ /// values that indicate which value (or undef) each result element will
+ /// get. These constant ints are accessible through the
+ /// ShuffleVectorSDNode class. This is quite similar to the Altivec
+ /// 'vperm' instruction, except that the indices must be constants and are
+ /// in terms of the element size of VEC1/VEC2, not in terms of bytes.
+ VECTOR_SHUFFLE,
+
+ /// SCALAR_TO_VECTOR(VAL) - This represents the operation of loading a
+ /// scalar value into element 0 of the resultant vector type. The top
+ /// elements 1 to N-1 of the N-element vector are undefined. The type
+ /// of the operand must match the vector element type, except when they
+ /// are integer types. In this case the operand is allowed to be wider
+ /// than the vector element type, and is implicitly truncated to it.
+ SCALAR_TO_VECTOR,
+
+ // MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing
+ // an unsigned/signed value of type i[2*N], then return the top part.
+ MULHU, MULHS,
+
+ // Bitwise operators - logical and, logical or, logical xor, shift left,
+ // shift right algebraic (shift in sign bits), shift right logical (shift in
+ // zeroes), rotate left, rotate right, and byteswap.
+ AND, OR, XOR, SHL, SRA, SRL, ROTL, ROTR, BSWAP,
+
+ // Counting operators
+ CTTZ, CTLZ, CTPOP,
+
+ // Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
+ // i1 then the high bits must conform to getBooleanContents.
+ SELECT,
+
+ // Select with condition operator - This selects between a true value and
+ // a false value (ops #2 and #3) based on the boolean result of comparing
+ // the lhs and rhs (ops #0 and #1) of a conditional expression with the
+ // condition code in op #4, a CondCodeSDNode.
+ SELECT_CC,
+
+ // SetCC operator - This evaluates to a true value iff the condition is
+ // true. If the result value type is not i1 then the high bits conform
+ // to getBooleanContents. The operands to this are the left and right
+ // operands to compare (ops #0, and #1) and the condition code to compare
+ // them with (op #2) as a CondCodeSDNode.
+ SETCC,
+
+ // Vector SetCC operator - This evaluates to a vector of integer elements
+ // with the high bit in each element set to true if the comparison is true
+ // and false if the comparison is false. All other bits in each element
+ // are undefined. The operands to this are the left and right operands
+ // to compare (ops #0, and #1) and the condition code to compare them with
+ // (op #2) as a CondCodeSDNode.
+ VSETCC,
+
+ // SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
+ // integer shift operations, just like ADD/SUB_PARTS. The operation
+ // ordering is:
+ // [Lo,Hi] = op [LoLHS,HiLHS], Amt
+ SHL_PARTS, SRA_PARTS, SRL_PARTS,
+
+ // Conversion operators. These are all single input single output
+ // operations. For all of these, the result type must be strictly
+ // wider or narrower (depending on the operation) than the source
+ // type.
+
+ // SIGN_EXTEND - Used for integer types, replicating the sign bit
+ // into new bits.
+ SIGN_EXTEND,
+
+ // ZERO_EXTEND - Used for integer types, zeroing the new bits.
+ ZERO_EXTEND,
+
+ // ANY_EXTEND - Used for integer types. The high bits are undefined.
+ ANY_EXTEND,
+
+ // TRUNCATE - Completely drop the high bits.
+ TRUNCATE,
+
+ // [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
+ // depends on the first letter) to floating point.
+ SINT_TO_FP,
+ UINT_TO_FP,
+
+ // SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
+ // sign extend a small value in a large integer register (e.g. sign
+ // extending the low 8 bits of a 32-bit register to fill the top 24 bits
+    // with the 7th bit). The size of the smaller type is indicated by
+    // operand #1, a ValueType node.
+ SIGN_EXTEND_INREG,
+
+ /// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
+ /// integer.
+ FP_TO_SINT,
+ FP_TO_UINT,
+
+ /// X = FP_ROUND(Y, TRUNC) - Rounding 'Y' from a larger floating point type
+ /// down to the precision of the destination VT. TRUNC is a flag, which is
+ /// always an integer that is zero or one. If TRUNC is 0, this is a
+ /// normal rounding, if it is 1, this FP_ROUND is known to not change the
+ /// value of Y.
+ ///
+ /// The TRUNC = 1 case is used in cases where we know that the value will
+ /// not be modified by the node, because Y is not using any of the extra
+ /// precision of source type. This allows certain transformations like
+ /// FP_EXTEND(FP_ROUND(X,1)) -> X which are not safe for
+ /// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
+ FP_ROUND,
+
+ // FLT_ROUNDS_ - Returns current rounding mode:
+ // -1 Undefined
+ // 0 Round to 0
+ // 1 Round to nearest
+ // 2 Round to +inf
+ // 3 Round to -inf
+ FLT_ROUNDS_,
+
+ /// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
+ /// rounds it to a floating point value. It then promotes it and returns it
+ /// in a register of the same size. This operation effectively just
+ /// discards excess precision. The type to round down to is specified by
+ /// the VT operand, a VTSDNode.
+ FP_ROUND_INREG,
+
+ /// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
+ FP_EXTEND,
+
+    // BIT_CONVERT - This operator converts between integer and FP values, as
+ // if one was stored to memory as integer and the other was loaded from the
+ // same address (or equivalently for vector format conversions, etc). The
+ // source and result are required to have the same bit size (e.g.
+ // f32 <-> i32). This can also be used for int-to-int or fp-to-fp
+ // conversions, but that is a noop, deleted by getNode().
+ BIT_CONVERT,
+
+ // CONVERT_RNDSAT - This operator is used to support various conversions
+ // between various types (float, signed, unsigned and vectors of those
+ // types) with rounding and saturation. NOTE: Avoid using this operator as
+    // most targets don't support it and the operator might be removed in the
+ // future. It takes the following arguments:
+ // 0) value
+ // 1) dest type (type to convert to)
+ // 2) src type (type to convert from)
+ // 3) rounding imm
+ // 4) saturation imm
+ // 5) ISD::CvtCode indicating the type of conversion to do
+ CONVERT_RNDSAT,
+
+ // FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ // FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ // FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating
+ // point operations. These are inspired by libm.
+ FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
+ FLOG, FLOG2, FLOG10, FEXP, FEXP2,
+ FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
+
+ // LOAD and STORE have token chains as their first operand, then the same
+ // operands as an LLVM load/store instruction, then an offset node that
+ // is added / subtracted from the base pointer to form the address (for
+ // indexed memory ops).
+ LOAD, STORE,
+
+ // DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
+ // to a specified boundary. This node always has two return values: a new
+ // stack pointer value and a chain. The first operand is the token chain,
+ // the second is the number of bytes to allocate, and the third is the
+ // alignment boundary. The size is guaranteed to be a multiple of the stack
+ // alignment, and the alignment is guaranteed to be bigger than the stack
+ // alignment (if required) or 0 to get standard stack alignment.
+ DYNAMIC_STACKALLOC,
+
+ // Control flow instructions. These all have token chains.
+
+ // BR - Unconditional branch. The first operand is the chain
+ // operand, the second is the MBB to branch to.
+ BR,
+
+ // BRIND - Indirect branch. The first operand is the chain, the second
+ // is the value to branch to, which must be of the same type as the target's
+ // pointer type.
+ BRIND,
+
+ // BR_JT - Jumptable branch. The first operand is the chain, the second
+ // is the jumptable index, the last one is the jumptable entry index.
+ BR_JT,
+
+ // BRCOND - Conditional branch. The first operand is the chain, the
+ // second is the condition, the third is the block to branch to if the
+ // condition is true. If the type of the condition is not i1, then the
+ // high bits must conform to getBooleanContents.
+ BRCOND,
+
+ // BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
+ // that the condition is represented as condition code, and two nodes to
+ // compare, rather than as a combined SetCC node. The operands in order are
+ // chain, cc, lhs, rhs, block to branch to if condition is true.
+ BR_CC,
+
+ // RET - Return from function. The first operand is the chain,
+ // and any subsequent operands are pairs of return value and return value
+ // attributes (see CALL for description of attributes) for the function.
+ // This operation can have variable number of operands.
+ RET,
+
+ // INLINEASM - Represents an inline asm block. This node always has two
+ // return values: a chain and a flag result. The inputs are as follows:
+ // Operand #0 : Input chain.
+ // Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
+ // Operand #2n+2: A RegisterNode.
+ // Operand #2n+3: A TargetConstant, indicating if the reg is a use/def
+ // Operand #last: Optional, an incoming flag.
+ INLINEASM,
+
+ // DBG_LABEL, EH_LABEL - Represents a label in mid basic block used to track
+ // locations needed for debug and exception handling tables. These nodes
+ // take a chain as input and return a chain.
+ DBG_LABEL,
+ EH_LABEL,
+
+ // DECLARE - Represents a llvm.dbg.declare intrinsic. It's used to track
+ // local variable declarations for debugging information. First operand is
+    // a chain, while the next two operands are the first two arguments (address
+ // and variable) of a llvm.dbg.declare instruction.
+ DECLARE,
+
+ // STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
+ // value, the same type as the pointer type for the system, and an output
+ // chain.
+ STACKSAVE,
+
+    // STACKRESTORE has two operands, an input chain and a pointer to restore
+    // to; it returns an output chain.
+ STACKRESTORE,
+
+ // CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of
+ // a call sequence, and carry arbitrary information that target might want
+ // to know. The first operand is a chain, the rest are specified by the
+ // target and not touched by the DAG optimizers.
+ // CALLSEQ_START..CALLSEQ_END pairs may not be nested.
+ CALLSEQ_START, // Beginning of a call sequence
+ CALLSEQ_END, // End of a call sequence
+
+ // VAARG - VAARG has three operands: an input chain, a pointer, and a
+ // SRCVALUE. It returns a pair of values: the vaarg value and a new chain.
+ VAARG,
+
+ // VACOPY - VACOPY has five operands: an input chain, a destination pointer,
+ // a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
+ // source.
+ VACOPY,
+
+ // VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a
+ // pointer, and a SRCVALUE.
+ VAEND, VASTART,
+
+ // SRCVALUE - This is a node type that holds a Value* that is used to
+ // make reference to a value in the LLVM IR.
+ SRCVALUE,
+
+ // MEMOPERAND - This is a node that contains a MachineMemOperand which
+ // records information about a memory reference. This is used to make
+ // AliasAnalysis queries from the backend.
+ MEMOPERAND,
+
+ // PCMARKER - This corresponds to the pcmarker intrinsic.
+ PCMARKER,
+
+ // READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
+    // The only operand is a chain; a value and a chain are produced. The
+    // value is the contents of the architecture-specific cycle counter-like
+    // register (or other high accuracy, low latency clock source).
+ READCYCLECOUNTER,
+
+ // HANDLENODE node - Used as a handle for various purposes.
+ HANDLENODE,
+
+ // DBG_STOPPOINT - This node is used to represent a source location for
+    // debug info. It takes a token chain as input, and carries a line number,
+ // column number, and a pointer to a CompileUnit object identifying
+ // the containing compilation unit. It produces a token chain as output.
+ DBG_STOPPOINT,
+
+ // DEBUG_LOC - This node is used to represent source line information
+ // embedded in the code. It takes a token chain as input, then a line
+    // number, then a column, then a file id (provided by MachineModuleInfo). It
+ // produces a token chain as output.
+ DEBUG_LOC,
+
+ // TRAMPOLINE - This corresponds to the init_trampoline intrinsic.
+ // It takes as input a token chain, the pointer to the trampoline,
+ // the pointer to the nested function, the pointer to pass for the
+ // 'nest' parameter, a SRCVALUE for the trampoline and another for
+ // the nested function (allowing targets to access the original
+ // Function*). It produces the result of the intrinsic and a token
+ // chain as output.
+ TRAMPOLINE,
+
+ // TRAP - Trapping instruction
+ TRAP,
+
+    // PREFETCH - This corresponds to a prefetch intrinsic. It takes a chain
+    // as its first operand. The other operands are the address to prefetch,
+ // read / write specifier, and locality specifier.
+ PREFETCH,
+
+ // OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
+ // store-store, device)
+ // This corresponds to the memory.barrier intrinsic.
+ // it takes an input chain, 4 operands to specify the type of barrier, an
+    // It takes an input chain, 4 operands to specify the type of barrier, an
+    // operand specifying if the barrier applies to device and uncached memory,
+    // and produces an output chain.
+
+ // Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
+    // This corresponds to the atomic.lcs intrinsic.
+    // cmp is compared to *ptr, and if equal, swap is stored in *ptr.
+    // The return is always the original value in *ptr.
+ ATOMIC_CMP_SWAP,
+
+ // Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
+    // This corresponds to the atomic.swap intrinsic.
+    // amt is stored to *ptr atomically.
+    // The return is always the original value in *ptr.
+ ATOMIC_SWAP,
+
+ // Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
+    // This corresponds to the atomic.load.[OpName] intrinsic.
+    // op(*ptr, amt) is stored to *ptr atomically.
+    // The return is always the original value in *ptr.
+ ATOMIC_LOAD_ADD,
+ ATOMIC_LOAD_SUB,
+ ATOMIC_LOAD_AND,
+ ATOMIC_LOAD_OR,
+ ATOMIC_LOAD_XOR,
+ ATOMIC_LOAD_NAND,
+ ATOMIC_LOAD_MIN,
+ ATOMIC_LOAD_MAX,
+ ATOMIC_LOAD_UMIN,
+ ATOMIC_LOAD_UMAX,
+
+ // BUILTIN_OP_END - This must be the last enum value in this list.
+ BUILTIN_OP_END
+ };
+
+ /// Node predicates
+
+ /// isBuildVectorAllOnes - Return true if the specified node is a
+ /// BUILD_VECTOR where all of the elements are ~0 or undef.
+ bool isBuildVectorAllOnes(const SDNode *N);
+
+ /// isBuildVectorAllZeros - Return true if the specified node is a
+ /// BUILD_VECTOR where all of the elements are 0 or undef.
+ bool isBuildVectorAllZeros(const SDNode *N);
+
+ /// isScalarToVector - Return true if the specified node is a
+ /// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
+ /// element is not an undef.
+ bool isScalarToVector(const SDNode *N);
+
+ /// isDebugLabel - Return true if the specified node represents a debug
+ /// label (i.e. ISD::DBG_LABEL or TargetInstrInfo::DBG_LABEL node).
+ bool isDebugLabel(const SDNode *N);
+
+ //===--------------------------------------------------------------------===//
+ /// MemIndexedMode enum - This enum defines the load / store indexed
+ /// addressing modes.
+ ///
+ /// UNINDEXED "Normal" load / store. The effective address is already
+ /// computed and is available in the base pointer. The offset
+ /// operand is always undefined. In addition to producing a
+ /// chain, an unindexed load produces one value (result of the
+ /// load); an unindexed store does not produce a value.
+ ///
+ /// PRE_INC Similar to the unindexed mode where the effective address is
+  /// PRE_DEC the value of the base pointer plus / minus the offset.
+ /// It considers the computation as being folded into the load /
+ /// store operation (i.e. the load / store does the address
+ /// computation as well as performing the memory transaction).
+ /// The base operand is always undefined. In addition to
+ /// producing a chain, pre-indexed load produces two values
+ /// (result of the load and the result of the address
+ /// computation); a pre-indexed store produces one value (result
+ /// of the address computation).
+ ///
+ /// POST_INC The effective address is the value of the base pointer. The
+ /// POST_DEC value of the offset operand is then added to / subtracted
+ /// from the base after memory transaction. In addition to
+ /// producing a chain, post-indexed load produces two values
+ /// (the result of the load and the result of the base +/- offset
+  ///              computation); a post-indexed store produces one value (the
+  ///              result of the base +/- offset computation).
+ ///
+ enum MemIndexedMode {
+ UNINDEXED = 0,
+ PRE_INC,
+ PRE_DEC,
+ POST_INC,
+ POST_DEC,
+ LAST_INDEXED_MODE
+ };
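+
+  // Example sketch: a pre-indexed load is roughly "base += offset; value =
+  // *base" folded into one operation, while a post-indexed load is "value =
+  // *base; base += offset". Given an existing unindexed load 'Ld', a base
+  // pointer 'BasePtr' and an offset 'Off' (all assumed SDValues, with 'DAG'
+  // and 'dl' as usual), a target could form the pre-incremented variant with
+  //   SDValue PreLd = DAG.getIndexedLoad(Ld, dl, BasePtr, Off, ISD::PRE_INC);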
+
+ //===--------------------------------------------------------------------===//
+ /// LoadExtType enum - This enum defines the three variants of LOADEXT
+ /// (load with extension).
+ ///
+ /// SEXTLOAD loads the integer operand and sign extends it to a larger
+ /// integer result type.
+ /// ZEXTLOAD loads the integer operand and zero extends it to a larger
+ /// integer result type.
+ /// EXTLOAD is used for three things: floating point extending loads,
+ /// integer extending loads [the top bits are undefined], and vector
+ /// extending loads [load into low elt].
+ ///
+ enum LoadExtType {
+ NON_EXTLOAD = 0,
+ EXTLOAD,
+ SEXTLOAD,
+ ZEXTLOAD,
+ LAST_LOADEXT_TYPE
+ };
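+
+  // Example sketch: the extension kind is passed to SelectionDAG::getExtLoad.
+  // Loading an i8 from memory and sign extending it to i32 (assuming 'DAG',
+  // 'dl', a chain 'Ch', a pointer 'Ptr', and an IR Value 'V') could be
+  //   SDValue L = DAG.getExtLoad(ISD::SEXTLOAD, dl, MVT::i32, Ch, Ptr,
+  //                              V, 0, MVT::i8);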
+
+ //===--------------------------------------------------------------------===//
+ /// ISD::CondCode enum - These are ordered carefully to make the bitfields
+ /// below work out, when considering SETFALSE (something that never exists
+ /// dynamically) as 0. "U" -> Unsigned (for integer operands) or Unordered
+ /// (for floating point), "L" -> Less than, "G" -> Greater than, "E" -> Equal
+ /// to. If the "N" column is 1, the result of the comparison is undefined if
+ /// the input is a NAN.
+ ///
+ /// All of these (except for the 'always folded ops') should be handled for
+ /// floating point. For integer, only the SETEQ,SETNE,SETLT,SETLE,SETGT,
+ /// SETGE,SETULT,SETULE,SETUGT, and SETUGE opcodes are used.
+ ///
+ /// Note that these are laid out in a specific order to allow bit-twiddling
+ /// to transform conditions.
+ enum CondCode {
+ // Opcode N U L G E Intuitive operation
+ SETFALSE, // 0 0 0 0 Always false (always folded)
+ SETOEQ, // 0 0 0 1 True if ordered and equal
+ SETOGT, // 0 0 1 0 True if ordered and greater than
+ SETOGE, // 0 0 1 1 True if ordered and greater than or equal
+ SETOLT, // 0 1 0 0 True if ordered and less than
+ SETOLE, // 0 1 0 1 True if ordered and less than or equal
+ SETONE, // 0 1 1 0 True if ordered and operands are unequal
+ SETO, // 0 1 1 1 True if ordered (no nans)
+ SETUO, // 1 0 0 0 True if unordered: isnan(X) | isnan(Y)
+ SETUEQ, // 1 0 0 1 True if unordered or equal
+ SETUGT, // 1 0 1 0 True if unordered or greater than
+ SETUGE, // 1 0 1 1 True if unordered, greater than, or equal
+ SETULT, // 1 1 0 0 True if unordered or less than
+ SETULE, // 1 1 0 1 True if unordered, less than, or equal
+ SETUNE, // 1 1 1 0 True if unordered or not equal
+ SETTRUE, // 1 1 1 1 Always true (always folded)
+ // Don't care operations: undefined if the input is a nan.
+ SETFALSE2, // 1 X 0 0 0 Always false (always folded)
+ SETEQ, // 1 X 0 0 1 True if equal
+ SETGT, // 1 X 0 1 0 True if greater than
+ SETGE, // 1 X 0 1 1 True if greater than or equal
+ SETLT, // 1 X 1 0 0 True if less than
+ SETLE, // 1 X 1 0 1 True if less than or equal
+ SETNE, // 1 X 1 1 0 True if not equal
+ SETTRUE2, // 1 X 1 1 1 Always true (always folded)
+
+ SETCC_INVALID // Marker value.
+ };
+
+ /// isSignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs a signed comparison when used with integer operands.
+ inline bool isSignedIntSetCC(CondCode Code) {
+ return Code == SETGT || Code == SETGE || Code == SETLT || Code == SETLE;
+ }
+
+ /// isUnsignedIntSetCC - Return true if this is a setcc instruction that
+ /// performs an unsigned comparison when used with integer operands.
+ inline bool isUnsignedIntSetCC(CondCode Code) {
+ return Code == SETUGT || Code == SETUGE || Code == SETULT || Code == SETULE;
+ }
+
+ /// isTrueWhenEqual - Return true if the specified condition returns true if
+ /// the two operands to the condition are equal. Note that if one of the two
+ /// operands is a NaN, this value is meaningless.
+ inline bool isTrueWhenEqual(CondCode Cond) {
+ return ((int)Cond & 1) != 0;
+ }
+
+ /// getUnorderedFlavor - This function returns 0 if the condition is always
+ /// false if an operand is a NaN, 1 if the condition is always true if the
+ /// operand is a NaN, and 2 if the condition is undefined if the operand is a
+ /// NaN.
+ inline unsigned getUnorderedFlavor(CondCode Cond) {
+ return ((int)Cond >> 3) & 3;
+ }
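+
+  // Worked example of the encoding above: SETOEQ is 0b00001 (only the E bit)
+  // and SETUEQ is 0b01001 (U and E bits), so
+  //   isTrueWhenEqual(SETOEQ)     // true: the E bit (bit 0) is set
+  //   getUnorderedFlavor(SETOEQ)  // 0: ordered compares are false on a NaN
+  //   getUnorderedFlavor(SETUEQ)  // 1: unordered compares are true on a NaN
+  //   getUnorderedFlavor(SETEQ)   // 2: the result is undefined on a NaN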
+
+ /// getSetCCInverse - Return the operation corresponding to !(X op Y), where
+ /// 'op' is a valid SetCC operation.
+ CondCode getSetCCInverse(CondCode Operation, bool isInteger);
+
+ /// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
+ /// when given the operation for (X op Y).
+ CondCode getSetCCSwappedOperands(CondCode Operation);
+
+ /// getSetCCOrOperation - Return the result of a logical OR between different
+ /// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCOrOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+ /// getSetCCAndOperation - Return the result of a logical AND between
+ /// different comparisons of identical values: ((X op1 Y) & (X op2 Y)). This
+ /// function returns SETCC_INVALID if it is not possible to represent the
+ /// resultant comparison.
+ CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
+
+ //===--------------------------------------------------------------------===//
+ /// CvtCode enum - This enum defines the various converts CONVERT_RNDSAT
+ /// supports.
+ enum CvtCode {
+ CVT_FF, // Float from Float
+ CVT_FS, // Float from Signed
+ CVT_FU, // Float from Unsigned
+ CVT_SF, // Signed from Float
+ CVT_UF, // Unsigned from Float
+ CVT_SS, // Signed from Signed
+ CVT_SU, // Signed from Unsigned
+ CVT_US, // Unsigned from Signed
+ CVT_UU, // Unsigned from Unsigned
+ CVT_INVALID // Marker - Invalid opcode
+ };
+} // end llvm::ISD namespace
+
+
+//===----------------------------------------------------------------------===//
+/// SDValue - Unlike LLVM values, Selection DAG nodes may return multiple
+/// values as the result of a computation. Many nodes return multiple values,
+/// from loads (which define a token and a return value) to ADDC (which returns
+/// a result and a carry value), to calls (which may return an arbitrary number
+/// of values).
+///
+/// As such, each use of a SelectionDAG computation must indicate the node that
+/// computes it as well as which return value to use from that node. This pair
+/// of information is represented with the SDValue value type.
+///
+class SDValue {
+ SDNode *Node; // The node defining the value we are using.
+ unsigned ResNo; // Which return value of the node we are using.
+public:
+ SDValue() : Node(0), ResNo(0) {}
+ SDValue(SDNode *node, unsigned resno) : Node(node), ResNo(resno) {}
+
+ /// get the index which selects a specific result in the SDNode
+ unsigned getResNo() const { return ResNo; }
+
+ /// get the SDNode which holds the desired result
+ SDNode *getNode() const { return Node; }
+
+ /// set the SDNode
+ void setNode(SDNode *N) { Node = N; }
+
+ bool operator==(const SDValue &O) const {
+ return Node == O.Node && ResNo == O.ResNo;
+ }
+ bool operator!=(const SDValue &O) const {
+ return !operator==(O);
+ }
+ bool operator<(const SDValue &O) const {
+ return Node < O.Node || (Node == O.Node && ResNo < O.ResNo);
+ }
+
+ SDValue getValue(unsigned R) const {
+ return SDValue(Node, R);
+ }
+
+ // isOperandOf - Return true if this node is an operand of N.
+ bool isOperandOf(SDNode *N) const;
+
+ /// getValueType - Return the ValueType of the referenced return value.
+ ///
+ inline MVT getValueType() const;
+
+ /// getValueSizeInBits - Returns the size of the value in bits.
+ ///
+ unsigned getValueSizeInBits() const {
+ return getValueType().getSizeInBits();
+ }
+
+ // Forwarding methods - These forward to the corresponding methods in SDNode.
+ inline unsigned getOpcode() const;
+ inline unsigned getNumOperands() const;
+ inline const SDValue &getOperand(unsigned i) const;
+ inline uint64_t getConstantOperandVal(unsigned i) const;
+ inline bool isTargetOpcode() const;
+ inline bool isMachineOpcode() const;
+ inline unsigned getMachineOpcode() const;
+ inline const DebugLoc getDebugLoc() const;
+
+
+ /// reachesChainWithoutSideEffects - Return true if this operand (which must
+ /// be a chain) reaches the specified operand without crossing any
+ /// side-effecting instructions. In practice, this looks through token
+ /// factors and non-volatile loads. In order to remain efficient, this only
+ /// looks a couple of nodes in; it does not do an exhaustive search.
+ bool reachesChainWithoutSideEffects(SDValue Dest,
+ unsigned Depth = 2) const;
+
+ /// use_empty - Return true if there are no nodes using value ResNo
+ /// of Node.
+ ///
+ inline bool use_empty() const;
+
+ /// hasOneUse - Return true if there is exactly one node using value
+ /// ResNo of Node.
+ ///
+ inline bool hasOneUse() const;
+};
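+
+// Illustrative sketch (LoadVal is assumed to be an SDValue referring to an
+// ISD::LOAD node, which typically defines result 0 = the loaded value and
+// result 1 = the output chain); SDValue selects one result of that node:
+//   SDValue Loaded = LoadVal.getValue(0);   // value result of the load node
+//   SDValue Chain  = LoadVal.getValue(1);   // chain result of the same node
+//   assert(Loaded.getNode() == Chain.getNode() &&
+//          Loaded.getResNo() != Chain.getResNo());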
+
+
+template<> struct DenseMapInfo<SDValue> {
+ static inline SDValue getEmptyKey() {
+ return SDValue((SDNode*)-1, -1U);
+ }
+ static inline SDValue getTombstoneKey() {
+ return SDValue((SDNode*)-1, 0);
+ }
+ static unsigned getHashValue(const SDValue &Val) {
+ return ((unsigned)((uintptr_t)Val.getNode() >> 4) ^
+ (unsigned)((uintptr_t)Val.getNode() >> 9)) + Val.getResNo();
+ }
+ static bool isEqual(const SDValue &LHS, const SDValue &RHS) {
+ return LHS == RHS;
+ }
+ static bool isPod() { return true; }
+};
+
+/// simplify_type specializations - Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDValue> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(const SDValue &Val) {
+ return static_cast<SimpleType>(Val.getNode());
+ }
+};
+template<> struct simplify_type<const SDValue> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(const SDValue &Val) {
+ return static_cast<SimpleType>(Val.getNode());
+ }
+};
+
+/// SDUse - Represents a use of a SDNode. This class holds an SDValue,
+/// which records the SDNode being used and the result number, a
+/// pointer to the SDNode using the value, and Next and Prev pointers,
+/// which link together all the uses of an SDNode.
+///
+class SDUse {
+ /// Val - The value being used.
+ SDValue Val;
+ /// User - The user of this value.
+ SDNode *User;
+ /// Prev, Next - Pointers to the uses list of the SDNode referred by
+ /// this operand.
+ SDUse **Prev, *Next;
+
+ SDUse(const SDUse &U); // Do not implement
+ void operator=(const SDUse &U); // Do not implement
+
+public:
+ SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {}
+
+ /// Normally SDUse will just implicitly convert to an SDValue that it holds.
+ operator const SDValue&() const { return Val; }
+
+ /// If implicit conversion to SDValue doesn't work, the get() method returns
+ /// the SDValue.
+ const SDValue &get() const { return Val; }
+
+ /// getUser - This returns the SDNode that contains this Use.
+ SDNode *getUser() { return User; }
+
+ /// getNext - Get the next SDUse in the use list.
+ SDUse *getNext() const { return Next; }
+
+ /// getNode - Convenience function for get().getNode().
+ SDNode *getNode() const { return Val.getNode(); }
+ /// getResNo - Convenience function for get().getResNo().
+ unsigned getResNo() const { return Val.getResNo(); }
+ /// getValueType - Convenience function for get().getValueType().
+ MVT getValueType() const { return Val.getValueType(); }
+
+ /// operator== - Convenience function for get().operator==
+ bool operator==(const SDValue &V) const {
+ return Val == V;
+ }
+
+ /// operator!= - Convenience function for get().operator!=
+ bool operator!=(const SDValue &V) const {
+ return Val != V;
+ }
+
+ /// operator< - Convenience function for get().operator<
+ bool operator<(const SDValue &V) const {
+ return Val < V;
+ }
+
+private:
+ friend class SelectionDAG;
+ friend class SDNode;
+
+ void setUser(SDNode *p) { User = p; }
+
+ /// set - Remove this use from its existing use list, assign it the
+ /// given value, and add it to the new value's node's use list.
+ inline void set(const SDValue &V);
+ /// setInitial - like set, but only supports initializing a newly-allocated
+ /// SDUse with a non-null value.
+ inline void setInitial(const SDValue &V);
+ /// setNode - like set, but only sets the Node portion of the value,
+ /// leaving the ResNo portion unmodified.
+ inline void setNode(SDNode *N);
+
+ void addToList(SDUse **List) {
+ Next = *List;
+ if (Next) Next->Prev = &Next;
+ Prev = List;
+ *List = this;
+ }
+
+ void removeFromList() {
+ *Prev = Next;
+ if (Next) Next->Prev = Prev;
+ }
+};
+
+/// simplify_type specializations - Allow casting operators to work directly on
+/// SDValues as if they were SDNode*'s.
+template<> struct simplify_type<SDUse> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(const SDUse &Val) {
+ return static_cast<SimpleType>(Val.getNode());
+ }
+};
+template<> struct simplify_type<const SDUse> {
+ typedef SDNode* SimpleType;
+ static SimpleType getSimplifiedValue(const SDUse &Val) {
+ return static_cast<SimpleType>(Val.getNode());
+ }
+};
+
+
+/// SDNode - Represents one node in the SelectionDAG.
+///
+class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
+private:
+ /// NodeType - The operation that this node performs.
+ ///
+ short NodeType;
+
+ /// OperandsNeedDelete - This is true if OperandList was new[]'d. If true,
+ /// then they will be delete[]'d when the node is destroyed.
+ unsigned short OperandsNeedDelete : 1;
+
+protected:
+ /// SubclassData - This member is defined by this class, but is not used for
+ /// anything. Subclasses can use it to hold whatever state they find useful.
+ /// This field is initialized to zero by the ctor.
+ unsigned short SubclassData : 15;
+
+private:
+ /// NodeId - Unique id per SDNode in the DAG.
+ int NodeId;
+
+ /// OperandList - The values that are used by this operation.
+ ///
+ SDUse *OperandList;
+
+ /// ValueList - The types of the values this node defines. SDNode's may
+ /// define multiple values simultaneously.
+ const MVT *ValueList;
+
+ /// UseList - List of uses for this SDNode.
+ SDUse *UseList;
+
+ /// NumOperands/NumValues - The number of entries in the Operand/Value list.
+ unsigned short NumOperands, NumValues;
+
+ /// debugLoc - source line information.
+ DebugLoc debugLoc;
+
+ /// getValueTypeList - Return a pointer to the specified value type.
+ static const MVT *getValueTypeList(MVT VT);
+
+ friend class SelectionDAG;
+ friend struct ilist_traits<SDNode>;
+
+public:
+ //===--------------------------------------------------------------------===//
+ // Accessors
+ //
+
+ /// getOpcode - Return the SelectionDAG opcode value for this node. For
+ /// pre-isel nodes (those for which isMachineOpcode returns false), these
+ /// are the opcode values in the ISD and <target>ISD namespaces. For
+ /// post-isel opcodes, see getMachineOpcode.
+ unsigned getOpcode() const { return (unsigned short)NodeType; }
+
+ /// isTargetOpcode - Test if this node has a target-specific opcode (in the
+ /// \<target\>ISD namespace).
+ bool isTargetOpcode() const { return NodeType >= ISD::BUILTIN_OP_END; }
+
+ /// isMachineOpcode - Test if this node has a post-isel opcode, directly
+ /// corresponding to a MachineInstr opcode.
+ bool isMachineOpcode() const { return NodeType < 0; }
+
+ /// getMachineOpcode - This may only be called if isMachineOpcode returns
+ /// true. It returns the MachineInstr opcode value that the node's opcode
+ /// corresponds to.
+ unsigned getMachineOpcode() const {
+ assert(isMachineOpcode() && "Not a MachineInstr opcode!");
+ return ~NodeType;
+ }
+
+ /// use_empty - Return true if there are no uses of this node.
+ ///
+ bool use_empty() const { return UseList == NULL; }
+
+ /// hasOneUse - Return true if there is exactly one use of this node.
+ ///
+ bool hasOneUse() const {
+ return !use_empty() && next(use_begin()) == use_end();
+ }
+
+ /// use_size - Return the number of uses of this node. This method takes
+ /// time proportional to the number of uses.
+ ///
+ size_t use_size() const { return std::distance(use_begin(), use_end()); }
+
+ /// getNodeId - Return the unique node id.
+ ///
+ int getNodeId() const { return NodeId; }
+
+ /// setNodeId - Set unique node id.
+ void setNodeId(int Id) { NodeId = Id; }
+
+ /// getDebugLoc - Return the source location info.
+ const DebugLoc getDebugLoc() const { return debugLoc; }
+
+ /// setDebugLoc - Set source location info. Try to avoid this, putting
+ /// it in the constructor is preferable.
+ void setDebugLoc(const DebugLoc dl) { debugLoc = dl; }
+
+ /// use_iterator - This class provides iterator support for SDUse
+ /// operands that use a specific SDNode.
+ class use_iterator
+ : public forward_iterator<SDUse, ptrdiff_t> {
+ SDUse *Op;
+ explicit use_iterator(SDUse *op) : Op(op) {
+ }
+ friend class SDNode;
+ public:
+ typedef forward_iterator<SDUse, ptrdiff_t>::reference reference;
+ typedef forward_iterator<SDUse, ptrdiff_t>::pointer pointer;
+
+ use_iterator(const use_iterator &I) : Op(I.Op) {}
+ use_iterator() : Op(0) {}
+
+ bool operator==(const use_iterator &x) const {
+ return Op == x.Op;
+ }
+ bool operator!=(const use_iterator &x) const {
+ return !operator==(x);
+ }
+
+ /// atEnd - return true if this iterator is at the end of uses list.
+ bool atEnd() const { return Op == 0; }
+
+ // Iterator traversal: forward iteration only.
+ use_iterator &operator++() { // Preincrement
+ assert(Op && "Cannot increment end iterator!");
+ Op = Op->getNext();
+ return *this;
+ }
+
+ use_iterator operator++(int) { // Postincrement
+ use_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ /// Retrieve a pointer to the current user node.
+ SDNode *operator*() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return Op->getUser();
+ }
+
+ SDNode *operator->() const { return operator*(); }
+
+ SDUse &getUse() const { return *Op; }
+
+ /// getOperandNo - Retrieve the operand # of this use in its user.
+ ///
+ unsigned getOperandNo() const {
+ assert(Op && "Cannot dereference end iterator!");
+ return (unsigned)(Op - Op->getUser()->OperandList);
+ }
+ };
+
+ /// use_begin/use_end - Provide iteration support to walk over all uses
+ /// of an SDNode.
+
+ use_iterator use_begin() const {
+ return use_iterator(UseList);
+ }
+
+ static use_iterator use_end() { return use_iterator(0); }
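+
+ // Illustrative sketch (not part of the interface; N is assumed to be an
+ // SDNode*): walking all uses of a node with the iterator above.
+ //   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
+ //        UI != E; ++UI) {
+ //     SDNode *User = *UI;                  // node that uses a value of N
+ //     unsigned OpNo = UI.getOperandNo();   // which operand of User refers to N
+ //   }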
+
+
+ /// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
+ /// indicated value. This method ignores uses of other values defined by this
+ /// operation.
+ bool hasNUsesOfValue(unsigned NUses, unsigned Value) const;
+
+ /// hasAnyUseOfValue - Return true if there is any use of the indicated
+ /// value. This method ignores uses of other values defined by this operation.
+ bool hasAnyUseOfValue(unsigned Value) const;
+
+ /// isOnlyUserOf - Return true if this node is the only use of N.
+ ///
+ bool isOnlyUserOf(SDNode *N) const;
+
+ /// isOperandOf - Return true if this node is an operand of N.
+ ///
+ bool isOperandOf(SDNode *N) const;
+
+ /// isPredecessorOf - Return true if this node is a predecessor of N. This
+ /// node is either an operand of N or it can be reached by recursively
+ /// traversing up the operands.
+ /// NOTE: this is an expensive method. Use it carefully.
+ bool isPredecessorOf(SDNode *N) const;
+
+ /// getNumOperands - Return the number of values used by this operation.
+ ///
+ unsigned getNumOperands() const { return NumOperands; }
+
+ /// getConstantOperandVal - Helper method returns the integer value of a
+ /// ConstantSDNode operand.
+ uint64_t getConstantOperandVal(unsigned Num) const;
+
+ const SDValue &getOperand(unsigned Num) const {
+ assert(Num < NumOperands && "Invalid child # of SDNode!");
+ return OperandList[Num];
+ }
+
+ typedef SDUse* op_iterator;
+ op_iterator op_begin() const { return OperandList; }
+ op_iterator op_end() const { return OperandList+NumOperands; }
+
+ SDVTList getVTList() const {
+ SDVTList X = { ValueList, NumValues };
+ return X;
+ }
+
+ /// getFlaggedNode - If this node has a flag operand, return the node
+ /// to which the flag operand points. Otherwise return NULL.
+ SDNode *getFlaggedNode() const {
+ if (getNumOperands() != 0 &&
+ getOperand(getNumOperands()-1).getValueType() == MVT::Flag)
+ return getOperand(getNumOperands()-1).getNode();
+ return 0;
+ }
+
+ // If this is a pseudo op, like copyfromreg, look to see if there is a
+ // real target node flagged to it. If so, return the target node.
+ const SDNode *getFlaggedMachineNode() const {
+ const SDNode *FoundNode = this;
+
+ // Climb up flag edges until a machine-opcode node is found, or the
+ // end of the chain is reached.
+ while (!FoundNode->isMachineOpcode()) {
+ const SDNode *N = FoundNode->getFlaggedNode();
+ if (!N) break;
+ FoundNode = N;
+ }
+
+ return FoundNode;
+ }
+
+ /// getNumValues - Return the number of values defined/returned by this
+ /// operator.
+ ///
+ unsigned getNumValues() const { return NumValues; }
+
+ /// getValueType - Return the type of a specified result.
+ ///
+ MVT getValueType(unsigned ResNo) const {
+ assert(ResNo < NumValues && "Illegal result number!");
+ return ValueList[ResNo];
+ }
+
+ /// getValueSizeInBits - Returns MVT::getSizeInBits(getValueType(ResNo)).
+ ///
+ unsigned getValueSizeInBits(unsigned ResNo) const {
+ return getValueType(ResNo).getSizeInBits();
+ }
+
+ typedef const MVT* value_iterator;
+ value_iterator value_begin() const { return ValueList; }
+ value_iterator value_end() const { return ValueList+NumValues; }
+
+ /// getOperationName - Return the opcode of this operation for printing.
+ ///
+ std::string getOperationName(const SelectionDAG *G = 0) const;
+ static const char* getIndexedModeName(ISD::MemIndexedMode AM);
+ void print_types(raw_ostream &OS, const SelectionDAG *G) const;
+ void print_details(raw_ostream &OS, const SelectionDAG *G) const;
+ void print(raw_ostream &OS, const SelectionDAG *G = 0) const;
+ void printr(raw_ostream &OS, const SelectionDAG *G = 0) const;
+ void dump() const;
+ void dumpr() const;
+ void dump(const SelectionDAG *G) const;
+
+ static bool classof(const SDNode *) { return true; }
+
+ /// Profile - Gather unique data for the node.
+ ///
+ void Profile(FoldingSetNodeID &ID) const;
+
+ /// addUse - This method should only be used by the SDUse class.
+ ///
+ void addUse(SDUse &U) { U.addToList(&UseList); }
+
+protected:
+ static SDVTList getSDVTList(MVT VT) {
+ SDVTList Ret = { getValueTypeList(VT), 1 };
+ return Ret;
+ }
+
+ SDNode(unsigned Opc, const DebugLoc dl, SDVTList VTs, const SDValue *Ops,
+ unsigned NumOps)
+ : NodeType(Opc), OperandsNeedDelete(true), SubclassData(0),
+ NodeId(-1),
+ OperandList(NumOps ? new SDUse[NumOps] : 0),
+ ValueList(VTs.VTs), UseList(NULL),
+ NumOperands(NumOps), NumValues(VTs.NumVTs),
+ debugLoc(dl) {
+ for (unsigned i = 0; i != NumOps; ++i) {
+ OperandList[i].setUser(this);
+ OperandList[i].setInitial(Ops[i]);
+ }
+ }
+
+ /// This constructor adds no operands itself; operands can be
+ /// set later with InitOperands.
+ SDNode(unsigned Opc, const DebugLoc dl, SDVTList VTs)
+ : NodeType(Opc), OperandsNeedDelete(false), SubclassData(0),
+ NodeId(-1), OperandList(0), ValueList(VTs.VTs), UseList(NULL),
+ NumOperands(0), NumValues(VTs.NumVTs),
+ debugLoc(dl) {}
+
+ /// InitOperands - Initialize the operands list of this with 1 operand.
+ void InitOperands(SDUse *Ops, const SDValue &Op0) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ NumOperands = 1;
+ OperandList = Ops;
+ }
+
+ /// InitOperands - Initialize the operands list of this with 2 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ NumOperands = 2;
+ OperandList = Ops;
+ }
+
+ /// InitOperands - Initialize the operands list of this with 3 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
+ const SDValue &Op2) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ Ops[2].setUser(this);
+ Ops[2].setInitial(Op2);
+ NumOperands = 3;
+ OperandList = Ops;
+ }
+
+ /// InitOperands - Initialize the operands list of this with 4 operands.
+ void InitOperands(SDUse *Ops, const SDValue &Op0, const SDValue &Op1,
+ const SDValue &Op2, const SDValue &Op3) {
+ Ops[0].setUser(this);
+ Ops[0].setInitial(Op0);
+ Ops[1].setUser(this);
+ Ops[1].setInitial(Op1);
+ Ops[2].setUser(this);
+ Ops[2].setInitial(Op2);
+ Ops[3].setUser(this);
+ Ops[3].setInitial(Op3);
+ NumOperands = 4;
+ OperandList = Ops;
+ }
+
+ /// InitOperands - Initialize the operands list of this with N operands.
+ void InitOperands(SDUse *Ops, const SDValue *Vals, unsigned N) {
+ for (unsigned i = 0; i != N; ++i) {
+ Ops[i].setUser(this);
+ Ops[i].setInitial(Vals[i]);
+ }
+ NumOperands = N;
+ OperandList = Ops;
+ }
+
+ /// DropOperands - Release the operands and set this node to have
+ /// zero operands.
+ void DropOperands();
+};
+
+
+// Define inline functions from the SDValue class.
+
+inline unsigned SDValue::getOpcode() const {
+ return Node->getOpcode();
+}
+inline MVT SDValue::getValueType() const {
+ return Node->getValueType(ResNo);
+}
+inline unsigned SDValue::getNumOperands() const {
+ return Node->getNumOperands();
+}
+inline const SDValue &SDValue::getOperand(unsigned i) const {
+ return Node->getOperand(i);
+}
+inline uint64_t SDValue::getConstantOperandVal(unsigned i) const {
+ return Node->getConstantOperandVal(i);
+}
+inline bool SDValue::isTargetOpcode() const {
+ return Node->isTargetOpcode();
+}
+inline bool SDValue::isMachineOpcode() const {
+ return Node->isMachineOpcode();
+}
+inline unsigned SDValue::getMachineOpcode() const {
+ return Node->getMachineOpcode();
+}
+inline bool SDValue::use_empty() const {
+ return !Node->hasAnyUseOfValue(ResNo);
+}
+inline bool SDValue::hasOneUse() const {
+ return Node->hasNUsesOfValue(1, ResNo);
+}
+inline const DebugLoc SDValue::getDebugLoc() const {
+ return Node->getDebugLoc();
+}
+
+// Define inline functions from the SDUse class.
+
+inline void SDUse::set(const SDValue &V) {
+ if (Val.getNode()) removeFromList();
+ Val = V;
+ if (V.getNode()) V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setInitial(const SDValue &V) {
+ Val = V;
+ V.getNode()->addUse(*this);
+}
+
+inline void SDUse::setNode(SDNode *N) {
+ if (Val.getNode()) removeFromList();
+ Val.setNode(N);
+ if (N) N->addUse(*this);
+}
+
+/// UnarySDNode - This class is used for single-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class UnarySDNode : public SDNode {
+ SDUse Op;
+public:
+ UnarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X)
+ : SDNode(Opc, dl, VTs) {
+ InitOperands(&Op, X);
+ }
+};
+
+/// BinarySDNode - This class is used for two-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class BinarySDNode : public SDNode {
+ SDUse Ops[2];
+public:
+ BinarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X, SDValue Y)
+ : SDNode(Opc, dl, VTs) {
+ InitOperands(Ops, X, Y);
+ }
+};
+
+/// TernarySDNode - This class is used for three-operand SDNodes. This is solely
+/// to allow co-allocation of node operands with the node itself.
+class TernarySDNode : public SDNode {
+ SDUse Ops[3];
+public:
+ TernarySDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, SDValue X, SDValue Y,
+ SDValue Z)
+ : SDNode(Opc, dl, VTs) {
+ InitOperands(Ops, X, Y, Z);
+ }
+};
+
+
+/// HandleSDNode - This class is used to form a handle around another node that
+/// is persistent and is updated across invocations of ReplaceAllUsesWith on its
+/// operand. This node should be directly created by end-users and not added to
+/// the AllNodes list.
+class HandleSDNode : public SDNode {
+ SDUse Op;
+public:
+ // FIXME: Remove the "noinline" attribute once <rdar://problem/5852746> is
+ // fixed.
+#ifdef __GNUC__
+ explicit __attribute__((__noinline__)) HandleSDNode(SDValue X)
+#else
+ explicit HandleSDNode(SDValue X)
+#endif
+ : SDNode(ISD::HANDLENODE, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)) {
+ InitOperands(&Op, X);
+ }
+ ~HandleSDNode();
+ const SDValue &getValue() const { return Op; }
+};
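+
+// Illustrative sketch (assumes a SelectionDAG 'DAG' and an SDValue 'Val'):
+// HandleSDNode is normally created on the stack to keep a value alive and up
+// to date across a replacement that may delete or rewrite other nodes.
+//   HandleSDNode Handle(Val);
+//   DAG.ReplaceAllUsesWith(OldNode, NewNode);  // may touch Val's node
+//   Val = Handle.getValue();                   // re-read the updated value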
+
+/// MemSDNode - Abstract base class for nodes that access memory.
+class MemSDNode : public SDNode {
+private:
+ // MemoryVT - VT of in-memory value.
+ MVT MemoryVT;
+
+ //! SrcValue - Memory location for alias analysis.
+ const Value *SrcValue;
+
+ //! SVOffset - Memory location offset. Note that base is defined in MemSDNode
+ int SVOffset;
+
+public:
+ MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, MVT MemoryVT,
+ const Value *srcValue, int SVOff,
+ unsigned alignment, bool isvolatile);
+
+ MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, const SDValue *Ops,
+ unsigned NumOps, MVT MemoryVT, const Value *srcValue, int SVOff,
+ unsigned alignment, bool isvolatile);
+
+ /// Returns alignment and volatility of the memory access
+ unsigned getAlignment() const { return (1u << (SubclassData >> 6)) >> 1; }
+ bool isVolatile() const { return (SubclassData >> 5) & 1; }
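+
+ // Worked example of the SubclassData encoding used above: bit 5 holds the
+ // volatile flag and bits 6 and up hold log2(alignment) + 1, so a stored
+ // field value of 3 decodes to (1u << 3) >> 1 == 4, and a stored 0 decodes
+ // to alignment 0 (unknown).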
+
+ /// getRawSubclassData - Return the SubclassData value, which contains an
+ /// encoding of the alignment and volatile information, as well as bits
+ /// used by subclasses. This function should only be used to compute a
+ /// FoldingSetNodeID value.
+ unsigned getRawSubclassData() const {
+ return SubclassData;
+ }
+
+ /// Returns the SrcValue and offset that describes the location of the access
+ const Value *getSrcValue() const { return SrcValue; }
+ int getSrcValueOffset() const { return SVOffset; }
+
+ /// getMemoryVT - Return the type of the in-memory value.
+ MVT getMemoryVT() const { return MemoryVT; }
+
+ /// getMemOperand - Return a MachineMemOperand object describing the memory
+ /// reference performed by operation.
+ MachineMemOperand getMemOperand() const;
+
+ const SDValue &getChain() const { return getOperand(0); }
+ const SDValue &getBasePtr() const {
+ return getOperand(getOpcode() == ISD::STORE ? 2 : 1);
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const MemSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ // For some targets, we lower some target intrinsics to a MemIntrinsicNode
+ // with either an intrinsic or a target opcode.
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE ||
+ N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+ N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->isTargetOpcode();
+ }
+};
+
+/// AtomicSDNode - An SDNode representing atomic operations.
+///
+class AtomicSDNode : public MemSDNode {
+ SDUse Ops[4];
+
+public:
+ // Opc: opcode for atomic
+ // VTL: value type list
+ // Chain: memory chain for operand
+ // Ptr: address to update as a SDValue
+ // Cmp: compare value
+ // Swp: swap value
+ // SrcVal: address to update as a Value (used for MemOperand)
+ // Align: alignment of memory
+ AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, MVT MemVT,
+ SDValue Chain, SDValue Ptr,
+ SDValue Cmp, SDValue Swp, const Value* SrcVal,
+ unsigned Align=0)
+ : MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
+ Align, /*isVolatile=*/true) {
+ InitOperands(Ops, Chain, Ptr, Cmp, Swp);
+ }
+ AtomicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTL, MVT MemVT,
+ SDValue Chain, SDValue Ptr,
+ SDValue Val, const Value* SrcVal, unsigned Align=0)
+ : MemSDNode(Opc, dl, VTL, MemVT, SrcVal, /*SVOffset=*/0,
+ Align, /*isVolatile=*/true) {
+ InitOperands(Ops, Chain, Ptr, Val);
+ }
+
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getVal() const { return getOperand(2); }
+
+ bool isCompareAndSwap() const {
+ unsigned Op = getOpcode();
+ return Op == ISD::ATOMIC_CMP_SWAP;
+ }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const AtomicSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ATOMIC_CMP_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_SWAP ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_ADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_SUB ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_AND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_OR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_XOR ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_NAND ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_UMAX;
+ }
+};
+
+/// MemIntrinsicSDNode - This SDNode is used for target intrinsics that touch
+/// memory and need an associated memory operand.
+///
+class MemIntrinsicSDNode : public MemSDNode {
+ bool ReadMem; // Intrinsic reads memory
+ bool WriteMem; // Intrinsic writes memory
+public:
+ MemIntrinsicSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
+ const SDValue *Ops, unsigned NumOps,
+ MVT MemoryVT, const Value *srcValue, int SVO,
+ unsigned Align, bool Vol, bool ReadMem, bool WriteMem)
+ : MemSDNode(Opc, dl, VTs, Ops, NumOps, MemoryVT, srcValue, SVO, Align, Vol),
+ ReadMem(ReadMem), WriteMem(WriteMem) {
+ }
+
+ bool readMem() const { return ReadMem; }
+ bool writeMem() const { return WriteMem; }
+
+ // Methods to support isa and dyn_cast
+ static bool classof(const MemIntrinsicSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ // We lower some target intrinsics to their target opcode early, so a node
+ // with a target opcode can also be of this class.
+ return N->getOpcode() == ISD::INTRINSIC_W_CHAIN ||
+ N->getOpcode() == ISD::INTRINSIC_VOID ||
+ N->isTargetOpcode();
+ }
+};
+
+/// ShuffleVectorSDNode - This SDNode is used to implement the code generator
+/// support for the LLVM IR shufflevector instruction. It combines elements
+/// from two input vectors into a new output vector, with the selection and
+/// ordering of elements determined by an array of integers, referred to as
+/// the shuffle mask. For input vectors of width N, mask indices of 0..N-1
+/// refer to elements from the LHS input, and indices of N..2N-1 refer to
+/// elements from the RHS input.
+/// An index of -1 is treated as undef, such that the code generator may put
+/// any value in the corresponding element of the result.
+class ShuffleVectorSDNode : public SDNode {
+ SDUse Ops[2];
+
+ // The memory for Mask is owned by the SelectionDAG's OperandAllocator, and
+ // is freed when the SelectionDAG object is destroyed.
+ const int *Mask;
+protected:
+ friend class SelectionDAG;
+ ShuffleVectorSDNode(MVT VT, DebugLoc dl, SDValue N1, SDValue N2,
+ const int *M)
+ : SDNode(ISD::VECTOR_SHUFFLE, dl, getSDVTList(VT)), Mask(M) {
+ InitOperands(Ops, N1, N2);
+ }
+public:
+
+ void getMask(SmallVectorImpl<int> &M) const {
+ MVT VT = getValueType(0);
+ M.clear();
+ for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
+ M.push_back(Mask[i]);
+ }
+ int getMaskElt(unsigned Idx) const {
+ assert(Idx < getValueType(0).getVectorNumElements() && "Idx out of range!");
+ return Mask[Idx];
+ }
+
+ bool isSplat() const { return isSplatMask(Mask, getValueType(0)); }
+ int getSplatIndex() const {
+ assert(isSplat() && "Cannot get splat index for non-splat!");
+ return Mask[0];
+ }
+ static bool isSplatMask(const int *Mask, MVT VT);
+
+ static bool classof(const ShuffleVectorSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::VECTOR_SHUFFLE;
+ }
+};
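+
+// Illustrative example (the element type is chosen by us): for two <4 x i32>
+// inputs LHS and RHS, a mask of {0, 4, 1, 5} produces
+// <LHS[0], RHS[0], LHS[1], RHS[1]>, and a mask element of -1 leaves the
+// corresponding result element undefined.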
+
+class ConstantSDNode : public SDNode {
+ const ConstantInt *Value;
+ friend class SelectionDAG;
+ ConstantSDNode(bool isTarget, const ConstantInt *val, MVT VT)
+ : SDNode(isTarget ? ISD::TargetConstant : ISD::Constant,
+ DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) {
+ }
+public:
+
+ const ConstantInt *getConstantIntValue() const { return Value; }
+ const APInt &getAPIntValue() const { return Value->getValue(); }
+ uint64_t getZExtValue() const { return Value->getZExtValue(); }
+ int64_t getSExtValue() const { return Value->getSExtValue(); }
+
+ bool isNullValue() const { return Value->isNullValue(); }
+ bool isAllOnesValue() const { return Value->isAllOnesValue(); }
+
+ static bool classof(const ConstantSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::Constant ||
+ N->getOpcode() == ISD::TargetConstant;
+ }
+};
+
+class ConstantFPSDNode : public SDNode {
+ const ConstantFP *Value;
+ friend class SelectionDAG;
+ ConstantFPSDNode(bool isTarget, const ConstantFP *val, MVT VT)
+ : SDNode(isTarget ? ISD::TargetConstantFP : ISD::ConstantFP,
+ DebugLoc::getUnknownLoc(), getSDVTList(VT)), Value(val) {
+ }
+public:
+
+ const APFloat& getValueAPF() const { return Value->getValueAPF(); }
+ const ConstantFP *getConstantFPValue() const { return Value; }
+
+ /// isExactlyValue - We don't rely on operator== working on double values, as
+ /// it returns true for things that are clearly not equal, like -0.0 and 0.0.
+ /// As such, this method can be used to do an exact bit-for-bit comparison of
+ /// two floating point values.
+
+ /// We leave the version with the double argument here because it's just so
+ /// convenient to write "2.0" and the like. Without this function we'd
+ /// have to duplicate its logic everywhere it's called.
+ bool isExactlyValue(double V) const {
+ bool ignored;
+ // convert is not supported on PPCDoubleDouble; bail out.
+ if (&Value->getValueAPF().getSemantics() == &APFloat::PPCDoubleDouble)
+ return false;
+ APFloat Tmp(V);
+ Tmp.convert(Value->getValueAPF().getSemantics(),
+ APFloat::rmNearestTiesToEven, &ignored);
+ return isExactlyValue(Tmp);
+ }
+ bool isExactlyValue(const APFloat& V) const;
+
+ bool isValueValidForType(MVT VT, const APFloat& Val);
+
+ static bool classof(const ConstantFPSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ConstantFP ||
+ N->getOpcode() == ISD::TargetConstantFP;
+ }
+};
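+
+// Illustrative sketch (variable names are ours): matching a specific FP
+// constant with the bit-exact comparison above instead of operator== on
+// doubles.
+//   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op.getNode()))
+//     if (CFP->isExactlyValue(1.0)) {
+//       // Op is bit-for-bit equal to +1.0
+//     }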
+
+class GlobalAddressSDNode : public SDNode {
+ GlobalValue *TheGlobal;
+ int64_t Offset;
+ friend class SelectionDAG;
+ GlobalAddressSDNode(bool isTarget, const GlobalValue *GA, MVT VT,
+ int64_t o = 0);
+public:
+
+ GlobalValue *getGlobal() const { return TheGlobal; }
+ int64_t getOffset() const { return Offset; }
+ // Return the address space this GlobalAddress belongs to.
+ unsigned getAddressSpace() const;
+
+ static bool classof(const GlobalAddressSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::GlobalAddress ||
+ N->getOpcode() == ISD::TargetGlobalAddress ||
+ N->getOpcode() == ISD::GlobalTLSAddress ||
+ N->getOpcode() == ISD::TargetGlobalTLSAddress;
+ }
+};
+
+class FrameIndexSDNode : public SDNode {
+ int FI;
+ friend class SelectionDAG;
+ FrameIndexSDNode(int fi, MVT VT, bool isTarg)
+ : SDNode(isTarg ? ISD::TargetFrameIndex : ISD::FrameIndex,
+ DebugLoc::getUnknownLoc(), getSDVTList(VT)), FI(fi) {
+ }
+public:
+
+ int getIndex() const { return FI; }
+
+ static bool classof(const FrameIndexSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::FrameIndex ||
+ N->getOpcode() == ISD::TargetFrameIndex;
+ }
+};
+
+class JumpTableSDNode : public SDNode {
+ int JTI;
+ friend class SelectionDAG;
+ JumpTableSDNode(int jti, MVT VT, bool isTarg)
+ : SDNode(isTarg ? ISD::TargetJumpTable : ISD::JumpTable,
+ DebugLoc::getUnknownLoc(), getSDVTList(VT)), JTI(jti) {
+ }
+public:
+
+ int getIndex() const { return JTI; }
+
+ static bool classof(const JumpTableSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::JumpTable ||
+ N->getOpcode() == ISD::TargetJumpTable;
+ }
+};
+
+class ConstantPoolSDNode : public SDNode {
+ union {
+ Constant *ConstVal;
+ MachineConstantPoolValue *MachineCPVal;
+ } Val;
+ int Offset; // It's a MachineConstantPoolValue if top bit is set.
+ unsigned Alignment; // Minimum alignment requirement of CP (not log2 value).
+ friend class SelectionDAG;
+ ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o=0)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
+ DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Offset(o), Alignment(0) {
+ assert((int)Offset >= 0 && "Offset is too large");
+ Val.ConstVal = c;
+ }
+ ConstantPoolSDNode(bool isTarget, Constant *c, MVT VT, int o, unsigned Align)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
+ DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Offset(o), Alignment(Align) {
+ assert((int)Offset >= 0 && "Offset is too large");
+ Val.ConstVal = c;
+ }
+ ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
+ MVT VT, int o=0)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
+ DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Offset(o), Alignment(0) {
+ assert((int)Offset >= 0 && "Offset is too large");
+ Val.MachineCPVal = v;
+ Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+ }
+ ConstantPoolSDNode(bool isTarget, MachineConstantPoolValue *v,
+ MVT VT, int o, unsigned Align)
+ : SDNode(isTarget ? ISD::TargetConstantPool : ISD::ConstantPool,
+ DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Offset(o), Alignment(Align) {
+ assert((int)Offset >= 0 && "Offset is too large");
+ Val.MachineCPVal = v;
+ Offset |= 1 << (sizeof(unsigned)*CHAR_BIT-1);
+ }
+public:
+
+ bool isMachineConstantPoolEntry() const {
+ return (int)Offset < 0;
+ }
+
+ Constant *getConstVal() const {
+ assert(!isMachineConstantPoolEntry() && "Wrong constantpool type");
+ return Val.ConstVal;
+ }
+
+ MachineConstantPoolValue *getMachineCPVal() const {
+ assert(isMachineConstantPoolEntry() && "Wrong constantpool type");
+ return Val.MachineCPVal;
+ }
+
+ int getOffset() const {
+ return Offset & ~(1 << (sizeof(unsigned)*CHAR_BIT-1));
+ }
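+
+ // Worked example of the Offset encoding above: for a MachineConstantPoolValue
+ // the constructors set the top bit of Offset, so isMachineConstantPoolEntry()
+ // observes a negative value, while getOffset() masks that bit back off and
+ // returns the original offset.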
+
+ // Return the alignment of this constant pool object, which is either 0 (for
+ // default alignment) or the desired value.
+ unsigned getAlignment() const { return Alignment; }
+
+ const Type *getType() const;
+
+ static bool classof(const ConstantPoolSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ConstantPool ||
+ N->getOpcode() == ISD::TargetConstantPool;
+ }
+};
+
+class BasicBlockSDNode : public SDNode {
+ MachineBasicBlock *MBB;
+ friend class SelectionDAG;
+ /// Debug info is meaningful and potentially useful here, but we create
+ /// blocks out of order when they're jumped to, which makes it a bit
+ /// harder. Let's see if we need it first.
+ explicit BasicBlockSDNode(MachineBasicBlock *mbb)
+ : SDNode(ISD::BasicBlock, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), MBB(mbb) {
+ }
+public:
+
+ MachineBasicBlock *getBasicBlock() const { return MBB; }
+
+ static bool classof(const BasicBlockSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::BasicBlock;
+ }
+};
+
+/// BuildVectorSDNode - A "pseudo-class" with methods for operating on
+/// BUILD_VECTORs.
+class BuildVectorSDNode : public SDNode {
+ // These are constructed as SDNodes and then cast to BuildVectorSDNodes.
+ explicit BuildVectorSDNode(); // Do not implement
+public:
+ /// isConstantSplat - Check if this is a constant splat, and if so, find the
+ /// smallest element size that splats the vector. If MinSplatBits is
+ /// nonzero, the element size must be at least that large. Note that the
+ /// splat element may be the entire vector (i.e., a one element vector).
+ /// Returns the splat element value in SplatValue. Any undefined bits in
+ /// that value are zero, and the corresponding bits in the SplatUndef mask
+ /// are set. The SplatBitSize value is set to the splat element size in
+ /// bits. HasAnyUndefs is set to true if any bits in the vector are
+ /// undefined.
+ bool isConstantSplat(APInt &SplatValue, APInt &SplatUndef,
+ unsigned &SplatBitSize, bool &HasAnyUndefs,
+ unsigned MinSplatBits = 0);
+
+ static inline bool classof(const BuildVectorSDNode *) { return true; }
+ static inline bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::BUILD_VECTOR;
+ }
+};
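+
+// Illustrative sketch (variable names are ours; N is assumed to be an SDNode*):
+// checking whether a BUILD_VECTOR node is a constant splat of at least 32 bits.
+//   APInt SplatValue, SplatUndef;
+//   unsigned SplatBitSize;
+//   bool HasAnyUndefs;
+//   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N))
+//     if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
+//                             HasAnyUndefs, 32)) {
+//       // SplatValue holds the splatted element of at least 32 bits
+//     }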
+
+/// SrcValueSDNode - An SDNode that holds an arbitrary LLVM IR Value. This is
+/// used when the SelectionDAG needs to make a simple reference to something
+/// in the LLVM IR representation.
+///
+/// Note that this is not used for carrying alias information; that is done
+/// with MemOperandSDNode, which includes a Value which is required to be a
+/// pointer, and several other fields specific to memory references.
+///
+class SrcValueSDNode : public SDNode {
+ const Value *V;
+ friend class SelectionDAG;
+ /// Create a SrcValue for a general value.
+ explicit SrcValueSDNode(const Value *v)
+ : SDNode(ISD::SRCVALUE, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), V(v) {}
+
+public:
+ /// getValue - return the contained Value.
+ const Value *getValue() const { return V; }
+
+ static bool classof(const SrcValueSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::SRCVALUE;
+ }
+};
+
+
+/// MemOperandSDNode - An SDNode that holds a MachineMemOperand. This is
+/// used to represent a reference to memory after ISD::LOAD
+/// and ISD::STORE have been lowered.
+///
+class MemOperandSDNode : public SDNode {
+ friend class SelectionDAG;
+ /// Create a MachineMemOperand node
+ explicit MemOperandSDNode(const MachineMemOperand &mo)
+ : SDNode(ISD::MEMOPERAND, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), MO(mo) {}
+
+public:
+ /// MO - The contained MachineMemOperand.
+ const MachineMemOperand MO;
+
+ static bool classof(const MemOperandSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::MEMOPERAND;
+ }
+};
+
+
+class RegisterSDNode : public SDNode {
+ unsigned Reg;
+ friend class SelectionDAG;
+ RegisterSDNode(unsigned reg, MVT VT)
+ : SDNode(ISD::Register, DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Reg(reg) {
+ }
+public:
+
+ unsigned getReg() const { return Reg; }
+
+ static bool classof(const RegisterSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::Register;
+ }
+};
+
+class DbgStopPointSDNode : public SDNode {
+ SDUse Chain;
+ unsigned Line;
+ unsigned Column;
+ Value *CU;
+ friend class SelectionDAG;
+ DbgStopPointSDNode(SDValue ch, unsigned l, unsigned c,
+ Value *cu)
+ : SDNode(ISD::DBG_STOPPOINT, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), Line(l), Column(c), CU(cu) {
+ InitOperands(&Chain, ch);
+ }
+public:
+ unsigned getLine() const { return Line; }
+ unsigned getColumn() const { return Column; }
+ Value *getCompileUnit() const { return CU; }
+
+ static bool classof(const DbgStopPointSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::DBG_STOPPOINT;
+ }
+};
+
+class LabelSDNode : public SDNode {
+ SDUse Chain;
+ unsigned LabelID;
+ friend class SelectionDAG;
+ LabelSDNode(unsigned NodeTy, DebugLoc dl, SDValue ch, unsigned id)
+ : SDNode(NodeTy, dl, getSDVTList(MVT::Other)), LabelID(id) {
+ InitOperands(&Chain, ch);
+ }
+public:
+ unsigned getLabelID() const { return LabelID; }
+
+ static bool classof(const LabelSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::DBG_LABEL ||
+ N->getOpcode() == ISD::EH_LABEL;
+ }
+};
+
+class ExternalSymbolSDNode : public SDNode {
+ const char *Symbol;
+ friend class SelectionDAG;
+ ExternalSymbolSDNode(bool isTarget, const char *Sym, MVT VT)
+ : SDNode(isTarget ? ISD::TargetExternalSymbol : ISD::ExternalSymbol,
+ DebugLoc::getUnknownLoc(),
+ getSDVTList(VT)), Symbol(Sym) {
+ }
+public:
+
+ const char *getSymbol() const { return Symbol; }
+
+ static bool classof(const ExternalSymbolSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ExternalSymbol ||
+ N->getOpcode() == ISD::TargetExternalSymbol;
+ }
+};
+
+class CondCodeSDNode : public SDNode {
+ ISD::CondCode Condition;
+ friend class SelectionDAG;
+ explicit CondCodeSDNode(ISD::CondCode Cond)
+ : SDNode(ISD::CONDCODE, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), Condition(Cond) {
+ }
+public:
+
+ ISD::CondCode get() const { return Condition; }
+
+ static bool classof(const CondCodeSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::CONDCODE;
+ }
+};
+
+/// CvtRndSatSDNode - NOTE: avoid using this node as this may disappear in the
+/// future and most targets don't support it.
+class CvtRndSatSDNode : public SDNode {
+ ISD::CvtCode CvtCode;
+ friend class SelectionDAG;
+ explicit CvtRndSatSDNode(MVT VT, DebugLoc dl, const SDValue *Ops,
+ unsigned NumOps, ISD::CvtCode Code)
+ : SDNode(ISD::CONVERT_RNDSAT, dl, getSDVTList(VT), Ops, NumOps),
+ CvtCode(Code) {
+ assert(NumOps == 5 && "wrong number of operands");
+ }
+public:
+ ISD::CvtCode getCvtCode() const { return CvtCode; }
+
+ static bool classof(const CvtRndSatSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::CONVERT_RNDSAT;
+ }
+};
+
+namespace ISD {
+ struct ArgFlagsTy {
+ private:
+ static const uint64_t NoFlagSet = 0ULL;
+ static const uint64_t ZExt = 1ULL<<0; ///< Zero extended
+ static const uint64_t ZExtOffs = 0;
+ static const uint64_t SExt = 1ULL<<1; ///< Sign extended
+ static const uint64_t SExtOffs = 1;
+ static const uint64_t InReg = 1ULL<<2; ///< Passed in register
+ static const uint64_t InRegOffs = 2;
+ static const uint64_t SRet = 1ULL<<3; ///< Hidden struct-ret ptr
+ static const uint64_t SRetOffs = 3;
+ static const uint64_t ByVal = 1ULL<<4; ///< Struct passed by value
+ static const uint64_t ByValOffs = 4;
+ static const uint64_t Nest = 1ULL<<5; ///< Nested fn static chain
+ static const uint64_t NestOffs = 5;
+ static const uint64_t ByValAlign = 0xFULL << 6; ///< Struct alignment
+ static const uint64_t ByValAlignOffs = 6;
+ static const uint64_t Split = 1ULL << 10;
+ static const uint64_t SplitOffs = 10;
+ static const uint64_t OrigAlign = 0x1FULL<<27;
+ static const uint64_t OrigAlignOffs = 27;
+ static const uint64_t ByValSize = 0xffffffffULL << 32; ///< Struct size
+ static const uint64_t ByValSizeOffs = 32;
+
+ static const uint64_t One = 1ULL; ///< 1 of this type, for shifts
+
+ uint64_t Flags;
+ public:
+ ArgFlagsTy() : Flags(0) { }
+
+ bool isZExt() const { return Flags & ZExt; }
+ void setZExt() { Flags |= One << ZExtOffs; }
+
+ bool isSExt() const { return Flags & SExt; }
+ void setSExt() { Flags |= One << SExtOffs; }
+
+ bool isInReg() const { return Flags & InReg; }
+ void setInReg() { Flags |= One << InRegOffs; }
+
+ bool isSRet() const { return Flags & SRet; }
+ void setSRet() { Flags |= One << SRetOffs; }
+
+ bool isByVal() const { return Flags & ByVal; }
+ void setByVal() { Flags |= One << ByValOffs; }
+
+ bool isNest() const { return Flags & Nest; }
+ void setNest() { Flags |= One << NestOffs; }
+
+ unsigned getByValAlign() const {
+ return (unsigned)
+ ((One << ((Flags & ByValAlign) >> ByValAlignOffs)) / 2);
+ }
+ void setByValAlign(unsigned A) {
+ Flags = (Flags & ~ByValAlign) |
+ (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
+ }
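+
+ // Worked example of the encoding above: setByValAlign(8) stores
+ // Log2_32(8) + 1 == 4 in the ByValAlign field, and getByValAlign() decodes
+ // it as (1 << 4) / 2 == 8; an all-zero field decodes to alignment 0.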
+
+ bool isSplit() const { return Flags & Split; }
+ void setSplit() { Flags |= One << SplitOffs; }
+
+ unsigned getOrigAlign() const {
+ return (unsigned)
+ ((One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2);
+ }
+ void setOrigAlign(unsigned A) {
+ Flags = (Flags & ~OrigAlign) |
+ (uint64_t(Log2_32(A) + 1) << OrigAlignOffs);
+ }
+
+ unsigned getByValSize() const {
+ return (unsigned)((Flags & ByValSize) >> ByValSizeOffs);
+ }
+ void setByValSize(unsigned S) {
+ Flags = (Flags & ~ByValSize) | (uint64_t(S) << ByValSizeOffs);
+ }
+
+ /// getArgFlagsString - Returns the flags as a string, eg: "zext align:4".
+ std::string getArgFlagsString();
+
+ /// getRawBits - Represent the flags as a bunch of bits.
+ uint64_t getRawBits() const { return Flags; }
+ };
+}
+
+/// ARG_FLAGSSDNode - Leaf node holding parameter flags.
+class ARG_FLAGSSDNode : public SDNode {
+ ISD::ArgFlagsTy TheFlags;
+ friend class SelectionDAG;
+ explicit ARG_FLAGSSDNode(ISD::ArgFlagsTy Flags)
+ : SDNode(ISD::ARG_FLAGS, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), TheFlags(Flags) {
+ }
+public:
+ ISD::ArgFlagsTy getArgFlags() const { return TheFlags; }
+
+ static bool classof(const ARG_FLAGSSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::ARG_FLAGS;
+ }
+};
+
+/// CallSDNode - Node for calls -- ISD::CALL.
+class CallSDNode : public SDNode {
+ unsigned CallingConv;
+ bool IsVarArg;
+ bool IsTailCall;
+ // We might eventually want a full-blown Attributes for the result; that
+ // will expand the size of the representation. At the moment we only
+ // need Inreg.
+ bool Inreg;
+ friend class SelectionDAG;
+ CallSDNode(unsigned cc, DebugLoc dl, bool isvararg, bool istailcall,
+ bool isinreg, SDVTList VTs, const SDValue *Operands,
+ unsigned numOperands)
+ : SDNode(ISD::CALL, dl, VTs, Operands, numOperands),
+ CallingConv(cc), IsVarArg(isvararg), IsTailCall(istailcall),
+ Inreg(isinreg) {}
+public:
+ unsigned getCallingConv() const { return CallingConv; }
+ bool isVarArg() const { return IsVarArg; }
+ bool isTailCall() const { return IsTailCall; }
+ bool isInreg() const { return Inreg; }
+
+ /// Set this call to not be marked as a tail call. Normally setter
+ /// methods in SDNodes are unsafe because they break the CSE map,
+ /// but we don't include the tail call flag for calls so it's ok
+ /// in this case.
+ void setNotTailCall() { IsTailCall = false; }
+
+ SDValue getChain() const { return getOperand(0); }
+ SDValue getCallee() const { return getOperand(1); }
+
+ unsigned getNumArgs() const { return (getNumOperands() - 2) / 2; }
+ SDValue getArg(unsigned i) const { return getOperand(2+2*i); }
+ SDValue getArgFlagsVal(unsigned i) const {
+ return getOperand(3+2*i);
+ }
+ ISD::ArgFlagsTy getArgFlags(unsigned i) const {
+ return cast<ARG_FLAGSSDNode>(getArgFlagsVal(i).getNode())->getArgFlags();
+ }
+
+ unsigned getNumRetVals() const { return getNumValues() - 1; }
+ MVT getRetValType(unsigned i) const { return getValueType(i); }
+
+ static bool classof(const CallSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::CALL;
+ }
+};
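+
+// Illustrative sketch (variable names are ours; N is assumed to be an SDNode*):
+// walking the arguments of an ISD::CALL node, whose operands are the chain,
+// the callee, and then one (value, flags) pair per argument, as the accessors
+// above encode.
+//   if (CallSDNode *Call = dyn_cast<CallSDNode>(N))
+//     for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
+//       SDValue Arg           = Call->getArg(i);
+//       ISD::ArgFlagsTy Flags = Call->getArgFlags(i);
+//       // inspect Arg and Flags here
+//     }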
+
+/// VTSDNode - This class is used to represent MVT's, which are used
+/// to parameterize some operations.
+class VTSDNode : public SDNode {
+ MVT ValueType;
+ friend class SelectionDAG;
+ explicit VTSDNode(MVT VT)
+ : SDNode(ISD::VALUETYPE, DebugLoc::getUnknownLoc(),
+ getSDVTList(MVT::Other)), ValueType(VT) {
+ }
+public:
+
+ MVT getVT() const { return ValueType; }
+
+ static bool classof(const VTSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::VALUETYPE;
+ }
+};
+
+/// LSBaseSDNode - Base class for LoadSDNode and StoreSDNode
+///
+class LSBaseSDNode : public MemSDNode {
+ //! Operand array for load and store
+ /*!
+ \note Moving this array to the base class captures more
+ common functionality shared between LoadSDNode and
+ StoreSDNode
+ */
+ SDUse Ops[4];
+public:
+ LSBaseSDNode(ISD::NodeType NodeTy, DebugLoc dl, SDValue *Operands,
+ unsigned numOperands, SDVTList VTs, ISD::MemIndexedMode AM,
+ MVT VT, const Value *SV, int SVO, unsigned Align, bool Vol)
+ : MemSDNode(NodeTy, dl, VTs, VT, SV, SVO, Align, Vol) {
+ assert(Align != 0 && "Loads and stores should have non-zero alignment");
+ SubclassData |= AM << 2;
+ assert(getAddressingMode() == AM && "MemIndexedMode encoding error!");
+ InitOperands(Ops, Operands, numOperands);
+ assert((getOffset().getOpcode() == ISD::UNDEF || isIndexed()) &&
+ "Only indexed loads and stores have a non-undef offset operand");
+ }
+
+ const SDValue &getOffset() const {
+ return getOperand(getOpcode() == ISD::LOAD ? 2 : 3);
+ }
+
+ /// getAddressingMode - Return the addressing mode for this load or store:
+ /// unindexed, pre-inc, pre-dec, post-inc, or post-dec.
+ ISD::MemIndexedMode getAddressingMode() const {
+ return ISD::MemIndexedMode((SubclassData >> 2) & 7);
+ }
+
+ /// isIndexed - Return true if this is a pre/post inc/dec load/store.
+ bool isIndexed() const { return getAddressingMode() != ISD::UNINDEXED; }
+
+ /// isUnindexed - Return true if this is NOT a pre/post inc/dec load/store.
+ bool isUnindexed() const { return getAddressingMode() == ISD::UNINDEXED; }
+
+ static bool classof(const LSBaseSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LOAD ||
+ N->getOpcode() == ISD::STORE;
+ }
+};
+
+/// LoadSDNode - This class is used to represent ISD::LOAD nodes.
+///
+class LoadSDNode : public LSBaseSDNode {
+ friend class SelectionDAG;
+ LoadSDNode(SDValue *ChainPtrOff, DebugLoc dl, SDVTList VTs,
+ ISD::MemIndexedMode AM, ISD::LoadExtType ETy, MVT LVT,
+ const Value *SV, int O=0, unsigned Align=0, bool Vol=false)
+ : LSBaseSDNode(ISD::LOAD, dl, ChainPtrOff, 3,
+ VTs, AM, LVT, SV, O, Align, Vol) {
+ SubclassData |= (unsigned short)ETy;
+ assert(getExtensionType() == ETy && "LoadExtType encoding error!");
+ }
+public:
+
+ /// getExtensionType - Return whether this is a plain node,
+ /// or one of the varieties of value-extending loads.
+ ISD::LoadExtType getExtensionType() const {
+ return ISD::LoadExtType(SubclassData & 3);
+ }
+
+ const SDValue &getBasePtr() const { return getOperand(1); }
+ const SDValue &getOffset() const { return getOperand(2); }
+
+ static bool classof(const LoadSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::LOAD;
+ }
+};
+
+/// StoreSDNode - This class is used to represent ISD::STORE nodes.
+///
+class StoreSDNode : public LSBaseSDNode {
+ friend class SelectionDAG;
+ StoreSDNode(SDValue *ChainValuePtrOff, DebugLoc dl, SDVTList VTs,
+ ISD::MemIndexedMode AM, bool isTrunc, MVT SVT,
+ const Value *SV, int O=0, unsigned Align=0, bool Vol=false)
+ : LSBaseSDNode(ISD::STORE, dl, ChainValuePtrOff, 4,
+ VTs, AM, SVT, SV, O, Align, Vol) {
+ SubclassData |= (unsigned short)isTrunc;
+ assert(isTruncatingStore() == isTrunc && "isTrunc encoding error!");
+ }
+public:
+
+ /// isTruncatingStore - Return true if the op does a truncation before store.
+ /// For integers this is the same as doing a TRUNCATE and storing the result.
+ /// For floats, it is the same as doing an FP_ROUND and storing the result.
+ bool isTruncatingStore() const { return SubclassData & 1; }
+
+ const SDValue &getValue() const { return getOperand(1); }
+ const SDValue &getBasePtr() const { return getOperand(2); }
+ const SDValue &getOffset() const { return getOperand(3); }
+
+ static bool classof(const StoreSDNode *) { return true; }
+ static bool classof(const SDNode *N) {
+ return N->getOpcode() == ISD::STORE;
+ }
+};
+
+
+class SDNodeIterator : public forward_iterator<SDNode, ptrdiff_t> {
+ SDNode *Node;
+ unsigned Operand;
+
+ SDNodeIterator(SDNode *N, unsigned Op) : Node(N), Operand(Op) {}
+public:
+ bool operator==(const SDNodeIterator& x) const {
+ return Operand == x.Operand;
+ }
+ bool operator!=(const SDNodeIterator& x) const { return !operator==(x); }
+
+ const SDNodeIterator &operator=(const SDNodeIterator &I) {
+ assert(I.Node == Node && "Cannot assign iterators to two different nodes!");
+ Operand = I.Operand;
+ return *this;
+ }
+
+ pointer operator*() const {
+ return Node->getOperand(Operand).getNode();
+ }
+ pointer operator->() const { return operator*(); }
+
+ SDNodeIterator& operator++() { // Preincrement
+ ++Operand;
+ return *this;
+ }
+ SDNodeIterator operator++(int) { // Postincrement
+ SDNodeIterator tmp = *this; ++*this; return tmp;
+ }
+
+ static SDNodeIterator begin(SDNode *N) { return SDNodeIterator(N, 0); }
+ static SDNodeIterator end (SDNode *N) {
+ return SDNodeIterator(N, N->getNumOperands());
+ }
+
+ unsigned getOperand() const { return Operand; }
+ const SDNode *getNode() const { return Node; }
+};
+
+template <> struct GraphTraits<SDNode*> {
+ typedef SDNode NodeType;
+ typedef SDNodeIterator ChildIteratorType;
+ static inline NodeType *getEntryNode(SDNode *N) { return N; }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return SDNodeIterator::begin(N);
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return SDNodeIterator::end(N);
+ }
+};
+
+/// LargestSDNode - The largest SDNode class.
+///
+typedef LoadSDNode LargestSDNode;
+
+/// MostAlignedSDNode - The SDNode class with the greatest alignment
+/// requirement.
+///
+typedef ARG_FLAGSSDNode MostAlignedSDNode;
+
+namespace ISD {
+ /// isNormalLoad - Returns true if the specified node is a non-extending
+ /// and unindexed load.
+ inline bool isNormalLoad(const SDNode *N) {
+ const LoadSDNode *Ld = dyn_cast<LoadSDNode>(N);
+ return Ld && Ld->getExtensionType() == ISD::NON_EXTLOAD &&
+ Ld->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// isNON_EXTLoad - Returns true if the specified node is a non-extending
+ /// load.
+ inline bool isNON_EXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
+ }
+
+ /// isEXTLoad - Returns true if the specified node is a EXTLOAD.
+ ///
+ inline bool isEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+ }
+
+ /// isSEXTLoad - Returns true if the specified node is a SEXTLOAD.
+ ///
+ inline bool isSEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+ }
+
+ /// isZEXTLoad - Returns true if the specified node is a ZEXTLOAD.
+ ///
+ inline bool isZEXTLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+ }
+
+ /// isUNINDEXEDLoad - Returns true if the specified node is an unindexed load.
+ ///
+ inline bool isUNINDEXEDLoad(const SDNode *N) {
+ return isa<LoadSDNode>(N) &&
+ cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// isNormalStore - Returns true if the specified node is a non-truncating
+ /// and unindexed store.
+ inline bool isNormalStore(const SDNode *N) {
+ const StoreSDNode *St = dyn_cast<StoreSDNode>(N);
+ return St && !St->isTruncatingStore() &&
+ St->getAddressingMode() == ISD::UNINDEXED;
+ }
+
+ /// isNON_TRUNCStore - Returns true if the specified node is a non-truncating
+ /// store.
+ inline bool isNON_TRUNCStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) && !cast<StoreSDNode>(N)->isTruncatingStore();
+ }
+
+ /// isTRUNCStore - Returns true if the specified node is a truncating
+ /// store.
+ inline bool isTRUNCStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) && cast<StoreSDNode>(N)->isTruncatingStore();
+ }
+
+ /// isUNINDEXEDStore - Returns true if the specified node is an
+ /// unindexed store.
+ inline bool isUNINDEXEDStore(const SDNode *N) {
+ return isa<StoreSDNode>(N) &&
+ cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+ }
+}
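+
+// Editorial sketch, not part of the original header: the predicates above are
+// typically used as guards before memory-operation folding. The helper name
+// 'isSimpleFoldableLoad' is hypothetical.
+static inline bool isSimpleFoldableLoad(const SDNode *N) {
+  // Only a plain (non-extending, unindexed) load with a single user is a
+  // reasonable starting point for most load-folding transformations.
+  return ISD::isNormalLoad(N) && N->hasOneUse();
+}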
+
+
+} // end llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/ValueTypes.h b/include/llvm/CodeGen/ValueTypes.h
new file mode 100644
index 0000000..95c3a11
--- /dev/null
+++ b/include/llvm/CodeGen/ValueTypes.h
@@ -0,0 +1,481 @@
+//===- CodeGen/ValueTypes.h - Low-Level Target independ. types --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of low-level target-independent types that values
+// in the code generator can have.  This allows the target-specific behavior of
+// instructions to be described to target-independent passes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CODEGEN_VALUETYPES_H
+#define LLVM_CODEGEN_VALUETYPES_H
+
+#include <cassert>
+#include <string>
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace llvm {
+ class Type;
+
+ struct MVT { // MVT = Machine Value Type
+ public:
+ enum SimpleValueType {
+ // If you change this numbering, you must change the values in
+ // ValueTypes.td as well!
+ Other = 0, // This is a non-standard value
+ i1 = 1, // This is a 1 bit integer value
+ i8 = 2, // This is an 8 bit integer value
+ i16 = 3, // This is a 16 bit integer value
+ i32 = 4, // This is a 32 bit integer value
+ i64 = 5, // This is a 64 bit integer value
+ i128 = 6, // This is a 128 bit integer value
+
+ FIRST_INTEGER_VALUETYPE = i1,
+ LAST_INTEGER_VALUETYPE = i128,
+
+ f32 = 7, // This is a 32 bit floating point value
+ f64 = 8, // This is a 64 bit floating point value
+      f80            =   9,   // This is an 80 bit floating point value
+ f128 = 10, // This is a 128 bit floating point value
+ ppcf128 = 11, // This is a PPC 128-bit floating point value
+ Flag = 12, // This is a condition code or machine flag.
+
+ isVoid = 13, // This has no value
+
+ v2i8 = 14, // 2 x i8
+ v4i8 = 15, // 4 x i8
+ v2i16 = 16, // 2 x i16
+ v8i8 = 17, // 8 x i8
+ v4i16 = 18, // 4 x i16
+ v2i32 = 19, // 2 x i32
+ v1i64 = 20, // 1 x i64
+ v16i8 = 21, // 16 x i8
+ v8i16 = 22, // 8 x i16
+ v3i32 = 23, // 3 x i32
+ v4i32 = 24, // 4 x i32
+ v2i64 = 25, // 2 x i64
+
+ v2f32 = 26, // 2 x f32
+ v3f32 = 27, // 3 x f32
+ v4f32 = 28, // 4 x f32
+ v2f64 = 29, // 2 x f64
+
+ FIRST_VECTOR_VALUETYPE = v2i8,
+ LAST_VECTOR_VALUETYPE = v2f64,
+
+ LAST_VALUETYPE = 30, // This always remains at the end of the list.
+
+ // iPTRAny - An int value the size of the pointer of the current
+ // target to any address space. This must only be used internal to
+ // tblgen. Other than for overloading, we treat iPTRAny the same as iPTR.
+ iPTRAny = 252,
+
+ // fAny - Any floating-point or vector floating-point value. This is used
+ // for intrinsics that have overloadings based on floating-point types.
+ // This is only for tblgen's consumption!
+ fAny = 253,
+
+ // iAny - An integer or vector integer value of any bit width. This is
+ // used for intrinsics that have overloadings based on integer bit widths.
+ // This is only for tblgen's consumption!
+ iAny = 254,
+
+ // iPTR - An int value the size of the pointer of the current
+ // target. This should only be used internal to tblgen!
+ iPTR = 255,
+
+ // LastSimpleValueType - The greatest valid SimpleValueType value.
+ LastSimpleValueType = 255
+ };
+
+ private:
+ /// This union holds low-level value types. Valid values include any of
+ /// the values in the SimpleValueType enum, or any value returned from one
+ /// of the MVT methods. Any value type equal to one of the SimpleValueType
+ /// enum values is a "simple" value type. All others are "extended".
+ ///
+    /// Note that simple doesn't necessarily mean legal for the target machine.
+ /// All legal value types must be simple, but often there are some simple
+ /// value types that are not legal.
+ ///
+ union {
+ uintptr_t V;
+ const Type *LLVMTy;
+ };
+
+ public:
+ MVT() {}
+ MVT(SimpleValueType S) : V(S) {}
+
+ bool operator==(const MVT VT) const {
+ return getRawBits() == VT.getRawBits();
+ }
+ bool operator!=(const MVT VT) const {
+ return getRawBits() != VT.getRawBits();
+ }
+
+ /// getFloatingPointVT - Returns the MVT that represents a floating point
+ /// type with the given number of bits. There are two floating point types
+ /// with 128 bits - this returns f128 rather than ppcf128.
+ static MVT getFloatingPointVT(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ assert(false && "Bad bit width!");
+ case 32:
+ return f32;
+ case 64:
+ return f64;
+ case 80:
+ return f80;
+ case 128:
+ return f128;
+ }
+ }
+
+ /// getIntegerVT - Returns the MVT that represents an integer with the given
+ /// number of bits.
+ static MVT getIntegerVT(unsigned BitWidth) {
+ switch (BitWidth) {
+ default:
+ break;
+ case 1:
+ return i1;
+ case 8:
+ return i8;
+ case 16:
+ return i16;
+ case 32:
+ return i32;
+ case 64:
+ return i64;
+ case 128:
+ return i128;
+ }
+ return getExtendedIntegerVT(BitWidth);
+ }
+
+    /// getVectorVT - Returns the MVT that represents a vector of NumElements
+    /// elements, where each element is of type VT.
+ static MVT getVectorVT(MVT VT, unsigned NumElements) {
+ switch (VT.V) {
+ default:
+ break;
+ case i8:
+ if (NumElements == 2) return v2i8;
+ if (NumElements == 4) return v4i8;
+ if (NumElements == 8) return v8i8;
+ if (NumElements == 16) return v16i8;
+ break;
+ case i16:
+ if (NumElements == 2) return v2i16;
+ if (NumElements == 4) return v4i16;
+ if (NumElements == 8) return v8i16;
+ break;
+ case i32:
+ if (NumElements == 2) return v2i32;
+ if (NumElements == 3) return v3i32;
+ if (NumElements == 4) return v4i32;
+ break;
+ case i64:
+ if (NumElements == 1) return v1i64;
+ if (NumElements == 2) return v2i64;
+ break;
+ case f32:
+ if (NumElements == 2) return v2f32;
+ if (NumElements == 3) return v3f32;
+ if (NumElements == 4) return v4f32;
+ break;
+ case f64:
+ if (NumElements == 2) return v2f64;
+ break;
+ }
+ return getExtendedVectorVT(VT, NumElements);
+ }
+
+ /// getIntVectorWithNumElements - Return any integer vector type that has
+ /// the specified number of elements.
+ static MVT getIntVectorWithNumElements(unsigned NumElts) {
+ switch (NumElts) {
+ default: return getVectorVT(i8, NumElts);
+ case 1: return v1i64;
+ case 2: return v2i32;
+ case 3: return v3i32;
+ case 4: return v4i16;
+ case 8: return v8i8;
+ case 16: return v16i8;
+ }
+ }
+
+ /// isSimple - Test if the given MVT is simple (as opposed to being
+ /// extended).
+ bool isSimple() const {
+ return V <= LastSimpleValueType;
+ }
+
+ /// isExtended - Test if the given MVT is extended (as opposed to
+ /// being simple).
+ bool isExtended() const {
+ return !isSimple();
+ }
+
+    /// isFloatingPoint - Return true if this is an FP or a vector FP type.
+ bool isFloatingPoint() const {
+ return isSimple() ?
+ ((V >= f32 && V <= ppcf128) || (V >= v2f32 && V <= v2f64)) :
+ isExtendedFloatingPoint();
+ }
+
+ /// isInteger - Return true if this is an integer, or a vector integer type.
+ bool isInteger() const {
+ return isSimple() ?
+ ((V >= FIRST_INTEGER_VALUETYPE && V <= LAST_INTEGER_VALUETYPE) ||
+ (V >= v2i8 && V <= v2i64)) : isExtendedInteger();
+ }
+
+ /// isVector - Return true if this is a vector value type.
+ bool isVector() const {
+ return isSimple() ?
+ (V >= FIRST_VECTOR_VALUETYPE && V <= LAST_VECTOR_VALUETYPE) :
+ isExtendedVector();
+ }
+
+ /// is64BitVector - Return true if this is a 64-bit vector type.
+ bool is64BitVector() const {
+ return isSimple() ?
+ (V==v8i8 || V==v4i16 || V==v2i32 || V==v1i64 || V==v2f32) :
+ isExtended64BitVector();
+ }
+
+ /// is128BitVector - Return true if this is a 128-bit vector type.
+ bool is128BitVector() const {
+ return isSimple() ?
+ (V==v16i8 || V==v8i16 || V==v4i32 ||
+ V==v2i64 || V==v4f32 || V==v2f64) :
+ isExtended128BitVector();
+ }
+
+ /// isByteSized - Return true if the bit size is a multiple of 8.
+ bool isByteSized() const {
+ return (getSizeInBits() & 7) == 0;
+ }
+
+ /// isRound - Return true if the size is a power-of-two number of bytes.
+ bool isRound() const {
+ unsigned BitSize = getSizeInBits();
+ return BitSize >= 8 && !(BitSize & (BitSize - 1));
+ }
+
+ /// bitsEq - Return true if this has the same number of bits as VT.
+ bool bitsEq(MVT VT) const {
+ return getSizeInBits() == VT.getSizeInBits();
+ }
+
+ /// bitsGT - Return true if this has more bits than VT.
+ bool bitsGT(MVT VT) const {
+ return getSizeInBits() > VT.getSizeInBits();
+ }
+
+    /// bitsGE - Return true if this has at least as many bits as VT.
+ bool bitsGE(MVT VT) const {
+ return getSizeInBits() >= VT.getSizeInBits();
+ }
+
+    /// bitsLT - Return true if this has fewer bits than VT.
+ bool bitsLT(MVT VT) const {
+ return getSizeInBits() < VT.getSizeInBits();
+ }
+
+ /// bitsLE - Return true if this has no more bits than VT.
+ bool bitsLE(MVT VT) const {
+ return getSizeInBits() <= VT.getSizeInBits();
+ }
+
+
+ /// getSimpleVT - Return the SimpleValueType held in the specified
+ /// simple MVT.
+ SimpleValueType getSimpleVT() const {
+ assert(isSimple() && "Expected a SimpleValueType!");
+ return SimpleValueType(V);
+ }
+
+ /// getVectorElementType - Given a vector type, return the type of
+ /// each element.
+ MVT getVectorElementType() const {
+ assert(isVector() && "Invalid vector type!");
+ switch (V) {
+ default:
+ return getExtendedVectorElementType();
+ case v2i8 :
+ case v4i8 :
+ case v8i8 :
+ case v16i8: return i8;
+ case v2i16:
+ case v4i16:
+ case v8i16: return i16;
+ case v2i32:
+ case v3i32:
+ case v4i32: return i32;
+ case v1i64:
+ case v2i64: return i64;
+ case v2f32:
+ case v3f32:
+ case v4f32: return f32;
+ case v2f64: return f64;
+ }
+ }
+
+ /// getVectorNumElements - Given a vector type, return the number of
+ /// elements it contains.
+ unsigned getVectorNumElements() const {
+ assert(isVector() && "Invalid vector type!");
+ switch (V) {
+ default:
+ return getExtendedVectorNumElements();
+ case v16i8: return 16;
+ case v8i8 :
+ case v8i16: return 8;
+ case v4i8:
+ case v4i16:
+ case v4i32:
+ case v4f32: return 4;
+ case v3i32:
+ case v3f32: return 3;
+ case v2i8:
+ case v2i16:
+ case v2i32:
+ case v2i64:
+ case v2f32:
+ case v2f64: return 2;
+ case v1i64: return 1;
+ }
+ }
+
+ /// getSizeInBits - Return the size of the specified value type in bits.
+ unsigned getSizeInBits() const {
+ switch (V) {
+ case iPTR:
+ assert(0 && "Value type size is target-dependent. Ask TLI.");
+ case iPTRAny:
+ case iAny:
+ case fAny:
+ assert(0 && "Value type is overloaded.");
+ default:
+ return getExtendedSizeInBits();
+ case i1 : return 1;
+ case i8 : return 8;
+ case i16 :
+ case v2i8: return 16;
+ case f32 :
+ case i32 :
+ case v4i8:
+ case v2i16: return 32;
+ case f64 :
+ case i64 :
+ case v8i8:
+ case v4i16:
+ case v2i32:
+ case v1i64:
+ case v2f32: return 64;
+ case f80 : return 80;
+ case v3i32:
+ case v3f32: return 96;
+ case f128:
+ case ppcf128:
+ case i128:
+ case v16i8:
+ case v8i16:
+ case v4i32:
+ case v2i64:
+ case v4f32:
+ case v2f64: return 128;
+ }
+ }
+
+ /// getStoreSizeInBits - Return the number of bits overwritten by a store
+ /// of the specified value type.
+ unsigned getStoreSizeInBits() const {
+ return (getSizeInBits() + 7)/8*8;
+ }
+
+ /// getRoundIntegerType - Rounds the bit-width of the given integer MVT up
+ /// to the nearest power of two (and at least to eight), and returns the
+ /// integer MVT with that number of bits.
+ MVT getRoundIntegerType() const {
+ assert(isInteger() && !isVector() && "Invalid integer type!");
+ unsigned BitWidth = getSizeInBits();
+ if (BitWidth <= 8)
+ return i8;
+ else
+ return getIntegerVT(1 << Log2_32_Ceil(BitWidth));
+ }
+
+    /// isPow2VectorType - Returns true if the given vector type has a
+    /// power-of-2 number of elements.
+ bool isPow2VectorType() const {
+ unsigned NElts = getVectorNumElements();
+ return !(NElts & (NElts - 1));
+ }
+
+ /// getPow2VectorType - Widens the length of the given vector MVT up to
+ /// the nearest power of 2 and returns that type.
+ MVT getPow2VectorType() const {
+ if (!isPow2VectorType()) {
+ unsigned NElts = getVectorNumElements();
+ unsigned Pow2NElts = 1 << Log2_32_Ceil(NElts);
+ return MVT::getVectorVT(getVectorElementType(), Pow2NElts);
+ }
+ else {
+ return *this;
+ }
+ }
+
+ /// getMVTString - This function returns value type as a string,
+ /// e.g. "i32".
+ std::string getMVTString() const;
+
+ /// getTypeForMVT - This method returns an LLVM type corresponding to the
+ /// specified MVT. For integer types, this returns an unsigned type. Note
+ /// that this will abort for types that cannot be represented.
+ const Type *getTypeForMVT() const;
+
+ /// getMVT - Return the value type corresponding to the specified type.
+ /// This returns all pointers as iPTR. If HandleUnknown is true, unknown
+ /// types are returned as Other, otherwise they are invalid.
+ static MVT getMVT(const Type *Ty, bool HandleUnknown = false);
+
+ /// getRawBits - Represent the type as a bunch of bits.
+ uintptr_t getRawBits() const { return V; }
+
+ /// compareRawBits - A meaningless but well-behaved order, useful for
+ /// constructing containers.
+ struct compareRawBits {
+ bool operator()(MVT L, MVT R) const {
+ return L.getRawBits() < R.getRawBits();
+ }
+ };
+
+ private:
+ // Methods for handling the Extended-type case in functions above.
+ // These are all out-of-line to prevent users of this header file
+ // from having a dependency on Type.h.
+ static MVT getExtendedIntegerVT(unsigned BitWidth);
+ static MVT getExtendedVectorVT(MVT VT, unsigned NumElements);
+ bool isExtendedFloatingPoint() const;
+ bool isExtendedInteger() const;
+ bool isExtendedVector() const;
+ bool isExtended64BitVector() const;
+ bool isExtended128BitVector() const;
+ MVT getExtendedVectorElementType() const;
+ unsigned getExtendedVectorNumElements() const;
+ unsigned getExtendedSizeInBits() const;
+ };
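+
+  // Editorial sketch, not part of the original header: a few round trips
+  // through the API above. The helper name 'sanityCheckMVT' is hypothetical.
+  static inline void sanityCheckMVT() {
+    MVT VecVT = MVT::getVectorVT(MVT::f32, 4);   // yields MVT::v4f32
+    assert(VecVT.is128BitVector() && "4 x f32 occupies 128 bits");
+    assert(VecVT.getVectorElementType() == MVT::f32);
+    assert(VecVT.getVectorNumElements() == 4);
+    // 24 bits is not a simple width; rounding it up gives the simple type i32.
+    MVT Rounded = MVT::getIntegerVT(24).getRoundIntegerType();
+    assert(Rounded == MVT::i32);
+    (void)VecVT; (void)Rounded;
+  }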
+
+} // End llvm namespace
+
+#endif
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
new file mode 100644
index 0000000..53ed0be
--- /dev/null
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -0,0 +1,66 @@
+//===- ValueTypes.td - ValueType definitions ---------------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Value types - These values correspond to the register types defined in the
+// ValueTypes.h file. If you update anything here, you must update it there as
+// well!
+//
+//===----------------------------------------------------------------------===//
+
+class ValueType<int size, int value> {
+ string Namespace = "MVT";
+ int Size = size;
+ int Value = value;
+}
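+
+// For example, "def i32 : ValueType<32, 4>;" below uses Value = 4 because the
+// SimpleValueType enum in ValueTypes.h defines "i32 = 4".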
+
+def OtherVT: ValueType<0 , 0>; // "Other" value
+def i1 : ValueType<1 , 1>; // One bit boolean value
+def i8 : ValueType<8 , 2>; // 8-bit integer value
+def i16 : ValueType<16 , 3>; // 16-bit integer value
+def i32 : ValueType<32 , 4>; // 32-bit integer value
+def i64 : ValueType<64 , 5>; // 64-bit integer value
+def i128 : ValueType<128, 6>; // 128-bit integer value
+def f32 : ValueType<32 , 7>; // 32-bit floating point value
+def f64 : ValueType<64 , 8>; // 64-bit floating point value
+def f80 : ValueType<80 , 9>; // 80-bit floating point value
+def f128 : ValueType<128, 10>; // 128-bit floating point value
+def ppcf128: ValueType<128, 11>; // PPC 128-bit floating point value
+def FlagVT : ValueType<0 , 12>; // Condition code or machine flag
+def isVoid : ValueType<0 , 13>; // Produces no value
+def v2i8 : ValueType<16 , 14>; // 2 x i8 vector value
+def v4i8 : ValueType<32 , 15>; // 4 x i8 vector value
+def v2i16 : ValueType<32 , 16>; // 2 x i16 vector value
+def v8i8 : ValueType<64 , 17>; // 8 x i8 vector value
+def v4i16 : ValueType<64 , 18>; // 4 x i16 vector value
+def v2i32 : ValueType<64 , 19>; // 2 x i32 vector value
+def v1i64 : ValueType<64 , 20>; // 1 x i64 vector value
+
+def v16i8 : ValueType<128, 21>; // 16 x i8 vector value
+def v8i16 : ValueType<128, 22>; // 8 x i16 vector value
+def v3i32 : ValueType<96 , 23>; // 3 x i32 vector value
+def v4i32 : ValueType<128, 24>; // 4 x i32 vector value
+def v2i64 : ValueType<128, 25>; // 2 x i64 vector value
+
+def v2f32 : ValueType<64, 26>; // 2 x f32 vector value
+def v3f32 : ValueType<96 , 27>; // 3 x f32 vector value
+def v4f32 : ValueType<128, 28>; // 4 x f32 vector value
+def v2f64 : ValueType<128, 29>; // 2 x f64 vector value
+
+// Pseudo valuetype mapped to the current pointer size, into any address space.
+// Should only be used in TableGen.
+def iPTRAny : ValueType<0, 252>;
+
+// Pseudo valuetype to represent "float of any format"
+def fAny : ValueType<0 , 253>;
+
+// Pseudo valuetype to represent "integer of any bit width"
+def iAny : ValueType<0 , 254>;
+
+// Pseudo valuetype mapped to the current pointer size.
+def iPTR : ValueType<0 , 255>;
OpenPOWER on IntegriCloud