diff options
Diffstat (limited to 'contrib/llvm/lib/Target/Mips')
55 files changed, 13342 insertions, 0 deletions
diff --git a/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp new file mode 100644 index 0000000..58b5590 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -0,0 +1,66 @@ +//===-- MipsAsmParser.cpp - Parse Mips assembly to MCInst instructions ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "llvm/MC/MCParser/MCAsmLexer.h" +#include "llvm/MC/MCTargetAsmParser.h" +#include "llvm/Support/TargetRegistry.h" + +using namespace llvm; + +namespace { +class MipsAsmParser : public MCTargetAsmParser { + bool MatchAndEmitInstruction(SMLoc IDLoc, + SmallVectorImpl<MCParsedAsmOperand*> &Operands, + MCStreamer &Out); + + bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); + + bool ParseInstruction(StringRef Name, SMLoc NameLoc, + SmallVectorImpl<MCParsedAsmOperand*> &Operands); + + bool ParseDirective(AsmToken DirectiveID); + +public: + MipsAsmParser(MCSubtargetInfo &sti, MCAsmParser &parser) + : MCTargetAsmParser() { + } + +}; +} + +bool MipsAsmParser:: +MatchAndEmitInstruction(SMLoc IDLoc, + SmallVectorImpl<MCParsedAsmOperand*> &Operands, + MCStreamer &Out) { + return true; +} + +bool MipsAsmParser:: +ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) { + return true; +} + +bool MipsAsmParser:: +ParseInstruction(StringRef Name, SMLoc NameLoc, + SmallVectorImpl<MCParsedAsmOperand*> &Operands) { + return true; +} + +bool MipsAsmParser:: +ParseDirective(AsmToken DirectiveID) { + return true; +} + +extern "C" void LLVMInitializeMipsAsmParser() { + RegisterMCAsmParser<MipsAsmParser> X(TheMipsTarget); + RegisterMCAsmParser<MipsAsmParser> Y(TheMipselTarget); + RegisterMCAsmParser<MipsAsmParser> 
A(TheMips64Target); + RegisterMCAsmParser<MipsAsmParser> B(TheMips64elTarget); +} diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp new file mode 100644 index 0000000..6886b17 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.cpp @@ -0,0 +1,179 @@ +//===-- MipsInstPrinter.cpp - Convert Mips MCInst to assembly syntax ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class prints an Mips MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "asm-printer" +#include "MipsInstPrinter.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +#include "MipsGenAsmWriter.inc" + +const char* Mips::MipsFCCToString(Mips::CondCode CC) { + switch (CC) { + case FCOND_F: + case FCOND_T: return "f"; + case FCOND_UN: + case FCOND_OR: return "un"; + case FCOND_OEQ: + case FCOND_UNE: return "eq"; + case FCOND_UEQ: + case FCOND_ONE: return "ueq"; + case FCOND_OLT: + case FCOND_UGE: return "olt"; + case FCOND_ULT: + case FCOND_OGE: return "ult"; + case FCOND_OLE: + case FCOND_UGT: return "ole"; + case FCOND_ULE: + case FCOND_OGT: return "ule"; + case FCOND_SF: + case FCOND_ST: return "sf"; + case FCOND_NGLE: + case FCOND_GLE: return "ngle"; + case FCOND_SEQ: + case FCOND_SNE: return "seq"; + case FCOND_NGL: + case FCOND_GL: return "ngl"; + case FCOND_LT: + case FCOND_NLT: return "lt"; + case FCOND_NGE: + case FCOND_GE: return "nge"; + case FCOND_LE: + case FCOND_NLE: return 
"le"; + case FCOND_NGT: + case FCOND_GT: return "ngt"; + } + llvm_unreachable("Impossible condition code!"); +} + +void MipsInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const { + OS << '$' << StringRef(getRegisterName(RegNo)).lower(); +} + +void MipsInstPrinter::printInst(const MCInst *MI, raw_ostream &O, + StringRef Annot) { + printInstruction(MI, O); + printAnnotation(O, Annot); +} + +static void printExpr(const MCExpr *Expr, raw_ostream &OS) { + int Offset = 0; + const MCSymbolRefExpr *SRE; + + if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Expr)) { + SRE = dyn_cast<MCSymbolRefExpr>(BE->getLHS()); + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS()); + assert(SRE && CE && "Binary expression must be sym+const."); + Offset = CE->getValue(); + } + else if (!(SRE = dyn_cast<MCSymbolRefExpr>(Expr))) + assert(false && "Unexpected MCExpr type."); + + MCSymbolRefExpr::VariantKind Kind = SRE->getKind(); + + switch (Kind) { + default: llvm_unreachable("Invalid kind!"); + case MCSymbolRefExpr::VK_None: break; + case MCSymbolRefExpr::VK_Mips_GPREL: OS << "%gp_rel("; break; + case MCSymbolRefExpr::VK_Mips_GOT_CALL: OS << "%call16("; break; + case MCSymbolRefExpr::VK_Mips_GOT16: OS << "%got("; break; + case MCSymbolRefExpr::VK_Mips_GOT: OS << "%got("; break; + case MCSymbolRefExpr::VK_Mips_ABS_HI: OS << "%hi("; break; + case MCSymbolRefExpr::VK_Mips_ABS_LO: OS << "%lo("; break; + case MCSymbolRefExpr::VK_Mips_TLSGD: OS << "%tlsgd("; break; + case MCSymbolRefExpr::VK_Mips_TLSLDM: OS << "%tlsldm("; break; + case MCSymbolRefExpr::VK_Mips_DTPREL_HI: OS << "%dtprel_hi("; break; + case MCSymbolRefExpr::VK_Mips_DTPREL_LO: OS << "%dtprel_lo("; break; + case MCSymbolRefExpr::VK_Mips_GOTTPREL: OS << "%gottprel("; break; + case MCSymbolRefExpr::VK_Mips_TPREL_HI: OS << "%tprel_hi("; break; + case MCSymbolRefExpr::VK_Mips_TPREL_LO: OS << "%tprel_lo("; break; + case MCSymbolRefExpr::VK_Mips_GPOFF_HI: OS << "%hi(%neg(%gp_rel("; break; + case 
MCSymbolRefExpr::VK_Mips_GPOFF_LO: OS << "%lo(%neg(%gp_rel("; break; + case MCSymbolRefExpr::VK_Mips_GOT_DISP: OS << "%got_disp("; break; + case MCSymbolRefExpr::VK_Mips_GOT_PAGE: OS << "%got_page("; break; + case MCSymbolRefExpr::VK_Mips_GOT_OFST: OS << "%got_ofst("; break; + } + + OS << SRE->getSymbol(); + + if (Offset) { + if (Offset > 0) + OS << '+'; + OS << Offset; + } + + if ((Kind == MCSymbolRefExpr::VK_Mips_GPOFF_HI) || + (Kind == MCSymbolRefExpr::VK_Mips_GPOFF_LO)) + OS << ")))"; + else if (Kind != MCSymbolRefExpr::VK_None) + OS << ')'; +} + +void MipsInstPrinter::printOperand(const MCInst *MI, unsigned OpNo, + raw_ostream &O) { + const MCOperand &Op = MI->getOperand(OpNo); + if (Op.isReg()) { + printRegName(O, Op.getReg()); + return; + } + + if (Op.isImm()) { + O << Op.getImm(); + return; + } + + assert(Op.isExpr() && "unknown operand kind in printOperand"); + printExpr(Op.getExpr(), O); +} + +void MipsInstPrinter::printUnsignedImm(const MCInst *MI, int opNum, + raw_ostream &O) { + const MCOperand &MO = MI->getOperand(opNum); + if (MO.isImm()) + O << (unsigned short int)MO.getImm(); + else + printOperand(MI, opNum, O); +} + +void MipsInstPrinter:: +printMemOperand(const MCInst *MI, int opNum, raw_ostream &O) { + // Load/Store memory operands -- imm($reg) + // If PIC target the target is loaded as the + // pattern lw $25,%call16($28) + printOperand(MI, opNum+1, O); + O << "("; + printOperand(MI, opNum, O); + O << ")"; +} + +void MipsInstPrinter:: +printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O) { + // when using stack locations for not load/store instructions + // print the same way as all normal 3 operand instructions. 
+ printOperand(MI, opNum, O); + O << ", "; + printOperand(MI, opNum+1, O); + return; +} + +void MipsInstPrinter:: +printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O) { + const MCOperand& MO = MI->getOperand(opNum); + O << MipsFCCToString((Mips::CondCode)MO.getImm()); +} diff --git a/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h new file mode 100644 index 0000000..76b839b --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/InstPrinter/MipsInstPrinter.h @@ -0,0 +1,100 @@ +//=== MipsInstPrinter.h - Convert Mips MCInst to assembly syntax -*- C++ -*-==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This class prints a Mips MCInst to a .s file. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSINSTPRINTER_H +#define MIPSINSTPRINTER_H +#include "llvm/MC/MCInstPrinter.h" + +namespace llvm { +// These enumeration declarations were originally in MipsInstrInfo.h but +// had to be moved here to avoid circular dependencies between +// LLVMMipsCodeGen and LLVMMipsAsmPrinter. 
+namespace Mips { +// Mips Branch Codes +enum FPBranchCode { + BRANCH_F, + BRANCH_T, + BRANCH_FL, + BRANCH_TL, + BRANCH_INVALID +}; + +// Mips Condition Codes +enum CondCode { + // To be used with float branch True + FCOND_F, + FCOND_UN, + FCOND_OEQ, + FCOND_UEQ, + FCOND_OLT, + FCOND_ULT, + FCOND_OLE, + FCOND_ULE, + FCOND_SF, + FCOND_NGLE, + FCOND_SEQ, + FCOND_NGL, + FCOND_LT, + FCOND_NGE, + FCOND_LE, + FCOND_NGT, + + // To be used with float branch False + // This conditions have the same mnemonic as the + // above ones, but are used with a branch False; + FCOND_T, + FCOND_OR, + FCOND_UNE, + FCOND_ONE, + FCOND_UGE, + FCOND_OGE, + FCOND_UGT, + FCOND_OGT, + FCOND_ST, + FCOND_GLE, + FCOND_SNE, + FCOND_GL, + FCOND_NLT, + FCOND_GE, + FCOND_NLE, + FCOND_GT +}; + +const char *MipsFCCToString(Mips::CondCode CC); +} // end namespace Mips + +class TargetMachine; + +class MipsInstPrinter : public MCInstPrinter { +public: + MipsInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII, + const MCRegisterInfo &MRI) + : MCInstPrinter(MAI, MII, MRI) {} + + // Autogenerated by tblgen. 
+ void printInstruction(const MCInst *MI, raw_ostream &O); + static const char *getRegisterName(unsigned RegNo); + + virtual void printRegName(raw_ostream &OS, unsigned RegNo) const; + virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot); + +private: + void printOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O); + void printUnsignedImm(const MCInst *MI, int opNum, raw_ostream &O); + void printMemOperand(const MCInst *MI, int opNum, raw_ostream &O); + void printMemOperandEA(const MCInst *MI, int opNum, raw_ostream &O); + void printFCCOperand(const MCInst *MI, int opNum, raw_ostream &O); +}; +} // end namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp new file mode 100644 index 0000000..e79be33 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp @@ -0,0 +1,237 @@ +//===-- MipsASMBackend.cpp - Mips Asm Backend ----------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the MipsAsmBackend and MipsELFObjectWriter classes. 
+// +//===----------------------------------------------------------------------===// +// + +#include "MipsBaseInfo.h" +#include "MipsFixupKinds.h" +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCDirectives.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +// Prepare value for the target space for it +static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) { + + // Add/subtract and shift + switch (Kind) { + default: + return 0; + case FK_GPRel_4: + case FK_Data_4: + case Mips::fixup_Mips_LO16: + break; + case Mips::fixup_Mips_PC16: + // So far we are only using this type for branches. + // For branches we start 1 instruction after the branch + // so the displacement will be one instruction size less. + Value -= 4; + // The displacement is then divided by 4 to give us an 18 bit + // address range. + Value >>= 2; + break; + case Mips::fixup_Mips_26: + // So far we are only using this type for jumps. + // The displacement is then divided by 4 to give us an 28 bit + // address range. + Value >>= 2; + break; + case Mips::fixup_Mips_HI16: + case Mips::fixup_Mips_GOT_Local: + // Get the higher 16-bits. Also add 1 if bit 15 is 1. 
+ Value = ((Value + 0x8000) >> 16) & 0xffff; + break; + } + + return Value; +} + +namespace { +class MipsAsmBackend : public MCAsmBackend { + Triple::OSType OSType; + bool IsLittle; // Big or little endian + bool Is64Bit; // 32 or 64 bit words + +public: + MipsAsmBackend(const Target &T, Triple::OSType _OSType, + bool _isLittle, bool _is64Bit) + :MCAsmBackend(), OSType(_OSType), IsLittle(_isLittle), Is64Bit(_is64Bit) {} + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createMipsELFObjectWriter(OS, OSType, IsLittle, Is64Bit); + } + + /// ApplyFixup - Apply the \arg Value for given \arg Fixup into the provided + /// data fragment, at the offset specified by the fixup and following the + /// fixup kind as appropriate. + void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + uint64_t Value) const { + MCFixupKind Kind = Fixup.getKind(); + Value = adjustFixupValue((unsigned)Kind, Value); + int64_t SymOffset = MipsGetSymAndOffset(Fixup).second; + + if (!Value && !SymOffset) + return; // Doesn't change encoding. + + // Where do we start in the object + unsigned Offset = Fixup.getOffset(); + // Number of bytes we need to fixup + unsigned NumBytes = (getFixupKindInfo(Kind).TargetSize + 7) / 8; + // Used to point to big endian bytes + unsigned FullSize; + + switch ((unsigned)Kind) { + case Mips::fixup_Mips_16: + FullSize = 2; + break; + case Mips::fixup_Mips_64: + FullSize = 8; + break; + default: + FullSize = 4; + break; + } + + // Grab current value, if any, from bits. + uint64_t CurVal = 0; + + for (unsigned i = 0; i != NumBytes; ++i) { + unsigned Idx = IsLittle ? i : (FullSize - 1 - i); + CurVal |= (uint64_t)((uint8_t)Data[Offset + Idx]) << (i*8); + } + + uint64_t Mask = ((uint64_t)(-1) >> (64 - getFixupKindInfo(Kind).TargetSize)); + CurVal |= (Value + SymOffset) & Mask; + + // Write out the fixed up bytes back to the code/data bits. + for (unsigned i = 0; i != NumBytes; ++i) { + unsigned Idx = IsLittle ? 
i : (FullSize - 1 - i); + Data[Offset + Idx] = (uint8_t)((CurVal >> (i*8)) & 0xff); + } + } + + unsigned getNumFixupKinds() const { return Mips::NumTargetFixupKinds; } + + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const { + const static MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] = { + // This table *must* be in same the order of fixup_* kinds in + // MipsFixupKinds.h. + // + // name offset bits flags + { "fixup_Mips_16", 0, 16, 0 }, + { "fixup_Mips_32", 0, 32, 0 }, + { "fixup_Mips_REL32", 0, 32, 0 }, + { "fixup_Mips_26", 0, 26, 0 }, + { "fixup_Mips_HI16", 0, 16, 0 }, + { "fixup_Mips_LO16", 0, 16, 0 }, + { "fixup_Mips_GPREL16", 0, 16, 0 }, + { "fixup_Mips_LITERAL", 0, 16, 0 }, + { "fixup_Mips_GOT_Global", 0, 16, 0 }, + { "fixup_Mips_GOT_Local", 0, 16, 0 }, + { "fixup_Mips_PC16", 0, 16, MCFixupKindInfo::FKF_IsPCRel }, + { "fixup_Mips_CALL16", 0, 16, 0 }, + { "fixup_Mips_GPREL32", 0, 32, 0 }, + { "fixup_Mips_SHIFT5", 6, 5, 0 }, + { "fixup_Mips_SHIFT6", 6, 5, 0 }, + { "fixup_Mips_64", 0, 64, 0 }, + { "fixup_Mips_TLSGD", 0, 16, 0 }, + { "fixup_Mips_GOTTPREL", 0, 16, 0 }, + { "fixup_Mips_TPREL_HI", 0, 16, 0 }, + { "fixup_Mips_TPREL_LO", 0, 16, 0 }, + { "fixup_Mips_TLSLDM", 0, 16, 0 }, + { "fixup_Mips_DTPREL_HI", 0, 16, 0 }, + { "fixup_Mips_DTPREL_LO", 0, 16, 0 }, + { "fixup_Mips_Branch_PCRel", 0, 16, MCFixupKindInfo::FKF_IsPCRel } + }; + + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + return Infos[Kind - FirstTargetFixupKind]; + } + + /// @name Target Relaxation Interfaces + /// @{ + + /// MayNeedRelaxation - Check whether the given instruction may need + /// relaxation. + /// + /// \param Inst - The instruction to test. 
+ bool mayNeedRelaxation(const MCInst &Inst) const { + return false; + } + + /// fixupNeedsRelaxation - Target specific predicate for whether a given + /// fixup requires the associated instruction to be relaxed. + bool fixupNeedsRelaxation(const MCFixup &Fixup, + uint64_t Value, + const MCInstFragment *DF, + const MCAsmLayout &Layout) const { + // FIXME. + assert(0 && "RelaxInstruction() unimplemented"); + return false; + } + + /// RelaxInstruction - Relax the instruction in the given fragment + /// to the next wider instruction. + /// + /// \param Inst - The instruction to relax, which may be the same + /// as the output. + /// \param Res [output] - On return, the relaxed instruction. + void relaxInstruction(const MCInst &Inst, MCInst &Res) const { + } + + /// @} + + /// WriteNopData - Write an (optimal) nop sequence of Count bytes + /// to the given output. If the target cannot generate such a sequence, + /// it should return an error. + /// + /// \return - True on success. + bool writeNopData(uint64_t Count, MCObjectWriter *OW) const { + return true; + } +}; // class MipsAsmBackend + +} // namespace + +// MCAsmBackend +MCAsmBackend *llvm::createMipsAsmBackendEL32(const Target &T, StringRef TT) { + return new MipsAsmBackend(T, Triple(TT).getOS(), + /*IsLittle*/true, /*Is64Bit*/false); +} + +MCAsmBackend *llvm::createMipsAsmBackendEB32(const Target &T, StringRef TT) { + return new MipsAsmBackend(T, Triple(TT).getOS(), + /*IsLittle*/false, /*Is64Bit*/false); +} + +MCAsmBackend *llvm::createMipsAsmBackendEL64(const Target &T, StringRef TT) { + return new MipsAsmBackend(T, Triple(TT).getOS(), + /*IsLittle*/true, /*Is64Bit*/true); +} + +MCAsmBackend *llvm::createMipsAsmBackendEB64(const Target &T, StringRef TT) { + return new MipsAsmBackend(T, Triple(TT).getOS(), + /*IsLittle*/false, /*Is64Bit*/true); +} + diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h new file mode 100644 index 
0000000..fb1c5ce --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsBaseInfo.h @@ -0,0 +1,233 @@ +//===-- MipsBaseInfo.h - Top level definitions for MIPS MC ------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains small standalone helper functions and enum definitions for +// the Mips target useful for the compiler back-end and the MC libraries. +// +//===----------------------------------------------------------------------===// +#ifndef MIPSBASEINFO_H +#define MIPSBASEINFO_H + +#include "MipsFixupKinds.h" +#include "MipsMCTargetDesc.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/Support/DataTypes.h" +#include "llvm/Support/ErrorHandling.h" + +namespace llvm { + +/// MipsII - This namespace holds all of the target specific flags that +/// instruction info tracks. +/// +namespace MipsII { + /// Target Operand Flag enum. + enum TOF { + //===------------------------------------------------------------------===// + // Mips Specific MachineOperand flags. + + MO_NO_FLAG, + + /// MO_GOT16 - Represents the offset into the global offset table at which + /// the address the relocation entry symbol resides during execution. + MO_GOT16, + MO_GOT, + + /// MO_GOT_CALL - Represents the offset into the global offset table at + /// which the address of a call site relocation entry symbol resides + /// during execution. This is different from the above since this flag + /// can only be present in call instructions. + MO_GOT_CALL, + + /// MO_GPREL - Represents the offset from the current gp value to be used + /// for the relocatable object file being produced. + MO_GPREL, + + /// MO_ABS_HI/LO - Represents the hi or low part of an absolute symbol + /// address. 
+ MO_ABS_HI, + MO_ABS_LO, + + /// MO_TLSGD - Represents the offset into the global offset table at which + // the module ID and TLS block offset reside during execution (General + // Dynamic TLS). + MO_TLSGD, + + /// MO_TLSLDM - Represents the offset into the global offset table at which + // the module ID and TLS block offset reside during execution (Local + // Dynamic TLS). + MO_TLSLDM, + MO_DTPREL_HI, + MO_DTPREL_LO, + + /// MO_GOTTPREL - Represents the offset from the thread pointer (Initial + // Exec TLS). + MO_GOTTPREL, + + /// MO_TPREL_HI/LO - Represents the hi and low part of the offset from + // the thread pointer (Local Exec TLS). + MO_TPREL_HI, + MO_TPREL_LO, + + // N32/64 Flags. + MO_GPOFF_HI, + MO_GPOFF_LO, + MO_GOT_DISP, + MO_GOT_PAGE, + MO_GOT_OFST + }; + + enum { + //===------------------------------------------------------------------===// + // Instruction encodings. These are the standard/most common forms for + // Mips instructions. + // + + // Pseudo - This represents an instruction that is a pseudo instruction + // or one that has not been implemented yet. It is illegal to code generate + // it, but tolerated for intermediate implementation stages. + Pseudo = 0, + + /// FrmR - This form is for instructions of the format R. + FrmR = 1, + /// FrmI - This form is for instructions of the format I. + FrmI = 2, + /// FrmJ - This form is for instructions of the format J. + FrmJ = 3, + /// FrmFR - This form is for instructions of the format FR. + FrmFR = 4, + /// FrmFI - This form is for instructions of the format FI. + FrmFI = 5, + /// FrmOther - This form is for instructions that have no specific format. + FrmOther = 6, + + FormMask = 15 + }; +} + + +/// getMipsRegisterNumbering - Given the enum value for some register, +/// return the number that it corresponds to. 
+inline static unsigned getMipsRegisterNumbering(unsigned RegEnum) +{ + switch (RegEnum) { + case Mips::ZERO: case Mips::ZERO_64: case Mips::F0: case Mips::D0_64: + case Mips::D0: + return 0; + case Mips::AT: case Mips::AT_64: case Mips::F1: case Mips::D1_64: + return 1; + case Mips::V0: case Mips::V0_64: case Mips::F2: case Mips::D2_64: + case Mips::D1: + return 2; + case Mips::V1: case Mips::V1_64: case Mips::F3: case Mips::D3_64: + return 3; + case Mips::A0: case Mips::A0_64: case Mips::F4: case Mips::D4_64: + case Mips::D2: + return 4; + case Mips::A1: case Mips::A1_64: case Mips::F5: case Mips::D5_64: + return 5; + case Mips::A2: case Mips::A2_64: case Mips::F6: case Mips::D6_64: + case Mips::D3: + return 6; + case Mips::A3: case Mips::A3_64: case Mips::F7: case Mips::D7_64: + return 7; + case Mips::T0: case Mips::T0_64: case Mips::F8: case Mips::D8_64: + case Mips::D4: + return 8; + case Mips::T1: case Mips::T1_64: case Mips::F9: case Mips::D9_64: + return 9; + case Mips::T2: case Mips::T2_64: case Mips::F10: case Mips::D10_64: + case Mips::D5: + return 10; + case Mips::T3: case Mips::T3_64: case Mips::F11: case Mips::D11_64: + return 11; + case Mips::T4: case Mips::T4_64: case Mips::F12: case Mips::D12_64: + case Mips::D6: + return 12; + case Mips::T5: case Mips::T5_64: case Mips::F13: case Mips::D13_64: + return 13; + case Mips::T6: case Mips::T6_64: case Mips::F14: case Mips::D14_64: + case Mips::D7: + return 14; + case Mips::T7: case Mips::T7_64: case Mips::F15: case Mips::D15_64: + return 15; + case Mips::S0: case Mips::S0_64: case Mips::F16: case Mips::D16_64: + case Mips::D8: + return 16; + case Mips::S1: case Mips::S1_64: case Mips::F17: case Mips::D17_64: + return 17; + case Mips::S2: case Mips::S2_64: case Mips::F18: case Mips::D18_64: + case Mips::D9: + return 18; + case Mips::S3: case Mips::S3_64: case Mips::F19: case Mips::D19_64: + return 19; + case Mips::S4: case Mips::S4_64: case Mips::F20: case Mips::D20_64: + case Mips::D10: + return 20; + 
case Mips::S5: case Mips::S5_64: case Mips::F21: case Mips::D21_64: + return 21; + case Mips::S6: case Mips::S6_64: case Mips::F22: case Mips::D22_64: + case Mips::D11: + return 22; + case Mips::S7: case Mips::S7_64: case Mips::F23: case Mips::D23_64: + return 23; + case Mips::T8: case Mips::T8_64: case Mips::F24: case Mips::D24_64: + case Mips::D12: + return 24; + case Mips::T9: case Mips::T9_64: case Mips::F25: case Mips::D25_64: + return 25; + case Mips::K0: case Mips::K0_64: case Mips::F26: case Mips::D26_64: + case Mips::D13: + return 26; + case Mips::K1: case Mips::K1_64: case Mips::F27: case Mips::D27_64: + return 27; + case Mips::GP: case Mips::GP_64: case Mips::F28: case Mips::D28_64: + case Mips::D14: + return 28; + case Mips::SP: case Mips::SP_64: case Mips::F29: case Mips::D29_64: + case Mips::HWR29: + return 29; + case Mips::FP: case Mips::FP_64: case Mips::F30: case Mips::D30_64: + case Mips::D15: + return 30; + case Mips::RA: case Mips::RA_64: case Mips::F31: case Mips::D31_64: + return 31; + default: llvm_unreachable("Unknown register number!"); + } +} + +inline static std::pair<const MCSymbolRefExpr*, int64_t> +MipsGetSymAndOffset(const MCFixup &Fixup) { + MCFixupKind FixupKind = Fixup.getKind(); + + if ((FixupKind < FirstTargetFixupKind) || + (FixupKind >= MCFixupKind(Mips::LastTargetFixupKind))) + return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0); + + const MCExpr *Expr = Fixup.getValue(); + MCExpr::ExprKind Kind = Expr->getKind(); + + if (Kind == MCExpr::Binary) { + const MCBinaryExpr *BE = static_cast<const MCBinaryExpr*>(Expr); + const MCExpr *LHS = BE->getLHS(); + const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(BE->getRHS()); + + if ((LHS->getKind() != MCExpr::SymbolRef) || !CE) + return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0); + + return std::make_pair(cast<MCSymbolRefExpr>(LHS), CE->getValue()); + } + + if (Kind != MCExpr::SymbolRef) + return std::make_pair((const MCSymbolRefExpr*)0, (int64_t)0); + + return 
std::make_pair(cast<MCSymbolRefExpr>(Expr), 0); +} +} + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp new file mode 100644 index 0000000..2091bec --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -0,0 +1,249 @@ +//===-- MipsELFObjectWriter.cpp - Mips ELF Writer -------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/MipsBaseInfo.h" +#include "MCTargetDesc/MipsFixupKinds.h" +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" +#include <list> + +using namespace llvm; + +namespace { + struct RelEntry { + RelEntry(const ELFRelocationEntry &R, const MCSymbol *S, int64_t O) : + Reloc(R), Sym(S), Offset(O) {} + ELFRelocationEntry Reloc; + const MCSymbol *Sym; + int64_t Offset; + }; + + typedef std::list<RelEntry> RelLs; + typedef RelLs::iterator RelLsIter; + + class MipsELFObjectWriter : public MCELFObjectTargetWriter { + public: + MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI); + + virtual ~MipsELFObjectWriter(); + + virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, + bool IsPCRel, bool IsRelocWithSymbol, + int64_t Addend) const; + virtual unsigned getEFlags() const; + virtual const MCSymbol *ExplicitRelSym(const MCAssembler &Asm, + const MCValue &Target, + const MCFragment &F, + const MCFixup &Fixup, + bool IsPCRel) const; + virtual void sortRelocs(const MCAssembler &Asm, + std::vector<ELFRelocationEntry> &Relocs); + }; +} + 
+MipsELFObjectWriter::MipsELFObjectWriter(bool _is64Bit, uint8_t OSABI) + : MCELFObjectTargetWriter(_is64Bit, OSABI, ELF::EM_MIPS, + /*HasRelocationAddend*/ false) {} + +MipsELFObjectWriter::~MipsELFObjectWriter() {} + +// FIXME: get the real EABI Version from the Subtarget class. +unsigned MipsELFObjectWriter::getEFlags() const { + + // FIXME: We can't tell if we are PIC (dynamic) or CPIC (static) + unsigned Flag = ELF::EF_MIPS_NOREORDER; + + if (is64Bit()) + Flag |= ELF::EF_MIPS_ARCH_64R2; + else + Flag |= ELF::EF_MIPS_ARCH_32R2; + return Flag; +} + +const MCSymbol *MipsELFObjectWriter::ExplicitRelSym(const MCAssembler &Asm, + const MCValue &Target, + const MCFragment &F, + const MCFixup &Fixup, + bool IsPCRel) const { + assert(Target.getSymA() && "SymA cannot be 0."); + const MCSymbol &Sym = Target.getSymA()->getSymbol().AliasedSymbol(); + + if (Sym.getSection().getKind().isMergeableCString() || + Sym.getSection().getKind().isMergeableConst()) + return &Sym; + + return NULL; +} + +unsigned MipsELFObjectWriter::GetRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel, + bool IsRelocWithSymbol, + int64_t Addend) const { + // determine the type of the relocation + unsigned Type = (unsigned)ELF::R_MIPS_NONE; + unsigned Kind = (unsigned)Fixup.getKind(); + + switch (Kind) { + default: + llvm_unreachable("invalid fixup kind!"); + case FK_Data_4: + Type = ELF::R_MIPS_32; + break; + case FK_GPRel_4: + Type = ELF::R_MIPS_GPREL32; + break; + case Mips::fixup_Mips_GPREL16: + Type = ELF::R_MIPS_GPREL16; + break; + case Mips::fixup_Mips_26: + Type = ELF::R_MIPS_26; + break; + case Mips::fixup_Mips_CALL16: + Type = ELF::R_MIPS_CALL16; + break; + case Mips::fixup_Mips_GOT_Global: + case Mips::fixup_Mips_GOT_Local: + Type = ELF::R_MIPS_GOT16; + break; + case Mips::fixup_Mips_HI16: + Type = ELF::R_MIPS_HI16; + break; + case Mips::fixup_Mips_LO16: + Type = ELF::R_MIPS_LO16; + break; + case Mips::fixup_Mips_TLSGD: + Type = ELF::R_MIPS_TLS_GD; + break; + case 
Mips::fixup_Mips_GOTTPREL: + Type = ELF::R_MIPS_TLS_GOTTPREL; + break; + case Mips::fixup_Mips_TPREL_HI: + Type = ELF::R_MIPS_TLS_TPREL_HI16; + break; + case Mips::fixup_Mips_TPREL_LO: + Type = ELF::R_MIPS_TLS_TPREL_LO16; + break; + case Mips::fixup_Mips_TLSLDM: + Type = ELF::R_MIPS_TLS_LDM; + break; + case Mips::fixup_Mips_DTPREL_HI: + Type = ELF::R_MIPS_TLS_DTPREL_HI16; + break; + case Mips::fixup_Mips_DTPREL_LO: + Type = ELF::R_MIPS_TLS_DTPREL_LO16; + break; + case Mips::fixup_Mips_Branch_PCRel: + case Mips::fixup_Mips_PC16: + Type = ELF::R_MIPS_PC16; + break; + } + + return Type; +} + +// Return true if R is either a GOT16 against a local symbol or HI16. +static bool NeedsMatchingLo(const MCAssembler &Asm, const RelEntry &R) { + if (!R.Sym) + return false; + + MCSymbolData &SD = Asm.getSymbolData(R.Sym->AliasedSymbol()); + + return ((R.Reloc.Type == ELF::R_MIPS_GOT16) && !SD.isExternal()) || + (R.Reloc.Type == ELF::R_MIPS_HI16); +} + +static bool HasMatchingLo(const MCAssembler &Asm, RelLsIter I, RelLsIter Last) { + if (I == Last) + return false; + + RelLsIter Hi = I++; + + return (I->Reloc.Type == ELF::R_MIPS_LO16) && (Hi->Sym == I->Sym) && + (Hi->Offset == I->Offset); +} + +static bool HasSameSymbol(const RelEntry &R0, const RelEntry &R1) { + return R0.Sym == R1.Sym; +} + +static int CompareOffset(const RelEntry &R0, const RelEntry &R1) { + return (R0.Offset > R1.Offset) ? 1 : ((R0.Offset == R1.Offset) ? 0 : -1); +} + +void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm, + std::vector<ELFRelocationEntry> &Relocs) { + // Call the defualt function first. Relocations are sorted in descending + // order of r_offset. + MCELFObjectTargetWriter::sortRelocs(Asm, Relocs); + + RelLs RelocLs; + std::vector<RelLsIter> Unmatched; + + // Fill RelocLs. Traverse Relocs backwards so that relocations in RelocLs + // are in ascending order of r_offset. 
+ for (std::vector<ELFRelocationEntry>::reverse_iterator R = Relocs.rbegin(); + R != Relocs.rend(); ++R) { + std::pair<const MCSymbolRefExpr*, int64_t> P = + MipsGetSymAndOffset(*R->Fixup); + RelocLs.push_back(RelEntry(*R, P.first ? &P.first->getSymbol() : 0, + P.second)); + } + + // Get list of unmatched HI16 and GOT16. + for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R) + if (NeedsMatchingLo(Asm, *R) && !HasMatchingLo(Asm, R, --RelocLs.end())) + Unmatched.push_back(R); + + // Insert unmatched HI16 and GOT16 immediately before their matching LO16. + for (std::vector<RelLsIter>::iterator U = Unmatched.begin(); + U != Unmatched.end(); ++U) { + RelLsIter LoPos = RelocLs.end(), HiPos = *U; + bool MatchedLo = false; + + for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R) { + if ((R->Reloc.Type == ELF::R_MIPS_LO16) && HasSameSymbol(*HiPos, *R) && + (CompareOffset(*R, *HiPos) >= 0) && + ((LoPos == RelocLs.end()) || ((CompareOffset(*R, *LoPos) < 0)) || + (!MatchedLo && !CompareOffset(*R, *LoPos)))) + LoPos = R; + + MatchedLo = NeedsMatchingLo(Asm, *R) && + HasMatchingLo(Asm, R, --RelocLs.end()); + } + + // If a matching LoPos was found, move HiPos and insert it before LoPos. + // Make the offsets of HiPos and LoPos match. + if (LoPos != RelocLs.end()) { + HiPos->Offset = LoPos->Offset; + RelocLs.insert(LoPos, *HiPos); + RelocLs.erase(HiPos); + } + } + + // Put the sorted list back in reverse order. 
+ assert(Relocs.size() == RelocLs.size()); + unsigned I = RelocLs.size(); + + for (RelLsIter R = RelocLs.begin(); R != RelocLs.end(); ++R) + Relocs[--I] = R->Reloc; +} + +MCObjectWriter *llvm::createMipsELFObjectWriter(raw_ostream &OS, + uint8_t OSABI, + bool IsLittleEndian, + bool Is64Bit) { + MCELFObjectTargetWriter *MOTW = new MipsELFObjectWriter(Is64Bit, OSABI); + return createELFObjectWriter(MOTW, OS, IsLittleEndian); +} diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h new file mode 100644 index 0000000..9b76eda --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h @@ -0,0 +1,106 @@ +//===-- MipsFixupKinds.h - Mips Specific Fixup Entries ----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_MIPS_MIPSFIXUPKINDS_H +#define LLVM_MIPS_MIPSFIXUPKINDS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { +namespace Mips { + // Although most of the current fixup types reflect a unique relocation + // one can have multiple fixup types for a given relocation and thus need + // to be uniquely named. + // + // This table *must* be in the save order of + // MCFixupKindInfo Infos[Mips::NumTargetFixupKinds] + // in MipsAsmBackend.cpp. + // + enum Fixups { + // Branch fixups resulting in R_MIPS_16. + fixup_Mips_16 = FirstTargetFixupKind, + + // Pure 32 bit data fixup resulting in - R_MIPS_32. + fixup_Mips_32, + + // Full 32 bit data relative data fixup resulting in - R_MIPS_REL32. + fixup_Mips_REL32, + + // Jump 26 bit fixup resulting in - R_MIPS_26. + fixup_Mips_26, + + // Pure upper 16 bit fixup resulting in - R_MIPS_HI16. + fixup_Mips_HI16, + + // Pure lower 16 bit fixup resulting in - R_MIPS_LO16. 
+ fixup_Mips_LO16, + + // 16 bit fixup for GP offest resulting in - R_MIPS_GPREL16. + fixup_Mips_GPREL16, + + // 16 bit literal fixup resulting in - R_MIPS_LITERAL. + fixup_Mips_LITERAL, + + // Global symbol fixup resulting in - R_MIPS_GOT16. + fixup_Mips_GOT_Global, + + // Local symbol fixup resulting in - R_MIPS_GOT16. + fixup_Mips_GOT_Local, + + // PC relative branch fixup resulting in - R_MIPS_PC16. + fixup_Mips_PC16, + + // resulting in - R_MIPS_CALL16. + fixup_Mips_CALL16, + + // resulting in - R_MIPS_GPREL32. + fixup_Mips_GPREL32, + + // resulting in - R_MIPS_SHIFT5. + fixup_Mips_SHIFT5, + + // resulting in - R_MIPS_SHIFT6. + fixup_Mips_SHIFT6, + + // Pure 64 bit data fixup resulting in - R_MIPS_64. + fixup_Mips_64, + + // resulting in - R_MIPS_TLS_GD. + fixup_Mips_TLSGD, + + // resulting in - R_MIPS_TLS_GOTTPREL. + fixup_Mips_GOTTPREL, + + // resulting in - R_MIPS_TLS_TPREL_HI16. + fixup_Mips_TPREL_HI, + + // resulting in - R_MIPS_TLS_TPREL_LO16. + fixup_Mips_TPREL_LO, + + // resulting in - R_MIPS_TLS_LDM. + fixup_Mips_TLSLDM, + + // resulting in - R_MIPS_TLS_DTPREL_HI16. + fixup_Mips_DTPREL_HI, + + // resulting in - R_MIPS_TLS_DTPREL_LO16. + fixup_Mips_DTPREL_LO, + + // PC relative branch fixup resulting in - R_MIPS_PC16 + fixup_Mips_Branch_PCRel, + + // Marker + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind + }; +} // namespace Mips +} // namespace llvm + + +#endif // LLVM_MIPS_MIPSFIXUPKINDS_H diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp new file mode 100644 index 0000000..9d67aa1 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp @@ -0,0 +1,42 @@ +//===-- MipsMCAsmInfo.cpp - Mips Asm Properties ---------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the MipsMCAsmInfo properties. +// +//===----------------------------------------------------------------------===// + +#include "MipsMCAsmInfo.h" +#include "llvm/ADT/Triple.h" + +using namespace llvm; + +void MipsMCAsmInfo::anchor() { } + +MipsMCAsmInfo::MipsMCAsmInfo(const Target &T, StringRef TT) { + Triple TheTriple(TT); + if ((TheTriple.getArch() == Triple::mips) || + (TheTriple.getArch() == Triple::mips64)) + IsLittleEndian = false; + + AlignmentIsInBytes = false; + Data16bitsDirective = "\t.2byte\t"; + Data32bitsDirective = "\t.4byte\t"; + Data64bitsDirective = "\t.8byte\t"; + PrivateGlobalPrefix = "$"; + CommentString = "#"; + ZeroDirective = "\t.space\t"; + GPRel32Directive = "\t.gpword\t"; + GPRel64Directive = "\t.gpdword\t"; + WeakRefDirective = "\t.weak\t"; + + SupportsDebugInformation = true; + ExceptionsType = ExceptionHandling::DwarfCFI; + HasLEB128 = true; + DwarfRegNumForCFI = true; +} diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h new file mode 100644 index 0000000..e1d8789 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.h @@ -0,0 +1,31 @@ +//===-- MipsMCAsmInfo.h - Mips Asm Info ------------------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the MipsMCAsmInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef MIPSTARGETASMINFO_H +#define MIPSTARGETASMINFO_H + +#include "llvm/MC/MCAsmInfo.h" + +namespace llvm { + class StringRef; + class Target; + + class MipsMCAsmInfo : public MCAsmInfo { + virtual void anchor(); + public: + explicit MipsMCAsmInfo(const Target &T, StringRef TT); + }; + +} // namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp new file mode 100644 index 0000000..27954b1 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -0,0 +1,284 @@ +//===-- MipsMCCodeEmitter.cpp - Convert Mips Code to Machine Code ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the MipsMCCodeEmitter class. 
+// +//===----------------------------------------------------------------------===// +// +#define DEBUG_TYPE "mccodeemitter" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "MCTargetDesc/MipsFixupKinds.h" +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "llvm/ADT/APFloat.h" +#include "llvm/ADT/Statistic.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +namespace { +class MipsMCCodeEmitter : public MCCodeEmitter { + MipsMCCodeEmitter(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT + void operator=(const MipsMCCodeEmitter &); // DO NOT IMPLEMENT + const MCInstrInfo &MCII; + const MCSubtargetInfo &STI; + MCContext &Ctx; + bool IsLittleEndian; + +public: + MipsMCCodeEmitter(const MCInstrInfo &mcii, const MCSubtargetInfo &sti, + MCContext &ctx, bool IsLittle) : + MCII(mcii), STI(sti) , Ctx(ctx), IsLittleEndian(IsLittle) {} + + ~MipsMCCodeEmitter() {} + + void EmitByte(unsigned char C, raw_ostream &OS) const { + OS << (char)C; + } + + void EmitInstruction(uint64_t Val, unsigned Size, raw_ostream &OS) const { + // Output the instruction encoding in little endian byte order. + for (unsigned i = 0; i < Size; ++i) { + unsigned Shift = IsLittleEndian ? i * 8 : (Size - 1 - i) * 8; + EmitByte((Val >> Shift) & 0xff, OS); + } + } + + void EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const; + + // getBinaryCodeForInstr - TableGen'erated function for getting the + // binary encoding for an instruction. + uint64_t getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl<MCFixup> &Fixups) const; + + // getBranchJumpOpValue - Return binary encoding of the jump + // target operand. If the machine operand requires relocation, + // record the relocation and return zero. 
+ unsigned getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + + // getBranchTargetOpValue - Return binary encoding of the branch + // target operand. If the machine operand requires relocation, + // record the relocation and return zero. + unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + + // getMachineOpValue - Return binary encoding of operand. If the machin + // operand requires relocation, record the relocation and return zero. + unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getMemEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getSizeExtEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getSizeInsEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const; + +}; // class MipsMCCodeEmitter +} // namespace + +MCCodeEmitter *llvm::createMipsMCCodeEmitterEB(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx) +{ + return new MipsMCCodeEmitter(MCII, STI, Ctx, false); +} + +MCCodeEmitter *llvm::createMipsMCCodeEmitterEL(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx) +{ + return new MipsMCCodeEmitter(MCII, STI, Ctx, true); +} + +/// EncodeInstruction - Emit the instruction. +/// Size the instruction (currently only 4 bytes +void MipsMCCodeEmitter:: +EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const +{ + uint32_t Binary = getBinaryCodeForInstr(MI, Fixups); + + // Check for unimplemented opcodes. + // Unfortunately in MIPS both NOT and SLL will come in with Binary == 0 + // so we have to special check for them. 
+ unsigned Opcode = MI.getOpcode(); + if ((Opcode != Mips::NOP) && (Opcode != Mips::SLL) && !Binary) + llvm_unreachable("unimplemented opcode in EncodeInstruction()"); + + const MCInstrDesc &Desc = MCII.get(MI.getOpcode()); + uint64_t TSFlags = Desc.TSFlags; + + // Pseudo instructions don't get encoded and shouldn't be here + // in the first place! + if ((TSFlags & MipsII::FormMask) == MipsII::Pseudo) + llvm_unreachable("Pseudo opcode found in EncodeInstruction()"); + + // For now all instructions are 4 bytes + int Size = 4; // FIXME: Have Desc.getSize() return the correct value! + + EmitInstruction(Binary, Size, OS); +} + +/// getBranchTargetOpValue - Return binary encoding of the branch +/// target operand. If the machine operand requires relocation, +/// record the relocation and return zero. +unsigned MipsMCCodeEmitter:: +getBranchTargetOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + + const MCOperand &MO = MI.getOperand(OpNo); + assert(MO.isExpr() && "getBranchTargetOpValue expects only expressions"); + + const MCExpr *Expr = MO.getExpr(); + Fixups.push_back(MCFixup::Create(0, Expr, + MCFixupKind(Mips::fixup_Mips_PC16))); + return 0; +} + +/// getJumpTargetOpValue - Return binary encoding of the jump +/// target operand. If the machine operand requires relocation, +/// record the relocation and return zero. +unsigned MipsMCCodeEmitter:: +getJumpTargetOpValue(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + + const MCOperand &MO = MI.getOperand(OpNo); + assert(MO.isExpr() && "getJumpTargetOpValue expects only expressions"); + + const MCExpr *Expr = MO.getExpr(); + Fixups.push_back(MCFixup::Create(0, Expr, + MCFixupKind(Mips::fixup_Mips_26))); + return 0; +} + +/// getMachineOpValue - Return binary encoding of operand. If the machine +/// operand requires relocation, record the relocation and return zero. 
+unsigned MipsMCCodeEmitter:: +getMachineOpValue(const MCInst &MI, const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const { + if (MO.isReg()) { + unsigned Reg = MO.getReg(); + unsigned RegNo = getMipsRegisterNumbering(Reg); + return RegNo; + } else if (MO.isImm()) { + return static_cast<unsigned>(MO.getImm()); + } else if (MO.isFPImm()) { + return static_cast<unsigned>(APFloat(MO.getFPImm()) + .bitcastToAPInt().getHiBits(32).getLimitedValue()); + } + + // MO must be an Expr. + assert(MO.isExpr()); + + const MCExpr *Expr = MO.getExpr(); + MCExpr::ExprKind Kind = Expr->getKind(); + + if (Kind == MCExpr::Binary) { + Expr = static_cast<const MCBinaryExpr*>(Expr)->getLHS(); + Kind = Expr->getKind(); + } + + assert (Kind == MCExpr::SymbolRef); + + Mips::Fixups FixupKind; + + switch(cast<MCSymbolRefExpr>(Expr)->getKind()) { + case MCSymbolRefExpr::VK_Mips_GPREL: + FixupKind = Mips::fixup_Mips_GPREL16; + break; + case MCSymbolRefExpr::VK_Mips_GOT_CALL: + FixupKind = Mips::fixup_Mips_CALL16; + break; + case MCSymbolRefExpr::VK_Mips_GOT16: + FixupKind = Mips::fixup_Mips_GOT_Global; + break; + case MCSymbolRefExpr::VK_Mips_GOT: + FixupKind = Mips::fixup_Mips_GOT_Local; + break; + case MCSymbolRefExpr::VK_Mips_ABS_HI: + FixupKind = Mips::fixup_Mips_HI16; + break; + case MCSymbolRefExpr::VK_Mips_ABS_LO: + FixupKind = Mips::fixup_Mips_LO16; + break; + case MCSymbolRefExpr::VK_Mips_TLSGD: + FixupKind = Mips::fixup_Mips_TLSGD; + break; + case MCSymbolRefExpr::VK_Mips_TLSLDM: + FixupKind = Mips::fixup_Mips_TLSLDM; + break; + case MCSymbolRefExpr::VK_Mips_DTPREL_HI: + FixupKind = Mips::fixup_Mips_DTPREL_HI; + break; + case MCSymbolRefExpr::VK_Mips_DTPREL_LO: + FixupKind = Mips::fixup_Mips_DTPREL_LO; + break; + case MCSymbolRefExpr::VK_Mips_GOTTPREL: + FixupKind = Mips::fixup_Mips_GOTTPREL; + break; + case MCSymbolRefExpr::VK_Mips_TPREL_HI: + FixupKind = Mips::fixup_Mips_TPREL_HI; + break; + case MCSymbolRefExpr::VK_Mips_TPREL_LO: + FixupKind = Mips::fixup_Mips_TPREL_LO; + 
break; + default: + break; + } // switch + + Fixups.push_back(MCFixup::Create(0, MO.getExpr(), MCFixupKind(FixupKind))); + + // All of the information is in the fixup. + return 0; +} + +/// getMemEncoding - Return binary encoding of memory related operand. +/// If the offset operand requires relocation, record the relocation. +unsigned +MipsMCCodeEmitter::getMemEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + // Base register is encoded in bits 20-16, offset is encoded in bits 15-0. + assert(MI.getOperand(OpNo).isReg()); + unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo),Fixups) << 16; + unsigned OffBits = getMachineOpValue(MI, MI.getOperand(OpNo+1), Fixups); + + return (OffBits & 0xFFFF) | RegBits; +} + +unsigned +MipsMCCodeEmitter::getSizeExtEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + assert(MI.getOperand(OpNo).isImm()); + unsigned SizeEncoding = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups); + return SizeEncoding - 1; +} + +// FIXME: should be called getMSBEncoding +// +unsigned +MipsMCCodeEmitter::getSizeInsEncoding(const MCInst &MI, unsigned OpNo, + SmallVectorImpl<MCFixup> &Fixups) const { + assert(MI.getOperand(OpNo-1).isImm()); + assert(MI.getOperand(OpNo).isImm()); + unsigned Position = getMachineOpValue(MI, MI.getOperand(OpNo-1), Fixups); + unsigned Size = getMachineOpValue(MI, MI.getOperand(OpNo), Fixups); + + return Position + Size - 1; +} + +#include "MipsGenMCCodeEmitter.inc" + diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp new file mode 100644 index 0000000..3c544f6 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -0,0 +1,175 @@ +//===-- MipsMCTargetDesc.cpp - Mips Target Descriptions -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open 
Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides Mips specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#include "MipsMCAsmInfo.h" +#include "MipsMCTargetDesc.h" +#include "InstPrinter/MipsInstPrinter.h" +#include "llvm/MC/MachineLocation.h" +#include "llvm/MC/MCCodeGenInfo.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/TargetRegistry.h" + +#define GET_INSTRINFO_MC_DESC +#include "MipsGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "MipsGenSubtargetInfo.inc" + +#define GET_REGINFO_MC_DESC +#include "MipsGenRegisterInfo.inc" + +using namespace llvm; + +static MCInstrInfo *createMipsMCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitMipsMCInstrInfo(X); + return X; +} + +static MCRegisterInfo *createMipsMCRegisterInfo(StringRef TT) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitMipsMCRegisterInfo(X, Mips::RA); + return X; +} + +static MCSubtargetInfo *createMipsMCSubtargetInfo(StringRef TT, StringRef CPU, + StringRef FS) { + MCSubtargetInfo *X = new MCSubtargetInfo(); + InitMipsMCSubtargetInfo(X, TT, CPU, FS); + return X; +} + +static MCAsmInfo *createMipsMCAsmInfo(const Target &T, StringRef TT) { + MCAsmInfo *MAI = new MipsMCAsmInfo(T, TT); + + MachineLocation Dst(MachineLocation::VirtualFP); + MachineLocation Src(Mips::SP, 0); + MAI->addInitialFrameState(0, Dst, Src); + + return MAI; +} + +static MCCodeGenInfo *createMipsMCCodeGenInfo(StringRef TT, Reloc::Model RM, + CodeModel::Model CM, + CodeGenOpt::Level OL) { + MCCodeGenInfo *X = new MCCodeGenInfo(); + if (CM == CodeModel::JITDefault) + RM = Reloc::Static; + else if (RM == Reloc::Default) + RM = Reloc::PIC_; + X->InitMCCodeGenInfo(RM, CM, OL); 
+ return X; +} + +static MCInstPrinter *createMipsMCInstPrinter(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI) { + return new MipsInstPrinter(MAI, MII, MRI); +} + +static MCStreamer *createMCStreamer(const Target &T, StringRef TT, + MCContext &Ctx, MCAsmBackend &MAB, + raw_ostream &_OS, + MCCodeEmitter *_Emitter, + bool RelaxAll, + bool NoExecStack) { + Triple TheTriple(TT); + + return createELFStreamer(Ctx, MAB, _OS, _Emitter, RelaxAll, NoExecStack); +} + +extern "C" void LLVMInitializeMipsTargetMC() { + // Register the MC asm info. + RegisterMCAsmInfoFn X(TheMipsTarget, createMipsMCAsmInfo); + RegisterMCAsmInfoFn Y(TheMipselTarget, createMipsMCAsmInfo); + RegisterMCAsmInfoFn A(TheMips64Target, createMipsMCAsmInfo); + RegisterMCAsmInfoFn B(TheMips64elTarget, createMipsMCAsmInfo); + + // Register the MC codegen info. + TargetRegistry::RegisterMCCodeGenInfo(TheMipsTarget, + createMipsMCCodeGenInfo); + TargetRegistry::RegisterMCCodeGenInfo(TheMipselTarget, + createMipsMCCodeGenInfo); + TargetRegistry::RegisterMCCodeGenInfo(TheMips64Target, + createMipsMCCodeGenInfo); + TargetRegistry::RegisterMCCodeGenInfo(TheMips64elTarget, + createMipsMCCodeGenInfo); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(TheMipsTarget, createMipsMCInstrInfo); + TargetRegistry::RegisterMCInstrInfo(TheMipselTarget, createMipsMCInstrInfo); + TargetRegistry::RegisterMCInstrInfo(TheMips64Target, createMipsMCInstrInfo); + TargetRegistry::RegisterMCInstrInfo(TheMips64elTarget, + createMipsMCInstrInfo); + + // Register the MC register info. 
+ TargetRegistry::RegisterMCRegInfo(TheMipsTarget, createMipsMCRegisterInfo); + TargetRegistry::RegisterMCRegInfo(TheMipselTarget, createMipsMCRegisterInfo); + TargetRegistry::RegisterMCRegInfo(TheMips64Target, createMipsMCRegisterInfo); + TargetRegistry::RegisterMCRegInfo(TheMips64elTarget, + createMipsMCRegisterInfo); + + // Register the MC Code Emitter + TargetRegistry::RegisterMCCodeEmitter(TheMipsTarget, + createMipsMCCodeEmitterEB); + TargetRegistry::RegisterMCCodeEmitter(TheMipselTarget, + createMipsMCCodeEmitterEL); + TargetRegistry::RegisterMCCodeEmitter(TheMips64Target, + createMipsMCCodeEmitterEB); + TargetRegistry::RegisterMCCodeEmitter(TheMips64elTarget, + createMipsMCCodeEmitterEL); + + // Register the object streamer. + TargetRegistry::RegisterMCObjectStreamer(TheMipsTarget, createMCStreamer); + TargetRegistry::RegisterMCObjectStreamer(TheMipselTarget, createMCStreamer); + TargetRegistry::RegisterMCObjectStreamer(TheMips64Target, createMCStreamer); + TargetRegistry::RegisterMCObjectStreamer(TheMips64elTarget, + createMCStreamer); + + // Register the asm backend. + TargetRegistry::RegisterMCAsmBackend(TheMipsTarget, + createMipsAsmBackendEB32); + TargetRegistry::RegisterMCAsmBackend(TheMipselTarget, + createMipsAsmBackendEL32); + TargetRegistry::RegisterMCAsmBackend(TheMips64Target, + createMipsAsmBackendEB64); + TargetRegistry::RegisterMCAsmBackend(TheMips64elTarget, + createMipsAsmBackendEL64); + + // Register the MC subtarget info. + TargetRegistry::RegisterMCSubtargetInfo(TheMipsTarget, + createMipsMCSubtargetInfo); + TargetRegistry::RegisterMCSubtargetInfo(TheMipselTarget, + createMipsMCSubtargetInfo); + TargetRegistry::RegisterMCSubtargetInfo(TheMips64Target, + createMipsMCSubtargetInfo); + TargetRegistry::RegisterMCSubtargetInfo(TheMips64elTarget, + createMipsMCSubtargetInfo); + + // Register the MCInstPrinter. 
+ TargetRegistry::RegisterMCInstPrinter(TheMipsTarget, + createMipsMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(TheMipselTarget, + createMipsMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(TheMips64Target, + createMipsMCInstPrinter); + TargetRegistry::RegisterMCInstPrinter(TheMips64elTarget, + createMipsMCInstPrinter); +} diff --git a/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h new file mode 100644 index 0000000..547ccdd --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.h @@ -0,0 +1,65 @@ +//===-- MipsMCTargetDesc.h - Mips Target Descriptions -----------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides Mips specific target descriptions. 
+// +//===----------------------------------------------------------------------===// + +#ifndef MIPSMCTARGETDESC_H +#define MIPSMCTARGETDESC_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectWriter; +class MCSubtargetInfo; +class StringRef; +class Target; +class raw_ostream; + +extern Target TheMipsTarget; +extern Target TheMipselTarget; +extern Target TheMips64Target; +extern Target TheMips64elTarget; + +MCCodeEmitter *createMipsMCCodeEmitterEB(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx); +MCCodeEmitter *createMipsMCCodeEmitterEL(const MCInstrInfo &MCII, + const MCSubtargetInfo &STI, + MCContext &Ctx); + +MCAsmBackend *createMipsAsmBackendEB32(const Target &T, StringRef TT); +MCAsmBackend *createMipsAsmBackendEL32(const Target &T, StringRef TT); +MCAsmBackend *createMipsAsmBackendEB64(const Target &T, StringRef TT); +MCAsmBackend *createMipsAsmBackendEL64(const Target &T, StringRef TT); + +MCObjectWriter *createMipsELFObjectWriter(raw_ostream &OS, + uint8_t OSABI, + bool IsLittleEndian, + bool Is64Bit); +} // End llvm namespace + +// Defines symbolic names for Mips registers. This defines a mapping from +// register name to register number. +#define GET_REGINFO_ENUM +#include "MipsGenRegisterInfo.inc" + +// Defines symbolic names for the Mips instructions. +#define GET_INSTRINFO_ENUM +#include "MipsGenInstrInfo.inc" + +#define GET_SUBTARGETINFO_ENUM +#include "MipsGenSubtargetInfo.inc" + +#endif diff --git a/contrib/llvm/lib/Target/Mips/Mips.h b/contrib/llvm/lib/Target/Mips/Mips.h new file mode 100644 index 0000000..bafadc8 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/Mips.h @@ -0,0 +1,35 @@ +//===-- Mips.h - Top-level interface for Mips representation ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the entry points for global functions defined in +// the LLVM Mips back-end. +// +//===----------------------------------------------------------------------===// + +#ifndef TARGET_MIPS_H +#define TARGET_MIPS_H + +#include "MCTargetDesc/MipsMCTargetDesc.h" +#include "llvm/Target/TargetMachine.h" + +namespace llvm { + class MipsTargetMachine; + class FunctionPass; + + FunctionPass *createMipsISelDag(MipsTargetMachine &TM); + FunctionPass *createMipsDelaySlotFillerPass(MipsTargetMachine &TM); + FunctionPass *createMipsExpandPseudoPass(MipsTargetMachine &TM); + FunctionPass *createMipsEmitGPRestorePass(MipsTargetMachine &TM); + + FunctionPass *createMipsJITCodeEmitterPass(MipsTargetMachine &TM, + JITCodeEmitter &JCE); + +} // end namespace llvm; + +#endif diff --git a/contrib/llvm/lib/Target/Mips/Mips.td b/contrib/llvm/lib/Target/Mips/Mips.td new file mode 100644 index 0000000..cbebe84 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/Mips.td @@ -0,0 +1,97 @@ +//===-- Mips.td - Describe the Mips Target Machine ---------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// This is the top level entry point for the Mips target. 
+//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Target-independent interfaces +//===----------------------------------------------------------------------===// + +include "llvm/Target/Target.td" + +//===----------------------------------------------------------------------===// +// Register File, Calling Conv, Instruction Descriptions +//===----------------------------------------------------------------------===// + +include "MipsRegisterInfo.td" +include "MipsSchedule.td" +include "MipsInstrInfo.td" +include "MipsCallingConv.td" + +def MipsInstrInfo : InstrInfo; + +//===----------------------------------------------------------------------===// +// Mips Subtarget features // +//===----------------------------------------------------------------------===// + +def FeatureGP64Bit : SubtargetFeature<"gp64", "IsGP64bit", "true", + "General Purpose Registers are 64-bit wide.">; +def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true", + "Support 64-bit FP registers.">; +def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat", + "true", "Only supports single precision float">; +def FeatureO32 : SubtargetFeature<"o32", "MipsABI", "O32", + "Enable o32 ABI">; +def FeatureN32 : SubtargetFeature<"n32", "MipsABI", "N32", + "Enable n32 ABI">; +def FeatureN64 : SubtargetFeature<"n64", "MipsABI", "N64", + "Enable n64 ABI">; +def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI", + "Enable eabi ABI">; +def FeatureVFPU : SubtargetFeature<"vfpu", "HasVFPU", + "true", "Enable vector FPU instructions.">; +def FeatureSEInReg : SubtargetFeature<"seinreg", "HasSEInReg", "true", + "Enable 'signext in register' instructions.">; +def FeatureCondMov : SubtargetFeature<"condmov", "HasCondMov", "true", + "Enable 'conditional move' instructions.">; +def FeatureMulDivAdd : SubtargetFeature<"muldivadd", "HasMulDivAdd", "true", + "Enable 
'multiply add/sub' instructions.">; +def FeatureMinMax : SubtargetFeature<"minmax", "HasMinMax", "true", + "Enable 'min/max' instructions.">; +def FeatureSwap : SubtargetFeature<"swap", "HasSwap", "true", + "Enable 'byte/half swap' instructions.">; +def FeatureBitCount : SubtargetFeature<"bitcount", "HasBitCount", "true", + "Enable 'count leading bits' instructions.">; +def FeatureMips32 : SubtargetFeature<"mips32", "MipsArchVersion", "Mips32", + "Mips32 ISA Support", + [FeatureCondMov, FeatureBitCount]>; +def FeatureMips32r2 : SubtargetFeature<"mips32r2", "MipsArchVersion", + "Mips32r2", "Mips32r2 ISA Support", + [FeatureMips32, FeatureSEInReg, FeatureSwap]>; +def FeatureMips64 : SubtargetFeature<"mips64", "MipsArchVersion", + "Mips64", "Mips64 ISA Support", + [FeatureGP64Bit, FeatureFP64Bit, + FeatureMips32]>; +def FeatureMips64r2 : SubtargetFeature<"mips64r2", "MipsArchVersion", + "Mips64r2", "Mips64r2 ISA Support", + [FeatureMips64, FeatureMips32r2]>; + +//===----------------------------------------------------------------------===// +// Mips processors supported. 
+//===----------------------------------------------------------------------===// + +class Proc<string Name, list<SubtargetFeature> Features> + : Processor<Name, MipsGenericItineraries, Features>; + +def : Proc<"mips32", [FeatureMips32]>; +def : Proc<"mips32r2", [FeatureMips32r2]>; +def : Proc<"mips64", [FeatureMips64]>; +def : Proc<"mips64r2", [FeatureMips64r2]>; + +def MipsAsmWriter : AsmWriter { + string AsmWriterClassName = "InstPrinter"; + bit isMCAsmWriter = 1; +} + +def Mips : Target { + let InstructionSet = MipsInstrInfo; + + let AssemblyWriters = [MipsAsmWriter]; +} + diff --git a/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td new file mode 100644 index 0000000..427e8d9 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/Mips64InstrInfo.td @@ -0,0 +1,273 @@ +//===- Mips64InstrInfo.td - Mips64 Instruction Information -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes Mips64 instructions. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Mips Operand, Complex Patterns and Transformations Definitions. +//===----------------------------------------------------------------------===// + +// Instruction operand types +def shamt_64 : Operand<i64>; + +// Unsigned Operand +def uimm16_64 : Operand<i64> { + let PrintMethod = "printUnsignedImm"; +} + +// Transformation Function - get Imm - 32. +def Subtract32 : SDNodeXForm<imm, [{ + return getImm(N, (unsigned)N->getZExtValue() - 32); +}]>; + +// shamt must fit in 6 bits. 
+def immZExt6 : ImmLeaf<i32, [{return Imm == (Imm & 0x3f);}]>; + +//===----------------------------------------------------------------------===// +// Instructions specific format +//===----------------------------------------------------------------------===// +// Shifts +// 64-bit shift instructions. +class shift_rotate_imm64<bits<6> func, bits<5> isRotate, string instr_asm, + SDNode OpNode>: + shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt6, shamt, + CPU64Regs>; + +// Mul, Div +class Mult64<bits<6> func, string instr_asm, InstrItinClass itin>: + Mult<func, instr_asm, itin, CPU64Regs, [HI64, LO64]>; +class Div64<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>: + Div<op, func, instr_asm, itin, CPU64Regs, [HI64, LO64]>; + +multiclass Atomic2Ops64<PatFrag Op, string Opstr> { + def #NAME# : Atomic2Ops<Op, Opstr, CPU64Regs, CPURegs>, Requires<[NotN64]>; + def _P8 : Atomic2Ops<Op, Opstr, CPU64Regs, CPU64Regs>, Requires<[IsN64]>; +} + +multiclass AtomicCmpSwap64<PatFrag Op, string Width> { + def #NAME# : AtomicCmpSwap<Op, Width, CPU64Regs, CPURegs>, Requires<[NotN64]>; + def _P8 : AtomicCmpSwap<Op, Width, CPU64Regs, CPU64Regs>, + Requires<[IsN64]>; +} + +let usesCustomInserter = 1, Predicates = [HasMips64] in { + defm ATOMIC_LOAD_ADD_I64 : Atomic2Ops64<atomic_load_add_64, "load_add_64">; + defm ATOMIC_LOAD_SUB_I64 : Atomic2Ops64<atomic_load_sub_64, "load_sub_64">; + defm ATOMIC_LOAD_AND_I64 : Atomic2Ops64<atomic_load_and_64, "load_and_64">; + defm ATOMIC_LOAD_OR_I64 : Atomic2Ops64<atomic_load_or_64, "load_or_64">; + defm ATOMIC_LOAD_XOR_I64 : Atomic2Ops64<atomic_load_xor_64, "load_xor_64">; + defm ATOMIC_LOAD_NAND_I64 : Atomic2Ops64<atomic_load_nand_64, "load_nand_64">; + defm ATOMIC_SWAP_I64 : Atomic2Ops64<atomic_swap_64, "swap_64">; + defm ATOMIC_CMP_SWAP_I64 : AtomicCmpSwap64<atomic_cmp_swap_64, "64">; +} + +//===----------------------------------------------------------------------===// +// Instruction definition 
+//===----------------------------------------------------------------------===// + +/// Arithmetic Instructions (ALU Immediate) +def DADDiu : ArithLogicI<0x19, "daddiu", add, simm16_64, immSExt16, + CPU64Regs>; +def DANDi : ArithLogicI<0x0c, "andi", and, uimm16_64, immZExt16, CPU64Regs>; +def SLTi64 : SetCC_I<0x0a, "slti", setlt, simm16_64, immSExt16, CPU64Regs>; +def SLTiu64 : SetCC_I<0x0b, "sltiu", setult, simm16_64, immSExt16, CPU64Regs>; +def ORi64 : ArithLogicI<0x0d, "ori", or, uimm16_64, immZExt16, CPU64Regs>; +def XORi64 : ArithLogicI<0x0e, "xori", xor, uimm16_64, immZExt16, CPU64Regs>; +def LUi64 : LoadUpper<0x0f, "lui", CPU64Regs, uimm16_64>; + +/// Arithmetic Instructions (3-Operand, R-Type) +def DADDu : ArithLogicR<0x00, 0x2d, "daddu", add, IIAlu, CPU64Regs, 1>; +def DSUBu : ArithLogicR<0x00, 0x2f, "dsubu", sub, IIAlu, CPU64Regs>; +def SLT64 : SetCC_R<0x00, 0x2a, "slt", setlt, CPU64Regs>; +def SLTu64 : SetCC_R<0x00, 0x2b, "sltu", setult, CPU64Regs>; +def AND64 : ArithLogicR<0x00, 0x24, "and", and, IIAlu, CPU64Regs, 1>; +def OR64 : ArithLogicR<0x00, 0x25, "or", or, IIAlu, CPU64Regs, 1>; +def XOR64 : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPU64Regs, 1>; +def NOR64 : LogicNOR<0x00, 0x27, "nor", CPU64Regs>; + +/// Shift Instructions +def DSLL : shift_rotate_imm64<0x38, 0x00, "dsll", shl>; +def DSRL : shift_rotate_imm64<0x3a, 0x00, "dsrl", srl>; +def DSRA : shift_rotate_imm64<0x3b, 0x00, "dsra", sra>; +def DSLLV : shift_rotate_reg<0x24, 0x00, "dsllv", shl, CPU64Regs>; +def DSRLV : shift_rotate_reg<0x26, 0x00, "dsrlv", srl, CPU64Regs>; +def DSRAV : shift_rotate_reg<0x27, 0x00, "dsrav", sra, CPU64Regs>; + +// Rotate Instructions +let Predicates = [HasMips64r2] in { + def DROTR : shift_rotate_imm64<0x3a, 0x01, "drotr", rotr>; + def DROTRV : shift_rotate_reg<0x16, 0x01, "drotrv", rotr, CPU64Regs>; +} + +/// Load and Store Instructions +/// aligned +defm LB64 : LoadM64<0x20, "lb", sextloadi8>; +defm LBu64 : LoadM64<0x24, "lbu", zextloadi8>; +defm LH64 : 
LoadM64<0x21, "lh", sextloadi16_a>; +defm LHu64 : LoadM64<0x25, "lhu", zextloadi16_a>; +defm LW64 : LoadM64<0x23, "lw", sextloadi32_a>; +defm LWu64 : LoadM64<0x27, "lwu", zextloadi32_a>; +defm SB64 : StoreM64<0x28, "sb", truncstorei8>; +defm SH64 : StoreM64<0x29, "sh", truncstorei16_a>; +defm SW64 : StoreM64<0x2b, "sw", truncstorei32_a>; +defm LD : LoadM64<0x37, "ld", load_a>; +defm SD : StoreM64<0x3f, "sd", store_a>; + +/// unaligned +defm ULH64 : LoadM64<0x21, "ulh", sextloadi16_u, 1>; +defm ULHu64 : LoadM64<0x25, "ulhu", zextloadi16_u, 1>; +defm ULW64 : LoadM64<0x23, "ulw", sextloadi32_u, 1>; +defm USH64 : StoreM64<0x29, "ush", truncstorei16_u, 1>; +defm USW64 : StoreM64<0x2b, "usw", truncstorei32_u, 1>; +defm ULD : LoadM64<0x37, "uld", load_u, 1>; +defm USD : StoreM64<0x3f, "usd", store_u, 1>; + +/// Load-linked, Store-conditional +def LLD : LLBase<0x34, "lld", CPU64Regs, mem>, Requires<[NotN64]>; +def LLD_P8 : LLBase<0x34, "lld", CPU64Regs, mem64>, Requires<[IsN64]>; +def SCD : SCBase<0x3c, "scd", CPU64Regs, mem>, Requires<[NotN64]>; +def SCD_P8 : SCBase<0x3c, "scd", CPU64Regs, mem64>, Requires<[IsN64]>; + +/// Jump and Branch Instructions +def JR64 : JumpFR<0x00, 0x08, "jr", CPU64Regs>; +def BEQ64 : CBranch<0x04, "beq", seteq, CPU64Regs>; +def BNE64 : CBranch<0x05, "bne", setne, CPU64Regs>; +def BGEZ64 : CBranchZero<0x01, 1, "bgez", setge, CPU64Regs>; +def BGTZ64 : CBranchZero<0x07, 0, "bgtz", setgt, CPU64Regs>; +def BLEZ64 : CBranchZero<0x07, 0, "blez", setle, CPU64Regs>; +def BLTZ64 : CBranchZero<0x01, 0, "bltz", setlt, CPU64Regs>; + +def JALR64 : JumpLinkReg<0x00, 0x09, "jalr", CPU64Regs>; + +/// Multiply and Divide Instructions. 
+def DMULT : Mult64<0x1c, "dmult", IIImul>; +def DMULTu : Mult64<0x1d, "dmultu", IIImul>; +def DSDIV : Div64<MipsDivRem, 0x1e, "ddiv", IIIdiv>; +def DUDIV : Div64<MipsDivRemU, 0x1f, "ddivu", IIIdiv>; + +def MTHI64 : MoveToLOHI<0x11, "mthi", CPU64Regs, [HI64]>; +def MTLO64 : MoveToLOHI<0x13, "mtlo", CPU64Regs, [LO64]>; +def MFHI64 : MoveFromLOHI<0x10, "mfhi", CPU64Regs, [HI64]>; +def MFLO64 : MoveFromLOHI<0x12, "mflo", CPU64Regs, [LO64]>; + +/// Sign Ext In Register Instructions. +def SEB64 : SignExtInReg<0x10, "seb", i8, CPU64Regs>; +def SEH64 : SignExtInReg<0x18, "seh", i16, CPU64Regs>; + +/// Count Leading +def DCLZ : CountLeading0<0x24, "dclz", CPU64Regs>; +def DCLO : CountLeading1<0x25, "dclo", CPU64Regs>; + +/// Double Word Swap Bytes/HalfWords +def DSBH : SubwordSwap<0x24, 0x2, "dsbh", CPU64Regs>; +def DSHD : SubwordSwap<0x24, 0x5, "dshd", CPU64Regs>; + +def LEA_ADDiu64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>; + +let Uses = [SP_64] in +def DynAlloc64 : EffectiveAddress<"daddiu\t$rt, $addr", CPU64Regs, mem_ea_64>, + Requires<[IsN64]>; + +def RDHWR64 : ReadHardware<CPU64Regs, HWRegs64>; + +def DEXT : ExtBase<3, "dext", CPU64Regs>; +def DINS : InsBase<7, "dins", CPU64Regs>; + +def DSLL64_32 : FR<0x3c, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt), + "dsll\t$rd, $rt, 32", [], IIAlu>; + +def SLL64_32 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPURegs:$rt), + "sll\t$rd, $rt, 0", [], IIAlu>; +def SLL64_64 : FR<0x0, 0x00, (outs CPU64Regs:$rd), (ins CPU64Regs:$rt), + "sll\t$rd, $rt, 0", [], IIAlu>; + +//===----------------------------------------------------------------------===// +// Arbitrary patterns that map to one or more instructions +//===----------------------------------------------------------------------===// + +// extended loads +let Predicates = [NotN64] in { + def : Pat<(i64 (extloadi1 addr:$src)), (LB64 addr:$src)>; + def : Pat<(i64 (extloadi8 addr:$src)), (LB64 addr:$src)>; + def : Pat<(i64 (extloadi16_a addr:$src)), (LH64 
addr:$src)>; + def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64 addr:$src)>; + def : Pat<(i64 (extloadi32_a addr:$src)), (LW64 addr:$src)>; + def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64 addr:$src)>; + def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64 addr:$a), 32), 32)>; +} +let Predicates = [IsN64] in { + def : Pat<(i64 (extloadi1 addr:$src)), (LB64_P8 addr:$src)>; + def : Pat<(i64 (extloadi8 addr:$src)), (LB64_P8 addr:$src)>; + def : Pat<(i64 (extloadi16_a addr:$src)), (LH64_P8 addr:$src)>; + def : Pat<(i64 (extloadi16_u addr:$src)), (ULH64_P8 addr:$src)>; + def : Pat<(i64 (extloadi32_a addr:$src)), (LW64_P8 addr:$src)>; + def : Pat<(i64 (extloadi32_u addr:$src)), (ULW64_P8 addr:$src)>; + def : Pat<(zextloadi32_u addr:$a), (DSRL (DSLL (ULW64_P8 addr:$a), 32), 32)>; +} + +// hi/lo relocs +def : Pat<(MipsHi tglobaladdr:$in), (LUi64 tglobaladdr:$in)>; +def : Pat<(MipsHi tblockaddress:$in), (LUi64 tblockaddress:$in)>; +def : Pat<(MipsHi tjumptable:$in), (LUi64 tjumptable:$in)>; +def : Pat<(MipsHi tconstpool:$in), (LUi64 tconstpool:$in)>; +def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi64 tglobaltlsaddr:$in)>; + +def : Pat<(MipsLo tglobaladdr:$in), (DADDiu ZERO_64, tglobaladdr:$in)>; +def : Pat<(MipsLo tblockaddress:$in), (DADDiu ZERO_64, tblockaddress:$in)>; +def : Pat<(MipsLo tjumptable:$in), (DADDiu ZERO_64, tjumptable:$in)>; +def : Pat<(MipsLo tconstpool:$in), (DADDiu ZERO_64, tconstpool:$in)>; +def : Pat<(MipsLo tglobaltlsaddr:$in), (DADDiu ZERO_64, tglobaltlsaddr:$in)>; + +def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaladdr:$lo)), + (DADDiu CPU64Regs:$hi, tglobaladdr:$lo)>; +def : Pat<(add CPU64Regs:$hi, (MipsLo tblockaddress:$lo)), + (DADDiu CPU64Regs:$hi, tblockaddress:$lo)>; +def : Pat<(add CPU64Regs:$hi, (MipsLo tjumptable:$lo)), + (DADDiu CPU64Regs:$hi, tjumptable:$lo)>; +def : Pat<(add CPU64Regs:$hi, (MipsLo tconstpool:$lo)), + (DADDiu CPU64Regs:$hi, tconstpool:$lo)>; +def : Pat<(add CPU64Regs:$hi, (MipsLo tglobaltlsaddr:$lo)), + (DADDiu CPU64Regs:$hi, 
tglobaltlsaddr:$lo)>; + +def : WrapperPat<tglobaladdr, DADDiu, CPU64Regs>; +def : WrapperPat<tconstpool, DADDiu, CPU64Regs>; +def : WrapperPat<texternalsym, DADDiu, CPU64Regs>; +def : WrapperPat<tblockaddress, DADDiu, CPU64Regs>; +def : WrapperPat<tjumptable, DADDiu, CPU64Regs>; +def : WrapperPat<tglobaltlsaddr, DADDiu, CPU64Regs>; + +defm : BrcondPats<CPU64Regs, BEQ64, BNE64, SLT64, SLTu64, SLTi64, SLTiu64, + ZERO_64>; + +// setcc patterns +defm : SeteqPats<CPU64Regs, SLTiu64, XOR64, SLTu64, ZERO_64>; +defm : SetlePats<CPU64Regs, SLT64, SLTu64>; +defm : SetgtPats<CPU64Regs, SLT64, SLTu64>; +defm : SetgePats<CPU64Regs, SLT64, SLTu64>; +defm : SetgeImmPats<CPU64Regs, SLTi64, SLTiu64>; + +// select MipsDynAlloc +def : Pat<(MipsDynAlloc addr:$f), (DynAlloc64 addr:$f)>, Requires<[IsN64]>; + +// truncate +def : Pat<(i32 (trunc CPU64Regs:$src)), + (SLL (EXTRACT_SUBREG CPU64Regs:$src, sub_32), 0)>, Requires<[IsN64]>; + +// 32-to-64-bit extension +def : Pat<(i64 (anyext CPURegs:$src)), (SLL64_32 CPURegs:$src)>; +def : Pat<(i64 (zext CPURegs:$src)), (DSRL (DSLL64_32 CPURegs:$src), 32)>; +def : Pat<(i64 (sext CPURegs:$src)), (SLL64_32 CPURegs:$src)>; + +// Sign extend in register +def : Pat<(i64 (sext_inreg CPU64Regs:$src, i32)), (SLL64_64 CPU64Regs:$src)>; + +// bswap pattern +def : Pat<(bswap CPU64Regs:$rt), (DSHD (DSBH CPU64Regs:$rt))>; diff --git a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp new file mode 100644 index 0000000..dc8fbd0 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.cpp @@ -0,0 +1,153 @@ +//===-- MipsAnalyzeImmediate.cpp - Analyze Immediates ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +#include "MipsAnalyzeImmediate.h" +#include "Mips.h" +#include "llvm/Support/MathExtras.h" + +using namespace llvm; + +MipsAnalyzeImmediate::Inst::Inst(unsigned O, unsigned I) : Opc(O), ImmOpnd(I) {} + +// Add I to the instruction sequences. +void MipsAnalyzeImmediate::AddInstr(InstSeqLs &SeqLs, const Inst &I) { + // Add an instruction seqeunce consisting of just I. + if (SeqLs.empty()) { + SeqLs.push_back(InstSeq(1, I)); + return; + } + + for (InstSeqLs::iterator Iter = SeqLs.begin(); Iter != SeqLs.end(); ++Iter) + Iter->push_back(I); +} + +void MipsAnalyzeImmediate::GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize, + InstSeqLs &SeqLs) { + GetInstSeqLs((Imm + 0x8000ULL) & 0xffffffffffff0000ULL, RemSize, SeqLs); + AddInstr(SeqLs, Inst(ADDiu, Imm & 0xffffULL)); +} + +void MipsAnalyzeImmediate::GetInstSeqLsORi(uint64_t Imm, unsigned RemSize, + InstSeqLs &SeqLs) { + GetInstSeqLs(Imm & 0xffffffffffff0000ULL, RemSize, SeqLs); + AddInstr(SeqLs, Inst(ORi, Imm & 0xffffULL)); +} + +void MipsAnalyzeImmediate::GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize, + InstSeqLs &SeqLs) { + unsigned Shamt = CountTrailingZeros_64(Imm); + GetInstSeqLs(Imm >> Shamt, RemSize - Shamt, SeqLs); + AddInstr(SeqLs, Inst(SLL, Shamt)); +} + +void MipsAnalyzeImmediate::GetInstSeqLs(uint64_t Imm, unsigned RemSize, + InstSeqLs &SeqLs) { + uint64_t MaskedImm = Imm & (0xffffffffffffffffULL >> (64 - Size)); + + // Do nothing if Imm is 0. + if (!MaskedImm) + return; + + // A single ADDiu will do if RemSize <= 16. + if (RemSize <= 16) { + AddInstr(SeqLs, Inst(ADDiu, MaskedImm)); + return; + } + + // Shift if the lower 16-bit is cleared. + if (!(Imm & 0xffff)) { + GetInstSeqLsSLL(Imm, RemSize, SeqLs); + return; + } + + GetInstSeqLsADDiu(Imm, RemSize, SeqLs); + + // If bit 15 is cleared, it doesn't make a difference whether the last + // instruction is an ADDiu or ORi. In that case, do not call GetInstSeqLsORi. 
+ if (Imm & 0x8000) { + InstSeqLs SeqLsORi; + GetInstSeqLsORi(Imm, RemSize, SeqLsORi); + SeqLs.insert(SeqLs.end(), SeqLsORi.begin(), SeqLsORi.end()); + } +} + +// Replace a ADDiu & SLL pair with a LUi. +// e.g. the following two instructions +// ADDiu 0x0111 +// SLL 18 +// are replaced with +// LUi 0x444 +void MipsAnalyzeImmediate::ReplaceADDiuSLLWithLUi(InstSeq &Seq) { + // Check if the first two instructions are ADDiu and SLL and the shift amount + // is at least 16. + if ((Seq.size() < 2) || (Seq[0].Opc != ADDiu) || + (Seq[1].Opc != SLL) || (Seq[1].ImmOpnd < 16)) + return; + + // Sign-extend and shift operand of ADDiu and see if it still fits in 16-bit. + int64_t Imm = SignExtend64<16>(Seq[0].ImmOpnd); + int64_t ShiftedImm = Imm << (Seq[1].ImmOpnd - 16); + + if (!isInt<16>(ShiftedImm)) + return; + + // Replace the first instruction and erase the second. + Seq[0].Opc = LUi; + Seq[0].ImmOpnd = (unsigned)(ShiftedImm & 0xffff); + Seq.erase(Seq.begin() + 1); +} + +void MipsAnalyzeImmediate::GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts) { + InstSeqLs::iterator ShortestSeq = SeqLs.end(); + // The length of an instruction sequence is at most 7. + unsigned ShortestLength = 8; + + for (InstSeqLs::iterator S = SeqLs.begin(); S != SeqLs.end(); ++S) { + ReplaceADDiuSLLWithLUi(*S); + assert(S->size() <= 7); + + if (S->size() < ShortestLength) { + ShortestSeq = S; + ShortestLength = S->size(); + } + } + + Insts.clear(); + Insts.append(ShortestSeq->begin(), ShortestSeq->end()); +} + +const MipsAnalyzeImmediate::InstSeq +&MipsAnalyzeImmediate::Analyze(uint64_t Imm, unsigned Size, + bool LastInstrIsADDiu) { + this->Size = Size; + + if (Size == 32) { + ADDiu = Mips::ADDiu; + ORi = Mips::ORi; + SLL = Mips::SLL; + LUi = Mips::LUi; + } else { + ADDiu = Mips::DADDiu; + ORi = Mips::ORi64; + SLL = Mips::DSLL; + LUi = Mips::LUi64; + } + + InstSeqLs SeqLs; + + // Get the list of instruction sequences. 
+ if (LastInstrIsADDiu | !Imm) + GetInstSeqLsADDiu(Imm, Size, SeqLs); + else + GetInstSeqLs(Imm, Size, SeqLs); + + // Set Insts to the shortest instruction sequence. + GetShortestSeq(SeqLs, Insts); + + return Insts; +} diff --git a/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h new file mode 100644 index 0000000..a094dda --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsAnalyzeImmediate.h @@ -0,0 +1,63 @@ +//===-- MipsAnalyzeImmediate.h - Analyze Immediates ------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +#ifndef MIPS_ANALYZE_IMMEDIATE_H +#define MIPS_ANALYZE_IMMEDIATE_H + +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/DataTypes.h" + +namespace llvm { + + class MipsAnalyzeImmediate { + public: + struct Inst { + unsigned Opc, ImmOpnd; + Inst(unsigned Opc, unsigned ImmOpnd); + }; + typedef SmallVector<Inst, 7 > InstSeq; + + /// Analyze - Get an instrucion sequence to load immediate Imm. The last + /// instruction in the sequence must be an ADDiu if LastInstrIsADDiu is + /// true; + const InstSeq &Analyze(uint64_t Imm, unsigned Size, bool LastInstrIsADDiu); + private: + typedef SmallVector<InstSeq, 5> InstSeqLs; + + /// AddInstr - Add I to all instruction sequences in SeqLs. 
+ void AddInstr(InstSeqLs &SeqLs, const Inst &I); + + /// GetInstSeqLsADDiu - Get instrucion sequences which end with an ADDiu to + /// load immediate Imm + void GetInstSeqLsADDiu(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs); + + /// GetInstSeqLsORi - Get instrucion sequences which end with an ORi to + /// load immediate Imm + void GetInstSeqLsORi(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs); + + /// GetInstSeqLsSLL - Get instrucion sequences which end with a SLL to + /// load immediate Imm + void GetInstSeqLsSLL(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs); + + /// GetInstSeqLs - Get instrucion sequences to load immediate Imm. + void GetInstSeqLs(uint64_t Imm, unsigned RemSize, InstSeqLs &SeqLs); + + /// ReplaceADDiuSLLWithLUi - Replace an ADDiu & SLL pair with a LUi. + void ReplaceADDiuSLLWithLUi(InstSeq &Seq); + + /// GetShortestSeq - Find the shortest instruction sequence in SeqLs and + /// return it in Insts. + void GetShortestSeq(InstSeqLs &SeqLs, InstSeq &Insts); + + unsigned Size; + unsigned ADDiu, ORi, SLL, LUi; + InstSeq Insts; + }; +} + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp new file mode 100644 index 0000000..8206cfc --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -0,0 +1,562 @@ +//===-- MipsAsmPrinter.cpp - Mips LLVM Assembly Printer -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains a printer that converts from our internal representation +// of machine-dependent LLVM code to GAS-format MIPS assembly language. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-asm-printer" +#include "MipsAsmPrinter.h" +#include "Mips.h" +#include "MipsInstrInfo.h" +#include "InstPrinter/MipsInstPrinter.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/ADT/StringExtras.h" +#include "llvm/ADT/Twine.h" +#include "llvm/Analysis/DebugInfo.h" +#include "llvm/BasicBlock.h" +#include "llvm/Instructions.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineMemOperand.h" +#include "llvm/Instructions.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCAsmInfo.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Target/Mangler.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetLoweringObjectFile.h" +#include "llvm/Target/TargetOptions.h" + +using namespace llvm; + +void MipsAsmPrinter::EmitInstrWithMacroNoAT(const MachineInstr *MI) { + MCInst TmpInst; + + MCInstLowering.Lower(MI, TmpInst); + OutStreamer.EmitRawText(StringRef("\t.set\tmacro")); + if (MipsFI->getEmitNOAT()) + OutStreamer.EmitRawText(StringRef("\t.set\tat")); + OutStreamer.EmitInstruction(TmpInst); + if (MipsFI->getEmitNOAT()) + OutStreamer.EmitRawText(StringRef("\t.set\tnoat")); + OutStreamer.EmitRawText(StringRef("\t.set\tnomacro")); +} + +bool MipsAsmPrinter::runOnMachineFunction(MachineFunction &MF) { + MipsFI = MF.getInfo<MipsFunctionInfo>(); + AsmPrinter::runOnMachineFunction(MF); + return true; +} + +void MipsAsmPrinter::EmitInstruction(const MachineInstr *MI) { + if (MI->isDebugValue()) { + SmallString<128> Str; + raw_svector_ostream OS(Str); + + PrintDebugValueComment(MI, OS); + return; + } + + unsigned Opc = MI->getOpcode(); + MCInst 
TmpInst0; + SmallVector<MCInst, 4> MCInsts; + + switch (Opc) { + case Mips::ULW: + case Mips::ULH: + case Mips::ULHu: + case Mips::USW: + case Mips::USH: + case Mips::ULW_P8: + case Mips::ULH_P8: + case Mips::ULHu_P8: + case Mips::USW_P8: + case Mips::USH_P8: + case Mips::ULD: + case Mips::ULW64: + case Mips::ULH64: + case Mips::ULHu64: + case Mips::USD: + case Mips::USW64: + case Mips::USH64: + case Mips::ULD_P8: + case Mips::ULW64_P8: + case Mips::ULH64_P8: + case Mips::ULHu64_P8: + case Mips::USD_P8: + case Mips::USW64_P8: + case Mips::USH64_P8: { + if (OutStreamer.hasRawTextSupport()) { + EmitInstrWithMacroNoAT(MI); + return; + } + + MCInstLowering.LowerUnalignedLoadStore(MI, MCInsts); + for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); I + != MCInsts.end(); ++I) + OutStreamer.EmitInstruction(*I); + + return; + } + case Mips::CPRESTORE: { + const MachineOperand &MO = MI->getOperand(0); + assert(MO.isImm() && "CPRESTORE's operand must be an immediate."); + int64_t Offset = MO.getImm(); + + if (OutStreamer.hasRawTextSupport()) { + if (!isInt<16>(Offset)) { + EmitInstrWithMacroNoAT(MI); + return; + } + } else { + MCInstLowering.LowerCPRESTORE(Offset, MCInsts); + + for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); + I != MCInsts.end(); ++I) + OutStreamer.EmitInstruction(*I); + + return; + } + + break; + } + case Mips::SETGP01: { + MCInstLowering.LowerSETGP01(MI, MCInsts); + + for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); + I != MCInsts.end(); ++I) + OutStreamer.EmitInstruction(*I); + + return; + } + default: + break; + } + + MCInstLowering.Lower(MI, TmpInst0); + OutStreamer.EmitInstruction(TmpInst0); +} + +//===----------------------------------------------------------------------===// +// +// Mips Asm Directives +// +// -- Frame directive "frame Stackpointer, Stacksize, RARegister" +// Describe the stack frame. +// +// -- Mask directives "(f)mask bitmask, offset" +// Tells the assembler which registers are saved and where. 
+// bitmask - contain a little endian bitset indicating which registers are +// saved on function prologue (e.g. with a 0x80000000 mask, the +// assembler knows the register 31 (RA) is saved at prologue. +// offset - the position before stack pointer subtraction indicating where +// the first saved register on prologue is located. (e.g. with a +// +// Consider the following function prologue: +// +// .frame $fp,48,$ra +// .mask 0xc0000000,-8 +// addiu $sp, $sp, -48 +// sw $ra, 40($sp) +// sw $fp, 36($sp) +// +// With a 0xc0000000 mask, the assembler knows the register 31 (RA) and +// 30 (FP) are saved at prologue. As the save order on prologue is from +// left to right, RA is saved first. A -8 offset means that after the +// stack pointer subtration, the first register in the mask (RA) will be +// saved at address 48-8=40. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Mask directives +//===----------------------------------------------------------------------===// + +// Create a bitmask with all callee saved registers for CPU or Floating Point +// registers. For CPU registers consider RA, GP and FP for saving if necessary. +void MipsAsmPrinter::printSavedRegsBitmask(raw_ostream &O) { + // CPU and FPU Saved Registers Bitmasks + unsigned CPUBitmask = 0, FPUBitmask = 0; + int CPUTopSavedRegOff, FPUTopSavedRegOff; + + // Set the CPU and FPU Bitmasks + const MachineFrameInfo *MFI = MF->getFrameInfo(); + const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); + // size of stack area to which FP callee-saved regs are saved. + unsigned CPURegSize = Mips::CPURegsRegisterClass->getSize(); + unsigned FGR32RegSize = Mips::FGR32RegisterClass->getSize(); + unsigned AFGR64RegSize = Mips::AFGR64RegisterClass->getSize(); + bool HasAFGR64Reg = false; + unsigned CSFPRegsSize = 0; + unsigned i, e = CSI.size(); + + // Set FPU Bitmask. 
+ for (i = 0; i != e; ++i) { + unsigned Reg = CSI[i].getReg(); + if (Mips::CPURegsRegisterClass->contains(Reg)) + break; + + unsigned RegNum = getMipsRegisterNumbering(Reg); + if (Mips::AFGR64RegisterClass->contains(Reg)) { + FPUBitmask |= (3 << RegNum); + CSFPRegsSize += AFGR64RegSize; + HasAFGR64Reg = true; + continue; + } + + FPUBitmask |= (1 << RegNum); + CSFPRegsSize += FGR32RegSize; + } + + // Set CPU Bitmask. + for (; i != e; ++i) { + unsigned Reg = CSI[i].getReg(); + unsigned RegNum = getMipsRegisterNumbering(Reg); + CPUBitmask |= (1 << RegNum); + } + + // FP Regs are saved right below where the virtual frame pointer points to. + FPUTopSavedRegOff = FPUBitmask ? + (HasAFGR64Reg ? -AFGR64RegSize : -FGR32RegSize) : 0; + + // CPU Regs are saved below FP Regs. + CPUTopSavedRegOff = CPUBitmask ? -CSFPRegsSize - CPURegSize : 0; + + // Print CPUBitmask + O << "\t.mask \t"; printHex32(CPUBitmask, O); + O << ',' << CPUTopSavedRegOff << '\n'; + + // Print FPUBitmask + O << "\t.fmask\t"; printHex32(FPUBitmask, O); + O << "," << FPUTopSavedRegOff << '\n'; +} + +// Print a 32 bit hex number with all numbers. 
+void MipsAsmPrinter::printHex32(unsigned Value, raw_ostream &O) { + O << "0x"; + for (int i = 7; i >= 0; i--) + O.write_hex((Value & (0xF << (i*4))) >> (i*4)); +} + +//===----------------------------------------------------------------------===// +// Frame and Set directives +//===----------------------------------------------------------------------===// + +/// Frame Directive +void MipsAsmPrinter::emitFrameDirective() { + const TargetRegisterInfo &RI = *TM.getRegisterInfo(); + + unsigned stackReg = RI.getFrameRegister(*MF); + unsigned returnReg = RI.getRARegister(); + unsigned stackSize = MF->getFrameInfo()->getStackSize(); + + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText("\t.frame\t$" + + StringRef(MipsInstPrinter::getRegisterName(stackReg)).lower() + + "," + Twine(stackSize) + ",$" + + StringRef(MipsInstPrinter::getRegisterName(returnReg)).lower()); +} + +/// Emit Set directives. +const char *MipsAsmPrinter::getCurrentABIString() const { + switch (Subtarget->getTargetABI()) { + case MipsSubtarget::O32: return "abi32"; + case MipsSubtarget::N32: return "abiN32"; + case MipsSubtarget::N64: return "abi64"; + case MipsSubtarget::EABI: return "eabi32"; // TODO: handle eabi64 + default: llvm_unreachable("Unknown Mips ABI");; + } +} + +void MipsAsmPrinter::EmitFunctionEntryLabel() { + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText("\t.ent\t" + Twine(CurrentFnSym->getName())); + OutStreamer.EmitLabel(CurrentFnSym); +} + +/// EmitFunctionBodyStart - Targets can override this to emit stuff before +/// the first basic block in the function. 
+void MipsAsmPrinter::EmitFunctionBodyStart() { + MCInstLowering.Initialize(Mang, &MF->getContext()); + + emitFrameDirective(); + + bool EmitCPLoad = (MF->getTarget().getRelocationModel() == Reloc::PIC_) && + Subtarget->isABI_O32() && MipsFI->globalBaseRegSet() && + MipsFI->globalBaseRegFixed(); + + if (OutStreamer.hasRawTextSupport()) { + SmallString<128> Str; + raw_svector_ostream OS(Str); + printSavedRegsBitmask(OS); + OutStreamer.EmitRawText(OS.str()); + + OutStreamer.EmitRawText(StringRef("\t.set\tnoreorder")); + + // Emit .cpload directive if needed. + if (EmitCPLoad) + OutStreamer.EmitRawText(StringRef("\t.cpload\t$25")); + + OutStreamer.EmitRawText(StringRef("\t.set\tnomacro")); + if (MipsFI->getEmitNOAT()) + OutStreamer.EmitRawText(StringRef("\t.set\tnoat")); + } else if (EmitCPLoad) { + SmallVector<MCInst, 4> MCInsts; + MCInstLowering.LowerCPLOAD(MCInsts); + for (SmallVector<MCInst, 4>::iterator I = MCInsts.begin(); + I != MCInsts.end(); ++I) + OutStreamer.EmitInstruction(*I); + } +} + +/// EmitFunctionBodyEnd - Targets can override this to emit stuff after +/// the last basic block in the function. +void MipsAsmPrinter::EmitFunctionBodyEnd() { + // There are instruction for this macros, but they must + // always be at the function end, and we can't emit and + // break with BB logic. + if (OutStreamer.hasRawTextSupport()) { + if (MipsFI->getEmitNOAT()) + OutStreamer.EmitRawText(StringRef("\t.set\tat")); + + OutStreamer.EmitRawText(StringRef("\t.set\tmacro")); + OutStreamer.EmitRawText(StringRef("\t.set\treorder")); + OutStreamer.EmitRawText("\t.end\t" + Twine(CurrentFnSym->getName())); + } +} + +/// isBlockOnlyReachableByFallthough - Return true if the basic block has +/// exactly one predecessor and the control transfer mechanism between +/// the predecessor and this block is a fall-through. +bool MipsAsmPrinter::isBlockOnlyReachableByFallthrough(const MachineBasicBlock* + MBB) const { + // The predecessor has to be immediately before this block. 
+ const MachineBasicBlock *Pred = *MBB->pred_begin(); + + // If the predecessor is a switch statement, assume a jump table + // implementation, so it is not a fall through. + if (const BasicBlock *bb = Pred->getBasicBlock()) + if (isa<SwitchInst>(bb->getTerminator())) + return false; + + // If this is a landing pad, it isn't a fall through. If it has no preds, + // then nothing falls through to it. + if (MBB->isLandingPad() || MBB->pred_empty()) + return false; + + // If there isn't exactly one predecessor, it can't be a fall through. + MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), PI2 = PI; + ++PI2; + + if (PI2 != MBB->pred_end()) + return false; + + // The predecessor has to be immediately before this block. + if (!Pred->isLayoutSuccessor(MBB)) + return false; + + // If the block is completely empty, then it definitely does fall through. + if (Pred->empty()) + return true; + + // Otherwise, check the last instruction. + // Check if the last terminator is an unconditional branch. + MachineBasicBlock::const_iterator I = Pred->end(); + while (I != Pred->begin() && !(--I)->isTerminator()) ; + + return !I->isBarrier(); +} + +// Print out an operand for an inline asm expression. +bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, + unsigned AsmVariant,const char *ExtraCode, + raw_ostream &O) { + // Does this asm operand have a single letter operand modifier? + if (ExtraCode && ExtraCode[0]) + return true; // Unknown modifier. + + printOperand(MI, OpNo, O); + return false; +} + +bool MipsAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, + unsigned OpNum, unsigned AsmVariant, + const char *ExtraCode, + raw_ostream &O) { + if (ExtraCode && ExtraCode[0]) + return true; // Unknown modifier. 
+ + const MachineOperand &MO = MI->getOperand(OpNum); + assert(MO.isReg() && "unexpected inline asm memory operand"); + O << "0($" << MipsInstPrinter::getRegisterName(MO.getReg()) << ")"; + return false; +} + +void MipsAsmPrinter::printOperand(const MachineInstr *MI, int opNum, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(opNum); + bool closeP = false; + + if (MO.getTargetFlags()) + closeP = true; + + switch(MO.getTargetFlags()) { + case MipsII::MO_GPREL: O << "%gp_rel("; break; + case MipsII::MO_GOT_CALL: O << "%call16("; break; + case MipsII::MO_GOT: O << "%got("; break; + case MipsII::MO_ABS_HI: O << "%hi("; break; + case MipsII::MO_ABS_LO: O << "%lo("; break; + case MipsII::MO_TLSGD: O << "%tlsgd("; break; + case MipsII::MO_GOTTPREL: O << "%gottprel("; break; + case MipsII::MO_TPREL_HI: O << "%tprel_hi("; break; + case MipsII::MO_TPREL_LO: O << "%tprel_lo("; break; + case MipsII::MO_GPOFF_HI: O << "%hi(%neg(%gp_rel("; break; + case MipsII::MO_GPOFF_LO: O << "%lo(%neg(%gp_rel("; break; + case MipsII::MO_GOT_DISP: O << "%got_disp("; break; + case MipsII::MO_GOT_PAGE: O << "%got_page("; break; + case MipsII::MO_GOT_OFST: O << "%got_ofst("; break; + } + + switch (MO.getType()) { + case MachineOperand::MO_Register: + O << '$' + << StringRef(MipsInstPrinter::getRegisterName(MO.getReg())).lower(); + break; + + case MachineOperand::MO_Immediate: + O << MO.getImm(); + break; + + case MachineOperand::MO_MachineBasicBlock: + O << *MO.getMBB()->getSymbol(); + return; + + case MachineOperand::MO_GlobalAddress: + O << *Mang->getSymbol(MO.getGlobal()); + break; + + case MachineOperand::MO_BlockAddress: { + MCSymbol* BA = GetBlockAddressSymbol(MO.getBlockAddress()); + O << BA->getName(); + break; + } + + case MachineOperand::MO_ExternalSymbol: + O << *GetExternalSymbolSymbol(MO.getSymbolName()); + break; + + case MachineOperand::MO_JumpTableIndex: + O << MAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber() + << '_' << MO.getIndex(); + break; + + 
case MachineOperand::MO_ConstantPoolIndex: + O << MAI->getPrivateGlobalPrefix() << "CPI" + << getFunctionNumber() << "_" << MO.getIndex(); + if (MO.getOffset()) + O << "+" << MO.getOffset(); + break; + + default: + llvm_unreachable("<unknown operand type>"); + } + + if (closeP) O << ")"; +} + +void MipsAsmPrinter::printUnsignedImm(const MachineInstr *MI, int opNum, + raw_ostream &O) { + const MachineOperand &MO = MI->getOperand(opNum); + if (MO.isImm()) + O << (unsigned short int)MO.getImm(); + else + printOperand(MI, opNum, O); +} + +void MipsAsmPrinter:: +printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O) { + // Load/Store memory operands -- imm($reg) + // If PIC target the target is loaded as the + // pattern lw $25,%call16($28) + printOperand(MI, opNum+1, O); + O << "("; + printOperand(MI, opNum, O); + O << ")"; +} + +void MipsAsmPrinter:: +printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O) { + // when using stack locations for not load/store instructions + // print the same way as all normal 3 operand instructions. + printOperand(MI, opNum, O); + O << ", "; + printOperand(MI, opNum+1, O); + return; +} + +void MipsAsmPrinter:: +printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O, + const char *Modifier) { + const MachineOperand& MO = MI->getOperand(opNum); + O << Mips::MipsFCCToString((Mips::CondCode)MO.getImm()); +} + +void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) { + // FIXME: Use SwitchSection. + + // Tell the assembler which ABI we are using + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText("\t.section .mdebug." 
+ + Twine(getCurrentABIString())); + + // TODO: handle O64 ABI + if (OutStreamer.hasRawTextSupport()) { + if (Subtarget->isABI_EABI()) { + if (Subtarget->isGP32bit()) + OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long32")); + else + OutStreamer.EmitRawText(StringRef("\t.section .gcc_compiled_long64")); + } + } + + // return to previous section + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText(StringRef("\t.previous")); +} + +MachineLocation +MipsAsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { + // Handles frame addresses emitted in MipsInstrInfo::emitFrameIndexDebugValue. + assert(MI->getNumOperands() == 4 && "Invalid no. of machine operands!"); + assert(MI->getOperand(0).isReg() && MI->getOperand(1).isImm() && + "Unexpected MachineOperand types"); + return MachineLocation(MI->getOperand(0).getReg(), + MI->getOperand(1).getImm()); +} + +void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI, + raw_ostream &OS) { + // TODO: implement +} + +// Force static initialization. +extern "C" void LLVMInitializeMipsAsmPrinter() { + RegisterAsmPrinter<MipsAsmPrinter> X(TheMipsTarget); + RegisterAsmPrinter<MipsAsmPrinter> Y(TheMipselTarget); + RegisterAsmPrinter<MipsAsmPrinter> A(TheMips64Target); + RegisterAsmPrinter<MipsAsmPrinter> B(TheMips64elTarget); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h new file mode 100644 index 0000000..562bf9c --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsAsmPrinter.h @@ -0,0 +1,81 @@ +//===-- MipsAsmPrinter.h - Mips LLVM Assembly Printer ----------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Mips Assembly printer class. 
//
//===----------------------------------------------------------------------===//

#ifndef MIPSASMPRINTER_H
#define MIPSASMPRINTER_H

#include "MipsMachineFunction.h"
#include "MipsMCInstLower.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/AsmPrinter.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {
class MCStreamer;
class MachineInstr;
class MachineBasicBlock;
class Module;
class raw_ostream;

// AsmPrinter specialization that prints Mips machine instructions and the
// Mips-specific directives surrounding them.
class LLVM_LIBRARY_VISIBILITY MipsAsmPrinter : public AsmPrinter {

  // Helper for instructions that expand to assembler macros; implementation
  // lives in MipsAsmPrinter.cpp (not visible here).
  void EmitInstrWithMacroNoAT(const MachineInstr *MI);

public:

  // Subtarget of the TargetMachine this printer was created for; cached once
  // in the constructor below.
  const MipsSubtarget *Subtarget;
  // Mips-specific per-function state; presumably refreshed for each function
  // in runOnMachineFunction -- TODO confirm against the .cpp.
  const MipsFunctionInfo *MipsFI;
  // MachineInstr -> MCInst lowering helper, bound to this printer.
  MipsMCInstLower MCInstLowering;

  explicit MipsAsmPrinter(TargetMachine &TM, MCStreamer &Streamer)
    : AsmPrinter(TM, Streamer), MCInstLowering(*this) {
    Subtarget = &TM.getSubtarget<MipsSubtarget>();
  }

  virtual const char *getPassName() const {
    return "Mips Assembly Printer";
  }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  void EmitInstruction(const MachineInstr *MI);
  // Function-prologue helpers (implemented in the .cpp).
  void printSavedRegsBitmask(raw_ostream &O);
  void printHex32(unsigned int Value, raw_ostream &O);
  void emitFrameDirective();
  // Name of the ABI in effect; the exact strings are defined in the .cpp.
  const char *getCurrentABIString() const;
  virtual void EmitFunctionEntryLabel();
  virtual void EmitFunctionBodyStart();
  virtual void EmitFunctionBodyEnd();
  virtual bool isBlockOnlyReachableByFallthrough(const MachineBasicBlock*
                                                 MBB) const;
  // Inline-asm operand hooks; both return true for an unknown operand
  // modifier (AsmPrinter contract).
  bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                       unsigned AsmVariant, const char *ExtraCode,
                       raw_ostream &O);
  bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
                             unsigned AsmVariant, const char *ExtraCode,
                             raw_ostream &O);
  // Operand-printing helpers used by the instruction printer and the
  // inline-asm hooks above.
  void printOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
  void printUnsignedImm(const MachineInstr *MI, int opNum, raw_ostream &O);
  void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &O);
  void printMemOperandEA(const MachineInstr *MI, int opNum, raw_ostream &O);
  void printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
                       const char *Modifier = 0);
  void EmitStartOfAsmFile(Module &M);
  virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
  void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
};
}

#endif

diff --git a/contrib/llvm/lib/Target/Mips/MipsCallingConv.td b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
new file mode 100644
index 0000000..4b7e1d3
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsCallingConv.td
@@ -0,0 +1,179 @@
//===-- MipsCallingConv.td - Calling Conventions for Mips --*- tablegen -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This describes the calling conventions for Mips architecture.
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
/// The condition is pasted verbatim into generated C++, so F must be a valid
/// MipsSubtarget member-call expression (e.g. "isABI_EABI()").
class CCIfSubtarget<string F, CCAction A>:
  CCIf<!strconcat("State.getTarget().getSubtarget<MipsSubtarget>().", F), A>;

//===----------------------------------------------------------------------===//
// Mips O32 Calling Convention
//===----------------------------------------------------------------------===//

// Only the return rules are defined here for O32. The rules for argument
// passing are defined in MipsISelLowering.cpp.
def RetCC_MipsO32 : CallingConv<[
  // i32 values are returned in registers V0, V1, A0, A1.
  CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>,

  // f32 values are returned in registers F0, F2.
  CCIfType<[f32], CCAssignToReg<[F0, F2]>>,

  // f64 values are returned in registers D0, D1, but only when the FPU is not
  // restricted to single-precision (no 64-bit FP registers otherwise).
  CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0, D1]>>>
]>;

//===----------------------------------------------------------------------===//
// Mips N32/64 Calling Convention
//===----------------------------------------------------------------------===//

def CC_MipsN : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCCustom<"CC_Mips64Byval">>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Integer arguments are passed in integer registers.  Each integer register
  // shadows the FP register in the same argument slot (and vice versa below),
  // so a slot is consumed from both sequences at once.
  CCIfType<[i32], CCAssignToRegWithShadow<[A0, A1, A2, A3,
                                           T0, T1, T2, T3],
                                          [F12, F13, F14, F15,
                                           F16, F17, F18, F19]>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[A0_64, A1_64, A2_64, A3_64,
                                           T0_64, T1_64, T2_64, T3_64],
                                          [D12_64, D13_64, D14_64, D15_64,
                                           D16_64, D17_64, D18_64, D19_64]>>,

  // f32 arguments are passed in single precision FP registers.
  CCIfType<[f32], CCAssignToRegWithShadow<[F12, F13, F14, F15,
                                           F16, F17, F18, F19],
                                          [A0_64, A1_64, A2_64, A3_64,
                                           T0_64, T1_64, T2_64, T3_64]>>,

  // f64 arguments are passed in double precision FP registers.
  CCIfType<[f64], CCAssignToRegWithShadow<[D12_64, D13_64, D14_64, D15_64,
                                           D16_64, D17_64, D18_64, D19_64],
                                          [A0_64, A1_64, A2_64, A3_64,
                                           T0_64, T1_64, T2_64, T3_64]>>,

  // All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

// N32/64 variable arguments.
// All arguments are passed in integer registers (FP varargs go in GPRs too).
def CC_MipsN_VarArg : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCCustom<"CC_Mips64Byval">>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  CCIfType<[i32, f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,

  CCIfType<[i64, f64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64,
                                      T0_64, T1_64, T2_64, T3_64]>>,

  // All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
  CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

def RetCC_MipsN : CallingConv<[
  // i32 values are returned in registers V0, V1.
  CCIfType<[i32], CCAssignToReg<[V0, V1]>>,

  // i64 values are returned in registers V0_64, V1_64.
  CCIfType<[i64], CCAssignToReg<[V0_64, V1_64]>>,

  // f32 values are returned in registers F0, F2.
  CCIfType<[f32], CCAssignToReg<[F0, F2]>>,

  // f64 values are returned in the 64-bit FP registers D0_64, D2_64.
  CCIfType<[f64], CCAssignToReg<[D0_64, D2_64]>>
]>;

//===----------------------------------------------------------------------===//
// Mips EABI Calling Convention
//===----------------------------------------------------------------------===//

def CC_MipsEABI : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,

  // In single-float mode every FP register can carry an f32, so all of
  // F12-F19 are usable for single-precision arguments.
  CCIfType<[f32], CCIfSubtarget<"isSingleFloat()",
                  CCAssignToReg<[F12, F13, F14, F15, F16, F17, F18, F19]>>>,

  // Otherwise only the even registers are used for f32 arguments.
  CCIfType<[f32], CCIfSubtarget<"isNotSingleFloat()",
                  CCAssignToReg<[F12, F14, F16, F18]>>>,

  // The first 4 double fp arguments are passed in double fp registers.
  CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()",
                  CCAssignToReg<[D6, D7, D8, D9]>>>,

  // Integer values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // f64 values get stored in stack slots that are 8 bytes in
  // size and 8-byte aligned.
  CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToStack<8, 8>>>
]>;

def RetCC_MipsEABI : CallingConv<[
  // i32 values are returned in registers V0, V1.
  CCIfType<[i32], CCAssignToReg<[V0, V1]>>,

  // f32 values are returned in registers F0, F1.
  CCIfType<[f32], CCAssignToReg<[F0, F1]>>,

  // f64 values are returned in register D0 (hardware double support only).
  CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0]>>>
]>;

//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//

// Note: there is no O32 entry here because O32 argument passing is lowered by
// hand in MipsISelLowering.cpp (see the comment on RetCC_MipsO32 above).
def CC_Mips : CallingConv<[
  CCIfSubtarget<"isABI_EABI()", CCDelegateTo<CC_MipsEABI>>,
  CCIfSubtarget<"isABI_N32()", CCDelegateTo<CC_MipsN>>,
  CCIfSubtarget<"isABI_N64()", CCDelegateTo<CC_MipsN>>
]>;

// Return-value dispatch: falls through to the O32 rules when no other ABI
// predicate matches, so O32 must stay last.
def RetCC_Mips : CallingConv<[
  CCIfSubtarget<"isABI_EABI()", CCDelegateTo<RetCC_MipsEABI>>,
  CCIfSubtarget<"isABI_N32()", CCDelegateTo<RetCC_MipsN>>,
  CCIfSubtarget<"isABI_N64()", CCDelegateTo<RetCC_MipsN>>,
  CCDelegateTo<RetCC_MipsO32>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved register lists.
//===----------------------------------------------------------------------===//

// Callee-saved set when the FPU only supports single-precision floats:
// F20-F31 plus RA, FP and S0-S7.
def CSR_SingleFloatOnly : CalleeSavedRegs<(add (sequence "F%u", 31, 20), RA, FP,
                                               (sequence "S%u", 7, 0))>;

// O32 callee-saved set: D10-D15 plus RA, FP and S0-S7.
def CSR_O32 : CalleeSavedRegs<(add (sequence "D%u", 15, 10), RA, FP,
                                   (sequence "S%u", 7, 0))>;

// N32 callee-saved set.  The saved 64-bit FP registers are not a contiguous
// range (odd registers D21-D31 plus D24), so they are listed explicitly.
def CSR_N32 : CalleeSavedRegs<(add D31_64, D29_64, D27_64, D25_64, D24_64,
                                   D23_64, D22_64, D21_64, RA_64, FP_64, GP_64,
                                   (sequence "S%u_64", 7, 0))>;

// N64 callee-saved set: D24_64-D31_64 plus RA, FP, GP and S0-S7 (64-bit).
def CSR_N64 : CalleeSavedRegs<(add (sequence "D%u_64", 31, 24), RA_64, FP_64,
                                   GP_64, (sequence "S%u_64", 7, 0))>;
diff --git a/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
new file mode 100644
index 0000000..7d81902
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsCodeEmitter.cpp
@@ -0,0 +1,427 @@
//===-- Mips/MipsCodeEmitter.cpp - Convert Mips Code to Machine Code ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file contains the pass that transforms the Mips machine instructions
// into relocatable machine code.
//
//===---------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "Mips.h"
#include "MipsInstrInfo.h"
#include "MipsRelocations.h"
#include "MipsSubtarget.h"
#include "MipsTargetMachine.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/JITCodeEmitter.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/PassManager.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#ifndef NDEBUG
#include <iomanip>
#endif

using namespace llvm;

STATISTIC(NumEmitted, "Number of machine instructions emitted");

namespace {

// MachineFunctionPass that converts each function's machine instructions into
// raw MIPS machine-code words (plus relocation entries) written into a
// JITCodeEmitter buffer.
class MipsCodeEmitter : public MachineFunctionPass {
  MipsJITInfo *JTI;                // Set per function in runOnMachineFunction.
  const MipsInstrInfo *II;
  const TargetData *TD;
  const MipsSubtarget *Subtarget;  // Set per function in runOnMachineFunction.
  TargetMachine &TM;
  JITCodeEmitter &MCE;             // Destination buffer for emitted words.
  const std::vector<MachineConstantPoolEntry> *MCPEs;
  const std::vector<MachineJumpTableEntry> *MJTEs;
  bool IsPIC;

  void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<MachineModuleInfo> ();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  static char ID;

  public:
    MipsCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) :
      MachineFunctionPass(ID), JTI(0),
      II((const MipsInstrInfo *) tm.getInstrInfo()),
      TD(tm.getTargetData()), TM(tm), MCE(mce), MCPEs(0), MJTEs(0),
      IsPIC(TM.getRelocationModel() == Reloc::PIC_) {
    }

    bool runOnMachineFunction(MachineFunction &MF);

    virtual const char *getPassName() const {
      return "Mips Machine Code Emitter";
    }

    /// getBinaryCodeForInstr - This function, generated by the
    /// CodeEmitterGenerator using TableGen, produces the binary encoding for
    /// machine instructions.
    uint64_t getBinaryCodeForInstr(const MachineInstr &MI) const;

    void emitInstruction(const MachineInstr &MI);

  private:

    void emitWordLE(unsigned Word);

    /// Routines that handle operands which add machine relocations which are
    /// fixed up by the relocation stage.
    void emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
                           bool MayNeedFarStub) const;
    void emitExternalSymbolAddress(const char *ES, unsigned Reloc) const;
    void emitConstPoolAddress(unsigned CPI, unsigned Reloc) const;
    void emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const;
    void emitMachineBasicBlock(MachineBasicBlock *BB, unsigned Reloc) const;

    /// getMachineOpValue - Return binary encoding of operand. If the machine
    /// operand requires relocation, record the relocation and return zero.
    unsigned getMachineOpValue(const MachineInstr &MI,
                               const MachineOperand &MO) const;

    unsigned getRelocation(const MachineInstr &MI,
                           const MachineOperand &MO) const;

    unsigned getJumpTargetOpValue(const MachineInstr &MI, unsigned OpNo) const;

    unsigned getBranchTargetOpValue(const MachineInstr &MI,
                                    unsigned OpNo) const;
    unsigned getMemEncoding(const MachineInstr &MI, unsigned OpNo) const;
    unsigned getSizeExtEncoding(const MachineInstr &MI, unsigned OpNo) const;
    unsigned getSizeInsEncoding(const MachineInstr &MI, unsigned OpNo) const;

    // Expansions for the unaligned load/store pseudo-instructions; each
    // returns the number of instruction words it emitted.
    int emitULW(const MachineInstr &MI);
    int emitUSW(const MachineInstr &MI);
    int emitULH(const MachineInstr &MI);
    int emitULHu(const MachineInstr &MI);
    int emitUSH(const MachineInstr &MI);

    void emitGlobalAddressUnaligned(const GlobalValue *GV, unsigned Reloc,
                                    int Offset) const;
  };
}

char MipsCodeEmitter::ID = 0;

bool MipsCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
  // Refresh the per-function/per-target cached pointers before emission.
  JTI = ((MipsTargetMachine&) MF.getTarget()).getJITInfo();
  II = ((const MipsTargetMachine&) MF.getTarget()).getInstrInfo();
  TD = ((const MipsTargetMachine&) MF.getTarget()).getTargetData();
  Subtarget = &TM.getSubtarget<MipsSubtarget> ();
  MCPEs = &MF.getConstantPool()->getConstants();
  MJTEs = 0;
  if (MF.getJumpTableInfo()) MJTEs = &MF.getJumpTableInfo()->getJumpTables();
  JTI->Initialize(MF, IsPIC);
  MCE.setModuleInfo(&getAnalysis<MachineModuleInfo> ());

  do {
    DEBUG(errs() << "JITTing function '"
          << MF.getFunction()->getName() << "'\n");
    MCE.startFunction(MF);

    for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
         MBB != E; ++MBB){
      MCE.StartMachineBasicBlock(MBB);
      for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
           I != E; ++I)
        emitInstruction(*I);
    }
    // Re-run the whole emission when finishFunction() reports a retry is
    // needed (per the JITCodeEmitter contract, e.g. when the buffer was too
    // small -- TODO confirm).
  } while (MCE.finishFunction(MF));

  return false;
}

unsigned MipsCodeEmitter::getRelocation(const MachineInstr &MI,
                                        const MachineOperand &MO) const {
  // NOTE: These relocation kinds are for the static relocation model.
  // The choice is driven purely by the instruction format bits in TSFlags.
  uint64_t TSFlags = MI.getDesc().TSFlags;
  uint64_t Form = TSFlags & MipsII::FormMask;
  if (Form == MipsII::FrmJ)
    return Mips::reloc_mips_26;        // 26-bit jump target (j/jal).
  if ((Form == MipsII::FrmI || Form == MipsII::FrmFI)
       && MI.isBranch())
    return Mips::reloc_mips_pc16;      // PC-relative 16-bit branch offset.
  if (Form == MipsII::FrmI && MI.getOpcode() == Mips::LUi)
    return Mips::reloc_mips_hi;        // High 16 bits of an address (lui).
  return Mips::reloc_mips_lo;          // Low 16 bits otherwise.
}

unsigned MipsCodeEmitter::getJumpTargetOpValue(const MachineInstr &MI,
                                               unsigned OpNo) const {
  // Jump targets are always resolved through relocations, so the encoded
  // field value is 0 and a relocation entry is recorded instead.
  MachineOperand MO = MI.getOperand(OpNo);
  if (MO.isGlobal())
    emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
  else if (MO.isSymbol())
    emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
  else if (MO.isMBB())
    emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
  else
    llvm_unreachable("Unexpected jump target operand kind.");
  return 0;
}

unsigned MipsCodeEmitter::getBranchTargetOpValue(const MachineInstr &MI,
                                                 unsigned OpNo) const {
  // Branch targets are basic blocks; record a relocation and encode 0.
  MachineOperand MO = MI.getOperand(OpNo);
  emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
  return 0;
}

unsigned MipsCodeEmitter::getMemEncoding(const MachineInstr &MI,
                                         unsigned OpNo) const {
  // Base register is encoded in bits 20-16, offset is encoded in bits 15-0.
  assert(MI.getOperand(OpNo).isReg());
  unsigned RegBits = getMachineOpValue(MI, MI.getOperand(OpNo)) << 16;
  return (getMachineOpValue(MI, MI.getOperand(OpNo+1)) & 0xFFFF) | RegBits;
}

unsigned MipsCodeEmitter::getSizeExtEncoding(const MachineInstr &MI,
                                             unsigned OpNo) const {
  // size is encoded as size-1 (ext instruction field encoding).
  return getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}

unsigned MipsCodeEmitter::getSizeInsEncoding(const MachineInstr &MI,
                                             unsigned OpNo) const {
  // size is encoded as pos+size-1 (ins instruction field encoding); the pos
  // operand immediately precedes the size operand.
  return getMachineOpValue(MI, MI.getOperand(OpNo-1)) +
         getMachineOpValue(MI, MI.getOperand(OpNo)) - 1;
}

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned MipsCodeEmitter::getMachineOpValue(const MachineInstr &MI,
                                            const MachineOperand &MO) const {
  if (MO.isReg())
    return getMipsRegisterNumbering(MO.getReg());
  else if (MO.isImm())
    return static_cast<unsigned>(MO.getImm());
  else if (MO.isGlobal()) {
    // Unaligned load/store pseudos expand to two (or more) words, each of
    // which needs the relocation; the second copy lives at byte offset 4
    // (or 8 for USH, whose address-bearing word is the third one).
    if (MI.getOpcode() == Mips::ULW || MI.getOpcode() == Mips::USW ||
        MI.getOpcode() == Mips::ULH || MI.getOpcode() == Mips::ULHu)
      emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 4);
    else if (MI.getOpcode() == Mips::USH)
      emitGlobalAddressUnaligned(MO.getGlobal(), getRelocation(MI, MO), 8);
    else
      emitGlobalAddress(MO.getGlobal(), getRelocation(MI, MO), true);
  } else if (MO.isSymbol())
    emitExternalSymbolAddress(MO.getSymbolName(), getRelocation(MI, MO));
  else if (MO.isCPI())
    emitConstPoolAddress(MO.getIndex(), getRelocation(MI, MO));
  else if (MO.isJTI())
    emitJumpTableAddress(MO.getIndex(), getRelocation(MI, MO));
  else if (MO.isMBB())
    emitMachineBasicBlock(MO.getMBB(), getRelocation(MI, MO));
  else
    llvm_unreachable("Unable to encode MachineOperand!");
  return 0;
}

void MipsCodeEmitter::emitGlobalAddress(const GlobalValue *GV, unsigned Reloc,
                                        bool MayNeedFarStub) const {
  MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
                                             const_cast<GlobalValue *>(GV), 0,
                                             MayNeedFarStub));
}

// Records the same global-address relocation twice: at the current PC and at
// the word `Offset` bytes later (for multi-word unaligned expansions).
void MipsCodeEmitter::emitGlobalAddressUnaligned(const GlobalValue *GV,
                                                 unsigned Reloc,
                                                 int Offset) const {
  MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset(), Reloc,
                                             const_cast<GlobalValue *>(GV), 0,
                                             false));
  MCE.addRelocation(MachineRelocation::getGV(MCE.getCurrentPCOffset() + Offset,
                                             Reloc,
                                             const_cast<GlobalValue *>(GV), 0,
                                             false));
}

void MipsCodeEmitter::
emitExternalSymbolAddress(const char *ES, unsigned Reloc) const {
  MCE.addRelocation(MachineRelocation::getExtSym(MCE.getCurrentPCOffset(),
                                                 Reloc, ES, 0, 0, false));
}

void MipsCodeEmitter::emitConstPoolAddress(unsigned CPI, unsigned Reloc) const {
  MCE.addRelocation(MachineRelocation::getConstPool(MCE.getCurrentPCOffset(),
                                                    Reloc, CPI, 0, false));
}

void MipsCodeEmitter::
emitJumpTableAddress(unsigned JTIndex, unsigned Reloc) const {
  MCE.addRelocation(MachineRelocation::getJumpTable(MCE.getCurrentPCOffset(),
                                                    Reloc, JTIndex, 0, false));
}

void MipsCodeEmitter::emitMachineBasicBlock(MachineBasicBlock *BB,
                                            unsigned Reloc) const {
  MCE.addRelocation(MachineRelocation::getBB(MCE.getCurrentPCOffset(),
                                             Reloc, BB));
}

// Unaligned 32-bit store: expands to a swr/swl pair.  Returns the number of
// words emitted.  Opcode fields below are hard-coded MIPS encodings.
int MipsCodeEmitter::emitUSW(const MachineInstr &MI) {
  unsigned src = getMachineOpValue(MI, MI.getOperand(0));
  unsigned base = getMachineOpValue(MI, MI.getOperand(1));
  unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
  // swr src, offset(base)
  // swl src, offset+3(base)
  MCE.emitWordLE(
    (0x2e << 26) | (base << 21) | (src << 16) | (offset & 0xffff));
  MCE.emitWordLE(
    (0x2a << 26) | (base << 21) | (src << 16) | ((offset+3) & 0xffff));
  return 2;
}

// Unaligned 32-bit load: lwr/lwl pair.  When dst aliases the base register,
// load into $at (the assembler temporary, register 1) first and copy, so the
// base is not clobbered between the two partial loads.
int MipsCodeEmitter::emitULW(const MachineInstr &MI) {
  unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
  unsigned base = getMachineOpValue(MI, MI.getOperand(1));
  unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
  unsigned at = 1;
  if (dst != base) {
    // lwr dst, offset(base)
    // lwl dst, offset+3(base)
    MCE.emitWordLE(
      (0x26 << 26) | (base << 21) | (dst << 16) | (offset & 0xffff));
    MCE.emitWordLE(
      (0x22 << 26) | (base << 21) | (dst << 16) | ((offset+3) & 0xffff));
    return 2;
  } else {
    // lwr at, offset(base)
    // lwl at, offset+3(base)
    // addu dst, at, $zero
    MCE.emitWordLE(
      (0x26 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
    MCE.emitWordLE(
      (0x22 << 26) | (base << 21) | (at << 16) | ((offset+3) & 0xffff));
    MCE.emitWordLE(
      (0x0 << 26) | (at << 21) | (0x0 << 16) | (dst << 11) | (0x0 << 6) | 0x21);
    return 3;
  }
}

// Unaligned 16-bit store: two byte stores, with the high byte extracted into
// $at by a shift.
int MipsCodeEmitter::emitUSH(const MachineInstr &MI) {
  unsigned src = getMachineOpValue(MI, MI.getOperand(0));
  unsigned base = getMachineOpValue(MI, MI.getOperand(1));
  unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
  unsigned at = 1;
  // sb src, offset(base)
  // srl at,src,8
  // sb at, offset+1(base)
  MCE.emitWordLE(
    (0x28 << 26) | (base << 21) | (src << 16) | (offset & 0xffff));
  MCE.emitWordLE(
    (0x0 << 26) | (0x0 << 21) | (src << 16) | (at << 11) | (0x8 << 6) | 0x2);
  MCE.emitWordLE(
    (0x28 << 26) | (base << 21) | (at << 16) | ((offset+1) & 0xffff));
  return 3;
}

// Unaligned signed 16-bit load: byte loads (sign-extending the high byte via
// lb) recombined with shift/or.
int MipsCodeEmitter::emitULH(const MachineInstr &MI) {
  unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
  unsigned base = getMachineOpValue(MI, MI.getOperand(1));
  unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
  unsigned at = 1;
  // lbu at, offset(base)
  // lb dst, offset+1(base)
  // sll dst,dst,8
  // or dst,dst,at
  MCE.emitWordLE(
    (0x24 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
  MCE.emitWordLE(
    (0x20 << 26) | (base << 21) | (dst << 16) | ((offset+1) & 0xffff));
  MCE.emitWordLE(
    (0x0 << 26) | (0x0 << 21) | (dst << 16) | (dst << 11) | (0x8 << 6) | 0x0);
  MCE.emitWordLE(
    (0x0 << 26) | (dst << 21) | (at << 16) | (dst << 11) | (0x0 << 6) | 0x25);
  return 4;
}

// Unaligned unsigned 16-bit load: same as emitULH but both bytes are loaded
// with lbu (zero extension).
int MipsCodeEmitter::emitULHu(const MachineInstr &MI) {
  unsigned dst = getMachineOpValue(MI, MI.getOperand(0));
  unsigned base = getMachineOpValue(MI, MI.getOperand(1));
  unsigned offset = getMachineOpValue(MI, MI.getOperand(2));
  unsigned at = 1;
  // lbu at, offset(base)
  // lbu dst, offset+1(base)
  // sll dst,dst,8
  // or dst,dst,at
  MCE.emitWordLE(
    (0x24 << 26) | (base << 21) | (at << 16) | (offset & 0xffff));
  MCE.emitWordLE(
    (0x24 << 26) | (base << 21) | (dst << 16) | ((offset+1) & 0xffff));
  MCE.emitWordLE(
    (0x0 << 26) | (0x0 << 21) | (dst << 16) | (dst << 11) | (0x8 << 6) | 0x0);
  MCE.emitWordLE(
    (0x0 << 26) | (dst << 21) | (at << 16) | (dst << 11) | (0x0 << 6) | 0x25);
  return 4;
}

// Emits one machine instruction: either one of the unaligned-access pseudo
// expansions above, or the TableGen-generated encoding for everything else.
void MipsCodeEmitter::emitInstruction(const MachineInstr &MI) {
  DEBUG(errs() << "JIT: " << (void*)MCE.getCurrentPCValue() << ":\t" << MI);

  MCE.processDebugLoc(MI.getDebugLoc(), true);

  // Skip pseudo instructions.
  if ((MI.getDesc().TSFlags & MipsII::FormMask) == MipsII::Pseudo)
    return;


  switch (MI.getOpcode()) {
  case Mips::USW:
    NumEmitted += emitUSW(MI);
    break;
  case Mips::ULW:
    NumEmitted += emitULW(MI);
    break;
  case Mips::ULH:
    NumEmitted += emitULH(MI);
    break;
  case Mips::ULHu:
    NumEmitted += emitULHu(MI);
    break;
  case Mips::USH:
    NumEmitted += emitUSH(MI);
    break;

  default:
    emitWordLE(getBinaryCodeForInstr(MI));
    ++NumEmitted;  // Keep track of the # of mi's emitted
    break;
  }

  MCE.processDebugLoc(MI.getDebugLoc(), false);
}

void MipsCodeEmitter::emitWordLE(unsigned Word) {
  DEBUG(errs() << " 0x";
        errs().write_hex(Word) << "\n");
  MCE.emitWordLE(Word);
}

/// createMipsJITCodeEmitterPass - Return a pass that emits the collected Mips
/// code to the specified MCE object.
// Factory entry point: the returned pass keeps references to both TM and JCE,
// so both must outlive the pass.
FunctionPass *llvm::createMipsJITCodeEmitterPass(MipsTargetMachine &TM,
                                                 JITCodeEmitter &JCE) {
  return new MipsCodeEmitter(TM, JCE);
}

// Pull in the TableGen-generated getBinaryCodeForInstr() implementation.
#include "MipsGenCodeEmitter.inc"
diff --git a/contrib/llvm/lib/Target/Mips/MipsCondMov.td b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
new file mode 100644
index 0000000..075a3e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsCondMov.td
@@ -0,0 +1,194 @@
//===-- MipsCondMov.td - Describe Mips Conditional Moves --*- tablegen -*--===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the Conditional Moves implementation.
//
//===----------------------------------------------------------------------===//

// Conditional moves:
// These instructions are expanded in
// MipsISelLowering::EmitInstrWithCustomInserter if target does not have
// conditional move instructions.
+// cond:int, data:int +class CondMovIntInt<RegisterClass CRC, RegisterClass DRC, bits<6> funct, + string instr_asm> : + FR<0, funct, (outs DRC:$rd), (ins DRC:$rs, CRC:$rt, DRC:$F), + !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], NoItinerary> { + let shamt = 0; + let Constraints = "$F = $rd"; +} + +// cond:int, data:float +class CondMovIntFP<RegisterClass CRC, RegisterClass DRC, bits<5> fmt, + bits<6> func, string instr_asm> : + FFR<0x11, func, fmt, (outs DRC:$fd), (ins DRC:$fs, CRC:$rt, DRC:$F), + !strconcat(instr_asm, "\t$fd, $fs, $rt"), []> { + bits<5> rt; + let ft = rt; + let Constraints = "$F = $fd"; +} + +// cond:float, data:int +class CondMovFPInt<RegisterClass RC, SDNode cmov, bits<1> tf, + string instr_asm> : + FCMOV<tf, (outs RC:$rd), (ins RC:$rs, RC:$F), + !strconcat(instr_asm, "\t$rd, $rs, $$fcc0"), + [(set RC:$rd, (cmov RC:$rs, RC:$F))]> { + let cc = 0; + let Uses = [FCR31]; + let Constraints = "$F = $rd"; +} + +// cond:float, data:float +class CondMovFPFP<RegisterClass RC, SDNode cmov, bits<5> fmt, bits<1> tf, + string instr_asm> : + FFCMOV<fmt, tf, (outs RC:$fd), (ins RC:$fs, RC:$F), + !strconcat(instr_asm, "\t$fd, $fs, $$fcc0"), + [(set RC:$fd, (cmov RC:$fs, RC:$F))]> { + let cc = 0; + let Uses = [FCR31]; + let Constraints = "$F = $fd"; +} + +// select patterns +multiclass MovzPats0<RegisterClass CRC, RegisterClass DRC, + Instruction MOVZInst, Instruction SLTOp, + Instruction SLTuOp, Instruction SLTiOp, + Instruction SLTiuOp> { + def : Pat<(select (i32 (setge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTOp CRC:$lhs, CRC:$rhs), DRC:$F)>; + def : Pat<(select (i32 (setuge CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTuOp CRC:$lhs, CRC:$rhs), DRC:$F)>; + def : Pat<(select (i32 (setge CRC:$lhs, immSExt16:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTiOp CRC:$lhs, immSExt16:$rhs), DRC:$F)>; + def : Pat<(select (i32 (setuge CRC:$lh, immSExt16:$rh)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTiuOp CRC:$lh, immSExt16:$rh), 
DRC:$F)>; + def : Pat<(select (i32 (setle CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTOp CRC:$rhs, CRC:$lhs), DRC:$F)>; + def : Pat<(select (i32 (setule CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (SLTuOp CRC:$rhs, CRC:$lhs), DRC:$F)>; +} + +multiclass MovzPats1<RegisterClass CRC, RegisterClass DRC, + Instruction MOVZInst, Instruction XOROp> { + def : Pat<(select (i32 (seteq CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>; + def : Pat<(select (i32 (seteq CRC:$lhs, 0)), DRC:$T, DRC:$F), + (MOVZInst DRC:$T, CRC:$lhs, DRC:$F)>; +} + +multiclass MovnPats<RegisterClass CRC, RegisterClass DRC, Instruction MOVNInst, + Instruction XOROp> { + def : Pat<(select (i32 (setne CRC:$lhs, CRC:$rhs)), DRC:$T, DRC:$F), + (MOVNInst DRC:$T, (XOROp CRC:$lhs, CRC:$rhs), DRC:$F)>; + def : Pat<(select CRC:$cond, DRC:$T, DRC:$F), + (MOVNInst DRC:$T, CRC:$cond, DRC:$F)>; + def : Pat<(select (i32 (setne CRC:$lhs, 0)),DRC:$T, DRC:$F), + (MOVNInst DRC:$T, CRC:$lhs, DRC:$F)>; +} + +// Instantiation of instructions. 
+def MOVZ_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0a, "movz">; +let Predicates = [HasMips64] in { + def MOVZ_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0a, "movz">; + def MOVZ_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0a, "movz">; + def MOVZ_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0a, "movz">; +} + +def MOVN_I_I : CondMovIntInt<CPURegs, CPURegs, 0x0b, "movn">; +let Predicates = [HasMips64] in { + def MOVN_I_I64 : CondMovIntInt<CPURegs, CPU64Regs, 0x0b, "movn">; + def MOVN_I64_I : CondMovIntInt<CPU64Regs, CPURegs, 0x0b, "movn">; + def MOVN_I64_I64 : CondMovIntInt<CPU64Regs, CPU64Regs, 0x0b, "movn">; +} + +def MOVZ_I_S : CondMovIntFP<CPURegs, FGR32, 16, 18, "movz.s">; +def MOVZ_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 18, "movz.s">, + Requires<[HasMips64]>; + +def MOVN_I_S : CondMovIntFP<CPURegs, FGR32, 16, 19, "movn.s">; +def MOVN_I64_S : CondMovIntFP<CPU64Regs, FGR32, 16, 19, "movn.s">, + Requires<[HasMips64]>; + +let Predicates = [NotFP64bit] in { + def MOVZ_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 18, "movz.d">; + def MOVN_I_D32 : CondMovIntFP<CPURegs, AFGR64, 17, 19, "movn.d">; +} +let Predicates = [IsFP64bit] in { + def MOVZ_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 18, "movz.d">; + def MOVZ_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 18, "movz.d">; + def MOVN_I_D64 : CondMovIntFP<CPURegs, FGR64, 17, 19, "movn.d">; + def MOVN_I64_D64 : CondMovIntFP<CPU64Regs, FGR64, 17, 19, "movn.d">; +} + +def MOVT_I : CondMovFPInt<CPURegs, MipsCMovFP_T, 1, "movt">; +def MOVT_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_T, 1, "movt">, + Requires<[HasMips64]>; + +def MOVF_I : CondMovFPInt<CPURegs, MipsCMovFP_F, 0, "movf">; +def MOVF_I64 : CondMovFPInt<CPU64Regs, MipsCMovFP_F, 0, "movf">, + Requires<[HasMips64]>; + +def MOVT_S : CondMovFPFP<FGR32, MipsCMovFP_T, 16, 1, "movt.s">; +def MOVF_S : CondMovFPFP<FGR32, MipsCMovFP_F, 16, 0, "movf.s">; + +let Predicates = [NotFP64bit] in { + def MOVT_D32 : CondMovFPFP<AFGR64, MipsCMovFP_T, 17, 1, "movt.d">; + def MOVF_D32 : 
CondMovFPFP<AFGR64, MipsCMovFP_F, 17, 0, "movf.d">; +} +let Predicates = [IsFP64bit] in { + def MOVT_D64 : CondMovFPFP<FGR64, MipsCMovFP_T, 17, 1, "movt.d">; + def MOVF_D64 : CondMovFPFP<FGR64, MipsCMovFP_F, 17, 0, "movf.d">; +} + +// Instantiation of conditional move patterns. +defm : MovzPats0<CPURegs, CPURegs, MOVZ_I_I, SLT, SLTu, SLTi, SLTiu>; +defm : MovzPats1<CPURegs, CPURegs, MOVZ_I_I, XOR>; +let Predicates = [HasMips64] in { + defm : MovzPats0<CPURegs, CPU64Regs, MOVZ_I_I64, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats0<CPU64Regs, CPURegs, MOVZ_I_I, SLT64, SLTu64, SLTi64, + SLTiu64>; + defm : MovzPats0<CPU64Regs, CPU64Regs, MOVZ_I_I64, SLT64, SLTu64, SLTi64, + SLTiu64>; + defm : MovzPats1<CPURegs, CPU64Regs, MOVZ_I_I64, XOR>; + defm : MovzPats1<CPU64Regs, CPURegs, MOVZ_I64_I, XOR64>; + defm : MovzPats1<CPU64Regs, CPU64Regs, MOVZ_I64_I64, XOR64>; +} + +defm : MovnPats<CPURegs, CPURegs, MOVN_I_I, XOR>; +let Predicates = [HasMips64] in { + defm : MovnPats<CPURegs, CPU64Regs, MOVN_I_I64, XOR>; + defm : MovnPats<CPU64Regs, CPURegs, MOVN_I64_I, XOR64>; + defm : MovnPats<CPU64Regs, CPU64Regs, MOVN_I64_I64, XOR64>; +} + +defm : MovzPats0<CPURegs, FGR32, MOVZ_I_S, SLT, SLTu, SLTi, SLTiu>; +defm : MovzPats1<CPURegs, FGR32, MOVZ_I_S, XOR>; +defm : MovnPats<CPURegs, FGR32, MOVN_I_S, XOR>; +let Predicates = [HasMips64] in { + defm : MovzPats0<CPU64Regs, FGR32, MOVZ_I_S, SLT64, SLTu64, SLTi64, + SLTiu64>; + defm : MovzPats1<CPU64Regs, FGR32, MOVZ_I64_S, XOR64>; + defm : MovnPats<CPU64Regs, FGR32, MOVN_I64_S, XOR64>; +} + +let Predicates = [NotFP64bit] in { + defm : MovzPats0<CPURegs, AFGR64, MOVZ_I_D32, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats1<CPURegs, AFGR64, MOVZ_I_D32, XOR>; + defm : MovnPats<CPURegs, AFGR64, MOVN_I_D32, XOR>; +} +let Predicates = [IsFP64bit] in { + defm : MovzPats0<CPURegs, FGR64, MOVZ_I_D64, SLT, SLTu, SLTi, SLTiu>; + defm : MovzPats0<CPU64Regs, FGR64, MOVZ_I_D64, SLT64, SLTu64, SLTi64, + SLTiu64>; + defm : MovzPats1<CPURegs, FGR64, MOVZ_I_D64, 
XOR>; + defm : MovzPats1<CPU64Regs, FGR64, MOVZ_I64_D64, XOR64>; + defm : MovnPats<CPURegs, FGR64, MOVN_I_D64, XOR>; + defm : MovnPats<CPU64Regs, FGR64, MOVN_I64_D64, XOR64>; +} diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp new file mode 100644 index 0000000..debf2f1 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -0,0 +1,253 @@ +//===-- DelaySlotFiller.cpp - Mips Delay Slot Filler ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Simple pass to fill delay slots with useful instructions. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "delay-slot-filler" + +#include "Mips.h" +#include "MipsTargetMachine.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Target/TargetRegisterInfo.h" +#include "llvm/ADT/SmallSet.h" +#include "llvm/ADT/Statistic.h" + +using namespace llvm; + +STATISTIC(FilledSlots, "Number of delay slots filled"); +STATISTIC(UsefulSlots, "Number of delay slots filled with instructions that" + " are not NOP."); + +static cl::opt<bool> EnableDelaySlotFiller( + "enable-mips-delay-filler", + cl::init(false), + cl::desc("Fill the Mips delay slots useful instructions."), + cl::Hidden); + +namespace { + struct Filler : public MachineFunctionPass { + + TargetMachine &TM; + const TargetInstrInfo *TII; + MachineBasicBlock::iterator LastFiller; + + static char ID; + Filler(TargetMachine &tm) + : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { } + + virtual const char *getPassName() 
const { + return "Mips Delay Slot Filler"; + } + + bool runOnMachineBasicBlock(MachineBasicBlock &MBB); + bool runOnMachineFunction(MachineFunction &F) { + bool Changed = false; + for (MachineFunction::iterator FI = F.begin(), FE = F.end(); + FI != FE; ++FI) + Changed |= runOnMachineBasicBlock(*FI); + return Changed; + } + + bool isDelayFiller(MachineBasicBlock &MBB, + MachineBasicBlock::iterator candidate); + + void insertCallUses(MachineBasicBlock::iterator MI, + SmallSet<unsigned, 32>& RegDefs, + SmallSet<unsigned, 32>& RegUses); + + void insertDefsUses(MachineBasicBlock::iterator MI, + SmallSet<unsigned, 32>& RegDefs, + SmallSet<unsigned, 32>& RegUses); + + bool IsRegInSet(SmallSet<unsigned, 32>& RegSet, + unsigned Reg); + + bool delayHasHazard(MachineBasicBlock::iterator candidate, + bool &sawLoad, bool &sawStore, + SmallSet<unsigned, 32> &RegDefs, + SmallSet<unsigned, 32> &RegUses); + + bool + findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot, + MachineBasicBlock::iterator &Filler); + + + }; + char Filler::ID = 0; +} // end of anonymous namespace + +/// runOnMachineBasicBlock - Fill in delay slots for the given basic block. +/// We assume there is only one delay slot per delayed instruction. +bool Filler:: +runOnMachineBasicBlock(MachineBasicBlock &MBB) { + bool Changed = false; + LastFiller = MBB.end(); + + for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ++I) + if (I->hasDelaySlot()) { + ++FilledSlots; + Changed = true; + + MachineBasicBlock::iterator D; + + if (EnableDelaySlotFiller && findDelayInstr(MBB, I, D)) { + MBB.splice(llvm::next(I), &MBB, D); + ++UsefulSlots; + } else + BuildMI(MBB, llvm::next(I), I->getDebugLoc(), TII->get(Mips::NOP)); + + // Record the filler instruction that filled the delay slot. + // The instruction after it will be visited in the next iteration. 
+ LastFiller = ++I; + } + return Changed; + +} + +/// createMipsDelaySlotFillerPass - Returns a pass that fills in delay +/// slots in Mips MachineFunctions +FunctionPass *llvm::createMipsDelaySlotFillerPass(MipsTargetMachine &tm) { + return new Filler(tm); +} + +bool Filler::findDelayInstr(MachineBasicBlock &MBB, + MachineBasicBlock::iterator slot, + MachineBasicBlock::iterator &Filler) { + SmallSet<unsigned, 32> RegDefs; + SmallSet<unsigned, 32> RegUses; + + insertDefsUses(slot, RegDefs, RegUses); + + bool sawLoad = false; + bool sawStore = false; + + for (MachineBasicBlock::reverse_iterator I(slot); I != MBB.rend(); ++I) { + // skip debug value + if (I->isDebugValue()) + continue; + + // Convert to forward iterator. + MachineBasicBlock::iterator FI(llvm::next(I).base()); + + if (I->hasUnmodeledSideEffects() + || I->isInlineAsm() + || I->isLabel() + || FI == LastFiller + || I->isPseudo() + // + // Should not allow: + // ERET, DERET or WAIT, PAUSE. Need to add these to instruction + // list. TBD. + ) + break; + + if (delayHasHazard(FI, sawLoad, sawStore, RegDefs, RegUses)) { + insertDefsUses(FI, RegDefs, RegUses); + continue; + } + + Filler = FI; + return true; + } + + return false; +} + +bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate, + bool &sawLoad, bool &sawStore, + SmallSet<unsigned, 32> &RegDefs, + SmallSet<unsigned, 32> &RegUses) { + if (candidate->isImplicitDef() || candidate->isKill()) + return true; + + // Loads or stores cannot be moved past a store to the delay slot + // and stores cannot be moved past a load. 
+ if (candidate->mayLoad()) { + if (sawStore) + return true; + sawLoad = true; + } + + if (candidate->mayStore()) { + if (sawStore) + return true; + sawStore = true; + if (sawLoad) + return true; + } + + assert((!candidate->isCall() && !candidate->isReturn()) && + "Cannot put calls or returns in delay slot."); + + for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) { + const MachineOperand &MO = candidate->getOperand(i); + unsigned Reg; + + if (!MO.isReg() || !(Reg = MO.getReg())) + continue; // skip + + if (MO.isDef()) { + // check whether Reg is defined or used before delay slot. + if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg)) + return true; + } + if (MO.isUse()) { + // check whether Reg is defined before delay slot. + if (IsRegInSet(RegDefs, Reg)) + return true; + } + } + return false; +} + +// Insert Defs and Uses of MI into the sets RegDefs and RegUses. +void Filler::insertDefsUses(MachineBasicBlock::iterator MI, + SmallSet<unsigned, 32>& RegDefs, + SmallSet<unsigned, 32>& RegUses) { + // If MI is a call or return, just examine the explicit non-variadic operands. + MCInstrDesc MCID = MI->getDesc(); + unsigned e = MI->isCall() || MI->isReturn() ? MCID.getNumOperands() : + MI->getNumOperands(); + + // Add RA to RegDefs to prevent users of RA from going into delay slot. + if (MI->isCall()) + RegDefs.insert(Mips::RA); + + for (unsigned i = 0; i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + unsigned Reg; + + if (!MO.isReg() || !(Reg = MO.getReg())) + continue; + + if (MO.isDef()) + RegDefs.insert(Reg); + else if (MO.isUse()) + RegUses.insert(Reg); + } +} + +//returns true if the Reg or its alias is in the RegSet. 
+bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg) { + if (RegSet.count(Reg)) + return true; + // check Aliased Registers + for (const uint16_t *Alias = TM.getRegisterInfo()->getAliasSet(Reg); + *Alias; ++Alias) + if (RegSet.count(*Alias)) + return true; + + return false; +} diff --git a/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp b/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp new file mode 100644 index 0000000..119d1a8 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsEmitGPRestore.cpp @@ -0,0 +1,97 @@ +//===-- MipsEmitGPRestore.cpp - Emit GP Restore Instruction ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass emits instructions that restore $gp right +// after jalr instructions. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "emit-gp-restore" + +#include "Mips.h" +#include "MipsTargetMachine.h" +#include "MipsMachineFunction.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/ADT/Statistic.h" + +using namespace llvm; + +namespace { + struct Inserter : public MachineFunctionPass { + + TargetMachine &TM; + const TargetInstrInfo *TII; + + static char ID; + Inserter(TargetMachine &tm) + : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { } + + virtual const char *getPassName() const { + return "Mips Emit GP Restore"; + } + + bool runOnMachineFunction(MachineFunction &F); + }; + char Inserter::ID = 0; +} // end of anonymous namespace + +bool Inserter::runOnMachineFunction(MachineFunction &F) { + MipsFunctionInfo *MipsFI = F.getInfo<MipsFunctionInfo>(); + + if ((TM.getRelocationModel() != Reloc::PIC_) || + 
(!MipsFI->globalBaseRegFixed())) + return false; + + bool Changed = false; + int FI = MipsFI->getGPFI(); + + for (MachineFunction::iterator MFI = F.begin(), MFE = F.end(); + MFI != MFE; ++MFI) { + MachineBasicBlock& MBB = *MFI; + MachineBasicBlock::iterator I = MFI->begin(); + + // If MBB is a landing pad, insert instruction that restores $gp after + // EH_LABEL. + if (MBB.isLandingPad()) { + // Find EH_LABEL first. + for (; I->getOpcode() != TargetOpcode::EH_LABEL; ++I) ; + + // Insert lw. + ++I; + DebugLoc dl = I != MBB.end() ? I->getDebugLoc() : DebugLoc(); + BuildMI(MBB, I, dl, TII->get(Mips::LW), Mips::GP).addFrameIndex(FI) + .addImm(0); + Changed = true; + } + + while (I != MFI->end()) { + if (I->getOpcode() != Mips::JALR) { + ++I; + continue; + } + + DebugLoc dl = I->getDebugLoc(); + // emit lw $gp, ($gp save slot on stack) after jalr + BuildMI(MBB, ++I, dl, TII->get(Mips::LW), Mips::GP).addFrameIndex(FI) + .addImm(0); + Changed = true; + } + } + + return Changed; +} + +/// createMipsEmitGPRestorePass - Returns a pass that emits instructions that +/// restores $gp clobbered by jalr instructions. +FunctionPass *llvm::createMipsEmitGPRestorePass(MipsTargetMachine &tm) { + return new Inserter(tm); +} + diff --git a/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp b/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp new file mode 100644 index 0000000..baeae97 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsExpandPseudo.cpp @@ -0,0 +1,123 @@ +//===-- MipsExpandPseudo.cpp - Expand Pseudo Instructions ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass expands pseudo instructions into target instructions after register +// allocation but before post-RA scheduling. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-expand-pseudo" + +#include "Mips.h" +#include "MipsTargetMachine.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/ADT/Statistic.h" + +using namespace llvm; + +namespace { + struct MipsExpandPseudo : public MachineFunctionPass { + + TargetMachine &TM; + const TargetInstrInfo *TII; + + static char ID; + MipsExpandPseudo(TargetMachine &tm) + : MachineFunctionPass(ID), TM(tm), TII(tm.getInstrInfo()) { } + + virtual const char *getPassName() const { + return "Mips PseudoInstrs Expansion"; + } + + bool runOnMachineFunction(MachineFunction &F); + bool runOnMachineBasicBlock(MachineBasicBlock &MBB); + + private: + void ExpandBuildPairF64(MachineBasicBlock&, MachineBasicBlock::iterator); + void ExpandExtractElementF64(MachineBasicBlock&, + MachineBasicBlock::iterator); + }; + char MipsExpandPseudo::ID = 0; +} // end of anonymous namespace + +bool MipsExpandPseudo::runOnMachineFunction(MachineFunction& F) { + bool Changed = false; + + for (MachineFunction::iterator I = F.begin(); I != F.end(); ++I) + Changed |= runOnMachineBasicBlock(*I); + + return Changed; +} + +bool MipsExpandPseudo::runOnMachineBasicBlock(MachineBasicBlock& MBB) { + + bool Changed = false; + for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end();) { + const MCInstrDesc& MCid = I->getDesc(); + + switch(MCid.getOpcode()) { + default: + ++I; + continue; + case Mips::SETGP2: + // Convert "setgp2 $globalreg, $t9" to "addu $globalreg, $v0, $t9" + BuildMI(MBB, I, I->getDebugLoc(), TII->get(Mips::ADDu), + I->getOperand(0).getReg()) + .addReg(Mips::V0).addReg(I->getOperand(1).getReg()); + break; + case Mips::BuildPairF64: + ExpandBuildPairF64(MBB, I); + break; + case Mips::ExtractElementF64: + ExpandExtractElementF64(MBB, I); + break; + } + + // delete original instr + MBB.erase(I++); + 
Changed = true; + } + + return Changed; +} + +void MipsExpandPseudo::ExpandBuildPairF64(MachineBasicBlock& MBB, + MachineBasicBlock::iterator I) { + unsigned DstReg = I->getOperand(0).getReg(); + unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg(); + const MCInstrDesc& Mtc1Tdd = TII->get(Mips::MTC1); + DebugLoc dl = I->getDebugLoc(); + const uint16_t* SubReg = + TM.getRegisterInfo()->getSubRegisters(DstReg); + + // mtc1 Lo, $fp + // mtc1 Hi, $fp + 1 + BuildMI(MBB, I, dl, Mtc1Tdd, *SubReg).addReg(LoReg); + BuildMI(MBB, I, dl, Mtc1Tdd, *(SubReg + 1)).addReg(HiReg); +} + +void MipsExpandPseudo::ExpandExtractElementF64(MachineBasicBlock& MBB, + MachineBasicBlock::iterator I) { + unsigned DstReg = I->getOperand(0).getReg(); + unsigned SrcReg = I->getOperand(1).getReg(); + unsigned N = I->getOperand(2).getImm(); + const MCInstrDesc& Mfc1Tdd = TII->get(Mips::MFC1); + DebugLoc dl = I->getDebugLoc(); + const uint16_t* SubReg = TM.getRegisterInfo()->getSubRegisters(SrcReg); + + BuildMI(MBB, I, dl, Mfc1Tdd, DstReg).addReg(*(SubReg + N)); +} + +/// createMipsMipsExpandPseudoPass - Returns a pass that expands pseudo +/// instrs into real instrs +FunctionPass *llvm::createMipsExpandPseudoPass(MipsTargetMachine &tm) { + return new MipsExpandPseudo(tm); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp new file mode 100644 index 0000000..f8ea3d0 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.cpp @@ -0,0 +1,326 @@ +//===-- MipsFrameLowering.cpp - Mips Frame Information --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the Mips implementation of TargetFrameLowering class. 
+// +//===----------------------------------------------------------------------===// + +#include "MipsFrameLowering.h" +#include "MipsAnalyzeImmediate.h" +#include "MipsInstrInfo.h" +#include "MipsMachineFunction.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/Function.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Target/TargetData.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Support/CommandLine.h" + +using namespace llvm; + + +//===----------------------------------------------------------------------===// +// +// Stack Frame Processing methods +// +----------------------------+ +// +// The stack is allocated decrementing the stack pointer on +// the first instruction of a function prologue. Once decremented, +// all stack references are done through a positive offset +// from the stack/frame pointer, so the stack is considered +// to grow up! Otherwise terrible hacks would have to be made +// to get this stack ABI compliant :) +// +// The stack frame required by the ABI (after call): +// Offset +// +// 0 ---------- +// 4 Args to pass +// . saved $GP (used in PIC) +// . Alloca allocations +// . Local Area +// . CPU "Callee Saved" Registers +// . saved FP +// . saved RA +// . 
FPU "Callee Saved" Registers +// StackSize ----------- +// +// Offset - offset from sp after stack allocation on function prologue +// +// The sp is the stack pointer subtracted/added from the stack size +// at the Prologue/Epilogue +// +// References to the previous stack (to obtain arguments) are done +// with offsets that exceed the stack size: (stacksize+(4*(num_arg-1))) +// +// Examples: +// - reference to the actual stack frame +// for any local area var there is something like : FI >= 0, StackOffset: 4 +// sw REGX, 4(SP) +// +// - reference to previous stack frame +// suppose there's a load to the 5th arguments : FI < 0, StackOffset: 16. +// The emitted instruction will be something like: +// lw REGX, 16+StackSize(SP) +// +// Since the total stack size is unknown on LowerFormalArguments, all +// stack references (ObjectOffset) created to reference the function +// arguments, are negative numbers. This way, on eliminateFrameIndex it's +// possible to detect those references and the offsets are adjusted to +// their real location. +// +//===----------------------------------------------------------------------===// + +// hasFP - Return true if the specified function should have a dedicated frame +// pointer register. This is true if the function has variable sized allocas or +// if frame pointer elimination is disabled. +bool MipsFrameLowering::hasFP(const MachineFunction &MF) const { + const MachineFrameInfo *MFI = MF.getFrameInfo(); + return MF.getTarget().Options.DisableFramePointerElim(MF) || + MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken(); +} + +bool MipsFrameLowering::targetHandlesStackFrameRounding() const { + return true; +} + +// Build an instruction sequence to load an immediate that is too large to fit +// in 16-bit and add the result to Reg. +static void expandLargeImm(unsigned Reg, int64_t Imm, bool IsN64, + const MipsInstrInfo &TII, MachineBasicBlock& MBB, + MachineBasicBlock::iterator II, DebugLoc DL) { + unsigned LUi = IsN64 ? 
Mips::LUi64 : Mips::LUi; + unsigned ADDu = IsN64 ? Mips::DADDu : Mips::ADDu; + unsigned ZEROReg = IsN64 ? Mips::ZERO_64 : Mips::ZERO; + unsigned ATReg = IsN64 ? Mips::AT_64 : Mips::AT; + MipsAnalyzeImmediate AnalyzeImm; + const MipsAnalyzeImmediate::InstSeq &Seq = + AnalyzeImm.Analyze(Imm, IsN64 ? 64 : 32, false /* LastInstrIsADDiu */); + MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); + + // The first instruction can be a LUi, which is different from other + // instructions (ADDiu, ORI and SLL) in that it does not have a register + // operand. + if (Inst->Opc == LUi) + BuildMI(MBB, II, DL, TII.get(LUi), ATReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + else + BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + // Build the remaining instructions in Seq. + for (++Inst; Inst != Seq.end(); ++Inst) + BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + BuildMI(MBB, II, DL, TII.get(ADDu), Reg).addReg(Reg).addReg(ATReg); +} + +void MipsFrameLowering::emitPrologue(MachineFunction &MF) const { + MachineBasicBlock &MBB = MF.front(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); + const MipsRegisterInfo *RegInfo = + static_cast<const MipsRegisterInfo*>(MF.getTarget().getRegisterInfo()); + const MipsInstrInfo &TII = + *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo()); + MachineBasicBlock::iterator MBBI = MBB.begin(); + DebugLoc dl = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc(); + bool isPIC = (MF.getTarget().getRelocationModel() == Reloc::PIC_); + unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP; + unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP; + unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; + unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu; + unsigned ADDiu = STI.isABI_N64() ? 
Mips::DADDiu : Mips::ADDiu; + + // First, compute final stack size. + unsigned RegSize = STI.isGP32bit() ? 4 : 8; + unsigned StackAlign = getStackAlignment(); + unsigned LocalVarAreaOffset = MipsFI->needGPSaveRestore() ? + (MFI->getObjectOffset(MipsFI->getGPFI()) + RegSize) : + MipsFI->getMaxCallFrameSize(); + uint64_t StackSize = RoundUpToAlignment(LocalVarAreaOffset, StackAlign) + + RoundUpToAlignment(MFI->getStackSize(), StackAlign); + + // Update stack size + MFI->setStackSize(StackSize); + + // Emit instructions that set the global base register if the target ABI is + // O32. + if (isPIC && MipsFI->globalBaseRegSet() && STI.isABI_O32() && + !MipsFI->globalBaseRegFixed()) { + // See MipsInstrInfo.td for explanation. + MachineBasicBlock *NewEntry = MF.CreateMachineBasicBlock(); + MF.insert(&MBB, NewEntry); + NewEntry->addSuccessor(&MBB); + + // Copy live in registers. + for (MachineBasicBlock::livein_iterator R = MBB.livein_begin(); + R != MBB.livein_end(); ++R) + NewEntry->addLiveIn(*R); + + BuildMI(*NewEntry, NewEntry->begin(), dl, TII.get(Mips:: SETGP01), + Mips::V0); + } + + // No need to allocate space on the stack. + if (StackSize == 0 && !MFI->adjustsStack()) return; + + MachineModuleInfo &MMI = MF.getMMI(); + std::vector<MachineMove> &Moves = MMI.getFrameMoves(); + MachineLocation DstML, SrcML; + + // Adjust stack. + if (isInt<16>(-StackSize)) // addi sp, sp, (-stacksize) + BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(-StackSize); + else { // Expand immediate that doesn't fit in 16-bit. 
+ MipsFI->setEmitNOAT(); + expandLargeImm(SP, -StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl); + } + + // emit ".cfi_def_cfa_offset StackSize" + MCSymbol *AdjustSPLabel = MMI.getContext().CreateTempSymbol(); + BuildMI(MBB, MBBI, dl, + TII.get(TargetOpcode::PROLOG_LABEL)).addSym(AdjustSPLabel); + DstML = MachineLocation(MachineLocation::VirtualFP); + SrcML = MachineLocation(MachineLocation::VirtualFP, -StackSize); + Moves.push_back(MachineMove(AdjustSPLabel, DstML, SrcML)); + + const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); + + if (CSI.size()) { + // Find the instruction past the last instruction that saves a callee-saved + // register to the stack. + for (unsigned i = 0; i < CSI.size(); ++i) + ++MBBI; + + // Iterate over list of callee-saved registers and emit .cfi_offset + // directives. + MCSymbol *CSLabel = MMI.getContext().CreateTempSymbol(); + BuildMI(MBB, MBBI, dl, + TII.get(TargetOpcode::PROLOG_LABEL)).addSym(CSLabel); + + for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(), + E = CSI.end(); I != E; ++I) { + int64_t Offset = MFI->getObjectOffset(I->getFrameIdx()); + unsigned Reg = I->getReg(); + + // If Reg is a double precision register, emit two cfa_offsets, + // one for each of the paired single precision registers. + if (Mips::AFGR64RegisterClass->contains(Reg)) { + const uint16_t *SubRegs = RegInfo->getSubRegisters(Reg); + MachineLocation DstML0(MachineLocation::VirtualFP, Offset); + MachineLocation DstML1(MachineLocation::VirtualFP, Offset + 4); + MachineLocation SrcML0(*SubRegs); + MachineLocation SrcML1(*(SubRegs + 1)); + + if (!STI.isLittle()) + std::swap(SrcML0, SrcML1); + + Moves.push_back(MachineMove(CSLabel, DstML0, SrcML0)); + Moves.push_back(MachineMove(CSLabel, DstML1, SrcML1)); + } + else { + // Reg is either in CPURegs or FGR32. 
+ DstML = MachineLocation(MachineLocation::VirtualFP, Offset); + SrcML = MachineLocation(Reg); + Moves.push_back(MachineMove(CSLabel, DstML, SrcML)); + } + } + } + + // if framepointer enabled, set it to point to the stack pointer. + if (hasFP(MF)) { + // Insert instruction "move $fp, $sp" at this location. + BuildMI(MBB, MBBI, dl, TII.get(ADDu), FP).addReg(SP).addReg(ZERO); + + // emit ".cfi_def_cfa_register $fp" + MCSymbol *SetFPLabel = MMI.getContext().CreateTempSymbol(); + BuildMI(MBB, MBBI, dl, + TII.get(TargetOpcode::PROLOG_LABEL)).addSym(SetFPLabel); + DstML = MachineLocation(FP); + SrcML = MachineLocation(MachineLocation::VirtualFP); + Moves.push_back(MachineMove(SetFPLabel, DstML, SrcML)); + } + + // Restore GP from the saved stack location + if (MipsFI->needGPSaveRestore()) { + unsigned Offset = MFI->getObjectOffset(MipsFI->getGPFI()); + BuildMI(MBB, MBBI, dl, TII.get(Mips::CPRESTORE)).addImm(Offset) + .addReg(Mips::GP); + } +} + +void MipsFrameLowering::emitEpilogue(MachineFunction &MF, + MachineBasicBlock &MBB) const { + MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + const MipsInstrInfo &TII = + *static_cast<const MipsInstrInfo*>(MF.getTarget().getInstrInfo()); + DebugLoc dl = MBBI->getDebugLoc(); + unsigned SP = STI.isABI_N64() ? Mips::SP_64 : Mips::SP; + unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP; + unsigned ZERO = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; + unsigned ADDu = STI.isABI_N64() ? Mips::DADDu : Mips::ADDu; + unsigned ADDiu = STI.isABI_N64() ? Mips::DADDiu : Mips::ADDiu; + + // if framepointer enabled, restore the stack pointer. + if (hasFP(MF)) { + // Find the first instruction that restores a callee-saved register. + MachineBasicBlock::iterator I = MBBI; + + for (unsigned i = 0; i < MFI->getCalleeSavedInfo().size(); ++i) + --I; + + // Insert instruction "move $sp, $fp" at this location. 
+ BuildMI(MBB, I, dl, TII.get(ADDu), SP).addReg(FP).addReg(ZERO); + } + + // Get the number of bytes from FrameInfo + uint64_t StackSize = MFI->getStackSize(); + + if (!StackSize) + return; + + // Adjust stack. + if (isInt<16>(StackSize)) // addi sp, sp, (-stacksize) + BuildMI(MBB, MBBI, dl, TII.get(ADDiu), SP).addReg(SP).addImm(StackSize); + else // Expand immediate that doesn't fit in 16-bit. + expandLargeImm(SP, StackSize, STI.isABI_N64(), TII, MBB, MBBI, dl); +} + +void MipsFrameLowering:: +processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS) const { + MachineRegisterInfo& MRI = MF.getRegInfo(); + unsigned FP = STI.isABI_N64() ? Mips::FP_64 : Mips::FP; + + // FIXME: remove this code if register allocator can correctly mark + // $fp and $ra used or unused. + + // Mark $fp and $ra as used or unused. + if (hasFP(MF)) + MRI.setPhysRegUsed(FP); + + // The register allocator might determine $ra is used after seeing + // instruction "jr $ra", but we do not want PrologEpilogInserter to insert + // instructions to save/restore $ra unless there is a function call. + // To correct this, $ra is explicitly marked unused if there is no + // function call. + if (MF.getFrameInfo()->hasCalls()) + MRI.setPhysRegUsed(Mips::RA); + else { + MRI.setPhysRegUnused(Mips::RA); + MRI.setPhysRegUnused(Mips::RA_64); + } +} diff --git a/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h new file mode 100644 index 0000000..bd1d89f --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsFrameLowering.h @@ -0,0 +1,49 @@ +//===-- MipsFrameLowering.h - Define frame lowering for Mips ----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// +// +//===----------------------------------------------------------------------===// + +#ifndef MIPS_FRAMEINFO_H +#define MIPS_FRAMEINFO_H + +#include "Mips.h" +#include "MipsSubtarget.h" +#include "llvm/Target/TargetFrameLowering.h" + +namespace llvm { + class MipsSubtarget; + +class MipsFrameLowering : public TargetFrameLowering { +protected: + const MipsSubtarget &STI; + +public: + explicit MipsFrameLowering(const MipsSubtarget &sti) + : TargetFrameLowering(StackGrowsDown, sti.hasMips64() ? 16 : 8, 0), + STI(sti) { + } + + bool targetHandlesStackFrameRounding() const; + + /// emitProlog/emitEpilog - These methods insert prolog and epilog code into + /// the function. + void emitPrologue(MachineFunction &MF) const; + void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const; + + bool hasFP(const MachineFunction &MF) const; + + void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, + RegScavenger *RS) const; +}; + +} // End llvm namespace + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp new file mode 100644 index 0000000..f0651c6 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsISelDAGToDAG.cpp @@ -0,0 +1,567 @@ +//===-- MipsISelDAGToDAG.cpp - A Dag to Dag Inst Selector for Mips --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines an instruction selector for the MIPS target. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-isel" +#include "Mips.h" +#include "MipsAnalyzeImmediate.h" +#include "MipsMachineFunction.h" +#include "MipsRegisterInfo.h" +#include "MipsSubtarget.h" +#include "MipsTargetMachine.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/GlobalValue.h" +#include "llvm/Instructions.h" +#include "llvm/Intrinsics.h" +#include "llvm/Support/CFG.h" +#include "llvm/Type.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Instruction Selector Implementation +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MipsDAGToDAGISel - MIPS specific code to select MIPS machine +// instructions for SelectionDAG operations. +//===----------------------------------------------------------------------===// +namespace { + +class MipsDAGToDAGISel : public SelectionDAGISel { + + /// TM - Keep a reference to MipsTargetMachine. + MipsTargetMachine &TM; + + /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can + /// make the right decision when generating code for different targets. 
+ const MipsSubtarget &Subtarget; + +public: + explicit MipsDAGToDAGISel(MipsTargetMachine &tm) : + SelectionDAGISel(tm), + TM(tm), Subtarget(tm.getSubtarget<MipsSubtarget>()) {} + + // Pass Name + virtual const char *getPassName() const { + return "MIPS DAG->DAG Pattern Instruction Selection"; + } + + virtual bool runOnMachineFunction(MachineFunction &MF); + +private: + // Include the pieces autogenerated from the target description. + #include "MipsGenDAGISel.inc" + + /// getTargetMachine - Return a reference to the TargetMachine, casted + /// to the target-specific type. + const MipsTargetMachine &getTargetMachine() { + return static_cast<const MipsTargetMachine &>(TM); + } + + /// getInstrInfo - Return a reference to the TargetInstrInfo, casted + /// to the target-specific type. + const MipsInstrInfo *getInstrInfo() { + return getTargetMachine().getInstrInfo(); + } + + SDNode *getGlobalBaseReg(); + + std::pair<SDNode*, SDNode*> SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, + EVT Ty, bool HasLo, bool HasHi); + + SDNode *Select(SDNode *N); + + // Complex Pattern. + bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base, SDValue &Offset); + + // getImm - Return a target constant with the specified value. + inline SDValue getImm(const SDNode *Node, unsigned Imm) { + return CurDAG->getTargetConstant(Imm, Node->getValueType(0)); + } + + void ProcessFunctionAfterISel(MachineFunction &MF); + bool ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, const MachineInstr&); + void InitGlobalBaseReg(MachineFunction &MF); + + virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op, + char ConstraintCode, + std::vector<SDValue> &OutOps); +}; + +} + +// Insert instructions to initialize the global base register in the +// first MBB of the function. When the ABI is O32 and the relocation model is +// PIC, the necessary instructions are emitted later to prevent optimization +// passes from moving them. 
+void MipsDAGToDAGISel::InitGlobalBaseReg(MachineFunction &MF) { + MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); + + if (!MipsFI->globalBaseRegSet()) + return; + + MachineBasicBlock &MBB = MF.front(); + MachineBasicBlock::iterator I = MBB.begin(); + MachineRegisterInfo &RegInfo = MF.getRegInfo(); + const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo(); + DebugLoc DL = I != MBB.end() ? I->getDebugLoc() : DebugLoc(); + unsigned V0, V1, GlobalBaseReg = MipsFI->getGlobalBaseReg(); + bool FixGlobalBaseReg = MipsFI->globalBaseRegFixed(); + + if (Subtarget.isABI_O32() && FixGlobalBaseReg) + // $gp is the global base register. + V0 = V1 = GlobalBaseReg; + else { + const TargetRegisterClass *RC; + RC = Subtarget.isABI_N64() ? + Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass; + + V0 = RegInfo.createVirtualRegister(RC); + V1 = RegInfo.createVirtualRegister(RC); + } + + if (Subtarget.isABI_N64()) { + MF.getRegInfo().addLiveIn(Mips::T9_64); + MBB.addLiveIn(Mips::T9_64); + + // lui $v0, %hi(%neg(%gp_rel(fname))) + // daddu $v1, $v0, $t9 + // daddiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname))) + const GlobalValue *FName = MF.getFunction(); + BuildMI(MBB, I, DL, TII.get(Mips::LUi64), V0) + .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI); + BuildMI(MBB, I, DL, TII.get(Mips::DADDu), V1).addReg(V0).addReg(Mips::T9_64); + BuildMI(MBB, I, DL, TII.get(Mips::DADDiu), GlobalBaseReg).addReg(V1) + .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO); + } else if (MF.getTarget().getRelocationModel() == Reloc::Static) { + // Set global register to __gnu_local_gp. 
+ // + // lui $v0, %hi(__gnu_local_gp) + // addiu $globalbasereg, $v0, %lo(__gnu_local_gp) + BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0) + .addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_HI); + BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V0) + .addExternalSymbol("__gnu_local_gp", MipsII::MO_ABS_LO); + } else { + MF.getRegInfo().addLiveIn(Mips::T9); + MBB.addLiveIn(Mips::T9); + + if (Subtarget.isABI_N32()) { + // lui $v0, %hi(%neg(%gp_rel(fname))) + // addu $v1, $v0, $t9 + // addiu $globalbasereg, $v1, %lo(%neg(%gp_rel(fname))) + const GlobalValue *FName = MF.getFunction(); + BuildMI(MBB, I, DL, TII.get(Mips::LUi), V0) + .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_HI); + BuildMI(MBB, I, DL, TII.get(Mips::ADDu), V1).addReg(V0).addReg(Mips::T9); + BuildMI(MBB, I, DL, TII.get(Mips::ADDiu), GlobalBaseReg).addReg(V1) + .addGlobalAddress(FName, 0, MipsII::MO_GPOFF_LO); + } else if (!MipsFI->globalBaseRegFixed()) { + assert(Subtarget.isABI_O32()); + + BuildMI(MBB, I, DL, TII.get(Mips::SETGP2), GlobalBaseReg) + .addReg(Mips::T9); + } + } +} + +bool MipsDAGToDAGISel::ReplaceUsesWithZeroReg(MachineRegisterInfo *MRI, + const MachineInstr& MI) { + unsigned DstReg = 0, ZeroReg = 0; + + // Check if MI is "addiu $dst, $zero, 0" or "daddiu $dst, $zero, 0". + if ((MI.getOpcode() == Mips::ADDiu) && + (MI.getOperand(1).getReg() == Mips::ZERO) && + (MI.getOperand(2).getImm() == 0)) { + DstReg = MI.getOperand(0).getReg(); + ZeroReg = Mips::ZERO; + } else if ((MI.getOpcode() == Mips::DADDiu) && + (MI.getOperand(1).getReg() == Mips::ZERO_64) && + (MI.getOperand(2).getImm() == 0)) { + DstReg = MI.getOperand(0).getReg(); + ZeroReg = Mips::ZERO_64; + } + + if (!DstReg) + return false; + + // Replace uses with ZeroReg. 
+ for (MachineRegisterInfo::use_iterator U = MRI->use_begin(DstReg), + E = MRI->use_end(); U != E; ++U) { + MachineOperand &MO = U.getOperand(); + MachineInstr *MI = MO.getParent(); + + // Do not replace if it is a phi's operand or is tied to def operand. + if (MI->isPHI() || MI->isRegTiedToDefOperand(U.getOperandNo())) + continue; + + MO.setReg(ZeroReg); + } + + return true; +} + +void MipsDAGToDAGISel::ProcessFunctionAfterISel(MachineFunction &MF) { + InitGlobalBaseReg(MF); + + MachineRegisterInfo *MRI = &MF.getRegInfo(); + + for (MachineFunction::iterator MFI = MF.begin(), MFE = MF.end(); MFI != MFE; + ++MFI) + for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) + ReplaceUsesWithZeroReg(MRI, *I); +} + +bool MipsDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) { + bool Ret = SelectionDAGISel::runOnMachineFunction(MF); + + ProcessFunctionAfterISel(MF); + + return Ret; +} + +/// getGlobalBaseReg - Output the instructions required to put the +/// GOT address into a register. +SDNode *MipsDAGToDAGISel::getGlobalBaseReg() { + unsigned GlobalBaseReg = MF->getInfo<MipsFunctionInfo>()->getGlobalBaseReg(); + return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); +} + +/// ComplexPattern used on MipsInstrInfo +/// Used on Mips Load/Store instructions +bool MipsDAGToDAGISel:: +SelectAddr(SDNode *Parent, SDValue Addr, SDValue &Base, SDValue &Offset) { + EVT ValTy = Addr.getValueType(); + + // If Parent is an unaligned f32 load or store, select a (base + index) + // floating point load/store instruction (luxc1 or suxc1). + const LSBaseSDNode* LS = 0; + + if (Parent && (LS = dyn_cast<LSBaseSDNode>(Parent))) { + EVT VT = LS->getMemoryVT(); + + if (VT.getSizeInBits() / 8 > LS->getAlignment()) { + assert(TLI.allowsUnalignedMemoryAccesses(VT) && + "Unaligned loads/stores not supported for this type."); + if (VT == MVT::f32) + return false; + } + } + + // if Address is FI, get the TargetFrameIndex. 
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + Offset = CurDAG->getTargetConstant(0, ValTy); + return true; + } + + // on PIC code Load GA + if (Addr.getOpcode() == MipsISD::Wrapper) { + Base = Addr.getOperand(0); + Offset = Addr.getOperand(1); + return true; + } + + if (TM.getRelocationModel() != Reloc::PIC_) { + if ((Addr.getOpcode() == ISD::TargetExternalSymbol || + Addr.getOpcode() == ISD::TargetGlobalAddress)) + return false; + } + + // Addresses of the form FI+const or FI|const + if (CurDAG->isBaseWithConstantOffset(Addr)) { + ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)); + if (isInt<16>(CN->getSExtValue())) { + + // If the first operand is a FI, get the TargetFI Node + if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode> + (Addr.getOperand(0))) + Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), ValTy); + else + Base = Addr.getOperand(0); + + Offset = CurDAG->getTargetConstant(CN->getZExtValue(), ValTy); + return true; + } + } + + // Operand is a result from an ADD. + if (Addr.getOpcode() == ISD::ADD) { + // When loading from constant pools, load the lower address part in + // the instruction itself. Example, instead of: + // lui $2, %hi($CPI1_0) + // addiu $2, $2, %lo($CPI1_0) + // lwc1 $f0, 0($2) + // Generate: + // lui $2, %hi($CPI1_0) + // lwc1 $f0, %lo($CPI1_0)($2) + if (Addr.getOperand(1).getOpcode() == MipsISD::Lo) { + SDValue LoVal = Addr.getOperand(1); + if (isa<ConstantPoolSDNode>(LoVal.getOperand(0)) || + isa<GlobalAddressSDNode>(LoVal.getOperand(0))) { + Base = Addr.getOperand(0); + Offset = LoVal.getOperand(0); + return true; + } + } + + // If an indexed floating point load/store can be emitted, return false. 
+ if (LS && (LS->getMemoryVT() == MVT::f32 || LS->getMemoryVT() == MVT::f64) && + Subtarget.hasMips32r2Or64()) + return false; + } + + Base = Addr; + Offset = CurDAG->getTargetConstant(0, ValTy); + return true; +} + +/// Select multiply instructions. +std::pair<SDNode*, SDNode*> +MipsDAGToDAGISel::SelectMULT(SDNode *N, unsigned Opc, DebugLoc dl, EVT Ty, + bool HasLo, bool HasHi) { + SDNode *Lo = 0, *Hi = 0; + SDNode *Mul = CurDAG->getMachineNode(Opc, dl, MVT::Glue, N->getOperand(0), + N->getOperand(1)); + SDValue InFlag = SDValue(Mul, 0); + + if (HasLo) { + Lo = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFLO : Mips::MFLO64, dl, + Ty, MVT::Glue, InFlag); + InFlag = SDValue(Lo, 1); + } + if (HasHi) + Hi = CurDAG->getMachineNode(Ty == MVT::i32 ? Mips::MFHI : Mips::MFHI64, dl, + Ty, InFlag); + + return std::make_pair(Lo, Hi); +} + + +/// Select instructions not customized! Used for +/// expanded, promoted and normal instructions +SDNode* MipsDAGToDAGISel::Select(SDNode *Node) { + unsigned Opcode = Node->getOpcode(); + DebugLoc dl = Node->getDebugLoc(); + + // Dump information about the Node being selected + DEBUG(errs() << "Selecting: "; Node->dump(CurDAG); errs() << "\n"); + + // If we have a custom node, we already have selected! + if (Node->isMachineOpcode()) { + DEBUG(errs() << "== "; Node->dump(CurDAG); errs() << "\n"); + return NULL; + } + + /// + // Instruction Selection not handled by the auto-generated + // tablegen selection should be handled here. 
+ /// + EVT NodeTy = Node->getValueType(0); + unsigned MultOpc; + + switch(Opcode) { + default: break; + + case ISD::SUBE: + case ISD::ADDE: { + SDValue InFlag = Node->getOperand(2), CmpLHS; + unsigned Opc = InFlag.getOpcode(); (void)Opc; + assert(((Opc == ISD::ADDC || Opc == ISD::ADDE) || + (Opc == ISD::SUBC || Opc == ISD::SUBE)) && + "(ADD|SUB)E flag operand must come from (ADD|SUB)C/E insn"); + + unsigned MOp; + if (Opcode == ISD::ADDE) { + CmpLHS = InFlag.getValue(0); + MOp = Mips::ADDu; + } else { + CmpLHS = InFlag.getOperand(0); + MOp = Mips::SUBu; + } + + SDValue Ops[] = { CmpLHS, InFlag.getOperand(1) }; + + SDValue LHS = Node->getOperand(0); + SDValue RHS = Node->getOperand(1); + + EVT VT = LHS.getValueType(); + SDNode *Carry = CurDAG->getMachineNode(Mips::SLTu, dl, VT, Ops, 2); + SDNode *AddCarry = CurDAG->getMachineNode(Mips::ADDu, dl, VT, + SDValue(Carry,0), RHS); + + return CurDAG->SelectNodeTo(Node, MOp, VT, MVT::Glue, + LHS, SDValue(AddCarry,0)); + } + + /// Mul with two results + case ISD::SMUL_LOHI: + case ISD::UMUL_LOHI: { + if (NodeTy == MVT::i32) + MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::MULTu : Mips::MULT); + else + MultOpc = (Opcode == ISD::UMUL_LOHI ? Mips::DMULTu : Mips::DMULT); + + std::pair<SDNode*, SDNode*> LoHi = SelectMULT(Node, MultOpc, dl, NodeTy, + true, true); + + if (!SDValue(Node, 0).use_empty()) + ReplaceUses(SDValue(Node, 0), SDValue(LoHi.first, 0)); + + if (!SDValue(Node, 1).use_empty()) + ReplaceUses(SDValue(Node, 1), SDValue(LoHi.second, 0)); + + return NULL; + } + + /// Special Muls + case ISD::MUL: { + // Mips32 has a 32-bit three operand mul instruction. + if (Subtarget.hasMips32() && NodeTy == MVT::i32) + break; + return SelectMULT(Node, NodeTy == MVT::i32 ? Mips::MULT : Mips::DMULT, + dl, NodeTy, true, false).first; + } + case ISD::MULHS: + case ISD::MULHU: { + if (NodeTy == MVT::i32) + MultOpc = (Opcode == ISD::MULHU ? Mips::MULTu : Mips::MULT); + else + MultOpc = (Opcode == ISD::MULHU ? 
Mips::DMULTu : Mips::DMULT); + + return SelectMULT(Node, MultOpc, dl, NodeTy, false, true).second; + } + + // Get target GOT address. + case ISD::GLOBAL_OFFSET_TABLE: + return getGlobalBaseReg(); + + case ISD::ConstantFP: { + ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(Node); + if (Node->getValueType(0) == MVT::f64 && CN->isExactlyValue(+0.0)) { + if (Subtarget.hasMips64()) { + SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, + Mips::ZERO_64, MVT::i64); + return CurDAG->getMachineNode(Mips::DMTC1, dl, MVT::f64, Zero); + } + + SDValue Zero = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, + Mips::ZERO, MVT::i32); + return CurDAG->getMachineNode(Mips::BuildPairF64, dl, MVT::f64, Zero, + Zero); + } + break; + } + + case ISD::Constant: { + const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Node); + unsigned Size = CN->getValueSizeInBits(0); + + if (Size == 32) + break; + + MipsAnalyzeImmediate AnalyzeImm; + int64_t Imm = CN->getSExtValue(); + + const MipsAnalyzeImmediate::InstSeq &Seq = + AnalyzeImm.Analyze(Imm, Size, false); + + MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); + DebugLoc DL = CN->getDebugLoc(); + SDNode *RegOpnd; + SDValue ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd), + MVT::i64); + + // The first instruction can be a LUi which is different from other + // instructions (ADDiu, ORI and SLL) in that it does not have a register + // operand. + if (Inst->Opc == Mips::LUi64) + RegOpnd = CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64, ImmOpnd); + else + RegOpnd = + CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64, + CurDAG->getRegister(Mips::ZERO_64, MVT::i64), + ImmOpnd); + + // The remaining instructions in the sequence are handled here. 
+ for (++Inst; Inst != Seq.end(); ++Inst) { + ImmOpnd = CurDAG->getTargetConstant(SignExtend64<16>(Inst->ImmOpnd), + MVT::i64); + RegOpnd = CurDAG->getMachineNode(Inst->Opc, DL, MVT::i64, + SDValue(RegOpnd, 0), ImmOpnd); + } + + return RegOpnd; + } + + case MipsISD::ThreadPointer: { + EVT PtrVT = TLI.getPointerTy(); + unsigned RdhwrOpc, SrcReg, DestReg; + + if (PtrVT == MVT::i32) { + RdhwrOpc = Mips::RDHWR; + SrcReg = Mips::HWR29; + DestReg = Mips::V1; + } else { + RdhwrOpc = Mips::RDHWR64; + SrcReg = Mips::HWR29_64; + DestReg = Mips::V1_64; + } + + SDNode *Rdhwr = + CurDAG->getMachineNode(RdhwrOpc, Node->getDebugLoc(), + Node->getValueType(0), + CurDAG->getRegister(SrcReg, PtrVT)); + SDValue Chain = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, DestReg, + SDValue(Rdhwr, 0)); + SDValue ResNode = CurDAG->getCopyFromReg(Chain, dl, DestReg, PtrVT); + ReplaceUses(SDValue(Node, 0), ResNode); + return ResNode.getNode(); + } + } + + // Select the default instruction + SDNode *ResNode = SelectCode(Node); + + DEBUG(errs() << "=> "); + if (ResNode == NULL || ResNode == Node) + DEBUG(Node->dump(CurDAG)); + else + DEBUG(ResNode->dump(CurDAG)); + DEBUG(errs() << "\n"); + return ResNode; +} + +bool MipsDAGToDAGISel:: +SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode, + std::vector<SDValue> &OutOps) { + assert(ConstraintCode == 'm' && "unexpected asm memory constraint"); + OutOps.push_back(Op); + return false; +} + +/// createMipsISelDag - This pass converts a legalized DAG into a +/// MIPS-specific DAG, ready for instruction scheduling. 
+FunctionPass *llvm::createMipsISelDag(MipsTargetMachine &TM) { + return new MipsDAGToDAGISel(TM); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp new file mode 100644 index 0000000..6a23bc3 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.cpp @@ -0,0 +1,3088 @@ +//===-- MipsISelLowering.cpp - Mips DAG Lowering Implementation -----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Mips uses to lower LLVM code into a +// selection DAG. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-lower" +#include "MipsISelLowering.h" +#include "MipsMachineFunction.h" +#include "MipsTargetMachine.h" +#include "MipsTargetObjectFile.h" +#include "MipsSubtarget.h" +#include "InstPrinter/MipsInstPrinter.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Intrinsics.h" +#include "llvm/CallingConv.h" +#include "llvm/CodeGen/CallingConvLower.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +using namespace llvm; + +// If I is a shifted mask, set the size (Size) and the first bit of the +// mask (Pos), and return true. +// For example, if I is 0x003ff800, (Pos, Size) = (11, 11). 
+static bool IsShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) { + if (!isShiftedMask_64(I)) + return false; + + Size = CountPopulation_64(I); + Pos = CountTrailingZeros_64(I); + return true; +} + +static SDValue GetGlobalReg(SelectionDAG &DAG, EVT Ty) { + MipsFunctionInfo *FI = DAG.getMachineFunction().getInfo<MipsFunctionInfo>(); + return DAG.getRegister(FI->getGlobalBaseReg(), Ty); +} + +const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const { + switch (Opcode) { + case MipsISD::JmpLink: return "MipsISD::JmpLink"; + case MipsISD::Hi: return "MipsISD::Hi"; + case MipsISD::Lo: return "MipsISD::Lo"; + case MipsISD::GPRel: return "MipsISD::GPRel"; + case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer"; + case MipsISD::Ret: return "MipsISD::Ret"; + case MipsISD::FPBrcond: return "MipsISD::FPBrcond"; + case MipsISD::FPCmp: return "MipsISD::FPCmp"; + case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T"; + case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F"; + case MipsISD::FPRound: return "MipsISD::FPRound"; + case MipsISD::MAdd: return "MipsISD::MAdd"; + case MipsISD::MAddu: return "MipsISD::MAddu"; + case MipsISD::MSub: return "MipsISD::MSub"; + case MipsISD::MSubu: return "MipsISD::MSubu"; + case MipsISD::DivRem: return "MipsISD::DivRem"; + case MipsISD::DivRemU: return "MipsISD::DivRemU"; + case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64"; + case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64"; + case MipsISD::Wrapper: return "MipsISD::Wrapper"; + case MipsISD::DynAlloc: return "MipsISD::DynAlloc"; + case MipsISD::Sync: return "MipsISD::Sync"; + case MipsISD::Ext: return "MipsISD::Ext"; + case MipsISD::Ins: return "MipsISD::Ins"; + default: return NULL; + } +} + +MipsTargetLowering:: +MipsTargetLowering(MipsTargetMachine &TM) + : TargetLowering(TM, new MipsTargetObjectFile()), + Subtarget(&TM.getSubtarget<MipsSubtarget>()), + HasMips64(Subtarget->hasMips64()), IsN64(Subtarget->isABI_N64()), + 
IsO32(Subtarget->isABI_O32()) {
+
+ // Mips does not have i1 type, so use i32 for
+ // setcc operations results (slt, sgt, ...).
+ setBooleanContents(ZeroOrOneBooleanContent);
+ setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?
+
+ // Set up the register classes
+ addRegisterClass(MVT::i32, Mips::CPURegsRegisterClass);
+
+ if (HasMips64)
+ addRegisterClass(MVT::i64, Mips::CPU64RegsRegisterClass);
+
+ if (!TM.Options.UseSoftFloat) {
+ addRegisterClass(MVT::f32, Mips::FGR32RegisterClass);
+
+ // When dealing with single precision only, use libcalls
+ if (!Subtarget->isSingleFloat()) {
+ if (HasMips64)
+ addRegisterClass(MVT::f64, Mips::FGR64RegisterClass);
+ else
+ addRegisterClass(MVT::f64, Mips::AFGR64RegisterClass);
+ }
+ }
+
+ // Load extended operations for i1 types must be promoted
+ setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
+ setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
+
+ // MIPS doesn't have extending float->double load/store
+ setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+
+ // Used by legalize types to correctly generate the setcc result.
+ // Without this, every float setcc comes with an AND/OR with the result,
+ // we don't want this, since the fpcmp result goes to a flag register,
+ // which is used implicitly by brcond and select operations. 
+ AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32); + + // Mips Custom Operations + setOperationAction(ISD::GlobalAddress, MVT::i32, Custom); + setOperationAction(ISD::BlockAddress, MVT::i32, Custom); + setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom); + setOperationAction(ISD::JumpTable, MVT::i32, Custom); + setOperationAction(ISD::ConstantPool, MVT::i32, Custom); + setOperationAction(ISD::SELECT, MVT::f32, Custom); + setOperationAction(ISD::SELECT, MVT::f64, Custom); + setOperationAction(ISD::SELECT, MVT::i32, Custom); + setOperationAction(ISD::SETCC, MVT::f32, Custom); + setOperationAction(ISD::SETCC, MVT::f64, Custom); + setOperationAction(ISD::BRCOND, MVT::Other, Custom); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); + setOperationAction(ISD::VASTART, MVT::Other, Custom); + setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom); + setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom); + setOperationAction(ISD::MEMBARRIER, MVT::Other, Custom); + setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom); + + if (!TM.Options.NoNaNsFPMath) { + setOperationAction(ISD::FABS, MVT::f32, Custom); + setOperationAction(ISD::FABS, MVT::f64, Custom); + } + + if (HasMips64) { + setOperationAction(ISD::GlobalAddress, MVT::i64, Custom); + setOperationAction(ISD::BlockAddress, MVT::i64, Custom); + setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom); + setOperationAction(ISD::JumpTable, MVT::i64, Custom); + setOperationAction(ISD::ConstantPool, MVT::i64, Custom); + setOperationAction(ISD::SELECT, MVT::i64, Custom); + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + } + + setOperationAction(ISD::SDIV, MVT::i32, Expand); + setOperationAction(ISD::SREM, MVT::i32, Expand); + setOperationAction(ISD::UDIV, MVT::i32, Expand); + setOperationAction(ISD::UREM, MVT::i32, Expand); + setOperationAction(ISD::SDIV, MVT::i64, Expand); + setOperationAction(ISD::SREM, MVT::i64, Expand); + setOperationAction(ISD::UDIV, MVT::i64, Expand); + 
setOperationAction(ISD::UREM, MVT::i64, Expand); + + // Operations not directly supported by Mips. + setOperationAction(ISD::BR_JT, MVT::Other, Expand); + setOperationAction(ISD::BR_CC, MVT::Other, Expand); + setOperationAction(ISD::SELECT_CC, MVT::Other, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand); + setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand); + setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand); + setOperationAction(ISD::CTPOP, MVT::i32, Expand); + setOperationAction(ISD::CTPOP, MVT::i64, Expand); + setOperationAction(ISD::CTTZ, MVT::i32, Expand); + setOperationAction(ISD::CTTZ, MVT::i64, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); + setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand); + setOperationAction(ISD::ROTL, MVT::i32, Expand); + setOperationAction(ISD::ROTL, MVT::i64, Expand); + + if (!Subtarget->hasMips32r2()) + setOperationAction(ISD::ROTR, MVT::i32, Expand); + + if (!Subtarget->hasMips64r2()) + setOperationAction(ISD::ROTR, MVT::i64, Expand); + + setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand); + setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand); + setOperationAction(ISD::FSIN, MVT::f32, Expand); + setOperationAction(ISD::FSIN, MVT::f64, Expand); + setOperationAction(ISD::FCOS, MVT::f32, Expand); + setOperationAction(ISD::FCOS, MVT::f64, Expand); + setOperationAction(ISD::FPOWI, MVT::f32, Expand); + setOperationAction(ISD::FPOW, MVT::f32, Expand); + setOperationAction(ISD::FPOW, MVT::f64, Expand); + setOperationAction(ISD::FLOG, MVT::f32, Expand); + setOperationAction(ISD::FLOG2, MVT::f32, Expand); + setOperationAction(ISD::FLOG10, MVT::f32, Expand); + setOperationAction(ISD::FEXP, 
MVT::f32, Expand); + setOperationAction(ISD::FMA, MVT::f32, Expand); + setOperationAction(ISD::FMA, MVT::f64, Expand); + setOperationAction(ISD::FREM, MVT::f32, Expand); + setOperationAction(ISD::FREM, MVT::f64, Expand); + + if (!TM.Options.NoNaNsFPMath) { + setOperationAction(ISD::FNEG, MVT::f32, Expand); + setOperationAction(ISD::FNEG, MVT::f64, Expand); + } + + setOperationAction(ISD::EXCEPTIONADDR, MVT::i32, Expand); + setOperationAction(ISD::EXCEPTIONADDR, MVT::i64, Expand); + setOperationAction(ISD::EHSELECTION, MVT::i32, Expand); + setOperationAction(ISD::EHSELECTION, MVT::i64, Expand); + + setOperationAction(ISD::VAARG, MVT::Other, Expand); + setOperationAction(ISD::VACOPY, MVT::Other, Expand); + setOperationAction(ISD::VAEND, MVT::Other, Expand); + + // Use the default for now + setOperationAction(ISD::STACKSAVE, MVT::Other, Expand); + setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand); + + setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand); + setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Expand); + setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand); + + setInsertFencesForAtomic(true); + + if (Subtarget->isSingleFloat()) + setOperationAction(ISD::SELECT_CC, MVT::f64, Expand); + + if (!Subtarget->hasSEInReg()) { + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand); + setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand); + } + + if (!Subtarget->hasBitCount()) { + setOperationAction(ISD::CTLZ, MVT::i32, Expand); + setOperationAction(ISD::CTLZ, MVT::i64, Expand); + } + + if (!Subtarget->hasSwap()) { + setOperationAction(ISD::BSWAP, MVT::i32, Expand); + setOperationAction(ISD::BSWAP, MVT::i64, Expand); + } + + setTargetDAGCombine(ISD::ADDE); + setTargetDAGCombine(ISD::SUBE); + setTargetDAGCombine(ISD::SDIVREM); + setTargetDAGCombine(ISD::UDIVREM); + setTargetDAGCombine(ISD::SELECT); + setTargetDAGCombine(ISD::AND); + setTargetDAGCombine(ISD::OR); + + 
setMinFunctionAlignment(HasMips64 ? 3 : 2);
+
+ setStackPointerRegisterToSaveRestore(IsN64 ? Mips::SP_64 : Mips::SP);
+ computeRegisterProperties();
+
+ setExceptionPointerRegister(IsN64 ? Mips::A0_64 : Mips::A0);
+ setExceptionSelectorRegister(IsN64 ? Mips::A1_64 : Mips::A1);
+}
+
+bool MipsTargetLowering::allowsUnalignedMemoryAccesses(EVT VT) const {
+ MVT::SimpleValueType SVT = VT.getSimpleVT().SimpleTy;
+
+ switch (SVT) {
+ case MVT::i64:
+ case MVT::i32:
+ case MVT::i16:
+ return true;
+ case MVT::f32:
+ return Subtarget->hasMips32r2Or64();
+ default:
+ return false;
+ }
+}
+
+EVT MipsTargetLowering::getSetCCResultType(EVT VT) const {
+ return MVT::i32;
+}
+
+// SelectMadd -
+// Transforms a subgraph in CurDAG if the following pattern is found:
+// (addc multLo, Lo0), (adde multHi, Hi0),
+// where,
+// multHi/Lo: product of multiplication
+// Lo0: initial value of Lo register
+// Hi0: initial value of Hi register
+// Return true if pattern matching was successful.
+static bool SelectMadd(SDNode* ADDENode, SelectionDAG* CurDAG) {
+ // ADDENode's second operand must be a flag output of an ADDC node in order
+ // for the matching to be successful.
+ SDNode* ADDCNode = ADDENode->getOperand(2).getNode();
+
+ if (ADDCNode->getOpcode() != ISD::ADDC)
+ return false;
+
+ SDValue MultHi = ADDENode->getOperand(0);
+ SDValue MultLo = ADDCNode->getOperand(0);
+ SDNode* MultNode = MultHi.getNode();
+ unsigned MultOpc = MultHi.getOpcode();
+
+ // MultHi and MultLo must be generated by the same node,
+ if (MultLo.getNode() != MultNode)
+ return false;
+
+ // and it must be a multiplication.
+ if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
+ return false;
+
+ // MultLo and MultHi must be the first and second output of MultNode
+ // respectively. 
+ if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0) + return false; + + // Transform this to a MADD only if ADDENode and ADDCNode are the only users + // of the values of MultNode, in which case MultNode will be removed in later + // phases. + // If there exist users other than ADDENode or ADDCNode, this function returns + // here, which will result in MultNode being mapped to a single MULT + // instruction node rather than a pair of MULT and MADD instructions being + // produced. + if (!MultHi.hasOneUse() || !MultLo.hasOneUse()) + return false; + + SDValue Chain = CurDAG->getEntryNode(); + DebugLoc dl = ADDENode->getDebugLoc(); + + // create MipsMAdd(u) node + MultOpc = MultOpc == ISD::UMUL_LOHI ? MipsISD::MAddu : MipsISD::MAdd; + + SDValue MAdd = CurDAG->getNode(MultOpc, dl, MVT::Glue, + MultNode->getOperand(0),// Factor 0 + MultNode->getOperand(1),// Factor 1 + ADDCNode->getOperand(1),// Lo0 + ADDENode->getOperand(1));// Hi0 + + // create CopyFromReg nodes + SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32, + MAdd); + SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl, + Mips::HI, MVT::i32, + CopyFromLo.getValue(2)); + + // replace uses of adde and addc here + if (!SDValue(ADDCNode, 0).use_empty()) + CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDCNode, 0), CopyFromLo); + + if (!SDValue(ADDENode, 0).use_empty()) + CurDAG->ReplaceAllUsesOfValueWith(SDValue(ADDENode, 0), CopyFromHi); + + return true; +} + +// SelectMsub - +// Transforms a subgraph in CurDAG if the following pattern is found: +// (addc Lo0, multLo), (sube Hi0, multHi), +// where, +// multHi/Lo: product of multiplication +// Lo0: initial value of Lo register +// Hi0: initial value of Hi register +// Return true if pattern matching was successful. +static bool SelectMsub(SDNode* SUBENode, SelectionDAG* CurDAG) { + // SUBENode's second operand must be a flag output of an SUBC node in order + // for the matching to be successful. 
+ SDNode* SUBCNode = SUBENode->getOperand(2).getNode();
+
+ if (SUBCNode->getOpcode() != ISD::SUBC)
+ return false;
+
+ SDValue MultHi = SUBENode->getOperand(1);
+ SDValue MultLo = SUBCNode->getOperand(1);
+ SDNode* MultNode = MultHi.getNode();
+ unsigned MultOpc = MultHi.getOpcode();
+
+ // MultHi and MultLo must be generated by the same node,
+ if (MultLo.getNode() != MultNode)
+ return false;
+
+ // and it must be a multiplication.
+ if (MultOpc != ISD::SMUL_LOHI && MultOpc != ISD::UMUL_LOHI)
+ return false;
+
+ // MultLo and MultHi must be the first and second output of MultNode
+ // respectively.
+ if (MultHi.getResNo() != 1 || MultLo.getResNo() != 0)
+ return false;
+
+ // Transform this to a MSUB only if SUBENode and SUBCNode are the only users
+ // of the values of MultNode, in which case MultNode will be removed in later
+ // phases.
+ // If there exist users other than SUBENode or SUBCNode, this function returns
+ // here, which will result in MultNode being mapped to a single MULT
+ // instruction node rather than a pair of MULT and MSUB instructions being
+ // produced.
+ if (!MultHi.hasOneUse() || !MultLo.hasOneUse())
+ return false;
+
+ SDValue Chain = CurDAG->getEntryNode();
+ DebugLoc dl = SUBENode->getDebugLoc();
+
+ // create MipsMSub(u) node
+ MultOpc = MultOpc == ISD::UMUL_LOHI ? 
MipsISD::MSubu : MipsISD::MSub; + + SDValue MSub = CurDAG->getNode(MultOpc, dl, MVT::Glue, + MultNode->getOperand(0),// Factor 0 + MultNode->getOperand(1),// Factor 1 + SUBCNode->getOperand(0),// Lo0 + SUBENode->getOperand(0));// Hi0 + + // create CopyFromReg nodes + SDValue CopyFromLo = CurDAG->getCopyFromReg(Chain, dl, Mips::LO, MVT::i32, + MSub); + SDValue CopyFromHi = CurDAG->getCopyFromReg(CopyFromLo.getValue(1), dl, + Mips::HI, MVT::i32, + CopyFromLo.getValue(2)); + + // replace uses of sube and subc here + if (!SDValue(SUBCNode, 0).use_empty()) + CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBCNode, 0), CopyFromLo); + + if (!SDValue(SUBENode, 0).use_empty()) + CurDAG->ReplaceAllUsesOfValueWith(SDValue(SUBENode, 0), CopyFromHi); + + return true; +} + +static SDValue PerformADDECombine(SDNode *N, SelectionDAG& DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget* Subtarget) { + if (DCI.isBeforeLegalize()) + return SDValue(); + + if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && + SelectMadd(N, &DAG)) + return SDValue(N, 0); + + return SDValue(); +} + +static SDValue PerformSUBECombine(SDNode *N, SelectionDAG& DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget* Subtarget) { + if (DCI.isBeforeLegalize()) + return SDValue(); + + if (Subtarget->hasMips32() && N->getValueType(0) == MVT::i32 && + SelectMsub(N, &DAG)) + return SDValue(N, 0); + + return SDValue(); +} + +static SDValue PerformDivRemCombine(SDNode *N, SelectionDAG& DAG, + TargetLowering::DAGCombinerInfo &DCI, + const MipsSubtarget* Subtarget) { + if (DCI.isBeforeLegalizeOps()) + return SDValue(); + + EVT Ty = N->getValueType(0); + unsigned LO = (Ty == MVT::i32) ? Mips::LO : Mips::LO64; + unsigned HI = (Ty == MVT::i32) ? Mips::HI : Mips::HI64; + unsigned opc = N->getOpcode() == ISD::SDIVREM ? 
MipsISD::DivRem : + MipsISD::DivRemU; + DebugLoc dl = N->getDebugLoc(); + + SDValue DivRem = DAG.getNode(opc, dl, MVT::Glue, + N->getOperand(0), N->getOperand(1)); + SDValue InChain = DAG.getEntryNode(); + SDValue InGlue = DivRem; + + // insert MFLO + if (N->hasAnyUseOfValue(0)) { + SDValue CopyFromLo = DAG.getCopyFromReg(InChain, dl, LO, Ty, + InGlue); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo); + InChain = CopyFromLo.getValue(1); + InGlue = CopyFromLo.getValue(2); + } + + // insert MFHI + if (N->hasAnyUseOfValue(1)) { + SDValue CopyFromHi = DAG.getCopyFromReg(InChain, dl, + HI, Ty, InGlue); + DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi); + } + + return SDValue(); +} + +static Mips::CondCode FPCondCCodeToFCC(ISD::CondCode CC) { + switch (CC) { + default: llvm_unreachable("Unknown fp condition code!"); + case ISD::SETEQ: + case ISD::SETOEQ: return Mips::FCOND_OEQ; + case ISD::SETUNE: return Mips::FCOND_UNE; + case ISD::SETLT: + case ISD::SETOLT: return Mips::FCOND_OLT; + case ISD::SETGT: + case ISD::SETOGT: return Mips::FCOND_OGT; + case ISD::SETLE: + case ISD::SETOLE: return Mips::FCOND_OLE; + case ISD::SETGE: + case ISD::SETOGE: return Mips::FCOND_OGE; + case ISD::SETULT: return Mips::FCOND_ULT; + case ISD::SETULE: return Mips::FCOND_ULE; + case ISD::SETUGT: return Mips::FCOND_UGT; + case ISD::SETUGE: return Mips::FCOND_UGE; + case ISD::SETUO: return Mips::FCOND_UN; + case ISD::SETO: return Mips::FCOND_OR; + case ISD::SETNE: + case ISD::SETONE: return Mips::FCOND_ONE; + case ISD::SETUEQ: return Mips::FCOND_UEQ; + } +} + + +// Returns true if condition code has to be inverted. +static bool InvertFPCondCode(Mips::CondCode CC) { + if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT) + return false; + + assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) && + "Illegal Condition Code"); + + return true; +} + +// Creates and returns an FPCmp node from a setcc node. +// Returns Op if setcc is not a floating point comparison. 
// Creates and returns a MipsISD::FPCmp node (producing glue) from a setcc
// node whose operands are floating point. Returns Op unchanged otherwise.
static SDValue CreateFPCmp(SelectionDAG& DAG, const SDValue& Op) {
  // must be a SETCC node
  if (Op.getOpcode() != ISD::SETCC)
    return Op;

  SDValue LHS = Op.getOperand(0);

  if (!LHS.getValueType().isFloatingPoint())
    return Op;

  SDValue RHS = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
  // node if necessary.
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();

  return DAG.getNode(MipsISD::FPCmp, dl, MVT::Glue, LHS, RHS,
                     DAG.getConstant(FPCondCCodeToFCC(CC), MVT::i32));
}

// Creates and returns a CMovFPT/F node selecting True/False on the glued
// FPCmp in Cond. CMovFP_F is used when the FPCmp's condition code must be
// inverted to get an encodable c.cond.fmt condition.
static SDValue CreateCMovFP(SelectionDAG& DAG, SDValue Cond, SDValue True,
                            SDValue False, DebugLoc DL) {
  bool invert = InvertFPCondCode((Mips::CondCode)
                                 cast<ConstantSDNode>(Cond.getOperand(2))
                                 ->getSExtValue());

  return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
                     True.getValueType(), True, False, Cond);
}

// DAG combine for ISD::SELECT: when the false operand is the integer
// constant 0, invert the integer setcc condition and swap the select arms,
// i.e. (select (setcc ...), T, 0) -> (select (inv-setcc ...), 0, T).
// This exposes a form the MOVZ/MOVN patterns can match.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG& DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const MipsSubtarget* Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue SetCC = N->getOperand(0);

  // Only integer comparisons are handled here.
  if ((SetCC.getOpcode() != ISD::SETCC) ||
      !SetCC.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue False = N->getOperand(2);
  EVT FalseTy = False.getValueType();

  if (!FalseTy.isInteger())
    return SDValue();

  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(False);

  // Bail out unless the false operand is exactly constant zero.
  if (!CN || CN->getZExtValue())
    return SDValue();

  const DebugLoc DL = N->getDebugLoc();
  ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
  SDValue True = N->getOperand(1);

  SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
                       SetCC.getOperand(1), ISD::getSetCCInverse(CC, true));

  return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
}

static SDValue PerformANDCombine(SDNode *N, SelectionDAG& DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const MipsSubtarget* Subtarget) {
  // Pattern match EXT.
  //  $dst = and ((sra or srl) $src , pos), (2**size - 1)
  //  => ext $dst, $src, size, pos
  // EXT is only available on MIPS32r2 and later.
  if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
    return SDValue();

  SDValue ShiftRight = N->getOperand(0), Mask = N->getOperand(1);
  unsigned ShiftRightOpc = ShiftRight.getOpcode();

  // Op's first operand must be a shift right.
  if (ShiftRightOpc != ISD::SRA && ShiftRightOpc != ISD::SRL)
    return SDValue();

  // The second operand of the shift must be an immediate.
  ConstantSDNode *CN;
  if (!(CN = dyn_cast<ConstantSDNode>(ShiftRight.getOperand(1))))
    return SDValue();

  uint64_t Pos = CN->getZExtValue();
  uint64_t SMPos, SMSize;

  // Op's second operand must be a shifted mask.
  // IsShiftedMask (defined earlier in this file) also yields the mask's
  // starting bit position and size.
  if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
      !IsShiftedMask(CN->getZExtValue(), SMPos, SMSize))
    return SDValue();

  // Return if the shifted mask does not start at bit 0 or the sum of its size
  // and Pos exceeds the word's size.
  EVT ValTy = N->getValueType(0);
  if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
    return SDValue();

  return DAG.getNode(MipsISD::Ext, N->getDebugLoc(), ValTy,
                     ShiftRight.getOperand(0), DAG.getConstant(Pos, MVT::i32),
                     DAG.getConstant(SMSize, MVT::i32));
}

static SDValue PerformORCombine(SDNode *N, SelectionDAG& DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const MipsSubtarget* Subtarget) {
  // Pattern match INS.
  //  $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
  //  where mask1 = (2**size - 1) << pos, mask0 = ~mask1
  //  => ins $dst, $src, size, pos, $src1
  // INS is only available on MIPS32r2 and later.
  if (DCI.isBeforeLegalizeOps() || !Subtarget->hasMips32r2())
    return SDValue();

  SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
  uint64_t SMPos0, SMSize0, SMPos1, SMSize1;
  ConstantSDNode *CN;

  // See if Op's first operand matches (and $src1 , mask0).
  if (And0.getOpcode() != ISD::AND)
    return SDValue();

  // mask0 must be the complement of a shifted mask.
  if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
      !IsShiftedMask(~CN->getSExtValue(), SMPos0, SMSize0))
    return SDValue();

  // See if Op's second operand matches (and (shl $src, pos), mask1).
  if (And1.getOpcode() != ISD::AND)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
      !IsShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
    return SDValue();

  // The shift masks must have the same position and size.
  if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
    return SDValue();

  SDValue Shl = And1.getOperand(0);
  if (Shl.getOpcode() != ISD::SHL)
    return SDValue();

  if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
    return SDValue();

  unsigned Shamt = CN->getZExtValue();

  // Return if the shift amount and the first bit position of mask are not the
  // same, or if the field would extend past the word's size.
  EVT ValTy = N->getValueType(0);
  if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
    return SDValue();

  return DAG.getNode(MipsISD::Ins, N->getDebugLoc(), ValTy, Shl.getOperand(0),
                     DAG.getConstant(SMPos0, MVT::i32),
                     DAG.getConstant(SMSize0, MVT::i32), And0.getOperand(0));
}

// Target hook: dispatch node opcodes to the Mips-specific combines above.
SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
  const {
  SelectionDAG &DAG = DCI.DAG;
  unsigned opc = N->getOpcode();

  switch (opc) {
  default: break;
  case ISD::ADDE:
    return PerformADDECombine(N, DAG, DCI, Subtarget);
  case ISD::SUBE:
    return PerformSUBECombine(N, DAG, DCI, Subtarget);
  case ISD::SDIVREM:
  case ISD::UDIVREM:
    return PerformDivRemCombine(N, DAG, DCI, Subtarget);
  case ISD::SELECT:
    return PerformSELECTCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:
    return PerformANDCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:
    return PerformORCombine(N, DAG, DCI, Subtarget);
  }

  return SDValue();
}

// Target hook: dispatch custom-lowered operations to the per-opcode
// LowerXXX helpers. Unhandled opcodes fall through and return an empty
// SDValue.
SDValue MipsTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode())
  {
    case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
    case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
    case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
    case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
    case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
    case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
    case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
    case ISD::SELECT:             return LowerSELECT(Op, DAG);
    case ISD::SETCC:              return LowerSETCC(Op, DAG);
    case ISD::VASTART:            return LowerVASTART(Op, DAG);
    case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
    case ISD::FABS:               return LowerFABS(Op, DAG);
    case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
    case ISD::MEMBARRIER:         return LowerMEMBARRIER(Op, DAG);
    case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Lower helper functions
//===----------------------------------------------------------------------===//

// AddLiveIn - This helper function adds the specified physical register to the
// MachineFunction as a live in value. It also creates a corresponding
// virtual register for it.
static unsigned
AddLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
{
  assert(RC->contains(PReg) && "Not the correct regclass!");
  unsigned VReg = MF.getRegInfo().createVirtualRegister(RC);
  MF.getRegInfo().addLiveIn(PReg, VReg);
  return VReg;
}

// Get fp branch code (not opcode) from condition code.
// Map a MIPS FPU condition code to the branch kind (BC1T vs BC1F) that tests
// it: directly-encodable codes use a true-branch, the inverted range uses a
// false-branch on the complementary condition.
static Mips::FPBranchCode GetFPBranchCodeFromCond(Mips::CondCode CC) {
  if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
    return Mips::BRANCH_T;

  assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
         "Invalid CondCode.");

  return Mips::BRANCH_F;
}

/*
static MachineBasicBlock* ExpandCondMov(MachineInstr *MI, MachineBasicBlock *BB,
                                        DebugLoc dl,
                                        const MipsSubtarget* Subtarget,
                                        const TargetInstrInfo *TII,
                                        bool isFPCmp, unsigned Opc) {
  // There is no need to expand CMov instructions if target has
  // conditional moves.
  if (Subtarget->hasCondMov())
    return BB;

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern.  The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   setcc r1, r2, r3
  //   bNE   r1, r0, copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB  = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Emit the right instruction according to the type of the operands compared
  if (isFPCmp)
    BuildMI(BB, dl, TII->get(Opc)).addMBB(sinkMBB);
  else
    BuildMI(BB, dl, TII->get(Opc)).addReg(MI->getOperand(2).getReg())
      .addReg(Mips::ZERO).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
  //  ...
  BB = sinkMBB;

  if (isFPCmp)
    BuildMI(*BB, BB->begin(), dl,
            TII->get(Mips::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB)
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB);
  else
    BuildMI(*BB, BB->begin(), dl,
            TII->get(Mips::PHI), MI->getOperand(0).getReg())
      .addReg(MI->getOperand(3).getReg()).addMBB(thisMBB)
      .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}
*/
// Target hook: expand atomic pseudo-instructions into LL/SC loops.
// Each ATOMIC_* pseudo (and its _P8 variant for 64-bit pointers) is routed
// to the matching Emit helper with the access size and, where applicable,
// the register-register opcode implementing the binary operation.
MachineBasicBlock *
MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case Mips::ATOMIC_LOAD_ADD_I8:
  case Mips::ATOMIC_LOAD_ADD_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, Mips::ADDu);
  case Mips::ATOMIC_LOAD_ADD_I16:
  case Mips::ATOMIC_LOAD_ADD_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, Mips::ADDu);
  case Mips::ATOMIC_LOAD_ADD_I32:
  case Mips::ATOMIC_LOAD_ADD_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, Mips::ADDu);
  case Mips::ATOMIC_LOAD_ADD_I64:
  case Mips::ATOMIC_LOAD_ADD_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, Mips::DADDu);

  case Mips::ATOMIC_LOAD_AND_I8:
  case Mips::ATOMIC_LOAD_AND_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, Mips::AND);
  case Mips::ATOMIC_LOAD_AND_I16:
  case Mips::ATOMIC_LOAD_AND_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, Mips::AND);
  case Mips::ATOMIC_LOAD_AND_I32:
  case Mips::ATOMIC_LOAD_AND_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, Mips::AND);
  case Mips::ATOMIC_LOAD_AND_I64:
  case Mips::ATOMIC_LOAD_AND_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, Mips::AND64);

  case Mips::ATOMIC_LOAD_OR_I8:
  case Mips::ATOMIC_LOAD_OR_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, Mips::OR);
  case Mips::ATOMIC_LOAD_OR_I16:
  case Mips::ATOMIC_LOAD_OR_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, Mips::OR);
  case Mips::ATOMIC_LOAD_OR_I32:
  case Mips::ATOMIC_LOAD_OR_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, Mips::OR);
  case Mips::ATOMIC_LOAD_OR_I64:
  case Mips::ATOMIC_LOAD_OR_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, Mips::OR64);

  case Mips::ATOMIC_LOAD_XOR_I8:
  case Mips::ATOMIC_LOAD_XOR_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, Mips::XOR);
  case Mips::ATOMIC_LOAD_XOR_I16:
  case Mips::ATOMIC_LOAD_XOR_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, Mips::XOR);
  case Mips::ATOMIC_LOAD_XOR_I32:
  case Mips::ATOMIC_LOAD_XOR_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, Mips::XOR);
  case Mips::ATOMIC_LOAD_XOR_I64:
  case Mips::ATOMIC_LOAD_XOR_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, Mips::XOR64);

  // NAND has no single opcode; BinOpcode 0 + Nand=true selects the
  // AND-then-NOR expansion inside the Emit helpers.
  case Mips::ATOMIC_LOAD_NAND_I8:
  case Mips::ATOMIC_LOAD_NAND_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, 0, true);
  case Mips::ATOMIC_LOAD_NAND_I16:
  case Mips::ATOMIC_LOAD_NAND_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, 0, true);
  case Mips::ATOMIC_LOAD_NAND_I32:
  case Mips::ATOMIC_LOAD_NAND_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, 0, true);
  case Mips::ATOMIC_LOAD_NAND_I64:
  case Mips::ATOMIC_LOAD_NAND_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, 0, true);

  case Mips::ATOMIC_LOAD_SUB_I8:
  case Mips::ATOMIC_LOAD_SUB_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, Mips::SUBu);
  case Mips::ATOMIC_LOAD_SUB_I16:
  case Mips::ATOMIC_LOAD_SUB_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, Mips::SUBu);
  case Mips::ATOMIC_LOAD_SUB_I32:
  case Mips::ATOMIC_LOAD_SUB_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, Mips::SUBu);
  case Mips::ATOMIC_LOAD_SUB_I64:
  case Mips::ATOMIC_LOAD_SUB_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, Mips::DSUBu);

  // Swap is the BinOpcode==0, Nand==false case: store the increment as-is.
  case Mips::ATOMIC_SWAP_I8:
  case Mips::ATOMIC_SWAP_I8_P8:
    return EmitAtomicBinaryPartword(MI, BB, 1, 0);
  case Mips::ATOMIC_SWAP_I16:
  case Mips::ATOMIC_SWAP_I16_P8:
    return EmitAtomicBinaryPartword(MI, BB, 2, 0);
  case Mips::ATOMIC_SWAP_I32:
  case Mips::ATOMIC_SWAP_I32_P8:
    return EmitAtomicBinary(MI, BB, 4, 0);
  case Mips::ATOMIC_SWAP_I64:
  case Mips::ATOMIC_SWAP_I64_P8:
    return EmitAtomicBinary(MI, BB, 8, 0);

  case Mips::ATOMIC_CMP_SWAP_I8:
  case Mips::ATOMIC_CMP_SWAP_I8_P8:
    return EmitAtomicCmpSwapPartword(MI, BB, 1);
  case Mips::ATOMIC_CMP_SWAP_I16:
  case Mips::ATOMIC_CMP_SWAP_I16_P8:
    return EmitAtomicCmpSwapPartword(MI, BB, 2);
  case Mips::ATOMIC_CMP_SWAP_I32:
  case Mips::ATOMIC_CMP_SWAP_I32_P8:
    return EmitAtomicCmpSwap(MI, BB, 4);
  case Mips::ATOMIC_CMP_SWAP_I64:
  case Mips::ATOMIC_CMP_SWAP_I64_P8:
    return EmitAtomicCmpSwap(MI, BB, 8);
  }
}

// This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
// Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
MachineBasicBlock *
MipsTargetLowering::EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
                                     unsigned Size, unsigned BinOpcode,
                                     bool Nand) const {
  assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicBinary.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LL, SC, AND, NOR, ZERO, BEQ;

  // Pick 32- or 64-bit opcode/register variants; _P8 variants are used when
  // pointers are 64-bit (N64 ABI).
  if (Size == 4) {
    LL = IsN64 ? Mips::LL_P8 : Mips::LL;
    SC = IsN64 ? Mips::SC_P8 : Mips::SC;
    AND = Mips::AND;
    NOR = Mips::NOR;
    ZERO = Mips::ZERO;
    BEQ = Mips::BEQ;
  }
  else {
    LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
    SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
    AND = Mips::AND64;
    NOR = Mips::NOR64;
    ZERO = Mips::ZERO_64;
    BEQ = Mips::BEQ64;
  }

  unsigned OldVal = MI->getOperand(0).getReg();
  unsigned Ptr = MI->getOperand(1).getReg();
  unsigned Incr = MI->getOperand(2).getReg();

  unsigned StoreVal = RegInfo.createVirtualRegister(RC);
  unsigned AndRes = RegInfo.createVirtualRegister(RC);
  unsigned Success = RegInfo.createVirtualRegister(RC);

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = BB;
  ++It;
  MF->insert(It, loopMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //  thisMBB:
  //    ...
  //    fallthrough --> loopMBB
  BB->addSuccessor(loopMBB);
  loopMBB->addSuccessor(loopMBB);
  loopMBB->addSuccessor(exitMBB);

  //  loopMBB:
  //    ll oldval, 0(ptr)
  //    <binop> storeval, oldval, incr
  //    sc success, storeval, 0(ptr)
  //    beq success, $0, loopMBB
  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LL), OldVal).addReg(Ptr).addImm(0);
  if (Nand) {
    //  and andres, oldval, incr
    //  nor storeval, $0, andres
    BuildMI(BB, dl, TII->get(AND), AndRes).addReg(OldVal).addReg(Incr);
    BuildMI(BB, dl, TII->get(NOR), StoreVal).addReg(ZERO).addReg(AndRes);
  } else if (BinOpcode) {
    //  <binop> storeval, oldval, incr
    BuildMI(BB, dl, TII->get(BinOpcode), StoreVal).addReg(OldVal).addReg(Incr);
  } else {
    // Atomic swap: store the increment value unchanged.
    StoreVal = Incr;
  }
  BuildMI(BB, dl, TII->get(SC), Success).addReg(StoreVal).addReg(Ptr).addImm(0);
  BuildMI(BB, dl, TII->get(BEQ)).addReg(Success).addReg(ZERO).addMBB(loopMBB);

  MI->eraseFromParent();   // The instruction is gone now.

  return exitMBB;
}

// Expand an 8/16-bit atomic read-modify-write into a word-sized LL/SC loop:
// the word containing the value is loaded, the relevant byte/halfword lane is
// masked and updated, and the result is shifted back down and sign-extended
// into Dest. Also handles swap (BinOpcode == 0) and NAND (Nand == true).
MachineBasicBlock *
MipsTargetLowering::EmitAtomicBinaryPartword(MachineInstr *MI,
                                             MachineBasicBlock *BB,
                                             unsigned Size, unsigned BinOpcode,
                                             bool Nand) const {
  assert((Size == 1 || Size == 2) &&
      "Unsupported size for EmitAtomicBinaryPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
  unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;

  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Ptr = MI->getOperand(1).getReg();
  unsigned Incr = MI->getOperand(2).getReg();

  unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
  unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
  unsigned Mask = RegInfo.createVirtualRegister(RC);
  unsigned Mask2 = RegInfo.createVirtualRegister(RC);
  unsigned NewVal = RegInfo.createVirtualRegister(RC);
  unsigned OldVal = RegInfo.createVirtualRegister(RC);
  unsigned Incr2 = RegInfo.createVirtualRegister(RC);
  unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
  unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
  unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
  unsigned AndRes = RegInfo.createVirtualRegister(RC);
  unsigned BinOpRes = RegInfo.createVirtualRegister(RC);
  unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
  unsigned StoreVal = RegInfo.createVirtualRegister(RC);
  unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
  unsigned SrlRes = RegInfo.createVirtualRegister(RC);
  unsigned SllRes = RegInfo.createVirtualRegister(RC);
  unsigned Success = RegInfo.createVirtualRegister(RC);

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = BB;
  ++It;
  MF->insert(It, loopMBB);
  MF->insert(It, sinkMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(loopMBB);
  loopMBB->addSuccessor(loopMBB);
  loopMBB->addSuccessor(sinkMBB);
  sinkMBB->addSuccessor(exitMBB);

  //  thisMBB:
  //    addiu   masklsb2,$0,-4                # 0xfffffffc
  //    and     alignedaddr,ptr,masklsb2
  //    andi    ptrlsb2,ptr,3
  //    sll     shiftamt,ptrlsb2,3
  //    ori     maskupper,$0,255              # 0xff
  //    sll     mask,maskupper,shiftamt
  //    nor     mask2,$0,mask
  //    sll     incr2,incr,shiftamt

  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, dl, TII->get(Mips::ADDiu), MaskLSB2)
    .addReg(Mips::ZERO).addImm(-4);
  BuildMI(BB, dl, TII->get(Mips::AND), AlignedAddr)
    .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, dl, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
  BuildMI(BB, dl, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  BuildMI(BB, dl, TII->get(Mips::ORi), MaskUpper)
    .addReg(Mips::ZERO).addImm(MaskImm);
  // NOTE: in this target description the variable-shift instructions take
  // the shift amount as their first source operand.
  BuildMI(BB, dl, TII->get(Mips::SLLV), Mask)
    .addReg(ShiftAmt).addReg(MaskUpper);
  BuildMI(BB, dl, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, dl, TII->get(Mips::SLLV), Incr2).addReg(ShiftAmt).addReg(Incr);

  // atomic.load.binop
  //  loopMBB:
  //    ll      oldval,0(alignedaddr)
  //    binop   binopres,oldval,incr2
  //    and     newval,binopres,mask
  //    and     maskedoldval0,oldval,mask2
  //    or      storeval,maskedoldval0,newval
  //    sc      success,storeval,0(alignedaddr)
  //    beq     success,$0,loopMBB

  // atomic.swap
  //  loopMBB:
  //    ll      oldval,0(alignedaddr)
  //    and     newval,incr2,mask
  //    and     maskedoldval0,oldval,mask2
  //    or      storeval,maskedoldval0,newval
  //    sc      success,storeval,0(alignedaddr)
  //    beq     success,$0,loopMBB

  BB = loopMBB;
  BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
  if (Nand) {
    //  and andres, oldval, incr2
    //  nor binopres, $0, andres
    //  and newval, binopres, mask
    BuildMI(BB, dl, TII->get(Mips::AND), AndRes).addReg(OldVal).addReg(Incr2);
    BuildMI(BB, dl, TII->get(Mips::NOR), BinOpRes)
      .addReg(Mips::ZERO).addReg(AndRes);
    BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
  } else if (BinOpcode) {
    //  <binop> binopres, oldval, incr2
    //  and newval, binopres, mask
    BuildMI(BB, dl, TII->get(BinOpcode), BinOpRes).addReg(OldVal).addReg(Incr2);
    BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(BinOpRes).addReg(Mask);
  } else {// atomic.swap
    //  and newval, incr2, mask
    BuildMI(BB, dl, TII->get(Mips::AND), NewVal).addReg(Incr2).addReg(Mask);
  }

  BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
    .addReg(OldVal).addReg(Mask2);
  BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
    .addReg(MaskedOldVal0).addReg(NewVal);
  BuildMI(BB, dl, TII->get(SC), Success)
    .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
  BuildMI(BB, dl, TII->get(Mips::BEQ))
    .addReg(Success).addReg(Mips::ZERO).addMBB(loopMBB);

  //  sinkMBB:
  //    and     maskedoldval1,oldval,mask
  //    srl     srlres,maskedoldval1,shiftamt
  //    sll     sllres,srlres,24
  //    sra     dest,sllres,24
  // The sll/sra pair sign-extends the extracted byte/halfword into Dest.
  BB = sinkMBB;
  int64_t ShiftImm = (Size == 1) ? 24 : 16;

  BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal1)
    .addReg(OldVal).addReg(Mask);
  BuildMI(BB, dl, TII->get(Mips::SRLV), SrlRes)
    .addReg(ShiftAmt).addReg(MaskedOldVal1);
  BuildMI(BB, dl, TII->get(Mips::SLL), SllRes)
    .addReg(SrlRes).addImm(ShiftImm);
  BuildMI(BB, dl, TII->get(Mips::SRA), Dest)
    .addReg(SllRes).addImm(ShiftImm);

  MI->eraseFromParent();   // The instruction is gone now.

  return exitMBB;
}

// Expand a word/doubleword ATOMIC_CMP_SWAP pseudo into a two-block LL/SC
// loop: loop1 loads and compares against OldVal (exiting on mismatch),
// loop2 attempts the conditional store and retries on failure.
MachineBasicBlock *
MipsTargetLowering::EmitAtomicCmpSwap(MachineInstr *MI,
                                      MachineBasicBlock *BB,
                                      unsigned Size) const {
  assert((Size == 4 || Size == 8) && "Unsupported size for EmitAtomicCmpSwap.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LL, SC, ZERO, BNE, BEQ;

  if (Size == 4) {
    LL = IsN64 ? Mips::LL_P8 : Mips::LL;
    SC = IsN64 ? Mips::SC_P8 : Mips::SC;
    ZERO = Mips::ZERO;
    BNE = Mips::BNE;
    BEQ = Mips::BEQ;
  }
  else {
    LL = IsN64 ? Mips::LLD_P8 : Mips::LLD;
    SC = IsN64 ? Mips::SCD_P8 : Mips::SCD;
    ZERO = Mips::ZERO_64;
    BNE = Mips::BNE64;
    BEQ = Mips::BEQ64;
  }

  unsigned Dest    = MI->getOperand(0).getReg();
  unsigned Ptr     = MI->getOperand(1).getReg();
  unsigned OldVal  = MI->getOperand(2).getReg();
  unsigned NewVal  = MI->getOperand(3).getReg();

  unsigned Success = RegInfo.createVirtualRegister(RC);

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = BB;
  ++It;
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  //  thisMBB:
  //    ...
  //    fallthrough --> loop1MBB
  BB->addSuccessor(loop1MBB);
  loop1MBB->addSuccessor(exitMBB);
  loop1MBB->addSuccessor(loop2MBB);
  loop2MBB->addSuccessor(loop1MBB);
  loop2MBB->addSuccessor(exitMBB);

  // loop1MBB:
  //   ll dest, 0(ptr)
  //   bne dest, oldval, exitMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(LL), Dest).addReg(Ptr).addImm(0);
  BuildMI(BB, dl, TII->get(BNE))
    .addReg(Dest).addReg(OldVal).addMBB(exitMBB);

  // loop2MBB:
  //   sc success, newval, 0(ptr)
  //   beq success, $0, loop1MBB
  BB = loop2MBB;
  BuildMI(BB, dl, TII->get(SC), Success)
    .addReg(NewVal).addReg(Ptr).addImm(0);
  BuildMI(BB, dl, TII->get(BEQ))
    .addReg(Success).addReg(ZERO).addMBB(loop1MBB);

  MI->eraseFromParent();   // The instruction is gone now.

  return exitMBB;
}

// Expand an 8/16-bit ATOMIC_CMP_SWAP pseudo: the comparand and new value are
// masked and shifted into the target byte/halfword lane of the containing
// word, then a word-sized LL/SC compare-and-swap loop runs; the old lane
// value is finally shifted down and sign-extended into Dest.
MachineBasicBlock *
MipsTargetLowering::EmitAtomicCmpSwapPartword(MachineInstr *MI,
                                              MachineBasicBlock *BB,
                                              unsigned Size) const {
  assert((Size == 1 || Size == 2) &&
      "Unsupported size for EmitAtomicCmpSwapPartial.");

  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  unsigned LL = IsN64 ? Mips::LL_P8 : Mips::LL;
  unsigned SC = IsN64 ? Mips::SC_P8 : Mips::SC;

  unsigned Dest    = MI->getOperand(0).getReg();
  unsigned Ptr     = MI->getOperand(1).getReg();
  unsigned CmpVal  = MI->getOperand(2).getReg();
  unsigned NewVal  = MI->getOperand(3).getReg();

  unsigned AlignedAddr = RegInfo.createVirtualRegister(RC);
  unsigned ShiftAmt = RegInfo.createVirtualRegister(RC);
  unsigned Mask = RegInfo.createVirtualRegister(RC);
  unsigned Mask2 = RegInfo.createVirtualRegister(RC);
  unsigned ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
  unsigned OldVal = RegInfo.createVirtualRegister(RC);
  unsigned MaskedOldVal0 = RegInfo.createVirtualRegister(RC);
  unsigned ShiftedNewVal = RegInfo.createVirtualRegister(RC);
  unsigned MaskLSB2 = RegInfo.createVirtualRegister(RC);
  unsigned PtrLSB2 = RegInfo.createVirtualRegister(RC);
  unsigned MaskUpper = RegInfo.createVirtualRegister(RC);
  unsigned MaskedCmpVal = RegInfo.createVirtualRegister(RC);
  unsigned MaskedNewVal = RegInfo.createVirtualRegister(RC);
  unsigned MaskedOldVal1 = RegInfo.createVirtualRegister(RC);
  unsigned StoreVal = RegInfo.createVirtualRegister(RC);
  unsigned SrlRes = RegInfo.createVirtualRegister(RC);
  unsigned SllRes = RegInfo.createVirtualRegister(RC);
  unsigned Success = RegInfo.createVirtualRegister(RC);

  // insert new blocks after the current block
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineFunction::iterator It = BB;
  ++It;
  MF->insert(It, loop1MBB);
  MF->insert(It, loop2MBB);
  MF->insert(It, sinkMBB);
  MF->insert(It, exitMBB);

  // Transfer the remainder of BB and its successor edges to exitMBB.
  exitMBB->splice(exitMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)), BB->end());
  exitMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(loop1MBB);
  loop1MBB->addSuccessor(sinkMBB);
  loop1MBB->addSuccessor(loop2MBB);
  loop2MBB->addSuccessor(loop1MBB);
  loop2MBB->addSuccessor(sinkMBB);
  sinkMBB->addSuccessor(exitMBB);

  // FIXME: computation of newval2 can be moved to loop2MBB.
  //  thisMBB:
  //    addiu   masklsb2,$0,-4                # 0xfffffffc
  //    and     alignedaddr,ptr,masklsb2
  //    andi    ptrlsb2,ptr,3
  //    sll     shiftamt,ptrlsb2,3
  //    ori     maskupper,$0,255              # 0xff
  //    sll     mask,maskupper,shiftamt
  //    nor     mask2,$0,mask
  //    andi    maskedcmpval,cmpval,255
  //    sll     shiftedcmpval,maskedcmpval,shiftamt
  //    andi    maskednewval,newval,255
  //    sll     shiftednewval,maskednewval,shiftamt
  int64_t MaskImm = (Size == 1) ? 255 : 65535;
  BuildMI(BB, dl, TII->get(Mips::ADDiu), MaskLSB2)
    .addReg(Mips::ZERO).addImm(-4);
  BuildMI(BB, dl, TII->get(Mips::AND), AlignedAddr)
    .addReg(Ptr).addReg(MaskLSB2);
  BuildMI(BB, dl, TII->get(Mips::ANDi), PtrLSB2).addReg(Ptr).addImm(3);
  BuildMI(BB, dl, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
  BuildMI(BB, dl, TII->get(Mips::ORi), MaskUpper)
    .addReg(Mips::ZERO).addImm(MaskImm);
  // NOTE: variable-shift instructions take the shift amount as their first
  // source operand in this target description.
  BuildMI(BB, dl, TII->get(Mips::SLLV), Mask)
    .addReg(ShiftAmt).addReg(MaskUpper);
  BuildMI(BB, dl, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
  BuildMI(BB, dl, TII->get(Mips::ANDi), MaskedCmpVal)
    .addReg(CmpVal).addImm(MaskImm);
  BuildMI(BB, dl, TII->get(Mips::SLLV), ShiftedCmpVal)
    .addReg(ShiftAmt).addReg(MaskedCmpVal);
  BuildMI(BB, dl, TII->get(Mips::ANDi), MaskedNewVal)
    .addReg(NewVal).addImm(MaskImm);
  BuildMI(BB, dl, TII->get(Mips::SLLV), ShiftedNewVal)
    .addReg(ShiftAmt).addReg(MaskedNewVal);

  //  loop1MBB:
  //    ll      oldval,0(alignedaddr)
  //    and     maskedoldval0,oldval,mask
  //    bne     maskedoldval0,shiftedcmpval,sinkMBB
  BB = loop1MBB;
  BuildMI(BB, dl, TII->get(LL), OldVal).addReg(AlignedAddr).addImm(0);
  BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal0)
    .addReg(OldVal).addReg(Mask);
  BuildMI(BB, dl, TII->get(Mips::BNE))
    .addReg(MaskedOldVal0).addReg(ShiftedCmpVal).addMBB(sinkMBB);

  //  loop2MBB:
  //    and     maskedoldval1,oldval,mask2
  //    or      storeval,maskedoldval1,shiftednewval
  //    sc      success,storeval,0(alignedaddr)
  //    beq     success,$0,loop1MBB
  BB = loop2MBB;
  BuildMI(BB, dl, TII->get(Mips::AND), MaskedOldVal1)
    .addReg(OldVal).addReg(Mask2);
  BuildMI(BB, dl, TII->get(Mips::OR), StoreVal)
    .addReg(MaskedOldVal1).addReg(ShiftedNewVal);
  BuildMI(BB, dl, TII->get(SC), Success)
      .addReg(StoreVal).addReg(AlignedAddr).addImm(0);
  BuildMI(BB, dl, TII->get(Mips::BEQ))
      .addReg(Success).addReg(Mips::ZERO).addMBB(loop1MBB);

  //  sinkMBB:
  //    srl     srlres,maskedoldval0,shiftamt
  //    sll     sllres,srlres,24
  //    sra     dest,sllres,24
  // The sll/sra pair sign-extends the extracted byte/halfword into Dest.
  BB = sinkMBB;
  int64_t ShiftImm = (Size == 1) ? 24 : 16;

  BuildMI(BB, dl, TII->get(Mips::SRLV), SrlRes)
      .addReg(ShiftAmt).addReg(MaskedOldVal0);
  BuildMI(BB, dl, TII->get(Mips::SLL), SllRes)
      .addReg(SrlRes).addImm(ShiftImm);
  BuildMI(BB, dl, TII->get(Mips::SRA), Dest)
      .addReg(SllRes).addImm(ShiftImm);

  MI->eraseFromParent();   // The instruction is gone now.

  return exitMBB;
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
// Lower dynamic stack allocation: subtract the (already legalized) size from
// SP and return a MipsISD::DynAlloc node anchored at the function's dynamic
// allocation frame index.
SDValue MipsTargetLowering::
LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
{
  MachineFunction &MF = DAG.getMachineFunction();
  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
  unsigned SP = IsN64 ? Mips::SP_64 : Mips::SP;

  assert(getTargetMachine().getFrameLowering()->getStackAlignment() >=
         cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue() &&
         "Cannot lower if the alignment of the allocated space is larger than \
          that of the stack.");

  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  DebugLoc dl = Op.getDebugLoc();

  // Get a reference from Mips stack pointer
  SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SP, getPointerTy());

  // Subtract the dynamic size from the actual stack size to
  // obtain the new stack size.
  SDValue Sub = DAG.getNode(ISD::SUB, dl, getPointerTy(), StackPointer, Size);

  // The Sub result contains the new stack start address, so it
  // must be placed in the stack pointer register.
  Chain = DAG.getCopyToReg(StackPointer.getValue(1), dl, SP, Sub, SDValue());

  // This node always has two return values: a new stack pointer
  // value and a chain
  SDVTList VTLs = DAG.getVTList(getPointerTy(), MVT::Other);
  SDValue Ptr = DAG.getFrameIndex(MipsFI->getDynAllocFI(), getPointerTy());
  SDValue Ops[] = { Chain, Ptr, Chain.getValue(1) };

  return DAG.getNode(MipsISD::DynAlloc, dl, VTLs, Ops, 3);
}

SDValue MipsTargetLowering::
LowerBRCOND(SDValue Op, SelectionDAG &DAG) const
{
  // The first operand is the chain, the second is the condition, the third is
  // the block to branch to if the condition is true.
  SDValue Chain = Op.getOperand(0);
  SDValue Dest = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();

  SDValue CondRes = CreateFPCmp(DAG, Op.getOperand(1));

  // Return if flag is not set by a floating point comparison.
+ if (CondRes.getOpcode() != MipsISD::FPCmp) + return Op; + + SDValue CCNode = CondRes.getOperand(2); + Mips::CondCode CC = + (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue(); + SDValue BrCode = DAG.getConstant(GetFPBranchCodeFromCond(CC), MVT::i32); + + return DAG.getNode(MipsISD::FPBrcond, dl, Op.getValueType(), Chain, BrCode, + Dest, CondRes); +} + +SDValue MipsTargetLowering:: +LowerSELECT(SDValue Op, SelectionDAG &DAG) const +{ + SDValue Cond = CreateFPCmp(DAG, Op.getOperand(0)); + + // Return if flag is not set by a floating point comparison. + if (Cond.getOpcode() != MipsISD::FPCmp) + return Op; + + return CreateCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2), + Op.getDebugLoc()); +} + +SDValue MipsTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const { + SDValue Cond = CreateFPCmp(DAG, Op); + + assert(Cond.getOpcode() == MipsISD::FPCmp && + "Floating point operand expected."); + + SDValue True = DAG.getConstant(1, MVT::i32); + SDValue False = DAG.getConstant(0, MVT::i32); + + return CreateCMovFP(DAG, Cond, True, False, Op.getDebugLoc()); +} + +SDValue MipsTargetLowering::LowerGlobalAddress(SDValue Op, + SelectionDAG &DAG) const { + // FIXME there isn't actually debug info here + DebugLoc dl = Op.getDebugLoc(); + const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal(); + + if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { + SDVTList VTs = DAG.getVTList(MVT::i32); + + MipsTargetObjectFile &TLOF = (MipsTargetObjectFile&)getObjFileLowering(); + + // %gp_rel relocation + if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) { + SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, + MipsII::MO_GPREL); + SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, dl, VTs, &GA, 1); + SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32); + return DAG.getNode(ISD::ADD, dl, MVT::i32, GOT, GPRelNode); + } + // %hi/%lo relocation + SDValue GAHi = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, + 
MipsII::MO_ABS_HI); + SDValue GALo = DAG.getTargetGlobalAddress(GV, dl, MVT::i32, 0, + MipsII::MO_ABS_LO); + SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, VTs, &GAHi, 1); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, GALo); + return DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo); + } + + EVT ValTy = Op.getValueType(); + bool HasGotOfst = (GV->hasInternalLinkage() || + (GV->hasLocalLinkage() && !isa<Function>(GV))); + unsigned GotFlag = HasMips64 ? + (HasGotOfst ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT_DISP) : + (HasGotOfst ? MipsII::MO_GOT : MipsII::MO_GOT16); + SDValue GA = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0, GotFlag); + GA = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), GA); + SDValue ResNode = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), GA, + MachinePointerInfo(), false, false, false, 0); + // On functions and global targets not internal linked only + // a load from got/GP is necessary for PIC to work. + if (!HasGotOfst) + return ResNode; + SDValue GALo = DAG.getTargetGlobalAddress(GV, dl, ValTy, 0, + HasMips64 ? MipsII::MO_GOT_OFST : + MipsII::MO_ABS_LO); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, GALo); + return DAG.getNode(ISD::ADD, dl, ValTy, ResNode, Lo); +} + +SDValue MipsTargetLowering::LowerBlockAddress(SDValue Op, + SelectionDAG &DAG) const { + const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress(); + // FIXME there isn't actually debug info here + DebugLoc dl = Op.getDebugLoc(); + + if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { + // %hi/%lo relocation + SDValue BAHi = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_HI); + SDValue BALo = DAG.getBlockAddress(BA, MVT::i32, true, MipsII::MO_ABS_LO); + SDValue Hi = DAG.getNode(MipsISD::Hi, dl, MVT::i32, BAHi); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, BALo); + return DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, Lo); + } + + EVT ValTy = Op.getValueType(); + unsigned GOTFlag = HasMips64 ? 
MipsII::MO_GOT_PAGE : MipsII::MO_GOT; + unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO; + SDValue BAGOTOffset = DAG.getBlockAddress(BA, ValTy, true, GOTFlag); + BAGOTOffset = DAG.getNode(MipsISD::Wrapper, dl, ValTy, + GetGlobalReg(DAG, ValTy), BAGOTOffset); + SDValue BALOOffset = DAG.getBlockAddress(BA, ValTy, true, OFSTFlag); + SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), BAGOTOffset, + MachinePointerInfo(), false, false, false, 0); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, BALOOffset); + return DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo); +} + +SDValue MipsTargetLowering:: +LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const +{ + // If the relocation model is PIC, use the General Dynamic TLS Model or + // Local Dynamic TLS model, otherwise use the Initial Exec or + // Local Exec TLS Model. + + GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op); + DebugLoc dl = GA->getDebugLoc(); + const GlobalValue *GV = GA->getGlobal(); + EVT PtrVT = getPointerTy(); + + if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { + // General Dynamic TLS Model + bool LocalDynamic = GV->hasInternalLinkage(); + unsigned Flag = LocalDynamic ? 
MipsII::MO_TLSLDM :MipsII::MO_TLSGD; + SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, Flag); + SDValue Argument = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, + GetGlobalReg(DAG, PtrVT), TGA); + unsigned PtrSize = PtrVT.getSizeInBits(); + IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize); + + SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT); + + ArgListTy Args; + ArgListEntry Entry; + Entry.Node = Argument; + Entry.Ty = PtrTy; + Args.push_back(Entry); + + std::pair<SDValue, SDValue> CallResult = + LowerCallTo(DAG.getEntryNode(), PtrTy, + false, false, false, false, 0, CallingConv::C, + /*isTailCall=*/false, /*doesNotRet=*/false, + /*isReturnValueUsed=*/true, + TlsGetAddr, Args, DAG, dl); + + SDValue Ret = CallResult.first; + + if (!LocalDynamic) + return Ret; + + SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + MipsII::MO_DTPREL_HI); + SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi); + SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + MipsII::MO_DTPREL_LO); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo); + SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Ret); + return DAG.getNode(ISD::ADD, dl, PtrVT, Add, Lo); + } + + SDValue Offset; + if (GV->isDeclaration()) { + // Initial Exec TLS Model + SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + MipsII::MO_GOTTPREL); + TGA = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT), + TGA); + Offset = DAG.getLoad(PtrVT, dl, + DAG.getEntryNode(), TGA, MachinePointerInfo(), + false, false, false, 0); + } else { + // Local Exec TLS Model + SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + MipsII::MO_TPREL_HI); + SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, + MipsII::MO_TPREL_LO); + SDValue Hi = DAG.getNode(MipsISD::Hi, dl, PtrVT, TGAHi); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, TGALo); + Offset = DAG.getNode(ISD::ADD, dl, PtrVT, Hi, Lo); + } + + SDValue ThreadPointer = 
DAG.getNode(MipsISD::ThreadPointer, dl, PtrVT); + return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset); +} + +SDValue MipsTargetLowering:: +LowerJumpTable(SDValue Op, SelectionDAG &DAG) const +{ + SDValue HiPart, JTI, JTILo; + // FIXME there isn't actually debug info here + DebugLoc dl = Op.getDebugLoc(); + bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_; + EVT PtrVT = Op.getValueType(); + JumpTableSDNode *JT = cast<JumpTableSDNode>(Op); + + if (!IsPIC && !IsN64) { + JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_HI); + HiPart = DAG.getNode(MipsISD::Hi, dl, PtrVT, JTI); + JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MipsII::MO_ABS_LO); + } else {// Emit Load from Global Pointer + unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT; + unsigned OfstFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO; + JTI = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, GOTFlag); + JTI = DAG.getNode(MipsISD::Wrapper, dl, PtrVT, GetGlobalReg(DAG, PtrVT), + JTI); + HiPart = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), JTI, + MachinePointerInfo(), false, false, false, 0); + JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OfstFlag); + } + + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, PtrVT, JTILo); + return DAG.getNode(ISD::ADD, dl, PtrVT, HiPart, Lo); +} + +SDValue MipsTargetLowering:: +LowerConstantPool(SDValue Op, SelectionDAG &DAG) const +{ + SDValue ResNode; + ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op); + const Constant *C = N->getConstVal(); + // FIXME there isn't actually debug info here + DebugLoc dl = Op.getDebugLoc(); + + // gp_rel relocation + // FIXME: we should reference the constant pool using small data sections, + // but the asm printer currently doesn't support this feature without + // hacking it. This feature should come soon so we can uncomment the + // stuff below. 
+ //if (IsInSmallSection(C->getType())) { + // SDValue GPRelNode = DAG.getNode(MipsISD::GPRel, MVT::i32, CP); + // SDValue GOT = DAG.getGLOBAL_OFFSET_TABLE(MVT::i32); + // ResNode = DAG.getNode(ISD::ADD, MVT::i32, GOT, GPRelNode); + + if (getTargetMachine().getRelocationModel() != Reloc::PIC_ && !IsN64) { + SDValue CPHi = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), + N->getOffset(), MipsII::MO_ABS_HI); + SDValue CPLo = DAG.getTargetConstantPool(C, MVT::i32, N->getAlignment(), + N->getOffset(), MipsII::MO_ABS_LO); + SDValue HiPart = DAG.getNode(MipsISD::Hi, dl, MVT::i32, CPHi); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, MVT::i32, CPLo); + ResNode = DAG.getNode(ISD::ADD, dl, MVT::i32, HiPart, Lo); + } else { + EVT ValTy = Op.getValueType(); + unsigned GOTFlag = HasMips64 ? MipsII::MO_GOT_PAGE : MipsII::MO_GOT; + unsigned OFSTFlag = HasMips64 ? MipsII::MO_GOT_OFST : MipsII::MO_ABS_LO; + SDValue CP = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(), + N->getOffset(), GOTFlag); + CP = DAG.getNode(MipsISD::Wrapper, dl, ValTy, GetGlobalReg(DAG, ValTy), CP); + SDValue Load = DAG.getLoad(ValTy, dl, DAG.getEntryNode(), CP, + MachinePointerInfo::getConstantPool(), false, + false, false, 0); + SDValue CPLo = DAG.getTargetConstantPool(C, ValTy, N->getAlignment(), + N->getOffset(), OFSTFlag); + SDValue Lo = DAG.getNode(MipsISD::Lo, dl, ValTy, CPLo); + ResNode = DAG.getNode(ISD::ADD, dl, ValTy, Load, Lo); + } + + return ResNode; +} + +SDValue MipsTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const { + MachineFunction &MF = DAG.getMachineFunction(); + MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>(); + + DebugLoc dl = Op.getDebugLoc(); + SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), + getPointerTy()); + + // vastart just stores the address of the VarArgsFrameIndex slot into the + // memory location argument. 
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue(); + return DAG.getStore(Op.getOperand(0), dl, FI, Op.getOperand(1), + MachinePointerInfo(SV), false, false, 0); +} + +static SDValue LowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG, bool HasR2) { + EVT TyX = Op.getOperand(0).getValueType(); + EVT TyY = Op.getOperand(1).getValueType(); + SDValue Const1 = DAG.getConstant(1, MVT::i32); + SDValue Const31 = DAG.getConstant(31, MVT::i32); + DebugLoc DL = Op.getDebugLoc(); + SDValue Res; + + // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it + // to i32. + SDValue X = (TyX == MVT::f32) ? + DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) : + DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0), + Const1); + SDValue Y = (TyY == MVT::f32) ? + DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) : + DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1), + Const1); + + if (HasR2) { + // ext E, Y, 31, 1 ; extract bit31 of Y + // ins X, E, 31, 1 ; insert extracted bit at bit31 of X + SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1); + Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X); + } else { + // sll SllX, X, 1 + // srl SrlX, SllX, 1 + // srl SrlY, Y, 31 + // sll SllY, SrlX, 31 + // or Or, SrlX, SllY + SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1); + SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1); + SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31); + SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31); + Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY); + } + + if (TyX == MVT::f32) + return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res); + + SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, + Op.getOperand(0), DAG.getConstant(0, MVT::i32)); + return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res); +} + +static SDValue 
LowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG, bool HasR2) { + unsigned WidthX = Op.getOperand(0).getValueSizeInBits(); + unsigned WidthY = Op.getOperand(1).getValueSizeInBits(); + EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY); + SDValue Const1 = DAG.getConstant(1, MVT::i32); + DebugLoc DL = Op.getDebugLoc(); + + // Bitcast to integer nodes. + SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0)); + SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1)); + + if (HasR2) { + // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y + // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X + SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y, + DAG.getConstant(WidthY - 1, MVT::i32), Const1); + + if (WidthX > WidthY) + E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E); + else if (WidthY > WidthX) + E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E); + + SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E, + DAG.getConstant(WidthX - 1, MVT::i32), Const1, X); + return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I); + } + + // (d)sll SllX, X, 1 + // (d)srl SrlX, SllX, 1 + // (d)srl SrlY, Y, width(Y)-1 + // (d)sll SllY, SrlX, width(Y)-1 + // or Or, SrlX, SllY + SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1); + SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1); + SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y, + DAG.getConstant(WidthY - 1, MVT::i32)); + + if (WidthX > WidthY) + SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY); + else if (WidthY > WidthX) + SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY); + + SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY, + DAG.getConstant(WidthX - 1, MVT::i32)); + SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY); + return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or); +} + +SDValue +MipsTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const { + if (Subtarget->hasMips64()) + return LowerFCOPYSIGN64(Op, DAG, 
Subtarget->hasMips32r2()); + + return LowerFCOPYSIGN32(Op, DAG, Subtarget->hasMips32r2()); +} + +static SDValue LowerFABS32(SDValue Op, SelectionDAG &DAG, bool HasR2) { + SDValue Res, Const1 = DAG.getConstant(1, MVT::i32); + DebugLoc DL = Op.getDebugLoc(); + + // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it + // to i32. + SDValue X = (Op.getValueType() == MVT::f32) ? + DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) : + DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0), + Const1); + + // Clear MSB. + if (HasR2) + Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, + DAG.getRegister(Mips::ZERO, MVT::i32), + DAG.getConstant(31, MVT::i32), Const1, X); + else { + SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1); + Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1); + } + + if (Op.getValueType() == MVT::f32) + return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res); + + SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, + Op.getOperand(0), DAG.getConstant(0, MVT::i32)); + return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res); +} + +static SDValue LowerFABS64(SDValue Op, SelectionDAG &DAG, bool HasR2) { + SDValue Res, Const1 = DAG.getConstant(1, MVT::i32); + DebugLoc DL = Op.getDebugLoc(); + + // Bitcast to integer node. + SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0)); + + // Clear MSB. 
+ if (HasR2) + Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64, + DAG.getRegister(Mips::ZERO_64, MVT::i64), + DAG.getConstant(63, MVT::i32), Const1, X); + else { + SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1); + Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1); + } + + return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res); +} + +SDValue +MipsTargetLowering::LowerFABS(SDValue Op, SelectionDAG &DAG) const { + if (Subtarget->hasMips64() && (Op.getValueType() == MVT::f64)) + return LowerFABS64(Op, DAG, Subtarget->hasMips32r2()); + + return LowerFABS32(Op, DAG, Subtarget->hasMips32r2()); +} + +SDValue MipsTargetLowering:: +LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const { + // check the depth + assert((cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() == 0) && + "Frame address can only be determined for current frame."); + + MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo(); + MFI->setFrameAddressIsTaken(true); + EVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); + SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, + IsN64 ? Mips::FP_64 : Mips::FP, VT); + return FrameAddr; +} + +// TODO: set SType according to the desired memory barrier behavior. +SDValue +MipsTargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const { + unsigned SType = 0; + DebugLoc dl = Op.getDebugLoc(); + return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0), + DAG.getConstant(SType, MVT::i32)); +} + +SDValue MipsTargetLowering::LowerATOMIC_FENCE(SDValue Op, + SelectionDAG& DAG) const { + // FIXME: Need pseudo-fence for 'singlethread' fences + // FIXME: Set SType for weaker fences where supported/appropriate. 
+ unsigned SType = 0; + DebugLoc dl = Op.getDebugLoc(); + return DAG.getNode(MipsISD::Sync, dl, MVT::Other, Op.getOperand(0), + DAG.getConstant(SType, MVT::i32)); +} + +//===----------------------------------------------------------------------===// +// Calling Convention Implementation +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// TODO: Implement a generic logic using tblgen that can support this. +// Mips O32 ABI rules: +// --- +// i32 - Passed in A0, A1, A2, A3 and stack +// f32 - Only passed in f32 registers if no int reg has been used yet to hold +// an argument. Otherwise, passed in A1, A2, A3 and stack. +// f64 - Only passed in two aliased f32 registers if no int reg has been used +// yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is +// not used, it must be shadowed. If only A3 is avaiable, shadow it and +// go to stack. +// +// For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack. 
//===----------------------------------------------------------------------===//

/// CC_MipsO32 - Custom argument-assignment routine implementing the Mips O32
/// calling-convention rules summarized in the comment block above.
///
/// \param ValNo    index of the argument being assigned.
/// \param ValVT    the argument's value type.
/// \param LocVT    the type the argument is passed as (may be promoted here).
/// \param LocInfo  how ValVT relates to LocVT (updated on promotion).
/// \param ArgFlags argument attributes (byval, sext/zext, alignment, ...).
/// \param State    running allocation state; registers/stack are marked here.
/// \returns false always ("assignment succeeded"); unsupported value types
///          hit llvm_unreachable instead of returning true.
static bool CC_MipsO32(unsigned ValNo, MVT ValVT,
                       MVT LocVT, CCValAssign::LocInfo LocInfo,
                       ISD::ArgFlagsTy ArgFlags, CCState &State) {

  static const unsigned IntRegsSize=4, FloatRegsSize=2;

  // O32 integer argument registers.
  static const uint16_t IntRegs[] = {
      Mips::A0, Mips::A1, Mips::A2, Mips::A3
  };
  // O32 single-precision FP argument registers.
  static const uint16_t F32Regs[] = {
      Mips::F12, Mips::F14
  };
  // O32 double-precision FP argument registers.
  static const uint16_t F64Regs[] = {
      Mips::D6, Mips::D7
  };

  // ByVal Args
  if (ArgFlags.isByVal()) {
    State.HandleByVal(ValNo, ValVT, LocVT, LocInfo,
                      1 /*MinSize*/, 4 /*MinAlign*/, ArgFlags);
    // Mark every A-register covered by the byval area as used; NextReg is the
    // stack offset after HandleByVal, rounded up to a whole word count.
    unsigned NextReg = (State.getNextStackOffset() + 3) / 4;
    for (unsigned r = State.getFirstUnallocated(IntRegs, IntRegsSize);
         r < std::min(IntRegsSize, NextReg); ++r)
      State.AllocateReg(IntRegs[r]);
    return false;
  }

  // Promote i8 and i16
  if (LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  unsigned Reg;

  // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
  // is true: function is vararg, argument is 3rd or higher, there is previous
  // argument which is not f32 or f64.
  bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1
      || State.getFirstUnallocated(F32Regs, FloatRegsSize) != ValNo;
  unsigned OrigAlign = ArgFlags.getOrigAlign();
  // An i32 with 8-byte original alignment is one half of a split i64.
  bool isI64 = (ValVT == MVT::i32 && OrigAlign == 8);

  if (ValVT == MVT::i32 || (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
    Reg = State.AllocateReg(IntRegs, IntRegsSize);
    // If this is the first part of an i64 arg,
    // the allocated register must be either A0 or A2.
    if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
      Reg = State.AllocateReg(IntRegs, IntRegsSize);
    LocVT = MVT::i32;
  } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
    // Allocate int register and shadow next int register. If first
    // available register is Mips::A1 or Mips::A3, shadow it too.
    Reg = State.AllocateReg(IntRegs, IntRegsSize);
    if (Reg == Mips::A1 || Reg == Mips::A3)
      Reg = State.AllocateReg(IntRegs, IntRegsSize);
    State.AllocateReg(IntRegs, IntRegsSize);
    LocVT = MVT::i32;
  } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
    // we are guaranteed to find an available float register
    if (ValVT == MVT::f32) {
      Reg = State.AllocateReg(F32Regs, FloatRegsSize);
      // Shadow int register
      State.AllocateReg(IntRegs, IntRegsSize);
    } else {
      Reg = State.AllocateReg(F64Regs, FloatRegsSize);
      // Shadow int registers
      unsigned Reg2 = State.AllocateReg(IntRegs, IntRegsSize);
      if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
        State.AllocateReg(IntRegs, IntRegsSize);
      State.AllocateReg(IntRegs, IntRegsSize);
    }
  } else
    llvm_unreachable("Cannot handle this ValVT.");

  // Stack space is reserved even for register arguments (the O32 home area).
  unsigned SizeInBytes = ValVT.getSizeInBits() >> 3;
  unsigned Offset = State.AllocateStack(SizeInBytes, OrigAlign);

  if (!Reg)
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));

  return false; // CC must always match
}

// N64 integer argument registers.
static const uint16_t Mips64IntRegs[8] =
  {Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
   Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
// N64 double-precision registers that shadow the integer argument registers.
static const uint16_t Mips64DPRegs[8] =
  {Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
   Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64};

/// CC_Mips64Byval - Assign a Mips64 byval argument: the first up-to-8
/// doublewords go in A0_64..T3_64 (with the paired FP register shadowed),
/// the rest on the caller's stack.
/// Returns true, i.e. the byval argument is fully handled here and the
/// generated calling-convention code does no further assignment for it
/// (NOTE(review): custom-handler convention — confirm against
/// MipsGenCallingConv.inc).
static bool CC_Mips64Byval(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State) {
  // Round alignment up to at least 8 and size up to a doubleword multiple.
  unsigned Align = std::max(ArgFlags.getByValAlign(), (unsigned)8);
  unsigned Size = (ArgFlags.getByValSize() + 7) / 8 * 8;
  unsigned FirstIdx = State.getFirstUnallocated(Mips64IntRegs, 8);

  assert(Align <= 16 && "Cannot handle alignments larger than 16.");

  // If byval is 16-byte aligned, the first arg register must be even.
  if ((Align == 16) && (FirstIdx % 2)) {
    State.AllocateReg(Mips64IntRegs[FirstIdx], Mips64DPRegs[FirstIdx]);
    ++FirstIdx;
  }

  // Mark the registers allocated.
  for (unsigned I = FirstIdx; Size && (I < 8); Size -= 8, ++I)
    State.AllocateReg(Mips64IntRegs[I], Mips64DPRegs[I]);

  // Allocate space on caller's stack.
  unsigned Offset = State.AllocateStack(Size, Align);

  // Record only the first location; the byval copy code walks the rest.
  if (FirstIdx < 8)
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Mips64IntRegs[FirstIdx],
                                     LocVT, LocInfo));
  else
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

#include "MipsGenCallingConv.inc"

/// AnalyzeMips64CallOperands - Run the N64 calling convention over all call
/// operands, using CC_MipsN for fixed arguments and CC_MipsN_VarArg for the
/// variadic tail. Aborts (llvm_unreachable) on any type the CC cannot handle.
static void
AnalyzeMips64CallOperands(CCState &CCInfo,
                          const SmallVectorImpl<ISD::OutputArg> &Outs) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    bool R;

    if (Outs[i].IsFixed)
      R = CC_MipsN(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
    else
      R = CC_MipsN_VarArg(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);

    if (R) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString();
#endif
      llvm_unreachable(0);
    }
  }
}

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

static const unsigned O32IntRegsSize = 4;

static const uint16_t O32IntRegs[] = {
  Mips::A0, Mips::A1, Mips::A2, Mips::A3
};

// Return next O32 integer argument register.
/// getNextIntArgReg - Given the first register of an even/odd O32 argument
/// register pair (A0 or A2), return the second register of the pair.
static unsigned getNextIntArgReg(unsigned Reg) {
  assert((Reg == Mips::A0) || (Reg == Mips::A2));
  return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
}

// Write ByVal Arg to arg registers and stack.
//
// O32 byval lowering: the portion of the struct that falls in the first
// 16 bytes of the argument area is loaded word-by-word into A0-A3 (with a
// subword load/shift sequence for a 1-3 byte tail that still fits in a
// register); anything beyond that is memcpy'd into a fixed stack object.
//
//   ByValChain  - in/out: chain updated when a memcpy node is emitted.
//   Chain       - input chain for the loads from the source struct.
//   RegsToPass  - out: (physreg, value) pairs for register-passed words.
//   MemOpChains - out: chains of the emitted load nodes.
//   LastFI      - out: frame index of the stack object, if one is created.
//   Arg         - pointer to the byval struct being passed.
//   isLittle    - target endianness; controls subword merge shifts.
static void
WriteByValArg(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
              SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
              SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
              MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
              const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
              MVT PtrType, bool isLittle) {
  unsigned LocMemOffset = VA.getLocMemOffset();
  unsigned Offset = 0;
  uint32_t RemainingSize = Flags.getByValSize();
  unsigned ByValAlign = Flags.getByValAlign();

  // Copy the first 4 words of byval arg to registers A0 - A3.
  // FIXME: Use a stricter alignment if it enables better optimization in
  // passes run later.
  for (; RemainingSize >= 4 && LocMemOffset < 4 * 4;
       Offset += 4, RemainingSize -= 4, LocMemOffset += 4) {
    SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
                                  DAG.getConstant(Offset, MVT::i32));
    SDValue LoadVal = DAG.getLoad(MVT::i32, dl, Chain, LoadPtr,
                                  MachinePointerInfo(), false, false, false,
                                  std::min(ByValAlign, (unsigned )4));
    MemOpChains.push_back(LoadVal.getValue(1));
    unsigned DstReg = O32IntRegs[LocMemOffset / 4];
    RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
  }

  if (RemainingSize == 0)
    return;

  // If there still is a register available for argument passing, write the
  // remaining part of the structure to it using subword loads and shifts.
  if (LocMemOffset < 4 * 4) {
    assert(RemainingSize <= 3 && RemainingSize >= 1 &&
           "There must be one to three bytes remaining.");
    // Load a halfword first when 3 bytes remain; the last byte is merged in
    // below.
    unsigned LoadSize = (RemainingSize == 3 ? 2 : RemainingSize);
    SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
                                  DAG.getConstant(Offset, MVT::i32));
    unsigned Alignment = std::min(ByValAlign, (unsigned )4);
    SDValue LoadVal = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
                                     LoadPtr, MachinePointerInfo(),
                                     MVT::getIntegerVT(LoadSize * 8), false,
                                     false, Alignment);
    MemOpChains.push_back(LoadVal.getValue(1));

    // If target is big endian, shift it to the most significant half-word or
    // byte.
    if (!isLittle)
      LoadVal = DAG.getNode(ISD::SHL, dl, MVT::i32, LoadVal,
                            DAG.getConstant(32 - LoadSize * 8, MVT::i32));

    Offset += LoadSize;
    RemainingSize -= LoadSize;

    // Read second subword if necessary.
    if (RemainingSize != 0) {
      assert(RemainingSize == 1 && "There must be one byte remaining.");
      LoadPtr = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
                            DAG.getConstant(Offset, MVT::i32));
      unsigned Alignment = std::min(ByValAlign, (unsigned )2);
      SDValue Subword = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
                                       LoadPtr, MachinePointerInfo(),
                                       MVT::i8, false, false, Alignment);
      MemOpChains.push_back(Subword.getValue(1));
      // Insert the loaded byte to LoadVal.
      // FIXME: Use INS if supported by target.
      unsigned ShiftAmt = isLittle ? 16 : 8;
      SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i32, Subword,
                                  DAG.getConstant(ShiftAmt, MVT::i32));
      LoadVal = DAG.getNode(ISD::OR, dl, MVT::i32, LoadVal, Shift);
    }

    unsigned DstReg = O32IntRegs[LocMemOffset / 4];
    RegsToPass.push_back(std::make_pair(DstReg, LoadVal));
    return;
  }

  // Create a fixed object on stack at offset LocMemOffset and copy
  // remaining part of byval arg to it using memcpy.
  SDValue Src = DAG.getNode(ISD::ADD, dl, MVT::i32, Arg,
                            DAG.getConstant(Offset, MVT::i32));
  LastFI = MFI->CreateFixedObject(RemainingSize, LocMemOffset, true);
  SDValue Dst = DAG.getFrameIndex(LastFI, PtrType);
  ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
                             DAG.getConstant(RemainingSize, MVT::i32),
                             std::min(ByValAlign, (unsigned)4),
                             /*isVolatile=*/false, /*AlwaysInline=*/false,
                             MachinePointerInfo(0), MachinePointerInfo(0));
}

// Copy Mips64 byVal arg to registers and stack.
//
// N64 analogue of WriteByValArg: the part of the struct that was assigned to
// registers by CC_Mips64Byval is loaded doubleword-by-doubleword into the
// A0_64..T3_64 range, a sub-doubleword tail is assembled in one register with
// zext loads and shifts, and anything left over is memcpy'd into a fixed
// stack object. Parameters mirror WriteByValArg, with 64-bit pointer/value
// types.
void static
PassByValArg64(SDValue& ByValChain, SDValue Chain, DebugLoc dl,
               SmallVector<std::pair<unsigned, SDValue>, 16>& RegsToPass,
               SmallVector<SDValue, 8>& MemOpChains, int& LastFI,
               MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
               const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
               EVT PtrTy, bool isLittle) {
  unsigned ByValSize = Flags.getByValSize();
  unsigned Alignment = std::min(Flags.getByValAlign(), (unsigned)8);
  bool IsRegLoc = VA.isRegLoc();
  unsigned Offset = 0; // Offset in # of bytes from the beginning of struct.
  unsigned LocMemOffset = 0;
  unsigned MemCpySize = ByValSize;

  if (!IsRegLoc)
    LocMemOffset = VA.getLocMemOffset();
  else {
    // CC_Mips64Byval recorded only the first register; locate it and walk
    // forward through the argument-register array from there.
    const uint16_t *Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8,
                                    VA.getLocReg());
    const uint16_t *RegEnd = Mips64IntRegs + 8;

    // Copy double words to registers.
    for (; (Reg != RegEnd) && (ByValSize >= Offset + 8); ++Reg, Offset += 8) {
      SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
                                    DAG.getConstant(Offset, PtrTy));
      SDValue LoadVal = DAG.getLoad(MVT::i64, dl, Chain, LoadPtr,
                                    MachinePointerInfo(), false, false, false,
                                    Alignment);
      MemOpChains.push_back(LoadVal.getValue(1));
      RegsToPass.push_back(std::make_pair(*Reg, LoadVal));
    }

    // Return if the struct has been fully copied.
    if (!(MemCpySize = ByValSize - Offset))
      return;

    // If there is an argument register available, copy the remainder of the
    // byval argument with sub-doubleword loads and shifts.
    if (Reg != RegEnd) {
      assert((ByValSize < Offset + 8) &&
             "Size of the remainder should be smaller than 8-byte.");
      SDValue Val;
      // Try progressively smaller loads (4, 2, 1 bytes) until the tail is
      // consumed, OR-ing each piece into Val at its doubleword bit offset.
      for (unsigned LoadSize = 4; Offset < ByValSize; LoadSize /= 2) {
        unsigned RemSize = ByValSize - Offset;

        if (RemSize < LoadSize)
          continue;

        SDValue LoadPtr = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
                                      DAG.getConstant(Offset, PtrTy));
        SDValue LoadVal =
          DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i64, Chain, LoadPtr,
                         MachinePointerInfo(), MVT::getIntegerVT(LoadSize * 8),
                         false, false, Alignment);
        MemOpChains.push_back(LoadVal.getValue(1));

        // Offset in number of bits from double word boundary.
        unsigned OffsetDW = (Offset % 8) * 8;
        // Endianness decides which end of the doubleword the piece lands in.
        unsigned Shamt = isLittle ? OffsetDW : 64 - (OffsetDW + LoadSize * 8);
        SDValue Shift = DAG.getNode(ISD::SHL, dl, MVT::i64, LoadVal,
                                    DAG.getConstant(Shamt, MVT::i32));

        Val = Val.getNode() ? DAG.getNode(ISD::OR, dl, MVT::i64, Val, Shift) :
                              Shift;
        Offset += LoadSize;
        Alignment = std::min(Alignment, LoadSize);
      }

      RegsToPass.push_back(std::make_pair(*Reg, Val));
      return;
    }
  }

  assert(MemCpySize && "MemCpySize must not be zero.");

  // Create a fixed object on stack at offset LocMemOffset and copy
  // remainder of byval arg to it with memcpy.
  SDValue Src = DAG.getNode(ISD::ADD, dl, PtrTy, Arg,
                            DAG.getConstant(Offset, PtrTy));
  LastFI = MFI->CreateFixedObject(MemCpySize, LocMemOffset, true);
  SDValue Dst = DAG.getFrameIndex(LastFI, PtrTy);
  ByValChain = DAG.getMemcpy(ByValChain, dl, Dst, Src,
                             DAG.getConstant(MemCpySize, PtrTy), Alignment,
                             /*isVolatile=*/false, /*AlwaysInline=*/false,
                             MachinePointerInfo(0), MachinePointerInfo(0));
}

/// LowerCall - functions arguments are copied from virtual regs to
/// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
/// TODO: isTailCall.
+SDValue
+MipsTargetLowering::LowerCall(SDValue InChain, SDValue Callee,
+                              CallingConv::ID CallConv, bool isVarArg,
+                              bool doesNotRet, bool &isTailCall,
+                              const SmallVectorImpl<ISD::OutputArg> &Outs,
+                              const SmallVectorImpl<SDValue> &OutVals,
+                              const SmallVectorImpl<ISD::InputArg> &Ins,
+                              DebugLoc dl, SelectionDAG &DAG,
+                              SmallVectorImpl<SDValue> &InVals) const {
+  // MIPS target does not yet support tail call optimization.
+  isTailCall = false;
+
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  const TargetFrameLowering *TFL = MF.getTarget().getFrameLowering();
+  bool IsPIC = getTargetMachine().getRelocationModel() == Reloc::PIC_;
+  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+  // Analyze operands of the call, assigning locations to each operand.
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), ArgLocs, *DAG.getContext());
+
+  if (IsO32)
+    CCInfo.AnalyzeCallOperands(Outs, CC_MipsO32);
+  else if (HasMips64)
+    AnalyzeMips64CallOperands(CCInfo, Outs);
+  else
+    CCInfo.AnalyzeCallOperands(Outs, CC_Mips);
+
+  // Get a count of how many bytes are to be pushed on the stack.
+  unsigned NextStackOffset = CCInfo.getNextStackOffset();
+
+  // Chain is the output chain of the last Load/Store or CopyToReg node.
+  // ByValChain is the output chain of the last Memcpy node created for copying
+  // byval arguments to the stack.
+  SDValue Chain, CallSeqStart, ByValChain;
+  SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, true);
+  Chain = CallSeqStart = DAG.getCALLSEQ_START(InChain, NextStackOffsetVal);
+  ByValChain = InChain;
+
+  // If this is the first call, create a stack frame object that points to
+  // a location to which .cprestore saves $gp.
+ if (IsO32 && IsPIC && MipsFI->globalBaseRegFixed() && !MipsFI->getGPFI()) + MipsFI->setGPFI(MFI->CreateFixedObject(4, 0, true)); + + // Get the frame index of the stack frame object that points to the location + // of dynamically allocated area on the stack. + int DynAllocFI = MipsFI->getDynAllocFI(); + + // Update size of the maximum argument space. + // For O32, a minimum of four words (16 bytes) of argument space is + // allocated. + if (IsO32) + NextStackOffset = std::max(NextStackOffset, (unsigned)16); + + unsigned MaxCallFrameSize = MipsFI->getMaxCallFrameSize(); + + if (MaxCallFrameSize < NextStackOffset) { + MipsFI->setMaxCallFrameSize(NextStackOffset); + + // Set the offsets relative to $sp of the $gp restore slot and dynamically + // allocated stack space. These offsets must be aligned to a boundary + // determined by the stack alignment of the ABI. + unsigned StackAlignment = TFL->getStackAlignment(); + NextStackOffset = (NextStackOffset + StackAlignment - 1) / + StackAlignment * StackAlignment; + + if (MipsFI->needGPSaveRestore()) + MFI->setObjectOffset(MipsFI->getGPFI(), NextStackOffset); + + MFI->setObjectOffset(DynAllocFI, NextStackOffset); + } + + // With EABI is it possible to have 16 args on registers. + SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass; + SmallVector<SDValue, 8> MemOpChains; + + int FirstFI = -MFI->getNumFixedObjects() - 1, LastFI = 0; + + // Walk the register/memloc assignments, inserting copies/loads. + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { + SDValue Arg = OutVals[i]; + CCValAssign &VA = ArgLocs[i]; + MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT(); + ISD::ArgFlagsTy Flags = Outs[i].Flags; + + // ByVal Arg. 
+ if (Flags.isByVal()) { + assert(Flags.getByValSize() && + "ByVal args of size 0 should have been ignored by front-end."); + if (IsO32) + WriteByValArg(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, + MFI, DAG, Arg, VA, Flags, getPointerTy(), + Subtarget->isLittle()); + else + PassByValArg64(ByValChain, Chain, dl, RegsToPass, MemOpChains, LastFI, + MFI, DAG, Arg, VA, Flags, getPointerTy(), + Subtarget->isLittle()); + continue; + } + + // Promote the value if needed. + switch (VA.getLocInfo()) { + default: llvm_unreachable("Unknown loc info!"); + case CCValAssign::Full: + if (VA.isRegLoc()) { + if ((ValVT == MVT::f32 && LocVT == MVT::i32) || + (ValVT == MVT::f64 && LocVT == MVT::i64)) + Arg = DAG.getNode(ISD::BITCAST, dl, LocVT, Arg); + else if (ValVT == MVT::f64 && LocVT == MVT::i32) { + SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32, + Arg, DAG.getConstant(0, MVT::i32)); + SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, dl, MVT::i32, + Arg, DAG.getConstant(1, MVT::i32)); + if (!Subtarget->isLittle()) + std::swap(Lo, Hi); + unsigned LocRegLo = VA.getLocReg(); + unsigned LocRegHigh = getNextIntArgReg(LocRegLo); + RegsToPass.push_back(std::make_pair(LocRegLo, Lo)); + RegsToPass.push_back(std::make_pair(LocRegHigh, Hi)); + continue; + } + } + break; + case CCValAssign::SExt: + Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, LocVT, Arg); + break; + case CCValAssign::ZExt: + Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, LocVT, Arg); + break; + case CCValAssign::AExt: + Arg = DAG.getNode(ISD::ANY_EXTEND, dl, LocVT, Arg); + break; + } + + // Arguments that can be passed on register must be kept at + // RegsToPass vector + if (VA.isRegLoc()) { + RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + continue; + } + + // Register can't get to this point... 
+    assert(VA.isMemLoc());
+
+    // Create the frame index object for this incoming parameter
+    LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
+                                    VA.getLocMemOffset(), true);
+    SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
+
+    // emit ISD::STORE which stores the
+    // parameter value to a stack location
+    MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
+                                       MachinePointerInfo(), false, false, 0));
+  }
+
+  // Extend range of indices of frame objects for outgoing arguments that were
+  // created during this function call. Skip this step if no such objects were
+  // created.
+  if (LastFI)
+    MipsFI->extendOutArgFIRange(FirstFI, LastFI);
+
+  // If a memcpy has been created to copy a byval arg to a stack, replace the
+  // chain input of CallSeqStart with ByValChain.
+  if (InChain != ByValChain)
+    DAG.UpdateNodeOperands(CallSeqStart.getNode(), ByValChain,
+                           NextStackOffsetVal);
+
+  // Transform all store nodes into one single node because all store
+  // nodes are independent of each other.
+  if (!MemOpChains.empty())
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &MemOpChains[0], MemOpChains.size());
+
+  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
+  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
+  // node so that legalize doesn't hack it.
+  unsigned char OpFlag;
+  bool IsPICCall = (IsN64 || IsPIC); // true if calls are translated to jalr $25
+  bool GlobalOrExternal = false;
+  SDValue CalleeLo;
+
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+    if (IsPICCall && G->getGlobal()->hasInternalLinkage()) {
+      OpFlag = IsO32 ? MipsII::MO_GOT : MipsII::MO_GOT_PAGE;
+      unsigned char LoFlag = IsO32 ? MipsII::MO_ABS_LO : MipsII::MO_GOT_OFST;
+      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(), 0,
+                                          OpFlag);
+      CalleeLo = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy(),
+                                            0, LoFlag);
+    } else {
+      OpFlag = IsPICCall ?
MipsII::MO_GOT_CALL : MipsII::MO_NO_FLAG;
+      Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl,
+                                          getPointerTy(), 0, OpFlag);
+    }
+
+    GlobalOrExternal = true;
+  }
+  else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
+    if (IsN64 || (!IsO32 && IsPIC))
+      OpFlag = MipsII::MO_GOT_DISP;
+    else if (!IsPIC) // !N64 && static
+      OpFlag = MipsII::MO_NO_FLAG;
+    else // O32 & PIC
+      OpFlag = MipsII::MO_GOT_CALL;
+    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
+                                         OpFlag);
+    GlobalOrExternal = true;
+  }
+
+  SDValue InFlag;
+
+  // Create nodes that load address of callee and copy it to T9
+  if (IsPICCall) {
+    if (GlobalOrExternal) {
+      // Load callee address
+      Callee = DAG.getNode(MipsISD::Wrapper, dl, getPointerTy(),
+                           GetGlobalReg(DAG, getPointerTy()), Callee);
+      SDValue LoadValue = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(),
+                                      Callee, MachinePointerInfo::getGOT(),
+                                      false, false, false, 0);
+
+      // Use GOT+LO if callee has internal linkage.
+      if (CalleeLo.getNode()) {
+        SDValue Lo = DAG.getNode(MipsISD::Lo, dl, getPointerTy(), CalleeLo);
+        Callee = DAG.getNode(ISD::ADD, dl, getPointerTy(), LoadValue, Lo);
+      } else
+        Callee = LoadValue;
+    }
+  }
+
+  // T9 should contain the address of the callee function if
+  // -relocation-model=pic or it is an indirect call.
+  if (IsPICCall || !GlobalOrExternal) {
+    // copy to T9
+    unsigned T9Reg = IsN64 ? Mips::T9_64 : Mips::T9;
+    Chain = DAG.getCopyToReg(Chain, dl, T9Reg, Callee, SDValue(0, 0));
+    InFlag = Chain.getValue(1);
+    Callee = DAG.getRegister(T9Reg, getPointerTy());
+  }
+
+  // Build a sequence of copy-to-reg nodes chained together with token
+  // chain and flag operands which copy the outgoing args into registers.
+  // The InFlag is necessary since all emitted instructions must be
+  // stuck together.
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { + Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, + RegsToPass[i].second, InFlag); + InFlag = Chain.getValue(1); + } + + // MipsJmpLink = #chain, #target_address, #opt_in_flags... + // = Chain, Callee, Reg#1, Reg#2, ... + // + // Returns a chain & a flag for retval copy to use. + SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); + SmallVector<SDValue, 8> Ops; + Ops.push_back(Chain); + Ops.push_back(Callee); + + // Add argument registers to the end of the list so that they are + // known live into the call. + for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) + Ops.push_back(DAG.getRegister(RegsToPass[i].first, + RegsToPass[i].second.getValueType())); + + // Add a register mask operand representing the call-preserved registers. + const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo(); + const uint32_t *Mask = TRI->getCallPreservedMask(CallConv); + assert(Mask && "Missing call preserved mask for calling convention"); + Ops.push_back(DAG.getRegisterMask(Mask)); + + if (InFlag.getNode()) + Ops.push_back(InFlag); + + Chain = DAG.getNode(MipsISD::JmpLink, dl, NodeTys, &Ops[0], Ops.size()); + InFlag = Chain.getValue(1); + + // Create the CALLSEQ_END node. + Chain = DAG.getCALLSEQ_END(Chain, + DAG.getIntPtrConstant(NextStackOffset, true), + DAG.getIntPtrConstant(0, true), InFlag); + InFlag = Chain.getValue(1); + + // Handle result values, copying them out of physregs into vregs that we + // return. + return LowerCallResult(Chain, InFlag, CallConv, isVarArg, + Ins, dl, DAG, InVals); +} + +/// LowerCallResult - Lower the result values of a call into the +/// appropriate copies out of appropriate physical registers. 
+SDValue +MipsTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const { + // Assign locations to each value returned by this call. + SmallVector<CCValAssign, 16> RVLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), + getTargetMachine(), RVLocs, *DAG.getContext()); + + CCInfo.AnalyzeCallResult(Ins, RetCC_Mips); + + // Copy all of the result registers out of their specified physreg. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(), + RVLocs[i].getValVT(), InFlag).getValue(1); + InFlag = Chain.getValue(2); + InVals.push_back(Chain.getValue(0)); + } + + return Chain; +} + +//===----------------------------------------------------------------------===// +// Formal Arguments Calling Convention Implementation +//===----------------------------------------------------------------------===// +static void ReadByValArg(MachineFunction &MF, SDValue Chain, DebugLoc dl, + std::vector<SDValue>& OutChains, + SelectionDAG &DAG, unsigned NumWords, SDValue FIN, + const CCValAssign &VA, const ISD::ArgFlagsTy& Flags, + const Argument *FuncArg) { + unsigned LocMem = VA.getLocMemOffset(); + unsigned FirstWord = LocMem / 4; + + // copy register A0 - A3 to frame object + for (unsigned i = 0; i < NumWords; ++i) { + unsigned CurWord = FirstWord + i; + if (CurWord >= O32IntRegsSize) + break; + + unsigned SrcReg = O32IntRegs[CurWord]; + unsigned Reg = AddLiveIn(MF, SrcReg, Mips::CPURegsRegisterClass); + SDValue StorePtr = DAG.getNode(ISD::ADD, dl, MVT::i32, FIN, + DAG.getConstant(i * 4, MVT::i32)); + SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(Reg, MVT::i32), + StorePtr, MachinePointerInfo(FuncArg, i * 4), + false, false, 0); + OutChains.push_back(Store); + } +} + +// Create frame object on stack and copy registers used for byval 
passing to it.
+static unsigned
+CopyMips64ByValRegs(MachineFunction &MF, SDValue Chain, DebugLoc dl,
+                    std::vector<SDValue>& OutChains, SelectionDAG &DAG,
+                    const CCValAssign &VA, const ISD::ArgFlagsTy& Flags,
+                    MachineFrameInfo *MFI, bool IsRegLoc,
+                    SmallVectorImpl<SDValue> &InVals, MipsFunctionInfo *MipsFI,
+                    EVT PtrTy, const Argument *FuncArg) {
+  const uint16_t *Reg = Mips64IntRegs + 8;
+  int FOOffset; // Frame object offset from virtual frame pointer.
+
+  if (IsRegLoc) {
+    Reg = std::find(Mips64IntRegs, Mips64IntRegs + 8, VA.getLocReg());
+    FOOffset = (Reg - Mips64IntRegs) * 8 - 8 * 8;
+  }
+  else
+    FOOffset = VA.getLocMemOffset();
+
+  // Create frame object.
+  unsigned NumRegs = (Flags.getByValSize() + 7) / 8;
+  unsigned LastFI = MFI->CreateFixedObject(NumRegs * 8, FOOffset, true);
+  SDValue FIN = DAG.getFrameIndex(LastFI, PtrTy);
+  InVals.push_back(FIN);
+
+  // Copy arg registers.
+  for (unsigned I = 0; (Reg != Mips64IntRegs + 8) && (I < NumRegs);
+       ++Reg, ++I) {
+    unsigned VReg = AddLiveIn(MF, *Reg, Mips::CPU64RegsRegisterClass);
+    SDValue StorePtr = DAG.getNode(ISD::ADD, dl, PtrTy, FIN,
+                                   DAG.getConstant(I * 8, PtrTy));
+    SDValue Store = DAG.getStore(Chain, dl, DAG.getRegister(VReg, MVT::i64),
+                                 StorePtr, MachinePointerInfo(FuncArg, I * 8),
+                                 false, false, 0);
+    OutChains.push_back(Store);
+  }
+
+  return LastFI;
+}
+
+/// LowerFormalArguments - transform physical registers into virtual registers
+/// and generate load operations for arguments placed on the stack.
+SDValue
+MipsTargetLowering::LowerFormalArguments(SDValue Chain,
+                                         CallingConv::ID CallConv,
+                                         bool isVarArg,
+                                         const SmallVectorImpl<ISD::InputArg> &Ins,
+                                         DebugLoc dl, SelectionDAG &DAG,
+                                         SmallVectorImpl<SDValue> &InVals)
+                                          const {
+  MachineFunction &MF = DAG.getMachineFunction();
+  MachineFrameInfo *MFI = MF.getFrameInfo();
+  MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
+
+  MipsFI->setVarArgsFrameIndex(0);
+
+  // Used with vargs to accumulate store chains.
+ std::vector<SDValue> OutChains; + + // Assign locations to all of the incoming arguments. + SmallVector<CCValAssign, 16> ArgLocs; + CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), + getTargetMachine(), ArgLocs, *DAG.getContext()); + + if (IsO32) + CCInfo.AnalyzeFormalArguments(Ins, CC_MipsO32); + else + CCInfo.AnalyzeFormalArguments(Ins, CC_Mips); + + Function::const_arg_iterator FuncArg = + DAG.getMachineFunction().getFunction()->arg_begin(); + int LastFI = 0;// MipsFI->LastInArgFI is 0 at the entry of this function. + + for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++FuncArg) { + CCValAssign &VA = ArgLocs[i]; + EVT ValVT = VA.getValVT(); + ISD::ArgFlagsTy Flags = Ins[i].Flags; + bool IsRegLoc = VA.isRegLoc(); + + if (Flags.isByVal()) { + assert(Flags.getByValSize() && + "ByVal args of size 0 should have been ignored by front-end."); + if (IsO32) { + unsigned NumWords = (Flags.getByValSize() + 3) / 4; + LastFI = MFI->CreateFixedObject(NumWords * 4, VA.getLocMemOffset(), + true); + SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy()); + InVals.push_back(FIN); + ReadByValArg(MF, Chain, dl, OutChains, DAG, NumWords, FIN, VA, Flags, + &*FuncArg); + } else // N32/64 + LastFI = CopyMips64ByValRegs(MF, Chain, dl, OutChains, DAG, VA, Flags, + MFI, IsRegLoc, InVals, MipsFI, + getPointerTy(), &*FuncArg); + continue; + } + + // Arguments stored on registers + if (IsRegLoc) { + EVT RegVT = VA.getLocVT(); + unsigned ArgReg = VA.getLocReg(); + const TargetRegisterClass *RC; + + if (RegVT == MVT::i32) + RC = Mips::CPURegsRegisterClass; + else if (RegVT == MVT::i64) + RC = Mips::CPU64RegsRegisterClass; + else if (RegVT == MVT::f32) + RC = Mips::FGR32RegisterClass; + else if (RegVT == MVT::f64) + RC = HasMips64 ? 
Mips::FGR64RegisterClass : Mips::AFGR64RegisterClass; + else + llvm_unreachable("RegVT not supported by FormalArguments Lowering"); + + // Transform the arguments stored on + // physical registers into virtual ones + unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgReg, RC); + SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT); + + // If this is an 8 or 16-bit value, it has been passed promoted + // to 32 bits. Insert an assert[sz]ext to capture this, then + // truncate to the right size. + if (VA.getLocInfo() != CCValAssign::Full) { + unsigned Opcode = 0; + if (VA.getLocInfo() == CCValAssign::SExt) + Opcode = ISD::AssertSext; + else if (VA.getLocInfo() == CCValAssign::ZExt) + Opcode = ISD::AssertZext; + if (Opcode) + ArgValue = DAG.getNode(Opcode, dl, RegVT, ArgValue, + DAG.getValueType(ValVT)); + ArgValue = DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue); + } + + // Handle floating point arguments passed in integer registers. + if ((RegVT == MVT::i32 && ValVT == MVT::f32) || + (RegVT == MVT::i64 && ValVT == MVT::f64)) + ArgValue = DAG.getNode(ISD::BITCAST, dl, ValVT, ArgValue); + else if (IsO32 && RegVT == MVT::i32 && ValVT == MVT::f64) { + unsigned Reg2 = AddLiveIn(DAG.getMachineFunction(), + getNextIntArgReg(ArgReg), RC); + SDValue ArgValue2 = DAG.getCopyFromReg(Chain, dl, Reg2, RegVT); + if (!Subtarget->isLittle()) + std::swap(ArgValue, ArgValue2); + ArgValue = DAG.getNode(MipsISD::BuildPairF64, dl, MVT::f64, + ArgValue, ArgValue2); + } + + InVals.push_back(ArgValue); + } else { // VA.isRegLoc() + + // sanity check + assert(VA.isMemLoc()); + + // The stack pointer offset is relative to the caller stack frame. 
+ LastFI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8, + VA.getLocMemOffset(), true); + + // Create load nodes to retrieve arguments from the stack + SDValue FIN = DAG.getFrameIndex(LastFI, getPointerTy()); + InVals.push_back(DAG.getLoad(ValVT, dl, Chain, FIN, + MachinePointerInfo::getFixedStack(LastFI), + false, false, false, 0)); + } + } + + // The mips ABIs for returning structs by value requires that we copy + // the sret argument into $v0 for the return. Save the argument into + // a virtual register so that we can access it from the return points. + if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { + unsigned Reg = MipsFI->getSRetReturnReg(); + if (!Reg) { + Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(MVT::i32)); + MipsFI->setSRetReturnReg(Reg); + } + SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]); + Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain); + } + + if (isVarArg) { + unsigned NumOfRegs = IsO32 ? 4 : 8; + const uint16_t *ArgRegs = IsO32 ? O32IntRegs : Mips64IntRegs; + unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs, NumOfRegs); + int FirstRegSlotOffset = IsO32 ? 0 : -64 ; // offset of $a0's slot. + const TargetRegisterClass *RC + = IsO32 ? Mips::CPURegsRegisterClass : Mips::CPU64RegsRegisterClass; + unsigned RegSize = RC->getSize(); + int RegSlotOffset = FirstRegSlotOffset + Idx * RegSize; + + // Offset of the first variable argument from stack pointer. + int FirstVaArgOffset; + + if (IsO32 || (Idx == NumOfRegs)) { + FirstVaArgOffset = + (CCInfo.getNextStackOffset() + RegSize - 1) / RegSize * RegSize; + } else + FirstVaArgOffset = RegSlotOffset; + + // Record the frame index of the first variable argument + // which is a value necessary to VASTART. + LastFI = MFI->CreateFixedObject(RegSize, FirstVaArgOffset, true); + MipsFI->setVarArgsFrameIndex(LastFI); + + // Copy the integer registers that have not been used for argument passing + // to the argument register save area. 
For O32, the save area is allocated
+    // in the caller's stack frame, while for N32/64, it is allocated in the
+    // callee's stack frame.
+    for (int StackOffset = RegSlotOffset;
+         Idx < NumOfRegs; ++Idx, StackOffset += RegSize) {
+      unsigned Reg = AddLiveIn(DAG.getMachineFunction(), ArgRegs[Idx], RC);
+      SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
+                                            MVT::getIntegerVT(RegSize * 8));
+      LastFI = MFI->CreateFixedObject(RegSize, StackOffset, true);
+      SDValue PtrOff = DAG.getFrameIndex(LastFI, getPointerTy());
+      OutChains.push_back(DAG.getStore(Chain, dl, ArgValue, PtrOff,
+                                       MachinePointerInfo(), false, false, 0));
+    }
+  }
+
+  MipsFI->setLastInArgFI(LastFI);
+
+  // All stores are grouped in one node to allow the matching between
+  // the size of Ins and InVals. This only happens for vararg functions
+  if (!OutChains.empty()) {
+    OutChains.push_back(Chain);
+    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+                        &OutChains[0], OutChains.size());
+  }
+
+  return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+// Return Value Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+SDValue
+MipsTargetLowering::LowerReturn(SDValue Chain,
+                                CallingConv::ID CallConv, bool isVarArg,
+                                const SmallVectorImpl<ISD::OutputArg> &Outs,
+                                const SmallVectorImpl<SDValue> &OutVals,
+                                DebugLoc dl, SelectionDAG &DAG) const {
+
+  // CCValAssign - represent the assignment of
+  // the return value to a location
+  SmallVector<CCValAssign, 16> RVLocs;
+
+  // CCState - Info about the registers and stack slot.
+  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
+                 getTargetMachine(), RVLocs, *DAG.getContext());
+
+  // Analyze return values.
+  CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
+
+  // If this is the first return lowered for this function, add
+  // the regs to the liveout set for the function.
+ if (DAG.getMachineFunction().getRegInfo().liveout_empty()) { + for (unsigned i = 0; i != RVLocs.size(); ++i) + if (RVLocs[i].isRegLoc()) + DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg()); + } + + SDValue Flag; + + // Copy the result values into the output registers. + for (unsigned i = 0; i != RVLocs.size(); ++i) { + CCValAssign &VA = RVLocs[i]; + assert(VA.isRegLoc() && "Can only return in registers!"); + + Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag); + + // guarantee that all emitted copies are + // stuck together, avoiding something bad + Flag = Chain.getValue(1); + } + + // The mips ABIs for returning structs by value requires that we copy + // the sret argument into $v0 for the return. We saved the argument into + // a virtual register in the entry block, so now we copy the value out + // and into $v0. + if (DAG.getMachineFunction().getFunction()->hasStructRetAttr()) { + MachineFunction &MF = DAG.getMachineFunction(); + MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); + unsigned Reg = MipsFI->getSRetReturnReg(); + + if (!Reg) + llvm_unreachable("sret virtual register not created in the entry block"); + SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy()); + + Chain = DAG.getCopyToReg(Chain, dl, Mips::V0, Val, Flag); + Flag = Chain.getValue(1); + } + + // Return on Mips is always a "jr $ra" + if (Flag.getNode()) + return DAG.getNode(MipsISD::Ret, dl, MVT::Other, + Chain, DAG.getRegister(Mips::RA, MVT::i32), Flag); + else // Return Void + return DAG.getNode(MipsISD::Ret, dl, MVT::Other, + Chain, DAG.getRegister(Mips::RA, MVT::i32)); +} + +//===----------------------------------------------------------------------===// +// Mips Inline Assembly Support +//===----------------------------------------------------------------------===// + +/// getConstraintType - Given a constraint letter, return the type of +/// constraint it is for this target. 
+MipsTargetLowering::ConstraintType MipsTargetLowering::
+getConstraintType(const std::string &Constraint) const
+{
+  // Mips specific constraints
+  // GCC config/mips/constraints.md
+  //
+  // 'd' : An address register. Equivalent to r
+  //       unless generating MIPS16 code.
+  // 'y' : Equivalent to r; retained for
+  //       backwards compatibility.
+  // 'f' : Floating Point registers.
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+      default : break;
+      case 'd':
+      case 'y':
+      case 'f':
+        return C_RegisterClass;
+    }
+  }
+  return TargetLowering::getConstraintType(Constraint);
+}
+
+/// Examine constraint type and operand type and determine a weight value.
+/// This object must already have been set up with the operand type
+/// and the current alternative constraint selected.
+TargetLowering::ConstraintWeight
+MipsTargetLowering::getSingleConstraintMatchWeight(
+    AsmOperandInfo &info, const char *constraint) const {
+  ConstraintWeight weight = CW_Invalid;
+  Value *CallOperandVal = info.CallOperandVal;
+  // If we don't have a value, we can't do a match,
+  // but allow it at the lowest weight.
+  if (CallOperandVal == NULL)
+    return CW_Default;
+  Type *type = CallOperandVal->getType();
+  // Look at the constraint type.
+  switch (*constraint) {
+  default:
+    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+    break;
+  case 'd':
+  case 'y':
+    if (type->isIntegerTy())
+      weight = CW_Register;
+    break;
+  case 'f':
+    if (type->isFloatTy())
+      weight = CW_Register;
+    break;
+  }
+  return weight;
+}
+
+/// Given a register class constraint, like 'r', if this corresponds directly
+/// to an LLVM register class, return a register of 0 and the register class
+/// pointer.
+std::pair<unsigned, const TargetRegisterClass*> MipsTargetLowering::
+getRegForInlineAsmConstraint(const std::string &Constraint, EVT VT) const
+{
+  if (Constraint.size() == 1) {
+    switch (Constraint[0]) {
+    case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
+ case 'y': // Same as 'r'. Exists for compatibility. + case 'r': + if (VT == MVT::i32) + return std::make_pair(0U, Mips::CPURegsRegisterClass); + assert(VT == MVT::i64 && "Unexpected type."); + return std::make_pair(0U, Mips::CPU64RegsRegisterClass); + case 'f': + if (VT == MVT::f32) + return std::make_pair(0U, Mips::FGR32RegisterClass); + if ((VT == MVT::f64) && (!Subtarget->isSingleFloat())) { + if (Subtarget->isFP64bit()) + return std::make_pair(0U, Mips::FGR64RegisterClass); + else + return std::make_pair(0U, Mips::AFGR64RegisterClass); + } + } + } + return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); +} + +bool +MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const { + // The Mips target isn't yet aware of offsets. + return false; +} + +bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { + if (VT != MVT::f32 && VT != MVT::f64) + return false; + if (Imm.isNegZero()) + return false; + return Imm.isZero(); +} + +unsigned MipsTargetLowering::getJumpTableEncoding() const { + if (IsN64) + return MachineJumpTableInfo::EK_GPRel64BlockAddress; + + return TargetLowering::getJumpTableEncoding(); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsISelLowering.h b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h new file mode 100644 index 0000000..c36f40f --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsISelLowering.h @@ -0,0 +1,200 @@ +//===-- MipsISelLowering.h - Mips DAG Lowering Interface --------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the interfaces that Mips uses to lower LLVM code into a +// selection DAG. 
+// +//===----------------------------------------------------------------------===// + +#ifndef MipsISELLOWERING_H +#define MipsISELLOWERING_H + +#include "Mips.h" +#include "MipsSubtarget.h" +#include "llvm/CodeGen/SelectionDAG.h" +#include "llvm/Target/TargetLowering.h" + +namespace llvm { + namespace MipsISD { + enum NodeType { + // Start the numbering from where ISD NodeType finishes. + FIRST_NUMBER = ISD::BUILTIN_OP_END, + + // Jump and link (call) + JmpLink, + + // Get the Higher 16 bits from a 32-bit immediate + // No relation with Mips Hi register + Hi, + + // Get the Lower 16 bits from a 32-bit immediate + // No relation with Mips Lo register + Lo, + + // Handle gp_rel (small data/bss sections) relocation. + GPRel, + + // Thread Pointer + ThreadPointer, + + // Floating Point Branch Conditional + FPBrcond, + + // Floating Point Compare + FPCmp, + + // Floating Point Conditional Moves + CMovFP_T, + CMovFP_F, + + // Floating Point Rounding + FPRound, + + // Return + Ret, + + // MAdd/Sub nodes + MAdd, + MAddu, + MSub, + MSubu, + + // DivRem(u) + DivRem, + DivRemU, + + BuildPairF64, + ExtractElementF64, + + Wrapper, + + DynAlloc, + + Sync, + + Ext, + Ins + }; + } + + //===--------------------------------------------------------------------===// + // TargetLowering Implementation + //===--------------------------------------------------------------------===// + + class MipsTargetLowering : public TargetLowering { + public: + explicit MipsTargetLowering(MipsTargetMachine &TM); + + virtual MVT getShiftAmountTy(EVT LHSTy) const { return MVT::i32; } + + virtual bool allowsUnalignedMemoryAccesses (EVT VT) const; + + /// LowerOperation - Provide custom lowering hooks for some operations. + virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const; + + /// getTargetNodeName - This method returns the name of a target specific + // DAG node. 
+ virtual const char *getTargetNodeName(unsigned Opcode) const; + + /// getSetCCResultType - get the ISD::SETCC result ValueType + EVT getSetCCResultType(EVT VT) const; + + virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const; + private: + // Subtarget Info + const MipsSubtarget *Subtarget; + + bool HasMips64, IsN64, IsO32; + + // Lower Operand helpers + SDValue LowerCallResult(SDValue Chain, SDValue InFlag, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; + + // Lower Operand specifics + SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG& DAG) const; + SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const; + + virtual SDValue + LowerFormalArguments(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; + + virtual SDValue + LowerCall(SDValue Chain, SDValue Callee, + CallingConv::ID CallConv, bool isVarArg, + bool doesNotRet, bool &isTailCall, + const SmallVectorImpl<ISD::OutputArg> &Outs, 
+ const SmallVectorImpl<SDValue> &OutVals, + const SmallVectorImpl<ISD::InputArg> &Ins, + DebugLoc dl, SelectionDAG &DAG, + SmallVectorImpl<SDValue> &InVals) const; + + virtual SDValue + LowerReturn(SDValue Chain, + CallingConv::ID CallConv, bool isVarArg, + const SmallVectorImpl<ISD::OutputArg> &Outs, + const SmallVectorImpl<SDValue> &OutVals, + DebugLoc dl, SelectionDAG &DAG) const; + + virtual MachineBasicBlock * + EmitInstrWithCustomInserter(MachineInstr *MI, + MachineBasicBlock *MBB) const; + + // Inline asm support + ConstraintType getConstraintType(const std::string &Constraint) const; + + /// Examine constraint string and operand type and determine a weight value. + /// The operand object must already have been set up with the operand type. + ConstraintWeight getSingleConstraintMatchWeight( + AsmOperandInfo &info, const char *constraint) const; + + std::pair<unsigned, const TargetRegisterClass*> + getRegForInlineAsmConstraint(const std::string &Constraint, + EVT VT) const; + + virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const; + + /// isFPImmLegal - Returns true if the target can instruction select the + /// specified FP immediate natively. If false, the legalizer will + /// materialize the FP immediate as a load from a constant pool. 
+ virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const; + + virtual unsigned getJumpTableEncoding() const; + + MachineBasicBlock *EmitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB, + unsigned Size, unsigned BinOpcode, bool Nand = false) const; + MachineBasicBlock *EmitAtomicBinaryPartword(MachineInstr *MI, + MachineBasicBlock *BB, unsigned Size, unsigned BinOpcode, + bool Nand = false) const; + MachineBasicBlock *EmitAtomicCmpSwap(MachineInstr *MI, + MachineBasicBlock *BB, unsigned Size) const; + MachineBasicBlock *EmitAtomicCmpSwapPartword(MachineInstr *MI, + MachineBasicBlock *BB, unsigned Size) const; + }; +} + +#endif // MipsISELLOWERING_H diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td new file mode 100644 index 0000000..b655945 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsInstrFPU.td @@ -0,0 +1,450 @@ +//===-- MipsInstrFPU.td - Mips FPU Instruction Information -*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the Mips FPU instruction set. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Floating Point Instructions +// ------------------------ +// * 64bit fp: +// - 32 64-bit registers (default mode) +// - 16 even 32-bit registers (32-bit compatible mode) for +// single and double access. 
+// * 32bit fp:
+//    - 16 even 32-bit registers - single and double (aliased)
+//    - 32 32-bit registers (within single-only mode)
+//===----------------------------------------------------------------------===//
+
+// Floating Point Compare and Branch
+// SDTypeProfile<NumResults, NumOperands, constraints>: these describe the
+// operand/result types of the custom MipsISD nodes used below.
+def SDT_MipsFPBrcond : SDTypeProfile<0, 2, [SDTCisInt<0>,
+                                            SDTCisVT<1, OtherVT>]>;
+def SDT_MipsFPCmp : SDTypeProfile<0, 3, [SDTCisSameAs<0, 1>, SDTCisFP<1>,
+                                         SDTCisVT<2, i32>]>;
+def SDT_MipsCMovFP : SDTypeProfile<1, 2, [SDTCisSameAs<0, 1>,
+                                          SDTCisSameAs<1, 2>]>;
+def SDT_MipsBuildPairF64 : SDTypeProfile<1, 2, [SDTCisVT<0, f64>,
+                                                SDTCisVT<1, i32>,
+                                                SDTCisSameAs<1, 2>]>;
+def SDT_MipsExtractElementF64 : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
+                                                     SDTCisVT<1, f64>,
+                                                     SDTCisVT<2, i32>]>;
+
+// FPCmp produces glue (SDNPOutGlue) which the conditional move and FP
+// branch nodes consume (SDNPInGlue/SDNPOptInGlue), keeping the compare
+// adjacent to its user during scheduling.
+def MipsFPCmp : SDNode<"MipsISD::FPCmp", SDT_MipsFPCmp, [SDNPOutGlue]>;
+def MipsCMovFP_T : SDNode<"MipsISD::CMovFP_T", SDT_MipsCMovFP, [SDNPInGlue]>;
+def MipsCMovFP_F : SDNode<"MipsISD::CMovFP_F", SDT_MipsCMovFP, [SDNPInGlue]>;
+def MipsFPBrcond : SDNode<"MipsISD::FPBrcond", SDT_MipsFPBrcond,
+                          [SDNPHasChain, SDNPOptInGlue]>;
+def MipsBuildPairF64 : SDNode<"MipsISD::BuildPairF64", SDT_MipsBuildPairF64>;
+def MipsExtractElementF64 : SDNode<"MipsISD::ExtractElementF64",
+                                   SDT_MipsExtractElementF64>;
+
+// Operand for printing out a condition code.
+let PrintMethod = "printFCCOperand" in
+  def condcode : Operand<i32>;
+
+//===----------------------------------------------------------------------===//
+// Feature predicates.
+//===----------------------------------------------------------------------===//
+
+def IsFP64bit        : Predicate<"Subtarget.isFP64bit()">;
+def NotFP64bit       : Predicate<"!Subtarget.isFP64bit()">;
+def IsSingleFloat    : Predicate<"Subtarget.isSingleFloat()">;
+def IsNotSingleFloat : Predicate<"!Subtarget.isSingleFloat()">;
+
+// FP immediate patterns.
+// Matches the FP constant +0.0 only (isExactlyValue distinguishes the sign,
+// so -0.0 is deliberately handled by the separate leaf below).
+def fpimm0 : PatLeaf<(fpimm), [{
+  return N->isExactlyValue(+0.0);
+}]>;
+
+def fpimm0neg : PatLeaf<(fpimm), [{
+  return N->isExactlyValue(-0.0);
+}]>;
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//
+// A set of multiclasses is used to address the register usage.
+//
+// S32 - single precision in 16 32bit even fp registers
+//       single precision in 32 32bit fp registers in SingleOnly mode
+// S64 - single precision in 32 64bit fp registers (In64BitMode)
+// D32 - double precision in 16 32bit even fp registers
+// D64 - double precision in 32 64bit fp registers (In64BitMode)
+//
+// Only S32 and D32 are supported right now.
+//===----------------------------------------------------------------------===//
+
+// FP load. Parameterized by opcode, mnemonic, destination register class,
+// and memory operand type (mem vs. mem64 for 64-bit pointers).
+class FPLoad<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
+  FMem<op, (outs RC:$ft), (ins MemOpnd:$addr),
+       !strconcat(opstr, "\t$ft, $addr"), [(set RC:$ft, (load_a addr:$addr))],
+       IILoad>;
+
+// FP store.
+class FPStore<bits<6> op, string opstr, RegisterClass RC, Operand MemOpnd>:
+  FMem<op, (outs), (ins RC:$ft, MemOpnd:$addr),
+       !strconcat(opstr, "\t$ft, $addr"), [(store_a RC:$ft, addr:$addr)],
+       IIStore>;
+
+// FP indexed load: address is base + index, both in pointer-sized regs (PRC).
+class FPIdxLoad<bits<6> funct, string opstr, RegisterClass DRC,
+                RegisterClass PRC, PatFrag FOp>:
+  FFMemIdx<funct, (outs DRC:$fd), (ins PRC:$base, PRC:$index),
+           !strconcat(opstr, "\t$fd, $index($base)"),
+           [(set DRC:$fd, (FOp (add PRC:$base, PRC:$index)))]> {
+  let fs = 0;
+}
+
+// FP indexed store.
+class FPIdxStore<bits<6> funct, string opstr, RegisterClass DRC,
+                 RegisterClass PRC, PatFrag FOp>:
+  FFMemIdx<funct, (outs), (ins DRC:$fs, PRC:$base, PRC:$index),
+           !strconcat(opstr, "\t$fs, $index($base)"),
+           [(FOp DRC:$fs, (add PRC:$base, PRC:$index))]> {
+  let fd = 0;
+}
+
+// Instructions that convert an FP value to 32-bit fixed point.
+// Each multiclass emits a _S (single) variant plus a double variant that is
+// split into _D32/_D64 by the FP-register-size predicates: AFGR64 (paired
+// 32-bit regs) when NotFP64bit, FGR64 (true 64-bit regs) when IsFP64bit.
+multiclass FFR1_W_M<bits<6> funct, string opstr> {
+  def _S   : FFR1<funct, 16, opstr, "w.s", FGR32, FGR32>;
+  def _D32 : FFR1<funct, 17, opstr, "w.d", FGR32, AFGR64>,
+             Requires<[NotFP64bit]>;
+  def _D64 : FFR1<funct, 17, opstr, "w.d", FGR32, FGR64>,
+             Requires<[IsFP64bit]>;
+}
+
+// Instructions that convert an FP value to 64-bit fixed point.
+// Only available with 64-bit FP registers, hence the whole multiclass is
+// wrapped in the IsFP64bit predicate (no _D32 variant exists).
+let Predicates = [IsFP64bit] in
+multiclass FFR1_L_M<bits<6> funct, string opstr> {
+  def _S   : FFR1<funct, 16, opstr, "l.s", FGR64, FGR32>;
+  def _D64 : FFR1<funct, 17, opstr, "l.d", FGR64, FGR64>;
+}
+
+// FP-to-FP conversion instructions.
+multiclass FFR1P_M<bits<6> funct, string opstr, SDNode OpNode> {
+  def _S   : FFR1P<funct, 16, opstr, "s", FGR32, FGR32, OpNode>;
+  def _D32 : FFR1P<funct, 17, opstr, "d", AFGR64, AFGR64, OpNode>,
+             Requires<[NotFP64bit]>;
+  def _D64 : FFR1P<funct, 17, opstr, "d", FGR64, FGR64, OpNode>,
+             Requires<[IsFP64bit]>;
+}
+
+// Two-operand FP arithmetic; isComm marks commutative ops (add, mul) so the
+// DAG selector may swap operands.
+multiclass FFR2P_M<bits<6> funct, string opstr, SDNode OpNode, bit isComm = 0> {
+  let isCommutable = isComm in {
+    def _S   : FFR2P<funct, 16, opstr, "s", FGR32, OpNode>;
+    def _D32 : FFR2P<funct, 17, opstr, "d", AFGR64, OpNode>,
+               Requires<[NotFP64bit]>;
+    def _D64 : FFR2P<funct, 17, opstr, "d", FGR64, OpNode>,
+               Requires<[IsFP64bit]>;
+  }
+}
+
+// FP madd/msub/nmadd/nmsub instruction classes.
+// madd.fmt / msub.fmt: fd = (fs * ft) OpNode fr.
+class FMADDSUB<bits<3> funct, bits<3> fmt, string opstr, string fmtstr,
+               SDNode OpNode, RegisterClass RC> :
+  FFMADDSUB<funct, fmt, (outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
+            !strconcat(opstr, ".", fmtstr, "\t$fd, $fr, $fs, $ft"),
+            [(set RC:$fd, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr))]>;
+
+// nmadd.fmt / nmsub.fmt. The negation is modeled as (0.0 - x), which is not
+// equivalent to fneg for NaN inputs; accordingly the instantiations below are
+// guarded by the NoNaNsFPMath predicate.
+class FNMADDSUB<bits<3> funct, bits<3> fmt, string opstr, string fmtstr,
+                SDNode OpNode, RegisterClass RC> :
+  FFMADDSUB<funct, fmt, (outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
+            !strconcat(opstr, ".", fmtstr, "\t$fd, $fr, $fs, $ft"),
+            [(set RC:$fd, (fsub fpimm0, (OpNode (fmul RC:$fs, RC:$ft), RC:$fr)))]>;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Instructions
+//===----------------------------------------------------------------------===//
+defm ROUND_W : FFR1_W_M<0xc, "round">;
+defm ROUND_L : FFR1_L_M<0x8, "round">;
+defm TRUNC_W : FFR1_W_M<0xd, "trunc">;
+defm TRUNC_L : FFR1_L_M<0x9, "trunc">;
+defm CEIL_W  : FFR1_W_M<0xe, "ceil">;
+defm CEIL_L  : FFR1_L_M<0xa, "ceil">;
+defm FLOOR_W : FFR1_W_M<0xf, "floor">;
+defm FLOOR_L : FFR1_L_M<0xb, "floor">;
+defm CVT_W   : FFR1_W_M<0x24, "cvt">;
+defm CVT_L   : FFR1_L_M<0x25, "cvt">;
+
+def CVT_S_W : FFR1<0x20, 20, "cvt", "s.w", FGR32, FGR32>;
+
+let Predicates = [NotFP64bit] in {
+  def CVT_S_D32 : FFR1<0x20, 17, "cvt", "s.d", FGR32, AFGR64>;
+  def CVT_D32_W : FFR1<0x21, 20, "cvt", "d.w", AFGR64, FGR32>;
+  def CVT_D32_S : FFR1<0x21, 16, "cvt", "d.s", AFGR64, FGR32>;
+}
+
+let Predicates = [IsFP64bit] in {
+  def CVT_S_D64 : FFR1<0x20, 17, "cvt", "s.d", FGR32, FGR64>;
+  def CVT_S_L   : FFR1<0x20, 21, "cvt", "s.l", FGR32, FGR64>;
+  def CVT_D64_W : FFR1<0x21, 20, "cvt", "d.w", FGR64, FGR32>;
+  def CVT_D64_S : FFR1<0x21, 16, "cvt", "d.s", FGR64, FGR32>;
+  def CVT_D64_L : FFR1<0x21, 21, "cvt", "d.l", FGR64, FGR64>;
+}
+
+// abs/neg only flip the sign bit on MIPS; that is not IEEE-correct for NaN
+// payloads, so they are restricted to NoNaNsFPMath.
+let Predicates = [NoNaNsFPMath] in {
+  defm FABS : FFR1P_M<0x5, "abs", fabs>;
+  defm FNEG : FFR1P_M<0x7, "neg", fneg>;
+}
+defm FSQRT : FFR1P_M<0x4, "sqrt", fsqrt>;
+
+// The odd-numbered registers are only referenced when doing loads,
+// stores, and moves between floating-point and integer registers.
+// When defining instructions, we reference all 32-bit registers,
+// regardless of register aliasing.
+
+// GPR<->FPR/control-register move format: rt is the GPR encoded in the ft
+// field slot.
+class FFRGPR<bits<5> _fmt, dag outs, dag ins, string asmstr, list<dag> pattern>:
+  FFR<0x11, 0x0, _fmt, outs, ins, asmstr, pattern> {
+  bits<5> rt;
+  let ft = rt;
+  let fd = 0;
+}
+
+/// Move Control Registers From/To CPU Registers
+def CFC1  : FFRGPR<0x2, (outs CPURegs:$rt), (ins CCR:$fs),
+                   "cfc1\t$rt, $fs", []>;
+
+def CTC1  : FFRGPR<0x6, (outs CCR:$fs), (ins CPURegs:$rt),
+                   "ctc1\t$rt, $fs", []>;
+
+def MFC1  : FFRGPR<0x00, (outs CPURegs:$rt), (ins FGR32:$fs),
+                   "mfc1\t$rt, $fs",
+                   [(set CPURegs:$rt, (bitconvert FGR32:$fs))]>;
+
+def MTC1  : FFRGPR<0x04, (outs FGR32:$fs), (ins CPURegs:$rt),
+                   "mtc1\t$rt, $fs",
+                   [(set FGR32:$fs, (bitconvert CPURegs:$rt))]>;
+
+def DMFC1 : FFRGPR<0x01, (outs CPU64Regs:$rt), (ins FGR64:$fs),
+                   "dmfc1\t$rt, $fs",
+                   [(set CPU64Regs:$rt, (bitconvert FGR64:$fs))]>;
+
+def DMTC1 : FFRGPR<0x05, (outs FGR64:$fs), (ins CPU64Regs:$rt),
+                   "dmtc1\t$rt, $fs",
+                   [(set FGR64:$fs, (bitconvert CPU64Regs:$rt))]>;
+
+def FMOV_S   : FFR1<0x6, 16, "mov", "s", FGR32, FGR32>;
+def FMOV_D32 : FFR1<0x6, 17, "mov", "d", AFGR64, AFGR64>,
+               Requires<[NotFP64bit]>;
+def FMOV_D64 : FFR1<0x6, 17, "mov", "d", FGR64, FGR64>,
+               Requires<[IsFP64bit]>;
+
+/// Floating Point Memory Instructions
+// _P8 variants are selected under IsN64 and take 64-bit address operands
+// (mem64 / CPU64Regs); the plain variants cover 32-bit pointers.
+let Predicates = [IsN64] in {
+  def LWC1_P8 : FPLoad<0x31, "lwc1", FGR32, mem64>;
+  def SWC1_P8 : FPStore<0x39, "swc1", FGR32, mem64>;
+  def LDC164_P8 : FPLoad<0x35, "ldc1", FGR64, mem64>;
+  def SDC164_P8 : FPStore<0x3d, "sdc1", FGR64, mem64>;
+}
+
+let Predicates = [NotN64] in {
+  def LWC1 : FPLoad<0x31, "lwc1", FGR32, mem>;
+  def SWC1 : FPStore<0x39, "swc1", FGR32, mem>;
+}
+
+let Predicates = [NotN64, HasMips64] in {
+  def LDC164 : FPLoad<0x35, "ldc1", FGR64, mem>;
+  def SDC164 : FPStore<0x3d, "sdc1", FGR64, mem>;
+}
+
+let Predicates = [NotN64, NotMips64] in {
+  def LDC1 : FPLoad<0x35, "ldc1", AFGR64, mem>;
+  def SDC1 : FPStore<0x3d, "sdc1", AFGR64, mem>;
+}
+
+// Indexed loads and stores.
+let Predicates = [HasMips32r2Or64] in {
+  def LWXC1 : FPIdxLoad<0x0, "lwxc1", FGR32, CPURegs, load_a>;
+  def LUXC1 : FPIdxLoad<0x5, "luxc1", FGR32, CPURegs, load_u>;
+  def SWXC1 : FPIdxStore<0x8, "swxc1", FGR32, CPURegs, store_a>;
+  def SUXC1 : FPIdxStore<0xd, "suxc1", FGR32, CPURegs, store_u>;
+}
+
+let Predicates = [HasMips32r2, NotMips64] in {
+  def LDXC1 : FPIdxLoad<0x1, "ldxc1", AFGR64, CPURegs, load_a>;
+  def SDXC1 : FPIdxStore<0x9, "sdxc1", AFGR64, CPURegs, store_a>;
+}
+
+let Predicates = [HasMips64, NotN64] in {
+  def LDXC164 : FPIdxLoad<0x1, "ldxc1", FGR64, CPURegs, load_a>;
+  def SDXC164 : FPIdxStore<0x9, "sdxc1", FGR64, CPURegs, store_a>;
+}
+
+// n64
+let Predicates = [IsN64] in {
+  def LWXC1_P8 : FPIdxLoad<0x0, "lwxc1", FGR32, CPU64Regs, load_a>;
+  def LUXC1_P8 : FPIdxLoad<0x5, "luxc1", FGR32, CPU64Regs, load_u>;
+  def LDXC164_P8 : FPIdxLoad<0x1, "ldxc1", FGR64, CPU64Regs, load_a>;
+  def SWXC1_P8 : FPIdxStore<0x8, "swxc1", FGR32, CPU64Regs, store_a>;
+  def SUXC1_P8 : FPIdxStore<0xd, "suxc1", FGR32, CPU64Regs, store_u>;
+  def SDXC164_P8 : FPIdxStore<0x9, "sdxc1", FGR64, CPU64Regs, store_a>;
+}
+
+/// Floating-point Arithmetic
+defm FADD : FFR2P_M<0x00, "add", fadd, 1>;
+defm FDIV : FFR2P_M<0x03, "div", fdiv>;
+defm FMUL : FFR2P_M<0x02, "mul", fmul, 1>;
+defm FSUB : FFR2P_M<0x01, "sub", fsub>;
+
+// Fused multiply-add family (MIPS32r2 and up).
+let Predicates = [HasMips32r2] in {
+  def MADD_S : FMADDSUB<0x4, 0, "madd", "s", fadd, FGR32>;
+  def MSUB_S : FMADDSUB<0x5, 0, "msub", "s", fsub, FGR32>;
+}
+
+let Predicates = [HasMips32r2, NoNaNsFPMath] in {
+  def NMADD_S : FNMADDSUB<0x6, 0, "nmadd", "s", fadd, FGR32>;
+  def NMSUB_S : FNMADDSUB<0x7, 0, "nmsub", "s", fsub, FGR32>;
+}
+
+let Predicates = [HasMips32r2, NotFP64bit] in {
+  def MADD_D32 : FMADDSUB<0x4, 1, "madd", "d", fadd, AFGR64>;
+  def MSUB_D32 : FMADDSUB<0x5, 1, "msub", "d", fsub, AFGR64>;
+}
+
+let Predicates = [HasMips32r2, NotFP64bit, NoNaNsFPMath] in {
+  def NMADD_D32 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, AFGR64>;
+  def NMSUB_D32 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, AFGR64>;
+}
+
+let Predicates = [HasMips32r2, IsFP64bit] in {
+  def MADD_D64 : FMADDSUB<0x4, 1, "madd", "d", fadd, FGR64>;
+  def MSUB_D64 : FMADDSUB<0x5, 1, "msub", "d", fsub, FGR64>;
+}
+
+let Predicates = [HasMips32r2, IsFP64bit, NoNaNsFPMath] in {
+  def NMADD_D64 : FNMADDSUB<0x6, 1, "nmadd", "d", fadd, FGR64>;
+  def NMSUB_D64 : FNMADDSUB<0x7, 1, "nmsub", "d", fsub, FGR64>;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Branch Codes
+//===----------------------------------------------------------------------===//
+// Mips branch codes. These correspond to condcode in MipsInstrInfo.h.
+// They must be kept in synch.
+def MIPS_BRANCH_F  : PatLeaf<(i32 0)>;
+def MIPS_BRANCH_T  : PatLeaf<(i32 1)>;
+
+/// Floating Point Branch of False/True (Likely)
+// nd = "nullify delay slot" (likely form), tf = branch on true vs. false;
+// both land in bits 17/16 of the encoding.
+let isBranch=1, isTerminator=1, hasDelaySlot=1, base=0x8, Uses=[FCR31] in
+  class FBRANCH<bits<1> nd, bits<1> tf, PatLeaf op, string asmstr> :
+    FFI<0x11, (outs), (ins brtarget:$dst), !strconcat(asmstr, "\t$dst"),
+        [(MipsFPBrcond op, bb:$dst)]> {
+  let Inst{20-18} = 0;
+  let Inst{17} = nd;
+  let Inst{16} = tf;
+}
+
+def BC1F : FBRANCH<0, 0, MIPS_BRANCH_F, "bc1f">;
+def BC1T : FBRANCH<0, 1, MIPS_BRANCH_T, "bc1t">;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Flag Conditions
+//===----------------------------------------------------------------------===//
+// Mips condition codes. They must correspond to condcode in MipsInstrInfo.h.
+// They must be kept in synch.
+// Values 0-15 are the cond field of the c.cond.fmt encoding.
+def MIPS_FCOND_F    : PatLeaf<(i32 0)>;
+def MIPS_FCOND_UN   : PatLeaf<(i32 1)>;
+def MIPS_FCOND_OEQ  : PatLeaf<(i32 2)>;
+def MIPS_FCOND_UEQ  : PatLeaf<(i32 3)>;
+def MIPS_FCOND_OLT  : PatLeaf<(i32 4)>;
+def MIPS_FCOND_ULT  : PatLeaf<(i32 5)>;
+def MIPS_FCOND_OLE  : PatLeaf<(i32 6)>;
+def MIPS_FCOND_ULE  : PatLeaf<(i32 7)>;
+def MIPS_FCOND_SF   : PatLeaf<(i32 8)>;
+def MIPS_FCOND_NGLE : PatLeaf<(i32 9)>;
+def MIPS_FCOND_SEQ  : PatLeaf<(i32 10)>;
+def MIPS_FCOND_NGL  : PatLeaf<(i32 11)>;
+def MIPS_FCOND_LT   : PatLeaf<(i32 12)>;
+def MIPS_FCOND_NGE  : PatLeaf<(i32 13)>;
+def MIPS_FCOND_LE   : PatLeaf<(i32 14)>;
+def MIPS_FCOND_NGT  : PatLeaf<(i32 15)>;
+
+// c.cond.fmt: the condition code is a condcode operand so the printer can
+// emit the mnemonic suffix ($cc appears inside the asm string).
+class FCMP<bits<5> fmt, RegisterClass RC, string typestr> :
+  FCC<fmt, (outs), (ins RC:$fs, RC:$ft, condcode:$cc),
+      !strconcat("c.$cc.", typestr, "\t$fs, $ft"),
+      [(MipsFPCmp RC:$fs, RC:$ft, imm:$cc)]>;
+
+/// Floating Point Compare
+// The result is written into the FP condition register FCR31 (modeled as an
+// implicit def) rather than a GPR.
+let Defs=[FCR31] in {
+  def FCMP_S32 : FCMP<0x10, FGR32, "s">;
+  def FCMP_D32 : FCMP<0x11, AFGR64, "d">, Requires<[NotFP64bit]>;
+  def FCMP_D64 : FCMP<0x11, FGR64, "d">, Requires<[IsFP64bit]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Floating Point Pseudo-Instructions
+//===----------------------------------------------------------------------===//
+def MOVCCRToCCR : MipsPseudo<(outs CCR:$dst), (ins CCR:$src),
+                             "# MOVCCRToCCR", []>;
+
+// This pseudo instr gets expanded into 2 mtc1 instrs after register
+// allocation.
+def BuildPairF64 :
+  MipsPseudo<(outs AFGR64:$dst),
+             (ins CPURegs:$lo, CPURegs:$hi), "",
+             [(set AFGR64:$dst, (MipsBuildPairF64 CPURegs:$lo, CPURegs:$hi))]>;
+
+// This pseudo instr gets expanded into 2 mfc1 instrs after register
+// allocation.
+// if n is 0, lower part of src is extracted.
+// if n is 1, higher part of src is extracted.
+def ExtractElementF64 :
+  MipsPseudo<(outs CPURegs:$dst),
+             (ins AFGR64:$src, i32imm:$n), "",
+             [(set CPURegs:$dst,
+                   (MipsExtractElementF64 AFGR64:$src, imm:$n))]>;
+
+//===----------------------------------------------------------------------===//
+// Floating Point Patterns
+//===----------------------------------------------------------------------===//
+// FP zero is materialized by moving integer $zero into an FP register.
+def : Pat<(f32 fpimm0), (MTC1 ZERO)>;
+def : Pat<(f32 fpimm0neg), (FNEG_S (MTC1 ZERO))>;
+
+// int<->fp conversions go through a GPR<->FPR move plus a cvt/trunc.
+def : Pat<(f32 (sint_to_fp CPURegs:$src)), (CVT_S_W (MTC1 CPURegs:$src))>;
+def : Pat<(i32 (fp_to_sint FGR32:$src)), (MFC1 (TRUNC_W_S FGR32:$src))>;
+
+let Predicates = [NotFP64bit] in {
+  def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D32_W (MTC1 CPURegs:$src))>;
+  def : Pat<(i32 (fp_to_sint AFGR64:$src)), (MFC1 (TRUNC_W_D32 AFGR64:$src))>;
+  def : Pat<(f32 (fround AFGR64:$src)), (CVT_S_D32 AFGR64:$src)>;
+  def : Pat<(f64 (fextend FGR32:$src)), (CVT_D32_S FGR32:$src)>;
+}
+
+let Predicates = [IsFP64bit] in {
+  def : Pat<(f64 fpimm0), (DMTC1 ZERO_64)>;
+  def : Pat<(f64 fpimm0neg), (FNEG_D64 (DMTC1 ZERO_64))>;
+
+  def : Pat<(f64 (sint_to_fp CPURegs:$src)), (CVT_D64_W (MTC1 CPURegs:$src))>;
+  def : Pat<(f32 (sint_to_fp CPU64Regs:$src)),
+            (CVT_S_L (DMTC1 CPU64Regs:$src))>;
+  def : Pat<(f64 (sint_to_fp CPU64Regs:$src)),
+            (CVT_D64_L (DMTC1 CPU64Regs:$src))>;
+
+  def : Pat<(i32 (fp_to_sint FGR64:$src)), (MFC1 (TRUNC_W_D64 FGR64:$src))>;
+  def : Pat<(i64 (fp_to_sint FGR32:$src)), (DMFC1 (TRUNC_L_S FGR32:$src))>;
+  def : Pat<(i64 (fp_to_sint FGR64:$src)), (DMFC1 (TRUNC_L_D64 FGR64:$src))>;
+
+  def : Pat<(f32 (fround FGR64:$src)), (CVT_S_D64 FGR64:$src)>;
+  def : Pat<(f64 (fextend FGR32:$src)), (CVT_D64_S FGR32:$src)>;
+}
+
+// Patterns for unaligned floating point loads and stores.
+// Unaligned f32 accesses are selected to the indexed luxc1/suxc1 forms with a
+// zero index register.
+let Predicates = [HasMips32r2Or64, NotN64] in {
+  def : Pat<(f32 (load_u CPURegs:$addr)), (LUXC1 CPURegs:$addr, ZERO)>;
+  def : Pat<(store_u FGR32:$src, CPURegs:$addr),
+            (SUXC1 FGR32:$src, CPURegs:$addr, ZERO)>;
+}
+
+let Predicates = [IsN64] in {
+  def : Pat<(f32 (load_u CPU64Regs:$addr)), (LUXC1_P8 CPU64Regs:$addr, ZERO_64)>;
+  def : Pat<(store_u FGR32:$src, CPU64Regs:$addr),
+            (SUXC1_P8 FGR32:$src, CPU64Regs:$addr, ZERO_64)>;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
new file mode 100644
index 0000000..4555303
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrFormats.td
@@ -0,0 +1,329 @@
+//===-- MipsInstrFormats.td - Mips Instruction Formats -----*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+//  Describe MIPS instructions format
+//
+//  CPU INSTRUCTION FORMATS
+//
+//  opcode  - operation code.
+//  rs      - src reg.
+//  rt      - dst reg (on a 2 regs instr) or src reg (on a 3 reg instr).
+//  rd      - dst reg, only used on 3 regs instr.
+//  shamt   - only used on shift instructions, contains the shift amount.
+//  funct   - combined with opcode field give us an operation code.
+//
+//===----------------------------------------------------------------------===//
+
+// Format specifies the encoding used by the instruction.  This is part of the
+// ad-hoc solution used to emit machine instruction encodings by our machine
+// code emitter.
+class Format<bits<4> val> {
+  bits<4> Value = val;
+}
+
+def Pseudo    : Format<0>;
+def FrmR      : Format<1>;
+def FrmI      : Format<2>;
+def FrmJ      : Format<3>;
+def FrmFR     : Format<4>;
+def FrmFI     : Format<5>;
+def FrmOther  : Format<6>; // Instruction w/ a custom format
+
+// Generic Mips Format
+class MipsInst<dag outs, dag ins, string asmstr, list<dag> pattern,
+               InstrItinClass itin, Format f>: Instruction
+{
+  field bits<32> Inst;
+  Format Form = f;
+
+  let Namespace = "Mips";
+
+  bits<6> Opcode = 0;
+
+  // Top 6 bits are the 'opcode' field
+  let Inst{31-26} = Opcode;
+
+  let OutOperandList = outs;
+  let InOperandList  = ins;
+
+  let AsmString   = asmstr;
+  let Pattern     = pattern;
+  let Itinerary   = itin;
+
+  //
+  // Attributes specific to Mips instructions...
+  //
+  bits<4> FormBits = Form.Value;
+
+  // The format is stored in the low 4 bits of TSFlags so C++ code can
+  // recover it at run time.
+  // TSFlags layout should be kept in sync with MipsInstrInfo.h.
+  let TSFlags{3-0}   = FormBits;
+}
+
+// Mips Pseudo Instructions Format
+class MipsPseudo<dag outs, dag ins, string asmstr, list<dag> pattern>:
+  MipsInst<outs, ins, asmstr, pattern, IIPseudo, Pseudo> {
+  let isCodeGenOnly = 1;
+  let isPseudo = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Format R instruction class in Mips : <|opcode|rs|rt|rd|shamt|funct|>
+//===----------------------------------------------------------------------===//
+
+class FR<bits<6> op, bits<6> _funct, dag outs, dag ins, string asmstr,
+         list<dag> pattern, InstrItinClass itin>:
+  MipsInst<outs, ins, asmstr, pattern, itin, FrmR>
+{
+  bits<5>  rd;
+  bits<5>  rs;
+  bits<5>  rt;
+  bits<5>  shamt;
+  bits<6>  funct;
+
+  let Opcode = op;
+  let funct  = _funct;
+
+  let Inst{25-21} = rs;
+  let Inst{20-16} = rt;
+  let Inst{15-11} = rd;
+  let Inst{10-6}  = shamt;
+  let Inst{5-0}   = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Format I instruction class in Mips : <|opcode|rs|rt|immediate|>
+//===----------------------------------------------------------------------===//
+
+class FI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin>: MipsInst<outs, ins, asmstr, pattern, itin, FrmI>
+{
+  bits<5>  rt;
+  bits<5>  rs;
+  bits<16> imm16;
+
+  let Opcode = op;
+
+  let Inst{25-21} = rs;
+  let Inst{20-16} = rt;
+  let Inst{15-0}  = imm16;
+}
+
+// NOTE(review): BranchBase has the same field layout and encoding as FI;
+// presumably it is a distinct class so branch instructions can be told apart
+// by their TableGen class — confirm before merging the two.
+class BranchBase<bits<6> op, dag outs, dag ins, string asmstr,
+                 list<dag> pattern, InstrItinClass itin>:
+  MipsInst<outs, ins, asmstr, pattern, itin, FrmI>
+{
+  bits<5>  rs;
+  bits<5>  rt;
+  bits<16> imm16;
+
+  let Opcode = op;
+
+  let Inst{25-21} = rs;
+  let Inst{20-16} = rt;
+  let Inst{15-0}  = imm16;
+}
+
+//===----------------------------------------------------------------------===//
+// Format J instruction class in Mips : <|opcode|address|>
+//===----------------------------------------------------------------------===//
+
+class FJ<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
+         InstrItinClass itin>: MipsInst<outs, ins, asmstr, pattern, itin, FrmJ>
+{
+  bits<26> addr;
+
+  let Opcode = op;
+
+  let Inst{25-0} = addr;
+}
+
+//===----------------------------------------------------------------------===//
+//
+//  FLOATING POINT INSTRUCTION FORMATS
+//
+//  opcode  - operation code.
+//  fs      - src reg.
+//  ft      - dst reg (on a 2 regs instr) or src reg (on a 3 reg instr).
+//  fd      - dst reg, only used on 3 regs instr.
+//  fmt     - double or single precision.
+//  funct   - combined with opcode field give us an operation code.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Format FR instruction class in Mips : <|opcode|fmt|ft|fs|fd|funct|>
+//===----------------------------------------------------------------------===//
+
+class FFR<bits<6> op, bits<6> _funct, bits<5> _fmt, dag outs, dag ins,
+          string asmstr, list<dag> pattern> :
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmFR>
+{
+  bits<5>  fd;
+  bits<5>  fs;
+  bits<5>  ft;
+  bits<5>  fmt;
+  bits<6>  funct;
+
+  let Opcode = op;
+  let funct  = _funct;
+  let fmt    = _fmt;
+
+  let Inst{25-21} = fmt;
+  let Inst{20-16} = ft;
+  let Inst{15-11} = fs;
+  let Inst{10-6}  = fd;
+  let Inst{5-0}   = funct;
+}
+
+//===----------------------------------------------------------------------===//
+// Format FI instruction class in Mips : <|opcode|base|ft|immediate|>
+//===----------------------------------------------------------------------===//
+
+class FFI<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern>:
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmFI>
+{
+  bits<5>  ft;
+  bits<5>  base;
+  bits<16> imm16;
+
+  let Opcode = op;
+
+  let Inst{25-21} = base;
+  let Inst{20-16} = ft;
+  let Inst{15-0}  = imm16;
+}
+
+//===----------------------------------------------------------------------===//
+// Compare instruction class in Mips : <|010001|fmt|ft|fs|0000011|condcode|>
+//===----------------------------------------------------------------------===//
+
+// c.cond.fmt encoding: note cc is 4 bits here, unlike the 3-bit cc field in
+// the conditional-move formats below.
+class FCC<bits<5> _fmt, dag outs, dag ins, string asmstr, list<dag> pattern> :
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+{
+  bits<5>  fs;
+  bits<5>  ft;
+  bits<4>  cc;
+  bits<5>  fmt;
+
+  let Opcode = 0x11;
+  let fmt = _fmt;
+
+  let Inst{25-21} = fmt;
+  let Inst{20-16} = ft;
+  let Inst{15-11} = fs;
+  let Inst{10-6}  = 0;
+  let Inst{5-4}   = 0b11;
+  let Inst{3-0}   = cc;
+}
+
+
+// Integer conditional move on FP condition (movt/movf); tf selects the
+// branch-on-true vs. branch-on-false form.
+class FCMOV<bits<1> _tf, dag outs, dag ins, string asmstr,
+            list<dag> pattern> :
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+{
+  bits<5>  rd;
+  bits<5>  rs;
+  bits<3>  cc;
+  bits<1>  tf;
+
+  let Opcode = 0;
+  let tf = _tf;
+
+  let Inst{25-21} = rs;
+  let Inst{20-18} = cc;
+  let Inst{17}    = 0;
+  let Inst{16}    = tf;
+  let Inst{15-11} = rd;
+  let Inst{10-6}  = 0;
+  let Inst{5-0}   = 1;
+}
+
+// FP conditional move on FP condition (movt.fmt/movf.fmt).
+class FFCMOV<bits<5> _fmt, bits<1> _tf, dag outs, dag ins, string asmstr,
+             list<dag> pattern> :
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+{
+  bits<5>  fd;
+  bits<5>  fs;
+  bits<3>  cc;
+  bits<5>  fmt;
+  bits<1>  tf;
+
+  let Opcode = 17;
+  let fmt = _fmt;
+  let tf = _tf;
+
+  let Inst{25-21} = fmt;
+  let Inst{20-18} = cc;
+  let Inst{17}    = 0;
+  let Inst{16}    = tf;
+  let Inst{15-11} = fs;
+  let Inst{10-6}  = fd;
+  let Inst{5-0}   = 17;
+}
+
+// FP unary instructions without patterns.
+class FFR1<bits<6> funct, bits<5> fmt, string opstr, string fmtstr,
+           RegisterClass DstRC, RegisterClass SrcRC> :
+  FFR<0x11, funct, fmt, (outs DstRC:$fd), (ins SrcRC:$fs),
+      !strconcat(opstr, ".", fmtstr, "\t$fd, $fs"), []> {
+  let ft = 0;
+}
+
+// FP unary instructions with patterns.
+class FFR1P<bits<6> funct, bits<5> fmt, string opstr, string fmtstr,
+            RegisterClass DstRC, RegisterClass SrcRC, SDNode OpNode> :
+  FFR<0x11, funct, fmt, (outs DstRC:$fd), (ins SrcRC:$fs),
+      !strconcat(opstr, ".", fmtstr, "\t$fd, $fs"),
+      [(set DstRC:$fd, (OpNode SrcRC:$fs))]> {
+  let ft = 0;
+}
+
+// FP binary instructions with patterns.
+class FFR2P<bits<6> funct, bits<5> fmt, string opstr,
+            string fmtstr, RegisterClass RC, SDNode OpNode> :
+  FFR<0x11, funct, fmt, (outs RC:$fd), (ins RC:$fs, RC:$ft),
+      !strconcat(opstr, ".", fmtstr, "\t$fd, $fs, $ft"),
+      [(set RC:$fd, (OpNode RC:$fs, RC:$ft))]>;
+
+// Floating point madd/msub/nmadd/nmsub.
+// Four-register fused-op format; opcode 0x13 with funct/fmt packed into the
+// low six bits.
+class FFMADDSUB<bits<3> funct, bits<3> fmt, dag outs, dag ins, string asmstr,
+                list<dag> pattern>
+  : MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther> {
+  bits<5> fd;
+  bits<5> fr;
+  bits<5> fs;
+  bits<5> ft;
+
+  let Opcode = 0x13;
+  let Inst{25-21} = fr;
+  let Inst{20-16} = ft;
+  let Inst{15-11} = fs;
+  let Inst{10-6}  = fd;
+  let Inst{5-3}   = funct;
+  let Inst{2-0}   = fmt;
+}
+
+// FP indexed load/store instructions.
+class FFMemIdx<bits<6> funct, dag outs, dag ins, string asmstr,
+               list<dag> pattern> :
+  MipsInst<outs, ins, asmstr, pattern, NoItinerary, FrmOther>
+{
+  bits<5>  base;
+  bits<5>  index;
+  bits<5>  fs;
+  bits<5>  fd;
+
+  let Opcode = 0x13;
+
+  let Inst{25-21} = base;
+  let Inst{20-16} = index;
+  let Inst{15-11} = fs;
+  let Inst{10-6}  = fd;
+  let Inst{5-0}   = funct;
+}
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
new file mode 100644
index 0000000..a3a18bf
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.cpp
@@ -0,0 +1,456 @@
+//===-- MipsInstrInfo.cpp - Mips Instruction Information ------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Mips implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MipsInstrInfo.h"
+#include "MipsTargetMachine.h"
+#include "MipsMachineFunction.h"
+#include "InstPrinter/MipsInstPrinter.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/ADT/STLExtras.h"
+
+#define GET_INSTRINFO_CTOR
+#include "MipsGenInstrInfo.inc"
+
+using namespace llvm;
+
+// Cache the ABI (N64) and the unconditional-branch opcode up front:
+// B is used under PIC, J otherwise — presumably because J encodes an
+// absolute target; confirm against MipsISelLowering if this changes.
+MipsInstrInfo::MipsInstrInfo(MipsTargetMachine &tm)
+  : MipsGenInstrInfo(Mips::ADJCALLSTACKDOWN, Mips::ADJCALLSTACKUP),
+    TM(tm), IsN64(TM.getSubtarget<MipsSubtarget>().isABI_N64()),
+    RI(*TM.getSubtargetImpl(), *this),
+    UncondBrOpc(TM.getRelocationModel() == Reloc::PIC_ ? Mips::B : Mips::J) {}
+
+const MipsRegisterInfo &MipsInstrInfo::getRegisterInfo() const {
+  return RI;
+}
+
+// True iff the operand is an immediate with value zero.
+static bool isZeroImm(const MachineOperand &op) {
+  return op.isImm() && op.getImm() == 0;
+}
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot.  If
+/// not, return 0.  This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned MipsInstrInfo::
+isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+  unsigned Opc = MI->getOpcode();
+
+  // All direct loads, integer and FP, across both 32- and 64-bit (_P8/N64)
+  // variants.  Operand layout: 0 = dest reg, 1 = frame index, 2 = offset.
+  if ((Opc == Mips::LW)    || (Opc == Mips::LW_P8)  || (Opc == Mips::LD) ||
+      (Opc == Mips::LD_P8) || (Opc == Mips::LWC1)   || (Opc == Mips::LWC1_P8) ||
+      (Opc == Mips::LDC1)  || (Opc == Mips::LDC164) ||
+      (Opc == Mips::LDC164_P8)) {
+    if ((MI->getOperand(1).isFI()) && // is a stack slot
+        (MI->getOperand(2).isImm()) &&  // the imm is zero
+        (isZeroImm(MI->getOperand(2)))) {
+      FrameIndex = MI->getOperand(1).getIndex();
+      return MI->getOperand(0).getReg();
+    }
+  }
+
+  return 0;
+}
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the loaded stack slot.  If
+/// not, return 0.  This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned MipsInstrInfo::
+isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const
+{
+  unsigned Opc = MI->getOpcode();
+
+  // Mirror of isLoadFromStackSlot: same stack-slot shape check, store opcodes.
+  if ((Opc == Mips::SW)    || (Opc == Mips::SW_P8)  || (Opc == Mips::SD) ||
+      (Opc == Mips::SD_P8) || (Opc == Mips::SWC1)   || (Opc == Mips::SWC1_P8) ||
+      (Opc == Mips::SDC1)  || (Opc == Mips::SDC164) ||
+      (Opc == Mips::SDC164_P8)) {
+    if ((MI->getOperand(1).isFI()) && // is a stack slot
+        (MI->getOperand(2).isImm()) &&  // the imm is zero
+        (isZeroImm(MI->getOperand(2)))) {
+      FrameIndex = MI->getOperand(1).getIndex();
+      return MI->getOperand(0).getReg();
+    }
+  }
+  return 0;
+}
+
+/// insertNoop - If data hazard condition is found insert the target nop
+/// instruction.
// Insert a target NOP before iterator MI; used to fill hazard slots.
void MipsInstrInfo::
insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const
{
  DebugLoc DL;
  BuildMI(MBB, MI, DL, get(Mips::NOP));
}

// Emit a single instruction that copies SrcReg into DestReg, choosing the
// opcode by the register classes of the two operands.  A register that is
// implicit in the chosen opcode (HI/LO accumulators) is cleared to 0 so it
// is not added as an explicit operand below.
void MipsInstrInfo::
copyPhysReg(MachineBasicBlock &MBB,
            MachineBasicBlock::iterator I, DebugLoc DL,
            unsigned DestReg, unsigned SrcReg,
            bool KillSrc) const {
  unsigned Opc = 0, ZeroReg = 0;

  if (Mips::CPURegsRegClass.contains(DestReg)) { // Copy to CPU Reg.
    if (Mips::CPURegsRegClass.contains(SrcReg))
      // GPR->GPR move is ADDu dst, src, $zero.
      Opc = Mips::ADDu, ZeroReg = Mips::ZERO;
    else if (Mips::CCRRegClass.contains(SrcReg))
      Opc = Mips::CFC1;
    else if (Mips::FGR32RegClass.contains(SrcReg))
      Opc = Mips::MFC1;
    else if (SrcReg == Mips::HI)
      Opc = Mips::MFHI, SrcReg = 0;
    else if (SrcReg == Mips::LO)
      Opc = Mips::MFLO, SrcReg = 0;
  }
  else if (Mips::CPURegsRegClass.contains(SrcReg)) { // Copy from CPU Reg.
    if (Mips::CCRRegClass.contains(DestReg))
      Opc = Mips::CTC1;
    else if (Mips::FGR32RegClass.contains(DestReg))
      Opc = Mips::MTC1;
    else if (DestReg == Mips::HI)
      Opc = Mips::MTHI, DestReg = 0;
    else if (DestReg == Mips::LO)
      Opc = Mips::MTLO, DestReg = 0;
  }
  else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_S;
  else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D32;
  else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
    Opc = Mips::FMOV_D64;
  else if (Mips::CCRRegClass.contains(DestReg, SrcReg))
    Opc = Mips::MOVCCRToCCR;
  else if (Mips::CPU64RegsRegClass.contains(DestReg)) { // Copy to CPU64 Reg.
    if (Mips::CPU64RegsRegClass.contains(SrcReg))
      Opc = Mips::DADDu, ZeroReg = Mips::ZERO_64;
    else if (SrcReg == Mips::HI64)
      Opc = Mips::MFHI64, SrcReg = 0;
    else if (SrcReg == Mips::LO64)
      Opc = Mips::MFLO64, SrcReg = 0;
    else if (Mips::FGR64RegClass.contains(SrcReg))
      Opc = Mips::DMFC1;
  }
  else if (Mips::CPU64RegsRegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
    if (DestReg == Mips::HI64)
      Opc = Mips::MTHI64, DestReg = 0;
    else if (DestReg == Mips::LO64)
      Opc = Mips::MTLO64, DestReg = 0;
    else if (Mips::FGR64RegClass.contains(DestReg))
      Opc = Mips::DMTC1;
  }

  assert(Opc && "Cannot copy registers");

  MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));

  // Operands that were zeroed above are implicit in Opc and are skipped.
  if (DestReg)
    MIB.addReg(DestReg, RegState::Define);

  if (ZeroReg)
    MIB.addReg(ZeroReg);

  if (SrcReg)
    MIB.addReg(SrcReg, getKillRegState(KillSrc));
}

// Build a MachineMemOperand describing an access (Flag = MOLoad/MOStore)
// to fixed stack object FI, using the frame object's size and alignment.
static MachineMemOperand* GetMemOperand(MachineBasicBlock &MBB, int FI,
                                        unsigned Flag) {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  return MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI), Flag,
                                 MFI.getObjectSize(FI), Align);
}

// Spill SrcReg to stack slot FI.  The store opcode depends on the register
// class and (for classes with a "_P8" variant) on whether the N64 ABI is in
// effect.
void MipsInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);

  unsigned Opc = 0;

  if (RC == Mips::CPURegsRegisterClass)
    Opc = IsN64 ? Mips::SW_P8 : Mips::SW;
  else if (RC == Mips::CPU64RegsRegisterClass)
    Opc = IsN64 ? Mips::SD_P8 : Mips::SD;
  else if (RC == Mips::FGR32RegisterClass)
    Opc = IsN64 ? Mips::SWC1_P8 : Mips::SWC1;
  else if (RC == Mips::AFGR64RegisterClass)
    Opc = Mips::SDC1;
  else if (RC == Mips::FGR64RegisterClass)
    Opc = IsN64 ? Mips::SDC164_P8 : Mips::SDC164;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
    .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
}

// Reload DestReg from stack slot FI; opcode selection mirrors
// storeRegToStackSlot above.
void MipsInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const
{
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();
  MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
  unsigned Opc = 0;

  if (RC == Mips::CPURegsRegisterClass)
    Opc = IsN64 ? Mips::LW_P8 : Mips::LW;
  else if (RC == Mips::CPU64RegsRegisterClass)
    Opc = IsN64 ? Mips::LD_P8 : Mips::LD;
  else if (RC == Mips::FGR32RegisterClass)
    Opc = IsN64 ? Mips::LWC1_P8 : Mips::LWC1;
  else if (RC == Mips::AFGR64RegisterClass)
    Opc = Mips::LDC1;
  else if (RC == Mips::FGR64RegisterClass)
    Opc = IsN64 ? Mips::LDC164_P8 : Mips::LDC164;

  assert(Opc && "Register class not handled!");
  BuildMI(MBB, I, DL, get(Opc), DestReg).addFrameIndex(FI).addImm(0)
    .addMemOperand(MMO);
}

// Create a DBG_VALUE describing a variable that lives in stack slot FrameIx.
MachineInstr*
MipsInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx,
                                        uint64_t Offset, const MDNode *MDPtr,
                                        DebugLoc DL) const {
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Mips::DBG_VALUE))
    .addFrameIndex(FrameIx).addImm(0).addImm(Offset).addMetadata(MDPtr);
  return &*MIB;
}

//===----------------------------------------------------------------------===//
// Branch Analysis
//===----------------------------------------------------------------------===//

// Return Opc if it is a branch this pass knows how to analyze (direct
// conditional branches, B and J), otherwise 0.  Indirect jumps are
// deliberately excluded.
static unsigned GetAnalyzableBrOpc(unsigned Opc) {
  return (Opc == Mips::BEQ    || Opc == Mips::BNE    || Opc == Mips::BGTZ   ||
          Opc == Mips::BGEZ   || Opc == Mips::BLTZ   || Opc == Mips::BLEZ   ||
          Opc == Mips::BEQ64  || Opc == Mips::BNE64  || Opc == Mips::BGTZ64 ||
          Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 ||
          Opc == Mips::BC1T   || Opc == Mips::BC1F   || Opc == Mips::B      ||
          Opc == Mips::J) ?
         Opc : 0;
}

/// GetOppositeBranchOpc - Return the inverse of the specified
/// opcode, e.g. turning BEQ to BNE.
unsigned Mips::GetOppositeBranchOpc(unsigned Opc)
{
  switch (Opc) {
  default:           llvm_unreachable("Illegal opcode!");
  case Mips::BEQ:    return Mips::BNE;
  case Mips::BNE:    return Mips::BEQ;
  case Mips::BGTZ:   return Mips::BLEZ;
  case Mips::BGEZ:   return Mips::BLTZ;
  case Mips::BLTZ:   return Mips::BGEZ;
  case Mips::BLEZ:   return Mips::BGTZ;
  case Mips::BEQ64:  return Mips::BNE64;
  case Mips::BNE64:  return Mips::BEQ64;
  case Mips::BGTZ64: return Mips::BLEZ64;
  case Mips::BGEZ64: return Mips::BLTZ64;
  case Mips::BLTZ64: return Mips::BGEZ64;
  case Mips::BLEZ64: return Mips::BGTZ64;
  case Mips::BC1T:   return Mips::BC1F;
  case Mips::BC1F:   return Mips::BC1T;
  }
}

// Decompose a conditional branch into its target block and a Cond vector of
// the form [opcode, operand0, ...], the format AnalyzeBranch reports and
// BuildCondBr consumes.
static void AnalyzeCondBr(const MachineInstr* Inst, unsigned Opc,
                          MachineBasicBlock *&BB,
                          SmallVectorImpl<MachineOperand>& Cond) {
  assert(GetAnalyzableBrOpc(Opc) && "Not an analyzable branch");
  int NumOp = Inst->getNumExplicitOperands();

  // for both int and fp branches, the last explicit operand is the
  // MBB.
  BB = Inst->getOperand(NumOp-1).getMBB();
  Cond.push_back(MachineOperand::CreateImm(Opc));

  for (int i=0; i<NumOp-1; i++)
    Cond.push_back(Inst->getOperand(i));
}

// Standard TargetInstrInfo branch analysis: walks the block's terminators
// from the end (skipping debug values) and classifies the block as
// fallthrough, single conditional/unconditional branch, or a
// conditional+unconditional pair.  Returns true when the terminators cannot
// be understood.
bool MipsInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const
{
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();

  // Skip all the debug instructions.
  while (I != REnd && I->isDebugValue())
    ++I;

  if (I == REnd || !isUnpredicatedTerminator(&*I)) {
    // If this block ends with no branches (it just falls through to its succ)
    // just return false, leaving TBB/FBB null.
    TBB = FBB = NULL;
    return false;
  }

  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // Not an analyzable branch (must be an indirect jump).
  if (!GetAnalyzableBrOpc(LastOpc))
    return true;

  // Get the second to last instruction in the block.
  unsigned SecondLastOpc = 0;
  MachineInstr *SecondLastInst = NULL;

  if (++I != REnd) {
    SecondLastInst = &*I;
    SecondLastOpc = GetAnalyzableBrOpc(SecondLastInst->getOpcode());

    // Not an analyzable branch (must be an indirect jump).
    if (isUnpredicatedTerminator(SecondLastInst) && !SecondLastOpc)
      return true;
  }

  // If there is only one terminator instruction, process it.
  if (!SecondLastOpc) {
    // Unconditional branch
    if (LastOpc == UncondBrOpc) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }

    // Conditional branch
    AnalyzeCondBr(LastInst, LastOpc, TBB, Cond);
    return false;
  }

  // If we reached here, there are two branches.
  // If there are three terminators, we don't know what sort of block this is.
  if (++I != REnd && isUnpredicatedTerminator(&*I))
    return true;

  // If second to last instruction is an unconditional branch,
  // analyze it and remove the last instruction.
  if (SecondLastOpc == UncondBrOpc) {
    // Return if the last instruction cannot be removed.
    if (!AllowModify)
      return true;

    TBB = SecondLastInst->getOperand(0).getMBB();
    LastInst->eraseFromParent();
    return false;
  }

  // Conditional branch followed by an unconditional branch.
  // The last one must be unconditional.
  if (LastOpc != UncondBrOpc)
    return true;

  AnalyzeCondBr(SecondLastInst, SecondLastOpc, TBB, Cond);
  FBB = LastInst->getOperand(0).getMBB();

  return false;
}

// Emit one conditional branch to TBB from the [opcode, regs...] Cond vector
// produced by AnalyzeCondBr.
void MipsInstrInfo::BuildCondBr(MachineBasicBlock &MBB,
                                MachineBasicBlock *TBB, DebugLoc DL,
                                const SmallVectorImpl<MachineOperand>& Cond)
  const {
  unsigned Opc = Cond[0].getImm();
  const MCInstrDesc &MCID = get(Opc);
  MachineInstrBuilder MIB = BuildMI(&MBB, DL, MCID);

  for (unsigned i = 1; i < Cond.size(); ++i)
    MIB.addReg(Cond[i].getReg());

  MIB.addMBB(TBB);
}

// Insert branch code at the end of MBB and return the number of
// instructions inserted (1 or 2).
unsigned MipsInstrInfo::
InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
             MachineBasicBlock *FBB,
             const SmallVectorImpl<MachineOperand> &Cond,
             DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  // # of condition operands:
  //  Unconditional branches: 0
  //  Floating point branches: 1 (opc)
  //  Int BranchZero: 2 (opc, reg)
  //  Int Branch: 3 (opc, reg0, reg1)
  assert((Cond.size() <= 3) &&
         "# of Mips branch conditions must be <= 3!");

  // Two-way Conditional branch.
  if (FBB) {
    BuildCondBr(MBB, TBB, DL, Cond);
    BuildMI(&MBB, DL, get(UncondBrOpc)).addMBB(FBB);
    return 2;
  }

  // One way branch.
  // Unconditional branch.
  if (Cond.empty())
    BuildMI(&MBB, DL, get(UncondBrOpc)).addMBB(TBB);
  else // Conditional branch.
    BuildCondBr(MBB, TBB, DL, Cond);
  return 1;
}

// Erase the analyzable branches at the end of MBB (at most two) and return
// how many were removed.
unsigned MipsInstrInfo::
RemoveBranch(MachineBasicBlock &MBB) const
{
  MachineBasicBlock::reverse_iterator I = MBB.rbegin(), REnd = MBB.rend();
  MachineBasicBlock::reverse_iterator FirstBr;
  unsigned removed;

  // Skip all the debug instructions.
  while (I != REnd && I->isDebugValue())
    ++I;

  FirstBr = I;

  // Up to 2 branches are removed.
  // Note that indirect branches are not removed.
  for(removed = 0; I != REnd && removed < 2; ++I, ++removed)
    if (!GetAnalyzableBrOpc(I->getOpcode()))
      break;

  // reverse_iterator::base() points one past the element, so the forward
  // range [I.base(), FirstBr.base()) is exactly the branches walked above.
  MBB.erase(I.base(), FirstBr.base());

  return removed;
}

/// ReverseBranchCondition - Return the inverse opcode of the
/// specified Branch instruction.
bool MipsInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const
{
  assert( (Cond.size() && Cond.size() <= 3) &&
          "Invalid Mips branch condition!");
  Cond[0].setImm(Mips::GetOppositeBranchOpc(Cond[0].getImm()));
  return false;
}

diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
new file mode 100644
index 0000000..4be727d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.h
@@ -0,0 +1,110 @@
//===-- MipsInstrInfo.h - Mips Instruction Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef MIPSINSTRUCTIONINFO_H
#define MIPSINSTRUCTIONINFO_H

#include "Mips.h"
#include "MipsRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetInstrInfo.h"

#define GET_INSTRINFO_HEADER
#include "MipsGenInstrInfo.inc"

namespace llvm {

namespace Mips {
  /// GetOppositeBranchOpc - Return the inverse of the specified
  /// opcode, e.g. turning BEQ to BNE.
  unsigned GetOppositeBranchOpc(unsigned Opc);
}

class MipsInstrInfo : public MipsGenInstrInfo {
  MipsTargetMachine &TM;
  bool IsN64;                  // true when targeting the N64 ABI
  const MipsRegisterInfo RI;
  unsigned UncondBrOpc;        // Mips::B under PIC, Mips::J otherwise
public:
  explicit MipsInstrInfo(MipsTargetMachine &TM);

  /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info.  As
  /// such, whenever a client has an instance of instruction info, it should
  /// always be able to get register info as well (through this method).
  ///
  virtual const MipsRegisterInfo &getRegisterInfo() const;

  /// isLoadFromStackSlot - If the specified machine instruction is a direct
  /// load from a stack slot, return the virtual or physical register number of
  /// the destination along with the FrameIndex of the loaded stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than loading from the stack slot.
  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const;

  /// isStoreToStackSlot - If the specified machine instruction is a direct
  /// store to a stack slot, return the virtual or physical register number of
  /// the source reg along with the FrameIndex of the loaded stack slot.  If
  /// not, return 0.  This predicate must return 0 if the instruction has
  /// any side effects other than storing to the stack slot.
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const;

  /// Branch Analysis
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify) const;
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;

private:
  // Helper for InsertBranch: emits one conditional branch from a Cond
  // vector of the form [opcode, operand...].
  void BuildCondBr(MachineBasicBlock &MBB, MachineBasicBlock *TBB, DebugLoc DL,
                   const SmallVectorImpl<MachineOperand>& Cond) const;

public:
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const;
  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const;
  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const;

  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const;

  virtual MachineInstr* emitFrameIndexDebugValue(MachineFunction &MF,
                                                 int FrameIx, uint64_t Offset,
                                                 const MDNode *MDPtr,
                                                 DebugLoc DL) const;

  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;

  /// Insert nop instruction when hazard condition is found
  virtual void insertNoop(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MI) const;
};

}

#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
new file mode 100644
index 0000000..be74f8e
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsInstrInfo.td
@@ -0,0 +1,1138 @@
//===- MipsInstrInfo.td - Target Description for Mips Target -*- tablegen -*-=//
//
// The
LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Instruction format superclass
//===----------------------------------------------------------------------===//

include "MipsInstrFormats.td"

//===----------------------------------------------------------------------===//
// Mips profiles and nodes
//===----------------------------------------------------------------------===//

def SDT_MipsRet          : SDTypeProfile<0, 1, [SDTCisInt<0>]>;
def SDT_MipsJmpLink      : SDTypeProfile<0, 1, [SDTCisVT<0, iPTR>]>;
def SDT_MipsCMov         : SDTypeProfile<1, 4, [SDTCisSameAs<0, 1>,
                                                SDTCisSameAs<1, 2>,
                                                SDTCisSameAs<3, 4>,
                                                SDTCisInt<4>]>;
def SDT_MipsCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>]>;
def SDT_MipsCallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, i32>, SDTCisVT<1, i32>]>;
def SDT_MipsMAddMSub     : SDTypeProfile<0, 4,
                                         [SDTCisVT<0, i32>, SDTCisSameAs<0, 1>,
                                          SDTCisSameAs<1, 2>,
                                          SDTCisSameAs<2, 3>]>;
def SDT_MipsDivRem       : SDTypeProfile<0, 2,
                                         [SDTCisInt<0>,
                                          SDTCisSameAs<0, 1>]>;

def SDT_MipsThreadPointer : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;

def SDT_MipsDynAlloc : SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>,
                                            SDTCisSameAs<0, 1>]>;
def SDT_Sync : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;

def SDT_Ext : SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                   SDTCisVT<2, i32>, SDTCisSameAs<2, 3>]>;
def SDT_Ins : SDTypeProfile<1, 4, [SDTCisInt<0>, SDTCisSameAs<0, 1>,
                                   SDTCisVT<2, i32>, SDTCisSameAs<2, 3>,
                                   SDTCisSameAs<0, 4>]>;

// Call
def MipsJmpLink : SDNode<"MipsISD::JmpLink",SDT_MipsJmpLink,
                         [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
                          SDNPVariadic]>;

// Hi and Lo nodes are used to handle global addresses. Used on
// MipsISelLowering to lower stuff like GlobalAddress, ExternalSymbol
// static model. (nothing to do with Mips Registers Hi and Lo)
def MipsHi    : SDNode<"MipsISD::Hi", SDTIntUnaryOp>;
def MipsLo    : SDNode<"MipsISD::Lo", SDTIntUnaryOp>;
def MipsGPRel : SDNode<"MipsISD::GPRel", SDTIntUnaryOp>;

// TlsGd node is used to handle General Dynamic TLS
def MipsTlsGd : SDNode<"MipsISD::TlsGd", SDTIntUnaryOp>;

// TprelHi and TprelLo nodes are used to handle Local Exec TLS
def MipsTprelHi : SDNode<"MipsISD::TprelHi", SDTIntUnaryOp>;
def MipsTprelLo : SDNode<"MipsISD::TprelLo", SDTIntUnaryOp>;

// Thread pointer
def MipsThreadPointer: SDNode<"MipsISD::ThreadPointer", SDT_MipsThreadPointer>;

// Return
def MipsRet : SDNode<"MipsISD::Ret", SDT_MipsRet, [SDNPHasChain,
                     SDNPOptInGlue]>;

// These are target-independent nodes, but have target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_MipsCallSeqStart,
                           [SDNPHasChain, SDNPOutGlue]>;
def callseq_end   : SDNode<"ISD::CALLSEQ_END", SDT_MipsCallSeqEnd,
                           [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

// MAdd*/MSub* nodes
def MipsMAdd      : SDNode<"MipsISD::MAdd", SDT_MipsMAddMSub,
                           [SDNPOptInGlue, SDNPOutGlue]>;
def MipsMAddu     : SDNode<"MipsISD::MAddu", SDT_MipsMAddMSub,
                           [SDNPOptInGlue, SDNPOutGlue]>;
def MipsMSub      : SDNode<"MipsISD::MSub", SDT_MipsMAddMSub,
                           [SDNPOptInGlue, SDNPOutGlue]>;
def MipsMSubu     : SDNode<"MipsISD::MSubu", SDT_MipsMAddMSub,
                           [SDNPOptInGlue, SDNPOutGlue]>;

// DivRem(u) nodes
def MipsDivRem    : SDNode<"MipsISD::DivRem", SDT_MipsDivRem,
                           [SDNPOutGlue]>;
def MipsDivRemU   : SDNode<"MipsISD::DivRemU", SDT_MipsDivRem,
                           [SDNPOutGlue]>;

// Target constant nodes that are not part of any isel patterns and remain
// unchanged can cause instructions with illegal operands to be emitted.
// Wrapper node patterns give the instruction selector a chance to replace
// target constant nodes that would otherwise remain unchanged with ADDiu
// nodes. Without these wrapper node patterns, the following conditional move
// instrucion is emitted when function cmov2 in test/CodeGen/Mips/cmov.ll is
// compiled:
//  movn  %got(d)($gp), %got(c)($gp), $4
// This instruction is illegal since movn can take only register operands.

def MipsWrapper : SDNode<"MipsISD::Wrapper", SDTIntBinOp>;

// Pointer to dynamically allocated stack area.
def MipsDynAlloc  : SDNode<"MipsISD::DynAlloc", SDT_MipsDynAlloc,
                           [SDNPHasChain, SDNPInGlue]>;

def MipsSync : SDNode<"MipsISD::Sync", SDT_Sync, [SDNPHasChain]>;

def MipsExt :  SDNode<"MipsISD::Ext", SDT_Ext>;
def MipsIns :  SDNode<"MipsISD::Ins", SDT_Ins>;

//===----------------------------------------------------------------------===//
// Mips Instruction Predicate Definitions.
//===----------------------------------------------------------------------===//
def HasSEInReg  : Predicate<"Subtarget.hasSEInReg()">;
def HasBitCount : Predicate<"Subtarget.hasBitCount()">;
def HasSwap     : Predicate<"Subtarget.hasSwap()">;
def HasCondMov  : Predicate<"Subtarget.hasCondMov()">;
def HasMips32    : Predicate<"Subtarget.hasMips32()">;
def HasMips32r2  : Predicate<"Subtarget.hasMips32r2()">;
def HasMips64    : Predicate<"Subtarget.hasMips64()">;
def HasMips32r2Or64 : Predicate<"Subtarget.hasMips32r2Or64()">;
def NotMips64    : Predicate<"!Subtarget.hasMips64()">;
def HasMips64r2  : Predicate<"Subtarget.hasMips64r2()">;
def IsN64       : Predicate<"Subtarget.isABI_N64()">;
def NotN64      : Predicate<"!Subtarget.isABI_N64()">;
def RelocStatic : Predicate<"TM.getRelocationModel() == Reloc::Static">;
def RelocPIC    : Predicate<"TM.getRelocationModel() == Reloc::PIC_">;
def NoNaNsFPMath : Predicate<"TM.Options.NoNaNsFPMath">;

//===----------------------------------------------------------------------===//
// Mips Operand, Complex Patterns and Transformations Definitions.
//===----------------------------------------------------------------------===//

// Instruction operand types
def jmptarget   : Operand<OtherVT> {
  let EncoderMethod = "getJumpTargetOpValue";
}
def brtarget    : Operand<OtherVT> {
  let EncoderMethod = "getBranchTargetOpValue";
  let OperandType = "OPERAND_PCREL";
}
def calltarget  : Operand<iPTR> {
  let EncoderMethod = "getJumpTargetOpValue";
}
def calltarget64: Operand<i64>;
def simm16      : Operand<i32>;
def simm16_64   : Operand<i64>;
def shamt       : Operand<i32>;

// Unsigned Operand
def uimm16      : Operand<i32> {
  let PrintMethod = "printUnsignedImm";
}

// Address operand
def mem : Operand<i32> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops CPURegs, simm16);
  let EncoderMethod = "getMemEncoding";
}

def mem64 : Operand<i64> {
  let PrintMethod = "printMemOperand";
  let MIOperandInfo = (ops CPU64Regs, simm16_64);
}

def mem_ea : Operand<i32> {
  let PrintMethod = "printMemOperandEA";
  let MIOperandInfo = (ops CPURegs, simm16);
  let EncoderMethod = "getMemEncoding";
}

def mem_ea_64 : Operand<i64> {
  let PrintMethod = "printMemOperandEA";
  let MIOperandInfo = (ops CPU64Regs, simm16_64);
  let EncoderMethod = "getMemEncoding";
}

// size operand of ext instruction
def size_ext : Operand<i32> {
  let EncoderMethod = "getSizeExtEncoding";
}

// size operand of ins instruction
def size_ins : Operand<i32> {
  let EncoderMethod = "getSizeInsEncoding";
}

// Transformation Function - get the lower 16 bits.
def LO16 : SDNodeXForm<imm, [{
  return getImm(N, N->getZExtValue() & 0xFFFF);
}]>;

// Transformation Function - get the higher 16 bits.
def HI16 : SDNodeXForm<imm, [{
  return getImm(N, (N->getZExtValue() >> 16) & 0xFFFF);
}]>;

// Node immediate fits as 16-bit sign extended on target immediate.
// e.g. addi, andi
def immSExt16  : PatLeaf<(imm), [{ return isInt<16>(N->getSExtValue()); }]>;

// Node immediate fits as 16-bit zero extended on target immediate.
// The LO16 param means that only the lower 16 bits of the node
// immediate are caught.
// e.g. addiu, sltiu
def immZExt16  : PatLeaf<(imm), [{
  if (N->getValueType(0) == MVT::i32)
    return (uint32_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
  else
    return (uint64_t)N->getZExtValue() == (unsigned short)N->getZExtValue();
}], LO16>;

// Immediate can be loaded with LUi (32-bit int with lower 16-bit cleared).
def immLow16Zero : PatLeaf<(imm), [{
  int64_t Val = N->getSExtValue();
  return isInt<32>(Val) && !(Val & 0xffff);
}]>;

// shamt field must fit in 5 bits.
def immZExt5 : ImmLeaf<i32, [{return Imm == (Imm & 0x1f);}]>;

// Mips Address Mode! SDNode frameindex could possibily be a match
// since load and store instructions from stack used it.
def addr : ComplexPattern<iPTR, 2, "SelectAddr", [frameindex], [SDNPWantParent]>;

//===----------------------------------------------------------------------===//
// Pattern fragment for load/store
//===----------------------------------------------------------------------===//
class UnalignedLoad<PatFrag Node> :
  PatFrag<(ops node:$ptr), (Node node:$ptr), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  return LD->getMemoryVT().getSizeInBits()/8 > LD->getAlignment();
}]>;

class AlignedLoad<PatFrag Node> :
  PatFrag<(ops node:$ptr), (Node node:$ptr), [{
  LoadSDNode *LD = cast<LoadSDNode>(N);
  return LD->getMemoryVT().getSizeInBits()/8 <= LD->getAlignment();
}]>;

class UnalignedStore<PatFrag Node> :
  PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
  StoreSDNode *SD = cast<StoreSDNode>(N);
  return SD->getMemoryVT().getSizeInBits()/8 > SD->getAlignment();
}]>;

class AlignedStore<PatFrag Node> :
  PatFrag<(ops node:$val, node:$ptr), (Node node:$val, node:$ptr), [{
  StoreSDNode *SD = cast<StoreSDNode>(N);
  return SD->getMemoryVT().getSizeInBits()/8 <= SD->getAlignment();
}]>;

// Load/Store PatFrags.
def sextloadi16_a   : AlignedLoad<sextloadi16>;
def zextloadi16_a   : AlignedLoad<zextloadi16>;
def extloadi16_a    : AlignedLoad<extloadi16>;
def load_a          : AlignedLoad<load>;
def sextloadi32_a   : AlignedLoad<sextloadi32>;
def zextloadi32_a   : AlignedLoad<zextloadi32>;
def extloadi32_a    : AlignedLoad<extloadi32>;
def truncstorei16_a : AlignedStore<truncstorei16>;
def store_a         : AlignedStore<store>;
def truncstorei32_a : AlignedStore<truncstorei32>;
def sextloadi16_u   : UnalignedLoad<sextloadi16>;
def zextloadi16_u   : UnalignedLoad<zextloadi16>;
def extloadi16_u    : UnalignedLoad<extloadi16>;
def load_u          : UnalignedLoad<load>;
def sextloadi32_u   : UnalignedLoad<sextloadi32>;
def zextloadi32_u   : UnalignedLoad<zextloadi32>;
def extloadi32_u    : UnalignedLoad<extloadi32>;
def truncstorei16_u : UnalignedStore<truncstorei16>;
def store_u         : UnalignedStore<store>;
def truncstorei32_u : UnalignedStore<truncstorei32>;

//===----------------------------------------------------------------------===//
// Instructions specific format
//===----------------------------------------------------------------------===//

// Arithmetic and logical instructions with 3 register operands.
class ArithLogicR<bits<6> op, bits<6> func, string instr_asm, SDNode OpNode,
                  InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
  FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
     !strconcat(instr_asm, "\t$rd, $rs, $rt"),
     [(set RC:$rd, (OpNode RC:$rs, RC:$rt))], itin> {
  let shamt = 0;
  let isCommutable = isComm;
}

class ArithOverflowR<bits<6> op, bits<6> func, string instr_asm,
                     InstrItinClass itin, RegisterClass RC, bit isComm = 0>:
  FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
     !strconcat(instr_asm, "\t$rd, $rs, $rt"), [], itin> {
  let shamt = 0;
  let isCommutable = isComm;
}

// Arithmetic and logical instructions with 2 register operands.
class ArithLogicI<bits<6> op, string instr_asm, SDNode OpNode,
                  Operand Od, PatLeaf imm_type, RegisterClass RC> :
  FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
     !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
     [(set RC:$rt, (OpNode RC:$rs, imm_type:$imm16))], IIAlu>;

class ArithOverflowI<bits<6> op, string instr_asm, SDNode OpNode,
                     Operand Od, PatLeaf imm_type, RegisterClass RC> :
  FI<op, (outs RC:$rt), (ins RC:$rs, Od:$imm16),
     !strconcat(instr_asm, "\t$rt, $rs, $imm16"), [], IIAlu>;

// Arithmetic Multiply ADD/SUB
let rd = 0, shamt = 0, Defs = [HI, LO], Uses = [HI, LO] in
class MArithR<bits<6> func, string instr_asm, SDNode op, bit isComm = 0> :
  FR<0x1c, func, (outs), (ins CPURegs:$rs, CPURegs:$rt),
     !strconcat(instr_asm, "\t$rs, $rt"),
     [(op CPURegs:$rs, CPURegs:$rt, LO, HI)], IIImul> {
  let rd = 0;
  let shamt = 0;
  let isCommutable = isComm;
}

//  Logical
class LogicNOR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>:
  FR<op, func, (outs RC:$rd), (ins RC:$rs, RC:$rt),
     !strconcat(instr_asm, "\t$rd, $rs, $rt"),
     [(set RC:$rd, (not (or RC:$rs, RC:$rt)))], IIAlu> {
  let shamt = 0;
  let isCommutable = 1;
}

// Shifts
class shift_rotate_imm<bits<6> func, bits<5> isRotate, string instr_asm,
                       SDNode OpNode, PatFrag PF, Operand ImmOpnd,
                       RegisterClass RC>:
  FR<0x00, func, (outs RC:$rd), (ins RC:$rt, ImmOpnd:$shamt),
     !strconcat(instr_asm, "\t$rd, $rt, $shamt"),
     [(set RC:$rd, (OpNode RC:$rt, PF:$shamt))], IIAlu> {
  let rs = isRotate;
}

// 32-bit shift instructions.
class shift_rotate_imm32<bits<6> func, bits<5> isRotate, string instr_asm,
                         SDNode OpNode>:
  shift_rotate_imm<func, isRotate, instr_asm, OpNode, immZExt5, shamt, CPURegs>;

class shift_rotate_reg<bits<6> func, bits<5> isRotate, string instr_asm,
                       SDNode OpNode, RegisterClass RC>:
  FR<0x00, func, (outs RC:$rd), (ins CPURegs:$rs, RC:$rt),
     !strconcat(instr_asm, "\t$rd, $rt, $rs"),
     [(set RC:$rd, (OpNode RC:$rt, CPURegs:$rs))], IIAlu> {
  let shamt = isRotate;
}

// Load Upper Imediate
class LoadUpper<bits<6> op, string instr_asm, RegisterClass RC, Operand Imm>:
  FI<op, (outs RC:$rt), (ins Imm:$imm16),
     !strconcat(instr_asm, "\t$rt, $imm16"), [], IIAlu> {
  let rs = 0;
  let neverHasSideEffects = 1;
}

class FMem<bits<6> op, dag outs, dag ins, string asmstr, list<dag> pattern,
          InstrItinClass itin>: FFI<op, outs, ins, asmstr, pattern> {
  bits<21> addr;
  let Inst{25-21} = addr{20-16};
  let Inst{15-0}  = addr{15-0};
}

// Memory Load/Store
let canFoldAsLoad = 1 in
class LoadM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
            Operand MemOpnd, bit Pseudo>:
  FMem<op, (outs RC:$rt), (ins MemOpnd:$addr),
     !strconcat(instr_asm, "\t$rt, $addr"),
     [(set RC:$rt, (OpNode addr:$addr))], IILoad> {
  let isPseudo = Pseudo;
}

class StoreM<bits<6> op, string instr_asm, PatFrag OpNode, RegisterClass RC,
             Operand MemOpnd, bit Pseudo>:
  FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr),
     !strconcat(instr_asm, "\t$rt, $addr"),
     [(OpNode RC:$rt, addr:$addr)], IIStore> {
  let isPseudo = Pseudo;
}

// Unaligned Memory Load/Store
let canFoldAsLoad = 1 in
class LoadUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
  FMem<op, (outs RC:$rt), (ins MemOpnd:$addr), "", [], IILoad> {}

class StoreUnAlign<bits<6> op, RegisterClass RC, Operand MemOpnd>:
  FMem<op, (outs), (ins RC:$rt, MemOpnd:$addr), "", [], IIStore> {}

// 32-bit load.
// The "_P8" variants use 64-bit address operands and are selected under
// the N64 ABI (IsN64/NotN64 predicates).
multiclass LoadM32<bits<6> op, string instr_asm, PatFrag OpNode,
                   bit Pseudo = 0> {
  def #NAME# : LoadM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
               Requires<[NotN64]>;
  def _P8    : LoadM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
               Requires<[IsN64]>;
}

// 64-bit load.
multiclass LoadM64<bits<6> op, string instr_asm, PatFrag OpNode,
                   bit Pseudo = 0> {
  def #NAME# : LoadM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
               Requires<[NotN64]>;
  def _P8    : LoadM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
               Requires<[IsN64]>;
}

// 32-bit load.
multiclass LoadUnAlign32<bits<6> op> {
  def #NAME# : LoadUnAlign<op, CPURegs, mem>,
               Requires<[NotN64]>;
  def _P8    : LoadUnAlign<op, CPURegs, mem64>,
               Requires<[IsN64]>;
}
// 32-bit store.
multiclass StoreM32<bits<6> op, string instr_asm, PatFrag OpNode,
                    bit Pseudo = 0> {
  def #NAME# : StoreM<op, instr_asm, OpNode, CPURegs, mem, Pseudo>,
               Requires<[NotN64]>;
  def _P8    : StoreM<op, instr_asm, OpNode, CPURegs, mem64, Pseudo>,
               Requires<[IsN64]>;
}

// 64-bit store.
multiclass StoreM64<bits<6> op, string instr_asm, PatFrag OpNode,
                    bit Pseudo = 0> {
  def #NAME# : StoreM<op, instr_asm, OpNode, CPU64Regs, mem, Pseudo>,
               Requires<[NotN64]>;
  def _P8    : StoreM<op, instr_asm, OpNode, CPU64Regs, mem64, Pseudo>,
               Requires<[IsN64]>;
}

// 32-bit store.
multiclass StoreUnAlign32<bits<6> op> {
  def #NAME# : StoreUnAlign<op, CPURegs, mem>,
               Requires<[NotN64]>;
  def _P8    : StoreUnAlign<op, CPURegs, mem64>,
               Requires<[IsN64]>;
}

// Conditional Branch
class CBranch<bits<6> op, string instr_asm, PatFrag cond_op, RegisterClass RC>:
  BranchBase<op, (outs), (ins RC:$rs, RC:$rt, brtarget:$imm16),
             !strconcat(instr_asm, "\t$rs, $rt, $imm16"),
             [(brcond (i32 (cond_op RC:$rs, RC:$rt)), bb:$imm16)], IIBranch> {
  let isBranch = 1;
  let isTerminator = 1;
  let hasDelaySlot = 1;
}

class CBranchZero<bits<6> op, bits<5> _rt, string instr_asm, PatFrag cond_op,
                  RegisterClass RC>:
  BranchBase<op, (outs), (ins RC:$rs, brtarget:$imm16),
             !strconcat(instr_asm, "\t$rs, $imm16"),
             [(brcond (i32 (cond_op RC:$rs, 0)), bb:$imm16)], IIBranch> {
  let rt = _rt;
  let isBranch = 1;
  let isTerminator = 1;
  let hasDelaySlot = 1;
}

// SetCC
class SetCC_R<bits<6> op, bits<6> func, string instr_asm, PatFrag cond_op,
              RegisterClass RC>:
  FR<op, func, (outs CPURegs:$rd), (ins RC:$rs, RC:$rt),
     !strconcat(instr_asm, "\t$rd, $rs, $rt"),
     [(set CPURegs:$rd, (cond_op RC:$rs, RC:$rt))],
     IIAlu> {
  let shamt = 0;
}

class SetCC_I<bits<6> op, string instr_asm, PatFrag cond_op, Operand Od,
              PatLeaf imm_type, RegisterClass RC>:
  FI<op, (outs CPURegs:$rt), (ins RC:$rs, Od:$imm16),
     !strconcat(instr_asm, "\t$rt, $rs, $imm16"),
     [(set CPURegs:$rt, (cond_op RC:$rs, imm_type:$imm16))],
     IIAlu>;

// Jump
class JumpFJ<bits<6> op, string instr_asm>:
  FJ<op, (outs), (ins jmptarget:$target),
     !strconcat(instr_asm, "\t$target"), [(br bb:$target)], IIBranch> {
  let isBranch=1;
  let isTerminator=1;
  let isBarrier=1;
  let hasDelaySlot = 1;
  let Predicates = [RelocStatic];
}

// Unconditional branch
class UncondBranch<bits<6> op, string instr_asm>:
  BranchBase<op, (outs), (ins brtarget:$imm16),
             !strconcat(instr_asm, "\t$imm16"), [(br bb:$imm16)], IIBranch> {
  let rs = 0;
  let rt = 0;
  let isBranch = 1;
  let
isTerminator = 1; + let isBarrier = 1; + let hasDelaySlot = 1; + let Predicates = [RelocPIC]; +} + +let isBranch=1, isTerminator=1, isBarrier=1, rd=0, hasDelaySlot = 1, + isIndirectBranch = 1 in +class JumpFR<bits<6> op, bits<6> func, string instr_asm, RegisterClass RC>: + FR<op, func, (outs), (ins RC:$rs), + !strconcat(instr_asm, "\t$rs"), [(brind RC:$rs)], IIBranch> { + let rt = 0; + let rd = 0; + let shamt = 0; +} + +// Jump and Link (Call) +let isCall=1, hasDelaySlot=1 in { + class JumpLink<bits<6> op, string instr_asm>: + FJ<op, (outs), (ins calltarget:$target, variable_ops), + !strconcat(instr_asm, "\t$target"), [(MipsJmpLink imm:$target)], + IIBranch>; + + class JumpLinkReg<bits<6> op, bits<6> func, string instr_asm, + RegisterClass RC>: + FR<op, func, (outs), (ins RC:$rs, variable_ops), + !strconcat(instr_asm, "\t$rs"), [(MipsJmpLink RC:$rs)], IIBranch> { + let rt = 0; + let rd = 31; + let shamt = 0; + } + + class BranchLink<string instr_asm, bits<5> _rt, RegisterClass RC>: + FI<0x1, (outs), (ins RC:$rs, brtarget:$imm16, variable_ops), + !strconcat(instr_asm, "\t$rs, $imm16"), [], IIBranch> { + let rt = _rt; + } +} + +// Mul, Div +class Mult<bits<6> func, string instr_asm, InstrItinClass itin, + RegisterClass RC, list<Register> DefRegs>: + FR<0x00, func, (outs), (ins RC:$rs, RC:$rt), + !strconcat(instr_asm, "\t$rs, $rt"), [], itin> { + let rd = 0; + let shamt = 0; + let isCommutable = 1; + let Defs = DefRegs; + let neverHasSideEffects = 1; +} + +class Mult32<bits<6> func, string instr_asm, InstrItinClass itin>: + Mult<func, instr_asm, itin, CPURegs, [HI, LO]>; + +class Div<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin, + RegisterClass RC, list<Register> DefRegs>: + FR<0x00, func, (outs), (ins RC:$rs, RC:$rt), + !strconcat(instr_asm, "\t$$zero, $rs, $rt"), + [(op RC:$rs, RC:$rt)], itin> { + let rd = 0; + let shamt = 0; + let Defs = DefRegs; +} + +class Div32<SDNode op, bits<6> func, string instr_asm, InstrItinClass itin>: + Div<op, func, 
instr_asm, itin, CPURegs, [HI, LO]>; + +// Move from Hi/Lo +class MoveFromLOHI<bits<6> func, string instr_asm, RegisterClass RC, + list<Register> UseRegs>: + FR<0x00, func, (outs RC:$rd), (ins), + !strconcat(instr_asm, "\t$rd"), [], IIHiLo> { + let rs = 0; + let rt = 0; + let shamt = 0; + let Uses = UseRegs; + let neverHasSideEffects = 1; +} + +class MoveToLOHI<bits<6> func, string instr_asm, RegisterClass RC, + list<Register> DefRegs>: + FR<0x00, func, (outs), (ins RC:$rs), + !strconcat(instr_asm, "\t$rs"), [], IIHiLo> { + let rt = 0; + let rd = 0; + let shamt = 0; + let Defs = DefRegs; + let neverHasSideEffects = 1; +} + +class EffectiveAddress<string instr_asm, RegisterClass RC, Operand Mem> : + FMem<0x09, (outs RC:$rt), (ins Mem:$addr), + instr_asm, [(set RC:$rt, addr:$addr)], IIAlu>; + +// Count Leading Ones/Zeros in Word +class CountLeading0<bits<6> func, string instr_asm, RegisterClass RC>: + FR<0x1c, func, (outs RC:$rd), (ins RC:$rs), + !strconcat(instr_asm, "\t$rd, $rs"), + [(set RC:$rd, (ctlz RC:$rs))], IIAlu>, + Requires<[HasBitCount]> { + let shamt = 0; + let rt = rd; +} + +class CountLeading1<bits<6> func, string instr_asm, RegisterClass RC>: + FR<0x1c, func, (outs RC:$rd), (ins RC:$rs), + !strconcat(instr_asm, "\t$rd, $rs"), + [(set RC:$rd, (ctlz (not RC:$rs)))], IIAlu>, + Requires<[HasBitCount]> { + let shamt = 0; + let rt = rd; +} + +// Sign Extend in Register. 
+class SignExtInReg<bits<5> sa, string instr_asm, ValueType vt, + RegisterClass RC>: + FR<0x1f, 0x20, (outs RC:$rd), (ins RC:$rt), + !strconcat(instr_asm, "\t$rd, $rt"), + [(set RC:$rd, (sext_inreg RC:$rt, vt))], NoItinerary> { + let rs = 0; + let shamt = sa; + let Predicates = [HasSEInReg]; +} + +// Subword Swap +class SubwordSwap<bits<6> func, bits<5> sa, string instr_asm, RegisterClass RC>: + FR<0x1f, func, (outs RC:$rd), (ins RC:$rt), + !strconcat(instr_asm, "\t$rd, $rt"), [], NoItinerary> { + let rs = 0; + let shamt = sa; + let Predicates = [HasSwap]; + let neverHasSideEffects = 1; +} + +// Read Hardware +class ReadHardware<RegisterClass CPURegClass, RegisterClass HWRegClass> + : FR<0x1f, 0x3b, (outs CPURegClass:$rt), (ins HWRegClass:$rd), + "rdhwr\t$rt, $rd", [], IIAlu> { + let rs = 0; + let shamt = 0; +} + +// Ext and Ins +class ExtBase<bits<6> _funct, string instr_asm, RegisterClass RC>: + FR<0x1f, _funct, (outs RC:$rt), (ins RC:$rs, uimm16:$pos, size_ext:$sz), + !strconcat(instr_asm, " $rt, $rs, $pos, $sz"), + [(set RC:$rt, (MipsExt RC:$rs, imm:$pos, imm:$sz))], NoItinerary> { + bits<5> pos; + bits<5> sz; + let rd = sz; + let shamt = pos; + let Predicates = [HasMips32r2]; +} + +class InsBase<bits<6> _funct, string instr_asm, RegisterClass RC>: + FR<0x1f, _funct, (outs RC:$rt), + (ins RC:$rs, uimm16:$pos, size_ins:$sz, RC:$src), + !strconcat(instr_asm, " $rt, $rs, $pos, $sz"), + [(set RC:$rt, (MipsIns RC:$rs, imm:$pos, imm:$sz, RC:$src))], + NoItinerary> { + bits<5> pos; + bits<5> sz; + let rd = sz; + let shamt = pos; + let Predicates = [HasMips32r2]; + let Constraints = "$src = $rt"; +} + +// Atomic instructions with 2 source operands (ATOMIC_SWAP & ATOMIC_LOAD_*). 
+class Atomic2Ops<PatFrag Op, string Opstr, RegisterClass DRC, + RegisterClass PRC> : + MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$incr), + !strconcat("atomic_", Opstr, "\t$dst, $ptr, $incr"), + [(set DRC:$dst, (Op PRC:$ptr, DRC:$incr))]>; + +multiclass Atomic2Ops32<PatFrag Op, string Opstr> { + def #NAME# : Atomic2Ops<Op, Opstr, CPURegs, CPURegs>, Requires<[NotN64]>; + def _P8 : Atomic2Ops<Op, Opstr, CPURegs, CPU64Regs>, Requires<[IsN64]>; +} + +// Atomic Compare & Swap. +class AtomicCmpSwap<PatFrag Op, string Width, RegisterClass DRC, + RegisterClass PRC> : + MipsPseudo<(outs DRC:$dst), (ins PRC:$ptr, DRC:$cmp, DRC:$swap), + !strconcat("atomic_cmp_swap_", Width, "\t$dst, $ptr, $cmp, $swap"), + [(set DRC:$dst, (Op PRC:$ptr, DRC:$cmp, DRC:$swap))]>; + +multiclass AtomicCmpSwap32<PatFrag Op, string Width> { + def #NAME# : AtomicCmpSwap<Op, Width, CPURegs, CPURegs>, Requires<[NotN64]>; + def _P8 : AtomicCmpSwap<Op, Width, CPURegs, CPU64Regs>, Requires<[IsN64]>; +} + +class LLBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> : + FMem<Opc, (outs RC:$rt), (ins Mem:$addr), + !strconcat(opstring, "\t$rt, $addr"), [], IILoad> { + let mayLoad = 1; +} + +class SCBase<bits<6> Opc, string opstring, RegisterClass RC, Operand Mem> : + FMem<Opc, (outs RC:$dst), (ins RC:$rt, Mem:$addr), + !strconcat(opstring, "\t$rt, $addr"), [], IIStore> { + let mayStore = 1; + let Constraints = "$rt = $dst"; +} + +//===----------------------------------------------------------------------===// +// Pseudo instructions +//===----------------------------------------------------------------------===// + +// As stack alignment is always done with addiu, we need a 16-bit immediate +let Defs = [SP], Uses = [SP] in { +def ADJCALLSTACKDOWN : MipsPseudo<(outs), (ins uimm16:$amt), + "!ADJCALLSTACKDOWN $amt", + [(callseq_start timm:$amt)]>; +def ADJCALLSTACKUP : MipsPseudo<(outs), (ins uimm16:$amt1, uimm16:$amt2), + "!ADJCALLSTACKUP $amt1", + [(callseq_end timm:$amt1, timm:$amt2)]>; +} 
+ +// When handling PIC code the assembler needs .cpload and .cprestore +// directives. If the real instructions corresponding these directives +// are used, we have the same behavior, but get also a bunch of warnings +// from the assembler. +let neverHasSideEffects = 1 in +def CPRESTORE : MipsPseudo<(outs), (ins i32imm:$loc, CPURegs:$gp), + ".cprestore\t$loc", []>; + +// For O32 ABI & PIC & non-fixed global base register, the following instruction +// seqeunce is emitted to set the global base register: +// +// 0. lui $2, %hi(_gp_disp) +// 1. addiu $2, $2, %lo(_gp_disp) +// 2. addu $globalbasereg, $2, $t9 +// +// SETGP01 is emitted during Prologue/Epilogue insertion and then converted to +// instructions 0 and 1 in the sequence above during MC lowering. +// SETGP2 is emitted just before register allocation and converted to +// instruction 2 just prior to post-RA scheduling. +// +// These pseudo instructions are needed to ensure no instructions are inserted +// before or between instructions 0 and 1, which is a limitation imposed by +// GNU linker. 
+ +let isTerminator = 1, isBarrier = 1 in +def SETGP01 : MipsPseudo<(outs CPURegs:$dst), (ins), "", []>; + +let neverHasSideEffects = 1 in +def SETGP2 : MipsPseudo<(outs CPURegs:$globalreg), (ins CPURegs:$picreg), "", + []>; + +let usesCustomInserter = 1 in { + defm ATOMIC_LOAD_ADD_I8 : Atomic2Ops32<atomic_load_add_8, "load_add_8">; + defm ATOMIC_LOAD_ADD_I16 : Atomic2Ops32<atomic_load_add_16, "load_add_16">; + defm ATOMIC_LOAD_ADD_I32 : Atomic2Ops32<atomic_load_add_32, "load_add_32">; + defm ATOMIC_LOAD_SUB_I8 : Atomic2Ops32<atomic_load_sub_8, "load_sub_8">; + defm ATOMIC_LOAD_SUB_I16 : Atomic2Ops32<atomic_load_sub_16, "load_sub_16">; + defm ATOMIC_LOAD_SUB_I32 : Atomic2Ops32<atomic_load_sub_32, "load_sub_32">; + defm ATOMIC_LOAD_AND_I8 : Atomic2Ops32<atomic_load_and_8, "load_and_8">; + defm ATOMIC_LOAD_AND_I16 : Atomic2Ops32<atomic_load_and_16, "load_and_16">; + defm ATOMIC_LOAD_AND_I32 : Atomic2Ops32<atomic_load_and_32, "load_and_32">; + defm ATOMIC_LOAD_OR_I8 : Atomic2Ops32<atomic_load_or_8, "load_or_8">; + defm ATOMIC_LOAD_OR_I16 : Atomic2Ops32<atomic_load_or_16, "load_or_16">; + defm ATOMIC_LOAD_OR_I32 : Atomic2Ops32<atomic_load_or_32, "load_or_32">; + defm ATOMIC_LOAD_XOR_I8 : Atomic2Ops32<atomic_load_xor_8, "load_xor_8">; + defm ATOMIC_LOAD_XOR_I16 : Atomic2Ops32<atomic_load_xor_16, "load_xor_16">; + defm ATOMIC_LOAD_XOR_I32 : Atomic2Ops32<atomic_load_xor_32, "load_xor_32">; + defm ATOMIC_LOAD_NAND_I8 : Atomic2Ops32<atomic_load_nand_8, "load_nand_8">; + defm ATOMIC_LOAD_NAND_I16 : Atomic2Ops32<atomic_load_nand_16, "load_nand_16">; + defm ATOMIC_LOAD_NAND_I32 : Atomic2Ops32<atomic_load_nand_32, "load_nand_32">; + + defm ATOMIC_SWAP_I8 : Atomic2Ops32<atomic_swap_8, "swap_8">; + defm ATOMIC_SWAP_I16 : Atomic2Ops32<atomic_swap_16, "swap_16">; + defm ATOMIC_SWAP_I32 : Atomic2Ops32<atomic_swap_32, "swap_32">; + + defm ATOMIC_CMP_SWAP_I8 : AtomicCmpSwap32<atomic_cmp_swap_8, "8">; + defm ATOMIC_CMP_SWAP_I16 : AtomicCmpSwap32<atomic_cmp_swap_16, "16">; + defm 
ATOMIC_CMP_SWAP_I32 : AtomicCmpSwap32<atomic_cmp_swap_32, "32">; +} + +//===----------------------------------------------------------------------===// +// Instruction definition +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// MipsI Instructions +//===----------------------------------------------------------------------===// + +/// Arithmetic Instructions (ALU Immediate) +def ADDiu : ArithLogicI<0x09, "addiu", add, simm16, immSExt16, CPURegs>; +def ADDi : ArithOverflowI<0x08, "addi", add, simm16, immSExt16, CPURegs>; +def SLTi : SetCC_I<0x0a, "slti", setlt, simm16, immSExt16, CPURegs>; +def SLTiu : SetCC_I<0x0b, "sltiu", setult, simm16, immSExt16, CPURegs>; +def ANDi : ArithLogicI<0x0c, "andi", and, uimm16, immZExt16, CPURegs>; +def ORi : ArithLogicI<0x0d, "ori", or, uimm16, immZExt16, CPURegs>; +def XORi : ArithLogicI<0x0e, "xori", xor, uimm16, immZExt16, CPURegs>; +def LUi : LoadUpper<0x0f, "lui", CPURegs, uimm16>; + +/// Arithmetic Instructions (3-Operand, R-Type) +def ADDu : ArithLogicR<0x00, 0x21, "addu", add, IIAlu, CPURegs, 1>; +def SUBu : ArithLogicR<0x00, 0x23, "subu", sub, IIAlu, CPURegs>; +def ADD : ArithOverflowR<0x00, 0x20, "add", IIAlu, CPURegs, 1>; +def SUB : ArithOverflowR<0x00, 0x22, "sub", IIAlu, CPURegs>; +def SLT : SetCC_R<0x00, 0x2a, "slt", setlt, CPURegs>; +def SLTu : SetCC_R<0x00, 0x2b, "sltu", setult, CPURegs>; +def AND : ArithLogicR<0x00, 0x24, "and", and, IIAlu, CPURegs, 1>; +def OR : ArithLogicR<0x00, 0x25, "or", or, IIAlu, CPURegs, 1>; +def XOR : ArithLogicR<0x00, 0x26, "xor", xor, IIAlu, CPURegs, 1>; +def NOR : LogicNOR<0x00, 0x27, "nor", CPURegs>; + +/// Shift Instructions +def SLL : shift_rotate_imm32<0x00, 0x00, "sll", shl>; +def SRL : shift_rotate_imm32<0x02, 0x00, "srl", srl>; +def SRA : shift_rotate_imm32<0x03, 0x00, "sra", sra>; +def SLLV : shift_rotate_reg<0x04, 0x00, "sllv", shl, CPURegs>; +def SRLV : 
shift_rotate_reg<0x06, 0x00, "srlv", srl, CPURegs>; +def SRAV : shift_rotate_reg<0x07, 0x00, "srav", sra, CPURegs>; + +// Rotate Instructions +let Predicates = [HasMips32r2] in { + def ROTR : shift_rotate_imm32<0x02, 0x01, "rotr", rotr>; + def ROTRV : shift_rotate_reg<0x06, 0x01, "rotrv", rotr, CPURegs>; +} + +/// Load and Store Instructions +/// aligned +defm LB : LoadM32<0x20, "lb", sextloadi8>; +defm LBu : LoadM32<0x24, "lbu", zextloadi8>; +defm LH : LoadM32<0x21, "lh", sextloadi16_a>; +defm LHu : LoadM32<0x25, "lhu", zextloadi16_a>; +defm LW : LoadM32<0x23, "lw", load_a>; +defm SB : StoreM32<0x28, "sb", truncstorei8>; +defm SH : StoreM32<0x29, "sh", truncstorei16_a>; +defm SW : StoreM32<0x2b, "sw", store_a>; + +/// unaligned +defm ULH : LoadM32<0x21, "ulh", sextloadi16_u, 1>; +defm ULHu : LoadM32<0x25, "ulhu", zextloadi16_u, 1>; +defm ULW : LoadM32<0x23, "ulw", load_u, 1>; +defm USH : StoreM32<0x29, "ush", truncstorei16_u, 1>; +defm USW : StoreM32<0x2b, "usw", store_u, 1>; + +/// Primitives for unaligned +defm LWL : LoadUnAlign32<0x22>; +defm LWR : LoadUnAlign32<0x26>; +defm SWL : StoreUnAlign32<0x2A>; +defm SWR : StoreUnAlign32<0x2E>; + +let hasSideEffects = 1 in +def SYNC : MipsInst<(outs), (ins i32imm:$stype), "sync $stype", + [(MipsSync imm:$stype)], NoItinerary, FrmOther> +{ + bits<5> stype; + let Opcode = 0; + let Inst{25-11} = 0; + let Inst{10-6} = stype; + let Inst{5-0} = 15; +} + +/// Load-linked, Store-conditional +def LL : LLBase<0x30, "ll", CPURegs, mem>, Requires<[NotN64]>; +def LL_P8 : LLBase<0x30, "ll", CPURegs, mem64>, Requires<[IsN64]>; +def SC : SCBase<0x38, "sc", CPURegs, mem>, Requires<[NotN64]>; +def SC_P8 : SCBase<0x38, "sc", CPURegs, mem64>, Requires<[IsN64]>; + +/// Jump and Branch Instructions +def J : JumpFJ<0x02, "j">; +def JR : JumpFR<0x00, 0x08, "jr", CPURegs>; +def B : UncondBranch<0x04, "b">; +def BEQ : CBranch<0x04, "beq", seteq, CPURegs>; +def BNE : CBranch<0x05, "bne", setne, CPURegs>; +def BGEZ : CBranchZero<0x01, 1, "bgez", 
setge, CPURegs>; +def BGTZ : CBranchZero<0x07, 0, "bgtz", setgt, CPURegs>; +def BLEZ : CBranchZero<0x06, 0, "blez", setle, CPURegs>; +def BLTZ : CBranchZero<0x01, 0, "bltz", setlt, CPURegs>; + +def JAL : JumpLink<0x03, "jal">; +def JALR : JumpLinkReg<0x00, 0x09, "jalr", CPURegs>; +def BGEZAL : BranchLink<"bgezal", 0x11, CPURegs>; +def BLTZAL : BranchLink<"bltzal", 0x10, CPURegs>; + +let isReturn=1, isTerminator=1, hasDelaySlot=1, + isBarrier=1, hasCtrlDep=1, rd=0, rt=0, shamt=0 in + def RET : FR <0x00, 0x08, (outs), (ins CPURegs:$target), + "jr\t$target", [(MipsRet CPURegs:$target)], IIBranch>; + +/// Multiply and Divide Instructions. +def MULT : Mult32<0x18, "mult", IIImul>; +def MULTu : Mult32<0x19, "multu", IIImul>; +def SDIV : Div32<MipsDivRem, 0x1a, "div", IIIdiv>; +def UDIV : Div32<MipsDivRemU, 0x1b, "divu", IIIdiv>; + +def MTHI : MoveToLOHI<0x11, "mthi", CPURegs, [HI]>; +def MTLO : MoveToLOHI<0x13, "mtlo", CPURegs, [LO]>; +def MFHI : MoveFromLOHI<0x10, "mfhi", CPURegs, [HI]>; +def MFLO : MoveFromLOHI<0x12, "mflo", CPURegs, [LO]>; + +/// Sign Ext In Register Instructions. +def SEB : SignExtInReg<0x10, "seb", i8, CPURegs>; +def SEH : SignExtInReg<0x18, "seh", i16, CPURegs>; + +/// Count Leading +def CLZ : CountLeading0<0x20, "clz", CPURegs>; +def CLO : CountLeading1<0x21, "clo", CPURegs>; + +/// Word Swap Bytes Within Halfwords +def WSBH : SubwordSwap<0x20, 0x2, "wsbh", CPURegs>; + +/// No operation +let addr=0 in + def NOP : FJ<0, (outs), (ins), "nop", [], IIAlu>; + +// FrameIndexes are legalized when they are operands from load/store +// instructions. The same not happens for stack address copies, so an +// add op with mem ComplexPattern is used and the stack address copy +// can be matched. It's similar to Sparc LEA_ADDRi +def LEA_ADDiu : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>; + +// DynAlloc node points to dynamically allocated stack space. 
+// $sp is added to the list of implicitly used registers to prevent dead code +// elimination from removing instructions that modify $sp. +let Uses = [SP] in +def DynAlloc : EffectiveAddress<"addiu\t$rt, $addr", CPURegs, mem_ea>; + +// MADD*/MSUB* +def MADD : MArithR<0, "madd", MipsMAdd, 1>; +def MADDU : MArithR<1, "maddu", MipsMAddu, 1>; +def MSUB : MArithR<4, "msub", MipsMSub>; +def MSUBU : MArithR<5, "msubu", MipsMSubu>; + +// MUL is a assembly macro in the current used ISAs. In recent ISA's +// it is a real instruction. +def MUL : ArithLogicR<0x1c, 0x02, "mul", mul, IIImul, CPURegs, 1>, + Requires<[HasMips32]>; + +def RDHWR : ReadHardware<CPURegs, HWRegs>; + +def EXT : ExtBase<0, "ext", CPURegs>; +def INS : InsBase<4, "ins", CPURegs>; + +//===----------------------------------------------------------------------===// +// Arbitrary patterns that map to one or more instructions +//===----------------------------------------------------------------------===// + +// Small immediates +def : Pat<(i32 immSExt16:$in), + (ADDiu ZERO, imm:$in)>; +def : Pat<(i32 immZExt16:$in), + (ORi ZERO, imm:$in)>; +def : Pat<(i32 immLow16Zero:$in), + (LUi (HI16 imm:$in))>; + +// Arbitrary immediates +def : Pat<(i32 imm:$imm), + (ORi (LUi (HI16 imm:$imm)), (LO16 imm:$imm))>; + +// Carry patterns +def : Pat<(subc CPURegs:$lhs, CPURegs:$rhs), + (SUBu CPURegs:$lhs, CPURegs:$rhs)>; +def : Pat<(addc CPURegs:$lhs, CPURegs:$rhs), + (ADDu CPURegs:$lhs, CPURegs:$rhs)>; +def : Pat<(addc CPURegs:$src, immSExt16:$imm), + (ADDiu CPURegs:$src, imm:$imm)>; + +// Call +def : Pat<(MipsJmpLink (i32 tglobaladdr:$dst)), + (JAL tglobaladdr:$dst)>; +def : Pat<(MipsJmpLink (i32 texternalsym:$dst)), + (JAL texternalsym:$dst)>; +//def : Pat<(MipsJmpLink CPURegs:$dst), +// (JALR CPURegs:$dst)>; + +// hi/lo relocs +def : Pat<(MipsHi tglobaladdr:$in), (LUi tglobaladdr:$in)>; +def : Pat<(MipsHi tblockaddress:$in), (LUi tblockaddress:$in)>; +def : Pat<(MipsHi tjumptable:$in), (LUi tjumptable:$in)>; +def : 
Pat<(MipsHi tconstpool:$in), (LUi tconstpool:$in)>; +def : Pat<(MipsHi tglobaltlsaddr:$in), (LUi tglobaltlsaddr:$in)>; + +def : Pat<(MipsLo tglobaladdr:$in), (ADDiu ZERO, tglobaladdr:$in)>; +def : Pat<(MipsLo tblockaddress:$in), (ADDiu ZERO, tblockaddress:$in)>; +def : Pat<(MipsLo tjumptable:$in), (ADDiu ZERO, tjumptable:$in)>; +def : Pat<(MipsLo tconstpool:$in), (ADDiu ZERO, tconstpool:$in)>; +def : Pat<(MipsLo tglobaltlsaddr:$in), (ADDiu ZERO, tglobaltlsaddr:$in)>; + +def : Pat<(add CPURegs:$hi, (MipsLo tglobaladdr:$lo)), + (ADDiu CPURegs:$hi, tglobaladdr:$lo)>; +def : Pat<(add CPURegs:$hi, (MipsLo tblockaddress:$lo)), + (ADDiu CPURegs:$hi, tblockaddress:$lo)>; +def : Pat<(add CPURegs:$hi, (MipsLo tjumptable:$lo)), + (ADDiu CPURegs:$hi, tjumptable:$lo)>; +def : Pat<(add CPURegs:$hi, (MipsLo tconstpool:$lo)), + (ADDiu CPURegs:$hi, tconstpool:$lo)>; +def : Pat<(add CPURegs:$hi, (MipsLo tglobaltlsaddr:$lo)), + (ADDiu CPURegs:$hi, tglobaltlsaddr:$lo)>; + +// gp_rel relocs +def : Pat<(add CPURegs:$gp, (MipsGPRel tglobaladdr:$in)), + (ADDiu CPURegs:$gp, tglobaladdr:$in)>; +def : Pat<(add CPURegs:$gp, (MipsGPRel tconstpool:$in)), + (ADDiu CPURegs:$gp, tconstpool:$in)>; + +// wrapper_pic +class WrapperPat<SDNode node, Instruction ADDiuOp, RegisterClass RC>: + Pat<(MipsWrapper RC:$gp, node:$in), + (ADDiuOp RC:$gp, node:$in)>; + +def : WrapperPat<tglobaladdr, ADDiu, CPURegs>; +def : WrapperPat<tconstpool, ADDiu, CPURegs>; +def : WrapperPat<texternalsym, ADDiu, CPURegs>; +def : WrapperPat<tblockaddress, ADDiu, CPURegs>; +def : WrapperPat<tjumptable, ADDiu, CPURegs>; +def : WrapperPat<tglobaltlsaddr, ADDiu, CPURegs>; + +// Mips does not have "not", so we expand our way +def : Pat<(not CPURegs:$in), + (NOR CPURegs:$in, ZERO)>; + +// extended loads +let Predicates = [NotN64] in { + def : Pat<(i32 (extloadi1 addr:$src)), (LBu addr:$src)>; + def : Pat<(i32 (extloadi8 addr:$src)), (LBu addr:$src)>; + def : Pat<(i32 (extloadi16_a addr:$src)), (LHu addr:$src)>; + def : Pat<(i32 
(extloadi16_u addr:$src)), (ULHu addr:$src)>; +} +let Predicates = [IsN64] in { + def : Pat<(i32 (extloadi1 addr:$src)), (LBu_P8 addr:$src)>; + def : Pat<(i32 (extloadi8 addr:$src)), (LBu_P8 addr:$src)>; + def : Pat<(i32 (extloadi16_a addr:$src)), (LHu_P8 addr:$src)>; + def : Pat<(i32 (extloadi16_u addr:$src)), (ULHu_P8 addr:$src)>; +} + +// peepholes +let Predicates = [NotN64] in { + def : Pat<(store_a (i32 0), addr:$dst), (SW ZERO, addr:$dst)>; + def : Pat<(store_u (i32 0), addr:$dst), (USW ZERO, addr:$dst)>; +} +let Predicates = [IsN64] in { + def : Pat<(store_a (i32 0), addr:$dst), (SW_P8 ZERO, addr:$dst)>; + def : Pat<(store_u (i32 0), addr:$dst), (USW_P8 ZERO, addr:$dst)>; +} + +// brcond patterns +multiclass BrcondPats<RegisterClass RC, Instruction BEQOp, Instruction BNEOp, + Instruction SLTOp, Instruction SLTuOp, Instruction SLTiOp, + Instruction SLTiuOp, Register ZEROReg> { +def : Pat<(brcond (i32 (setne RC:$lhs, 0)), bb:$dst), + (BNEOp RC:$lhs, ZEROReg, bb:$dst)>; +def : Pat<(brcond (i32 (seteq RC:$lhs, 0)), bb:$dst), + (BEQOp RC:$lhs, ZEROReg, bb:$dst)>; + +def : Pat<(brcond (i32 (setge RC:$lhs, RC:$rhs)), bb:$dst), + (BEQ (SLTOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>; +def : Pat<(brcond (i32 (setuge RC:$lhs, RC:$rhs)), bb:$dst), + (BEQ (SLTuOp RC:$lhs, RC:$rhs), ZERO, bb:$dst)>; +def : Pat<(brcond (i32 (setge RC:$lhs, immSExt16:$rhs)), bb:$dst), + (BEQ (SLTiOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>; +def : Pat<(brcond (i32 (setuge RC:$lhs, immSExt16:$rhs)), bb:$dst), + (BEQ (SLTiuOp RC:$lhs, immSExt16:$rhs), ZERO, bb:$dst)>; + +def : Pat<(brcond (i32 (setle RC:$lhs, RC:$rhs)), bb:$dst), + (BEQ (SLTOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>; +def : Pat<(brcond (i32 (setule RC:$lhs, RC:$rhs)), bb:$dst), + (BEQ (SLTuOp RC:$rhs, RC:$lhs), ZERO, bb:$dst)>; + +def : Pat<(brcond RC:$cond, bb:$dst), + (BNEOp RC:$cond, ZEROReg, bb:$dst)>; +} + +defm : BrcondPats<CPURegs, BEQ, BNE, SLT, SLTu, SLTi, SLTiu, ZERO>; + +// setcc patterns +multiclass SeteqPats<RegisterClass 
RC, Instruction SLTiuOp, Instruction XOROp, + Instruction SLTuOp, Register ZEROReg> { + def : Pat<(seteq RC:$lhs, RC:$rhs), + (SLTiuOp (XOROp RC:$lhs, RC:$rhs), 1)>; + def : Pat<(setne RC:$lhs, RC:$rhs), + (SLTuOp ZEROReg, (XOROp RC:$lhs, RC:$rhs))>; +} + +multiclass SetlePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> { + def : Pat<(setle RC:$lhs, RC:$rhs), + (XORi (SLTOp RC:$rhs, RC:$lhs), 1)>; + def : Pat<(setule RC:$lhs, RC:$rhs), + (XORi (SLTuOp RC:$rhs, RC:$lhs), 1)>; +} + +multiclass SetgtPats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> { + def : Pat<(setgt RC:$lhs, RC:$rhs), + (SLTOp RC:$rhs, RC:$lhs)>; + def : Pat<(setugt RC:$lhs, RC:$rhs), + (SLTuOp RC:$rhs, RC:$lhs)>; +} + +multiclass SetgePats<RegisterClass RC, Instruction SLTOp, Instruction SLTuOp> { + def : Pat<(setge RC:$lhs, RC:$rhs), + (XORi (SLTOp RC:$lhs, RC:$rhs), 1)>; + def : Pat<(setuge RC:$lhs, RC:$rhs), + (XORi (SLTuOp RC:$lhs, RC:$rhs), 1)>; +} + +multiclass SetgeImmPats<RegisterClass RC, Instruction SLTiOp, + Instruction SLTiuOp> { + def : Pat<(setge RC:$lhs, immSExt16:$rhs), + (XORi (SLTiOp RC:$lhs, immSExt16:$rhs), 1)>; + def : Pat<(setuge RC:$lhs, immSExt16:$rhs), + (XORi (SLTiuOp RC:$lhs, immSExt16:$rhs), 1)>; +} + +defm : SeteqPats<CPURegs, SLTiu, XOR, SLTu, ZERO>; +defm : SetlePats<CPURegs, SLT, SLTu>; +defm : SetgtPats<CPURegs, SLT, SLTu>; +defm : SetgePats<CPURegs, SLT, SLTu>; +defm : SetgeImmPats<CPURegs, SLTi, SLTiu>; + +// select MipsDynAlloc +def : Pat<(MipsDynAlloc addr:$f), (DynAlloc addr:$f)>; + +// bswap pattern +def : Pat<(bswap CPURegs:$rt), (ROTR (WSBH CPURegs:$rt), 16)>; + +//===----------------------------------------------------------------------===// +// Floating Point Support +//===----------------------------------------------------------------------===// + +include "MipsInstrFPU.td" +include "Mips64InstrInfo.td" +include "MipsCondMov.td" + diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp 
b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp new file mode 100644 index 0000000..76ca3e1 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.cpp @@ -0,0 +1,233 @@ +//===-- MipsJITInfo.cpp - Implement the Mips JIT Interface ----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the JIT interfaces for the Mips target. +// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "jit" +#include "MipsJITInfo.h" +#include "MipsInstrInfo.h" +#include "MipsRelocations.h" +#include "MipsSubtarget.h" +#include "llvm/Function.h" +#include "llvm/CodeGen/JITCodeEmitter.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/Support/Memory.h" +#include <cstdlib> +using namespace llvm; + + +void MipsJITInfo::replaceMachineCodeForFunction(void *Old, void *New) { + report_fatal_error("MipsJITInfo::replaceMachineCodeForFunction"); +} + +/// JITCompilerFunction - This contains the address of the JIT function used to +/// compile a function lazily. +static TargetJITInfo::JITCompilerFn JITCompilerFunction; + +// Get the ASMPREFIX for the current host. This is often '_'. +#ifndef __USER_LABEL_PREFIX__ +#define __USER_LABEL_PREFIX__ +#endif +#define GETASMPREFIX2(X) #X +#define GETASMPREFIX(X) GETASMPREFIX2(X) +#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__) + +// CompilationCallback stub - We can't use a C function with inline assembly in +// it, because the prolog/epilog inserted by GCC won't work for us. Instead, +// write our own wrapper, which does things our way, so we have complete control +// over register saving and restoring. 
This code saves registers, calls +// MipsCompilationCallbackC and restores registers. +extern "C" { +#if defined (__mips__) +void MipsCompilationCallback(); + + asm( + ".text\n" + ".align 2\n" + ".globl " ASMPREFIX "MipsCompilationCallback\n" + ASMPREFIX "MipsCompilationCallback:\n" + ".ent " ASMPREFIX "MipsCompilationCallback\n" + ".frame $sp, 32, $ra\n" + ".set noreorder\n" + ".cpload $t9\n" + + "addiu $sp, $sp, -64\n" + ".cprestore 16\n" + + // Save argument registers a0, a1, a2, a3, f12, f14 since they may contain + // stuff for the real target function right now. We have to act as if this + // whole compilation callback doesn't exist as far as the caller is + // concerned. We also need to save the ra register since it contains the + // original return address, and t8 register since it contains the address + // of the end of function stub. + "sw $a0, 20($sp)\n" + "sw $a1, 24($sp)\n" + "sw $a2, 28($sp)\n" + "sw $a3, 32($sp)\n" + "sw $ra, 36($sp)\n" + "sw $t8, 40($sp)\n" + "sdc1 $f12, 48($sp)\n" + "sdc1 $f14, 56($sp)\n" + + // t8 points at the end of function stub. Pass the beginning of the stub + // to the MipsCompilationCallbackC. + "addiu $a0, $t8, -16\n" + "jal " ASMPREFIX "MipsCompilationCallbackC\n" + "nop\n" + + // Restore registers. + "lw $a0, 20($sp)\n" + "lw $a1, 24($sp)\n" + "lw $a2, 28($sp)\n" + "lw $a3, 32($sp)\n" + "lw $ra, 36($sp)\n" + "lw $t8, 40($sp)\n" + "ldc1 $f12, 48($sp)\n" + "ldc1 $f14, 56($sp)\n" + "addiu $sp, $sp, 64\n" + + // Jump to the (newly modified) stub to invoke the real function. + "addiu $t8, $t8, -16\n" + "jr $t8\n" + "nop\n" + + ".set reorder\n" + ".end " ASMPREFIX "MipsCompilationCallback\n" + ); +#else // host != Mips + void MipsCompilationCallback() { + llvm_unreachable( + "Cannot call MipsCompilationCallback() on a non-Mips arch!"); + } +#endif +} + +/// MipsCompilationCallbackC - This is the target-specific function invoked +/// by the function stub when we did not know the real target of a call. 
+/// This function must locate the start of the stub or call site and pass +/// it into the JIT compiler function. +extern "C" void MipsCompilationCallbackC(intptr_t StubAddr) { + // Get the address of the compiled code for this function. + intptr_t NewVal = (intptr_t) JITCompilerFunction((void*) StubAddr); + + // Rewrite the function stub so that we don't end up here every time we + // execute the call. We're replacing the first four instructions of the + // stub with code that jumps to the compiled function: + // lui $t9, %hi(NewVal) + // addiu $t9, $t9, %lo(NewVal) + // jr $t9 + // nop + + int Hi = ((unsigned)NewVal & 0xffff0000) >> 16; + if ((NewVal & 0x8000) != 0) + Hi++; + int Lo = (int)(NewVal & 0xffff); + + *(intptr_t *)(StubAddr) = 0xf << 26 | 25 << 16 | Hi; + *(intptr_t *)(StubAddr + 4) = 9 << 26 | 25 << 21 | 25 << 16 | Lo; + *(intptr_t *)(StubAddr + 8) = 25 << 21 | 8; + *(intptr_t *)(StubAddr + 12) = 0; + + sys::Memory::InvalidateInstructionCache((void*) StubAddr, 16); +} + +TargetJITInfo::LazyResolverFn MipsJITInfo::getLazyResolverFunction( + JITCompilerFn F) { + JITCompilerFunction = F; + return MipsCompilationCallback; +} + +TargetJITInfo::StubLayout MipsJITInfo::getStubLayout() { + // The stub contains 4 4-byte instructions, aligned at 4 bytes. See + // emitFunctionStub for details. 
+ StubLayout Result = { 4*4, 4 }; + return Result; +} + +void *MipsJITInfo::emitFunctionStub(const Function* F, void *Fn, + JITCodeEmitter &JCE) { + JCE.emitAlignment(4); + void *Addr = (void*) (JCE.getCurrentPCValue()); + if (!sys::Memory::setRangeWritable(Addr, 16)) + llvm_unreachable("ERROR: Unable to mark stub writable."); + + intptr_t EmittedAddr; + if (Fn != (void*)(intptr_t)MipsCompilationCallback) + EmittedAddr = (intptr_t)Fn; + else + EmittedAddr = (intptr_t)MipsCompilationCallback; + + + int Hi = ((unsigned)EmittedAddr & 0xffff0000) >> 16; + if ((EmittedAddr & 0x8000) != 0) + Hi++; + int Lo = (int)(EmittedAddr & 0xffff); + + // lui t9, %hi(EmittedAddr) + // addiu t9, t9, %lo(EmittedAddr) + // jalr t8, t9 + // nop + JCE.emitWordLE(0xf << 26 | 25 << 16 | Hi); + JCE.emitWordLE(9 << 26 | 25 << 21 | 25 << 16 | Lo); + JCE.emitWordLE(25 << 21 | 24 << 11 | 9); + JCE.emitWordLE(0); + + sys::Memory::InvalidateInstructionCache(Addr, 16); + if (!sys::Memory::setRangeExecutable(Addr, 16)) + llvm_unreachable("ERROR: Unable to mark stub executable."); + + return Addr; +} + +/// relocate - Before the JIT can run a block of code that has been emitted, +/// it must rewrite the code to contain the actual addresses of any +/// referenced global symbols. 
+void MipsJITInfo::relocate(void *Function, MachineRelocation *MR, + unsigned NumRelocs, unsigned char* GOTBase) { + for (unsigned i = 0; i != NumRelocs; ++i, ++MR) { + + void *RelocPos = (char*) Function + MR->getMachineCodeOffset(); + intptr_t ResultPtr = (intptr_t) MR->getResultPointer(); + + switch ((Mips::RelocationType) MR->getRelocationType()) { + case Mips::reloc_mips_pc16: + ResultPtr = (((ResultPtr - (intptr_t) RelocPos) - 4) >> 2) & 0xffff; + *((unsigned*) RelocPos) |= (unsigned) ResultPtr; + break; + + case Mips::reloc_mips_26: + ResultPtr = (ResultPtr & 0x0fffffff) >> 2; + *((unsigned*) RelocPos) |= (unsigned) ResultPtr; + break; + + case Mips::reloc_mips_hi: + ResultPtr = ResultPtr >> 16; + if ((((intptr_t) (MR->getResultPointer()) & 0xffff) >> 15) == 1) { + ResultPtr += 1; + } + *((unsigned*) RelocPos) |= (unsigned) ResultPtr; + break; + + case Mips::reloc_mips_lo: { + // Addend is needed for unaligned load/store instructions, where offset + // for the second load/store in the expanded instruction sequence must + // be modified by +1 or +3. Otherwise, Addend is 0. + int Addend = *((unsigned*) RelocPos) & 0xffff; + ResultPtr = (ResultPtr + Addend) & 0xffff; + *((unsigned*) RelocPos) &= 0xffff0000; + *((unsigned*) RelocPos) |= (unsigned) ResultPtr; + break; + } + } + } +} diff --git a/contrib/llvm/lib/Target/Mips/MipsJITInfo.h b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h new file mode 100644 index 0000000..f4c4ae8 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsJITInfo.h @@ -0,0 +1,68 @@ +//===- MipsJITInfo.h - Mips Implementation of the JIT Interface -*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the MipsJITInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef MIPSJITINFO_H +#define MIPSJITINFO_H + +#include "MipsMachineFunction.h" +#include "llvm/CodeGen/MachineConstantPool.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/Target/TargetJITInfo.h" + +namespace llvm { +class MipsTargetMachine; + +class MipsJITInfo : public TargetJITInfo { + + bool IsPIC; + + public: + explicit MipsJITInfo() : + IsPIC(false) {} + + /// replaceMachineCodeForFunction - Make it so that calling the function + /// whose machine code is at OLD turns into a call to NEW, perhaps by + /// overwriting OLD with a branch to NEW. This is used for self-modifying + /// code. + /// + virtual void replaceMachineCodeForFunction(void *Old, void *New); + + // getStubLayout - Returns the size and alignment of the largest call stub + // on Mips. + virtual StubLayout getStubLayout(); + + /// emitFunctionStub - Use the specified JITCodeEmitter object to emit a + /// small native function that simply calls the function at the specified + /// address. + virtual void *emitFunctionStub(const Function* F, void *Fn, + JITCodeEmitter &JCE); + + /// getLazyResolverFunction - Expose the lazy resolver to the JIT. + virtual LazyResolverFn getLazyResolverFunction(JITCompilerFn); + + /// relocate - Before the JIT can run a block of code that has been emitted, + /// it must rewrite the code to contain the actual addresses of any + /// referenced global symbols. + virtual void relocate(void *Function, MachineRelocation *MR, + unsigned NumRelocs, unsigned char* GOTBase); + + /// Initialize - Initialize internal stage for the function being JITted. 
+ void Initialize(const MachineFunction &MF, bool isPIC) { + IsPIC = isPIC; + } + +}; +} + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp new file mode 100644 index 0000000..1597b93 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.cpp @@ -0,0 +1,343 @@ +//===-- MipsMCInstLower.cpp - Convert Mips MachineInstr to MCInst ---------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains code to lower Mips MachineInstrs to their corresponding +// MCInst records. +// +//===----------------------------------------------------------------------===// + +#include "MipsMCInstLower.h" +#include "MipsAsmPrinter.h" +#include "MipsInstrInfo.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/Target/Mangler.h" + +using namespace llvm; + +MipsMCInstLower::MipsMCInstLower(MipsAsmPrinter &asmprinter) + : AsmPrinter(asmprinter) {} + +void MipsMCInstLower::Initialize(Mangler *M, MCContext* C) { + Mang = M; + Ctx = C; +} + +MCOperand MipsMCInstLower::LowerSymbolOperand(const MachineOperand &MO, + MachineOperandType MOTy, + unsigned Offset) const { + MCSymbolRefExpr::VariantKind Kind; + const MCSymbol *Symbol; + + switch(MO.getTargetFlags()) { + default: llvm_unreachable("Invalid target flag!"); + case MipsII::MO_NO_FLAG: Kind = MCSymbolRefExpr::VK_None; break; + case MipsII::MO_GPREL: Kind = MCSymbolRefExpr::VK_Mips_GPREL; break; + case MipsII::MO_GOT_CALL: Kind = MCSymbolRefExpr::VK_Mips_GOT_CALL; break; + case MipsII::MO_GOT16: Kind = 
MCSymbolRefExpr::VK_Mips_GOT16; break; + case MipsII::MO_GOT: Kind = MCSymbolRefExpr::VK_Mips_GOT; break; + case MipsII::MO_ABS_HI: Kind = MCSymbolRefExpr::VK_Mips_ABS_HI; break; + case MipsII::MO_ABS_LO: Kind = MCSymbolRefExpr::VK_Mips_ABS_LO; break; + case MipsII::MO_TLSGD: Kind = MCSymbolRefExpr::VK_Mips_TLSGD; break; + case MipsII::MO_TLSLDM: Kind = MCSymbolRefExpr::VK_Mips_TLSLDM; break; + case MipsII::MO_DTPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_HI; break; + case MipsII::MO_DTPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_DTPREL_LO; break; + case MipsII::MO_GOTTPREL: Kind = MCSymbolRefExpr::VK_Mips_GOTTPREL; break; + case MipsII::MO_TPREL_HI: Kind = MCSymbolRefExpr::VK_Mips_TPREL_HI; break; + case MipsII::MO_TPREL_LO: Kind = MCSymbolRefExpr::VK_Mips_TPREL_LO; break; + case MipsII::MO_GPOFF_HI: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_HI; break; + case MipsII::MO_GPOFF_LO: Kind = MCSymbolRefExpr::VK_Mips_GPOFF_LO; break; + case MipsII::MO_GOT_DISP: Kind = MCSymbolRefExpr::VK_Mips_GOT_DISP; break; + case MipsII::MO_GOT_PAGE: Kind = MCSymbolRefExpr::VK_Mips_GOT_PAGE; break; + case MipsII::MO_GOT_OFST: Kind = MCSymbolRefExpr::VK_Mips_GOT_OFST; break; + } + + switch (MOTy) { + case MachineOperand::MO_MachineBasicBlock: + Symbol = MO.getMBB()->getSymbol(); + break; + + case MachineOperand::MO_GlobalAddress: + Symbol = Mang->getSymbol(MO.getGlobal()); + break; + + case MachineOperand::MO_BlockAddress: + Symbol = AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress()); + break; + + case MachineOperand::MO_ExternalSymbol: + Symbol = AsmPrinter.GetExternalSymbolSymbol(MO.getSymbolName()); + break; + + case MachineOperand::MO_JumpTableIndex: + Symbol = AsmPrinter.GetJTISymbol(MO.getIndex()); + break; + + case MachineOperand::MO_ConstantPoolIndex: + Symbol = AsmPrinter.GetCPISymbol(MO.getIndex()); + if (MO.getOffset()) + Offset += MO.getOffset(); + break; + + default: + llvm_unreachable("<unknown operand type>"); + } + + const MCSymbolRefExpr *MCSym = 
MCSymbolRefExpr::Create(Symbol, Kind, *Ctx); + + if (!Offset) + return MCOperand::CreateExpr(MCSym); + + // Assume offset is never negative. + assert(Offset > 0); + + const MCConstantExpr *OffsetExpr = MCConstantExpr::Create(Offset, *Ctx); + const MCBinaryExpr *AddExpr = MCBinaryExpr::CreateAdd(MCSym, OffsetExpr, *Ctx); + return MCOperand::CreateExpr(AddExpr); +} + +static void CreateMCInst(MCInst& Inst, unsigned Opc, const MCOperand& Opnd0, + const MCOperand& Opnd1, + const MCOperand& Opnd2 = MCOperand()) { + Inst.setOpcode(Opc); + Inst.addOperand(Opnd0); + Inst.addOperand(Opnd1); + if (Opnd2.isValid()) + Inst.addOperand(Opnd2); +} + +// Lower ".cpload $reg" to +// "lui $gp, %hi(_gp_disp)" +// "addiu $gp, $gp, %lo(_gp_disp)" +// "addu $gp, $gp, $t9" +void MipsMCInstLower::LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts) { + MCOperand GPReg = MCOperand::CreateReg(Mips::GP); + MCOperand T9Reg = MCOperand::CreateReg(Mips::T9); + StringRef SymName("_gp_disp"); + const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName); + const MCSymbolRefExpr *MCSym; + + MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx); + MCOperand SymHi = MCOperand::CreateExpr(MCSym); + MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx); + MCOperand SymLo = MCOperand::CreateExpr(MCSym); + + MCInsts.resize(3); + + CreateMCInst(MCInsts[0], Mips::LUi, GPReg, SymHi); + CreateMCInst(MCInsts[1], Mips::ADDiu, GPReg, GPReg, SymLo); + CreateMCInst(MCInsts[2], Mips::ADDu, GPReg, GPReg, T9Reg); +} + +// Lower ".cprestore offset" to "sw $gp, offset($sp)". 
+void MipsMCInstLower::LowerCPRESTORE(int64_t Offset, + SmallVector<MCInst, 4>& MCInsts) { + assert(isInt<32>(Offset) && (Offset >= 0) && + "Imm operand of .cprestore must be a non-negative 32-bit value."); + + MCOperand SPReg = MCOperand::CreateReg(Mips::SP), BaseReg = SPReg; + MCOperand GPReg = MCOperand::CreateReg(Mips::GP); + + if (!isInt<16>(Offset)) { + unsigned Hi = ((Offset + 0x8000) >> 16) & 0xffff; + Offset &= 0xffff; + MCOperand ATReg = MCOperand::CreateReg(Mips::AT); + BaseReg = ATReg; + + // lui at,hi + // addu at,at,sp + MCInsts.resize(2); + CreateMCInst(MCInsts[0], Mips::LUi, ATReg, MCOperand::CreateImm(Hi)); + CreateMCInst(MCInsts[1], Mips::ADDu, ATReg, ATReg, SPReg); + } + + MCInst Sw; + CreateMCInst(Sw, Mips::SW, GPReg, BaseReg, MCOperand::CreateImm(Offset)); + MCInsts.push_back(Sw); +} + +MCOperand MipsMCInstLower::LowerOperand(const MachineOperand& MO, + unsigned offset) const { + MachineOperandType MOTy = MO.getType(); + + switch (MOTy) { + default: llvm_unreachable("unknown operand type"); + case MachineOperand::MO_Register: + // Ignore all implicit register operands. 
+ if (MO.isImplicit()) break; + return MCOperand::CreateReg(MO.getReg()); + case MachineOperand::MO_Immediate: + return MCOperand::CreateImm(MO.getImm() + offset); + case MachineOperand::MO_MachineBasicBlock: + case MachineOperand::MO_GlobalAddress: + case MachineOperand::MO_ExternalSymbol: + case MachineOperand::MO_JumpTableIndex: + case MachineOperand::MO_ConstantPoolIndex: + case MachineOperand::MO_BlockAddress: + return LowerSymbolOperand(MO, MOTy, offset); + case MachineOperand::MO_RegisterMask: + break; + } + + return MCOperand(); +} + +void MipsMCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { + OutMI.setOpcode(MI->getOpcode()); + + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + const MachineOperand &MO = MI->getOperand(i); + MCOperand MCOp = LowerOperand(MO); + + if (MCOp.isValid()) + OutMI.addOperand(MCOp); + } +} + +void MipsMCInstLower::LowerUnalignedLoadStore(const MachineInstr *MI, + SmallVector<MCInst, + 4>& MCInsts) { + unsigned Opc = MI->getOpcode(); + MCInst Instr1, Instr2, Instr3, Move; + + bool TwoInstructions = false; + + assert(MI->getNumOperands() == 3); + assert(MI->getOperand(0).isReg()); + assert(MI->getOperand(1).isReg()); + + MCOperand Target = LowerOperand(MI->getOperand(0)); + MCOperand Base = LowerOperand(MI->getOperand(1)); + MCOperand ATReg = MCOperand::CreateReg(Mips::AT); + MCOperand ZeroReg = MCOperand::CreateReg(Mips::ZERO); + + MachineOperand UnLoweredName = MI->getOperand(2); + MCOperand Name = LowerOperand(UnLoweredName); + + Move.setOpcode(Mips::ADDu); + Move.addOperand(Target); + Move.addOperand(ATReg); + Move.addOperand(ZeroReg); + + switch (Opc) { + case Mips::ULW: { + // FIXME: only works for little endian right now + MCOperand AdjName = LowerOperand(UnLoweredName, 3); + if (Base.getReg() == (Target.getReg())) { + Instr1.setOpcode(Mips::LWL); + Instr1.addOperand(ATReg); + Instr1.addOperand(Base); + Instr1.addOperand(AdjName); + Instr2.setOpcode(Mips::LWR); + Instr2.addOperand(ATReg); + 
Instr2.addOperand(Base); + Instr2.addOperand(Name); + Instr3 = Move; + } else { + TwoInstructions = true; + Instr1.setOpcode(Mips::LWL); + Instr1.addOperand(Target); + Instr1.addOperand(Base); + Instr1.addOperand(AdjName); + Instr2.setOpcode(Mips::LWR); + Instr2.addOperand(Target); + Instr2.addOperand(Base); + Instr2.addOperand(Name); + } + break; + } + case Mips::ULHu: { + // FIXME: only works for little endian right now + MCOperand AdjName = LowerOperand(UnLoweredName, 1); + Instr1.setOpcode(Mips::LBu); + Instr1.addOperand(ATReg); + Instr1.addOperand(Base); + Instr1.addOperand(AdjName); + Instr2.setOpcode(Mips::LBu); + Instr2.addOperand(Target); + Instr2.addOperand(Base); + Instr2.addOperand(Name); + Instr3.setOpcode(Mips::INS); + Instr3.addOperand(Target); + Instr3.addOperand(ATReg); + Instr3.addOperand(MCOperand::CreateImm(0x8)); + Instr3.addOperand(MCOperand::CreateImm(0x18)); + break; + } + + case Mips::USW: { + // FIXME: only works for little endian right now + assert (Base.getReg() != Target.getReg()); + TwoInstructions = true; + MCOperand AdjName = LowerOperand(UnLoweredName, 3); + Instr1.setOpcode(Mips::SWL); + Instr1.addOperand(Target); + Instr1.addOperand(Base); + Instr1.addOperand(AdjName); + Instr2.setOpcode(Mips::SWR); + Instr2.addOperand(Target); + Instr2.addOperand(Base); + Instr2.addOperand(Name); + break; + } + case Mips::USH: { + MCOperand AdjName = LowerOperand(UnLoweredName, 1); + Instr1.setOpcode(Mips::SB); + Instr1.addOperand(Target); + Instr1.addOperand(Base); + Instr1.addOperand(Name); + Instr2.setOpcode(Mips::SRL); + Instr2.addOperand(ATReg); + Instr2.addOperand(Target); + Instr2.addOperand(MCOperand::CreateImm(8)); + Instr3.setOpcode(Mips::SB); + Instr3.addOperand(ATReg); + Instr3.addOperand(Base); + Instr3.addOperand(AdjName); + break; + } + default: + // FIXME: need to add others + llvm_unreachable("unaligned instruction not processed"); + } + + MCInsts.push_back(Instr1); + MCInsts.push_back(Instr2); + if (!TwoInstructions) 
MCInsts.push_back(Instr3); +} + +// Convert +// "setgp01 $reg" +// to +// "lui $reg, %hi(_gp_disp)" +// "addiu $reg, $reg, %lo(_gp_disp)" +void MipsMCInstLower::LowerSETGP01(const MachineInstr *MI, + SmallVector<MCInst, 4>& MCInsts) { + const MachineOperand &MO = MI->getOperand(0); + assert(MO.isReg()); + MCOperand RegOpnd = MCOperand::CreateReg(MO.getReg()); + StringRef SymName("_gp_disp"); + const MCSymbol *Sym = Ctx->GetOrCreateSymbol(SymName); + const MCSymbolRefExpr *MCSym; + + MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_HI, *Ctx); + MCOperand SymHi = MCOperand::CreateExpr(MCSym); + MCSym = MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_Mips_ABS_LO, *Ctx); + MCOperand SymLo = MCOperand::CreateExpr(MCSym); + + MCInsts.resize(2); + + CreateMCInst(MCInsts[0], Mips::LUi, RegOpnd, SymHi); + CreateMCInst(MCInsts[1], Mips::ADDiu, RegOpnd, RegOpnd, SymLo); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h new file mode 100644 index 0000000..c1d007d --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsMCInstLower.h @@ -0,0 +1,48 @@ +//===-- MipsMCInstLower.h - Lower MachineInstr to MCInst -------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSMCINSTLOWER_H +#define MIPSMCINSTLOWER_H +#include "llvm/ADT/SmallVector.h" +#include "llvm/CodeGen/MachineOperand.h" +#include "llvm/Support/Compiler.h" + +namespace llvm { + class MCContext; + class MCInst; + class MCOperand; + class MachineInstr; + class MachineFunction; + class Mangler; + class MipsAsmPrinter; + +/// MipsMCInstLower - This class is used to lower an MachineInstr into an +// MCInst. 
+class LLVM_LIBRARY_VISIBILITY MipsMCInstLower { + typedef MachineOperand::MachineOperandType MachineOperandType; + MCContext *Ctx; + Mangler *Mang; + MipsAsmPrinter &AsmPrinter; +public: + MipsMCInstLower(MipsAsmPrinter &asmprinter); + void Initialize(Mangler *mang, MCContext* C); + void Lower(const MachineInstr *MI, MCInst &OutMI) const; + void LowerCPLOAD(SmallVector<MCInst, 4>& MCInsts); + void LowerCPRESTORE(int64_t Offset, SmallVector<MCInst, 4>& MCInsts); + void LowerUnalignedLoadStore(const MachineInstr *MI, + SmallVector<MCInst, 4>& MCInsts); + void LowerSETGP01(const MachineInstr *MI, SmallVector<MCInst, 4>& MCInsts); +private: + MCOperand LowerSymbolOperand(const MachineOperand &MO, + MachineOperandType MOTy, unsigned Offset) const; + MCOperand LowerOperand(const MachineOperand& MO, unsigned offset = 0) const; +}; +} + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp new file mode 100644 index 0000000..b00c62b --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.cpp @@ -0,0 +1,50 @@ +//===-- MipsMachineFunctionInfo.cpp - Private data used for Mips ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +#include "MipsMachineFunction.h" +#include "MipsInstrInfo.h" +#include "MipsSubtarget.h" +#include "MCTargetDesc/MipsBaseInfo.h" +#include "llvm/Function.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineRegisterInfo.h" +#include "llvm/Support/CommandLine.h" + +using namespace llvm; + +static cl::opt<bool> +FixGlobalBaseReg("mips-fix-global-base-reg", cl::Hidden, cl::init(true), + cl::desc("Always use $gp as the global base register.")); + +bool MipsFunctionInfo::globalBaseRegFixed() const { + return FixGlobalBaseReg; +} + +bool MipsFunctionInfo::globalBaseRegSet() const { + return GlobalBaseReg; +} + +unsigned MipsFunctionInfo::getGlobalBaseReg() { + // Return if it has already been initialized. + if (GlobalBaseReg) + return GlobalBaseReg; + + const MipsSubtarget &ST = MF.getTarget().getSubtarget<MipsSubtarget>(); + + if (FixGlobalBaseReg) // $gp is the global base register. + return GlobalBaseReg = ST.isABI_N64() ? Mips::GP_64 : Mips::GP; + + const TargetRegisterClass *RC; + RC = ST.isABI_N64() ? + Mips::CPU64RegsRegisterClass : Mips::CPURegsRegisterClass; + + return GlobalBaseReg = MF.getRegInfo().createVirtualRegister(RC); +} + +void MipsFunctionInfo::anchor() { } diff --git a/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h new file mode 100644 index 0000000..0fde55c --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsMachineFunction.h @@ -0,0 +1,111 @@ +//===-- MipsMachineFunctionInfo.h - Private data used for Mips ----*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the Mips specific subclass of MachineFunctionInfo. 
+// +//===----------------------------------------------------------------------===// + +#ifndef MIPS_MACHINE_FUNCTION_INFO_H +#define MIPS_MACHINE_FUNCTION_INFO_H + +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include <utility> + +namespace llvm { + +/// MipsFunctionInfo - This class is derived from MachineFunction private +/// Mips target-specific information for each MachineFunction. +class MipsFunctionInfo : public MachineFunctionInfo { + virtual void anchor(); + + MachineFunction& MF; + /// SRetReturnReg - Some subtargets require that sret lowering includes + /// returning the value of the returned struct in a register. This field + /// holds the virtual register into which the sret argument is passed. + unsigned SRetReturnReg; + + /// GlobalBaseReg - keeps track of the virtual register initialized for + /// use as the global base register. This is used for PIC in some PIC + /// relocation models. + unsigned GlobalBaseReg; + + /// VarArgsFrameIndex - FrameIndex for start of varargs area. + int VarArgsFrameIndex; + + // Range of frame object indices. + // InArgFIRange: Range of indices of all frame objects created during call to + // LowerFormalArguments. + // OutArgFIRange: Range of indices of all frame objects created during call to + // LowerCall except for the frame object for restoring $gp. + std::pair<int, int> InArgFIRange, OutArgFIRange; + int GPFI; // Index of the frame object for restoring $gp + mutable int DynAllocFI; // Frame index of dynamically allocated stack area. 
+ unsigned MaxCallFrameSize; + + bool EmitNOAT; + +public: + MipsFunctionInfo(MachineFunction& MF) + : MF(MF), SRetReturnReg(0), GlobalBaseReg(0), + VarArgsFrameIndex(0), InArgFIRange(std::make_pair(-1, 0)), + OutArgFIRange(std::make_pair(-1, 0)), GPFI(0), DynAllocFI(0), + MaxCallFrameSize(0), EmitNOAT(false) + {} + + bool isInArgFI(int FI) const { + return FI <= InArgFIRange.first && FI >= InArgFIRange.second; + } + void setLastInArgFI(int FI) { InArgFIRange.second = FI; } + + bool isOutArgFI(int FI) const { + return FI <= OutArgFIRange.first && FI >= OutArgFIRange.second; + } + void extendOutArgFIRange(int FirstFI, int LastFI) { + if (!OutArgFIRange.second) + // this must be the first time this function was called. + OutArgFIRange.first = FirstFI; + OutArgFIRange.second = LastFI; + } + + int getGPFI() const { return GPFI; } + void setGPFI(int FI) { GPFI = FI; } + bool needGPSaveRestore() const { return getGPFI(); } + bool isGPFI(int FI) const { return GPFI && GPFI == FI; } + + // The first call to this function creates a frame object for dynamically + // allocated stack area. 
+ int getDynAllocFI() const { + if (!DynAllocFI) + DynAllocFI = MF.getFrameInfo()->CreateFixedObject(4, 0, true); + + return DynAllocFI; + } + bool isDynAllocFI(int FI) const { return DynAllocFI && DynAllocFI == FI; } + + unsigned getSRetReturnReg() const { return SRetReturnReg; } + void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; } + + bool globalBaseRegFixed() const; + bool globalBaseRegSet() const; + unsigned getGlobalBaseReg(); + + int getVarArgsFrameIndex() const { return VarArgsFrameIndex; } + void setVarArgsFrameIndex(int Index) { VarArgsFrameIndex = Index; } + + unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; } + void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; } + + bool getEmitNOAT() const { return EmitNOAT; } + void setEmitNOAT() { EmitNOAT = true; } +}; + +} // end of namespace llvm + +#endif // MIPS_MACHINE_FUNCTION_INFO_H diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp new file mode 100644 index 0000000..f30de44 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.cpp @@ -0,0 +1,279 @@ +//===-- MipsRegisterInfo.cpp - MIPS Register Information -== --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the MIPS implementation of the TargetRegisterInfo class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-reg-info" + +#include "MipsRegisterInfo.h" +#include "Mips.h" +#include "MipsAnalyzeImmediate.h" +#include "MipsSubtarget.h" +#include "MipsMachineFunction.h" +#include "llvm/Constants.h" +#include "llvm/Type.h" +#include "llvm/Function.h" +#include "llvm/CodeGen/ValueTypes.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineFunction.h" +#include "llvm/CodeGen/MachineFrameInfo.h" +#include "llvm/Target/TargetFrameLowering.h" +#include "llvm/Target/TargetMachine.h" +#include "llvm/Target/TargetOptions.h" +#include "llvm/Target/TargetInstrInfo.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +#include "llvm/ADT/BitVector.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/Analysis/DebugInfo.h" + +#define GET_REGINFO_TARGET_DESC +#include "MipsGenRegisterInfo.inc" + +using namespace llvm; + +MipsRegisterInfo::MipsRegisterInfo(const MipsSubtarget &ST, + const TargetInstrInfo &tii) + : MipsGenRegisterInfo(Mips::RA), Subtarget(ST), TII(tii) {} + +unsigned MipsRegisterInfo::getPICCallReg() { return Mips::T9; } + +//===----------------------------------------------------------------------===// +// Callee Saved Registers methods +//===----------------------------------------------------------------------===// + +/// Mips Callee Saved Registers +const uint16_t* MipsRegisterInfo:: +getCalleeSavedRegs(const MachineFunction *MF) const +{ + if (Subtarget.isSingleFloat()) + return CSR_SingleFloatOnly_SaveList; + else if (!Subtarget.hasMips64()) + return CSR_O32_SaveList; + else if (Subtarget.isABI_N32()) + return CSR_N32_SaveList; + + assert(Subtarget.isABI_N64()); + return CSR_N64_SaveList; +} + +const uint32_t* +MipsRegisterInfo::getCallPreservedMask(CallingConv::ID) const +{ + if (Subtarget.isSingleFloat()) + return 
CSR_SingleFloatOnly_RegMask; + else if (!Subtarget.hasMips64()) + return CSR_O32_RegMask; + else if (Subtarget.isABI_N32()) + return CSR_N32_RegMask; + + assert(Subtarget.isABI_N64()); + return CSR_N64_RegMask; +} + +BitVector MipsRegisterInfo:: +getReservedRegs(const MachineFunction &MF) const { + static const uint16_t ReservedCPURegs[] = { + Mips::ZERO, Mips::AT, Mips::K0, Mips::K1, + Mips::SP, Mips::FP, Mips::RA + }; + + static const uint16_t ReservedCPU64Regs[] = { + Mips::ZERO_64, Mips::AT_64, Mips::K0_64, Mips::K1_64, + Mips::SP_64, Mips::FP_64, Mips::RA_64 + }; + + BitVector Reserved(getNumRegs()); + typedef TargetRegisterClass::iterator RegIter; + + for (unsigned I = 0; I < array_lengthof(ReservedCPURegs); ++I) + Reserved.set(ReservedCPURegs[I]); + + if (Subtarget.hasMips64()) { + for (unsigned I = 0; I < array_lengthof(ReservedCPU64Regs); ++I) + Reserved.set(ReservedCPU64Regs[I]); + + // Reserve all registers in AFGR64. + for (RegIter Reg = Mips::AFGR64RegisterClass->begin(); + Reg != Mips::AFGR64RegisterClass->end(); ++Reg) + Reserved.set(*Reg); + } + else { + // Reserve all registers in CPU64Regs & FGR64. + for (RegIter Reg = Mips::CPU64RegsRegisterClass->begin(); + Reg != Mips::CPU64RegsRegisterClass->end(); ++Reg) + Reserved.set(*Reg); + + for (RegIter Reg = Mips::FGR64RegisterClass->begin(); + Reg != Mips::FGR64RegisterClass->end(); ++Reg) + Reserved.set(*Reg); + } + + // If GP is dedicated as a global base register, reserve it. + if (MF.getInfo<MipsFunctionInfo>()->globalBaseRegFixed()) { + Reserved.set(Mips::GP); + Reserved.set(Mips::GP_64); + } + + // Reserve hardware registers. 
+ Reserved.set(Mips::HWR29); + Reserved.set(Mips::HWR29_64); + + return Reserved; +} + +bool +MipsRegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const { + return true; +} + +// This function eliminate ADJCALLSTACKDOWN, +// ADJCALLSTACKUP pseudo instructions +void MipsRegisterInfo:: +eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const { + // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions. + MBB.erase(I); +} + +// FrameIndex represent objects inside a abstract stack. +// We must replace FrameIndex with an stack/frame pointer +// direct reference. +void MipsRegisterInfo:: +eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj, + RegScavenger *RS) const { + MachineInstr &MI = *II; + MachineFunction &MF = *MI.getParent()->getParent(); + MachineFrameInfo *MFI = MF.getFrameInfo(); + MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>(); + + unsigned i = 0; + while (!MI.getOperand(i).isFI()) { + ++i; + assert(i < MI.getNumOperands() && + "Instr doesn't have FrameIndex operand!"); + } + + DEBUG(errs() << "\nFunction : " << MF.getFunction()->getName() << "\n"; + errs() << "<--------->\n" << MI); + + int FrameIndex = MI.getOperand(i).getIndex(); + uint64_t stackSize = MF.getFrameInfo()->getStackSize(); + int64_t spOffset = MF.getFrameInfo()->getObjectOffset(FrameIndex); + + DEBUG(errs() << "FrameIndex : " << FrameIndex << "\n" + << "spOffset : " << spOffset << "\n" + << "stackSize : " << stackSize << "\n"); + + const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo(); + int MinCSFI = 0; + int MaxCSFI = -1; + + if (CSI.size()) { + MinCSFI = CSI[0].getFrameIdx(); + MaxCSFI = CSI[CSI.size() - 1].getFrameIdx(); + } + + // The following stack frame objects are always referenced relative to $sp: + // 1. Outgoing arguments. + // 2. Pointer to dynamically allocated stack space. + // 3. Locations for callee-saved registers. 
+ // Everything else is referenced relative to whatever register + // getFrameRegister() returns. + unsigned FrameReg; + + if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isDynAllocFI(FrameIndex) || + (FrameIndex >= MinCSFI && FrameIndex <= MaxCSFI)) + FrameReg = Subtarget.isABI_N64() ? Mips::SP_64 : Mips::SP; + else + FrameReg = getFrameRegister(MF); + + // Calculate final offset. + // - There is no need to change the offset if the frame object is one of the + // following: an outgoing argument, pointer to a dynamically allocated + // stack space or a $gp restore location, + // - If the frame object is any of the following, its offset must be adjusted + // by adding the size of the stack: + // incoming argument, callee-saved register location or local variable. + int64_t Offset; + + if (MipsFI->isOutArgFI(FrameIndex) || MipsFI->isGPFI(FrameIndex) || + MipsFI->isDynAllocFI(FrameIndex)) + Offset = spOffset; + else + Offset = spOffset + (int64_t)stackSize; + + Offset += MI.getOperand(i+1).getImm(); + + DEBUG(errs() << "Offset : " << Offset << "\n" << "<--------->\n"); + + // If MI is not a debug value, make sure Offset fits in the 16-bit immediate + // field. + if (!MI.isDebugValue() && !isInt<16>(Offset)) { + MachineBasicBlock &MBB = *MI.getParent(); + DebugLoc DL = II->getDebugLoc(); + MipsAnalyzeImmediate AnalyzeImm; + unsigned Size = Subtarget.isABI_N64() ? 64 : 32; + unsigned LUi = Subtarget.isABI_N64() ? Mips::LUi64 : Mips::LUi; + unsigned ADDu = Subtarget.isABI_N64() ? Mips::DADDu : Mips::ADDu; + unsigned ZEROReg = Subtarget.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO; + unsigned ATReg = Subtarget.isABI_N64() ? 
Mips::AT_64 : Mips::AT; + const MipsAnalyzeImmediate::InstSeq &Seq = + AnalyzeImm.Analyze(Offset, Size, true /* LastInstrIsADDiu */); + MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin(); + + MipsFI->setEmitNOAT(); + + // The first instruction can be a LUi, which is different from other + // instructions (ADDiu, ORI and SLL) in that it does not have a register + // operand. + if (Inst->Opc == LUi) + BuildMI(MBB, II, DL, TII.get(LUi), ATReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + else + BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ZEROReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + // Build the remaining instructions in Seq except for the last one. + for (++Inst; Inst != Seq.end() - 1; ++Inst) + BuildMI(MBB, II, DL, TII.get(Inst->Opc), ATReg).addReg(ATReg) + .addImm(SignExtend64<16>(Inst->ImmOpnd)); + + BuildMI(MBB, II, DL, TII.get(ADDu), ATReg).addReg(FrameReg).addReg(ATReg); + + FrameReg = ATReg; + Offset = SignExtend64<16>(Inst->ImmOpnd); + } + + MI.getOperand(i).ChangeToRegister(FrameReg, false); + MI.getOperand(i+1).ChangeToImmediate(Offset); +} + +unsigned MipsRegisterInfo:: +getFrameRegister(const MachineFunction &MF) const { + const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); + bool IsN64 = Subtarget.isABI_N64(); + + return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) : + (IsN64 ? 
Mips::SP_64 : Mips::SP); +} + +unsigned MipsRegisterInfo:: +getEHExceptionRegister() const { + llvm_unreachable("What is the exception register"); +} + +unsigned MipsRegisterInfo:: +getEHHandlerRegister() const { + llvm_unreachable("What is the exception handler register"); +} diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h new file mode 100644 index 0000000..0716d29 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.h @@ -0,0 +1,72 @@ +//===-- MipsRegisterInfo.h - Mips Register Information Impl -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the Mips implementation of the TargetRegisterInfo class. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSREGISTERINFO_H +#define MIPSREGISTERINFO_H + +#include "Mips.h" +#include "llvm/Target/TargetRegisterInfo.h" + +#define GET_REGINFO_HEADER +#include "MipsGenRegisterInfo.inc" + +namespace llvm { +class MipsSubtarget; +class TargetInstrInfo; +class Type; + +struct MipsRegisterInfo : public MipsGenRegisterInfo { + const MipsSubtarget &Subtarget; + const TargetInstrInfo &TII; + + MipsRegisterInfo(const MipsSubtarget &Subtarget, const TargetInstrInfo &tii); + + /// getRegisterNumbering - Given the enum value for some register, e.g. + /// Mips::RA, return the number that it corresponds to (e.g. 31). + static unsigned getRegisterNumbering(unsigned RegEnum); + + /// Get PIC indirect call register + static unsigned getPICCallReg(); + + /// Adjust the Mips stack frame. + void adjustMipsStackFrame(MachineFunction &MF) const; + + /// Code Generation virtual methods... 
+ const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const; + const uint32_t *getCallPreservedMask(CallingConv::ID) const; + + BitVector getReservedRegs(const MachineFunction &MF) const; + + virtual bool requiresRegisterScavenging(const MachineFunction &MF) const; + + void eliminateCallFramePseudoInstr(MachineFunction &MF, + MachineBasicBlock &MBB, + MachineBasicBlock::iterator I) const; + + /// Stack Frame Processing Methods + void eliminateFrameIndex(MachineBasicBlock::iterator II, + int SPAdj, RegScavenger *RS = NULL) const; + + void processFunctionBeforeFrameFinalized(MachineFunction &MF) const; + + /// Debug information queries. + unsigned getFrameRegister(const MachineFunction &MF) const; + + /// Exception handling queries. + unsigned getEHExceptionRegister() const; + unsigned getEHHandlerRegister() const; +}; + +} // end namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td new file mode 100644 index 0000000..ce399a0 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsRegisterInfo.td @@ -0,0 +1,305 @@ +//===-- MipsRegisterInfo.td - Mips Register defs -----------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Declarations that describe the MIPS register file +//===----------------------------------------------------------------------===// +let Namespace = "Mips" in { +def sub_fpeven : SubRegIndex; +def sub_fpodd : SubRegIndex; +def sub_32 : SubRegIndex; +} + +// We have banks of 32 registers each. 
+class MipsReg<string n> : Register<n> { + field bits<5> Num; + let Namespace = "Mips"; +} + +class MipsRegWithSubRegs<string n, list<Register> subregs> + : RegisterWithSubRegs<n, subregs> { + field bits<5> Num; + let Namespace = "Mips"; +} + +// Mips CPU Registers +class MipsGPRReg<bits<5> num, string n> : MipsReg<n> { + let Num = num; +} + +// Mips 64-bit CPU Registers +class Mips64GPRReg<bits<5> num, string n, list<Register> subregs> + : MipsRegWithSubRegs<n, subregs> { + let Num = num; + let SubRegIndices = [sub_32]; +} + +// Mips 32-bit FPU Registers +class FPR<bits<5> num, string n> : MipsReg<n> { + let Num = num; +} + +// Mips 64-bit (aliased) FPU Registers +class AFPR<bits<5> num, string n, list<Register> subregs> + : MipsRegWithSubRegs<n, subregs> { + let Num = num; + let SubRegIndices = [sub_fpeven, sub_fpodd]; + let CoveredBySubRegs = 1; +} + +class AFPR64<bits<5> num, string n, list<Register> subregs> + : MipsRegWithSubRegs<n, subregs> { + let Num = num; + let SubRegIndices = [sub_32]; +} + +// Mips Hardware Registers +class HWR<bits<5> num, string n> : MipsReg<n> { + let Num = num; +} + +//===----------------------------------------------------------------------===// +// Registers +//===----------------------------------------------------------------------===// + +let Namespace = "Mips" in { + // General Purpose Registers + def ZERO : MipsGPRReg< 0, "ZERO">, DwarfRegNum<[0]>; + def AT : MipsGPRReg< 1, "AT">, DwarfRegNum<[1]>; + def V0 : MipsGPRReg< 2, "2">, DwarfRegNum<[2]>; + def V1 : MipsGPRReg< 3, "3">, DwarfRegNum<[3]>; + def A0 : MipsGPRReg< 4, "4">, DwarfRegNum<[4]>; + def A1 : MipsGPRReg< 5, "5">, DwarfRegNum<[5]>; + def A2 : MipsGPRReg< 6, "6">, DwarfRegNum<[6]>; + def A3 : MipsGPRReg< 7, "7">, DwarfRegNum<[7]>; + def T0 : MipsGPRReg< 8, "8">, DwarfRegNum<[8]>; + def T1 : MipsGPRReg< 9, "9">, DwarfRegNum<[9]>; + def T2 : MipsGPRReg< 10, "10">, DwarfRegNum<[10]>; + def T3 : MipsGPRReg< 11, "11">, DwarfRegNum<[11]>; + def T4 : MipsGPRReg< 12, 
"12">, DwarfRegNum<[12]>; + def T5 : MipsGPRReg< 13, "13">, DwarfRegNum<[13]>; + def T6 : MipsGPRReg< 14, "14">, DwarfRegNum<[14]>; + def T7 : MipsGPRReg< 15, "15">, DwarfRegNum<[15]>; + def S0 : MipsGPRReg< 16, "16">, DwarfRegNum<[16]>; + def S1 : MipsGPRReg< 17, "17">, DwarfRegNum<[17]>; + def S2 : MipsGPRReg< 18, "18">, DwarfRegNum<[18]>; + def S3 : MipsGPRReg< 19, "19">, DwarfRegNum<[19]>; + def S4 : MipsGPRReg< 20, "20">, DwarfRegNum<[20]>; + def S5 : MipsGPRReg< 21, "21">, DwarfRegNum<[21]>; + def S6 : MipsGPRReg< 22, "22">, DwarfRegNum<[22]>; + def S7 : MipsGPRReg< 23, "23">, DwarfRegNum<[23]>; + def T8 : MipsGPRReg< 24, "24">, DwarfRegNum<[24]>; + def T9 : MipsGPRReg< 25, "25">, DwarfRegNum<[25]>; + def K0 : MipsGPRReg< 26, "26">, DwarfRegNum<[26]>; + def K1 : MipsGPRReg< 27, "27">, DwarfRegNum<[27]>; + def GP : MipsGPRReg< 28, "GP">, DwarfRegNum<[28]>; + def SP : MipsGPRReg< 29, "SP">, DwarfRegNum<[29]>; + def FP : MipsGPRReg< 30, "FP">, DwarfRegNum<[30]>; + def RA : MipsGPRReg< 31, "RA">, DwarfRegNum<[31]>; + + // General Purpose 64-bit Registers + def ZERO_64 : Mips64GPRReg< 0, "ZERO", [ZERO]>, DwarfRegNum<[0]>; + def AT_64 : Mips64GPRReg< 1, "AT", [AT]>, DwarfRegNum<[1]>; + def V0_64 : Mips64GPRReg< 2, "2", [V0]>, DwarfRegNum<[2]>; + def V1_64 : Mips64GPRReg< 3, "3", [V1]>, DwarfRegNum<[3]>; + def A0_64 : Mips64GPRReg< 4, "4", [A0]>, DwarfRegNum<[4]>; + def A1_64 : Mips64GPRReg< 5, "5", [A1]>, DwarfRegNum<[5]>; + def A2_64 : Mips64GPRReg< 6, "6", [A2]>, DwarfRegNum<[6]>; + def A3_64 : Mips64GPRReg< 7, "7", [A3]>, DwarfRegNum<[7]>; + def T0_64 : Mips64GPRReg< 8, "8", [T0]>, DwarfRegNum<[8]>; + def T1_64 : Mips64GPRReg< 9, "9", [T1]>, DwarfRegNum<[9]>; + def T2_64 : Mips64GPRReg< 10, "10", [T2]>, DwarfRegNum<[10]>; + def T3_64 : Mips64GPRReg< 11, "11", [T3]>, DwarfRegNum<[11]>; + def T4_64 : Mips64GPRReg< 12, "12", [T4]>, DwarfRegNum<[12]>; + def T5_64 : Mips64GPRReg< 13, "13", [T5]>, DwarfRegNum<[13]>; + def T6_64 : Mips64GPRReg< 14, "14", [T6]>, 
DwarfRegNum<[14]>; + def T7_64 : Mips64GPRReg< 15, "15", [T7]>, DwarfRegNum<[15]>; + def S0_64 : Mips64GPRReg< 16, "16", [S0]>, DwarfRegNum<[16]>; + def S1_64 : Mips64GPRReg< 17, "17", [S1]>, DwarfRegNum<[17]>; + def S2_64 : Mips64GPRReg< 18, "18", [S2]>, DwarfRegNum<[18]>; + def S3_64 : Mips64GPRReg< 19, "19", [S3]>, DwarfRegNum<[19]>; + def S4_64 : Mips64GPRReg< 20, "20", [S4]>, DwarfRegNum<[20]>; + def S5_64 : Mips64GPRReg< 21, "21", [S5]>, DwarfRegNum<[21]>; + def S6_64 : Mips64GPRReg< 22, "22", [S6]>, DwarfRegNum<[22]>; + def S7_64 : Mips64GPRReg< 23, "23", [S7]>, DwarfRegNum<[23]>; + def T8_64 : Mips64GPRReg< 24, "24", [T8]>, DwarfRegNum<[24]>; + def T9_64 : Mips64GPRReg< 25, "25", [T9]>, DwarfRegNum<[25]>; + def K0_64 : Mips64GPRReg< 26, "26", [K0]>, DwarfRegNum<[26]>; + def K1_64 : Mips64GPRReg< 27, "27", [K1]>, DwarfRegNum<[27]>; + def GP_64 : Mips64GPRReg< 28, "GP", [GP]>, DwarfRegNum<[28]>; + def SP_64 : Mips64GPRReg< 29, "SP", [SP]>, DwarfRegNum<[29]>; + def FP_64 : Mips64GPRReg< 30, "FP", [FP]>, DwarfRegNum<[30]>; + def RA_64 : Mips64GPRReg< 31, "RA", [RA]>, DwarfRegNum<[31]>; + + /// Mips Single point precision FPU Registers + def F0 : FPR< 0, "F0">, DwarfRegNum<[32]>; + def F1 : FPR< 1, "F1">, DwarfRegNum<[33]>; + def F2 : FPR< 2, "F2">, DwarfRegNum<[34]>; + def F3 : FPR< 3, "F3">, DwarfRegNum<[35]>; + def F4 : FPR< 4, "F4">, DwarfRegNum<[36]>; + def F5 : FPR< 5, "F5">, DwarfRegNum<[37]>; + def F6 : FPR< 6, "F6">, DwarfRegNum<[38]>; + def F7 : FPR< 7, "F7">, DwarfRegNum<[39]>; + def F8 : FPR< 8, "F8">, DwarfRegNum<[40]>; + def F9 : FPR< 9, "F9">, DwarfRegNum<[41]>; + def F10 : FPR<10, "F10">, DwarfRegNum<[42]>; + def F11 : FPR<11, "F11">, DwarfRegNum<[43]>; + def F12 : FPR<12, "F12">, DwarfRegNum<[44]>; + def F13 : FPR<13, "F13">, DwarfRegNum<[45]>; + def F14 : FPR<14, "F14">, DwarfRegNum<[46]>; + def F15 : FPR<15, "F15">, DwarfRegNum<[47]>; + def F16 : FPR<16, "F16">, DwarfRegNum<[48]>; + def F17 : FPR<17, "F17">, DwarfRegNum<[49]>; + def F18 : 
FPR<18, "F18">, DwarfRegNum<[50]>; + def F19 : FPR<19, "F19">, DwarfRegNum<[51]>; + def F20 : FPR<20, "F20">, DwarfRegNum<[52]>; + def F21 : FPR<21, "F21">, DwarfRegNum<[53]>; + def F22 : FPR<22, "F22">, DwarfRegNum<[54]>; + def F23 : FPR<23, "F23">, DwarfRegNum<[55]>; + def F24 : FPR<24, "F24">, DwarfRegNum<[56]>; + def F25 : FPR<25, "F25">, DwarfRegNum<[57]>; + def F26 : FPR<26, "F26">, DwarfRegNum<[58]>; + def F27 : FPR<27, "F27">, DwarfRegNum<[59]>; + def F28 : FPR<28, "F28">, DwarfRegNum<[60]>; + def F29 : FPR<29, "F29">, DwarfRegNum<[61]>; + def F30 : FPR<30, "F30">, DwarfRegNum<[62]>; + def F31 : FPR<31, "F31">, DwarfRegNum<[63]>; + + /// Mips Double point precision FPU Registers (aliased + /// with the single precision to hold 64 bit values) + def D0 : AFPR< 0, "F0", [F0, F1]>; + def D1 : AFPR< 2, "F2", [F2, F3]>; + def D2 : AFPR< 4, "F4", [F4, F5]>; + def D3 : AFPR< 6, "F6", [F6, F7]>; + def D4 : AFPR< 8, "F8", [F8, F9]>; + def D5 : AFPR<10, "F10", [F10, F11]>; + def D6 : AFPR<12, "F12", [F12, F13]>; + def D7 : AFPR<14, "F14", [F14, F15]>; + def D8 : AFPR<16, "F16", [F16, F17]>; + def D9 : AFPR<18, "F18", [F18, F19]>; + def D10 : AFPR<20, "F20", [F20, F21]>; + def D11 : AFPR<22, "F22", [F22, F23]>; + def D12 : AFPR<24, "F24", [F24, F25]>; + def D13 : AFPR<26, "F26", [F26, F27]>; + def D14 : AFPR<28, "F28", [F28, F29]>; + def D15 : AFPR<30, "F30", [F30, F31]>; + + /// Mips Double point precision FPU Registers in MFP64 mode. 
+ def D0_64 : AFPR64<0, "F0", [F0]>, DwarfRegNum<[32]>; + def D1_64 : AFPR64<1, "F1", [F1]>, DwarfRegNum<[33]>; + def D2_64 : AFPR64<2, "F2", [F2]>, DwarfRegNum<[34]>; + def D3_64 : AFPR64<3, "F3", [F3]>, DwarfRegNum<[35]>; + def D4_64 : AFPR64<4, "F4", [F4]>, DwarfRegNum<[36]>; + def D5_64 : AFPR64<5, "F5", [F5]>, DwarfRegNum<[37]>; + def D6_64 : AFPR64<6, "F6", [F6]>, DwarfRegNum<[38]>; + def D7_64 : AFPR64<7, "F7", [F7]>, DwarfRegNum<[39]>; + def D8_64 : AFPR64<8, "F8", [F8]>, DwarfRegNum<[40]>; + def D9_64 : AFPR64<9, "F9", [F9]>, DwarfRegNum<[41]>; + def D10_64 : AFPR64<10, "F10", [F10]>, DwarfRegNum<[42]>; + def D11_64 : AFPR64<11, "F11", [F11]>, DwarfRegNum<[43]>; + def D12_64 : AFPR64<12, "F12", [F12]>, DwarfRegNum<[44]>; + def D13_64 : AFPR64<13, "F13", [F13]>, DwarfRegNum<[45]>; + def D14_64 : AFPR64<14, "F14", [F14]>, DwarfRegNum<[46]>; + def D15_64 : AFPR64<15, "F15", [F15]>, DwarfRegNum<[47]>; + def D16_64 : AFPR64<16, "F16", [F16]>, DwarfRegNum<[48]>; + def D17_64 : AFPR64<17, "F17", [F17]>, DwarfRegNum<[49]>; + def D18_64 : AFPR64<18, "F18", [F18]>, DwarfRegNum<[50]>; + def D19_64 : AFPR64<19, "F19", [F19]>, DwarfRegNum<[51]>; + def D20_64 : AFPR64<20, "F20", [F20]>, DwarfRegNum<[52]>; + def D21_64 : AFPR64<21, "F21", [F21]>, DwarfRegNum<[53]>; + def D22_64 : AFPR64<22, "F22", [F22]>, DwarfRegNum<[54]>; + def D23_64 : AFPR64<23, "F23", [F23]>, DwarfRegNum<[55]>; + def D24_64 : AFPR64<24, "F24", [F24]>, DwarfRegNum<[56]>; + def D25_64 : AFPR64<25, "F25", [F25]>, DwarfRegNum<[57]>; + def D26_64 : AFPR64<26, "F26", [F26]>, DwarfRegNum<[58]>; + def D27_64 : AFPR64<27, "F27", [F27]>, DwarfRegNum<[59]>; + def D28_64 : AFPR64<28, "F28", [F28]>, DwarfRegNum<[60]>; + def D29_64 : AFPR64<29, "F29", [F29]>, DwarfRegNum<[61]>; + def D30_64 : AFPR64<30, "F30", [F30]>, DwarfRegNum<[62]>; + def D31_64 : AFPR64<31, "F31", [F31]>, DwarfRegNum<[63]>; + + // Hi/Lo registers + def HI : Register<"hi">, DwarfRegNum<[64]>; + def LO : Register<"lo">, DwarfRegNum<[65]>; + + 
let SubRegIndices = [sub_32] in { + def HI64 : RegisterWithSubRegs<"hi", [HI]>; + def LO64 : RegisterWithSubRegs<"lo", [LO]>; + } + + // Status flags register + def FCR31 : Register<"31">; + + // Hardware register $29 + def HWR29 : Register<"29">; + def HWR29_64 : Register<"29">; +} + +//===----------------------------------------------------------------------===// +// Register Classes +//===----------------------------------------------------------------------===// + +def CPURegs : RegisterClass<"Mips", [i32], 32, (add + // Return Values and Arguments + V0, V1, A0, A1, A2, A3, + // Not preserved across procedure calls + T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, + // Callee save + S0, S1, S2, S3, S4, S5, S6, S7, + // Reserved + ZERO, AT, K0, K1, GP, SP, FP, RA)>; + +def CPU64Regs : RegisterClass<"Mips", [i64], 64, (add + // Return Values and Arguments + V0_64, V1_64, A0_64, A1_64, A2_64, A3_64, + // Not preserved across procedure calls + T0_64, T1_64, T2_64, T3_64, T4_64, T5_64, T6_64, T7_64, T8_64, T9_64, + // Callee save + S0_64, S1_64, S2_64, S3_64, S4_64, S5_64, S6_64, S7_64, + // Reserved + ZERO_64, AT_64, K0_64, K1_64, GP_64, SP_64, FP_64, RA_64)> { + let SubRegClasses = [(CPURegs sub_32)]; +} + +// 64bit fp: +// * FGR64 - 32 64-bit registers +// * AFGR64 - 16 32-bit even registers (32-bit FP Mode) +// +// 32bit fp: +// * FGR32 - 16 32-bit even registers +// * FGR32 - 32 32-bit registers (single float only mode) +def FGR32 : RegisterClass<"Mips", [f32], 32, (sequence "F%u", 0, 31)>; + +def AFGR64 : RegisterClass<"Mips", [f64], 64, (add + // Return Values and Arguments + D0, D1, D6, D7, + // Not preserved across procedure calls + D2, D3, D4, D5, D8, D9, + // Callee save + D10, D11, D12, D13, D14, D15)> { + let SubRegClasses = [(FGR32 sub_fpeven, sub_fpodd)]; +} + +def FGR64 : RegisterClass<"Mips", [f64], 64, (sequence "D%u_64", 0, 31)> { + let SubRegClasses = [(FGR32 sub_32)]; +} + +// Condition Register for floating point operations +def CCR : 
RegisterClass<"Mips", [i32], 32, (add FCR31)>; + +// Hi/Lo Registers +def HILO : RegisterClass<"Mips", [i32], 32, (add HI, LO)>; +def HILO64 : RegisterClass<"Mips", [i64], 64, (add HI64, LO64)> { + let SubRegClasses = [(HILO sub_32)]; +} + +// Hardware registers +def HWRegs : RegisterClass<"Mips", [i32], 32, (add HWR29)>; +def HWRegs64 : RegisterClass<"Mips", [i64], 32, (add HWR29_64)>; + diff --git a/contrib/llvm/lib/Target/Mips/MipsRelocations.h b/contrib/llvm/lib/Target/Mips/MipsRelocations.h new file mode 100644 index 0000000..0787ed3 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsRelocations.h @@ -0,0 +1,41 @@ +//===-- MipsRelocations.h - Mips Code Relocations ---------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the Mips target-specific relocation types +// (for relocation-model=static). +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSRELOCATIONS_H_ +#define MIPSRELOCATIONS_H_ + +#include "llvm/CodeGen/MachineRelocation.h" + +namespace llvm { + namespace Mips{ + enum RelocationType { + // reloc_mips_pc16 - pc relative relocation for branches. The lower 18 + // bits of the difference between the branch target and the branch + // instruction, shifted right by 2. + reloc_mips_pc16 = 1, + + // reloc_mips_hi - upper 16 bits of the address (modified by +1 if the + // lower 16 bits of the address is negative). + reloc_mips_hi = 2, + + // reloc_mips_lo - lower 16 bits of the address. + reloc_mips_lo = 3, + + // reloc_mips_26 - lower 28 bits of the address, shifted right by 2. 
+ reloc_mips_26 = 4 + }; + } +} + +#endif /* MIPSRELOCATIONS_H_ */ diff --git a/contrib/llvm/lib/Target/Mips/MipsSchedule.td b/contrib/llvm/lib/Target/Mips/MipsSchedule.td new file mode 100644 index 0000000..1add02f --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsSchedule.td @@ -0,0 +1,63 @@ +//===-- MipsSchedule.td - Mips Scheduling Definitions ------*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// Functional units across Mips chips sets. Based on GCC/Mips backend files. +//===----------------------------------------------------------------------===// +def ALU : FuncUnit; +def IMULDIV : FuncUnit; + +//===----------------------------------------------------------------------===// +// Instruction Itinerary classes used for Mips +//===----------------------------------------------------------------------===// +def IIAlu : InstrItinClass; +def IILoad : InstrItinClass; +def IIStore : InstrItinClass; +def IIXfer : InstrItinClass; +def IIBranch : InstrItinClass; +def IIHiLo : InstrItinClass; +def IIImul : InstrItinClass; +def IIIdiv : InstrItinClass; +def IIFcvt : InstrItinClass; +def IIFmove : InstrItinClass; +def IIFcmp : InstrItinClass; +def IIFadd : InstrItinClass; +def IIFmulSingle : InstrItinClass; +def IIFmulDouble : InstrItinClass; +def IIFdivSingle : InstrItinClass; +def IIFdivDouble : InstrItinClass; +def IIFsqrtSingle : InstrItinClass; +def IIFsqrtDouble : InstrItinClass; +def IIFrecipFsqrtStep : InstrItinClass; +def IIPseudo : InstrItinClass; + +//===----------------------------------------------------------------------===// +// Mips Generic instruction itineraries. 
+//===----------------------------------------------------------------------===// +def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [ + InstrItinData<IIAlu , [InstrStage<1, [ALU]>]>, + InstrItinData<IILoad , [InstrStage<3, [ALU]>]>, + InstrItinData<IIStore , [InstrStage<1, [ALU]>]>, + InstrItinData<IIXfer , [InstrStage<2, [ALU]>]>, + InstrItinData<IIBranch , [InstrStage<1, [ALU]>]>, + InstrItinData<IIHiLo , [InstrStage<1, [IMULDIV]>]>, + InstrItinData<IIImul , [InstrStage<17, [IMULDIV]>]>, + InstrItinData<IIIdiv , [InstrStage<38, [IMULDIV]>]>, + InstrItinData<IIFcvt , [InstrStage<1, [ALU]>]>, + InstrItinData<IIFmove , [InstrStage<2, [ALU]>]>, + InstrItinData<IIFcmp , [InstrStage<3, [ALU]>]>, + InstrItinData<IIFadd , [InstrStage<4, [ALU]>]>, + InstrItinData<IIFmulSingle , [InstrStage<7, [ALU]>]>, + InstrItinData<IIFmulDouble , [InstrStage<8, [ALU]>]>, + InstrItinData<IIFdivSingle , [InstrStage<23, [ALU]>]>, + InstrItinData<IIFdivDouble , [InstrStage<36, [ALU]>]>, + InstrItinData<IIFsqrtSingle , [InstrStage<54, [ALU]>]>, + InstrItinData<IIFsqrtDouble , [InstrStage<12, [ALU]>]>, + InstrItinData<IIFrecipFsqrtStep , [InstrStage<5, [ALU]>]> +]>; diff --git a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp new file mode 100644 index 0000000..e4d70fc --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.cpp @@ -0,0 +1,23 @@ +//===-- MipsSelectionDAGInfo.cpp - Mips SelectionDAG Info -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the MipsSelectionDAGInfo class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mips-selectiondag-info" +#include "MipsTargetMachine.h" +using namespace llvm; + +MipsSelectionDAGInfo::MipsSelectionDAGInfo(const MipsTargetMachine &TM) + : TargetSelectionDAGInfo(TM) { +} + +MipsSelectionDAGInfo::~MipsSelectionDAGInfo() { +} diff --git a/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h new file mode 100644 index 0000000..6cafb55 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsSelectionDAGInfo.h @@ -0,0 +1,31 @@ +//===-- MipsSelectionDAGInfo.h - Mips SelectionDAG Info ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file defines the Mips subclass for TargetSelectionDAGInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSSELECTIONDAGINFO_H +#define MIPSSELECTIONDAGINFO_H + +#include "llvm/Target/TargetSelectionDAGInfo.h" + +namespace llvm { + +class MipsTargetMachine; + +class MipsSelectionDAGInfo : public TargetSelectionDAGInfo { +public: + explicit MipsSelectionDAGInfo(const MipsTargetMachine &TM); + ~MipsSelectionDAGInfo(); +}; + +} + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp new file mode 100644 index 0000000..00347df --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.cpp @@ -0,0 +1,68 @@ +//===-- MipsSubtarget.cpp - Mips Subtarget Information --------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file implements the Mips specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#include "MipsSubtarget.h" +#include "Mips.h" +#include "MipsRegisterInfo.h" +#include "llvm/Support/TargetRegistry.h" + +#define GET_SUBTARGETINFO_TARGET_DESC +#define GET_SUBTARGETINFO_CTOR +#include "MipsGenSubtargetInfo.inc" + +using namespace llvm; + +void MipsSubtarget::anchor() { } + +MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU, + const std::string &FS, bool little) : + MipsGenSubtargetInfo(TT, CPU, FS), + MipsArchVersion(Mips32), MipsABI(UnknownABI), IsLittle(little), + IsSingleFloat(false), IsFP64bit(false), IsGP64bit(false), HasVFPU(false), + IsLinux(true), HasSEInReg(false), HasCondMov(false), HasMulDivAdd(false), + HasMinMax(false), HasSwap(false), HasBitCount(false) +{ + std::string CPUName = CPU; + if (CPUName.empty()) + CPUName = "mips32"; + + // Parse features string. + ParseSubtargetFeatures(CPUName, FS); + + // Initialize scheduling itinerary for the specified CPU. + InstrItins = getInstrItineraryForCPU(CPUName); + + // Set MipsABI if it hasn't been set yet. + if (MipsABI == UnknownABI) + MipsABI = hasMips64() ? N64 : O32; + + // Check if Architecture and ABI are compatible. + assert(((!hasMips64() && (isABI_O32() || isABI_EABI())) || + (hasMips64() && (isABI_N32() || isABI_N64()))) && + "Invalid Arch & ABI pair."); + + // Is the target system Linux ? + if (TT.find("linux") == std::string::npos) + IsLinux = false; +} + +bool +MipsSubtarget::enablePostRAScheduler(CodeGenOpt::Level OptLevel, + TargetSubtargetInfo::AntiDepBreakMode& Mode, + RegClassVector& CriticalPathRCs) const { + Mode = TargetSubtargetInfo::ANTIDEP_CRITICAL; + CriticalPathRCs.clear(); + CriticalPathRCs.push_back(hasMips64() ? 
+ &Mips::CPU64RegsRegClass : &Mips::CPURegsRegClass); + return OptLevel >= CodeGenOpt::Aggressive; +} diff --git a/contrib/llvm/lib/Target/Mips/MipsSubtarget.h b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h new file mode 100644 index 0000000..7faf77b --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsSubtarget.h @@ -0,0 +1,139 @@ +//===-- MipsSubtarget.h - Define Subtarget for the Mips ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file declares the Mips specific subclass of TargetSubtargetInfo. +// +//===----------------------------------------------------------------------===// + +#ifndef MIPSSUBTARGET_H +#define MIPSSUBTARGET_H + +#include "llvm/Target/TargetSubtargetInfo.h" +#include "llvm/MC/MCInstrItineraries.h" +#include <string> + +#define GET_SUBTARGETINFO_HEADER +#include "MipsGenSubtargetInfo.inc" + +namespace llvm { +class StringRef; + +class MipsSubtarget : public MipsGenSubtargetInfo { + virtual void anchor(); + +public: + // NOTE: O64 will not be supported. + enum MipsABIEnum { + UnknownABI, O32, N32, N64, EABI + }; + +protected: + + enum MipsArchEnum { + Mips32, Mips32r2, Mips64, Mips64r2 + }; + + // Mips architecture version + MipsArchEnum MipsArchVersion; + + // Mips supported ABIs + MipsABIEnum MipsABI; + + // IsLittle - The target is Little Endian + bool IsLittle; + + // IsSingleFloat - The target only supports single precision float + // point operations. This enable the target to use all 32 32-bit + // floating point registers instead of only using even ones. + bool IsSingleFloat; + + // IsFP64bit - The target processor has 64-bit floating point registers. 
+ bool IsFP64bit; + + // IsFP64bit - General-purpose registers are 64 bits wide + bool IsGP64bit; + + // HasVFPU - Processor has a vector floating point unit. + bool HasVFPU; + + // isLinux - Target system is Linux. Is false we consider ELFOS for now. + bool IsLinux; + + /// Features related to the presence of specific instructions. + + // HasSEInReg - SEB and SEH (signext in register) instructions. + bool HasSEInReg; + + // HasCondMov - Conditional mov (MOVZ, MOVN) instructions. + bool HasCondMov; + + // HasMulDivAdd - Multiply add and sub (MADD, MADDu, MSUB, MSUBu) + // instructions. + bool HasMulDivAdd; + + // HasMinMax - MIN and MAX instructions. + bool HasMinMax; + + // HasSwap - Byte and half swap instructions. + bool HasSwap; + + // HasBitCount - Count leading '1' and '0' bits. + bool HasBitCount; + + InstrItineraryData InstrItins; + +public: + virtual bool enablePostRAScheduler(CodeGenOpt::Level OptLevel, + AntiDepBreakMode& Mode, + RegClassVector& CriticalPathRCs) const; + + /// Only O32 and EABI supported right now. + bool isABI_EABI() const { return MipsABI == EABI; } + bool isABI_N64() const { return MipsABI == N64; } + bool isABI_N32() const { return MipsABI == N32; } + bool isABI_O32() const { return MipsABI == O32; } + unsigned getTargetABI() const { return MipsABI; } + + /// This constructor initializes the data members to match that + /// of the specified triple. + MipsSubtarget(const std::string &TT, const std::string &CPU, + const std::string &FS, bool little); + + /// ParseSubtargetFeatures - Parses features string setting specified + /// subtarget options. Definition of function is auto generated by tblgen. 
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS); + + bool hasMips32() const { return MipsArchVersion >= Mips32; } + bool hasMips32r2() const { return MipsArchVersion == Mips32r2 || + MipsArchVersion == Mips64r2; } + bool hasMips64() const { return MipsArchVersion >= Mips64; } + bool hasMips64r2() const { return MipsArchVersion == Mips64r2; } + + bool hasMips32r2Or64() const { return hasMips32r2() || hasMips64(); } + + bool isLittle() const { return IsLittle; } + bool isFP64bit() const { return IsFP64bit; } + bool isGP64bit() const { return IsGP64bit; } + bool isGP32bit() const { return !IsGP64bit; } + bool isSingleFloat() const { return IsSingleFloat; } + bool isNotSingleFloat() const { return !IsSingleFloat; } + bool hasVFPU() const { return HasVFPU; } + bool isLinux() const { return IsLinux; } + + /// Features related to the presence of specific instructions. + bool hasSEInReg() const { return HasSEInReg; } + bool hasCondMov() const { return HasCondMov; } + bool hasMulDivAdd() const { return HasMulDivAdd; } + bool hasMinMax() const { return HasMinMax; } + bool hasSwap() const { return HasSwap; } + bool hasBitCount() const { return HasBitCount; } +}; +} // End llvm namespace + +#endif diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp new file mode 100644 index 0000000..ad02231 --- /dev/null +++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.cpp @@ -0,0 +1,153 @@ +//===-- MipsTargetMachine.cpp - Define TargetMachine for Mips -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implements the info about Mips target spec. 
+// +//===----------------------------------------------------------------------===// + +#include "MipsTargetMachine.h" +#include "Mips.h" +#include "llvm/PassManager.h" +#include "llvm/CodeGen/Passes.h" +#include "llvm/Support/TargetRegistry.h" +using namespace llvm; + +extern "C" void LLVMInitializeMipsTarget() { + // Register the target. + RegisterTargetMachine<MipsebTargetMachine> X(TheMipsTarget); + RegisterTargetMachine<MipselTargetMachine> Y(TheMipselTarget); + RegisterTargetMachine<Mips64ebTargetMachine> A(TheMips64Target); + RegisterTargetMachine<Mips64elTargetMachine> B(TheMips64elTarget); +} + +// DataLayout --> Big-endian, 32-bit pointer/ABI/alignment +// The stack is always 8 byte aligned +// On function prologue, the stack is created by decrementing +// its pointer. Once decremented, all references are done with positive +// offset from the stack/frame pointer, using StackGrowsUp enables +// an easier handling. +// Using CodeModel::Large enables different CALL behavior. +MipsTargetMachine:: +MipsTargetMachine(const Target &T, StringRef TT, + StringRef CPU, StringRef FS, const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL, + bool isLittle) + : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL), + Subtarget(TT, CPU, FS, isLittle), + DataLayout(isLittle ? + (Subtarget.isABI_N64() ? + "e-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : + "e-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32") : + (Subtarget.isABI_N64() ? 
+ "E-p:64:64:64-i8:8:32-i16:16:32-i64:64:64-f128:128:128-n32" : + "E-p:32:32:32-i8:8:32-i16:16:32-i64:64:64-n32")), + InstrInfo(*this), + FrameLowering(Subtarget), + TLInfo(*this), TSInfo(*this), JITInfo() { +} + +void MipsebTargetMachine::anchor() { } + +MipsebTargetMachine:: +MipsebTargetMachine(const Target &T, StringRef TT, + StringRef CPU, StringRef FS, const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} + +void MipselTargetMachine::anchor() { } + +MipselTargetMachine:: +MipselTargetMachine(const Target &T, StringRef TT, + StringRef CPU, StringRef FS, const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} + +void Mips64ebTargetMachine::anchor() { } + +Mips64ebTargetMachine:: +Mips64ebTargetMachine(const Target &T, StringRef TT, + StringRef CPU, StringRef FS, const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, false) {} + +void Mips64elTargetMachine::anchor() { } + +Mips64elTargetMachine:: +Mips64elTargetMachine(const Target &T, StringRef TT, + StringRef CPU, StringRef FS, const TargetOptions &Options, + Reloc::Model RM, CodeModel::Model CM, + CodeGenOpt::Level OL) + : MipsTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, true) {} + +namespace { +/// Mips Code Generator Pass Configuration Options. 
// Target-specific hook points into the common codegen pass pipeline; the
// base TargetPassConfig drives the ordering, this class decides which Mips
// passes to insert at each point.
class MipsPassConfig : public TargetPassConfig {
public:
  MipsPassConfig(MipsTargetMachine *TM, PassManagerBase &PM)
    : TargetPassConfig(TM, PM) {}

  // Convenience downcast: the TM stored in the base is always a
  // MipsTargetMachine here (see createPassConfig below).
  MipsTargetMachine &getMipsTargetMachine() const {
    return getTM<MipsTargetMachine>();
  }

  const MipsSubtarget &getMipsSubtarget() const {
    return *getMipsTargetMachine().getSubtargetImpl();
  }

  virtual bool addInstSelector();
  virtual bool addPreRegAlloc();
  virtual bool addPreSched2();
  virtual bool addPreEmitPass();
};
} // namespace

// Factory used by LLVMTargetMachine to build the pass pipeline for this
// target.  Ownership of the returned object passes to the caller.
TargetPassConfig *MipsTargetMachine::createPassConfig(PassManagerBase &PM) {
  return new MipsPassConfig(this, PM);
}

// Install an instruction selector pass using
// the ISelDag to gen Mips code.
bool MipsPassConfig::addInstSelector()
{
  PM.add(createMipsISelDag(getMipsTargetMachine()));
  return false;
}

// Implemented by targets that want to run passes immediately before
// machine code is emitted. return true if -print-machineinstrs should
// print out the code after the passes.
bool MipsPassConfig::addPreEmitPass()
{
  // Fills branch/load delay slots (Mips architectural requirement).
  PM.add(createMipsDelaySlotFillerPass(getMipsTargetMachine()));
  return true;
}

bool MipsPassConfig::addPreRegAlloc() {
  // Do not restore $gp if target is Mips64.
  // In N32/64, $gp is a callee-saved register.
  if (!getMipsSubtarget().hasMips64())
    PM.add(createMipsEmitGPRestorePass(getMipsTargetMachine()));
  return true;
}

// Runs after the first scheduling pass: expand pseudo-instructions that must
// survive register allocation into real machine instructions.
bool MipsPassConfig::addPreSched2() {
  PM.add(createMipsExpandPseudoPass(getMipsTargetMachine()));
  return true;
}

// JIT entry point: install the pass that emits machine code directly into
// the JITCodeEmitter's buffer.  Returning false means "no error".
bool MipsTargetMachine::addCodeEmitter(PassManagerBase &PM,
                                       JITCodeEmitter &JCE) {
  // Machine code emitter pass for Mips.
  PM.add(createMipsJITCodeEmitterPass(*this, JCE));
  return false;
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
new file mode 100644
index 0000000..80c00e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetMachine.h
@@ -0,0 +1,124 @@
//===-- MipsTargetMachine.h - Define TargetMachine for Mips -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the Mips specific subclass of TargetMachine.
//
//===----------------------------------------------------------------------===//

#ifndef MIPSTARGETMACHINE_H
#define MIPSTARGETMACHINE_H

#include "MipsFrameLowering.h"
#include "MipsInstrInfo.h"
#include "MipsISelLowering.h"
#include "MipsJITInfo.h"
#include "MipsSelectionDAGInfo.h"
#include "MipsSubtarget.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameLowering.h"

namespace llvm {
  class formatted_raw_ostream;

  // Common base for all four concrete Mips target machines.  Owns every
  // per-target codegen object (subtarget, instr info, lowering, frame
  // lowering, DAG info, JIT info) by value and hands out pointers to them.
  // NOTE: Subtarget is declared before DataLayout on purpose — the
  // constructor's DataLayout initializer consults the subtarget's ABI.
  class MipsTargetMachine : public LLVMTargetMachine {
    MipsSubtarget Subtarget;
    const TargetData DataLayout; // Calculates type size & alignment
    MipsInstrInfo InstrInfo;
    MipsFrameLowering FrameLowering;
    MipsTargetLowering TLInfo;
    MipsSelectionDAGInfo TSInfo;
    MipsJITInfo JITInfo;

  public:
    MipsTargetMachine(const Target &T, StringRef TT,
                      StringRef CPU, StringRef FS, const TargetOptions &Options,
                      Reloc::Model RM, CodeModel::Model CM,
                      CodeGenOpt::Level OL,
                      bool isLittle);

    // Covariant accessors returning the concrete Mips objects; all point at
    // members owned by this TargetMachine, so their lifetime matches it.
    virtual const MipsInstrInfo *getInstrInfo() const
    { return &InstrInfo; }
    virtual const TargetFrameLowering *getFrameLowering() const
    { return &FrameLowering; }
    virtual const MipsSubtarget *getSubtargetImpl() const
    { return &Subtarget; }
    virtual const TargetData *getTargetData() const
    { return &DataLayout;}
    virtual MipsJITInfo *getJITInfo()
    { return &JITInfo; }

    virtual const MipsRegisterInfo *getRegisterInfo() const {
      return &InstrInfo.getRegisterInfo();
    }

    virtual const MipsTargetLowering *getTargetLowering() const {
      return &TLInfo;
    }

    virtual const MipsSelectionDAGInfo* getSelectionDAGInfo() const {
      return &TSInfo;
    }

    // Pass Pipeline Configuration
    virtual TargetPassConfig *createPassConfig(PassManagerBase &PM);
    virtual bool addCodeEmitter(PassManagerBase &PM,
                                JITCodeEmitter &JCE);
  };

/// MipsebTargetMachine - Mips32 big endian target machine.
///
class MipsebTargetMachine : public MipsTargetMachine {
  virtual void anchor();
public:
  MipsebTargetMachine(const Target &T, StringRef TT,
                      StringRef CPU, StringRef FS, const TargetOptions &Options,
                      Reloc::Model RM, CodeModel::Model CM,
                      CodeGenOpt::Level OL);
};

/// MipselTargetMachine - Mips32 little endian target machine.
///
class MipselTargetMachine : public MipsTargetMachine {
  virtual void anchor();
public:
  MipselTargetMachine(const Target &T, StringRef TT,
                      StringRef CPU, StringRef FS, const TargetOptions &Options,
                      Reloc::Model RM, CodeModel::Model CM,
                      CodeGenOpt::Level OL);
};

/// Mips64ebTargetMachine - Mips64 big endian target machine.
///
class Mips64ebTargetMachine : public MipsTargetMachine {
  virtual void anchor();
public:
  Mips64ebTargetMachine(const Target &T, StringRef TT,
                        StringRef CPU, StringRef FS,
                        const TargetOptions &Options,
                        Reloc::Model RM, CodeModel::Model CM,
                        CodeGenOpt::Level OL);
};

/// Mips64elTargetMachine - Mips64 little endian target machine.
///
class Mips64elTargetMachine : public MipsTargetMachine {
  virtual void anchor();
public:
  Mips64elTargetMachine(const Target &T, StringRef TT,
                        StringRef CPU, StringRef FS,
                        const TargetOptions &Options,
                        Reloc::Model RM, CodeModel::Model CM,
                        CodeGenOpt::Level OL);
};
} // End llvm namespace

#endif
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
new file mode 100644
index 0000000..04dc60a
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.cpp
@@ -0,0 +1,102 @@
//===-- MipsTargetObjectFile.cpp - Mips Object Files ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MipsTargetObjectFile.h"
#include "MipsSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
using namespace llvm;

// Command-line knob (-mips-ssection-threshold=N): globals of size in
// (0, N] bytes are candidates for the small data/bss sections.
static cl::opt<unsigned>
SSThreshold("mips-ssection-threshold", cl::Hidden,
            cl::desc("Small data and bss section threshold size (default=8)"),
            cl::init(8));

// Creates the Mips-specific .sdata/.sbss ELF sections after the generic
// ELF initialization has run.
void MipsTargetObjectFile::Initialize(MCContext &Ctx, const TargetMachine &TM){
  TargetLoweringObjectFileELF::Initialize(Ctx, TM);

  SmallDataSection =
    getContext().getELFSection(".sdata", ELF::SHT_PROGBITS,
                               ELF::SHF_WRITE |ELF::SHF_ALLOC,
                               SectionKind::getDataRel());

  SmallBSSSection =
    getContext().getELFSection(".sbss", ELF::SHT_NOBITS,
                               ELF::SHF_WRITE |ELF::SHF_ALLOC,
                               SectionKind::getBSS());

}

// An address must be loaded from a small section if its size is less than the
// small section size threshold.
// Data in this section must be addressed using
// gp_rel operator.
// Returns true when a global of `Size` bytes belongs in .sdata/.sbss:
// non-empty and no larger than the -mips-ssection-threshold value.
static bool IsInSmallSection(uint64_t Size) {
  return Size > 0 && Size <= SSThreshold;
}

// Two-argument overload: classifies the global's section kind itself and
// defers to the three-argument overload.  Declarations and
// available_externally definitions never get a local small-section copy.
bool MipsTargetObjectFile::IsGlobalInSmallSection(const GlobalValue *GV,
                                                  const TargetMachine &TM) const {
  if (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
    return false;

  return IsGlobalInSmallSection(GV, TM, getKindForGlobal(GV, TM));
}

/// IsGlobalInSmallSection - Return true if this global address should be
/// placed into small data/bss section.
bool MipsTargetObjectFile::
IsGlobalInSmallSection(const GlobalValue *GV, const TargetMachine &TM,
                       SectionKind Kind) const {

  // Only use small section for non linux targets.
  const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
  if (Subtarget.isLinux())
    return false;

  // Only global variables, not functions.
  const GlobalVariable *GVA = dyn_cast<GlobalVariable>(GV);
  if (!GVA)
    return false;

  // We can only do this for datarel or BSS objects for now.
  if (!Kind.isBSS() && !Kind.isDataRel())
    return false;

  // If this is a internal constant string, there is a special
  // section for it, but not in small data/bss.
  if (Kind.isMergeable1ByteCString())
    return false;

  // GV is a pointer-to-object; size the pointee type.
  Type *Ty = GV->getType()->getElementType();
  return IsInSmallSection(TM.getTargetData()->getTypeAllocSize(Ty));
}



// Routes qualifying globals to .sbss/.sdata; everything else falls back to
// the generic ELF section selection.
// NOTE(review): this accepts data only when Kind.isDataNoRel(), while the
// predicate above accepts any isDataRel() kind — so DataRel-with-relocations
// globals pass the predicate but still land in regular .data here. Looks
// intentional (no relocated data in .sdata) but worth confirming.
const MCSection *MipsTargetObjectFile::
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
                       Mangler *Mang, const TargetMachine &TM) const {
  // TODO: Could also support "weak" symbols as well with ".gnu.linkonce.s.*"
  // sections?

  // Handle Small Section classification here.
  if (Kind.isBSS() && IsGlobalInSmallSection(GV, TM, Kind))
    return SmallBSSSection;
  if (Kind.isDataNoRel() && IsGlobalInSmallSection(GV, TM, Kind))
    return SmallDataSection;

  // Otherwise, we work the same as ELF.
  return TargetLoweringObjectFileELF::SelectSectionForGlobal(GV, Kind, Mang,TM);
}
diff --git a/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h
new file mode 100644
index 0000000..c394a9d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/MipsTargetObjectFile.h
@@ -0,0 +1,41 @@
//===-- llvm/Target/MipsTargetObjectFile.h - Mips Object Info ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_MIPS_TARGETOBJECTFILE_H
#define LLVM_TARGET_MIPS_TARGETOBJECTFILE_H

#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"

namespace llvm {

  // ELF lowering specialized for Mips: adds gp-relative small data/bss
  // sections (.sdata/.sbss) for small globals.  The two MCSection pointers
  // are created in Initialize() and owned by the MCContext.
  class MipsTargetObjectFile : public TargetLoweringObjectFileELF {
    const MCSection *SmallDataSection;
    const MCSection *SmallBSSSection;
  public:

    void Initialize(MCContext &Ctx, const TargetMachine &TM);


    /// IsGlobalInSmallSection - Return true if this global address should be
    /// placed into small data/bss section.
    bool IsGlobalInSmallSection(const GlobalValue *GV,
                                const TargetMachine &TM, SectionKind Kind)const;
    bool IsGlobalInSmallSection(const GlobalValue *GV,
                                const TargetMachine &TM) const;

    const MCSection *SelectSectionForGlobal(const GlobalValue *GV,
                                            SectionKind Kind,
                                            Mangler *Mang,
                                            const TargetMachine &TM) const;

    // TODO: Classify globals as mips wishes.
  };
} // end namespace llvm

#endif
diff --git a/contrib/llvm/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp b/contrib/llvm/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp
new file mode 100644
index 0000000..243632b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp
@@ -0,0 +1,31 @@
//===-- MipsTargetInfo.cpp - Mips Target Implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "Mips.h"
#include "llvm/Module.h"
#include "llvm/Support/TargetRegistry.h"
using namespace llvm;

// Definitions of the global Target objects declared (extern) in Mips.h;
// every other Mips library references these.
Target llvm::TheMipsTarget, llvm::TheMipselTarget;
Target llvm::TheMips64Target, llvm::TheMips64elTarget;

// Registers the four Mips targets with the global TargetRegistry so they
// appear in e.g. `llc -version`.  JIT is only advertised for the 32-bit
// variants; the 64-bit ones are flagged experimental.
extern "C" void LLVMInitializeMipsTargetInfo() {
  RegisterTarget<Triple::mips,
                 /*HasJIT=*/true> X(TheMipsTarget, "mips", "Mips");

  RegisterTarget<Triple::mipsel,
                 /*HasJIT=*/true> Y(TheMipselTarget, "mipsel", "Mipsel");

  RegisterTarget<Triple::mips64,
                 /*HasJIT=*/false> A(TheMips64Target, "mips64", "Mips64 [experimental]");

  RegisterTarget<Triple::mips64el,
                 /*HasJIT=*/false> B(TheMips64elTarget,
                                     "mips64el", "Mips64el [experimental]");
}