author    rdivacky <rdivacky@FreeBSD.org>  2010-03-16 16:51:38 +0000
committer rdivacky <rdivacky@FreeBSD.org>  2010-03-16 16:51:38 +0000
commit    0f448b841684305c051796982f300c9bff959307 (patch)
tree      458dd25677a43aef6390ecadb4423817f00e08b0 /lib/Target/X86
parent    9e2446b38c94db61b2416c28fee415c03663c11c (diff)
Update LLVM to r98631.
Diffstat (limited to 'lib/Target/X86')
-rw-r--r--  lib/Target/X86/AsmParser/Makefile |   2
-rw-r--r--  lib/Target/X86/AsmParser/X86AsmParser.cpp |   9
-rw-r--r--  lib/Target/X86/AsmPrinter/Makefile |   2
-rw-r--r--  lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp |  97
-rw-r--r--  lib/Target/X86/AsmPrinter/X86AsmPrinter.h |   9
-rw-r--r--  lib/Target/X86/AsmPrinter/X86MCInstLower.cpp | 121
-rw-r--r--  lib/Target/X86/CMakeLists.txt |   1
-rw-r--r--  lib/Target/X86/Disassembler/Makefile |   2
-rw-r--r--  lib/Target/X86/TargetInfo/Makefile |   2
-rw-r--r--  lib/Target/X86/X86.h |   5
-rw-r--r--  lib/Target/X86/X86.td |   3
-rw-r--r--  lib/Target/X86/X86AsmBackend.cpp |  56
-rw-r--r--  lib/Target/X86/X86COFFMachineModuleInfo.cpp |  73
-rw-r--r--  lib/Target/X86/X86COFFMachineModuleInfo.h |  15
-rw-r--r--  lib/Target/X86/X86CallingConv.td |  22
-rw-r--r--  lib/Target/X86/X86CodeEmitter.cpp |  10
-rw-r--r--  lib/Target/X86/X86FastISel.cpp |  30
-rw-r--r--  lib/Target/X86/X86ISelDAGToDAG.cpp |  37
-rw-r--r--  lib/Target/X86/X86ISelLowering.cpp | 222
-rw-r--r--  lib/Target/X86/X86ISelLowering.h |   9
-rw-r--r--  lib/Target/X86/X86Instr64bit.td | 114
-rw-r--r--  lib/Target/X86/X86InstrFPStack.td |  15
-rw-r--r--  lib/Target/X86/X86InstrInfo.cpp |  17
-rw-r--r--  lib/Target/X86/X86InstrInfo.td | 144
-rw-r--r--  lib/Target/X86/X86InstrMMX.td |   4
-rw-r--r--  lib/Target/X86/X86InstrSSE.td |   4
-rw-r--r--  lib/Target/X86/X86JITInfo.cpp |   7
-rw-r--r--  lib/Target/X86/X86MCAsmInfo.cpp |   7
-rw-r--r--  lib/Target/X86/X86MCTargetExpr.cpp |  48
-rw-r--r--  lib/Target/X86/X86MCTargetExpr.h |  49
-rw-r--r--  lib/Target/X86/X86RegisterInfo.cpp |  80
-rw-r--r--  lib/Target/X86/X86RegisterInfo.h |   2
-rw-r--r--  lib/Target/X86/X86RegisterInfo.td |   7
-rw-r--r--  lib/Target/X86/X86Subtarget.cpp |   7
-rw-r--r--  lib/Target/X86/X86Subtarget.h |   1
-rw-r--r--  lib/Target/X86/X86TargetObjectFile.cpp |  21
-rw-r--r--  lib/Target/X86/X86TargetObjectFile.h |  10
37 files changed, 731 insertions(+), 533 deletions(-)
diff --git a/lib/Target/X86/AsmParser/Makefile b/lib/Target/X86/AsmParser/Makefile
index 25fb0a2..fb97607 100644
--- a/lib/Target/X86/AsmParser/Makefile
+++ b/lib/Target/X86/AsmParser/Makefile
@@ -10,6 +10,6 @@ LEVEL = ../../../..
LIBRARYNAME = LLVMX86AsmParser
# Hack: we need to include 'main' x86 target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 84d7bb7..dde86fb 100644
--- a/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -542,6 +542,15 @@ ParseInstruction(const StringRef &Name, SMLoc NameLoc,
}
}
+ // FIXME: Hack to handle recognizing s{hr,ar,hl}? $1.
+ if ((Name.startswith("shr") || Name.startswith("sar") ||
+ Name.startswith("shl")) &&
+ Operands.size() == 3 &&
+ static_cast<X86Operand*>(Operands[1])->isImm() &&
+ isa<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm()) &&
+ cast<MCConstantExpr>(static_cast<X86Operand*>(Operands[1])->getImm())->getValue() == 1)
+ Operands.erase(Operands.begin() + 1);
+
return false;
}
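The hunk above special-cases an explicit `$1` shift count: "shr $1, %eax" and "shr %eax" should select the same shift-by-one instruction form, so the redundant immediate operand is dropped before matching. A minimal standalone sketch of the same operand filtering, using plain strings in place of LLVM's X86Operand/MCConstantExpr (names here are illustrative, not LLVM API):

    #include <iostream>
    #include <string>
    #include <vector>

    // Mimic the parser hack: if the mnemonic is a shift and the first
    // operand is the literal immediate $1, drop that operand so the
    // matcher picks the dedicated shift-by-one encoding.
    static void dropExplicitShiftByOne(const std::string &Mnemonic,
                                       std::vector<std::string> &Operands) {
      bool IsShift = Mnemonic.compare(0, 3, "shr") == 0 ||
                     Mnemonic.compare(0, 3, "sar") == 0 ||
                     Mnemonic.compare(0, 3, "shl") == 0;
      // Operands[0] holds the mnemonic, as in the LLVM operand list above.
      if (IsShift && Operands.size() == 3 && Operands[1] == "$1")
        Operands.erase(Operands.begin() + 1);
    }

    int main() {
      std::vector<std::string> Ops = {"shrl", "$1", "%eax"};
      std::string Mnemonic = Ops[0];
      dropExplicitShiftByOne(Mnemonic, Ops);
      std::cout << Ops.size() << " operands\n"; // 2: the $1 was removed
    }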
diff --git a/lib/Target/X86/AsmPrinter/Makefile b/lib/Target/X86/AsmPrinter/Makefile
index 2368761..c82aa33 100644
--- a/lib/Target/X86/AsmPrinter/Makefile
+++ b/lib/Target/X86/AsmPrinter/Makefile
@@ -10,6 +10,6 @@ LEVEL = ../../../..
LIBRARYNAME = LLVMX86AsmPrinter
# Hack: we need to include 'main' x86 target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp b/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
index caf84b6..c3dcf8e 100644
--- a/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
+++ b/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp
@@ -53,30 +53,6 @@ void X86AsmPrinter::PrintPICBaseSymbol() const {
OutContext);
}
-MCSymbol *X86AsmPrinter::GetGlobalValueSymbol(const GlobalValue *GV) const {
- SmallString<60> NameStr;
- Mang->getNameWithPrefix(NameStr, GV, false);
- MCSymbol *Symb;
- if (GV->hasPrivateLinkage())
- Symb = OutContext.GetOrCreateTemporarySymbol(NameStr.str());
- else
- Symb = OutContext.GetOrCreateSymbol(NameStr.str());
-
- if (Subtarget->isTargetCygMing()) {
- X86COFFMachineModuleInfo &COFFMMI =
- MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
- COFFMMI.DecorateCygMingName(Symb, OutContext, GV, *TM.getTargetData());
-
- // Save function name for later type emission.
- if (const Function *F = dyn_cast<Function>(GV))
- if (F->isDeclaration())
- COFFMMI.addExternalFunction(Symb->getName());
-
- }
-
- return Symb;
-}
-
/// runOnMachineFunction - Emit the function body.
///
bool X86AsmPrinter::runOnMachineFunction(MachineFunction &MF) {
@@ -124,7 +100,7 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO) {
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
GVSym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
else
- GVSym = GetGlobalValueSymbol(GV);
+ GVSym = Mang->getSymbol(GV);
// Handle dllimport linkage.
if (MO.getTargetFlags() == X86II::MO_DLLIMPORT)
@@ -133,24 +109,25 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO) {
if (MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY_PIC_BASE) {
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
-
- MCSymbol *&StubSym =
+ MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getGVStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
-
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE){
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$non_lazy_ptr");
- MCSymbol *&StubSym =
+ MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getHiddenGVStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
} else if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB) {
MCSymbol *Sym = GetSymbolWithGlobalValueBase(GV, "$stub");
- MCSymbol *&StubSym =
+ MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0)
- StubSym = GetGlobalValueSymbol(GV);
+ if (StubSym.getPointer() == 0)
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(Mang->getSymbol(GV), !GV->hasInternalLinkage());
}
// If the name begins with a dollar-sign, enclose it in parens. We do this
@@ -170,13 +147,15 @@ void X86AsmPrinter::printSymbolOperand(const MachineOperand &MO) {
TempNameStr += StringRef("$stub");
MCSymbol *Sym = GetExternalSymbolSymbol(TempNameStr.str());
- MCSymbol *&StubSym =
+ MachineModuleInfoImpl::StubValueTy &StubSym =
MMI->getObjFileInfo<MachineModuleInfoMachO>().getFnStubEntry(Sym);
- if (StubSym == 0) {
+ if (StubSym.getPointer() == 0) {
TempNameStr.erase(TempNameStr.end()-5, TempNameStr.end());
- StubSym = OutContext.GetOrCreateSymbol(TempNameStr.str());
+ StubSym = MachineModuleInfoImpl::
+ StubValueTy(OutContext.GetOrCreateSymbol(TempNameStr.str()),
+ true);
}
- SymToPrint = StubSym;
+ SymToPrint = StubSym.getPointer();
} else {
SymToPrint = GetExternalSymbolSymbol(MO.getSymbolName());
}
@@ -235,7 +214,7 @@ void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo) {
O << MO.getImm();
return;
case MachineOperand::MO_MachineBasicBlock:
- O << *MO.getMBB()->getSymbol(OutContext);
+ O << *MO.getMBB()->getSymbol();
return;
case MachineOperand::MO_GlobalAddress:
case MachineOperand::MO_ExternalSymbol:
@@ -480,6 +459,11 @@ bool X86AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
return false;
}
+void X86AsmPrinter::EmitStartOfAsmFile(Module &M) {
+ if (Subtarget->isTargetDarwin())
+ OutStreamer.SwitchSection(getObjFileLowering().getTextSection());
+}
+
void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
if (Subtarget->isTargetDarwin()) {
@@ -507,7 +491,8 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// L_foo$stub:
OutStreamer.EmitLabel(Stubs[i].first);
// .indirect_symbol _foo
- OutStreamer.EmitSymbolAttribute(Stubs[i].second, MCSA_IndirectSymbol);
+ OutStreamer.EmitSymbolAttribute(Stubs[i].second.getPointer(),
+ MCSA_IndirectSymbol);
// hlt; hlt; hlt; hlt; hlt hlt = 0xf4 = -12.
const char HltInsts[] = { -12, -12, -12, -12, -12 };
OutStreamer.EmitBytes(StringRef(HltInsts, 5), 0/*addrspace*/);
@@ -530,9 +515,18 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// L_foo$non_lazy_ptr:
OutStreamer.EmitLabel(Stubs[i].first);
// .indirect_symbol _foo
- OutStreamer.EmitSymbolAttribute(Stubs[i].second, MCSA_IndirectSymbol);
+ MachineModuleInfoImpl::StubValueTy &MCSym = Stubs[i].second;
+ OutStreamer.EmitSymbolAttribute(MCSym.getPointer(),
+ MCSA_IndirectSymbol);
// .long 0
- OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
+ if (MCSym.getInt())
+ // External to current translation unit.
+ OutStreamer.EmitIntValue(0, 4/*size*/, 0/*addrspace*/);
+ else
+ // Internal to current translation unit.
+ OutStreamer.EmitValue(MCSymbolRefExpr::Create(MCSym.getPointer(),
+ OutContext),
+ 4/*size*/, 0/*addrspace*/);
}
Stubs.clear();
OutStreamer.AddBlankLine();
@@ -547,8 +541,9 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
// L_foo$non_lazy_ptr:
OutStreamer.EmitLabel(Stubs[i].first);
// .long _foo
- OutStreamer.EmitValue(MCSymbolRefExpr::Create(Stubs[i].second,
- OutContext),
+ OutStreamer.EmitValue(MCSymbolRefExpr::
+ Create(Stubs[i].second.getPointer(),
+ OutContext),
4/*size*/, 0/*addrspace*/);
}
Stubs.clear();
@@ -584,15 +579,13 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
static_cast<TargetLoweringObjectFileCOFF&>(getObjFileLowering());
for (Module::const_iterator I = M.begin(), E = M.end(); I != E; ++I)
- if (I->hasDLLExportLinkage()) {
- MCSymbol *Sym = GetGlobalValueSymbol(I);
- DLLExportedFns.push_back(Sym);
- }
+ if (I->hasDLLExportLinkage())
+ DLLExportedFns.push_back(Mang->getSymbol(I));
for (Module::const_global_iterator I = M.global_begin(),
E = M.global_end(); I != E; ++I)
if (I->hasDLLExportLinkage())
- DLLExportedGlobals.push_back(GetGlobalValueSymbol(I));
+ DLLExportedGlobals.push_back(Mang->getSymbol(I));
// Output linker support code for dllexported globals on windows.
if (!DLLExportedGlobals.empty() || !DLLExportedFns.empty()) {
@@ -624,7 +617,7 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
O << *Stubs[i].first << ":\n"
<< (TD->getPointerSize() == 8 ?
MAI->getData64bitsDirective() : MAI->getData32bitsDirective())
- << *Stubs[i].second << '\n';
+ << *Stubs[i].second.getPointer() << '\n';
Stubs.clear();
}
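Throughout this printer the diff replaces bare MCSymbol* stub map entries with MachineModuleInfoImpl::StubValueTy, a symbol plus a flag recording whether the stub target is external to the translation unit: external targets keep the old ".long 0" slot for dyld to fill in, while internal targets can now be referenced directly. A standalone sketch of that two-way emission (StubValue and emitNonLazyPtr are hypothetical stand-ins, not LLVM API):

    #include <cstdio>
    #include <string>
    #include <utility>

    // Stand-in for StubValueTy: a symbol name plus an "is external" flag.
    using StubValue = std::pair<std::string, bool>;

    // Emit one $non_lazy_ptr entry, mirroring EmitEndOfAsmFile above.
    static void emitNonLazyPtr(const StubValue &S) {
      if (S.second)
        std::printf("\t.long 0     # %s is external; dyld resolves it\n",
                    S.first.c_str());
      else
        std::printf("\t.long %s    # internal; known at assembly time\n",
                    S.first.c_str());
    }

    int main() {
      emitNonLazyPtr({"_foo", true});   // external to the TU
      emitNonLazyPtr({"_bar", false});  // internal to the TU
    }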
diff --git a/lib/Target/X86/AsmPrinter/X86AsmPrinter.h b/lib/Target/X86/AsmPrinter/X86AsmPrinter.h
index 039214a..28c25f9 100644
--- a/lib/Target/X86/AsmPrinter/X86AsmPrinter.h
+++ b/lib/Target/X86/AsmPrinter/X86AsmPrinter.h
@@ -36,9 +36,8 @@ class VISIBILITY_HIDDEN X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget;
public:
explicit X86AsmPrinter(formatted_raw_ostream &O, TargetMachine &TM,
- MCContext &Ctx, MCStreamer &Streamer,
- const MCAsmInfo *T)
- : AsmPrinter(O, TM, Ctx, Streamer, T) {
+ MCStreamer &Streamer)
+ : AsmPrinter(O, TM, Streamer) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
}
@@ -55,13 +54,13 @@ class VISIBILITY_HIDDEN X86AsmPrinter : public AsmPrinter {
AsmPrinter::getAnalysisUsage(AU);
}
-
+ virtual void EmitStartOfAsmFile(Module &M);
+
virtual void EmitEndOfAsmFile(Module &M);
virtual void EmitInstruction(const MachineInstr *MI);
void printSymbolOperand(const MachineOperand &MO);
- virtual MCSymbol *GetGlobalValueSymbol(const GlobalValue *GV) const;
// These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo,
diff --git a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
index b8a6eeb..cbfc57a 100644
--- a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
+++ b/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp
@@ -16,13 +16,13 @@
#include "X86AsmPrinter.h"
#include "X86COFFMachineModuleInfo.h"
#include "X86MCAsmInfo.h"
-#include "X86MCTargetExpr.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/ADT/SmallString.h"
@@ -54,7 +54,12 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
SmallString<128> Name;
- if (MO.isGlobal()) {
+ if (!MO.isGlobal()) {
+ assert(MO.isSymbol());
+ Name += AsmPrinter.MAI->getGlobalPrefix();
+ Name += MO.getSymbolName();
+ } else {
+ const GlobalValue *GV = MO.getGlobal();
bool isImplicitlyPrivate = false;
if (MO.getTargetFlags() == X86II::MO_DARWIN_STUB ||
MO.getTargetFlags() == X86II::MO_DARWIN_NONLAZY ||
@@ -62,18 +67,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
MO.getTargetFlags() == X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE)
isImplicitlyPrivate = true;
- const GlobalValue *GV = MO.getGlobal();
Mang->getNameWithPrefix(Name, GV, isImplicitlyPrivate);
-
- if (getSubtarget().isTargetCygMing()) {
- X86COFFMachineModuleInfo &COFFMMI =
- AsmPrinter.MMI->getObjFileInfo<X86COFFMachineModuleInfo>();
- COFFMMI.DecorateCygMingName(Name, GV, *AsmPrinter.TM.getTargetData());
- }
- } else {
- assert(MO.isSymbol());
- Name += AsmPrinter.MAI->getGlobalPrefix();
- Name += MO.getSymbolName();
}
// If the target flags on the operand changes the name of the symbol, do that
@@ -91,35 +85,49 @@ GetSymbolFromOperand(const MachineOperand &MO) const {
Name += "$non_lazy_ptr";
MCSymbol *Sym = Ctx.GetOrCreateTemporarySymbol(Name.str());
- MCSymbol *&StubSym = getMachOMMI().getGVStubEntry(Sym);
- if (StubSym == 0) {
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
}
return Sym;
}
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE: {
Name += "$non_lazy_ptr";
MCSymbol *Sym = Ctx.GetOrCreateTemporarySymbol(Name.str());
- MCSymbol *&StubSym = getMachOMMI().getHiddenGVStubEntry(Sym);
- if (StubSym == 0) {
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getHiddenGVStubEntry(Sym);
+ if (StubSym.getPointer() == 0) {
assert(MO.isGlobal() && "Extern symbol not handled yet");
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
}
return Sym;
}
case X86II::MO_DARWIN_STUB: {
Name += "$stub";
MCSymbol *Sym = Ctx.GetOrCreateTemporarySymbol(Name.str());
- MCSymbol *&StubSym = getMachOMMI().getFnStubEntry(Sym);
- if (StubSym)
+ MachineModuleInfoImpl::StubValueTy &StubSym =
+ getMachOMMI().getFnStubEntry(Sym);
+ if (StubSym.getPointer())
return Sym;
if (MO.isGlobal()) {
- StubSym = AsmPrinter.GetGlobalValueSymbol(MO.getGlobal());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()),
+ !MO.getGlobal()->hasInternalLinkage());
} else {
Name.erase(Name.end()-5, Name.end());
- StubSym = Ctx.GetOrCreateTemporarySymbol(Name.str());
+ StubSym =
+ MachineModuleInfoImpl::
+ StubValueTy(Ctx.GetOrCreateTemporarySymbol(Name.str()), false);
}
return Sym;
}
@@ -133,7 +141,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
// FIXME: We would like an efficient form for this, so we don't have to do a
// lot of extra uniquing.
const MCExpr *Expr = 0;
- X86MCTargetExpr::VariantKind RefKind = X86MCTargetExpr::Invalid;
+ MCSymbolRefExpr::VariantKind RefKind = MCSymbolRefExpr::VK_None;
switch (MO.getTargetFlags()) {
default: llvm_unreachable("Unknown target flag on GV operand");
@@ -144,15 +152,15 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
case X86II::MO_DARWIN_STUB:
break;
- case X86II::MO_TLSGD: RefKind = X86MCTargetExpr::TLSGD; break;
- case X86II::MO_GOTTPOFF: RefKind = X86MCTargetExpr::GOTTPOFF; break;
- case X86II::MO_INDNTPOFF: RefKind = X86MCTargetExpr::INDNTPOFF; break;
- case X86II::MO_TPOFF: RefKind = X86MCTargetExpr::TPOFF; break;
- case X86II::MO_NTPOFF: RefKind = X86MCTargetExpr::NTPOFF; break;
- case X86II::MO_GOTPCREL: RefKind = X86MCTargetExpr::GOTPCREL; break;
- case X86II::MO_GOT: RefKind = X86MCTargetExpr::GOT; break;
- case X86II::MO_GOTOFF: RefKind = X86MCTargetExpr::GOTOFF; break;
- case X86II::MO_PLT: RefKind = X86MCTargetExpr::PLT; break;
+ case X86II::MO_TLSGD: RefKind = MCSymbolRefExpr::VK_TLSGD; break;
+ case X86II::MO_GOTTPOFF: RefKind = MCSymbolRefExpr::VK_GOTTPOFF; break;
+ case X86II::MO_INDNTPOFF: RefKind = MCSymbolRefExpr::VK_INDNTPOFF; break;
+ case X86II::MO_TPOFF: RefKind = MCSymbolRefExpr::VK_TPOFF; break;
+ case X86II::MO_NTPOFF: RefKind = MCSymbolRefExpr::VK_NTPOFF; break;
+ case X86II::MO_GOTPCREL: RefKind = MCSymbolRefExpr::VK_GOTPCREL; break;
+ case X86II::MO_GOT: RefKind = MCSymbolRefExpr::VK_GOT; break;
+ case X86II::MO_GOTOFF: RefKind = MCSymbolRefExpr::VK_GOTOFF; break;
+ case X86II::MO_PLT: RefKind = MCSymbolRefExpr::VK_PLT; break;
case X86II::MO_PIC_BASE_OFFSET:
case X86II::MO_DARWIN_NONLAZY_PIC_BASE:
case X86II::MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE:
@@ -164,12 +172,8 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO,
break;
}
- if (Expr == 0) {
- if (RefKind == X86MCTargetExpr::Invalid)
- Expr = MCSymbolRefExpr::Create(Sym, Ctx);
- else
- Expr = X86MCTargetExpr::Create(Sym, RefKind, Ctx);
- }
+ if (Expr == 0)
+ Expr = MCSymbolRefExpr::Create(Sym, RefKind, Ctx);
if (!MO.isJTI() && MO.getOffset())
Expr = MCBinaryExpr::CreateAdd(Expr,
@@ -233,7 +237,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
case MachineOperand::MO_MachineBasicBlock:
MCOp = MCOperand::CreateExpr(MCSymbolRefExpr::Create(
- MO.getMBB()->getSymbol(Ctx), Ctx));
+ MO.getMBB()->getSymbol(), Ctx));
break;
case MachineOperand::MO_GlobalAddress:
MCOp = LowerSymbolOperand(MO, GetSymbolFromOperand(MO));
@@ -294,6 +298,29 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV64r0 -> MOV32r0
LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr
break;
+
+
+ // The assembler backend wants to see branches in their small form and relax
+ // them to their large form. The JIT can only handle the large form because
+ // it does not do relaxation. For now, translate the large form to the
+ // small one here.
+ case X86::JMP_4: OutMI.setOpcode(X86::JMP_1); break;
+ case X86::JO_4: OutMI.setOpcode(X86::JO_1); break;
+ case X86::JNO_4: OutMI.setOpcode(X86::JNO_1); break;
+ case X86::JB_4: OutMI.setOpcode(X86::JB_1); break;
+ case X86::JAE_4: OutMI.setOpcode(X86::JAE_1); break;
+ case X86::JE_4: OutMI.setOpcode(X86::JE_1); break;
+ case X86::JNE_4: OutMI.setOpcode(X86::JNE_1); break;
+ case X86::JBE_4: OutMI.setOpcode(X86::JBE_1); break;
+ case X86::JA_4: OutMI.setOpcode(X86::JA_1); break;
+ case X86::JS_4: OutMI.setOpcode(X86::JS_1); break;
+ case X86::JNS_4: OutMI.setOpcode(X86::JNS_1); break;
+ case X86::JP_4: OutMI.setOpcode(X86::JP_1); break;
+ case X86::JNP_4: OutMI.setOpcode(X86::JNP_1); break;
+ case X86::JL_4: OutMI.setOpcode(X86::JL_1); break;
+ case X86::JGE_4: OutMI.setOpcode(X86::JGE_1); break;
+ case X86::JLE_4: OutMI.setOpcode(X86::JLE_1); break;
+ case X86::JG_4: OutMI.setOpcode(X86::JG_1); break;
}
}
@@ -344,6 +371,13 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
} else
printOperand(MI, 0);
} else {
+ if (MI->getOperand(0).getType()==MachineOperand::MO_Register &&
+ MI->getOperand(0).getReg()==0) {
+ // Suppress offset in this case, it is not meaningful.
+ O << "undef";
+ OutStreamer.AddBlankLine();
+ return;
+ }
// Frame address. Currently handles register +- offset only.
assert(MI->getOperand(0).getType()==MachineOperand::MO_Register);
assert(MI->getOperand(3).getType()==MachineOperand::MO_Immediate);
@@ -392,11 +426,8 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
// For this, we want to print something like:
// MYGLOBAL + (. - PICBASE)
// However, we can't generate a ".", so just emit a new label here and refer
- // to it. We know that this operand flag occurs at most once per function.
- const char *Prefix = MAI->getPrivateGlobalPrefix();
- MCSymbol *DotSym = OutContext.GetOrCreateTemporarySymbol(Twine(Prefix)+
- "picbaseref" +
- Twine(getFunctionNumber()));
+ // to it.
+ MCSymbol *DotSym = OutContext.GetOrCreateTemporarySymbol();
OutStreamer.EmitLabel(DotSym);
// Now that we have emitted the label, lower the complex operand expression.
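The JMP_4/Jcc_4 to JMP_1/Jcc_1 remapping above follows from the comment: the MC assembler starts from the 1-byte-displacement form and relaxes it to the 4-byte form when the target is out of range, while the JIT, which does no relaxation, only emits the large form. A sketch of the two machine encodings for an unconditional jmp (opcode bytes as documented in the x86 manuals; the helper itself is hypothetical):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // jmp rel8:  EB cb (2 bytes)  -- the "small" form relaxation starts from.
    // jmp rel32: E9 cd (5 bytes)  -- the "large" form the JIT always emits.
    static std::vector<uint8_t> encodeJmp(int32_t Disp, bool Small) {
      std::vector<uint8_t> Bytes;
      if (Small) {
        Bytes.push_back(0xEB);
        Bytes.push_back(static_cast<uint8_t>(Disp));  // rel8
      } else {
        Bytes.push_back(0xE9);
        for (int i = 0; i < 4; ++i)                   // rel32, little-endian
          Bytes.push_back(static_cast<uint8_t>(Disp >> (8 * i)));
      }
      return Bytes;
    }

    int main() {
      for (uint8_t B : encodeJmp(0x12, /*Small=*/true)) std::printf("%02X ", B);
      std::printf("\n");                                // EB 12
      for (uint8_t B : encodeJmp(0x12345678, false))    std::printf("%02X ", B);
      std::printf("\n");                                // E9 78 56 34 12
    }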
diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt
index eed3b45..4d3dedf 100644
--- a/lib/Target/X86/CMakeLists.txt
+++ b/lib/Target/X86/CMakeLists.txt
@@ -27,7 +27,6 @@ set(sources
X86JITInfo.cpp
X86MCAsmInfo.cpp
X86MCCodeEmitter.cpp
- X86MCTargetExpr.cpp
X86RegisterInfo.cpp
X86Subtarget.cpp
X86TargetMachine.cpp
diff --git a/lib/Target/X86/Disassembler/Makefile b/lib/Target/X86/Disassembler/Makefile
index b289647..8669fd8 100644
--- a/lib/Target/X86/Disassembler/Makefile
+++ b/lib/Target/X86/Disassembler/Makefile
@@ -11,6 +11,6 @@ LEVEL = ../../../..
LIBRARYNAME = LLVMX86Disassembler
# Hack: we need to include 'main' x86 target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/TargetInfo/Makefile b/lib/Target/X86/TargetInfo/Makefile
index 9858e6a..ee91982 100644
--- a/lib/Target/X86/TargetInfo/Makefile
+++ b/lib/Target/X86/TargetInfo/Makefile
@@ -11,6 +11,6 @@ LEVEL = ../../../..
LIBRARYNAME = LLVMX86Info
# Hack: we need to include 'main' target directory to grab private headers
-CPPFLAGS = -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
+CPP.Flags += -I$(PROJ_OBJ_DIR)/.. -I$(PROJ_SRC_DIR)/..
include $(LEVEL)/Makefile.common
diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h
index ba0ee6c..c753cf2 100644
--- a/lib/Target/X86/X86.h
+++ b/lib/Target/X86/X86.h
@@ -21,7 +21,6 @@ namespace llvm {
class FunctionPass;
class JITCodeEmitter;
-class MCAssembler;
class MCCodeEmitter;
class MCContext;
class MachineCodeEmitter;
@@ -57,8 +56,8 @@ MCCodeEmitter *createX86_32MCCodeEmitter(const Target &, TargetMachine &TM,
MCCodeEmitter *createX86_64MCCodeEmitter(const Target &, TargetMachine &TM,
MCContext &Ctx);
-TargetAsmBackend *createX86_32AsmBackend(const Target &, MCAssembler &);
-TargetAsmBackend *createX86_64AsmBackend(const Target &, MCAssembler &);
+TargetAsmBackend *createX86_32AsmBackend(const Target &, const std::string &);
+TargetAsmBackend *createX86_64AsmBackend(const Target &, const std::string &);
/// createX86EmitCodeToMemory - Returns a pass that converts a register
/// allocated function into raw machine code in a dynamically
diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td
index 7919559..6a4bdb5 100644
--- a/lib/Target/X86/X86.td
+++ b/lib/Target/X86/X86.td
@@ -55,7 +55,8 @@ def Feature3DNowA : SubtargetFeature<"3dnowa", "X863DNowLevel", "ThreeDNowA",
// feature, because SSE2 can be disabled (e.g. for compiling OS kernels)
// without disabling 64-bit mode.
def Feature64Bit : SubtargetFeature<"64bit", "HasX86_64", "true",
- "Support 64-bit instructions">;
+ "Support 64-bit instructions",
+ [FeatureCMOV]>;
def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true",
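Giving Feature64Bit the implied-features list [FeatureCMOV] means that enabling 64-bit support now transitively enables CMOV, which every x86-64 CPU provides. A toy sketch of that transitive resolution (the table and names are illustrative, not TableGen output):

    #include <cstdio>
    #include <map>
    #include <set>
    #include <string>
    #include <vector>

    // Feature -> features it implies, mirroring the new [FeatureCMOV] list.
    static const std::map<std::string, std::vector<std::string>> Implies = {
        {"64bit", {"cmov"}},
    };

    static void enable(const std::string &F, std::set<std::string> &On) {
      if (!On.insert(F).second)
        return;                        // already enabled; stop recursing
      auto It = Implies.find(F);
      if (It != Implies.end())
        for (const std::string &Dep : It->second)
          enable(Dep, On);             // pull in implied features
    }

    int main() {
      std::set<std::string> On;
      enable("64bit", On);
      for (const std::string &F : On)
        std::printf("%s\n", F.c_str()); // prints both 64bit and cmov
    }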
diff --git a/lib/Target/X86/X86AsmBackend.cpp b/lib/Target/X86/X86AsmBackend.cpp
index e6654ef..a44afc6 100644
--- a/lib/Target/X86/X86AsmBackend.cpp
+++ b/lib/Target/X86/X86AsmBackend.cpp
@@ -9,6 +9,7 @@
#include "llvm/Target/TargetAsmBackend.h"
#include "X86.h"
+#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/TargetRegistry.h"
#include "llvm/Target/TargetAsmBackend.h"
using namespace llvm;
@@ -17,18 +18,63 @@ namespace {
class X86AsmBackend : public TargetAsmBackend {
public:
- X86AsmBackend(const Target &T, MCAssembler &A)
+ X86AsmBackend(const Target &T)
: TargetAsmBackend(T) {}
};
+class DarwinX86AsmBackend : public X86AsmBackend {
+public:
+ DarwinX86AsmBackend(const Target &T)
+ : X86AsmBackend(T) {}
+
+ virtual bool hasAbsolutizedSet() const { return true; }
+
+ virtual bool hasScatteredSymbols() const { return true; }
+};
+
+class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
+public:
+ DarwinX86_32AsmBackend(const Target &T)
+ : DarwinX86AsmBackend(T) {}
+};
+
+class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
+public:
+ DarwinX86_64AsmBackend(const Target &T)
+ : DarwinX86AsmBackend(T) {}
+
+ virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
+ // Temporary labels in the string literals sections require symbols. The
+ // issue is that the x86_64 relocation format does not allow symbol +
+ // offset, and so the linker does not have enough information to resolve the
+ // access to the appropriate atom unless an external relocation is used. For
+ // non-cstring sections, we expect the compiler to use a non-temporary label
+ // for anything that could have an addend pointing outside the symbol.
+ //
+ // See <rdar://problem/4765733>.
+ const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
+ return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
+ }
+};
+
}
TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
- MCAssembler &A) {
- return new X86AsmBackend(T, A);
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinX86_32AsmBackend(T);
+ default:
+ return new X86AsmBackend(T);
+ }
}
TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
- MCAssembler &A) {
- return new X86AsmBackend(T, A);
+ const std::string &TT) {
+ switch (Triple(TT).getOS()) {
+ case Triple::Darwin:
+ return new DarwinX86_64AsmBackend(T);
+ default:
+ return new X86AsmBackend(T);
+ }
}
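The backend factories now pick a subclass from the OS component of the target triple instead of taking an MCAssembler they never used. A stripped-down sketch of the same dispatch (class and function names hypothetical):

    #include <memory>
    #include <string>

    // Skeleton of the factory above: one generic backend, one Darwin
    // specialization that answers hasAbsolutizedSet()/hasScatteredSymbols().
    struct AsmBackend { virtual ~AsmBackend() = default; };
    struct GenericX86Backend : AsmBackend {};
    struct DarwinX86Backend : AsmBackend {};

    static std::unique_ptr<AsmBackend> createBackend(const std::string &Triple) {
      if (Triple.find("darwin") != std::string::npos)
        return std::make_unique<DarwinX86Backend>();
      return std::make_unique<GenericX86Backend>();
    }

    int main() {
      auto B = createBackend("x86_64-apple-darwin10"); // Darwin backend
      (void)B;
    }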
diff --git a/lib/Target/X86/X86COFFMachineModuleInfo.cpp b/lib/Target/X86/X86COFFMachineModuleInfo.cpp
index ab67acb..4326814 100644
--- a/lib/Target/X86/X86COFFMachineModuleInfo.cpp
+++ b/lib/Target/X86/X86COFFMachineModuleInfo.cpp
@@ -12,80 +12,9 @@
//===----------------------------------------------------------------------===//
#include "X86COFFMachineModuleInfo.h"
-#include "X86MachineFunctionInfo.h"
-#include "llvm/DerivedTypes.h"
-#include "llvm/Function.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Target/TargetData.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/Support/raw_ostream.h"
using namespace llvm;
-X86COFFMachineModuleInfo::X86COFFMachineModuleInfo(const MachineModuleInfo &) {
-}
-X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
-}
-
-void X86COFFMachineModuleInfo::addExternalFunction(const StringRef& Name) {
- CygMingStubs.insert(Name);
-}
-
-/// DecorateCygMingName - Apply various name decorations if the function uses
-/// stdcall or fastcall calling convention.
-void X86COFFMachineModuleInfo::DecorateCygMingName(SmallVectorImpl<char> &Name,
- const GlobalValue *GV,
- const TargetData &TD) {
- const Function *F = dyn_cast<Function>(GV);
- if (!F) return;
-
- // We don't want to decorate non-stdcall or non-fastcall functions right now
- CallingConv::ID CC = F->getCallingConv();
- if (CC != CallingConv::X86_StdCall && CC != CallingConv::X86_FastCall)
- return;
-
- unsigned ArgWords = 0;
- DenseMap<const Function*, unsigned>::const_iterator item = FnArgWords.find(F);
- if (item == FnArgWords.end()) {
- // Calculate arguments sizes
- for (Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();
- AI != AE; ++AI) {
- const Type* Ty = AI->getType();
- // 'Dereference' type in case of byval parameter attribute
- if (AI->hasByValAttr())
- Ty = cast<PointerType>(Ty)->getElementType();
-
- // Size should be aligned to DWORD boundary
- ArgWords += ((TD.getTypeAllocSize(Ty) + 3)/4)*4;
- }
-
- FnArgWords[F] = ArgWords;
- } else
- ArgWords = item->second;
-
- const FunctionType *FT = F->getFunctionType();
- // "Pure" variadic functions do not receive @0 suffix.
- if (!FT->isVarArg() || FT->getNumParams() == 0 ||
- (FT->getNumParams() == 1 && F->hasStructRetAttr()))
- raw_svector_ostream(Name) << '@' << ArgWords;
-
- if (CC == CallingConv::X86_FastCall) {
- if (Name[0] == '_')
- Name[0] = '@';
- else
- Name.insert(Name.begin(), '@');
- }
+X86COFFMachineModuleInfo::~X86COFFMachineModuleInfo() {
}
-/// DecorateCygMingName - Query FunctionInfoMap and use this information for
-/// various name decorations for Cygwin and MingW.
-void X86COFFMachineModuleInfo::DecorateCygMingName(MCSymbol *&Name,
- MCContext &Ctx,
- const GlobalValue *GV,
- const TargetData &TD) {
- SmallString<128> NameStr(Name->getName().begin(), Name->getName().end());
- DecorateCygMingName(NameStr, GV, TD);
-
- Name = Ctx.GetOrCreateSymbol(NameStr.str());
-}
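The deleted DecorateCygMingName implemented the Win32 name decoration scheme for Cygwin/MinGW: stdcall and fastcall symbols gain an "@<argument bytes>" suffix (each argument's size rounded up to a 4-byte word), and fastcall additionally replaces the leading underscore with "@". A self-contained restatement of the core rule, omitting the vararg and byval special cases the original handled:

    #include <iostream>
    #include <string>

    enum class CC { C, StdCall, FastCall };

    // Decorate a mangled name ("_foo") per the removed CygMing logic.
    static std::string decorate(std::string Name, CC Conv, unsigned ArgBytes) {
      if (Conv != CC::StdCall && Conv != CC::FastCall)
        return Name;                          // only stdcall/fastcall decorate
      Name += '@' + std::to_string(ArgBytes); // append stack-argument bytes
      if (Conv == CC::FastCall) {             // fastcall: '_foo' -> '@foo'
        if (!Name.empty() && Name[0] == '_')
          Name[0] = '@';
        else
          Name.insert(Name.begin(), '@');
      }
      return Name;
    }

    int main() {
      std::cout << decorate("_foo", CC::StdCall, 8) << '\n';  // _foo@8
      std::cout << decorate("_foo", CC::FastCall, 8) << '\n'; // @foo@8
    }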
diff --git a/lib/Target/X86/X86COFFMachineModuleInfo.h b/lib/Target/X86/X86COFFMachineModuleInfo.h
index 9de3dcd..eece462 100644
--- a/lib/Target/X86/X86COFFMachineModuleInfo.h
+++ b/lib/Target/X86/X86COFFMachineModuleInfo.h
@@ -26,17 +26,14 @@ namespace llvm {
/// for X86 COFF targets.
class X86COFFMachineModuleInfo : public MachineModuleInfoImpl {
StringSet<> CygMingStubs;
- DenseMap<const Function*, unsigned> FnArgWords;
public:
- X86COFFMachineModuleInfo(const MachineModuleInfo &);
- ~X86COFFMachineModuleInfo();
+ X86COFFMachineModuleInfo(const MachineModuleInfo &) {}
+ virtual ~X86COFFMachineModuleInfo();
- void DecorateCygMingName(MCSymbol* &Name, MCContext &Ctx,
- const GlobalValue *GV, const TargetData &TD);
- void DecorateCygMingName(SmallVectorImpl<char> &Name, const GlobalValue *GV,
- const TargetData &TD);
-
- void addExternalFunction(const StringRef& Name);
+ void addExternalFunction(StringRef Name) {
+ CygMingStubs.insert(Name);
+ }
+
typedef StringSet<>::const_iterator stub_iterator;
stub_iterator stub_begin() const { return CygMingStubs.begin(); }
stub_iterator stub_end() const { return CygMingStubs.end(); }
diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td
index 12d3d04..fd15efd 100644
--- a/lib/Target/X86/X86CallingConv.td
+++ b/lib/Target/X86/X86CallingConv.td
@@ -221,6 +221,20 @@ def CC_X86_Win64_C : CallingConv<[
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>>
]>;
+def CC_X86_64_GHC : CallingConv<[
+ // Promote i8/i16/i32 arguments to i64.
+ CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,
+
+ // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
+ CCIfType<[i64],
+ CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,
+
+ // Pass in STG registers: F1, F2, F3, F4, D1, D2
+ CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
+ CCIfSubtarget<"hasSSE1()",
+ CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
+]>;
+
//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//
@@ -320,3 +334,11 @@ def CC_X86_32_FastCC : CallingConv<[
// Otherwise, same as everything else.
CCDelegateTo<CC_X86_32_Common>
]>;
+
+def CC_X86_32_GHC : CallingConv<[
+ // Promote i8/i16 arguments to i32.
+ CCIfType<[i8, i16], CCPromoteToType<i32>>,
+
+ // Pass in STG registers: Base, Sp, Hp, R1
+ CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
+]>;
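The new GHC calling conventions pin the STG machine's virtual registers to fixed physical registers, so compiled Haskell code can jump between functions without shuffling arguments. The 64-bit integer-register mapping from CC_X86_64_GHC above, restated as a plain table (illustrative C++, not LLVM API):

    #include <cstdio>

    // STG register -> x86-64 physical register, per CC_X86_64_GHC above.
    struct StgMapping { const char *Stg, *Phys; };
    static const StgMapping GHC64[] = {
        {"Base", "R13"}, {"Sp", "RBP"}, {"Hp", "R12"}, {"R1", "RBX"},
        {"R2", "R14"},   {"R3", "RSI"}, {"R4", "RDI"}, {"R5", "R8"},
        {"R6", "R9"},    {"SpLim", "R15"},
    };

    int main() {
      for (const StgMapping &M : GHC64)
        std::printf("%-5s -> %s\n", M.Stg, M.Phys);
    }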
diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp
index 8deadf6..6638e11 100644
--- a/lib/Target/X86/X86CodeEmitter.cpp
+++ b/lib/Target/X86/X86CodeEmitter.cpp
@@ -46,6 +46,7 @@ namespace {
const TargetData *TD;
X86TargetMachine &TM;
CodeEmitter &MCE;
+ MachineModuleInfo *MMI;
intptr_t PICBaseOffset;
bool Is64BitMode;
bool IsPIC;
@@ -115,8 +116,8 @@ FunctionPass *llvm::createX86JITCodeEmitterPass(X86TargetMachine &TM,
template<class CodeEmitter>
bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) {
-
- MCE.setModuleInfo(&getAnalysis<MachineModuleInfo>());
+ MMI = &getAnalysis<MachineModuleInfo>();
+ MCE.setModuleInfo(MMI);
II = TM.getInstrInfo();
TD = TM.getTargetData();
@@ -602,10 +603,11 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI,
llvm_report_error("JIT does not support inline asm!");
break;
case TargetOpcode::DBG_LABEL:
- case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
- MCE.emitLabel(MI.getOperand(0).getImm());
+ case TargetOpcode::EH_LABEL:
+ MCE.emitLabel(MI.getOperand(0).getMCSymbol());
break;
+
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
case X86::FP_REG_KILL:
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 98e3f4e..96b652d 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -172,7 +172,9 @@ bool X86FastISel::isTypeLegal(const Type *Ty, EVT &VT, bool AllowI1) {
CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
bool isTaillCall) {
if (Subtarget->is64Bit()) {
- if (Subtarget->isTargetWin64())
+ if (CC == CallingConv::GHC)
+ return CC_X86_64_GHC;
+ else if (Subtarget->isTargetWin64())
return CC_X86_Win64_C;
else
return CC_X86_64_C;
@@ -182,6 +184,8 @@ CCAssignFn *X86FastISel::CCAssignFnForCall(CallingConv::ID CC,
return CC_X86_32_FastCall;
else if (CC == CallingConv::Fast)
return CC_X86_32_FastCC;
+ else if (CC == CallingConv::GHC)
+ return CC_X86_32_GHC;
else
return CC_X86_32_C;
}
@@ -1162,6 +1166,30 @@ bool X86FastISel::X86VisitIntrinsicCall(IntrinsicInst &I) {
// FIXME: Handle more intrinsics.
switch (I.getIntrinsicID()) {
default: return false;
+ case Intrinsic::objectsize: {
+ ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand(2));
+ const Type *Ty = I.getCalledFunction()->getReturnType();
+
+ assert(CI && "Non-constant type in Intrinsic::objectsize?");
+
+ EVT VT;
+ if (!isTypeLegal(Ty, VT))
+ return false;
+
+ unsigned OpC = 0;
+ if (VT == MVT::i32)
+ OpC = X86::MOV32ri;
+ else if (VT == MVT::i64)
+ OpC = X86::MOV64ri;
+ else
+ return false;
+
+ unsigned ResultReg = createResultReg(TLI.getRegClassFor(VT));
+ BuildMI(MBB, DL, TII.get(OpC), ResultReg).
+ addImm(CI->getZExtValue() == 0 ? -1ULL : 0);
+ UpdateValueMap(&I, ResultReg);
+ return true;
+ }
case Intrinsic::dbg_declare: {
DbgDeclareInst *DI = cast<DbgDeclareInst>(&I);
X86AddressMode AM;
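The new Intrinsic::objectsize case constant-folds llvm.objectsize calls whose answer is statically unknown: a "min" argument of 0 materializes -1 (no upper bound is known) and a nonzero argument materializes 0 (no lower bound is known), via MOV32ri/MOV64ri. The same pair of constants is observable through the Clang/GCC builtin when the allocation is invisible to the compiler:

    #include <cstdio>

    int main(int, char **argv) {
      char *P = argv[0]; // storage whose size the compiler cannot see
      // Type 0 (max query) folds to (size_t)-1; type 2 (min query) folds
      // to 0 -- exactly the constants the fast-isel case above emits.
      std::printf("%zu\n", __builtin_object_size(P, 0));
      std::printf("%zu\n", __builtin_object_size(P, 2));
    }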
diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp
index 3fad8ad..4058885 100644
--- a/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -349,17 +349,17 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
return true;
}
-/// MoveBelowCallSeqStart - Replace CALLSEQ_START operand with load's chain
-/// operand and move load below the call's chain operand.
-static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
- SDValue Call, SDValue CallSeqStart) {
+/// MoveBelowCallOrigChain - Replace the original chain operand of the call with
+/// load's chain operand and move load below the call's chain operand.
+static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
+ SDValue Call, SDValue OrigChain) {
SmallVector<SDValue, 8> Ops;
- SDValue Chain = CallSeqStart.getOperand(0);
+ SDValue Chain = OrigChain.getOperand(0);
if (Chain.getNode() == Load.getNode())
Ops.push_back(Load.getOperand(0));
else {
assert(Chain.getOpcode() == ISD::TokenFactor &&
- "Unexpected CallSeqStart chain operand");
+ "Unexpected chain operand");
for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
if (Chain.getOperand(i).getNode() == Load.getNode())
Ops.push_back(Load.getOperand(0));
@@ -371,9 +371,9 @@ static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
Ops.clear();
Ops.push_back(NewChain);
}
- for (unsigned i = 1, e = CallSeqStart.getNumOperands(); i != e; ++i)
- Ops.push_back(CallSeqStart.getOperand(i));
- CurDAG->UpdateNodeOperands(CallSeqStart, &Ops[0], Ops.size());
+ for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
+ Ops.push_back(OrigChain.getOperand(i));
+ CurDAG->UpdateNodeOperands(OrigChain, &Ops[0], Ops.size());
CurDAG->UpdateNodeOperands(Load, Call.getOperand(0),
Load.getOperand(1), Load.getOperand(2));
Ops.clear();
@@ -386,7 +386,9 @@ static void MoveBelowCallSeqStart(SelectionDAG *CurDAG, SDValue Load,
/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
-static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
+/// In the case of a tail call, there isn't a callseq node between the call
+/// chain and the load.
+static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
return false;
LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
@@ -397,12 +399,14 @@ static bool isCalleeLoad(SDValue Callee, SDValue &Chain) {
return false;
// Now let's find the callseq_start.
- while (Chain.getOpcode() != ISD::CALLSEQ_START) {
+ while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
if (!Chain.hasOneUse())
return false;
Chain = Chain.getOperand(0);
}
-
+
+ if (!Chain.getNumOperands())
+ return false;
if (Chain.getOperand(0).getNode() == Callee.getNode())
return true;
if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
@@ -420,7 +424,9 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
E = CurDAG->allnodes_end(); I != E; ) {
SDNode *N = I++; // Preincrement iterator to avoid invalidation issues.
- if (OptLevel != CodeGenOpt::None && N->getOpcode() == X86ISD::CALL) {
+ if (OptLevel != CodeGenOpt::None &&
+ (N->getOpcode() == X86ISD::CALL ||
+ N->getOpcode() == X86ISD::TC_RETURN)) {
/// Also try moving call address load from outside callseq_start to just
/// before the call to allow it to be folded.
///
@@ -440,11 +446,12 @@ void X86DAGToDAGISel::PreprocessISelDAG() {
/// \ /
/// \ /
/// [CALL]
+ bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
SDValue Chain = N->getOperand(0);
SDValue Load = N->getOperand(1);
- if (!isCalleeLoad(Load, Chain))
+ if (!isCalleeLoad(Load, Chain, HasCallSeq))
continue;
- MoveBelowCallSeqStart(CurDAG, Load, SDValue(N, 0), Chain);
+ MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
++NumLoadMoved;
continue;
}
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 0cfcbb6..7d2140b 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -16,7 +16,6 @@
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
-#include "X86MCTargetExpr.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/CallingConv.h"
@@ -37,6 +36,7 @@
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
@@ -45,10 +45,12 @@
#include "llvm/ADT/VectorExtras.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
+#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
+using namespace dwarf;
STATISTIC(NumTailCalls, "Number of tail calls");
@@ -988,6 +990,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
// We have target-specific dag combine patterns for the following nodes:
setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+ setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
setTargetDAGCombine(ISD::BUILD_VECTOR);
setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SHL);
@@ -1118,8 +1121,8 @@ X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
Subtarget->isPICStyleGOT());
// In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
// entries.
- return X86MCTargetExpr::Create(MBB->getSymbol(Ctx),
- X86MCTargetExpr::GOTOFF, Ctx);
+ return MCSymbolRefExpr::Create(MBB->getSymbol(),
+ MCSymbolRefExpr::VK_GOTOFF, Ctx);
}
/// getPICJumpTableRelocaBase - Returns relocation base for the given PIC
@@ -1378,6 +1381,8 @@ bool X86TargetLowering::IsCalleePop(bool IsVarArg, CallingConv::ID CallingConv){
return !Subtarget->is64Bit();
case CallingConv::Fast:
return GuaranteedTailCallOpt;
+ case CallingConv::GHC:
+ return GuaranteedTailCallOpt;
}
}
@@ -1385,7 +1390,9 @@ bool X86TargetLowering::IsCalleePop(bool IsVarArg, CallingConv::ID CallingConv){
/// given CallingConvention value.
CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
if (Subtarget->is64Bit()) {
- if (Subtarget->isTargetWin64())
+ if (CC == CallingConv::GHC)
+ return CC_X86_64_GHC;
+ else if (Subtarget->isTargetWin64())
return CC_X86_Win64_C;
else
return CC_X86_64_C;
@@ -1395,6 +1402,8 @@ CCAssignFn *X86TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
return CC_X86_32_FastCall;
else if (CC == CallingConv::Fast)
return CC_X86_32_FastCC;
+ else if (CC == CallingConv::GHC)
+ return CC_X86_32_GHC;
else
return CC_X86_32_C;
}
@@ -1412,10 +1421,16 @@ CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
/*AlwaysInline=*/true, NULL, 0, NULL, 0);
}
+/// IsTailCallConvention - Return true if the calling convention is one that
+/// supports tail call optimization.
+static bool IsTailCallConvention(CallingConv::ID CC) {
+ return (CC == CallingConv::Fast || CC == CallingConv::GHC);
+}
+
/// FuncIsMadeTailCallSafe - Return true if the function is being made into
/// a tailcall target by changing its ABI.
static bool FuncIsMadeTailCallSafe(CallingConv::ID CC) {
- return GuaranteedTailCallOpt && CC == CallingConv::Fast;
+ return GuaranteedTailCallOpt && IsTailCallConvention(CC);
}
SDValue
@@ -1465,7 +1480,6 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
DebugLoc dl,
SelectionDAG &DAG,
SmallVectorImpl<SDValue> &InVals) {
-
MachineFunction &MF = DAG.getMachineFunction();
X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
@@ -1479,8 +1493,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
bool Is64Bit = Subtarget->is64Bit();
bool IsWin64 = Subtarget->isTargetWin64();
- assert(!(isVarArg && CallConv == CallingConv::Fast) &&
- "Var args not supported with calling convention fastcc");
+ assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
+ "Var args not supported with calling convention fastcc or ghc");
// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -1683,7 +1697,7 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain,
} else {
BytesToPopOnReturn = 0; // Callee pops nothing.
// If this is an sret function, the return should pop the hidden pointer.
- if (!Is64Bit && CallConv != CallingConv::Fast && ArgsAreStructReturn(Ins))
+ if (!Is64Bit && !IsTailCallConvention(CallConv) && ArgsAreStructReturn(Ins))
BytesToPopOnReturn = 4;
}
@@ -1767,7 +1781,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (isTailCall) {
// Check if it's really possible to do a tail call.
- isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
+ isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
+ isVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
Outs, Ins, DAG);
// Sibcalls are automatically detected tailcalls which do not require
@@ -1779,8 +1794,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
++NumTailCalls;
}
- assert(!(isVarArg && CallConv == CallingConv::Fast) &&
- "Var args not supported with calling convention fastcc");
+ assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
+ "Var args not supported with calling convention fastcc or ghc");
// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
@@ -1794,7 +1809,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
// This is a sibcall. The memory operands are available in caller's
// own caller's stack.
NumBytes = 0;
- else if (GuaranteedTailCallOpt && CallConv == CallingConv::Fast)
+ else if (GuaranteedTailCallOpt && IsTailCallConvention(CallConv))
NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
int FPDiff = 0;
@@ -2074,18 +2089,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
OpFlags);
}
- if (isTailCall && !WasGlobalOrExternal) {
- // Force the address into a (call preserved) caller-saved register since
- // tailcall must happen after callee-saved registers are poped.
- // FIXME: Give it a special register class that contains caller-saved
- // register instead?
- unsigned TCReg = Is64Bit ? X86::R11 : X86::EAX;
- Chain = DAG.getCopyToReg(Chain, dl,
- DAG.getRegister(TCReg, getPointerTy()),
- Callee,InFlag);
- Callee = DAG.getRegister(TCReg, getPointerTy());
- }
-
// Returns a chain & a flag for retval copy to use.
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
SmallVector<SDValue, 8> Ops;
@@ -2131,14 +2134,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
if (RVLocs[i].isRegLoc())
MF.getRegInfo().addLiveOut(RVLocs[i].getLocReg());
}
-
- assert(((Callee.getOpcode() == ISD::Register &&
- (cast<RegisterSDNode>(Callee)->getReg() == X86::EAX ||
- cast<RegisterSDNode>(Callee)->getReg() == X86::R11)) ||
- Callee.getOpcode() == ISD::TargetExternalSymbol ||
- Callee.getOpcode() == ISD::TargetGlobalAddress) &&
- "Expecting a global address, external symbol, or scratch register");
-
return DAG.getNode(X86ISD::TC_RETURN, dl,
NodeTys, &Ops[0], Ops.size());
}
@@ -2150,7 +2145,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee,
unsigned NumBytesForCalleeToPush;
if (IsCalleePop(isVarArg, CallConv))
NumBytesForCalleeToPush = NumBytes; // Callee pops everything
- else if (!Is64Bit && CallConv != CallingConv::Fast && IsStructRet)
+ else if (!Is64Bit && !IsTailCallConvention(CallConv) && IsStructRet)
// If this is a call to a struct-return function, the callee
// pops the hidden struct pointer, so we have to push it back.
// This is common for Darwin/X86, Linux & Mingw32 targets.
@@ -2285,17 +2280,19 @@ bool
X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CalleeCC,
bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const {
- if (CalleeCC != CallingConv::Fast &&
+ if (!IsTailCallConvention(CalleeCC) &&
CalleeCC != CallingConv::C)
return false;
// If -tailcallopt is specified, make fastcc functions tail-callable.
const Function *CallerF = DAG.getMachineFunction().getFunction();
if (GuaranteedTailCallOpt) {
- if (CalleeCC == CallingConv::Fast &&
+ if (IsTailCallConvention(CalleeCC) &&
CallerF->getCallingConv() == CalleeCC)
return true;
return false;
@@ -2304,10 +2301,15 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
// Look for obvious safe cases to perform tail call optimization that does not
// requite ABI changes. This is what gcc calls sibcall.
- // Do not tail call optimize vararg calls for now.
+ // Do not sibcall optimize vararg calls for now.
if (isVarArg)
return false;
+ // Also avoid sibcall optimization if either caller or callee uses struct
+ // return semantics.
+ if (isCalleeStructRet || isCallerStructRet)
+ return false;
+
// If the callee takes no arguments then go on to check the results of the
// call.
if (!Outs.empty()) {
@@ -6158,7 +6160,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) {
N2C && N2C->isNullValue() &&
RHSC && RHSC->isNullValue()) {
SDValue CmpOp0 = Cmp.getOperand(0);
- Cmp = DAG.getNode(X86ISD::CMP, dl, CmpOp0.getValueType(),
+ Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
return DAG.getNode(X86ISD::SETCC_CARRY, dl, Op.getValueType(),
DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
@@ -8439,6 +8441,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
case X86::CMOV_V2I64:
+ case X86::CMOV_GR16:
+ case X86::CMOV_GR32:
+ case X86::CMOV_RFP32:
+ case X86::CMOV_RFP64:
+ case X86::CMOV_RFP80:
return EmitLoweredSelect(MI, BB, EM);
case X86::FP32_TO_INT16_IN_MEM:
@@ -8521,6 +8528,21 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
F->DeleteMachineInstr(MI); // The pseudo instruction is gone now.
return BB;
}
+ // DBG_VALUE. Only the frame index case is done here.
+ case X86::DBG_VALUE: {
+ const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
+ DebugLoc DL = MI->getDebugLoc();
+ X86AddressMode AM;
+ MachineFunction *F = BB->getParent();
+ AM.BaseType = X86AddressMode::FrameIndexBase;
+ AM.Base.FrameIndex = MI->getOperand(0).getImm();
+ addFullAddress(BuildMI(BB, DL, TII->get(X86::DBG_VALUE)), AM).
+ addImm(MI->getOperand(1).getImm()).
+ addMetadata(MI->getOperand(2).getMetadata());
+ F->DeleteMachineInstr(MI); // Remove pseudo.
+ return BB;
+ }
+
// String/text processing lowering.
case X86::PCMPISTRM128REG:
return EmitPCMP(MI, BB, 3, false /* in-mem */);
@@ -8832,6 +8854,87 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+/// PerformShuffleCombine - Detect vector gather/scatter index generation
+/// and convert it from being a bunch of shuffles and extracts to a simple
+/// store and scalar loads to extract the elements.
+static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ SDValue InputVector = N->getOperand(0);
+
+ // Only operate on vectors of 4 elements, where the alternative shuffling
+ // gets to be more expensive.
+ if (InputVector.getValueType() != MVT::v4i32)
+ return SDValue();
+
+ // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
+ // single use which is a sign-extend or zero-extend, and all elements are
+ // used.
+ SmallVector<SDNode *, 4> Uses;
+ unsigned ExtractedElements = 0;
+ for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
+ UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
+ if (UI.getUse().getResNo() != InputVector.getResNo())
+ return SDValue();
+
+ SDNode *Extract = *UI;
+ if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
+ return SDValue();
+
+ if (Extract->getValueType(0) != MVT::i32)
+ return SDValue();
+ if (!Extract->hasOneUse())
+ return SDValue();
+ if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
+ Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
+ return SDValue();
+ if (!isa<ConstantSDNode>(Extract->getOperand(1)))
+ return SDValue();
+
+ // Record which element was extracted.
+ ExtractedElements |=
+ 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
+
+ Uses.push_back(Extract);
+ }
+
+ // If not all the elements were used, this may not be worthwhile.
+ if (ExtractedElements != 15)
+ return SDValue();
+
+ // Ok, we've now decided to do the transformation.
+ DebugLoc dl = InputVector.getDebugLoc();
+
+ // Store the value to a temporary stack slot.
+ SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
+ SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, NULL, 0,
+ false, false, 0);
+
+ // Replace each use (extract) with a load of the appropriate element.
+ for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
+ UE = Uses.end(); UI != UE; ++UI) {
+ SDNode *Extract = *UI;
+
+ // Compute the element's address.
+ SDValue Idx = Extract->getOperand(1);
+ unsigned EltSize =
+ InputVector.getValueType().getVectorElementType().getSizeInBits()/8;
+ uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue();
+ SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
+
+ SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), OffsetVal, StackPtr);
+
+ // Load the scalar.
+ SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, ScalarAddr,
+ NULL, 0, false, false, 0);
+
+ // Replace the exact with the load.
+ DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar);
+ }
+
+ // The replacement was made in place; don't return anything.
+ return SDValue();
+}
+
/// PerformSELECTCombine - Do target-specific dag combines on SELECT nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
const X86Subtarget *Subtarget) {
@@ -9721,6 +9824,8 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
switch (N->getOpcode()) {
default: break;
case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
+ case ISD::EXTRACT_VECTOR_ELT:
+ return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI);
case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
@@ -9810,7 +9915,8 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
AsmPieces[2] == "${0:w}" &&
IA->getConstraintString().compare(0, 5, "=r,0,") == 0) {
AsmPieces.clear();
- SplitString(IA->getConstraintString().substr(5), AsmPieces, ",");
+ const std::string &Constraints = IA->getConstraintString();
+ SplitString(StringRef(Constraints).substr(5), AsmPieces, ",");
std::sort(AsmPieces.begin(), AsmPieces.end());
if (AsmPieces.size() == 4 &&
AsmPieces[0] == "~{cc}" &&
@@ -10265,41 +10371,3 @@ X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
return Res;
}
-
-//===----------------------------------------------------------------------===//
-// X86 Widen vector type
-//===----------------------------------------------------------------------===//
-
-/// getWidenVectorType: given a vector type, returns the type to widen
-/// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
-/// If there is no vector type that we want to widen to, returns MVT::Other
-/// When and where to widen is target dependent based on the cost of
-/// scalarizing vs using the wider vector type.
-
-EVT X86TargetLowering::getWidenVectorType(EVT VT) const {
- assert(VT.isVector());
- if (isTypeLegal(VT))
- return VT;
-
- // TODO: In computeRegisterProperty, we can compute the list of legal vector
- // type based on element type. This would speed up our search (though
- // it may not be worth it since the size of the list is relatively
- // small).
- EVT EltVT = VT.getVectorElementType();
- unsigned NElts = VT.getVectorNumElements();
-
- // On X86, it make sense to widen any vector wider than 1
- if (NElts <= 1)
- return MVT::Other;
-
- for (unsigned nVT = MVT::FIRST_VECTOR_VALUETYPE;
- nVT <= MVT::LAST_VECTOR_VALUETYPE; ++nVT) {
- EVT SVT = (MVT::SimpleValueType)nVT;
-
- if (isTypeLegal(SVT) &&
- SVT.getVectorElementType() == EltVT &&
- SVT.getVectorNumElements() > NElts)
- return SVT;
- }
- return MVT::Other;
-}
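PerformEXTRACT_VECTOR_ELTCombine trades a chain of shuffles and element extracts for one 16-byte store to a stack temporary plus cheap scalar loads, once it has proven that all four lanes of the v4i32 are extracted and immediately extended. In plain C++ the transformation is equivalent to the following (illustrative only; the real combine rewrites SelectionDAG nodes):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct V4i32 { int32_t Lanes[4]; }; // stands in for an XMM value

    int main() {
      V4i32 V = {{10, 20, 30, 40}};

      // Before: each lane reached via a vector extract (shuffle + movd).
      // After: one vector store to a stack slot, then one scalar load per
      // former extract (cf. DAG.CreateStackTemporary + DAG.getLoad above).
      int32_t Slot[4];
      std::memcpy(Slot, &V, sizeof Slot);   // the single vector store
      for (int i = 0; i < 4; ++i)
        std::printf("%d\n", Slot[i]);       // the scalar loads
    }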
diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index 4c12fcc..0f15eba 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -564,13 +564,6 @@ namespace llvm {
(VT == MVT::f32 && X86ScalarSSEf32); // f32 is when SSE1
}
- /// getWidenVectorType: given a vector type, returns the type to widen
- /// to (e.g., v7i8 to v8i8). If the vector type is legal, it returns itself.
- /// If there is no vector type that we want to widen to, returns EVT::Other
- /// When and were to widen is target dependent based on the cost of
- /// scalarizing vs using the wider vector type.
- virtual EVT getWidenVectorType(EVT VT) const;
-
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
virtual FastISel *
@@ -637,6 +630,8 @@ namespace llvm {
bool IsEligibleForTailCallOptimization(SDValue Callee,
CallingConv::ID CalleeCC,
bool isVarArg,
+ bool isCalleeStructRet,
+ bool isCallerStructRet,
const SmallVectorImpl<ISD::OutputArg> &Outs,
const SmallVectorImpl<ISD::InputArg> &Ins,
SelectionDAG& DAG) const;
diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td
index 8e684c9..4262c0ac 100644
--- a/lib/Target/X86/X86Instr64bit.td
+++ b/lib/Target/X86/X86Instr64bit.td
@@ -24,6 +24,7 @@ def i64i32imm : Operand<i64>;
// pc relative.
def i64i32imm_pcrel : Operand<i64> {
let PrintMethod = "print_pcrel_imm";
+ let ParserMatchClass = X86AbsMemAsmOperand;
}
@@ -32,17 +33,26 @@ def i64i8imm : Operand<i64> {
let ParserMatchClass = ImmSExt8AsmOperand;
}
+// Special i64mem for addresses of load-folding tail calls. These are not
+// allowed to use callee-saved registers since they must be scheduled
+// after callee-saved registers are popped.
+def i64mem_TC : Operand<i64> {
+ let PrintMethod = "printi64mem";
+ let MIOperandInfo = (ops GR64_TC, i8imm, GR64_TC, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
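
The restriction described in the comment is mechanical: a load-folding tail call dereferences this operand after the epilogue has popped the callee-saved registers, so the address may only live in registers the epilogue never restores. The GR64_TC class added later in this change is exactly that caller-saved subset, summarized here for orientation:

  // Registers a 64-bit load-folding tail call may use for its address,
  // mirroring the GR64_TC class defined below. RBX, RBP, and R12-R15 are
  // excluded because the epilogue restores them before the tail jump runs.
  enum GR64TailCallReg { RAX, RCX, RDX, RSI, RDI, R8, R9, R11 };
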
def lea64mem : Operand<i64> {
let PrintMethod = "printlea64mem";
let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86NoSegMemAsmOperand;
}
def lea64_32mem : Operand<i32> {
let PrintMethod = "printlea64_32mem";
let AsmOperandLowerMethod = "lower_lea64_32mem";
let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
- let ParserMatchClass = X86MemAsmOperand;
+ let ParserMatchClass = X86NoSegMemAsmOperand;
}
//===----------------------------------------------------------------------===//
@@ -176,22 +186,31 @@ let isCall = 1 in
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNdi64 : I<0, Pseudo, (outs), (ins i64imm:$dst, i32imm:$offset,
- variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64:$dst, i32imm:$offset,
- variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64:$dst, variable_ops),
- "jmp{q}\t{*}$dst # TAILCALL",
- []>;
+ let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [RSP] in {
+ def TCRETURNdi64 : I<0, Pseudo, (outs),
+ (ins i64i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNri64 : I<0, Pseudo, (outs), (ins GR64_TC:$dst, i32imm:$offset,
+ variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNmi64 : I<0, Pseudo, (outs),
+ (ins i64mem_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+
+ def TAILJMPd64 : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i64i32imm_pcrel:$dst, variable_ops),
+ "jmp\t$dst # TAILCALL", []>;
+ def TAILJMPr64 : I<0xFF, MRM4r, (outs), (ins GR64_TC:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+
+ def TAILJMPm64 : I<0xFF, MRM4m, (outs), (ins i64mem_TC:$dst, variable_ops),
+ "jmp{q}\t{*}$dst # TAILCALL", []>;
+}
// Branches
let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
@@ -339,6 +358,22 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src),
"mov{q}\t{$src, $dst|$dst, $src}",
[(store i64immSExt32:$src, addr:$dst)]>;
+/// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC.
+let neverHasSideEffects = 1 in
+def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}", []>;
+
+let mayLoad = 1,
+ canFoldAsLoad = 1, isReMaterializable = 1 in
+def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ []>;
+
+let mayStore = 1 in
+def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
+ "mov{q}\t{$src, $dst|$dst, $src}",
+ []>;
+
def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src),
"mov{q}\t{$src, %rax|%rax, $src}", []>;
def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src),
@@ -463,8 +498,8 @@ let neverHasSideEffects = 1 in {
let Defs = [EFLAGS] in {
-def ADD64i32 : RI<0x05, RawFrm, (outs), (ins i32imm:$src),
- "add{q}\t{$src, %rax|%rax, $src}", []>;
+def ADD64i32 : RIi32<0x05, RawFrm, (outs), (ins i32imm:$src),
+ "add{q}\t{$src, %rax|%rax, $src}", []>;
let isTwoAddress = 1 in {
let isConvertibleToThreeAddress = 1 in {
@@ -520,8 +555,8 @@ def ADD64mi32 : RIi32<0x81, MRM0m, (outs), (ins i64mem:$dst, i64i32imm :$src2),
let Uses = [EFLAGS] in {
-def ADC64i32 : RI<0x15, RawFrm, (outs), (ins i32imm:$src),
- "adc{q}\t{$src, %rax|%rax, $src}", []>;
+def ADC64i32 : RIi32<0x15, RawFrm, (outs), (ins i32imm:$src),
+ "adc{q}\t{$src, %rax|%rax, $src}", []>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
@@ -594,8 +629,8 @@ def SUB64ri32 : RIi32<0x81, MRM5r, (outs GR64:$dst),
(implicit EFLAGS)]>;
} // isTwoAddress
-def SUB64i32 : RI<0x2D, RawFrm, (outs), (ins i32imm:$src),
- "sub{q}\t{$src, %rax|%rax, $src}", []>;
+def SUB64i32 : RIi32<0x2D, RawFrm, (outs), (ins i32imm:$src),
+ "sub{q}\t{$src, %rax|%rax, $src}", []>;
// Memory-Register Subtraction
def SUB64mr : RI<0x29, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
@@ -641,8 +676,8 @@ def SBB64ri32 : RIi32<0x81, MRM3r, (outs GR64:$dst),
[(set GR64:$dst, (sube GR64:$src1, i64immSExt32:$src2))]>;
} // isTwoAddress
-def SBB64i32 : RI<0x1D, RawFrm, (outs), (ins i32imm:$src),
- "sbb{q}\t{$src, %rax|%rax, $src}", []>;
+def SBB64i32 : RIi32<0x1D, RawFrm, (outs), (ins i32imm:$src),
+ "sbb{q}\t{$src, %rax|%rax, $src}", []>;
def SBB64mr : RI<0x19, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2),
"sbb{q}\t{$src2, $dst|$dst, $src2}",
@@ -1047,8 +1082,8 @@ def NOT64m : RI<0xF7, MRM2m, (outs), (ins i64mem:$dst), "not{q}\t$dst",
[(store (not (loadi64 addr:$dst)), addr:$dst)]>;
let Defs = [EFLAGS] in {
-def AND64i32 : RI<0x25, RawFrm, (outs), (ins i32imm:$src),
- "and{q}\t{$src, %rax|%rax, $src}", []>;
+def AND64i32 : RIi32<0x25, RawFrm, (outs), (ins i32imm:$src),
+ "and{q}\t{$src, %rax|%rax, $src}", []>;
let isTwoAddress = 1 in {
let isCommutable = 1 in
@@ -1187,8 +1222,8 @@ def XOR64i32 : RIi32<0x35, RawFrm, (outs), (ins i32imm:$src),
// Integer comparison
let Defs = [EFLAGS] in {
-def TEST64i32 : RI<0xa9, RawFrm, (outs), (ins i32imm:$src),
- "test{q}\t{$src, %rax|%rax, $src}", []>;
+def TEST64i32 : RIi32<0xa9, RawFrm, (outs), (ins i32imm:$src),
+ "test{q}\t{$src, %rax|%rax, $src}", []>;
let isCommutable = 1 in
def TEST64rr : RI<0x85, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"test{q}\t{$src2, $src1|$src1, $src2}",
@@ -1210,8 +1245,8 @@ def TEST64mi32 : RIi32<0xF7, MRM0m, (outs),
(implicit EFLAGS)]>;
-def CMP64i32 : RI<0x3D, RawFrm, (outs), (ins i32imm:$src),
- "cmp{q}\t{$src, %rax|%rax, $src}", []>;
+def CMP64i32 : RIi32<0x3D, RawFrm, (outs), (ins i32imm:$src),
+ "cmp{q}\t{$src, %rax|%rax, $src}", []>;
def CMP64rr : RI<0x39, MRMDestReg, (outs), (ins GR64:$src1, GR64:$src2),
"cmp{q}\t{$src2, $src1|$src1, $src2}",
[(X86cmp GR64:$src1, GR64:$src2),
@@ -1884,14 +1919,21 @@ def : Pat<(X86call (i64 texternalsym:$dst)),
(WINCALL64pcrel32 texternalsym:$dst)>, Requires<[IsWin64]>;
// tailcall stuff
-def : Pat<(X86tcret GR64:$dst, imm:$off),
- (TCRETURNri64 GR64:$dst, imm:$off)>;
+def : Pat<(X86tcret GR64_TC:$dst, imm:$off),
+ (TCRETURNri64 GR64_TC:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
+
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi64 addr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
def : Pat<(X86tcret (i64 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>;
+ (TCRETURNdi64 tglobaladdr:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
def : Pat<(X86tcret (i64 texternalsym:$dst), imm:$off),
- (TCRETURNdi64 texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi64 texternalsym:$dst, imm:$off)>,
+ Requires<[In64BitMode]>;
// Comparisons.
diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td
index ae24bfb..b730918 100644
--- a/lib/Target/X86/X86InstrFPStack.td
+++ b/lib/Target/X86/X86InstrFPStack.td
@@ -350,20 +350,27 @@ def FBLDm : FPI<0xDF, MRM4m, (outs), (ins f32mem:$src), "fbld\t$src">;
def FBSTPm : FPI<0xDF, MRM6m, (outs f32mem:$dst), (ins), "fbstp\t$dst">;
// Floating point cmovs.
+class FpIf32CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
+ FpI_<outs, ins, fp, pattern>, Requires<[FPStackf32, HasCMov]>;
+class FpIf64CMov<dag outs, dag ins, FPFormat fp, list<dag> pattern> :
+ FpI_<outs, ins, fp, pattern>, Requires<[FPStackf64, HasCMov]>;
+
multiclass FPCMov<PatLeaf cc> {
- def _Fp32 : FpIf32<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2),
+ def _Fp32 : FpIf32CMov<(outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2),
CondMovFP,
[(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2,
cc, EFLAGS))]>;
- def _Fp64 : FpIf64<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2),
+ def _Fp64 : FpIf64CMov<(outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2),
CondMovFP,
[(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2,
cc, EFLAGS))]>;
def _Fp80 : FpI_<(outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2),
CondMovFP,
[(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2,
- cc, EFLAGS))]>;
+ cc, EFLAGS))]>,
+ Requires<[HasCMov]>;
}
+
let Uses = [EFLAGS], isTwoAddress = 1 in {
defm CMOVB : FPCMov<X86_COND_B>;
defm CMOVBE : FPCMov<X86_COND_BE>;
@@ -375,6 +382,7 @@ defm CMOVNE : FPCMov<X86_COND_NE>;
defm CMOVNP : FPCMov<X86_COND_NP>;
}
+let Predicates = [HasCMov] in {
// These are not factored because there's no clean way to pass DA/DB.
def CMOVB_F : FPI<0xC0, AddRegFrm, (outs RST:$op), (ins),
"fcmovb\t{$op, %st(0)|%ST(0), $op}">, DA;
@@ -392,6 +400,7 @@ def CMOVNE_F : FPI<0xC8, AddRegFrm, (outs RST:$op), (ins),
"fcmovne\t{$op, %st(0)|%ST(0), $op}">, DB;
def CMOVNP_F : FPI<0xD8, AddRegFrm, (outs RST:$op), (ins),
"fcmovnu\t{$op, %st(0)|%ST(0), $op}">, DB;
+} // Predicates = [HasCMov]
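
Gating these on HasCMov is sound because integer CMOV and FCMOV are advertised together: CPUID leaf 1 reports CMOV in EDX bit 15, and FCMOV is present when that bit and the FPU bit are both set. Pre-P6 chips have neither, and on those the CMOV_* pseudos added below take over. A detection sketch, assuming a GCC/Clang-style compiler on x86:

  #include <cstdint>

  // CPUID leaf 1: EDX bit 15 is CMOV; FCMOV additionally needs the FPU
  // bit (EDX bit 0), which is set on anything this backend targets.
  bool hasCMov() {
    uint32_t eax = 1, ebx, ecx, edx;
    __asm__("cpuid" : "+a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx));
    return (edx >> 15) & 1;
  }
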
// Floating point loads & stores.
let canFoldAsLoad = 1 in {
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 4fd91bb..139a905 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -266,6 +266,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::MOV16rr, X86::MOV16mr, 0, 0 },
{ X86::MOV32ri, X86::MOV32mi, 0, 0 },
{ X86::MOV32rr, X86::MOV32mr, 0, 0 },
+ { X86::MOV32rr_TC, X86::MOV32mr_TC, 0, 0 },
{ X86::MOV64ri32, X86::MOV64mi32, 0, 0 },
{ X86::MOV64rr, X86::MOV64mr, 0, 0 },
{ X86::MOV8ri, X86::MOV8mi, 0, 0 },
@@ -301,6 +302,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::SETPr, X86::SETPm, 0, 0 },
{ X86::SETSr, X86::SETSm, 0, 0 },
{ X86::TAILJMPr, X86::TAILJMPm, 1, 0 },
+ { X86::TAILJMPr64, X86::TAILJMPm64, 1, 0 },
{ X86::TEST16ri, X86::TEST16mi, 1, 0 },
{ X86::TEST32ri, X86::TEST32mi, 1, 0 },
{ X86::TEST64ri32, X86::TEST64mi32, 1, 0 },
@@ -376,6 +378,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
{ X86::MOV16rr, X86::MOV16rm, 0 },
{ X86::MOV32rr, X86::MOV32rm, 0 },
+ { X86::MOV32rr_TC, X86::MOV32rm_TC, 0 },
{ X86::MOV64rr, X86::MOV64rm, 0 },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
{ X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
@@ -675,6 +678,8 @@ bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
case X86::MOV16rr:
case X86::MOV32rr:
case X86::MOV64rr:
+ case X86::MOV32rr_TC:
+ case X86::MOV64rr_TC:
// FP Stack register class copies
case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080:
@@ -1901,6 +1906,10 @@ bool X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
Opc = X86::MOV16rr;
} else if (CommonRC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8rr;
+ } else if (CommonRC == &X86::GR64_TCRegClass) {
+ Opc = X86::MOV64rr_TC;
+ } else if (CommonRC == &X86::GR32_TCRegClass) {
+ Opc = X86::MOV32rr_TC;
} else if (CommonRC == &X86::RFP32RegClass) {
Opc = X86::MOV_Fp3232;
} else if (CommonRC == &X86::RFP64RegClass || CommonRC == &X86::RSTRegClass) {
@@ -2038,6 +2047,10 @@ static unsigned getStoreRegOpcode(unsigned SrcReg,
Opc = X86::MOV16mr;
} else if (RC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8mr;
+ } else if (RC == &X86::GR64_TCRegClass) {
+ Opc = X86::MOV64mr_TC;
+ } else if (RC == &X86::GR32_TCRegClass) {
+ Opc = X86::MOV32mr_TC;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::ST_FpP80m; // pops
} else if (RC == &X86::RFP64RegClass) {
@@ -2131,6 +2144,10 @@ static unsigned getLoadRegOpcode(unsigned DestReg,
Opc = X86::MOV16rm;
} else if (RC == &X86::GR8_NOREXRegClass) {
Opc = X86::MOV8rm;
+ } else if (RC == &X86::GR64_TCRegClass) {
+ Opc = X86::MOV64rm_TC;
+ } else if (RC == &X86::GR32_TCRegClass) {
+ Opc = X86::MOV32rm_TC;
} else if (RC == &X86::RFP80RegClass) {
Opc = X86::LD_Fp80m;
} else if (RC == &X86::RFP64RegClass) {
diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td
index 071c5aa..1225b68 100644
--- a/lib/Target/X86/X86InstrInfo.td
+++ b/lib/Target/X86/X86InstrInfo.td
@@ -21,6 +21,7 @@ def SDTIntShiftDOp: SDTypeProfile<1, 3,
[SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>,
SDTCisInt<0>, SDTCisInt<3>]>;
+// FIXME: Should be modelled as returning i32
def SDTX86CmpTest : SDTypeProfile<0, 2, [SDTCisSameAs<0, 1>]>;
def SDTX86Cmov : SDTypeProfile<1, 4,
@@ -83,7 +84,6 @@ def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>;
def X86shrd : SDNode<"X86ISD::SHRD", SDTIntShiftDOp>;
def X86cmp : SDNode<"X86ISD::CMP" , SDTX86CmpTest>;
-
def X86bt : SDNode<"X86ISD::BT", SDTX86CmpTest>;
def X86cmov : SDNode<"X86ISD::CMOV", SDTX86Cmov>;
@@ -234,6 +234,15 @@ def i8mem_NOREX : Operand<i64> {
let ParserMatchClass = X86MemAsmOperand;
}
+// Special i32mem for addresses of load-folding tail calls. These are not
+// allowed to use callee-saved registers since they must be scheduled
+// after callee-saved registers are popped.
+def i32mem_TC : Operand<i32> {
+ let PrintMethod = "printi32mem";
+ let MIOperandInfo = (ops GR32_TC, i8imm, GR32_TC, i32imm, i8imm);
+ let ParserMatchClass = X86MemAsmOperand;
+}
+
def lea32mem : Operand<i32> {
let PrintMethod = "printlea32mem";
let MIOperandInfo = (ops GR32, i8imm, GR32_NOSP, i32imm);
@@ -288,6 +297,8 @@ def tls32addr : ComplexPattern<i32, 4, "SelectTLSADDRAddr",
//===----------------------------------------------------------------------===//
// X86 Instruction Predicate Definitions.
+def HasCMov : Predicate<"Subtarget->hasCMov()">;
+def NoCMov : Predicate<"!Subtarget->hasCMov()">;
def HasMMX : Predicate<"Subtarget->hasMMX()">;
def HasSSE1 : Predicate<"Subtarget->hasSSE1()">;
def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
@@ -696,30 +707,33 @@ def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl),
// Tail call stuff.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNdi : I<0, Pseudo, (outs),
- (ins i32imm:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
-def TCRETURNri : I<0, Pseudo, (outs),
- (ins GR32:$dst, i32imm:$offset, variable_ops),
- "#TC_RETURN $dst $offset",
- []>;
-
-// FIXME: The should be pseudo instructions that are lowered when going to
-// mcinst.
-let isCall = 1, isBranch = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPd : Ii32<0xE9, RawFrm, (outs),(ins i32imm_pcrel:$dst,variable_ops),
+ let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0,
+ MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7,
+ XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7,
+ XMM8, XMM9, XMM10, XMM11, XMM12, XMM13, XMM14, XMM15, EFLAGS],
+ Uses = [ESP] in {
+ def TCRETURNdi : I<0, Pseudo, (outs),
+ (ins i32imm_pcrel:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNri : I<0, Pseudo, (outs),
+ (ins GR32_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+ def TCRETURNmi : I<0, Pseudo, (outs),
+ (ins i32mem_TC:$dst, i32imm:$offset, variable_ops),
+ "#TC_RETURN $dst $offset", []>;
+
+ // FIXME: These should be pseudo instructions that are lowered when going to
+ // MCInst.
+ def TAILJMPd : Ii32PCRel<0xE9, RawFrm, (outs),
+ (ins i32imm_pcrel:$dst, variable_ops),
"jmp\t$dst # TAILCALL",
[]>;
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32:$dst, variable_ops),
+ def TAILJMPr : I<0xFF, MRM4r, (outs), (ins GR32_TC:$dst, variable_ops),
"jmp{l}\t{*}$dst # TAILCALL",
[]>;
-let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in
- def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem:$dst, variable_ops),
- "jmp\t{*}$dst # TAILCALL", []>;
+ def TAILJMPm : I<0xFF, MRM4m, (outs), (ins i32mem_TC:$dst, variable_ops),
+ "jmp{l}\t{*}$dst # TAILCALL", []>;
+}
//===----------------------------------------------------------------------===//
// Miscellaneous Instructions...
@@ -1032,6 +1046,22 @@ def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"mov{l}\t{$src, $dst|$dst, $src}",
[(store GR32:$src, addr:$dst)]>;
+/// Versions of MOV32rr, MOV32rm, and MOV32mr for i32mem_TC and GR32_TC.
+let neverHasSideEffects = 1 in
+def MOV32rr_TC : I<0x89, MRMDestReg, (outs GR32_TC:$dst), (ins GR32_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}", []>;
+
+let mayLoad = 1,
+ canFoldAsLoad = 1, isReMaterializable = 1 in
+def MOV32rm_TC : I<0x8B, MRMSrcMem, (outs GR32_TC:$dst), (ins i32mem_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}",
+ []>;
+
+let mayStore = 1 in
+def MOV32mr_TC : I<0x89, MRMDestMem, (outs), (ins i32mem_TC:$dst, GR32_TC:$src),
+ "mov{l}\t{$src, $dst|$dst, $src}",
+ []>;
+
// Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
// that they can be used for copying and storing h registers, which can't be
// encoded when a REX prefix is present.
@@ -1185,19 +1215,7 @@ let isTwoAddress = 1 in {
// Conditional moves
let Uses = [EFLAGS] in {
-// X86 doesn't have 8-bit conditional moves. Use a customInserter to
-// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
-// however that requires promoting the operands, and can induce additional
-// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
-// clobber EFLAGS, because if one of the operands is zero, the expansion
-// could involve an xor.
-let usesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS] in
-def CMOV_GR8 : I<0, Pseudo,
- (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
- "#CMOV_GR8 PSEUDO!",
- [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
- imm:$cond, EFLAGS))]>;
-
+let Predicates = [HasCMov] in {
let isCommutable = 1 in {
def CMOVB16rr : I<0x42, MRMSrcReg, // if <u, GR16 = GR16
(outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
@@ -1585,6 +1603,49 @@ def CMOVNO32rm : I<0x41, MRMSrcMem, // if !overflow, GR32 = [mem32]
[(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
X86_COND_NO, EFLAGS))]>,
TB;
+} // Predicates = [HasCMov]
+
+// X86 doesn't have 8-bit conditional moves. Use a customInserter to
+// emit control flow. An alternative to this is to mark i8 SELECT as Promote,
+// however that requires promoting the operands, and can induce additional
+// i8 register pressure. Note that CMOV_GR8 is conservatively considered to
+// clobber EFLAGS, because if one of the operands is zero, the expansion
+// could involve an xor.
+let usesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS] in {
+def CMOV_GR8 : I<0, Pseudo,
+ (outs GR8:$dst), (ins GR8:$src1, GR8:$src2, i8imm:$cond),
+ "#CMOV_GR8 PSEUDO!",
+ [(set GR8:$dst, (X86cmov GR8:$src1, GR8:$src2,
+ imm:$cond, EFLAGS))]>;
+
+let Predicates = [NoCMov] in {
+def CMOV_GR32 : I<0, Pseudo,
+ (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, i8imm:$cond),
+ "#CMOV_GR32* PSEUDO!",
+ [(set GR32:$dst,
+ (X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_GR16 : I<0, Pseudo,
+ (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, i8imm:$cond),
+ "#CMOV_GR16* PSEUDO!",
+ [(set GR16:$dst,
+ (X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>;
+def CMOV_RFP32 : I<0, Pseudo,
+ (outs RFP32:$dst), (ins RFP32:$src1, RFP32:$src2, i8imm:$cond),
+ "#CMOV_RFP32 PSEUDO!",
+ [(set RFP32:$dst, (X86cmov RFP32:$src1, RFP32:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP64 : I<0, Pseudo,
+ (outs RFP64:$dst), (ins RFP64:$src1, RFP64:$src2, i8imm:$cond),
+ "#CMOV_RFP64 PSEUDO!",
+ [(set RFP64:$dst, (X86cmov RFP64:$src1, RFP64:$src2, imm:$cond,
+ EFLAGS))]>;
+def CMOV_RFP80 : I<0, Pseudo,
+ (outs RFP80:$dst), (ins RFP80:$src1, RFP80:$src2, i8imm:$cond),
+ "#CMOV_RFP80 PSEUDO!",
+ [(set RFP80:$dst, (X86cmov RFP80:$src1, RFP80:$src2, imm:$cond,
+ EFLAGS))]>;
+} // Predicates = [NoCMov]
+} // usesCustomInserter = 1, isTwoAddress = 0, Defs = [EFLAGS]
} // Uses = [EFLAGS]
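
The custom inserter replaces each of these pseudos with a branch diamond at the machine-IR level; functionally it computes the equivalent of the sketch below, with EFLAGS already set by a preceding compare. The conservative EFLAGS clobber exists because a zero operand can tempt the expansion into materializing it with an xor:

  #include <cstdint>

  // Branch-based stand-in for a conditional move on a CPU without cmov:
  // the fall-through edge copies one source, the taken edge the other.
  uint8_t cmov8(bool cc, uint8_t t, uint8_t f) {
    uint8_t dst = f;  // fall-through copy
    if (cc)
      dst = t;        // taken-edge copy
    return dst;
  }
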
@@ -4294,14 +4355,21 @@ def : Pat<(store (i32 (X86Wrapper tblockaddress:$src)), addr:$dst),
// Calls
// tailcall stuff
-def : Pat<(X86tcret GR32:$dst, imm:$off),
- (TCRETURNri GR32:$dst, imm:$off)>;
+def : Pat<(X86tcret GR32_TC:$dst, imm:$off),
+ (TCRETURNri GR32_TC:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
+
+def : Pat<(X86tcret (load addr:$dst), imm:$off),
+ (TCRETURNmi addr:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
def : Pat<(X86tcret (i32 tglobaladdr:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi tglobaladdr:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
def : Pat<(X86tcret (i32 texternalsym:$dst), imm:$off),
- (TCRETURNdi texternalsym:$dst, imm:$off)>;
+ (TCRETURNdi texternalsym:$dst, imm:$off)>,
+ Requires<[In32BitMode]>;
// Normal calls, with various flavors of addresses.
def : Pat<(X86call (i32 tglobaladdr:$dst)),
diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td
index eea0eb8..e1203e2 100644
--- a/lib/Target/X86/X86InstrMMX.td
+++ b/lib/Target/X86/X86InstrMMX.td
@@ -272,9 +272,9 @@ defm MMX_PSRAD : MMXI_binop_rmi_int<0xE2, 0x72, MRM4r, "psrad",
// Shift up / down and insert zeros.
def : Pat<(v1i64 (X86vshl VR64:$src, (i8 imm:$amt))),
- (MMX_PSLLQri VR64:$src, imm:$amt)>;
+ (MMX_PSLLQri VR64:$src, (GetLo32XForm imm:$amt))>;
def : Pat<(v1i64 (X86vshr VR64:$src, (i8 imm:$amt))),
- (MMX_PSRLQri VR64:$src, imm:$amt)>;
+ (MMX_PSRLQri VR64:$src, (GetLo32XForm imm:$amt))>;
// Comparison Instructions
defm MMX_PCMPEQB : MMXI_binop_rm_int<0x74, "pcmpeqb", int_x86_mmx_pcmpeq_b>;
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index bd6e1b8..18f9e52 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -379,7 +379,7 @@ let Constraints = "$src1 = $dst" in
def MOVSSrr : SSI<0x10, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, FR32:$src2),
"movss\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
+ [(set (v4f32 VR128:$dst),
(movl VR128:$src1, (scalar_to_vector FR32:$src2)))]>;
// Extract the low 32-bit value from one vector and insert it into another.
@@ -1141,7 +1141,7 @@ let Constraints = "$src1 = $dst" in
def MOVSDrr : SDI<0x10, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, FR64:$src2),
"movsd\t{$src2, $dst|$dst, $src2}",
- [(set VR128:$dst,
+ [(set (v2f64 VR128:$dst),
(movl VR128:$src1, (scalar_to_vector FR64:$src2)))]>;
// Extract the low 64-bit value from one vector and insert it into another.
diff --git a/lib/Target/X86/X86JITInfo.cpp b/lib/Target/X86/X86JITInfo.cpp
index d297d24..6f0a8d9 100644
--- a/lib/Target/X86/X86JITInfo.cpp
+++ b/lib/Target/X86/X86JITInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/System/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;
@@ -37,6 +38,10 @@ void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
unsigned NewAddr = (intptr_t)New;
unsigned OldAddr = (intptr_t)OldWord;
*OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.
+
+ // X86 doesn't need to invalidate the processor cache, so just invalidate
+ // Valgrind's cache directly.
+ sys::ValgrindDiscardTranslations(Old, 5);
}
@@ -393,8 +398,10 @@ X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
*(intptr_t *)(RetAddr - 0xa) = NewVal;
((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
}
+ sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
((unsigned char*)RetAddr)[-1] = 0xE9;
+ sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
}
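
The new calls fix a JIT-under-Valgrind wrinkle: x86 keeps its instruction cache coherent with ordinary stores, but Valgrind caches its own translations of the patched bytes and must be told to drop them. sys::ValgrindDiscardTranslations wraps the client request used in this sketch of the same pattern (assuming the Valgrind development headers are installed):

  #include <cstdint>
  #include <cstring>
  #include <valgrind/valgrind.h>

  // Repoint an E9 rel32 jump and discard Valgrind's stale translation of
  // the 5 patched bytes; no hardware cache flush is needed on x86.
  void repointJump(uint8_t *jmp, const uint8_t *target) {
    int32_t rel = static_cast<int32_t>(target - (jmp + 5)); // rel32 is from the next insn
    std::memcpy(jmp + 1, &rel, sizeof(rel));
    VALGRIND_DISCARD_TRANSLATIONS(jmp, 5);
  }
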
diff --git a/lib/Target/X86/X86MCAsmInfo.cpp b/lib/Target/X86/X86MCAsmInfo.cpp
index 9498810..1afabc9 100644
--- a/lib/Target/X86/X86MCAsmInfo.cpp
+++ b/lib/Target/X86/X86MCAsmInfo.cpp
@@ -70,7 +70,7 @@ X86MCAsmInfoDarwin::X86MCAsmInfoDarwin(const Triple &Triple) {
ExceptionsType = ExceptionHandling::Dwarf;
}
-X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &Triple) {
+X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &T) {
AsmTransCBE = x86_asm_table;
AssemblerDialect = AsmWriterFlavor;
@@ -89,6 +89,11 @@ X86ELFMCAsmInfo::X86ELFMCAsmInfo(const Triple &Triple) {
// Exceptions handling
ExceptionsType = ExceptionHandling::Dwarf;
+
+ // OpenBSD's 32-bit assembler has buggy support for .quad, so split 64-bit
+ // data into two 32-bit words instead.
+ if (T.getOS() == Triple::OpenBSD && T.getArch() == Triple::x86)
+ Data64bitsDirective = 0;
}
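
A null Data64bitsDirective makes the AsmPrinter fall back to emitting 64-bit data in two 32-bit pieces, sidestepping the broken .quad handling. A simplified sketch of that fallback for little-endian x86 (illustrative, not the AsmPrinter's actual code):

  #include <cstdint>
  #include <cstdio>

  // With no 64-bit directive available, split the value into two 32-bit
  // halves, low half first on a little-endian target.
  void emitInt64AsWords(uint64_t V) {
    std::printf("\t.long\t%u\n", static_cast<unsigned>(V));
    std::printf("\t.long\t%u\n", static_cast<unsigned>(V >> 32));
  }
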
MCSection *X86ELFMCAsmInfo::getNonexecutableStackSection(MCContext &Ctx) const {
diff --git a/lib/Target/X86/X86MCTargetExpr.cpp b/lib/Target/X86/X86MCTargetExpr.cpp
deleted file mode 100644
index 17b4fe8..0000000
--- a/lib/Target/X86/X86MCTargetExpr.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-//===- X86MCTargetExpr.cpp - X86 Target Specific MCExpr Implementation ----===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "X86MCTargetExpr.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/MC/MCValue.h"
-#include "llvm/Support/raw_ostream.h"
-using namespace llvm;
-
-X86MCTargetExpr *X86MCTargetExpr::Create(const MCSymbol *Sym, VariantKind K,
- MCContext &Ctx) {
- return new (Ctx) X86MCTargetExpr(Sym, K);
-}
-
-void X86MCTargetExpr::PrintImpl(raw_ostream &OS) const {
- OS << *Sym;
-
- switch (Kind) {
- case Invalid: OS << "@<invalid>"; break;
- case GOT: OS << "@GOT"; break;
- case GOTOFF: OS << "@GOTOFF"; break;
- case GOTPCREL: OS << "@GOTPCREL"; break;
- case GOTTPOFF: OS << "@GOTTPOFF"; break;
- case INDNTPOFF: OS << "@INDNTPOFF"; break;
- case NTPOFF: OS << "@NTPOFF"; break;
- case PLT: OS << "@PLT"; break;
- case TLSGD: OS << "@TLSGD"; break;
- case TPOFF: OS << "@TPOFF"; break;
- }
-}
-
-bool X86MCTargetExpr::EvaluateAsRelocatableImpl(MCValue &Res) const {
- // FIXME: I don't know if this is right, it followed MCSymbolRefExpr.
-
- // Evaluate recursively if this is a variable.
- if (Sym->isVariable())
- return Sym->getValue()->EvaluateAsRelocatable(Res);
-
- Res = MCValue::get(Sym, 0, 0);
- return true;
-}
diff --git a/lib/Target/X86/X86MCTargetExpr.h b/lib/Target/X86/X86MCTargetExpr.h
deleted file mode 100644
index 7de8a5c..0000000
--- a/lib/Target/X86/X86MCTargetExpr.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===- X86MCTargetExpr.h - X86 Target Specific MCExpr -----------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef X86_MCTARGETEXPR_H
-#define X86_MCTARGETEXPR_H
-
-#include "llvm/MC/MCExpr.h"
-
-namespace llvm {
-
-/// X86MCTargetExpr - This class represents symbol variants, like foo@GOT.
-class X86MCTargetExpr : public MCTargetExpr {
-public:
- enum VariantKind {
- Invalid,
- GOT,
- GOTOFF,
- GOTPCREL,
- GOTTPOFF,
- INDNTPOFF,
- NTPOFF,
- PLT,
- TLSGD,
- TPOFF
- };
-private:
- /// Sym - The symbol being referenced.
- const MCSymbol * const Sym;
- /// Kind - The modifier.
- const VariantKind Kind;
-
- X86MCTargetExpr(const MCSymbol *S, VariantKind K) : Sym(S), Kind(K) {}
-public:
- static X86MCTargetExpr *Create(const MCSymbol *Sym, VariantKind K,
- MCContext &Ctx);
-
- void PrintImpl(raw_ostream &OS) const;
- bool EvaluateAsRelocatableImpl(MCValue &Res) const;
-};
-
-} // end namespace llvm
-
-#endif
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index cdb579c..3238cce 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -294,13 +294,20 @@ X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
bool callsEHReturn = false;
+ bool ghcCall = false;
if (MF) {
const MachineFrameInfo *MFI = MF->getFrameInfo();
const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
+ const Function *F = MF->getFunction();
+ ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
}
+ static const unsigned GhcCalleeSavedRegs[] = {
+ 0
+ };
+
static const unsigned CalleeSavedRegs32Bit[] = {
X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
};
@@ -326,7 +333,9 @@ X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
X86::XMM14, X86::XMM15, 0
};
- if (Is64Bit) {
+ if (ghcCall) {
+ return GhcCalleeSavedRegs;
+ } else if (Is64Bit) {
if (IsWin64)
return CalleeSavedRegsWin64;
else
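
GhcCalleeSavedRegs holds only the 0 terminator: the GHC calling convention treats every physical register as caller-saved, so prologue/epilogue insertion finds nothing to spill for such functions. Callee-saved lists are consumed by scanning to the terminator, as in this sketch:

  // Callee-saved lists are 0-terminated arrays of register numbers; for
  // the GHC list the scan stops immediately and no spill slots are made.
  unsigned countCalleeSaved(const unsigned *List) {
    unsigned N = 0;
    while (List[N] != 0)
      ++N;
    return N;  // 0 for GhcCalleeSavedRegs
  }
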
@@ -788,7 +797,7 @@ static int mergeSPUpdates(MachineBasicBlock &MBB,
}
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
- unsigned LabelId,
+ MCSymbol *Label,
unsigned FramePtr) const {
MachineFrameInfo *MFI = MF.getFrameInfo();
MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
@@ -851,7 +860,7 @@ void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
MachineLocation CSSrc(Reg);
- Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
+ Moves.push_back(MachineMove(Label, CSDst, CSSrc));
}
}
@@ -929,10 +938,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
std::vector<MachineMove> &Moves = MMI->getFrameMoves();
const TargetData *TD = MF.getTarget().getTargetData();
uint64_t NumBytes = 0;
- int stackGrowth =
- (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
- TargetFrameInfo::StackGrowsUp ?
- TD->getPointerSize() : -TD->getPointerSize());
+ int stackGrowth = -TD->getPointerSize();
if (HasFP) {
// Calculate required stack adjustment.
@@ -953,26 +959,25 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (needsFrameMoves) {
// Mark the place where EBP/RBP was saved.
- unsigned FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
+ MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);
// Define the current CFA rule to use the provided offset.
if (StackSize) {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
- Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
} else {
// FIXME: Verify & implement for FP
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
}
// Change the rule for the FramePtr to be an "offset" rule.
- MachineLocation FPDst(MachineLocation::VirtualFP,
- 2 * stackGrowth);
+ MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
MachineLocation FPSrc(FramePtr);
- Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
}
// Update EBP with the new base value...
@@ -982,13 +987,13 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (needsFrameMoves) {
// Mark effective beginning of when frame pointer becomes valid.
- unsigned FrameLabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);
+ MCSymbol *FrameLabel = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel);
// Define the current CFA to use the EBP/RBP register.
MachineLocation FPDst(FramePtr);
MachineLocation FPSrc(MachineLocation::VirtualFP);
- Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
+ Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
}
// Mark the FramePtr as live-in in every block except the entry.
@@ -1022,15 +1027,15 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if (!HasFP && needsFrameMoves) {
// Mark callee-saved push instruction.
- unsigned LabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
+ MCSymbol *Label = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);
// Define the current CFA rule to use the provided offset.
unsigned Ptr = StackSize ?
MachineLocation::VirtualFP : StackPtr;
MachineLocation SPDst(Ptr);
MachineLocation SPSrc(Ptr, StackOffset);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
StackOffset += stackGrowth;
}
}
@@ -1094,8 +1099,8 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
if ((NumBytes || PushedRegs) && needsFrameMoves) {
// Mark end of stack pointer adjustment.
- unsigned LabelId = MMI->NextLabelID();
- BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);
+ MCSymbol *Label = MMI->getContext().CreateTempSymbol();
+ BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label);
if (!HasFP && NumBytes) {
// Define the current CFA rule to use the provided offset.
@@ -1103,18 +1108,18 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
MachineLocation SPDst(MachineLocation::VirtualFP);
MachineLocation SPSrc(MachineLocation::VirtualFP,
-StackSize + stackGrowth);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
} else {
// FIXME: Verify & implement for FP
MachineLocation SPDst(StackPtr);
MachineLocation SPSrc(StackPtr, stackGrowth);
- Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
+ Moves.push_back(MachineMove(Label, SPDst, SPSrc));
}
}
// Emit DWARF info specifying the offsets of the callee-saved registers.
if (PushedRegs)
- emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
+ emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
}
}
@@ -1133,13 +1138,12 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
case X86::RETI:
case X86::TCRETURNdi:
case X86::TCRETURNri:
- case X86::TCRETURNri64:
+ case X86::TCRETURNmi:
case X86::TCRETURNdi64:
+ case X86::TCRETURNri64:
+ case X86::TCRETURNmi64:
case X86::EH_RETURN:
case X86::EH_RETURN64:
- case X86::TAILJMPd:
- case X86::TAILJMPr:
- case X86::TAILJMPm:
break; // These are ok
}
@@ -1224,11 +1228,14 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
StackPtr).addReg(DestAddr.getReg());
} else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
- RetOpcode== X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
+ RetOpcode == X86::TCRETURNmi ||
+ RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
+ RetOpcode == X86::TCRETURNmi64) {
+ bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
// Tail call return: adjust the stack pointer and jump to callee.
MBBI = prior(MBB.end());
MachineOperand &JumpTarget = MBBI->getOperand(0);
- MachineOperand &StackAdjust = MBBI->getOperand(1);
+ MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
assert(StackAdjust.isImm() && "Expecting immediate value.");
// Adjust stack pointer.
@@ -1248,10 +1255,17 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
}
// Jump to label or value in register.
- if (RetOpcode == X86::TCRETURNdi|| RetOpcode == X86::TCRETURNdi64) {
- BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
+ if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
+ ? X86::TAILJMPd : X86::TAILJMPd64)).
addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
+ } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
+ MachineInstrBuilder MIB =
+ BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
+ ? X86::TAILJMPm : X86::TAILJMPm64));
+ for (unsigned i = 0; i != 5; ++i)
+ MIB.addOperand(MBBI->getOperand(i));
} else if (RetOpcode == X86::TCRETURNri64) {
BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
} else {
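
The isMem ? 5 : 1 index above follows from how this backend encodes a memory reference: one x86 address occupies five consecutive MachineOperands, matching the (ops base, scale, index, disp, segment) layout of i32mem_TC and i64mem_TC, so TCRETURNmi's stack-adjust immediate comes after them and the TAILJMPm rebuild copies operands 0 through 4:

  // Slot layout of an x86 memory reference inside a MachineInstr, as used
  // by TCRETURNmi/TCRETURNmi64 in this change.
  enum X86AddrSlot {
    AddrBaseReg  = 0,  // base register
    AddrScaleAmt = 1,  // scale factor: 1, 2, 4, or 8
    AddrIndexReg = 2,  // index register
    AddrDisp     = 3,  // displacement
    AddrSegment  = 4,  // segment override
    AddrNumSlots = 5   // hence getOperand(5) for $offset
  };
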
diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h
index 12b2f3e..ac96c4c 100644
--- a/lib/Target/X86/X86RegisterInfo.h
+++ b/lib/Target/X86/X86RegisterInfo.h
@@ -149,7 +149,7 @@ public:
void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS = NULL) const;
- void emitCalleeSavedFrameMoves(MachineFunction &MF, unsigned LabelId,
+ void emitCalleeSavedFrameMoves(MachineFunction &MF, MCSymbol *Label,
unsigned FramePtr) const;
void emitPrologue(MachineFunction &MF) const;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td
index ed2ce6c..76b8f7a 100644
--- a/lib/Target/X86/X86RegisterInfo.td
+++ b/lib/Target/X86/X86RegisterInfo.td
@@ -535,6 +535,13 @@ def GR32_ABCD : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX, EBX]> {
def GR64_ABCD : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RBX]> {
let SubRegClassList = [GR8_ABCD_L, GR8_ABCD_H, GR16_ABCD, GR32_ABCD];
}
+def GR32_TC : RegisterClass<"X86", [i32], 32, [EAX, ECX, EDX]> {
+ let SubRegClassList = [GR8, GR8, GR16];
+}
+def GR64_TC : RegisterClass<"X86", [i64], 64, [RAX, RCX, RDX, RSI, RDI,
+ R8, R9, R11]> {
+ let SubRegClassList = [GR8, GR8, GR16, GR32_TC];
+}
// GR8_NOREX - GR8 registers which do not require a REX prefix.
def GR8_NOREX : RegisterClass<"X86", [i8], 8,
diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp
index adef5bc..f907614 100644
--- a/lib/Target/X86/X86Subtarget.cpp
+++ b/lib/Target/X86/X86Subtarget.cpp
@@ -315,9 +315,14 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS,
// If requesting codegen for X86-64, make sure that 64-bit features
// are enabled.
- if (Is64Bit)
+ if (Is64Bit) {
HasX86_64 = true;
+ // All 64-bit CPUs have CMOV support.
+ HasCMov = true;
+ }
+
DEBUG(dbgs() << "Subtarget features: SSELevel " << X86SSELevel
<< ", 3DNowLevel " << X863DNowLevel
<< ", 64bit " << HasX86_64 << "\n");
diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h
index 594a470..50338d3 100644
--- a/lib/Target/X86/X86Subtarget.h
+++ b/lib/Target/X86/X86Subtarget.h
@@ -133,6 +133,7 @@ public:
PICStyles::Style getPICStyle() const { return PICStyle; }
void setPICStyle(PICStyles::Style Style) { PICStyle = Style; }
+ bool hasCMov() const { return HasCMov; }
bool hasMMX() const { return X86SSELevel >= MMX; }
bool hasSSE1() const { return X86SSELevel >= SSE1; }
bool hasSSE2() const { return X86SSELevel >= SSE2; }
diff --git a/lib/Target/X86/X86TargetObjectFile.cpp b/lib/Target/X86/X86TargetObjectFile.cpp
index c80ae19..c15dfbb 100644
--- a/lib/Target/X86/X86TargetObjectFile.cpp
+++ b/lib/Target/X86/X86TargetObjectFile.cpp
@@ -7,11 +7,12 @@
//
//===----------------------------------------------------------------------===//
-#include "X86MCTargetExpr.h"
#include "X86TargetObjectFile.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/MachineModuleInfoImpls.h"
#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Target/Mangler.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/Dwarf.h"
@@ -19,28 +20,22 @@ using namespace llvm;
using namespace dwarf;
const MCExpr *X8664_MachoTargetObjectFile::
-getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI,
- unsigned Encoding) const {
+getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const {
// On Darwin/X86-64, we can reference dwarf symbols with foo@GOTPCREL+4, which
// is an indirect pc-relative reference.
if (Encoding & (DW_EH_PE_indirect | DW_EH_PE_pcrel)) {
- SmallString<128> Name;
- Mang->getNameWithPrefix(Name, GV, false);
- const MCSymbol *Sym;
- if (GV->hasPrivateLinkage())
- Sym = getContext().GetOrCreateTemporarySymbol(Name);
- else
- Sym = getContext().GetOrCreateSymbol(Name);
+ const MCSymbol *Sym = Mang->getSymbol(GV);
const MCExpr *Res =
- X86MCTargetExpr::Create(Sym, X86MCTargetExpr::GOTPCREL, getContext());
+ MCSymbolRefExpr::Create(Sym, MCSymbolRefExpr::VK_GOTPCREL, getContext());
const MCExpr *Four = MCConstantExpr::Create(4, getContext());
return MCBinaryExpr::CreateAdd(Res, Four, getContext());
}
return TargetLoweringObjectFileMachO::
- getSymbolForDwarfGlobalReference(GV, Mang, MMI, Encoding);
+ getExprForDwarfGlobalReference(GV, Mang, MMI, Encoding, Streamer);
}
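
The +4 in the emitted foo@GOTPCREL+4 appears to compensate for fixup placement: the GOTPCREL relocation resolves relative to the end of the 4-byte field, while a DW_EH_PE_pcrel consumer expects a value relative to the field's start. Adding 4 cancels the difference, as this arithmetic sketch shows:

  #include <cstdint>

  // (sym - (addr + 4)) + 4 == sym - addr: the constant turns an
  // end-of-field-relative relocation into the field-relative offset that
  // the DWARF EH reader expects.
  int64_t dwarfFieldRelative(int64_t sym, int64_t fieldAddr) {
    int64_t gotpcrel = sym - (fieldAddr + 4);  // what the linker computes
    return gotpcrel + 4;                       // what the EH reader wants
  }
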
unsigned X8632_ELFTargetObjectFile::getPersonalityEncoding() const {
diff --git a/lib/Target/X86/X86TargetObjectFile.h b/lib/Target/X86/X86TargetObjectFile.h
index 0444417..f2fd49c 100644
--- a/lib/Target/X86/X86TargetObjectFile.h
+++ b/lib/Target/X86/X86TargetObjectFile.h
@@ -17,14 +17,14 @@
namespace llvm {
class X86TargetMachine;
- /// X8664_MachoTargetObjectFile - This TLOF implementation is used for
- /// Darwin/x86-64.
+ /// X8664_MachoTargetObjectFile - This TLOF implementation is used for Darwin
+ /// x86-64.
class X8664_MachoTargetObjectFile : public TargetLoweringObjectFileMachO {
public:
-
virtual const MCExpr *
- getSymbolForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
- MachineModuleInfo *MMI, unsigned Encoding) const;
+ getExprForDwarfGlobalReference(const GlobalValue *GV, Mangler *Mang,
+ MachineModuleInfo *MMI, unsigned Encoding,
+ MCStreamer &Streamer) const;
};
class X8632_ELFTargetObjectFile : public TargetLoweringObjectFileELF {