Diffstat (limited to 'contrib/llvm/lib/Target/X86/X86TargetMachine.cpp')
-rw-r--r--  contrib/llvm/lib/Target/X86/X86TargetMachine.cpp | 119
1 file changed, 84 insertions(+), 35 deletions(-)
diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
index 1fc6b20..646cff7 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -14,9 +14,10 @@
 #include "X86TargetMachine.h"
 #include "X86.h"
 #include "X86TargetObjectFile.h"
+#include "X86TargetTransformInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/IR/Function.h"
-#include "llvm/PassManager.h"
+#include "llvm/IR/LegacyPassManager.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/FormattedStream.h"
 #include "llvm/Support/TargetRegistry.h"
@@ -36,10 +37,10 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
     return make_unique<TargetLoweringObjectFileMachO>();
   }
 
-  if (TT.isOSLinux())
-    return make_unique<X86LinuxTargetObjectFile>();
+  if (TT.isOSLinux() || TT.isOSNaCl())
+    return make_unique<X86LinuxNaClTargetObjectFile>();
   if (TT.isOSBinFormatELF())
-    return make_unique<TargetLoweringObjectFileELF>();
+    return make_unique<X86ELFTargetObjectFile>();
   if (TT.isKnownWindowsMSVCEnvironment())
     return make_unique<X86WindowsTargetObjectFile>();
   if (TT.isOSBinFormatCOFF())
@@ -47,19 +48,56 @@ static std::unique_ptr<TargetLoweringObjectFile> createTLOF(const Triple &TT) {
   llvm_unreachable("unknown subtarget type");
 }
 
+static std::string computeDataLayout(const Triple &TT) {
+  // X86 is little endian
+  std::string Ret = "e";
+
+  Ret += DataLayout::getManglingComponent(TT);
+  // X86 and x32 have 32 bit pointers.
+  if ((TT.isArch64Bit() &&
+       (TT.getEnvironment() == Triple::GNUX32 || TT.isOSNaCl())) ||
+      !TT.isArch64Bit())
+    Ret += "-p:32:32";
+
+  // Some ABIs align 64 bit integers and doubles to 64 bits, others to 32.
+  if (TT.isArch64Bit() || TT.isOSWindows() || TT.isOSNaCl())
+    Ret += "-i64:64";
+  else
+    Ret += "-f64:32:64";
+
+  // Some ABIs align long double to 128 bits, others to 32.
+  if (TT.isOSNaCl())
+    ; // No f80
+  else if (TT.isArch64Bit() || TT.isOSDarwin())
+    Ret += "-f80:128";
+  else
+    Ret += "-f80:32";
+
+  // The registers can hold 8, 16, 32 or, in x86-64, 64 bits.
+  if (TT.isArch64Bit())
+    Ret += "-n8:16:32:64";
+  else
+    Ret += "-n8:16:32";
+
+  // The stack is aligned to 32 bits on some ABIs and 128 bits on others.
+  if (!TT.isArch64Bit() && TT.isOSWindows())
+    Ret += "-a:0:32-S32";
+  else
+    Ret += "-S128";
+
+  return Ret;
+}
+
 /// X86TargetMachine ctor - Create an X86 target.
 ///
 X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
                                    StringRef FS, const TargetOptions &Options,
                                    Reloc::Model RM, CodeModel::Model CM,
                                    CodeGenOpt::Level OL)
-    : LLVMTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL),
+    : LLVMTargetMachine(T, computeDataLayout(Triple(TT)), TT, CPU, FS, Options,
+                        RM, CM, OL),
       TLOF(createTLOF(Triple(getTargetTriple()))),
       Subtarget(TT, CPU, FS, *this, Options.StackAlignmentOverride) {
-  // default to hard float ABI
-  if (Options.FloatABIType == FloatABI::Default)
-    this->Options.FloatABIType = FloatABI::Hard;
-
   // Windows stack unwinder gets confused when execution flow "falls through"
   // after a call to 'noreturn' function.
   // To prevent that, we emit a trap for 'unreachable' IR instructions.
@@ -67,6 +105,13 @@ X86TargetMachine::X86TargetMachine(const Target &T, StringRef TT, StringRef CPU,
   if (Subtarget.isTargetWin64())
     this->Options.TrapUnreachable = true;
 
+  // TODO: By default, all reciprocal estimate operations are off because
+  // that matches the behavior before TargetRecip was added (except for btver2
+  // which used subtarget features to enable this type of codegen).
+  // We should change this to match GCC behavior where everything but
+  // scalar division estimates are turned on by default with -ffast-math.
+  this->Options.Reciprocals.setDefaults("all", false, 1);
+
   initAsmInfo();
 }
 
@@ -74,11 +119,8 @@ X86TargetMachine::~X86TargetMachine() {}
 
 const X86Subtarget *
 X86TargetMachine::getSubtargetImpl(const Function &F) const {
-  AttributeSet FnAttrs = F.getAttributes();
-  Attribute CPUAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-cpu");
-  Attribute FSAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "target-features");
+  Attribute CPUAttr = F.getFnAttribute("target-cpu");
+  Attribute FSAttr = F.getFnAttribute("target-features");
 
   std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
                         ? CPUAttr.getValueAsString().str()
@@ -92,14 +134,15 @@ X86TargetMachine::getSubtargetImpl(const Function &F) const {
   // function before we can generate a subtarget. We also need to use
   // it as a key for the subtarget since that can be the only difference
   // between two functions.
-  Attribute SFAttr =
-      FnAttrs.getAttribute(AttributeSet::FunctionIndex, "use-soft-float");
-  bool SoftFloat = !SFAttr.hasAttribute(Attribute::None)
-                       ? SFAttr.getValueAsString() == "true"
-                       : Options.UseSoftFloat;
-
-  auto &I = SubtargetMap[CPU + FS + (SoftFloat ? "use-soft-float=true"
-                                               : "use-soft-float=false")];
+  bool SoftFloat =
+      F.hasFnAttribute("use-soft-float") &&
+      F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+  // If the soft float attribute is set on the function turn on the soft float
+  // subtarget feature.
+  if (SoftFloat)
+    FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+  auto &I = SubtargetMap[CPU + FS];
   if (!I) {
     // This needs to be done before we create a new subtarget since any
     // creation will depend on the TM and the code generation flags on the
@@ -120,15 +163,12 @@ UseVZeroUpper("x86-use-vzeroupper", cl::Hidden,
   cl::init(true));
 
 //===----------------------------------------------------------------------===//
-// X86 Analysis Pass Setup
+// X86 TTI query.
 //===----------------------------------------------------------------------===//
 
-void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
-  // Add first the target-independent BasicTTI pass, then our X86 pass. This
-  // allows the X86 pass to delegate to the target independent layer when
-  // appropriate.
-  PM.add(createBasicTargetTransformInfoPass(this));
-  PM.add(createX86TargetTransformInfoPass(this));
+TargetIRAnalysis X86TargetMachine::getTargetIRAnalysis() {
+  return TargetIRAnalysis(
+      [this](Function &F) { return TargetTransformInfo(X86TTIImpl(this, F)); });
 }
 
@@ -147,16 +187,14 @@ public:
     return getTM<X86TargetMachine>();
   }
 
-  const X86Subtarget &getX86Subtarget() const {
-    return *getX86TargetMachine().getSubtargetImpl();
-  }
-
   void addIRPasses() override;
   bool addInstSelector() override;
   bool addILPOpts() override;
+  bool addPreISel() override;
   void addPreRegAlloc() override;
   void addPostRegAlloc() override;
   void addPreEmitPass() override;
+  void addPreSched2() override;
 };
 } // namespace
 
@@ -175,7 +213,8 @@ bool X86PassConfig::addInstSelector() {
   addPass(createX86ISelDag(getX86TargetMachine(), getOptLevel()));
 
   // For ELF, cleanup any local-dynamic TLS accesses.
-  if (getX86Subtarget().isTargetELF() && getOptLevel() != CodeGenOpt::None)
+  if (Triple(TM->getTargetTriple()).isOSBinFormatELF() &&
+      getOptLevel() != CodeGenOpt::None)
     addPass(createCleanupLocalDynamicTLSPass());
 
   addPass(createX86GlobalBaseRegPass());
@@ -188,6 +227,14 @@ bool X86PassConfig::addILPOpts() {
   return true;
 }
 
+bool X86PassConfig::addPreISel() {
+  // Only add this pass for 32-bit x86 Windows.
+  Triple TT(TM->getTargetTriple());
+  if (TT.isOSWindows() && TT.getArch() == Triple::x86)
+    addPass(createX86WinEHStatePass());
+  return true;
+}
+
 void X86PassConfig::addPreRegAlloc() {
   addPass(createX86CallFrameOptimization());
 }
@@ -196,8 +243,10 @@ void X86PassConfig::addPostRegAlloc() {
   addPass(createX86FloatingPointStackifierPass());
 }
 
+void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }
+
 void X86PassConfig::addPreEmitPass() {
-  if (getOptLevel() != CodeGenOpt::None && getX86Subtarget().hasSSE2())
+  if (getOptLevel() != CodeGenOpt::None)
     addPass(createExecutionDependencyFixPass(&X86::VR128RegClass));
 
   if (UseVZeroUpper)
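The computeDataLayout() helper added above builds the module data layout string branch by branch. As a rough, non-authoritative illustration of what those branches produce (assuming LLVM 3.7-era headers; the two triples and the expected strings are inferred from the hunk, not taken from this commit), the resulting strings parse back cleanly with llvm::DataLayout:

// Illustrative sketch only: the layout strings below are what the branches in
// computeDataLayout() appear to yield for x86_64-unknown-linux-gnu and
// i686-pc-windows-msvc; treat them as assumptions, not output of this commit.
#include "llvm/IR/DataLayout.h"
#include <cassert>

int main() {
  // 64-bit Linux: ELF mangling, default 64-bit pointers, f80 aligned to 128 bits.
  llvm::DataLayout DL64("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
  // 32-bit Windows (MSVC): COFF/x86 mangling, 32-bit pointers, 32-bit stack alignment.
  llvm::DataLayout DL32("e-m:x-p:32:32-i64:64-f80:32-n8:16:32-a:0:32-S32");

  assert(DL64.getPointerSize() == 8); // pointer sizes are reported in bytes
  assert(DL32.getPointerSize() == 4);
  return 0;
}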
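The final hunks drop addAnalysisPasses() in favour of the getTargetIRAnalysis() hook. A minimal sketch of how a driver might consume that hook with the legacy pass manager follows (assuming LLVM 3.7-era APIs; addTargetAnalyses is a hypothetical helper, not part of this change):

// Hedged sketch: registers the target's TTI with a legacy pass manager.
// Assumes LLVM 3.7-era APIs; addTargetAnalyses is a hypothetical helper.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/Target/TargetMachine.h"

void addTargetAnalyses(llvm::legacy::PassManager &PM, llvm::TargetMachine &TM) {
  // The wrapper pass answers TargetTransformInfo queries through the analysis
  // returned by getTargetIRAnalysis(), i.e. the lambda constructing X86TTIImpl.
  PM.add(llvm::createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
}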