path: root/contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff
Diffstat (limited to 'contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff')
-rw-r--r--  contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff  338
1 file changed, 338 insertions, 0 deletions
diff --git a/contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff b/contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff
new file mode 100644
index 0000000..f92dd3a
--- /dev/null
+++ b/contrib/llvm/patches/patch-r263312-llvm-r169939-inline-asm-with-realign.diff
@@ -0,0 +1,338 @@
+Pull in r196939 from upstream llvm trunk (by Reid Kleckner):
+
+ Reland "Fix miscompile of MS inline assembly with stack realignment"
+
+ This re-lands commit r196876, which was reverted in r196879.
+
+ The tests have been fixed to pass on platforms with a stack alignment
+ larger than 4.
+
+ Update to clang side tests will land shortly.
+
+Introduced here: http://svn.freebsd.org/changeset/base/263312
+
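The pattern behind the miscompile is MS-style inline assembly that moves the stack pointer (push/pop) inside a function whose locals force stack realignment. A hypothetical reproducer in MSVC-syntax C++ (illustrative only, not part of this patch) would be:

    int foo() {
      __declspec(align(16)) int r = -1;   // over-aligned local forces stack realignment
      __asm {
        push esi                          // moves esp inside the asm block
        xor  esi, esi
        mov  dword ptr [r], esi           // stores into a realigned local
        pop  esi
      }
      return r;
    }

With this change, asm that adjusts the stack pointer in a realigned function is rejected with the fatal error the two new tests below check for, while MS asm that merely reads the stack (t31 in ms-inline-asm.ll) is accepted.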
+Index: test/CodeGen/X86/inline-asm-stack-realign2.ll
+===================================================================
+--- test/CodeGen/X86/inline-asm-stack-realign2.ll
++++ test/CodeGen/X86/inline-asm-stack-realign2.ll
+@@ -0,0 +1,15 @@
++; RUN: not llc -mtriple=i686-pc-win32 < %s 2>&1 | FileCheck %s
++
++; We don't currently support realigning the stack and adjusting the stack
++; pointer in inline asm. This can even happen in GNU asm.
++
++; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
++
++define i32 @foo() {
++entry:
++ %r = alloca i32, align 16
++ store i32 -1, i32* %r, align 16
++ call void asm sideeffect "push %esi\0A\09xor %esi, %esi\0A\09mov %esi, $0\0A\09pop %esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
++ %0 = load i32* %r, align 16
++ ret i32 %0
++}
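The test above is the GNU-asm flavour of the same problem. As a rough source-level equivalent (hypothetical, GCC/Clang extended asm assumed), the explicit "esp" clobber is what marks the statement as adjusting the stack pointer:

    int foo() {
      int r __attribute__((aligned(16))) = -1;     // forces realignment on i686-pc-win32
      __asm__ __volatile__("push %%esi\n\t"
                           "xor %%esi, %%esi\n\t"
                           "mov %%esi, %0\n\t"
                           "pop %%esi"
                           : "=m"(r)
                           :
                           : "esi", "esp", "cc");  // esp clobber == dynamic stack adjustment
      return r;
    }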
+Index: test/CodeGen/X86/inline-asm-stack-realign.ll
+===================================================================
+--- test/CodeGen/X86/inline-asm-stack-realign.ll
++++ test/CodeGen/X86/inline-asm-stack-realign.ll
+@@ -0,0 +1,16 @@
++; RUN: not llc -mtriple=i686-pc-win32 < %s 2>&1 | FileCheck %s
++
++; We don't currently support realigning the stack and adjusting the stack
++; pointer in inline asm. This commonly happens in MS inline assembly using
++; push and pop.
++
++; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
++
++define i32 @foo() {
++entry:
++ %r = alloca i32, align 16
++ store i32 -1, i32* %r, align 16
++ call void asm sideeffect inteldialect "push esi\0A\09xor esi, esi\0A\09mov dword ptr $0, esi\0A\09pop esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
++ %0 = load i32* %r, align 16
++ ret i32 %0
++}
+Index: test/CodeGen/X86/ms-inline-asm.ll
+===================================================================
+--- test/CodeGen/X86/ms-inline-asm.ll
++++ test/CodeGen/X86/ms-inline-asm.ll
+@@ -5,7 +5,6 @@ entry:
+ %0 = tail call i32 asm sideeffect inteldialect "mov eax, $1\0A\09mov $0, eax", "=r,r,~{eax},~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
+ ret i32 %0
+ ; CHECK: t1
+-; CHECK: movl %esp, %ebp
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+ ; CHECK: mov eax, ecx
+@@ -19,7 +18,6 @@ entry:
+ call void asm sideeffect inteldialect "mov eax, $$1", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
+ ret void
+ ; CHECK: t2
+-; CHECK: movl %esp, %ebp
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+ ; CHECK: mov eax, 1
+@@ -34,7 +32,6 @@ entry:
+ call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %V.addr) nounwind
+ ret void
+ ; CHECK: t3
+-; CHECK: movl %esp, %ebp
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+ ; CHECK: mov eax, DWORD PTR {{[[esp]}}
+@@ -56,7 +53,6 @@ entry:
+ %0 = load i32* %b1, align 4
+ ret i32 %0
+ ; CHECK: t18
+-; CHECK: movl %esp, %ebp
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+ ; CHECK: lea ebx, foo
+@@ -76,7 +72,6 @@ entry:
+ call void asm sideeffect inteldialect "call $0", "r,~{dirflag},~{fpsr},~{flags}"(void ()* @t19_helper) nounwind
+ ret void
+ ; CHECK-LABEL: t19:
+-; CHECK: movl %esp, %ebp
+ ; CHECK: movl ${{_?}}t19_helper, %eax
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+@@ -95,7 +90,6 @@ entry:
+ %0 = load i32** %res, align 4
+ ret i32* %0
+ ; CHECK-LABEL: t30:
+-; CHECK: movl %esp, %ebp
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+ ; CHECK: lea edi, dword ptr [{{_?}}results]
+@@ -103,8 +97,31 @@ entry:
+ ; CHECK: {{## InlineAsm End|#NO_APP}}
+ ; CHECK: {{## InlineAsm Start|#APP}}
+ ; CHECK: .intel_syntax
+-; CHECK: mov dword ptr [esi], edi
++; CHECK: mov dword ptr [esp], edi
+ ; CHECK: .att_syntax
+ ; CHECK: {{## InlineAsm End|#NO_APP}}
+-; CHECK: movl (%esi), %eax
++; CHECK: movl (%esp), %eax
+ }
++
++; Stack realignment plus MS inline asm that does *not* adjust the stack is no
++; longer an error.
++
++define i32 @t31() {
++entry:
++ %val = alloca i32, align 64
++ store i32 -1, i32* %val, align 64
++ call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val) #1
++ %sp = load i32* %val, align 64
++ ret i32 %sp
++; CHECK-LABEL: t31:
++; CHECK: pushl %ebp
++; CHECK: movl %esp, %ebp
++; CHECK: andl $-64, %esp
++; CHECK: {{## InlineAsm Start|#APP}}
++; CHECK: .intel_syntax
++; CHECK: mov dword ptr [esp], esp
++; CHECK: .att_syntax
++; CHECK: {{## InlineAsm End|#NO_APP}}
++; CHECK: movl (%esp), %eax
++; CHECK: ret
++}
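A rough source-level equivalent of t31 (hypothetical, MSVC __asm syntax assumed): the block reads esp but never moves it, so no esp clobber is recorded and realignment can use the usual push ebp / and esp prologue without hitting the fatal error:

    int t31() {
      __declspec(align(64)) int val = -1;   // 64-byte alignment forces realignment
      __asm mov dword ptr [val], esp        // reads esp, does not adjust it
      return val;
    }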
+Index: include/llvm/CodeGen/MachineFrameInfo.h
+===================================================================
+--- include/llvm/CodeGen/MachineFrameInfo.h
++++ include/llvm/CodeGen/MachineFrameInfo.h
+@@ -223,6 +223,10 @@ class MachineFrameInfo {
+ /// Whether the "realign-stack" option is on.
+ bool RealignOption;
+
++ /// True if the function includes inline assembly that adjusts the stack
++ /// pointer.
++ bool HasInlineAsmWithSPAdjust;
++
+ const TargetFrameLowering *getFrameLowering() const;
+ public:
+ explicit MachineFrameInfo(const TargetMachine &TM, bool RealignOpt)
+@@ -451,6 +455,10 @@ class MachineFrameInfo {
+ bool hasCalls() const { return HasCalls; }
+ void setHasCalls(bool V) { HasCalls = V; }
+
++ /// Returns true if the function contains any stack-adjusting inline assembly.
++ bool hasInlineAsmWithSPAdjust() const { return HasInlineAsmWithSPAdjust; }
++ void setHasInlineAsmWithSPAdjust(bool B) { HasInlineAsmWithSPAdjust = B; }
++
+ /// getMaxCallFrameSize - Return the maximum size of a call frame that must be
+ /// allocated for an outgoing function call. This is only available if
+ /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
+Index: include/llvm/CodeGen/MachineFunction.h
+===================================================================
+--- include/llvm/CodeGen/MachineFunction.h
++++ include/llvm/CodeGen/MachineFunction.h
+@@ -131,8 +131,8 @@ class MachineFunction {
+ /// about the control flow of such functions.
+ bool ExposesReturnsTwice;
+
+- /// True if the function includes MS-style inline assembly.
+- bool HasMSInlineAsm;
++ /// True if the function includes any inline assembly.
++ bool HasInlineAsm;
+
+ MachineFunction(const MachineFunction &) LLVM_DELETED_FUNCTION;
+ void operator=(const MachineFunction&) LLVM_DELETED_FUNCTION;
+@@ -218,15 +218,14 @@ class MachineFunction {
+ ExposesReturnsTwice = B;
+ }
+
+- /// Returns true if the function contains any MS-style inline assembly.
+- bool hasMSInlineAsm() const {
+- return HasMSInlineAsm;
++ /// Returns true if the function contains any inline assembly.
++ bool hasInlineAsm() const {
++ return HasInlineAsm;
+ }
+
+- /// Set a flag that indicates that the function contains MS-style inline
+- /// assembly.
+- void setHasMSInlineAsm(bool B) {
+- HasMSInlineAsm = B;
++ /// Set a flag that indicates that the function contains inline assembly.
++ void setHasInlineAsm(bool B) {
++ HasInlineAsm = B;
+ }
+
+ /// getInfo - Keep track of various per-function pieces of information for
+Index: lib/Target/X86/X86FrameLowering.cpp
+===================================================================
+--- lib/Target/X86/X86FrameLowering.cpp
++++ lib/Target/X86/X86FrameLowering.cpp
+@@ -50,7 +50,7 @@ bool X86FrameLowering::hasFP(const MachineFunction
+ return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ RegInfo->needsStackRealignment(MF) ||
+ MFI->hasVarSizedObjects() ||
+- MFI->isFrameAddressTaken() || MF.hasMSInlineAsm() ||
++ MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
+ MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
+ MMI.callsUnwindInit() || MMI.callsEHReturn());
+ }
+Index: lib/Target/X86/X86RegisterInfo.cpp
+===================================================================
+--- lib/Target/X86/X86RegisterInfo.cpp
++++ lib/Target/X86/X86RegisterInfo.cpp
+@@ -347,6 +347,12 @@ BitVector X86RegisterInfo::getReservedRegs(const M
+ "Stack realignment in presence of dynamic allocas is not supported with"
+ "this calling convention.");
+
++ // FIXME: Do a proper analysis of the inline asm to see if it actually
++ // conflicts with the base register we chose.
++ if (MF.hasInlineAsm())
++ report_fatal_error("Stack realignment in presence of dynamic stack "
++ "adjustments is not supported with inline assembly.");
++
+ for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
+ I.isValid(); ++I)
+ Reserved.set(*I);
+@@ -403,18 +409,15 @@ bool X86RegisterInfo::hasBasePointer(const Machine
+ if (!EnableBasePointer)
+ return false;
+
+- // When we need stack realignment and there are dynamic allocas, we can't
+- // reference off of the stack pointer, so we reserve a base pointer.
+- //
+- // This is also true if the function contain MS-style inline assembly. We
+- // do this because if any stack changes occur in the inline assembly, e.g.,
+- // "pusha", then any C local variable or C argument references in the
+- // inline assembly will be wrong because the SP is not properly tracked.
+- if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
+- MF.hasMSInlineAsm())
+- return true;
+-
+- return false;
++ // When we need stack realignment, we can't address the stack from the frame
++ // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
++ // can't address variables from the stack pointer. MS inline asm can
++ // reference locals while also adjusting the stack pointer. When we can't
++ // use both the SP and the FP, we need a separate base pointer register.
++ bool CantUseFP = needsStackRealignment(MF);
++ bool CantUseSP =
++ MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
++ return CantUseFP && CantUseSP;
+ }
+
+ bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
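The rewritten hasBasePointer boils down to "FP unusable and SP unusable". A standalone sketch of the same decision (illustrative names, not LLVM's API):

    // Under realignment, locals are addressed off the realigned stack pointer,
    // not the frame pointer; dynamic allocas or SP-adjusting asm move the stack
    // pointer at run time, so it cannot be used either. Only when both fail is
    // a dedicated base register reserved.
    bool needsBaseRegister(bool needsRealignment, bool hasVarSizedObjects,
                           bool hasSPAdjustingInlineAsm) {
      const bool cantUseFP = needsRealignment;
      const bool cantUseSP = hasVarSizedObjects || hasSPAdjustingInlineAsm;
      return cantUseFP && cantUseSP;
    }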
+Index: lib/MC/MCParser/AsmParser.cpp
+===================================================================
+--- lib/MC/MCParser/AsmParser.cpp
++++ lib/MC/MCParser/AsmParser.cpp
+@@ -4192,6 +4192,11 @@ bool AsmParser::parseMSInlineAsm(
+ AsmStrRewrites.push_back(AsmRewrite(AOK_Input, Start, SymName.size()));
+ }
+ }
++
++ // Consider implicit defs to be clobbers. Think of cpuid and push.
++ const uint16_t *ImpDefs = Desc.getImplicitDefs();
++ for (unsigned I = 0, E = Desc.getNumImplicitDefs(); I != E; ++I)
++ ClobberRegs.push_back(ImpDefs[I]);
+ }
+
+ // Set the number of Outputs and Inputs.
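The point of folding implicit defs into the clobber list: instructions like cpuid (which writes eax, ebx, ecx and edx) and push (which writes esp) change registers that never appear as explicit operands in the MS asm text, so without this loop those registers would be missing from the constraint string built for the block. A hypothetical block it affects:

    int cpu_signature() {
      int sig;
      __asm {
        mov eax, 1
        cpuid               // implicitly defines eax, ebx, ecx, edx
        mov sig, eax
      }
      return sig;           // ebx/ecx/edx now appear as ~{reg} clobbers in the IR
    }

In particular, push's implicit esp def is how a stack-pointer adjustment inside an MS asm block becomes visible to the backend.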
+Index: lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+===================================================================
+--- lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
++++ lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+@@ -851,12 +851,20 @@ void RegsForValue::AddInlineAsmOperands(unsigned C
+ SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
+ Ops.push_back(Res);
+
++ unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
+ for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
+ unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
+ MVT RegisterVT = RegVTs[Value];
+ for (unsigned i = 0; i != NumRegs; ++i) {
+ assert(Reg < Regs.size() && "Mismatch in # registers expected");
+- Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
++ unsigned TheReg = Regs[Reg++];
++ Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
++
++ // Notice if we clobbered the stack pointer. Yes, inline asm can do this.
++ if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
++ MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
++ MFI->setHasInlineAsmWithSPAdjust(true);
++ }
+ }
+ }
+ }
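At selection time the adjustment is visible only as a clobber of the target's stack-pointer register: for MS blocks the esp clobber arrives via the implicit-def handling added in AsmParser.cpp above, and GNU asm can name it directly. When AddInlineAsmOperands sees that register in a Kind_Clobber group, it sets the new MachineFrameInfo flag. A minimal GNU-asm statement that would be flagged (illustrative only):

    void touches_sp() {
      // "esp" in the clobber list becomes ~{esp} in the IR constraint string,
      // which is what the Kind_Clobber check above detects.
      __asm__ __volatile__("push %%eax\n\t"
                           "pop %%eax"
                           :
                           :
                           : "eax", "esp");
    }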
+Index: lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+===================================================================
+--- lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
++++ lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+@@ -428,7 +428,9 @@ bool SelectionDAGISel::runOnMachineFunction(Machin
+
+ SDB->init(GFI, *AA, LibInfo);
+
+- MF->setHasMSInlineAsm(false);
++ MF->setHasInlineAsm(false);
++ MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(false);
++
+ SelectAllBasicBlocks(Fn);
+
+ // If the first basic block in the function has live ins that need to be
+@@ -511,7 +513,7 @@ bool SelectionDAGISel::runOnMachineFunction(Machin
+ for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
+ ++I) {
+
+- if (MFI->hasCalls() && MF->hasMSInlineAsm())
++ if (MFI->hasCalls() && MF->hasInlineAsm())
+ break;
+
+ const MachineBasicBlock *MBB = I;
+@@ -522,8 +524,8 @@ bool SelectionDAGISel::runOnMachineFunction(Machin
+ II->isStackAligningInlineAsm()) {
+ MFI->setHasCalls(true);
+ }
+- if (II->isMSInlineAsm()) {
+- MF->setHasMSInlineAsm(true);
++ if (II->isInlineAsm()) {
++ MF->setHasInlineAsm(true);
+ }
+ }
+ }