diff options
author | dim <dim@FreeBSD.org> | 2014-03-26 07:42:43 +0000 |
---|---|---|
committer | dim <dim@FreeBSD.org> | 2014-03-26 07:42:43 +0000 |
commit | 45ae227ed48f53447b0000be4c2f1cb142fa5237 (patch) | |
tree | 2c3d1790f54e2af0e10eeb88cb26a0d91f029053 /contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff | |
parent | fb422e6d310915f9e2641190198698d922f7ef58 (diff) | |
download | FreeBSD-src-45ae227ed48f53447b0000be4c2f1cb142fa5237.zip FreeBSD-src-45ae227ed48f53447b0000be4c2f1cb142fa5237.tar.gz |
MFC r263312:
Pull in r196939 from upstream llvm trunk (by Reid Kleckner):
Reland "Fix miscompile of MS inline assembly with stack realignment"
This re-lands commit r196876, which was reverted in r196879.
The tests have been fixed to pass on platforms with a stack alignment
larger than 4.
Update to clang side tests will land shortly.
Pull in r196986 from upstream llvm trunk (by Reid Kleckner):
Revert the backend fatal error from r196939
The combination of inline asm, stack realignment, and dynamic allocas
turns out to be too common to reject out of hand.
ASan inserts empty inline asm fragments and uses aligned allocas.
Compiling any trivial function containing a dynamic alloca with ASan is
enough to trigger the check.
XFAIL the test cases that would be miscompiled and add one that uses the
relevant functionality.
Pull in r202930 from upstream llvm trunk (by Hans Wennborg):
Check for dynamic allocas and inline asm that clobbers sp before building
selection dag (PR19012)
In X86SelectionDagInfo::EmitTargetCodeForMemcpy we check with MachineFrameInfo
to make sure that ESI isn't used as a base pointer register before we choose to
emit rep movs (which clobbers esi).
The problem is that MachineFrameInfo wouldn't know about dynamic allocas or
inline asm that clobbers the stack pointer until SelectionDAGBuilder has
encountered them.
This patch fixes the problem by checking for such things when building the
FunctionLoweringInfo.
Differential Revision: http://llvm-reviews.chandlerc.com/D2954
Together, these commits fix the problem encountered in the devel/emacs
port on the i386 architecture, where a combination of stack realignment,
alloca() and memcpy() could incidentally clobber the %esi register,
leading to segfaults in the temacs build-time utility.
See also: http://llvm.org/PR18171 and http://llvm.org/PR19012
Reported by: ashish
PR: ports/183064
MFC r263313:
Pull in r203311 from upstream llvm trunk (by Arnold Schwaighofer):
ISel: Make VSELECT selection terminate in cases where the condition type has to
be split and the result type widened.
When the condition of a vselect has to be split it makes no sense widening the
vselect and thereby widening the condition. We end up in an endless loop of
widening (vselect result type) and splitting (condition mask type) doing this.
Instead, split both the condition and the vselect and widen the result.
I ran this over the test suite with i686 and mattr=+sse and saw no regressions.
Fixes PR18036.
With this fix the original problem case from the graphics/rawtherapee
port (posted in http://llvm.org/PR18036 ) now compiles within ~97MB RSS.
Reported by: mandree
MFC r263320:
Add separate patch files for all the customizations we have currently
applied to our copy of llvm/clang. These can be applied in alphabetical
order to a pristine llvm/clang 3.4 release source tree, to result in the
same version used in FreeBSD.
This is intended to clearly document all the changes until now, which
mostly consist of cherry pickings from the respective upstream trunks,
plus a number of hand-written FreeBSD-specific ones. Hopefully those
can eventually be cleaned up and sent upstream too.
Diffstat (limited to 'contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff')
-rw-r--r-- | contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff | 231 |
1 files changed, 231 insertions, 0 deletions
diff --git a/contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff b/contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff new file mode 100644 index 0000000..041e263 --- /dev/null +++ b/contrib/llvm/patches/patch-r262261-llvm-r198286-sparc.diff @@ -0,0 +1,231 @@ +Pull in r198286 from upstream llvm trunk (by Venkatraman Govindaraju): + + [Sparc] Handle atomic loads/stores in sparc backend. + +Introduced here: http://svn.freebsd.org/changeset/base/262261 + +Index: lib/Target/Sparc/SparcInstrInfo.td +=================================================================== +--- lib/Target/Sparc/SparcInstrInfo.td ++++ lib/Target/Sparc/SparcInstrInfo.td +@@ -975,6 +975,33 @@ let rs1 = 0 in + def : Pat<(ctpop i32:$src), + (POPCrr (SRLri $src, 0))>; + ++// Atomic swap. ++let hasSideEffects =1, rd = 0, rs1 = 0b01111, rs2 = 0 in ++ def STBAR : F3_1<2, 0b101000, (outs), (ins), "stbar", []>; ++ ++let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in ++ def MEMBARi : F3_2<2, 0b101000, (outs), (ins i32imm:$simm13), ++ "membar $simm13", []>; ++ ++let Constraints = "$val = $rd" in { ++ def SWAPrr : F3_1<3, 0b001111, ++ (outs IntRegs:$rd), (ins IntRegs:$val, MEMrr:$addr), ++ "swap [$addr], $rd", ++ [(set i32:$rd, (atomic_swap_32 ADDRrr:$addr, i32:$val))]>; ++ def SWAPri : F3_2<3, 0b001111, ++ (outs IntRegs:$rd), (ins IntRegs:$val, MEMri:$addr), ++ "swap [$addr], $rd", ++ [(set i32:$rd, (atomic_swap_32 ADDRri:$addr, i32:$val))]>; ++} ++ ++let Predicates = [HasV9], Constraints = "$swap = $rd" in ++ def CASrr: F3_1<3, 0b111100, ++ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2, ++ IntRegs:$swap), ++ "cas [$rs1], $rs2, $rd", ++ [(set i32:$rd, ++ (atomic_cmp_swap iPTR:$rs1, i32:$rs2, i32:$swap))]>; ++ + //===----------------------------------------------------------------------===// + // Non-Instruction Patterns + //===----------------------------------------------------------------------===// +@@ -1036,4 +1063,17 @@ def : Pat<(i32 (zextloadi1 ADDRri:$src)), 
(LDUBri + def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>; + def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>; + ++// store bar for all atomic_fence in V8. ++let Predicates = [HasNoV9] in ++ def : Pat<(atomic_fence imm, imm), (STBAR)>; ++ ++// atomic_load_32 addr -> load addr ++def : Pat<(i32 (atomic_load ADDRrr:$src)), (LDrr ADDRrr:$src)>; ++def : Pat<(i32 (atomic_load ADDRri:$src)), (LDri ADDRri:$src)>; ++ ++// atomic_store_32 val, addr -> store val, addr ++def : Pat<(atomic_store ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>; ++def : Pat<(atomic_store ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>; ++ ++ + include "SparcInstr64Bit.td" +Index: lib/Target/Sparc/SparcISelLowering.cpp +=================================================================== +--- lib/Target/Sparc/SparcISelLowering.cpp ++++ lib/Target/Sparc/SparcISelLowering.cpp +@@ -1472,10 +1472,30 @@ SparcTargetLowering::SparcTargetLowering(TargetMac + setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom); + } + +- // FIXME: There are instructions available for ATOMIC_FENCE +- // on SparcV8 and later. +- setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand); ++ // ATOMICs. ++ // FIXME: We insert fences for each atomics and generate sub-optimal code ++ // for PSO/TSO. Also, implement other atomicrmw operations. + ++ setInsertFencesForAtomic(true); ++ ++ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal); ++ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, ++ (Subtarget->isV9() ? 
Legal: Expand)); ++ ++ ++ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal); ++ ++ // Custom Lower Atomic LOAD/STORE ++ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom); ++ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom); ++ ++ if (Subtarget->is64Bit()) { ++ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal); ++ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand); ++ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom); ++ setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom); ++ } ++ + if (!Subtarget->isV9()) { + // SparcV8 does not have FNEGD and FABSD. + setOperationAction(ISD::FNEG, MVT::f64, Custom); +@@ -2723,6 +2743,16 @@ static SDValue LowerUMULO_SMULO(SDValue Op, Select + return DAG.getMergeValues(Ops, 2, dl); + } + ++static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) { ++ // Monotonic load/stores are legal. ++ if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic) ++ return Op; ++ ++ // Otherwise, expand with a fence. ++ return SDValue(); ++} ++ ++ + SDValue SparcTargetLowering:: + LowerOperation(SDValue Op, SelectionDAG &DAG) const { + +@@ -2778,6 +2808,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) cons + case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG); + case ISD::UMULO: + case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this); ++ case ISD::ATOMIC_LOAD: ++ case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG); + } + } + +Index: lib/Target/Sparc/SparcInstr64Bit.td +=================================================================== +--- lib/Target/Sparc/SparcInstr64Bit.td ++++ lib/Target/Sparc/SparcInstr64Bit.td +@@ -415,6 +415,32 @@ def SETHIXi : F2_1<0b100, + "sethi $imm22, $rd", + [(set i64:$rd, SETHIimm:$imm22)]>; + } ++ ++// ATOMICS. 
++let Predicates = [Is64Bit], Constraints = "$swap = $rd" in { ++ def CASXrr: F3_1<3, 0b111110, ++ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2, ++ I64Regs:$swap), ++ "casx [$rs1], $rs2, $rd", ++ [(set i64:$rd, ++ (atomic_cmp_swap i64:$rs1, i64:$rs2, i64:$swap))]>; ++ ++} // Predicates = [Is64Bit], Constraints = ... ++ ++let Predicates = [Is64Bit] in { ++ ++def : Pat<(atomic_fence imm, imm), (MEMBARi 0xf)>; ++ ++// atomic_load_64 addr -> load addr ++def : Pat<(i64 (atomic_load ADDRrr:$src)), (LDXrr ADDRrr:$src)>; ++def : Pat<(i64 (atomic_load ADDRri:$src)), (LDXri ADDRri:$src)>; ++ ++// atomic_store_64 val, addr -> store val, addr ++def : Pat<(atomic_store ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>; ++def : Pat<(atomic_store ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>; ++ ++} // Predicates = [Is64Bit] ++ + // Global addresses, constant pool entries + let Predicates = [Is64Bit] in { + +Index: test/CodeGen/SPARC/atomics.ll +=================================================================== +--- test/CodeGen/SPARC/atomics.ll ++++ test/CodeGen/SPARC/atomics.ll +@@ -0,0 +1,63 @@ ++; RUN: llc < %s -march=sparcv9 | FileCheck %s ++ ++; CHECK-LABEL: test_atomic_i32 ++; CHECK: ld [%o0] ++; CHECK: membar ++; CHECK: ld [%o1] ++; CHECK: membar ++; CHECK: membar ++; CHECK: st {{.+}}, [%o2] ++define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) { ++entry: ++ %0 = load atomic i32* %ptr1 acquire, align 8 ++ %1 = load atomic i32* %ptr2 acquire, align 8 ++ %2 = add i32 %0, %1 ++ store atomic i32 %2, i32* %ptr3 release, align 8 ++ ret i32 %2 ++} ++ ++; CHECK-LABEL: test_atomic_i64 ++; CHECK: ldx [%o0] ++; CHECK: membar ++; CHECK: ldx [%o1] ++; CHECK: membar ++; CHECK: membar ++; CHECK: stx {{.+}}, [%o2] ++define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) { ++entry: ++ %0 = load atomic i64* %ptr1 acquire, align 8 ++ %1 = load atomic i64* %ptr2 acquire, align 8 ++ %2 = add i64 %0, %1 ++ store atomic i64 %2, i64* %ptr3 release, align 8 ++ 
ret i64 %2 ++} ++ ++; CHECK-LABEL: test_cmpxchg_i32 ++; CHECK: or %g0, 123, [[R:%[gilo][0-7]]] ++; CHECK: cas [%o1], %o0, [[R]] ++ ++define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) { ++entry: ++ %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic ++ ret i32 %b ++} ++ ++; CHECK-LABEL: test_cmpxchg_i64 ++; CHECK: or %g0, 123, [[R:%[gilo][0-7]]] ++; CHECK: casx [%o1], %o0, [[R]] ++ ++define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) { ++entry: ++ %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic ++ ret i64 %b ++} ++ ++; CHECK-LABEL: test_swap_i32 ++; CHECK: or %g0, 42, [[R:%[gilo][0-7]]] ++; CHECK: swap [%o1], [[R]] ++ ++define i32 @test_swap_i32(i32 %a, i32* %ptr) { ++entry: ++ %b = atomicrmw xchg i32* %ptr, i32 42 monotonic ++ ret i32 %b ++} |