author:    dim <dim@FreeBSD.org>  2014-03-26 07:42:43 +0000
committer: dim <dim@FreeBSD.org>  2014-03-26 07:42:43 +0000
commit:    45ae227ed48f53447b0000be4c2f1cb142fa5237 (patch)
tree:      2c3d1790f54e2af0e10eeb88cb26a0d91f029053 /contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff
parent:    fb422e6d310915f9e2641190198698d922f7ef58 (diff)
MFC r263312:

Pull in r196939 from upstream llvm trunk (by Reid Kleckner):

  Reland "Fix miscompile of MS inline assembly with stack realignment"

  This re-lands commit r196876, which was reverted in r196879. The tests
  have been fixed to pass on platforms with a stack alignment larger
  than 4. An update to the clang-side tests will land shortly.

Pull in r196986 from upstream llvm trunk (by Reid Kleckner):

  Revert the backend fatal error from r196939

  The combination of inline asm, stack realignment, and dynamic allocas
  turns out to be too common to reject out of hand. ASan inserts empty
  inline asm fragments and uses aligned allocas. Compiling any trivial
  function containing a dynamic alloca with ASan is enough to trigger
  the check. XFAIL the test cases that would be miscompiled and add one
  that uses the relevant functionality.

Pull in r202930 from upstream llvm trunk (by Hans Wennborg):

  Check for dynamic allocas and inline asm that clobbers sp before
  building selection dag (PR19012)

  In X86SelectionDagInfo::EmitTargetCodeForMemcpy we check with
  MachineFrameInfo to make sure that ESI isn't used as a base pointer
  register before we choose to emit rep movs (which clobbers esi).

  The problem is that MachineFrameInfo wouldn't know about dynamic
  allocas or inline asm that clobbers the stack pointer until
  SelectionDAGBuilder has encountered them.

  This patch fixes the problem by checking for such things when building
  the FunctionLoweringInfo.

  Differential Revision: http://llvm-reviews.chandlerc.com/D2954

Together, these commits fix the problem encountered in the devel/emacs
port on the i386 architecture, where a combination of stack realignment,
alloca() and memcpy() could incidentally clobber the %esi register,
leading to segfaults in the temacs build-time utility.

See also: http://llvm.org/PR18171 and http://llvm.org/PR19012

Reported by: ashish
PR:          ports/183064

MFC r263313:

Pull in r203311 from upstream llvm trunk (by Arnold Schwaighofer):

  ISel: Make VSELECT selection terminate in cases where the condition
  type has to be split and the result type widened.

  When the condition of a vselect has to be split, it makes no sense to
  widen the vselect and thereby widen the condition: we would end up in
  an endless loop of widening (the vselect result type) and splitting
  (the condition mask type). Instead, split both the condition and the
  vselect, and widen the result.

  I ran this over the test suite with i686 and mattr=+sse and saw no
  regressions.

  Fixes PR18036.

With this fix, the original problem case from the graphics/rawtherapee
port (posted in http://llvm.org/PR18036) now compiles within ~97 MB RSS.

Reported by: mandree

MFC r263320:

Add separate patch files for all the customizations we have currently
applied to our copy of llvm/clang. These can be applied in alphabetical
order to a pristine llvm/clang 3.4 release source tree to result in the
same version used in FreeBSD.

This is intended to clearly document all the changes made until now,
which mostly consist of cherry-pickings from the respective upstream
trunks, plus a number of hand-written FreeBSD-specific ones. Hopefully
those can eventually be cleaned up and sent upstream too.
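For illustration, the following is a minimal, hypothetical reduction of the i386 failure mode described above (the names are invented; this is not code from the devel/emacs sources). The over-aligned local forces stack realignment, the dynamic alloca forces the function to address its frame through the base pointer %esi, and the large fixed-size memcpy is a candidate for lowering to "rep movs", whose source operand is also %esi:

    #include <cstring>

    void consume(void *p);  // external sink, keeps the locals alive

    void trigger(unsigned n, const char *src) {
      // Over-aligned local: forces the prologue to realign the stack.
      alignas(64) char buf[4096];
      // Dynamic alloca: with a realigned, variable-size frame, locals must
      // be addressed through a base pointer register (%esi on i386).
      char *dyn = static_cast<char *>(__builtin_alloca(n));
      // Large fixed-size copy: a candidate for the "rep movs" expansion,
      // which reads its source address from %esi and so clobbers the base
      // pointer unless the backend knows %esi is in use.
      std::memcpy(buf, src, sizeof(buf));
      consume(buf);
      consume(dyn);
    }

Before r202930, MachineFrameInfo could still be unaware of the dynamic alloca at the point where the memcpy expansion was chosen; the fix records dynamic allocas and sp-clobbering inline asm while building the FunctionLoweringInfo, so the rep movs expansion is rejected in cases like this.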
Diffstat (limited to 'contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff')
-rw-r--r--  contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff | 344
1 file changed, 344 insertions(+), 0 deletions(-)
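As a rough, hypothetical sketch (the function names are invented; this code is not part of the patch), C++ along these lines should lower to the atomicrmw operations that the patch below implements for SPARCv9, each expanded into a compare-and-swap (CASA/CASXA) loop bracketed by membar instructions:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint32_t> counter32{0};
    std::atomic<std::uint64_t> counter64{0};

    // Should lower to "atomicrmw add i32 ... seq_cst": membar, add,
    // cas [address], membar (cf. test_load_add_32 in the test below).
    std::uint32_t bump32(std::uint32_t v) {
      return counter32.fetch_add(v, std::memory_order_seq_cst);
    }

    // Should lower to "atomicrmw sub i64 ... seq_cst"; the 64-bit variant
    // uses casx instead of cas (cf. test_load_sub_64 in the test below).
    std::uint64_t drop64(std::uint64_t v) {
      return counter64.fetch_sub(v, std::memory_order_seq_cst);
    }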
diff --git a/contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff b/contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff
new file mode 100644
index 0000000..b1dec41
--- /dev/null
+++ b/contrib/llvm/patches/patch-r262261-llvm-r199975-sparc.diff
@@ -0,0 +1,344 @@
+Pull in r199975 from upstream llvm trunk (by Jakob Stoklund Olesen):
+
+ Implement atomicrmw operations in 32 and 64 bits for SPARCv9.
+
+ These all use the compare-and-swap CASA/CASXA instructions.
+
+Introduced here: http://svn.freebsd.org/changeset/base/262261
+
+Index: test/CodeGen/SPARC/atomics.ll
+===================================================================
+--- test/CodeGen/SPARC/atomics.ll
++++ test/CodeGen/SPARC/atomics.ll
+@@ -1,4 +1,4 @@
+-; RUN: llc < %s -march=sparcv9 | FileCheck %s
++; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s
+
+ ; CHECK-LABEL: test_atomic_i32
+ ; CHECK: ld [%o0]
+@@ -61,3 +61,84 @@ entry:
+ %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
+ ret i32 %b
+ }
++
++; CHECK-LABEL: test_load_add_32
++; CHECK: membar
++; CHECK: add
++; CHECK: cas [%o0]
++; CHECK: membar
++define zeroext i32 @test_load_add_32(i32* %p, i32 zeroext %v) {
++entry:
++ %0 = atomicrmw add i32* %p, i32 %v seq_cst
++ ret i32 %0
++}
++
++; CHECK-LABEL: test_load_sub_64
++; CHECK: membar
++; CHECK: sub
++; CHECK: casx [%o0]
++; CHECK: membar
++define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
++entry:
++ %0 = atomicrmw sub i64* %p, i64 %v seq_cst
++ ret i64 %0
++}
++
++; CHECK-LABEL: test_load_xor_32
++; CHECK: membar
++; CHECK: xor
++; CHECK: cas [%o0]
++; CHECK: membar
++define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
++entry:
++ %0 = atomicrmw xor i32* %p, i32 %v seq_cst
++ ret i32 %0
++}
++
++; CHECK-LABEL: test_load_and_32
++; CHECK: membar
++; CHECK: and
++; CHECK-NOT: xor
++; CHECK: cas [%o0]
++; CHECK: membar
++define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
++entry:
++ %0 = atomicrmw and i32* %p, i32 %v seq_cst
++ ret i32 %0
++}
++
++; CHECK-LABEL: test_load_nand_32
++; CHECK: membar
++; CHECK: and
++; CHECK: xor
++; CHECK: cas [%o0]
++; CHECK: membar
++define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
++entry:
++ %0 = atomicrmw nand i32* %p, i32 %v seq_cst
++ ret i32 %0
++}
++
++; CHECK-LABEL: test_load_max_64
++; CHECK: membar
++; CHECK: cmp
++; CHECK: movg %xcc
++; CHECK: casx [%o0]
++; CHECK: membar
++define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
++entry:
++ %0 = atomicrmw max i64* %p, i64 %v seq_cst
++ ret i64 %0
++}
++
++; CHECK-LABEL: test_load_umin_32
++; CHECK: membar
++; CHECK: cmp
++; CHECK: movleu %icc
++; CHECK: cas [%o0]
++; CHECK: membar
++define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
++entry:
++ %0 = atomicrmw umin i32* %p, i32 %v seq_cst
++ ret i32 %0
++}
+Index: lib/Target/Sparc/SparcInstr64Bit.td
+===================================================================
+--- lib/Target/Sparc/SparcInstr64Bit.td
++++ lib/Target/Sparc/SparcInstr64Bit.td
+@@ -438,6 +438,31 @@ def : Pat<(atomic_store ADDRri:$dst, i64:$val), (S
+
+ } // Predicates = [Is64Bit]
+
++let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
++ Defs = [ICC] in
++multiclass AtomicRMW<SDPatternOperator op32, SDPatternOperator op64> {
++
++ def _32 : Pseudo<(outs IntRegs:$rd),
++ (ins ptr_rc:$addr, IntRegs:$rs2), "",
++ [(set i32:$rd, (op32 iPTR:$addr, i32:$rs2))]>;
++
++ let Predicates = [Is64Bit] in
++ def _64 : Pseudo<(outs I64Regs:$rd),
++ (ins ptr_rc:$addr, I64Regs:$rs2), "",
++ [(set i64:$rd, (op64 iPTR:$addr, i64:$rs2))]>;
++}
++
++defm ATOMIC_LOAD_ADD : AtomicRMW<atomic_load_add_32, atomic_load_add_64>;
++defm ATOMIC_LOAD_SUB : AtomicRMW<atomic_load_sub_32, atomic_load_sub_64>;
++defm ATOMIC_LOAD_AND : AtomicRMW<atomic_load_and_32, atomic_load_and_64>;
++defm ATOMIC_LOAD_OR : AtomicRMW<atomic_load_or_32, atomic_load_or_64>;
++defm ATOMIC_LOAD_XOR : AtomicRMW<atomic_load_xor_32, atomic_load_xor_64>;
++defm ATOMIC_LOAD_NAND : AtomicRMW<atomic_load_nand_32, atomic_load_nand_64>;
++defm ATOMIC_LOAD_MIN : AtomicRMW<atomic_load_min_32, atomic_load_min_64>;
++defm ATOMIC_LOAD_MAX : AtomicRMW<atomic_load_max_32, atomic_load_max_64>;
++defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
++defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
++
+ // Global addresses, constant pool entries
+ let Predicates = [Is64Bit] in {
+
+Index: lib/Target/Sparc/SparcISelLowering.cpp
+===================================================================
+--- lib/Target/Sparc/SparcISelLowering.cpp
++++ lib/Target/Sparc/SparcISelLowering.cpp
+@@ -2831,11 +2831,6 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) cons
+ MachineBasicBlock *
+ SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
+ MachineBasicBlock *BB) const {
+- const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
+- unsigned BROpcode;
+- unsigned CC;
+- DebugLoc dl = MI->getDebugLoc();
+- // Figure out the conditional branch opcode to use for this select_cc.
+ switch (MI->getOpcode()) {
+ default: llvm_unreachable("Unknown SELECT_CC!");
+ case SP::SELECT_CC_Int_ICC:
+@@ -2842,17 +2837,64 @@ SparcTargetLowering::EmitInstrWithCustomInserter(M
+ case SP::SELECT_CC_FP_ICC:
+ case SP::SELECT_CC_DFP_ICC:
+ case SP::SELECT_CC_QFP_ICC:
+- BROpcode = SP::BCOND;
+- break;
++ return expandSelectCC(MI, BB, SP::BCOND);
+ case SP::SELECT_CC_Int_FCC:
+ case SP::SELECT_CC_FP_FCC:
+ case SP::SELECT_CC_DFP_FCC:
+ case SP::SELECT_CC_QFP_FCC:
+- BROpcode = SP::FBCOND;
+- break;
++ return expandSelectCC(MI, BB, SP::FBCOND);
++
++ case SP::ATOMIC_LOAD_ADD_32:
++ return expandAtomicRMW(MI, BB, SP::ADDrr);
++ case SP::ATOMIC_LOAD_ADD_64:
++ return expandAtomicRMW(MI, BB, SP::ADDXrr);
++ case SP::ATOMIC_LOAD_SUB_32:
++ return expandAtomicRMW(MI, BB, SP::SUBrr);
++ case SP::ATOMIC_LOAD_SUB_64:
++ return expandAtomicRMW(MI, BB, SP::SUBXrr);
++ case SP::ATOMIC_LOAD_AND_32:
++ return expandAtomicRMW(MI, BB, SP::ANDrr);
++ case SP::ATOMIC_LOAD_AND_64:
++ return expandAtomicRMW(MI, BB, SP::ANDXrr);
++ case SP::ATOMIC_LOAD_OR_32:
++ return expandAtomicRMW(MI, BB, SP::ORrr);
++ case SP::ATOMIC_LOAD_OR_64:
++ return expandAtomicRMW(MI, BB, SP::ORXrr);
++ case SP::ATOMIC_LOAD_XOR_32:
++ return expandAtomicRMW(MI, BB, SP::XORrr);
++ case SP::ATOMIC_LOAD_XOR_64:
++ return expandAtomicRMW(MI, BB, SP::XORXrr);
++ case SP::ATOMIC_LOAD_NAND_32:
++ return expandAtomicRMW(MI, BB, SP::ANDrr);
++ case SP::ATOMIC_LOAD_NAND_64:
++ return expandAtomicRMW(MI, BB, SP::ANDXrr);
++
++ case SP::ATOMIC_LOAD_MAX_32:
++ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
++ case SP::ATOMIC_LOAD_MAX_64:
++ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_G);
++ case SP::ATOMIC_LOAD_MIN_32:
++ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LE);
++ case SP::ATOMIC_LOAD_MIN_64:
++ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LE);
++ case SP::ATOMIC_LOAD_UMAX_32:
++ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_GU);
++ case SP::ATOMIC_LOAD_UMAX_64:
++ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_GU);
++ case SP::ATOMIC_LOAD_UMIN_32:
++ return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_LEU);
++ case SP::ATOMIC_LOAD_UMIN_64:
++ return expandAtomicRMW(MI, BB, SP::MOVXCCrr, SPCC::ICC_LEU);
+ }
++}
+
+- CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
++MachineBasicBlock*
++SparcTargetLowering::expandSelectCC(MachineInstr *MI,
++ MachineBasicBlock *BB,
++ unsigned BROpcode) const {
++ const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
++ DebugLoc dl = MI->getDebugLoc();
++ unsigned CC = (SPCC::CondCodes)MI->getOperand(3).getImm();
+
+ // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
+ // control-flow pattern. The incoming instruction knows the destination vreg
+@@ -2906,6 +2948,100 @@ SparcTargetLowering::EmitInstrWithCustomInserter(M
+ return BB;
+ }
+
++MachineBasicBlock*
++SparcTargetLowering::expandAtomicRMW(MachineInstr *MI,
++ MachineBasicBlock *MBB,
++ unsigned Opcode,
++ unsigned CondCode) const {
++ const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
++ MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
++ DebugLoc DL = MI->getDebugLoc();
++
++ // MI is an atomic read-modify-write instruction of the form:
++ //
++ // rd = atomicrmw<op> addr, rs2
++ //
++ // All three operands are registers.
++ unsigned DestReg = MI->getOperand(0).getReg();
++ unsigned AddrReg = MI->getOperand(1).getReg();
++ unsigned Rs2Reg = MI->getOperand(2).getReg();
++
++ // SelectionDAG has already inserted memory barriers before and after MI, so
++ // we simply have to implement the operation in terms of compare-and-swap.
++ //
++ // %val0 = load %addr
++ // loop:
++ // %val = phi %val0, %dest
++ // %upd = op %val, %rs2
++ // %dest = cas %addr, %upd, %val
++ // cmp %val, %dest
++ // bne loop
++ // done:
++ //
++ bool is64Bit = SP::I64RegsRegClass.hasSubClassEq(MRI.getRegClass(DestReg));
++ const TargetRegisterClass *ValueRC =
++ is64Bit ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
++ unsigned Val0Reg = MRI.createVirtualRegister(ValueRC);
++
++ BuildMI(*MBB, MI, DL, TII.get(is64Bit ? SP::LDXri : SP::LDri), Val0Reg)
++ .addReg(AddrReg).addImm(0);
++
++ // Split the basic block MBB before MI and insert the loop block in the hole.
++ MachineFunction::iterator MFI = MBB;
++ const BasicBlock *LLVM_BB = MBB->getBasicBlock();
++ MachineFunction *MF = MBB->getParent();
++ MachineBasicBlock *LoopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
++ MachineBasicBlock *DoneMBB = MF->CreateMachineBasicBlock(LLVM_BB);
++ ++MFI;
++ MF->insert(MFI, LoopMBB);
++ MF->insert(MFI, DoneMBB);
++
++ // Move MI and following instructions to DoneMBB.
++ DoneMBB->splice(DoneMBB->begin(), MBB, MI, MBB->end());
++ DoneMBB->transferSuccessorsAndUpdatePHIs(MBB);
++
++ // Connect the CFG again.
++ MBB->addSuccessor(LoopMBB);
++ LoopMBB->addSuccessor(LoopMBB);
++ LoopMBB->addSuccessor(DoneMBB);
++
++ // Build the loop block.
++ unsigned ValReg = MRI.createVirtualRegister(ValueRC);
++ unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
++
++ BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
++ .addReg(Val0Reg).addMBB(MBB)
++ .addReg(DestReg).addMBB(LoopMBB);
++
++ if (CondCode) {
++ // This is one of the min/max operations. We need a CMPrr followed by a
++ // MOVXCC/MOVICC.
++ BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
++ BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
++ .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
++ } else {
++ BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
++ .addReg(ValReg).addReg(Rs2Reg);
++ }
++
++ if (MI->getOpcode() == SP::ATOMIC_LOAD_NAND_32 ||
++ MI->getOpcode() == SP::ATOMIC_LOAD_NAND_64) {
++ unsigned TmpReg = UpdReg;
++ UpdReg = MRI.createVirtualRegister(ValueRC);
++ BuildMI(LoopMBB, DL, TII.get(SP::XORri), UpdReg).addReg(TmpReg).addImm(-1);
++ }
++
++ BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::CASXrr : SP::CASrr), DestReg)
++ .addReg(AddrReg).addReg(UpdReg).addReg(ValReg)
++ .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
++ BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(DestReg);
++ BuildMI(LoopMBB, DL, TII.get(is64Bit ? SP::BPXCC : SP::BCOND))
++ .addMBB(LoopMBB).addImm(SPCC::ICC_NE);
++
++ MI->eraseFromParent();
++ return DoneMBB;
++}
++
+ //===----------------------------------------------------------------------===//
+ // Sparc Inline Assembly Support
+ //===----------------------------------------------------------------------===//
+Index: lib/Target/Sparc/SparcISelLowering.h
+===================================================================
+--- lib/Target/Sparc/SparcISelLowering.h
++++ lib/Target/Sparc/SparcISelLowering.h
+@@ -165,6 +165,13 @@ namespace llvm {
+ virtual void ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const;
++
++ MachineBasicBlock *expandSelectCC(MachineInstr *MI, MachineBasicBlock *BB,
++ unsigned BROpcode) const;
++ MachineBasicBlock *expandAtomicRMW(MachineInstr *MI,
++ MachineBasicBlock *BB,
++ unsigned Opcode,
++ unsigned CondCode = 0) const;
+ };
+ } // end namespace llvm
+