Pull in r200453 from upstream llvm trunk (by Jakob Stoklund Olesen):

  Implement SPARCv9 atomic_swap_64 with a pseudo.

  The SWAP instruction only exists in a 32-bit variant, but the 64-bit
  atomic swap can be implemented in terms of CASX, like the other atomic
  rmw primitives.

Introduced here: http://svn.freebsd.org/changeset/base/262264
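
For context, the expansion this enables is the usual compare-and-swap retry
loop: load the old value, attempt a CASX with the new value, and retry until
the CAS succeeds. A minimal C++ sketch of the same idea (illustration only,
using std::atomic's compare_exchange in place of the CASX instruction that
expandAtomicRMW emits; swap64_via_cas is a hypothetical name):

  #include <atomic>
  #include <cstdint>

  // 64-bit atomic swap built from a compare-and-swap loop, mirroring the
  // structure expandAtomicRMW builds around CASX when Opcode == 0.
  uint64_t swap64_via_cas(std::atomic<uint64_t> &mem, uint64_t newval) {
    // Initial load (the Val0Reg load that feeds the loop's PHI).
    uint64_t old = mem.load(std::memory_order_relaxed);
    // On failure, compare_exchange_weak reloads 'old' with the observed
    // value, like the CASX result feeding back into the next iteration.
    // 'relaxed' corresponds to the monotonic ordering in the test below.
    while (!mem.compare_exchange_weak(old, newval, std::memory_order_relaxed))
      ;
    return old;  // previous memory contents, i.e. the pseudo's $rd result
  }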

Index: lib/Target/Sparc/SparcInstr64Bit.td
===================================================================
--- lib/Target/Sparc/SparcInstr64Bit.td
+++ lib/Target/Sparc/SparcInstr64Bit.td
@@ -463,6 +463,14 @@ defm ATOMIC_LOAD_MAX  : AtomicRMW<atomic_load_max_
 defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
 defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
 
+// There is no 64-bit variant of SWAP, so use a pseudo.
+let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
+    Defs = [ICC], Predicates = [Is64Bit] in
+def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
+                            (ins ptr_rc:$addr, I64Regs:$rs2), "",
+                            [(set i64:$rd,
+                                  (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {
 
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -1498,7 +1498,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMac
 
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
   }
@@ -2885,6 +2885,9 @@ SparcTargetLowering::EmitInstrWithCustomInserter(M
   case SP::ATOMIC_LOAD_NAND_64:
     return expandAtomicRMW(MI, BB, SP::ANDXrr);
 
+  case SP::ATOMIC_SWAP_64:
+    return expandAtomicRMW(MI, BB, 0);
+
   case SP::ATOMIC_LOAD_MAX_32:
     return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
   case SP::ATOMIC_LOAD_MAX_64:
@@ -3023,7 +3026,8 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
 
   // Build the loop block.
   unsigned ValReg = MRI.createVirtualRegister(ValueRC);
-  unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
 
   BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
     .addReg(Val0Reg).addMBB(MBB)
@@ -3035,7 +3039,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
     BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
-  } else {
+  } else if (Opcode) {
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg);
   }
Index: test/CodeGen/SPARC/atomics.ll
===================================================================
--- test/CodeGen/SPARC/atomics.ll
+++ test/CodeGen/SPARC/atomics.ll
@@ -62,6 +62,15 @@ entry:
   ret i32 %b
 }
 
+; CHECK-LABEL: test_swap_i64
+; CHECK:       casx [%o1],
+
+define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+entry:
+  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+  ret i64 %b
+}
+
 ; CHECK-LABEL: test_load_add_32
 ; CHECK: membar
 ; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]