Pull in r200453 from upstream llvm trunk (by Jakob Stoklund Olesen):

  Implement SPARCv9 atomic_swap_64 with a pseudo.

  The SWAP instruction only exists in a 32-bit variant, but the 64-bit
  atomic swap can be implemented in terms of CASX, like the other atomic
  rmw primitives.

Introduced here: http://svnweb.freebsd.org/changeset/base/262264

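For reference, the expansion is the standard compare-and-swap retry loop:
read the old value, attempt to CASX the new value in, and retry if memory
no longer held the expected value. A minimal C++ sketch of the same
technique (std::atomic stands in for the generated machine code; the name
swap64 is illustrative only, not part of the patch):

    #include <atomic>
    #include <cstdint>

    // Emulate a 64-bit atomic exchange with compare-and-swap, mirroring
    // the retry loop the ATOMIC_SWAP_64 pseudo expands to around CASX.
    uint64_t swap64(std::atomic<uint64_t> &mem, uint64_t newval) {
      uint64_t old = mem.load(std::memory_order_relaxed);
      // compare_exchange_weak reloads 'old' on failure, so the loop
      // retries until 'newval' is successfully stored.
      while (!mem.compare_exchange_weak(old, newval,
                                        std::memory_order_relaxed))
        ;
      return old;
    }
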
Index: lib/Target/Sparc/SparcInstr64Bit.td
===================================================================
--- lib/Target/Sparc/SparcInstr64Bit.td
+++ lib/Target/Sparc/SparcInstr64Bit.td
@@ -463,6 +463,14 @@ defm ATOMIC_LOAD_MAX  : AtomicRMW<atomic_load_max_
 defm ATOMIC_LOAD_UMIN : AtomicRMW<atomic_load_umin_32, atomic_load_umin_64>;
 defm ATOMIC_LOAD_UMAX : AtomicRMW<atomic_load_umax_32, atomic_load_umax_64>;
 
+// There is no 64-bit variant of SWAP, so use a pseudo.
+let usesCustomInserter = 1, hasCtrlDep = 1, mayLoad = 1, mayStore = 1,
+    Defs = [ICC], Predicates = [Is64Bit] in
+def ATOMIC_SWAP_64 : Pseudo<(outs I64Regs:$rd),
+                            (ins ptr_rc:$addr, I64Regs:$rs2), "",
+                            [(set i64:$rd,
+                                  (atomic_swap_64 iPTR:$addr, i64:$rs2))]>;
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {
 
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -1498,7 +1498,7 @@ SparcTargetLowering::SparcTargetLowering(TargetMac
 
   if (Subtarget->is64Bit()) {
     setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
-    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
     setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
   }
@@ -2885,6 +2885,9 @@ SparcTargetLowering::EmitInstrWithCustomInserter(M
   case SP::ATOMIC_LOAD_NAND_64:
     return expandAtomicRMW(MI, BB, SP::ANDXrr);
 
+  case SP::ATOMIC_SWAP_64:
+    return expandAtomicRMW(MI, BB, 0);
+
   case SP::ATOMIC_LOAD_MAX_32:
     return expandAtomicRMW(MI, BB, SP::MOVICCrr, SPCC::ICC_G);
   case SP::ATOMIC_LOAD_MAX_64:
@@ -3023,7 +3026,8 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
 
   // Build the loop block.
   unsigned ValReg = MRI.createVirtualRegister(ValueRC);
-  unsigned UpdReg = MRI.createVirtualRegister(ValueRC);
+  // Opcode == 0 means try to write Rs2Reg directly (ATOMIC_SWAP).
+  unsigned UpdReg = (Opcode ? MRI.createVirtualRegister(ValueRC) : Rs2Reg);
 
   BuildMI(LoopMBB, DL, TII.get(SP::PHI), ValReg)
     .addReg(Val0Reg).addMBB(MBB)
@@ -3035,7 +3039,7 @@ SparcTargetLowering::expandAtomicRMW(MachineInstr
     BuildMI(LoopMBB, DL, TII.get(SP::CMPrr)).addReg(ValReg).addReg(Rs2Reg);
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg).addImm(CondCode);
-  } else {
+  } else if (Opcode) {
     BuildMI(LoopMBB, DL, TII.get(Opcode), UpdReg)
       .addReg(ValReg).addReg(Rs2Reg);
   }
Index: test/CodeGen/SPARC/atomics.ll
===================================================================
--- test/CodeGen/SPARC/atomics.ll
+++ test/CodeGen/SPARC/atomics.ll
@@ -62,6 +62,15 @@ entry:
   ret i32 %b
 }
 
+; CHECK-LABEL: test_swap_i64
+; CHECK:       casx [%o1],
+
+define i64 @test_swap_i64(i64 %a, i64* %ptr) {
+entry:
+  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
+  ret i64 %b
+}
+
 ; CHECK-LABEL: test_load_add_32
 ; CHECK: membar
 ; CHECK: add [[V:%[gilo][0-7]]], %o1, [[U:%[gilo][0-7]]]
 
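The added test can be exercised in the usual LLVM fashion, with something
like the following (the authoritative RUN line lives in the test file
itself):

    llc -march=sparcv9 < test/CodeGen/SPARC/atomics.ll | \
        FileCheck test/CodeGen/SPARC/atomics.ll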