patch-r262261-llvm-r198286-sparc.diff
Pull in r198286 from upstream llvm trunk (by Venkatraman Govindaraju):

  [Sparc] Handle atomic loads/stores in sparc backend.

Introduced here: http://svnweb.freebsd.org/changeset/base/262261

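For reference only (this note is not part of the upstream commit): a minimal
LLVM IR sketch of what the change enables, mirroring the new test file added
below; the expected instruction selections are noted in comments, and the
function name and register choices are illustrative.

  ; Assumes a V9 target, e.g. llc -march=sparcv9.
  define i32 @xchg_then_cas(i32* %p) {
  entry:
    ; atomicrmw xchg on i32 selects SWAPrr/SWAPri: swap [<addr>], <reg>
    %old = atomicrmw xchg i32* %p, i32 1 monotonic
    ; cmpxchg on i32 selects CASrr (Legal only on V9, Expand otherwise):
    ;   cas [<addr>], <compare-reg>, <new/result-reg>
    %res = cmpxchg i32* %p, i32 %old, i32 2 monotonic
    ret i32 %res
  }

Orderings stronger than monotonic rely on setInsertFencesForAtomic(true),
which brackets the operation with ATOMIC_FENCE nodes; the patterns here lower
those to membar 0xf (64-bit) or stbar (pre-V9).
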
Index: lib/Target/Sparc/SparcInstrInfo.td
===================================================================
--- lib/Target/Sparc/SparcInstrInfo.td
+++ lib/Target/Sparc/SparcInstrInfo.td
@@ -975,6 +975,33 @@ let rs1 = 0 in
 def : Pat<(ctpop i32:$src),
           (POPCrr (SRLri $src, 0))>;
 
+// Atomic swap.
+let hasSideEffects =1, rd = 0, rs1 = 0b01111, rs2 = 0 in
+  def STBAR : F3_1<2, 0b101000, (outs), (ins), "stbar", []>;
+
+let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in
+ def MEMBARi : F3_2<2, 0b101000, (outs), (ins i32imm:$simm13),
+                    "membar $simm13", []>;
+
+let Constraints = "$val = $rd" in {
+  def SWAPrr : F3_1<3, 0b001111,
+                 (outs IntRegs:$rd), (ins IntRegs:$val, MEMrr:$addr),
+                 "swap [$addr], $rd",
+                 [(set i32:$rd, (atomic_swap_32 ADDRrr:$addr, i32:$val))]>;
+  def SWAPri : F3_2<3, 0b001111,
+                 (outs IntRegs:$rd), (ins IntRegs:$val, MEMri:$addr),
+                 "swap [$addr], $rd",
+                 [(set i32:$rd, (atomic_swap_32 ADDRri:$addr, i32:$val))]>;
+}
+
+let Predicates = [HasV9], Constraints = "$swap = $rd" in
+  def CASrr: F3_1<3, 0b111100,
+                (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+                                     IntRegs:$swap),
+                 "cas [$rs1], $rs2, $rd",
+                 [(set i32:$rd,
+                     (atomic_cmp_swap iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
 //===----------------------------------------------------------------------===//
 // Non-Instruction Patterns
 //===----------------------------------------------------------------------===//
@@ -1036,4 +1063,17 @@ def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri
 def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>;
 def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
 
+// store bar for all atomic_fence in V8.
+let Predicates = [HasNoV9] in
+  def : Pat<(atomic_fence imm, imm), (STBAR)>;
+
+// atomic_load_32 addr -> load addr
+def : Pat<(i32 (atomic_load ADDRrr:$src)), (LDrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load ADDRri:$src)), (LDri ADDRri:$src)>;
+
+// atomic_store_32 val, addr -> store val, addr
+def : Pat<(atomic_store ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
+
+
 include "SparcInstr64Bit.td"
Index: lib/Target/Sparc/SparcISelLowering.cpp
===================================================================
--- lib/Target/Sparc/SparcISelLowering.cpp
+++ lib/Target/Sparc/SparcISelLowering.cpp
@@ -1472,10 +1472,30 @@ SparcTargetLowering::SparcTargetLowering(TargetMac
     setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
   }
 
-  // FIXME: There are instructions available for ATOMIC_FENCE
-  // on SparcV8 and later.
-  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
+  // ATOMICs.
+  // FIXME: We insert fences for each atomics and generate sub-optimal code
+  // for PSO/TSO. Also, implement other atomicrmw operations.
 
+  setInsertFencesForAtomic(true);
+
+  setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32,
+                     (Subtarget->isV9() ? Legal: Expand));
+
+
+  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
+
+  // Custom Lower Atomic LOAD/STORE
+  setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+  setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
+  if (Subtarget->is64Bit()) {
+    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
+    setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Expand);
+    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
+    setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
+  }
+
   if (!Subtarget->isV9()) {
     // SparcV8 does not have FNEGD and FABSD.
     setOperationAction(ISD::FNEG, MVT::f64, Custom);
@@ -2723,6 +2743,16 @@ static SDValue LowerUMULO_SMULO(SDValue Op, Select
   return DAG.getMergeValues(Ops, 2, dl);
 }
 
+static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
+  // Monotonic load/stores are legal.
+  if (cast<AtomicSDNode>(Op)->getOrdering() <= Monotonic)
+    return Op;
+
+  // Otherwise, expand with a fence.
+  return SDValue();
+}
+
+
 SDValue SparcTargetLowering::
 LowerOperation(SDValue Op, SelectionDAG &DAG) const {
 
@@ -2778,6 +2808,8 @@ LowerOperation(SDValue Op, SelectionDAG &DAG) cons
   case ISD::SUBE:               return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
   case ISD::UMULO:
   case ISD::SMULO:              return LowerUMULO_SMULO(Op, DAG, *this);
+  case ISD::ATOMIC_LOAD:
+  case ISD::ATOMIC_STORE:       return LowerATOMIC_LOAD_STORE(Op, DAG);
   }
 }
 
Index: lib/Target/Sparc/SparcInstr64Bit.td
===================================================================
--- lib/Target/Sparc/SparcInstr64Bit.td
+++ lib/Target/Sparc/SparcInstr64Bit.td
@@ -415,6 +415,32 @@ def SETHIXi : F2_1<0b100,
                    "sethi $imm22, $rd",
                    [(set i64:$rd, SETHIimm:$imm22)]>;
 }
+
+// ATOMICS.
+let Predicates = [Is64Bit], Constraints = "$swap = $rd" in {
+  def CASXrr: F3_1<3, 0b111110,
+                (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2,
+                                     I64Regs:$swap),
+                 "casx [$rs1], $rs2, $rd",
+                 [(set i64:$rd,
+                     (atomic_cmp_swap i64:$rs1, i64:$rs2, i64:$swap))]>;
+
+} // Predicates = [Is64Bit], Constraints = ...
+
+let Predicates = [Is64Bit] in {
+
+def : Pat<(atomic_fence imm, imm), (MEMBARi 0xf)>;
+
+// atomic_load_64 addr -> load addr
+def : Pat<(i64 (atomic_load ADDRrr:$src)), (LDXrr ADDRrr:$src)>;
+def : Pat<(i64 (atomic_load ADDRri:$src)), (LDXri ADDRri:$src)>;
+
+// atomic_store_64 val, addr -> store val, addr
+def : Pat<(atomic_store ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>;
+
+} // Predicates = [Is64Bit]
+
 // Global addresses, constant pool entries
 let Predicates = [Is64Bit] in {
 
Index: test/CodeGen/SPARC/atomics.ll
===================================================================
--- test/CodeGen/SPARC/atomics.ll
+++ test/CodeGen/SPARC/atomics.ll
@@ -0,0 +1,63 @@
+; RUN: llc < %s -march=sparcv9 | FileCheck %s
+
+; CHECK-LABEL: test_atomic_i32
+; CHECK:       ld [%o0]
+; CHECK:       membar
+; CHECK:       ld [%o1]
+; CHECK:       membar
+; CHECK:       membar
+; CHECK:       st {{.+}}, [%o2]
+define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
+entry:
+  %0 = load atomic i32* %ptr1 acquire, align 8
+  %1 = load atomic i32* %ptr2 acquire, align 8
+  %2 = add i32 %0, %1
+  store atomic i32 %2, i32* %ptr3 release, align 8
+  ret i32 %2
+}
+
+; CHECK-LABEL: test_atomic_i64
+; CHECK:       ldx [%o0]
+; CHECK:       membar
+; CHECK:       ldx [%o1]
+; CHECK:       membar
+; CHECK:       membar
+; CHECK:       stx {{.+}}, [%o2]
+define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
+entry:
+  %0 = load atomic i64* %ptr1 acquire, align 8
+  %1 = load atomic i64* %ptr2 acquire, align 8
+  %2 = add i64 %0, %1
+  store atomic i64 %2, i64* %ptr3 release, align 8
+  ret i64 %2
+}
+
+; CHECK-LABEL: test_cmpxchg_i32
+; CHECK:       or  %g0, 123, [[R:%[gilo][0-7]]]
+; CHECK:       cas [%o1], %o0, [[R]]
+
+define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
+entry:
+  %b = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic
+  ret i32 %b
+}
+
+; CHECK-LABEL: test_cmpxchg_i64
+; CHECK:       or  %g0, 123, [[R:%[gilo][0-7]]]
+; CHECK:       casx [%o1], %o0, [[R]]
+
+define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
+entry:
+  %b = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic
+  ret i64 %b
+}
+
+; CHECK-LABEL: test_swap_i32
+; CHECK:       or  %g0, 42, [[R:%[gilo][0-7]]]
+; CHECK:       swap [%o1], [[R]]
+
+define i32 @test_swap_i32(i32 %a, i32* %ptr) {
+entry:
+  %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
+  ret i32 %b
+}