//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace clang;
using namespace CodeGen;

// The ABI values for various atomic memory orderings.
enum AtomicOrderingKind {
  AO_ABI_memory_order_relaxed = 0,
  AO_ABI_memory_order_consume = 1,
  AO_ABI_memory_order_acquire = 2,
  AO_ABI_memory_order_release = 3,
  AO_ABI_memory_order_acq_rel = 4,
  AO_ABI_memory_order_seq_cst = 5
};

namespace {
  class AtomicInfo {
    CodeGenFunction &CGF;
    QualType AtomicTy;
    QualType ValueTy;
    uint64_t AtomicSizeInBits;
    uint64_t ValueSizeInBits;
    CharUnits AtomicAlign;
    CharUnits ValueAlign;
    CharUnits LValueAlign;
    TypeEvaluationKind EvaluationKind;
    bool UseLibcall;
  public:
    AtomicInfo(CodeGenFunction &CGF, LValue &lvalue) : CGF(CGF) {
      assert(lvalue.isSimple());

      AtomicTy = lvalue.getType();
      ValueTy = AtomicTy->castAs<AtomicType>()->getValueType();
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      ASTContext &C = CGF.getContext();

      uint64_t valueAlignInBits;
      llvm::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);

      uint64_t atomicAlignInBits;
      llvm::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      UseLibcall =
        (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
         AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
    }

    QualType getAtomicType() const { return AtomicTy; }
    QualType getValueType() const { return ValueTy; }
    CharUnits getAtomicAlignment() const { return AtomicAlign; }
    CharUnits getValueAlignment() const { return ValueAlign; }
    uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
    uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
    TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
    bool shouldUseLibcall() const { return UseLibcall; }

    /// Is the atomic size larger than the underlying value type?
    ///
    /// Note that the absence of padding does not mean that atomic
    /// objects are completely interchangeable with non-atomic
    /// objects: we might have promoted the alignment of a type
    /// without making it bigger.
    bool hasPadding() const {
      return (ValueSizeInBits != AtomicSizeInBits);
    }

    void emitMemSetZeroIfNecessary(LValue dest) const;

    llvm::Value *getAtomicSizeValue() const {
      CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
      return CGF.CGM.getSize(size);
    }

    /// Cast the given pointer to an integer pointer suitable for
    /// atomic operations.
    llvm::Value *emitCastToAtomicIntPointer(llvm::Value *addr) const;

    /// Turn an atomic-layout object into an r-value.
    RValue convertTempToRValue(llvm::Value *addr,
                               AggValueSlot resultSlot) const;

    /// Copy an atomic r-value into atomic-layout memory.
    void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;

    /// Project an l-value down to the value field.
    LValue projectValue(LValue lvalue) const {
      llvm::Value *addr = lvalue.getAddress();
      if (hasPadding())
        addr = CGF.Builder.CreateStructGEP(addr, 0);

      return LValue::MakeAddr(addr, getValueType(), lvalue.getAlignment(),
                              CGF.getContext(), lvalue.getTBAAInfo());
    }

    /// Materialize an atomic r-value in atomic-layout memory.
    llvm::Value *materializeRValue(RValue rvalue) const;

  private:
    bool requiresMemSetZero(llvm::Type *type) const;
  };
}

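/// Emit a call to one of the __atomic_* runtime library routines, such as
/// __atomic_load or __atomic_store.  The function declaration is created on
/// demand from the given name, and the call is emitted as an ordinary free
/// function call using the already-built argument list.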
static RValue emitAtomicLibcall(CodeGenFunction &CGF,
                                StringRef fnName,
                                QualType resultType,
                                CallArgList &args) {
  const CGFunctionInfo &fnInfo =
    CGF.CGM.getTypes().arrangeFreeFunctionCall(resultType, args,
            FunctionType::ExtInfo(), RequiredArgs::All);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
  return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Just be pessimistic about aggregates.
  case TEK_Aggregate:
    return true;
  }
  llvm_unreachable("bad evaluation kind");
}

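/// Zero out the atomic object's storage if its in-memory representation may
/// contain padding or otherwise uninitialized bits, so that operations which
/// compare the full bit-pattern (such as compare-and-exchange) see a
/// well-defined value.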
void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
  llvm::Value *addr = dest.getAddress();
  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
    return;

  CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
                           AtomicSizeInBits / 8,
                           dest.getAlignment().getQuantity());
}

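/// Emit a single native atomic operation for the given atomic expression:
/// a cmpxchg, an atomic load, an atomic store, or an atomicrmw at the
/// requested ordering.  For the __atomic_*_fetch forms, the operation is
/// re-applied to the atomicrmw result to recover the post-operation value.
/// For example, a 32-bit __c11_atomic_fetch_add lowers to roughly:
///   %old = atomicrmw add i32* %ptr, i32 %val seq_cst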
static void
EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
             llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
             uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    // Note that cmpxchg only supports specifying one ordering and
    // doesn't support weak cmpxchg, at least at the moment.
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
    LoadVal2->setAlignment(Align);
    llvm::AtomicCmpXchgInst *CXI =
        CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
    CXI->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
    StoreVal1->setAlignment(Align);
    llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
    CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order);
    Load->setAlignment(Size);
    Load->setVolatile(E->isVolatile());
    llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
    StoreDest->setAlignment(Align);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    assert(!Dest && "Store does not return a value");
    llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    LoadVal1->setAlignment(Align);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order);
    Store->setAlignment(Size);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    // Fall through.
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  LoadVal1->setAlignment(Align);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOp)
    Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
  StoreDest->setAlignment(Align);
}

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static llvm::Value *
EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

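/// Add the value argument for an atomic libcall.  Optimized, size-suffixed
/// libcalls take the value directly, so load it as a scalar; the generic
/// libcalls take every operand by address, so pass the pointer as a void*.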
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
    Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy);
    Args.add(RValue::get(Val), ValTy);
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
                         CGF.getContext().VoidPtrTy);
  }
}

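/// Emit an atomic builtin (__c11_atomic_* or GNU __atomic_*) expression,
/// e.g. __c11_atomic_fetch_add(&counter, 1, memory_order_seq_cst).
/// Small, naturally aligned objects are lowered to native LLVM atomic
/// instructions; everything else goes through the __atomic_* libcalls.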
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
  uint64_t Size = sizeChars.getQuantity();
  CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
  unsigned Align = alignChars.getQuantity();
  unsigned MaxInlineWidthInBits =
    getTarget().getMaxAtomicInlineWidth();
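  // Use the inline path only when the object's size matches its alignment
  // and does not exceed the target's maximum inline atomic width; otherwise
  // fall back to the __atomic_* library calls.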
  bool UseLibcall = (Size != Align ||
                     getContext().toBits(sizeChars) > MaxInlineWidthInBits);

  llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
  Ptr = EmitScalarExpr(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
    assert(!Dest && "Init does not return a value");
    LValue lvalue = LValue::MakeAddr(Ptr, AtomicTy, alignChars, getContext());
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(0);
  }

  Order = EmitScalarExpr(E->getOrder());

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitScalarExpr(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    Dest = EmitScalarExpr(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitScalarExpr(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitScalarExpr(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    // Evaluate and discard the 'weak' argument.
    if (E->getNumSubExprs() == 6)
      EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
      break;
    }
    // Fall through.
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  if (!E->getType()->isVoidType() && !Dest)
    Dest = CreateMemTemp(E->getType(), ".atomicdst");

  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;
    default:
      // Only use optimized library calls for sizes for which they exist.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }

    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the first or second parameter.
    Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
    //                                  int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
               getContext().VoidPtrTy);
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy);
      Args.add(RValue::get(Order),
               getContext().IntTy);
      Order = OrderFail;
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    // T __atomic_exchange_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
      LibCallName = "__atomic_exchange";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // void __atomic_store(size_t size, void *mem, void *val, int order)
    // void __atomic_store_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // void __atomic_load(size_t size, void *mem, void *return, int order)
    // T __atomic_load_N(T *mem, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy);
      break;
    default: return EmitUnsupportedRValue(E, "atomic library call");
    }

    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        RetTy = MemTy;
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
                 getContext().VoidPtrTy);
      }
    }
    // The order is always the last parameter.
    Args.add(RValue::get(Order),
             getContext().IntTy);

    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
            FunctionType::ExtInfo(), RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
    if (!RetTy->isVoidType())
      return Res;
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(Dest, E->getType());
  }

  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;

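  // The inline path operates on an integer of the same width as the atomic
  // object, so reinterpret the involved pointers as iN* before emitting the
  // instructions.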
  llvm::Type *IPtrTy =
      llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
  llvm::Value *OrigDest = Dest;
  Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
  if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
  if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
  if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);

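  // If the memory ordering is a compile-time constant, emit exactly one
  // atomic operation at the corresponding LLVM ordering.  Orderings that are
  // invalid for the operation (e.g. an acquire store) are dropped rather
  // than emitted, to avoid crashing on code with undefined behavior.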
  if (isa<llvm::ConstantInt>(Order)) {
    int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    switch (ord) {
    case AO_ABI_memory_order_relaxed:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Monotonic);
      break;
    case AO_ABI_memory_order_consume:
    case AO_ABI_memory_order_acquire:
      if (IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Acquire);
      break;
    case AO_ABI_memory_order_release:
      if (IsLoad)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::Release);
      break;
    case AO_ABI_memory_order_acq_rel:
      if (IsLoad || IsStore)
        break; // Avoid crashing on code with undefined behavior
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::AcquireRelease);
      break;
    case AO_ABI_memory_order_seq_cst:
      EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                   llvm::SequentiallyConsistent);
      break;
    default: // invalid order
      // We should not ever get here normally, but it's hard to
      // enforce that in general.
      break;
    }
    if (E->getType()->isVoidType())
      return RValue::get(0);
    return convertTempToRValue(OrigDest, E->getType());
  }

  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
                   *AcqRelBB = 0, *SeqCstBB = 0;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::Monotonic);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Acquire);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::Release);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
                 llvm::AcquireRelease);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
               llvm::SequentiallyConsistent);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32(5), SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
  if (E->getType()->isVoidType())
    return RValue::get(0);
  return convertTempToRValue(OrigDest, E->getType());
}

llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
  unsigned addrspace =
    cast<llvm::PointerType>(addr->getType())->getAddressSpace();
  llvm::IntegerType *ty =
    llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
}

RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
                                       AggValueSlot resultSlot) const {
  if (EvaluationKind == TEK_Aggregate) {
    // Nothing to do if the result is ignored.
    if (resultSlot.isIgnored()) return resultSlot.asRValue();

    assert(resultSlot.getAddr() == addr || hasPadding());

    // In these cases, we should have emitted directly into the result slot.
    if (!hasPadding() || resultSlot.isValueOfAtomic())
      return resultSlot.asRValue();

    // Otherwise, fall into the common path.
  }

  // Drill into the padding structure if we have one.
  if (hasPadding())
    addr = CGF.Builder.CreateStructGEP(addr, 0);

  // If we're emitting to an aggregate, copy into the result slot.
  if (EvaluationKind == TEK_Aggregate) {
    CGF.EmitAggregateCopy(resultSlot.getAddr(), addr, getValueType(),
                          resultSlot.isVolatile());
    return resultSlot.asRValue();
  }

  // Otherwise, just convert the temporary to an r-value using the
  // normal conversion routine.
  return CGF.convertTempToRValue(addr, getValueType());
}

/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
  AtomicInfo atomics(*this, src);

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    llvm::Value *tempAddr;
    if (resultSlot.isValueOfAtomic()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getPaddedAtomicAddr();
    } else if (!resultSlot.isIgnored() && !atomics.hasPadding()) {
      assert(atomics.getEvaluationKind() == TEK_Aggregate);
      tempAddr = resultSlot.getAddr();
    } else {
      tempAddr = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    }

    // void __atomic_load(size_t size, void *mem, void *return, int order);
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(src.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(tempAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);

    // Produce the r-value.
    return atomics.convertTempToRValue(tempAddr, resultSlot);
  }

  // Okay, we're doing this natively.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(src.getAddress());
  llvm::LoadInst *load = Builder.CreateLoad(addr, "atomic-load");
  load->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  load->setAlignment(src.getAlignment().getQuantity());
  if (src.isVolatileQualified())
    load->setVolatile(true);
  if (src.getTBAAInfo())
    CGM.DecorateInstruction(load, src.getTBAAInfo());

  // Okay, turn that back into the original value type.
  QualType valueType = atomics.getValueType();
  llvm::Value *result = load;

  // If we're ignoring an aggregate return, don't do anything.
  if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(0, false);

  // The easiest way to do this is to go through memory, but we
  // try not to in some easy cases.
  if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
    llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
    if (isa<llvm::IntegerType>(resultTy)) {
      assert(result->getType() == resultTy);
      result = EmitFromMemory(result, valueType);
    } else if (isa<llvm::PointerType>(resultTy)) {
      result = Builder.CreateIntToPtr(result, resultTy);
    } else {
      result = Builder.CreateBitCast(result, resultTy);
    }
    return RValue::get(result);
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  llvm::Value *temp;
  bool tempIsVolatile = false;
  CharUnits tempAlignment;
  if (atomics.getEvaluationKind() == TEK_Aggregate &&
      (!atomics.hasPadding() || resultSlot.isValueOfAtomic())) {
    assert(!resultSlot.isIgnored());
    if (resultSlot.isValueOfAtomic()) {
      temp = resultSlot.getPaddedAtomicAddr();
      tempAlignment = atomics.getAtomicAlignment();
    } else {
      temp = resultSlot.getAddr();
      tempAlignment = atomics.getValueAlignment();
    }
    tempIsVolatile = resultSlot.isVolatile();
  } else {
    temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
    tempAlignment = atomics.getAtomicAlignment();
  }

  // Slam the integer into the temporary.
  llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
  Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
    ->setVolatile(tempIsVolatile);

  return atomics.convertTempToRValue(temp, resultSlot);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue, LValue dest) const {
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    CGF.EmitAggregateCopy(dest.getAddress(),
                          rvalue.getAggregateAddr(),
                          getAtomicType(),
                          (rvalue.isVolatileQualified()
                           || dest.isVolatileQualified()),
                          dest.getAlignment());
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary(dest);

  // Drill past the padding if present.
  dest = projectValue(dest);

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), dest, /*init*/ true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), dest, /*init*/ true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddr();

  // Otherwise, make a temporary and materialize into it.
  llvm::Value *temp = CGF.CreateMemTemp(getAtomicType(), "atomic-store-temp");
  LValue tempLV = CGF.MakeAddrLValue(temp, getAtomicType(),
                                     getAtomicAlignment());
  emitCopyIntoMemory(rvalue, tempLV);
  return temp;
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddr()->getType()->getPointerElementType()
           == dest.getAddress()->getType()->getPointerElementType());

  AtomicInfo atomics(*this, dest);

  // If this is an initialization, just put the value there normally.
  if (isInit) {
    atomics.emitCopyIntoMemory(rvalue, dest);
    return;
  }

  // Check whether we should use a library call.
  if (atomics.shouldUseLibcall()) {
    // Produce a source address.
    llvm::Value *srcAddr = atomics.materializeRValue(rvalue);

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    CallArgList args;
    args.add(RValue::get(atomics.getAtomicSizeValue()),
             getContext().getSizeType());
    args.add(RValue::get(EmitCastToVoidPtr(dest.getAddress())),
             getContext().VoidPtrTy);
    args.add(RValue::get(EmitCastToVoidPtr(srcAddr)),
             getContext().VoidPtrTy);
    args.add(RValue::get(llvm::ConstantInt::get(IntTy,
                                                AO_ABI_memory_order_seq_cst)),
             getContext().IntTy);
    emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
    return;
  }

  // Okay, we're doing this natively.
  llvm::Value *intValue;

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (rvalue.isScalar() && !atomics.hasPadding()) {
    llvm::Value *value = rvalue.getScalarVal();
    if (isa<llvm::IntegerType>(value->getType())) {
      intValue = value;
    } else {
      llvm::IntegerType *inputIntTy =
        llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
      if (isa<llvm::PointerType>(value->getType())) {
        intValue = Builder.CreatePtrToInt(value, inputIntTy);
      } else {
        intValue = Builder.CreateBitCast(value, inputIntTy);
      }
    }

  // Otherwise, we need to go through memory.
  } else {
    // Put the r-value in memory.
    llvm::Value *addr = atomics.materializeRValue(rvalue);

    // Cast the temporary to the atomic int type and pull a value out.
    addr = atomics.emitCastToAtomicIntPointer(addr);
    intValue = Builder.CreateAlignedLoad(addr,
                                 atomics.getAtomicAlignment().getQuantity());
  }

  // Do the atomic store.
  llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

  // Initializations don't need to be atomic.
  if (!isInit) store->setAtomic(llvm::SequentiallyConsistent);

  // Other decoration.
  store->setAlignment(dest.getAlignment().getQuantity());
  if (dest.isVolatileQualified())
    store->setVolatile(true);
  if (dest.getTBAAInfo())
    CGM.DecorateInstruction(store, dest.getTBAAInfo());
}

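/// Emit the initialization of an object of atomic type from a non-atomic
/// initializer expression.  Initialization does not need to be atomic, so
/// the value is simply copied into place; aggregate buffers are zeroed
/// first if they might contain uninitialized padding bits.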
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value), dest);
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value), dest);
    return;
  }

  case TEK_Aggregate: {
    // Memset the buffer first if there's any possibility of
    // uninitialized internal bits.
    atomics.emitMemSetZeroIfNecessary(dest);

    // HACK: whether the initializer actually has an atomic type
    // doesn't really seem reliable right now.
    if (!init->getType()->isAtomicType()) {
      dest = atomics.projectValue(dest);
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(dest,
                                        AggValueSlot::IsNotDestructed,
                                        AggValueSlot::DoesNotNeedGCBarriers,
                                        AggValueSlot::IsNotAliased);
    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}