//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Aggregate Expr nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                        Aggregate Expression Emitter
//===----------------------------------------------------------------------===//

namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
  CodeGenFunction &CGF;
  CGBuilderTy &Builder;
  AggValueSlot Dest;

  /// We want to use 'dest' as the return slot except under two
  /// conditions:
  ///   - The destination slot requires garbage collection, so we
  ///     need to use the GC API.
  ///   - The destination slot is potentially aliased.
  bool shouldUseDestForReturnSlot() const {
    return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
  }

  ReturnValueSlot getReturnValueSlot() const {
    if (!shouldUseDestForReturnSlot())
      return ReturnValueSlot();

    return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
  }

  AggValueSlot EnsureSlot(QualType T) {
    if (!Dest.isIgnored()) return Dest;
    return CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }
  void EnsureDest(QualType T) {
    if (!Dest.isIgnored()) return;
    Dest = CGF.CreateAggTemp(T, "agg.tmp.ensured");
  }

public:
  AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest)
    : CGF(cgf), Builder(CGF.Builder), Dest(Dest) {
  }

  //===--------------------------------------------------------------------===//
  //                               Utilities
  //===--------------------------------------------------------------------===//

  /// EmitAggLoadOfLValue - Given an expression with aggregate type that
  /// represents an lvalue, this method emits the address of the lvalue,
  /// then loads the result into DestPtr.
  void EmitAggLoadOfLValue(const Expr *E);

  /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
  void EmitFinalDestCopy(QualType type, const LValue &src);
  void EmitFinalDestCopy(QualType type, RValue src,
                         CharUnits srcAlignment = CharUnits::Zero());
  void EmitCopy(QualType type, const AggValueSlot &dest,
                const AggValueSlot &src);

  void EmitMoveFromReturnSlot(const Expr *E, RValue Src);

  void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                     QualType elementType, InitListExpr *E);

  AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
    if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
      return AggValueSlot::NeedsGCBarriers;
    return AggValueSlot::DoesNotNeedGCBarriers;
  }

  bool TypeRequiresGCollection(QualType T);

  //===--------------------------------------------------------------------===//
  //                            Visitor Methods
  //===--------------------------------------------------------------------===//

  void VisitStmt(Stmt *S) {
    CGF.ErrorUnsupported(S, "aggregate expression");
  }
  void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
  void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
    Visit(GE->getResultExpr());
  }
  void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
  void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
    return Visit(E->getReplacement());
  }

  // l-values.
  void VisitDeclRefExpr(DeclRefExpr *E) {
    // For aggregates, we should always be able to emit the variable
    // as an l-value unless it's a reference.  This is due to the fact
    // that we can't actually ever see a normal l2r conversion on an
    // aggregate in C++, and in C there's no language standard
    // actively preventing us from listing variables in the captures
    // list of a block.
    if (E->getDecl()->getType()->isReferenceType()) {
      if (CodeGenFunction::ConstantEmission result
            = CGF.tryEmitAsConstant(E)) {
        EmitFinalDestCopy(E->getType(), result.getReferenceLValue(CGF, E));
        return;
      }
    }

    EmitAggLoadOfLValue(E);
  }

  void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
  void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
  void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
  void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
  void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
    EmitAggLoadOfLValue(E);
  }
  void VisitPredefinedExpr(const PredefinedExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  // Operators.
  void VisitCastExpr(CastExpr *E);
  void VisitCallExpr(const CallExpr *E);
  void VisitStmtExpr(const StmtExpr *E);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
  void VisitBinAssign(const BinaryOperator *E);
  void VisitBinComma(const BinaryOperator *E);

  void VisitObjCMessageExpr(ObjCMessageExpr *E);
  void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
    EmitAggLoadOfLValue(E);
  }

  void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
  void VisitChooseExpr(const ChooseExpr *CE);
  void VisitInitListExpr(InitListExpr *E);
  void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
  void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
    Visit(DAE->getExpr());
  }
  void VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
    CodeGenFunction::CXXDefaultInitExprScope Scope(CGF);
    Visit(DIE->getExpr());
  }
  void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
  void VisitCXXConstructExpr(const CXXConstructExpr *E);
  void VisitLambdaExpr(LambdaExpr *E);
  void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
  void VisitExprWithCleanups(ExprWithCleanups *E);
  void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
  void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
  void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
  void VisitOpaqueValueExpr(OpaqueValueExpr *E);

  void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
    if (E->isGLValue()) {
      LValue LV = CGF.EmitPseudoObjectLValue(E);
      return EmitFinalDestCopy(E->getType(), LV);
    }

    CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
  }

  void VisitVAArgExpr(VAArgExpr *E);

  void EmitInitializationToLValue(Expr *E, LValue Address);
  void EmitNullInitializationToLValue(LValue Address);
  //  case Expr::ChooseExprClass:
  void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
  void VisitAtomicExpr(AtomicExpr *E) {
    CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
  }
};
}  // end anonymous namespace.

//===----------------------------------------------------------------------===//
//                                Utilities
//===----------------------------------------------------------------------===//

/// EmitAggLoadOfLValue - Given an expression with aggregate type that
/// represents an lvalue, this method emits the address of the lvalue,
/// then loads the result into DestPtr.
void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
  LValue LV = CGF.EmitLValue(E);

  // If the type of the l-value is atomic, then do an atomic load.
  if (LV.getType()->isAtomicType()) {
    CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
    return;
  }

  EmitFinalDestCopy(E->getType(), LV);
}

/// \brief True if the given aggregate type requires special GC API calls.
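/// Under -fobjc-gc, for instance, a C struct containing a __strong object
/// pointer member has to be copied with the collector-aware memmove
/// rather than with a plain memcpy.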
bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
  // Only record types have members that might require garbage collection.
  const RecordType *RecordTy = T->getAs<RecordType>();
  if (!RecordTy) return false;

  // Don't mess with non-trivial C++ types.
  RecordDecl *Record = RecordTy->getDecl();
  if (isa<CXXRecordDecl>(Record) &&
      (cast<CXXRecordDecl>(Record)->hasNonTrivialCopyConstructor() ||
       !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
    return false;

  // Check whether the type has an object member.
  return Record->hasObjectMember();
}

/// \brief Perform the final move to DestPtr if for some reason
/// getReturnValueSlot() didn't use it directly.
///
/// The idea is that you do something like this:
///   RValue Result = EmitSomething(..., getReturnValueSlot());
///   EmitMoveFromReturnSlot(E, Result);
///
/// If nothing interferes, this will cause the result to be emitted
/// directly into the return value slot.  Otherwise, a final move
/// will be performed.
void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue src) {
  if (shouldUseDestForReturnSlot()) {
    // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
    // The possibility of undef rvalues complicates that a lot,
    // though, so we can't really assert.
    return;
  }

  // Otherwise, copy from there to the destination.
  assert(Dest.getAddr() != src.getAggregateAddr());
  std::pair<CharUnits, CharUnits> typeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  EmitFinalDestCopy(E->getType(), src, typeInfo.second);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, RValue src,
                                       CharUnits srcAlign) {
  assert(src.isAggregate() && "value must be aggregate value!");
  LValue srcLV = CGF.MakeAddrLValue(src.getAggregateAddr(), type, srcAlign);
  EmitFinalDestCopy(type, srcLV);
}

/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src) {
  // If Dest is ignored, then we're evaluating an aggregate expression
  // in a context that doesn't care about the result.  Note that loads
  // from volatile l-values force the existence of a non-ignored
  // destination.
  if (Dest.isIgnored())
    return;

  AggValueSlot srcAgg =
    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
                            needsGC(type), AggValueSlot::IsAliased);
  EmitCopy(type, Dest, srcAgg);
}

/// Perform a copy from the source into the destination.
///
/// \param type - the type of the aggregate being copied; qualifiers are
///   ignored
void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
                              const AggValueSlot &src) {
  if (dest.requiresGCollection()) {
    CharUnits sz = CGF.getContext().getTypeSizeInChars(type);
    llvm::Value *size = llvm::ConstantInt::get(CGF.SizeTy, sz.getQuantity());
    CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
                                                      dest.getAddr(),
                                                      src.getAddr(),
                                                      size);
    return;
  }

  // If the result of the assignment is used, copy the LHS there also.
  // It's volatile if either side is.  Use the minimum alignment of
  // the two sides.
  CGF.EmitAggregateCopy(dest.getAddr(), src.getAddr(), type,
                        dest.isVolatile() || src.isVolatile(),
                        std::min(dest.getAlignment(), src.getAlignment()));
}

/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
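///
/// A sketch of the lowering (which of the two layouts applies depends on
/// the library's definition of std::initializer_list):
///   std::initializer_list<int> il = { 1, 2, 3 };
/// emits a backing 'int [3]' array, then stores the array's start address
/// and either its end address or its length into the two fields.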
void
AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
  // Emit an array containing the elements.  The array is externally destructed
  // if the std::initializer_list object is.
  ASTContext &Ctx = CGF.getContext();
  LValue Array = CGF.EmitLValue(E->getSubExpr());
  assert(Array.isSimple() && "initializer_list array not a simple lvalue");
  llvm::Value *ArrayPtr = Array.getAddress();

  const ConstantArrayType *ArrayType =
      Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
  assert(ArrayType && "std::initializer_list constructed from non-array");

  // FIXME: Perform the checks on the field types in SemaInit.
  RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
  RecordDecl::field_iterator Field = Record->field_begin();
  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  // Start pointer.
  if (!Field->getType()->isPointerType() ||
      !Ctx.hasSameType(Field->getType()->getPointeeType(),
                       ArrayType->getElementType())) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  AggValueSlot Dest = EnsureSlot(E->getType());
  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());
  LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
  llvm::Value *IdxStart[] = { Zero, Zero };
  llvm::Value *ArrayStart =
      Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
  CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
  ++Field;

  if (Field == Record->field_end()) {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }

  llvm::Value *Size = Builder.getInt(ArrayType->getSize());
  LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
  if (Field->getType()->isPointerType() &&
      Ctx.hasSameType(Field->getType()->getPointeeType(),
                      ArrayType->getElementType())) {
    // End pointer.
    llvm::Value *IdxEnd[] = { Zero, Size };
    llvm::Value *ArrayEnd =
        Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
    CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
  } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
    // Length.
    CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
  } else {
    CGF.ErrorUnsupported(E, "weird std::initializer_list");
    return;
  }
}

/// \brief Emit initialization of an array from an initializer list.
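///
/// The strategy, in outline: each explicit initializer gets an individual
/// store, and any remaining elements are filled in with a loop.  For
/// 'int a[4] = { 1, 2 };', say, elements 0 and 1 get direct stores while
/// elements 2 and 3 are zero-initialized by the fill loop below.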
void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
                                   QualType elementType, InitListExpr *E) {
  uint64_t NumInitElements = E->getNumInits();

  uint64_t NumArrayElements = AType->getNumElements();
  assert(NumInitElements <= NumArrayElements);

  // DestPtr is an array*.  Construct an elementType* by drilling
  // down a level.
  llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
  llvm::Value *indices[] = { zero, zero };
  llvm::Value *begin =
    Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");

  // Exception safety requires us to destroy all the
  // already-constructed members if an initializer throws.
  // For that, we'll need an EH cleanup.
  QualType::DestructionKind dtorKind = elementType.isDestructedType();
  llvm::AllocaInst *endOfInit = 0;
  EHScopeStack::stable_iterator cleanup;
  llvm::Instruction *cleanupDominator = 0;
  if (CGF.needsEHCleanup(dtorKind)) {
    // In principle we could tell the cleanup where we are more
    // directly, but the control flow can get so varied here that it
    // would actually be quite complex.  Therefore we go through an
    // alloca.
    endOfInit = CGF.CreateTempAlloca(begin->getType(),
                                     "arrayinit.endOfInit");
    cleanupDominator = Builder.CreateStore(begin, endOfInit);
    CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
                                         CGF.getDestroyer(dtorKind));
    cleanup = CGF.EHStack.stable_begin();

  // Otherwise, remember that we didn't need a cleanup.
  } else {
    dtorKind = QualType::DK_none;
  }

  llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);

  // The 'current element to initialize'.  The invariants on this
  // variable are complicated.  Essentially, after each iteration of
  // the loop, it points to the last initialized element, except
  // that it points to the beginning of the array before any
  // elements have been initialized.
  llvm::Value *element = begin;

  // Emit the explicit initializers.
  for (uint64_t i = 0; i != NumInitElements; ++i) {
    // Advance to the next element.
    if (i > 0) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");

      // Tell the cleanup that it needs to destroy up to this
      // element.  TODO: some of these stores can be trivially
      // observed to be unnecessary.
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    LValue elementLV = CGF.MakeAddrLValue(element, elementType);
    EmitInitializationToLValue(E->getInit(i), elementLV);
  }

  // Check whether there's a non-trivial array-fill expression.
  // Note that this will be a CXXConstructExpr even if the element
  // type is an array (or array of array, etc.) of class type.
  Expr *filler = E->getArrayFiller();
  bool hasTrivialFiller = true;
  if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
    assert(cons->getConstructor()->isDefaultConstructor());
    hasTrivialFiller = cons->getConstructor()->isTrivial();
  }

  // Any remaining elements need to be zero-initialized, possibly
  // using the filler expression.  We can skip this if we're
  // emitting to zeroed memory.
  if (NumInitElements != NumArrayElements &&
      !(Dest.isZeroed() && hasTrivialFiller &&
        CGF.getTypes().isZeroInitializable(elementType))) {

    // Use an actual loop.  This is basically
    //   do { *array++ = filler; } while (array != end);

    // Advance to the start of the rest of the array.
    if (NumInitElements) {
      element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
      if (endOfInit) Builder.CreateStore(element, endOfInit);
    }

    // Compute the end of the array.
    llvm::Value *end = Builder.CreateInBoundsGEP(begin,
                      llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
                                                 "arrayinit.end");

    llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
    llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");

    // Jump into the body.
    CGF.EmitBlock(bodyBB);
    llvm::PHINode *currentElement =
      Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
    currentElement->addIncoming(element, entryBB);

    // Emit the actual filler expression.
    LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
    if (filler)
      EmitInitializationToLValue(filler, elementLV);
    else
      EmitNullInitializationToLValue(elementLV);

    // Move on to the next element.
    llvm::Value *nextElement =
      Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");

    // Tell the EH cleanup that we finished with the last element.
    if (endOfInit) Builder.CreateStore(nextElement, endOfInit);

    // Leave the loop if we're done.
    llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
                                             "arrayinit.done");
    llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
    Builder.CreateCondBr(done, endBB, bodyBB);
    currentElement->addIncoming(nextElement, Builder.GetInsertBlock());

    CGF.EmitBlock(endBB);
  }

  // Leave the partial-array cleanup if we entered one.
  if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
}

//===----------------------------------------------------------------------===//
//                            Visitor Methods
//===----------------------------------------------------------------------===//

void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
  Visit(E->GetTemporaryExpr());
}

void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
  EmitFinalDestCopy(e->getType(), CGF.getOpaqueLValueMapping(e));
}

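/// A compound literal such as 's = (struct S){ s.y, s.x };' may alias its
/// destination, so for POD types we evaluate the literal in its own
/// location and copy, rather than emitting it directly into 's'.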
void
AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
  if (Dest.isPotentiallyAliased() &&
      E->getType().isPODType(CGF.getContext())) {
    // For a POD type, just emit a load of the lvalue + a copy, because our
    // compound literal might alias the destination.
    EmitAggLoadOfLValue(E);
    return;
  }

  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitAggExpr(E->getInitializer(), Slot);
}

/// Attempt to look through various unimportant expressions to find a
/// cast of the given kind.
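///
/// For example, an AtomicToNonAtomic cast wrapped (possibly through parens
/// and no-op casts) around a NonAtomicToAtomic cast cancels out, letting
/// the caller emit the innermost operand directly.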
static Expr *findPeephole(Expr *op, CastKind kind) {
  while (true) {
    op = op->IgnoreParens();
    if (CastExpr *castE = dyn_cast<CastExpr>(op)) {
      if (castE->getCastKind() == kind)
        return castE->getSubExpr();
      if (castE->getCastKind() == CK_NoOp)
        continue;
    }
    return 0;
  }
}

void AggExprEmitter::VisitCastExpr(CastExpr *E) {
  switch (E->getCastKind()) {
  case CK_Dynamic: {
    // FIXME: Can this actually happen? We have no test coverage for it.
    assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
    LValue LV = CGF.EmitCheckedLValue(E->getSubExpr(),
                                      CodeGenFunction::TCK_Load);
    // FIXME: Do we also need to handle property references here?
    if (LV.isSimple())
      CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
    else
      CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");

    if (!Dest.isIgnored())
      CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
    break;
  }

  case CK_ToUnion: {
    if (Dest.isIgnored()) break;

    // GCC union extension
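    // (e.g. given 'union U { int i; float f; };', the cast '(union U)42'
    // initializes the 'i' member of the destination in place).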
    QualType Ty = E->getSubExpr()->getType();
    QualType PtrTy = CGF.getContext().getPointerType(Ty);
    llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
                                                 CGF.ConvertType(PtrTy));
    EmitInitializationToLValue(E->getSubExpr(),
                               CGF.MakeAddrLValue(CastPtr, Ty));
    break;
  }

  case CK_DerivedToBase:
  case CK_BaseToDerived:
  case CK_UncheckedDerivedToBase: {
    llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
                "should have been unpacked before we got here");
  }

  case CK_NonAtomicToAtomic:
  case CK_AtomicToNonAtomic: {
    bool isToAtomic = (E->getCastKind() == CK_NonAtomicToAtomic);

    // Determine the atomic and value types.
    QualType atomicType = E->getSubExpr()->getType();
    QualType valueType = E->getType();
    if (isToAtomic) std::swap(atomicType, valueType);

    assert(atomicType->isAtomicType());
    assert(CGF.getContext().hasSameUnqualifiedType(valueType,
                          atomicType->castAs<AtomicType>()->getValueType()));

    // Just recurse normally if we're ignoring the result or the
    // atomic type doesn't change representation.
    if (Dest.isIgnored() || !CGF.CGM.isPaddedAtomicType(atomicType)) {
      return Visit(E->getSubExpr());
    }

    CastKind peepholeTarget =
      (isToAtomic ? CK_AtomicToNonAtomic : CK_NonAtomicToAtomic);

    // These two cases are reverses of each other; try to peephole them.
    if (Expr *op = findPeephole(E->getSubExpr(), peepholeTarget)) {
      assert(CGF.getContext().hasSameUnqualifiedType(op->getType(),
                                                     E->getType()) &&
           "peephole significantly changed types?");
      return Visit(op);
    }

    // If we're converting an r-value of non-atomic type to an r-value
    // of atomic type, just emit directly into the relevant sub-object.
    if (isToAtomic) {
      AggValueSlot valueDest = Dest;
      if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
        // Zero-initialize.  (Strictly speaking, we only need to initialize
        // the padding at the end, but this is simpler.)
        if (!Dest.isZeroed())
          CGF.EmitNullInitialization(Dest.getAddr(), atomicType);

        // Build a GEP to refer to the subobject.
        llvm::Value *valueAddr =
            CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0);
        valueDest = AggValueSlot::forAddr(valueAddr,
                                          valueDest.getAlignment(),
                                          valueDest.getQualifiers(),
                                          valueDest.isExternallyDestructed(),
                                          valueDest.requiresGCollection(),
                                          valueDest.isPotentiallyAliased(),
                                          AggValueSlot::IsZeroed);
      }

      CGF.EmitAggExpr(E->getSubExpr(), valueDest);
      return;
    }

    // Otherwise, we're converting an atomic type to a non-atomic type.
    // Make an atomic temporary, emit into that, and then copy the value out.
    AggValueSlot atomicSlot =
      CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
    CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);

    llvm::Value *valueAddr =
      Builder.CreateStructGEP(atomicSlot.getAddr(), 0);
    RValue rvalue = RValue::getAggregate(valueAddr, atomicSlot.isVolatile());
    return EmitFinalDestCopy(valueType, rvalue);
  }

  case CK_LValueToRValue:
    // If we're loading from a volatile type, force the destination
    // into existence.
    if (E->getSubExpr()->getType().isVolatileQualified()) {
      EnsureDest(E->getType());
      return Visit(E->getSubExpr());
    }

    // fallthrough

  case CK_NoOp:
  case CK_UserDefinedConversion:
  case CK_ConstructorConversion:
    assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
                                                   E->getType()) &&
           "Implicit cast types must be compatible");
    Visit(E->getSubExpr());
    break;

  case CK_LValueBitCast:
    llvm_unreachable("should not be emitting lvalue bitcast as rvalue");

  case CK_Dependent:
  case CK_BitCast:
  case CK_ArrayToPointerDecay:
  case CK_FunctionToPointerDecay:
  case CK_NullToPointer:
  case CK_NullToMemberPointer:
  case CK_BaseToDerivedMemberPointer:
  case CK_DerivedToBaseMemberPointer:
  case CK_MemberPointerToBoolean:
  case CK_ReinterpretMemberPointer:
  case CK_IntegralToPointer:
  case CK_PointerToIntegral:
  case CK_PointerToBoolean:
  case CK_ToVoid:
  case CK_VectorSplat:
  case CK_IntegralCast:
  case CK_IntegralToBoolean:
  case CK_IntegralToFloating:
  case CK_FloatingToIntegral:
  case CK_FloatingToBoolean:
  case CK_FloatingCast:
  case CK_CPointerToObjCPointerCast:
  case CK_BlockPointerToObjCPointerCast:
  case CK_AnyPointerToBlockPointerCast:
  case CK_ObjCObjectLValueCast:
  case CK_FloatingRealToComplex:
  case CK_FloatingComplexToReal:
  case CK_FloatingComplexToBoolean:
  case CK_FloatingComplexCast:
  case CK_FloatingComplexToIntegralComplex:
  case CK_IntegralRealToComplex:
  case CK_IntegralComplexToReal:
  case CK_IntegralComplexToBoolean:
  case CK_IntegralComplexCast:
  case CK_IntegralComplexToFloatingComplex:
  case CK_ARCProduceObject:
  case CK_ARCConsumeObject:
  case CK_ARCReclaimReturnedObject:
  case CK_ARCExtendBlockObject:
  case CK_CopyAndAutoreleaseBlockObject:
  case CK_BuiltinFnToFnPtr:
  case CK_ZeroToOCLEvent:
    llvm_unreachable("cast kind invalid for aggregate types");
  }
}

void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
  if (E->getCallReturnType()->isReferenceType()) {
    EmitAggLoadOfLValue(E);
    return;
  }

  RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
  RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
  EmitMoveFromReturnSlot(E, RV);
}

void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
  CGF.EmitIgnoredExpr(E->getLHS());
  Visit(E->getRHS());
}

void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
  CodeGenFunction::StmtExprEvaluation eval(CGF);
  CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
}

void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
  if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
    VisitPointerToDataMemberBinaryOperator(E);
  else
    CGF.ErrorUnsupported(E, "aggregate binary expression");
}

void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
                                                    const BinaryOperator *E) {
  LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
  EmitFinalDestCopy(E->getType(), LV);
}

/// Is the value of the given expression possibly a reference to or
/// into a __block variable?
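///
/// A sketch of the hazard this guards against: in
///   __block Agg a;  a = makeAggWhileCopyingABlock();
/// (a hypothetical call that copies a block capturing 'a'), the block copy
/// moves 'a' to the heap, so the address of 'a' must be recomputed after
/// the RHS has been evaluated.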
static bool isBlockVarRef(const Expr *E) {
  // Make sure we look through parens.
  E = E->IgnoreParens();

  // Check for a direct reference to a __block variable.
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
    const VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
    return (var && var->hasAttr<BlocksAttr>());
  }

  // More complicated stuff.

  // Binary operators.
  if (const BinaryOperator *op = dyn_cast<BinaryOperator>(E)) {
    // For an assignment or pointer-to-member operation, just care
    // about the LHS.
    if (op->isAssignmentOp() || op->isPtrMemOp())
      return isBlockVarRef(op->getLHS());

    // For a comma, just care about the RHS.
    if (op->getOpcode() == BO_Comma)
      return isBlockVarRef(op->getRHS());

    // FIXME: pointer arithmetic?
    return false;

  // Check both sides of a conditional operator.
  } else if (const AbstractConditionalOperator *op
               = dyn_cast<AbstractConditionalOperator>(E)) {
    return isBlockVarRef(op->getTrueExpr())
        || isBlockVarRef(op->getFalseExpr());

  // OVEs are required to support BinaryConditionalOperators.
  } else if (const OpaqueValueExpr *op
               = dyn_cast<OpaqueValueExpr>(E)) {
    if (const Expr *src = op->getSourceExpr())
      return isBlockVarRef(src);

  // Casts are necessary to get things like (*(int*)&var) = foo().
  // We don't really care about the kind of cast here, except
  // we don't want to look through l2r casts, because it's okay
  // to get the *value* in a __block variable.
  } else if (const CastExpr *cast = dyn_cast<CastExpr>(E)) {
    if (cast->getCastKind() == CK_LValueToRValue)
      return false;
    return isBlockVarRef(cast->getSubExpr());

  // Handle unary operators.  Again, just aggressively look through
  // it, ignoring the operation.
  } else if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E)) {
    return isBlockVarRef(uop->getSubExpr());

  // Look into the base of a field access.
  } else if (const MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
    return isBlockVarRef(mem->getBase());

  // Look into the base of a subscript.
  } else if (const ArraySubscriptExpr *sub = dyn_cast<ArraySubscriptExpr>(E)) {
    return isBlockVarRef(sub->getBase());
  }

  return false;
}

void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
  // For an assignment to work, the value on the right has
  // to be compatible with the value on the left.
  assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
                                                 E->getRHS()->getType())
         && "Invalid assignment");

  // If the LHS might be a __block variable, and the RHS can
  // potentially cause a block copy, we need to evaluate the RHS first
  // so that the assignment goes the right place.
  // This is pretty semantically fragile.
  if (isBlockVarRef(E->getLHS()) &&
      E->getRHS()->HasSideEffects(CGF.getContext())) {
    // Ensure that we have a destination, and evaluate the RHS into that.
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());

    // Now emit the LHS and copy into it.
    LValue LHS = CGF.EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

    // That copy is an atomic copy if the LHS is atomic.
    if (LHS.getType()->isAtomicType()) {
      CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
      return;
    }

    EmitCopy(E->getLHS()->getType(),
             AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                                     needsGC(E->getLHS()->getType()),
                                     AggValueSlot::IsAliased),
             Dest);
    return;
  }

  LValue LHS = CGF.EmitLValue(E->getLHS());

  // If we have an atomic type, evaluate into the destination and then
  // do an atomic copy.
  if (LHS.getType()->isAtomicType()) {
    EnsureDest(E->getRHS()->getType());
    Visit(E->getRHS());
    CGF.EmitAtomicStore(Dest.asRValue(), LHS, /*isInit*/ false);
    return;
  }

  // Codegen the RHS so that it stores directly into the LHS.
  AggValueSlot LHSSlot =
    AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
                            needsGC(E->getLHS()->getType()),
                            AggValueSlot::IsAliased);
  // A non-volatile aggregate destination might have a volatile member.
  if (!LHSSlot.isVolatile() &&
      CGF.hasVolatileMember(E->getLHS()->getType()))
    LHSSlot.setVolatile(true);

  CGF.EmitAggExpr(E->getRHS(), LHSSlot);

  // Copy into the destination if the assignment isn't ignored.
  EmitFinalDestCopy(E->getType(), LHS);
}

void AggExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);

  // Save whether the destination's lifetime is externally managed.
  bool isExternallyDestructed = Dest.isExternallyDestructed();

  eval.begin(CGF);
  CGF.EmitBlock(LHSBlock);
  Visit(E->getTrueExpr());
  eval.end(CGF);

  assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
  CGF.Builder.CreateBr(ContBlock);

  // If the result of an agg expression is unused, then the emission
  // of the LHS might need to create a destination slot.  That's fine
  // with us, and we can safely emit the RHS into the same slot, but
  // we shouldn't claim that it's already being destructed.
  Dest.setExternallyDestructed(isExternallyDestructed);

  eval.begin(CGF);
  CGF.EmitBlock(RHSBlock);
  Visit(E->getFalseExpr());
  eval.end(CGF);

  CGF.EmitBlock(ContBlock);
}

void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
  Visit(CE->getChosenSubExpr());
}

void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
  llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());

  if (!ArgPtr) {
    CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
    return;
  }

  EmitFinalDestCopy(VE->getType(), CGF.MakeAddrLValue(ArgPtr, VE->getType()));
}

void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
  // Ensure that we have a slot, but if we already do, remember
  // whether it was externally destructed.
  bool wasExternallyDestructed = Dest.isExternallyDestructed();
  EnsureDest(E->getType());

  // We're going to push a destructor if there isn't already one.
  Dest.setExternallyDestructed();

  Visit(E->getSubExpr());

  // Push that destructor we promised.
  if (!wasExternallyDestructed)
    CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
}

void
AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitCXXConstructExpr(E, Slot);
}

void
AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
  AggValueSlot Slot = EnsureSlot(E->getType());
  CGF.EmitLambdaExpr(E, Slot);
}

void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
  CGF.enterFullExpression(E);
  CodeGenFunction::RunCleanupsScope cleanups(CGF);
  Visit(E->getSubExpr());
}

void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
  QualType T = E->getType();
  AggValueSlot Slot = EnsureSlot(T);
  EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
}

/// isSimpleZero - If emitting this value will obviously just cause a store of
/// zero to memory, return true.  This can return false if uncertain, so it just
/// handles simple cases.
static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0
  if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
    return IL->getValue() == 0;
  // +0.0
  if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
    return FL->getValue().isPosZero();
  // int()
  if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
      CGF.getTypes().isZeroInitializable(E->getType()))
    return true;
  // (int*)0 - Null pointer expressions.
  if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
    return ICE->getCastKind() == CK_NullToPointer;
  // '\0'
  if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
    return CL->getValue() == 0;

  // Otherwise, hard case: conservatively return false.
  return false;
}


void
AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
  QualType type = LV.getType();
  // FIXME: Ignore result?
  // FIXME: Are initializers affected by volatile?
  if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
    // Storing "i32 0" to a zeroed memory location is a no-op.
    return;
  } else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
    return EmitNullInitializationToLValue(LV);
  } else if (type->isReferenceType()) {
    RValue RV = CGF.EmitReferenceBindingToExpr(E);
    return CGF.EmitStoreThroughLValue(RV, LV);
  }

  switch (CGF.getEvaluationKind(type)) {
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(E, LV, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
                                               AggValueSlot::IsDestructed,
                                      AggValueSlot::DoesNotNeedGCBarriers,
                                               AggValueSlot::IsNotAliased,
                                               Dest.isZeroed()));
    return;
  case TEK_Scalar:
    if (LV.isSimple()) {
      CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
    } else {
      CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
    }
    return;
  }
  llvm_unreachable("bad evaluation kind");
}

void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
  QualType type = lv.getType();

  // If the destination slot is already zeroed out before the aggregate is
  // copied into it, we don't have to emit any zeros here.
  if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
    return;

  if (CGF.hasScalarEvaluationKind(type)) {
    // For non-aggregates, we can store the appropriate null constant.
    llvm::Value *null = CGF.CGM.EmitNullConstant(type);
    // Note that the following is not equivalent to
    // EmitStoreThroughBitfieldLValue for ARC types.
    if (lv.isBitField()) {
      CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
    } else {
      assert(lv.isSimple());
      CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
    }
  } else {
    // There's a potential optimization opportunity in combining
    // memsets; that would be easy for arrays, but relatively
    // difficult for structures with the current code.
    CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
  }
}

void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
#if 0
  // FIXME: Assess perf here?  Figure out what cases are worth optimizing here
  // (Length of globals? Chunks of zeroed-out space?).
  //
  // If we can, prefer a copy from a global; this is a lot less code for long
  // globals, and it's easier for the current optimizers to analyze.
  if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
    llvm::GlobalVariable* GV =
    new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
                             llvm::GlobalValue::InternalLinkage, C, "");
    EmitFinalDestCopy(E->getType(), CGF.MakeAddrLValue(GV, E->getType()));
    return;
  }
#endif
  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  AggValueSlot Dest = EnsureSlot(E->getType());

  LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
                                     Dest.getAlignment());

  // Handle initialization of an array.
  if (E->getType()->isArrayType()) {
    if (E->isStringLiteralInit())
      return Visit(E->getInit(0));

    QualType elementType =
        CGF.getContext().getAsArrayType(E->getType())->getElementType();

    llvm::PointerType *APType =
      cast<llvm::PointerType>(Dest.getAddr()->getType());
    llvm::ArrayType *AType =
      cast<llvm::ArrayType>(APType->getElementType());

    EmitArrayInit(Dest.getAddr(), AType, elementType, E);
    return;
  }

  assert(E->getType()->isRecordType() && "Only support structs/unions here!");

  // Do struct initialization; this code just sets each individual member
  // to the appropriate value.  This makes bitfield support automatic;
  // the disadvantage is that the generated code is more difficult for
  // the optimizer, especially with bitfields.
  unsigned NumInitElements = E->getNumInits();
  RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();

  // Prepare a 'this' for CXXDefaultInitExprs.
  CodeGenFunction::FieldConstructionScope FCS(CGF, Dest.getAddr());

  if (record->isUnion()) {
    // Only initialize one field of a union. The field itself is
    // specified by the initializer list.
    if (!E->getInitializedFieldInUnion()) {
      // Empty union; we have nothing to do.

#ifndef NDEBUG
      // Make sure that it's really an empty union and not a failure of
      // semantic analysis.
      for (RecordDecl::field_iterator Field = record->field_begin(),
                                   FieldEnd = record->field_end();
           Field != FieldEnd; ++Field)
        assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
#endif
      return;
    }

    // FIXME: volatility
    FieldDecl *Field = E->getInitializedFieldInUnion();

    LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestLV, Field);
    if (NumInitElements) {
      // Store the initializer into the field
      EmitInitializationToLValue(E->getInit(0), FieldLoc);
    } else {
      // Default-initialize to null.
      EmitNullInitializationToLValue(FieldLoc);
    }

    return;
  }

  // We'll need to enter cleanup scopes in case any of the member
  // initializers throw an exception.
  SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
  llvm::Instruction *cleanupDominator = 0;

  // Here we iterate over the fields; this makes it simpler to both
  // default-initialize fields and skip over unnamed fields.
  unsigned curInitIndex = 0;
  for (RecordDecl::field_iterator field = record->field_begin(),
                               fieldEnd = record->field_end();
       field != fieldEnd; ++field) {
    // We're done once we hit the flexible array member.
    if (field->getType()->isIncompleteArrayType())
      break;

    // Always skip anonymous bitfields.
    if (field->isUnnamedBitfield())
      continue;

    // We're done if we reach the end of the explicit initializers, we
    // have a zeroed object, and the rest of the fields are
    // zero-initializable.
    if (curInitIndex == NumInitElements && Dest.isZeroed() &&
        CGF.getTypes().isZeroInitializable(E->getType()))
      break;

    LValue LV = CGF.EmitLValueForFieldInitialization(DestLV, *field);
    // We never generate write-barriers for initialized fields.
    LV.setNonGC(true);

    if (curInitIndex < NumInitElements) {
      // Store the initializer into the field.
      EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
    } else {
      // We're out of initializers; default-initialize to null.
      EmitNullInitializationToLValue(LV);
    }

    // Push a destructor if necessary.
    // FIXME: if we have an array of structures, all explicitly
    // initialized, we can end up pushing a linear number of cleanups.
    bool pushedCleanup = false;
    if (QualType::DestructionKind dtorKind
          = field->getType().isDestructedType()) {
      assert(LV.isSimple());
      if (CGF.needsEHCleanup(dtorKind)) {
        if (!cleanupDominator)
          cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder

        CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
                        CGF.getDestroyer(dtorKind), false);
        cleanups.push_back(CGF.EHStack.stable_begin());
        pushedCleanup = true;
      }
    }

    // If the GEP didn't get used because of a dead zero init or something
    // else, clean it up for -O0 builds and general tidiness.
    if (!pushedCleanup && LV.isSimple())
      if (llvm::GetElementPtrInst *GEP =
            dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
        if (GEP->use_empty())
          GEP->eraseFromParent();
  }

  // Deactivate all the partial cleanups in reverse order, which
  // generally means popping them.
  for (unsigned i = cleanups.size(); i != 0; --i)
    CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);

  // Destroy the placeholder if we made one.
  if (cleanupDominator)
    cleanupDominator->eraseFromParent();
}

//===----------------------------------------------------------------------===//
//                        Entry Points into this File
//===----------------------------------------------------------------------===//

/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
/// non-zero bytes that will be stored when outputting the initializer for the
/// specified initializer expression.
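///
/// For example, for 'struct S { int a, b, c, d; } s = { 0, 5 };' only the
/// bytes of 'b' count as non-zero, so the estimate is sizeof(int).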
static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
  E = E->IgnoreParens();

  // 0 and 0.0 won't require any non-zero stores!
  if (isSimpleZero(E, CGF)) return CharUnits::Zero();

  // If this is an initlist expr, sum up the sizes of the (present)
  // elements.  If this is something weird, assume the whole thing is non-zero.
  const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
  if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
    return CGF.getContext().getTypeSizeInChars(E->getType());

  // InitListExprs for structs have to be handled carefully.  If there are
  // reference members, we need to consider the size of the reference, not the
  // referencee.  InitListExprs for unions and arrays can't have references.
  if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
    if (!RT->isUnionType()) {
      RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
      CharUnits NumNonZeroBytes = CharUnits::Zero();

      unsigned ILEElement = 0;
      for (RecordDecl::field_iterator Field = SD->field_begin(),
           FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
        // We're done once we hit the flexible array member or run out of
        // InitListExpr elements.
        if (Field->getType()->isIncompleteArrayType() ||
            ILEElement == ILE->getNumInits())
          break;
        if (Field->isUnnamedBitfield())
          continue;

        const Expr *E = ILE->getInit(ILEElement++);

        // Reference values are always non-null and have the width of a pointer.
        if (Field->getType()->isReferenceType())
          NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
              CGF.getTarget().getPointerWidth(0));
        else
          NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
      }

      return NumNonZeroBytes;
    }
  }

  CharUnits NumNonZeroBytes = CharUnits::Zero();
  for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
    NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
  return NumNonZeroBytes;
}

/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
/// zeros in it, emit a memset and avoid storing the individual zeros.
///
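/// For example, for 'struct S { int arr[16]; int n; } s = { { 0 }, 3 };'
/// nearly all of the object is zero, so we emit one memset of the whole
/// object and then store 3 into 'n' individually.
///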
static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
                                     CodeGenFunction &CGF) {
  // If the slot is already known to be zeroed, nothing to do.  Don't mess with
  // volatile stores.
  if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;

  // C++ objects with a user-declared constructor don't need zeroing.
  if (CGF.getLangOpts().CPlusPlus)
    if (const RecordType *RT = CGF.getContext()
                       .getBaseElementType(E->getType())->getAs<RecordType>()) {
      const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
      if (RD->hasUserDeclaredConstructor())
        return;
    }

  // If the type is 16 bytes or smaller, prefer individual stores over memset.
  std::pair<CharUnits, CharUnits> TypeInfo =
    CGF.getContext().getTypeInfoInChars(E->getType());
  if (TypeInfo.first <= CharUnits::fromQuantity(16))
    return;

  // Check to see if over 3/4 of the initializer is known to be zero.  If so,
  // we prefer to emit memset + individual stores for the rest.
  CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
  if (NumNonZeroBytes*4 > TypeInfo.first)
    return;

  // Okay, it seems like a good idea to use an initial memset, emit the call.
  llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
  CharUnits Align = TypeInfo.second;

  llvm::Value *Loc = Slot.getAddr();

  Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
  CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
                           Align.getQuantity(), false);

  // Tell the AggExprEmitter that the slot is known zero.
  Slot.setZeroed();
}

/// EmitAggExpr - Emit the computation of the specified expression of aggregate
/// type.  The result is computed into the given slot.  Note that if the slot
/// is ignored, the value of the aggregate expression is not needed; otherwise
/// the slot must supply an address.
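///
/// A typical (hypothetical) call site looks like:
///   AggValueSlot Slot = CreateAggTemp(E->getType(), "agg.tmp");
///   EmitAggExpr(E, Slot);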
void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot) {
  assert(E && hasAggregateEvaluationKind(E->getType()) &&
         "Invalid aggregate expression to emit");
  assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
         "slot has bits but no address");

  // Optimize the slot if possible.
  CheckAggExprForMemSetUse(Slot, E, *this);

  AggExprEmitter(*this, Slot).Visit(const_cast<Expr*>(E));
}

LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
  assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
  llvm::Value *Temp = CreateMemTemp(E->getType());
  LValue LV = MakeAddrLValue(Temp, E->getType());
  EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
                                         AggValueSlot::DoesNotNeedGCBarriers,
                                         AggValueSlot::IsNotAliased));
  return LV;
}

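/// \brief Emit a memcpy (or, under Objective-C GC, a collector-aware
/// memmove) of an aggregate value from SrcPtr to DestPtr.  For assignments
/// only the data size is copied, since tail padding may belong to an
/// enclosing object and must not be clobbered.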
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
                                        llvm::Value *SrcPtr, QualType Ty,
                                        bool isVolatile,
                                        CharUnits alignment,
                                        bool isAssignment) {
  assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");

  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
      assert((Record->hasTrivialCopyConstructor() ||
              Record->hasTrivialCopyAssignment() ||
              Record->hasTrivialMoveConstructor() ||
              Record->hasTrivialMoveAssignment()) &&
             "Trying to aggregate-copy a type without a trivial copy/move "
             "constructor or assignment operator");
      // Ignore empty classes in C++.
      if (Record->isEmpty())
        return;
    }
  }

  // Aggregate assignment turns into llvm.memcpy.  This is almost valid per
  // C99 6.5.16.1p3, which states "If the value being stored in an object is
  // read from another object that overlaps in any way the storage of the first
  // object, then the overlap shall be exact and the two objects shall have
  // qualified or unqualified versions of a compatible type."
  //
  // memcpy is not defined if the source and destination pointers are exactly
  // equal, but other compilers do this optimization, and almost every memcpy
  // implementation handles this case safely.  If there is a libc that does not
  // safely handle this, we can add a target hook.

  // Get data size and alignment info for this aggregate. If this is an
  // assignment don't copy the tail padding. Otherwise copying it is fine.
  std::pair<CharUnits, CharUnits> TypeInfo;
  if (isAssignment)
    TypeInfo = getContext().getTypeInfoDataSizeInChars(Ty);
  else
    TypeInfo = getContext().getTypeInfoInChars(Ty);

  if (alignment.isZero())
    alignment = TypeInfo.second;

  // FIXME: Handle variable sized types.

  // FIXME: If we have a volatile struct, the optimizer can remove what might
  // appear to be `extra' memory ops:
  //
  // volatile struct { int i; } a, b;
  //
  // int main() {
  //   a = b;
  //   a = b;
  // }
  //
  // we need to use a different call here.  We use isVolatile to indicate when
  // either the source or the destination is volatile.

  llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
  llvm::Type *DBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
  DestPtr = Builder.CreateBitCast(DestPtr, DBP);

  llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
  llvm::Type *SBP =
    llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
  SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);

  // Don't do any of the memmove_collectable tests if GC isn't set.
  if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
    // fall through
  } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
    RecordDecl *Record = RecordTy->getDecl();
    if (Record->hasObjectMember()) {
      CharUnits size = TypeInfo.first;
      llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
      llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
      CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                    SizeVal);
      return;
    }
  } else if (Ty->isArrayType()) {
    QualType BaseType = getContext().getBaseElementType(Ty);
    if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
      if (RecordTy->getDecl()->hasObjectMember()) {
        CharUnits size = TypeInfo.first;
        llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
        llvm::Value *SizeVal =
          llvm::ConstantInt::get(SizeTy, size.getQuantity());
        CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
                                                      SizeVal);
        return;
      }
    }
  }

  // Determine the metadata to describe the position of any padding in this
  // memcpy, as well as the TBAA tags for the members of the struct, in case
  // the optimizer wishes to expand it into scalar memory operations.
  llvm::MDNode *TBAAStructTag = CGM.getTBAAStructInfo(Ty);

  Builder.CreateMemCpy(DestPtr, SrcPtr,
                       llvm::ConstantInt::get(IntPtrTy,
                                              TypeInfo.first.getQuantity()),
                       alignment.getQuantity(), isVolatile,
                       /*TBAATag=*/0, TBAAStructTag);
}