CGDecl.cpp revision 360784
//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Decl nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/CharUnits.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"

using namespace clang;
using namespace CodeGen;

void CodeGenFunction::EmitDecl(const Decl &D) {
  switch (D.getKind()) {
  case Decl::BuiltinTemplate:
  case Decl::TranslationUnit:
  case Decl::ExternCContext:
  case Decl::Namespace:
  case Decl::UnresolvedUsingTypename:
  case Decl::ClassTemplateSpecialization:
  case Decl::ClassTemplatePartialSpecialization:
  case Decl::VarTemplateSpecialization:
  case Decl::VarTemplatePartialSpecialization:
  case Decl::TemplateTypeParm:
  case Decl::UnresolvedUsingValue:
  case Decl::NonTypeTemplateParm:
  case Decl::CXXDeductionGuide:
  case Decl::CXXMethod:
  case Decl::CXXConstructor:
  case Decl::CXXDestructor:
  case Decl::CXXConversion:
  case Decl::Field:
  case Decl::MSProperty:
  case Decl::IndirectField:
  case Decl::ObjCIvar:
  case Decl::ObjCAtDefsField:
  case Decl::ParmVar:
  case Decl::ImplicitParam:
  case Decl::ClassTemplate:
  case Decl::VarTemplate:
  case Decl::FunctionTemplate:
  case Decl::TypeAliasTemplate:
  case Decl::TemplateTemplateParm:
  case Decl::ObjCMethod:
  case Decl::ObjCCategory:
  case Decl::ObjCProtocol:
  case Decl::ObjCInterface:
  case Decl::ObjCCategoryImpl:
  case Decl::ObjCImplementation:
  case Decl::ObjCProperty:
  case Decl::ObjCCompatibleAlias:
  case Decl::PragmaComment:
  case Decl::PragmaDetectMismatch:
  case Decl::AccessSpec:
  case Decl::LinkageSpec:
  case Decl::Export:
  case Decl::ObjCPropertyImpl:
  case Decl::FileScopeAsm:
  case Decl::Friend:
  case Decl::FriendTemplate:
  case Decl::Block:
  case Decl::Captured:
  case Decl::ClassScopeFunctionSpecialization:
  case Decl::UsingShadow:
  case Decl::ConstructorUsingShadow:
  case Decl::ObjCTypeParam:
  case Decl::Binding:
    llvm_unreachable("Declaration should not be in declstmts!");
  case Decl::Function:  // void X();
  case Decl::Record:    // struct/union/class X;
  case Decl::Enum:      // enum X;
  case Decl::EnumConstant: // enum ? { X = ? }
  case Decl::CXXRecord: // struct/union/class X; [C++]
  case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
  case Decl::Label:        // __label__ x;
  case Decl::Import:
  case Decl::OMPThreadPrivate:
  case Decl::OMPAllocate:
  case Decl::OMPCapturedExpr:
  case Decl::OMPRequires:
  case Decl::Empty:
  case Decl::Concept:
  case Decl::LifetimeExtendedTemporary:
  case Decl::RequiresExprBody:
    // None of these decls require codegen support.
    return;

  case Decl::NamespaceAlias:
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
    return;
  case Decl::Using:          // using X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
        DI->EmitUsingDecl(cast<UsingDecl>(D));
    return;
  case Decl::UsingPack:
    for (auto *Using : cast<UsingPackDecl>(D).expansions())
      EmitDecl(*Using);
    return;
  case Decl::UsingDirective: // using namespace X; [C++]
    if (CGDebugInfo *DI = getDebugInfo())
      DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
    return;
  case Decl::Var:
  case Decl::Decomposition: {
    const VarDecl &VD = cast<VarDecl>(D);
    assert(VD.isLocalVarDecl() &&
           "Should not see file-scope variables inside a function!");
    EmitVarDecl(VD);
    if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
      for (auto *B : DD->bindings())
        if (auto *HD = B->getHoldingVar())
          EmitVarDecl(*HD);
    return;
  }

  case Decl::OMPDeclareReduction:
    return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);

  case Decl::OMPDeclareMapper:
    return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);

  case Decl::Typedef:      // typedef int X;
  case Decl::TypeAlias: {  // using X = int; [C++0x]
    const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
    QualType Ty = TD.getUnderlyingType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);

    return;
  }
  }
}

/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
  if (D.hasExternalStorage())
    // Don't emit it now, allow it to be emitted lazily on its first use.
    return;

  // Some function-scope variables do not have static storage but still need
  // to be emitted like static variables, e.g. a function-scope variable in
  // the constant address space in OpenCL.
  if (D.getStorageDuration() != SD_Automatic) {
    // Static sampler variables are translated to function calls.
    if (D.getType()->isSamplerT())
      return;

    llvm::GlobalValue::LinkageTypes Linkage =
        CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);

    // FIXME: We need to force the emission/use of a guard variable for
    // some variables even if we can constant-evaluate them because
    // we can't guarantee every translation unit will constant-evaluate them.

    return EmitStaticVarDecl(D, Linkage);
  }

  if (D.getType().getAddressSpace() == LangAS::opencl_local)
    return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);

  assert(D.hasLocalStorage());
  return EmitAutoVarDecl(D);
}
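
// As a rough sketch (not from the original source), these declarations take
// the three paths above:
//   void f() {
//     extern int e;      // external storage: emitted lazily on first use
//     static int s = 42; // non-automatic duration: EmitStaticVarDecl
//     int a = 0;         // automatic local: EmitAutoVarDecl
//   }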

static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
  if (CGM.getLangOpts().CPlusPlus)
    return CGM.getMangledName(&D).str();

  // If this isn't C++, we don't need a mangled name, just a pretty one.
  assert(!D.isExternallyVisible() && "name shouldn't matter");
  std::string ContextName;
  const DeclContext *DC = D.getDeclContext();
  if (auto *CD = dyn_cast<CapturedDecl>(DC))
    DC = cast<DeclContext>(CD->getNonClosureContext());
  if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    ContextName = CGM.getMangledName(FD);
  else if (const auto *BD = dyn_cast<BlockDecl>(DC))
    ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
  else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
    ContextName = OMD->getSelector().getAsString();
  else
    llvm_unreachable("Unknown context for static var decl");

  ContextName += "." + D.getNameAsString();
  return ContextName;
}
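
// For example (a sketch): in C, a static local 'x' inside a function 'f' gets
// the pretty name "f.x", and one inside an ObjC method -[Foo barWith:] gets
// "barWith:.x"; the name only needs to be unique within the module, not
// externally visible.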

llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
    const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
  // In general, we don't always emit static var decls once before we reference
  // them. It is possible to reference them before emitting the function that
  // contains them, and it is possible to emit the containing function multiple
  // times.
  if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
    return ExistingGV;

  QualType Ty = D.getType();
  assert(Ty->isConstantSizeType() && "VLAs can't be static");

  // Use the label if the variable is renamed with the asm-label extension.
  std::string Name;
  if (D.hasAttr<AsmLabelAttr>())
    Name = getMangledName(&D);
  else
    Name = getStaticDeclName(*this, D);

  llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
  LangAS AS = GetGlobalVarAddressSpace(&D);
  unsigned TargetAS = getContext().getTargetAddressSpace(AS);

  // OpenCL variables in local address space and CUDA shared
  // variables cannot have an initializer.
  llvm::Constant *Init = nullptr;
  if (Ty.getAddressSpace() == LangAS::opencl_local ||
      D.hasAttr<CUDASharedAttr>())
    Init = llvm::UndefValue::get(LTy);
  else
    Init = EmitNullConstant(Ty);

  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
      getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
      nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
  GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());

  if (supportsCOMDAT() && GV->isWeakForLinker())
    GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));

  if (D.getTLSKind())
    setTLSMode(GV, D);

  setGVProperties(GV, &D);

  // Make sure the result is of the correct type.
  LangAS ExpectedAS = Ty.getAddressSpace();
  llvm::Constant *Addr = GV;
  if (AS != ExpectedAS) {
    Addr = getTargetCodeGenInfo().performAddrSpaceCast(
        *this, GV, AS, ExpectedAS,
        LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
  }

  setStaticLocalDeclAddress(&D, Addr);

  // Ensure that the static local gets initialized by making sure the parent
  // function gets emitted eventually.
  const Decl *DC = cast<Decl>(D.getDeclContext());

  // We can't name blocks or captured statements directly, so try to emit their
  // parents.
  if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
    DC = DC->getNonClosureContext();
    // FIXME: Ensure that global blocks get emitted.
    if (!DC)
      return Addr;
  }

  GlobalDecl GD;
  if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
    GD = GlobalDecl(CD, Ctor_Base);
  else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
    GD = GlobalDecl(DD, Dtor_Base);
  else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
    GD = GlobalDecl(FD);
  else {
    // Don't do anything for Obj-C method decls or global closures. We should
    // never defer them.
    assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
  }
  if (GD.getDecl()) {
    // Disable emission of the parent function for the OpenMP device codegen.
    CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
    (void)GetAddrOfGlobal(GD);
  }

  return Addr;
}
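
// For example (a sketch): a static local in a C++ constructor body
//   struct S { S() { static int n = 0; ++n; } };
// is reached once for the complete-object constructor and once for the
// base-object constructor, and the second emission must reuse the
// GlobalVariable cached in StaticLocalDeclMap rather than create a duplicate.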

/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
/// global variable that has already been created for it.  If the initializer
/// has a different type than GV does, this may free GV and return a different
/// one.  Otherwise it just returns GV.
llvm::GlobalVariable *
CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
                                               llvm::GlobalVariable *GV) {
  ConstantEmitter emitter(*this);
  llvm::Constant *Init = emitter.tryEmitForInitializer(D);

  // If constant emission failed, then this should be a C++ static
  // initializer.
  if (!Init) {
    if (!getLangOpts().CPlusPlus)
      CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
    else if (HaveInsertPoint()) {
      // Since we have a static initializer, this global variable can't
      // be constant.
      GV->setConstant(false);

      EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
    }
    return GV;
  }

  // The initializer may differ in type from the global. Rewrite
  // the global to match the initializer.  (We have to do this
  // because some types, like unions, can't be completely represented
  // in the LLVM type system.)
  if (GV->getType()->getElementType() != Init->getType()) {
    llvm::GlobalVariable *OldGV = GV;

    GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                  OldGV->isConstant(),
                                  OldGV->getLinkage(), Init, "",
                                  /*InsertBefore*/ OldGV,
                                  OldGV->getThreadLocalMode(),
                           CGM.getContext().getTargetAddressSpace(D.getType()));
    GV->setVisibility(OldGV->getVisibility());
    GV->setDSOLocal(OldGV->isDSOLocal());
    GV->setComdat(OldGV->getComdat());

    // Steal the name of the old global
    GV->takeName(OldGV);

    // Replace all uses of the old global with the new global
    llvm::Constant *NewPtrForOldDecl =
    llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
    OldGV->replaceAllUsesWith(NewPtrForOldDecl);

    // Erase the old global, since it is no longer used.
    OldGV->eraseFromParent();
  }

  GV->setConstant(CGM.isTypeConstant(D.getType(), true));
  GV->setInitializer(Init);

  emitter.finalize(GV);

  if (D.needsDestruction(getContext()) == QualType::DK_cxx_destructor &&
      HaveInsertPoint()) {
    // We have a constant initializer, but a nontrivial destructor. We still
    // need to perform a guarded "initialization" in order to register the
    // destructor.
    EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
  }

  return GV;
}
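
// A case where the type rewrite above kicks in (a sketch): for a static local
//   static union { int i; double d; } u = { .d = 1.0 };
// the global is first created with the union's in-memory LLVM type, but the
// constant initializer is emitted in terms of the active member's type, so
// the global is recreated with the initializer's type and RAUW'd over the
// old one.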

void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
                                      llvm::GlobalValue::LinkageTypes Linkage) {
  // Check to see if we already have a global variable for this
  // declaration.  This can happen when double-emitting function
  // bodies, e.g. with complete and base constructors.
  llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
  CharUnits alignment = getContext().getDeclAlign(&D);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  setAddrOfLocalVar(&D, Address(addr, alignment));

  // We can't have a VLA here, but we can have a pointer to a VLA,
  // even though that doesn't really make any sense.
  // Make sure to evaluate VLA bounds now so that we have them for later.
  if (D.getType()->isVariablyModifiedType())
    EmitVariablyModifiedType(D.getType());

  // Save the type in case adding the initializer forces a type change.
  llvm::Type *expectedType = addr->getType();

  llvm::GlobalVariable *var =
    cast<llvm::GlobalVariable>(addr->stripPointerCasts());

  // CUDA's local and local static __shared__ variables should not
  // have any non-empty initializers. This is ensured by Sema.
  // Whatever initializer such a variable may have when it gets here is
  // a no-op and should not be emitted.
  bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
                         D.hasAttr<CUDASharedAttr>();
  // If this value has an initializer, emit it.
  if (D.getInit() && !isCudaSharedVar)
    var = AddInitializerToStaticVarDecl(D, var);

  var->setAlignment(alignment.getAsAlign());

  if (D.hasAttr<AnnotateAttr>())
    CGM.AddGlobalAnnotations(&D, var);

  if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
    var->addAttribute("bss-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
    var->addAttribute("data-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
    var->addAttribute("rodata-section", SA->getName());
  if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
    var->addAttribute("relro-section", SA->getName());

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    var->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.addUsedGlobal(var);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  llvm::Constant *castedAddr =
    llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
  if (var != castedAddr)
    LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
  CGM.setStaticLocalDeclAddress(&D, castedAddr);

  CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI && CGM.getCodeGenOpts().hasReducedDebugInfo()) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(var, &D);
  }
}
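
// For example (a sketch): under
//   #pragma clang section bss = ".my_bss" data = ".my_data"
// a static local picks up the "bss-section"/"data-section" attributes above,
// and one declared as
//   static int s __attribute__((section(".keep"))) = 1;
// is placed verbatim in the ".keep" section.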

namespace {
  struct DestroyObject final : EHScopeStack::Cleanup {
    DestroyObject(Address addr, QualType type,
                  CodeGenFunction::Destroyer *destroyer,
                  bool useEHCleanupForArray)
      : addr(addr), type(type), destroyer(destroyer),
        useEHCleanupForArray(useEHCleanupForArray) {}

    Address addr;
    QualType type;
    CodeGenFunction::Destroyer *destroyer;
    bool useEHCleanupForArray;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Don't use an EH cleanup recursively from an EH cleanup.
      bool useEHCleanupForArray =
        flags.isForNormalCleanup() && this->useEHCleanupForArray;

      CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
    }
  };

  template <class Derived>
  struct DestroyNRVOVariable : EHScopeStack::Cleanup {
    DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
        : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}

    llvm::Value *NRVOFlag;
    Address Loc;
    QualType Ty;

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Along the exceptions path we always execute the dtor.
      bool NRVO = flags.isForNormalCleanup() && NRVOFlag;

      llvm::BasicBlock *SkipDtorBB = nullptr;
      if (NRVO) {
        // If we exited via NRVO, we skip the destructor call.
        llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
        SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
        llvm::Value *DidNRVO =
          CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
        CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
        CGF.EmitBlock(RunDtorBB);
      }

      static_cast<Derived *>(this)->emitDestructorCall(CGF);

      if (NRVO) CGF.EmitBlock(SkipDtorBB);
    }

    virtual ~DestroyNRVOVariable() = default;
  };

  struct DestroyNRVOVariableCXX final
      : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
    DestroyNRVOVariableCXX(Address addr, QualType type,
                           const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
        : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
          Dtor(Dtor) {}

    const CXXDestructorDecl *Dtor;

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
                                /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Loc, Ty);
    }
  };

  struct DestroyNRVOVariableC final
      : DestroyNRVOVariable<DestroyNRVOVariableC> {
    DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
        : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}

    void emitDestructorCall(CodeGenFunction &CGF) {
      CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
    }
  };

  struct CallStackRestore final : EHScopeStack::Cleanup {
    Address Stack;
    CallStackRestore(Address Stack) : Stack(Stack) {}
    void Emit(CodeGenFunction &CGF, Flags flags) override {
      llvm::Value *V = CGF.Builder.CreateLoad(Stack);
      llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
      CGF.Builder.CreateCall(F, V);
    }
  };

  struct ExtendGCLifetime final : EHScopeStack::Cleanup {
    const VarDecl &Var;
    ExtendGCLifetime(const VarDecl *var) : Var(*var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      // Compute the address of the local variable, in case it's a
      // byref or something.
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
                                                SourceLocation());
      CGF.EmitExtendGCLifetime(value);
    }
  };

  struct CallCleanupFunction final : EHScopeStack::Cleanup {
    llvm::Constant *CleanupFn;
    const CGFunctionInfo &FnInfo;
    const VarDecl &Var;

    CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
                        const VarDecl *Var)
      : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
                      Var.getType(), VK_LValue, SourceLocation());
      // Compute the address of the local variable, in case it's a byref
      // or something.
      llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer(CGF);

      // In some cases, the type of the function argument will be different from
      // the type of the pointer. An example of this is
      // void f(void* arg);
      // __attribute__((cleanup(f))) void *g;
      //
      // To fix this we insert a bitcast here.
      QualType ArgTy = FnInfo.arg_begin()->type;
      llvm::Value *Arg =
        CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));

      CallArgList Args;
      Args.add(RValue::get(Arg),
               CGF.getContext().getPointerType(Var.getType()));
      auto Callee = CGCallee::forDirect(CleanupFn);
      CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
    }
  };
} // end anonymous namespace

/// EmitAutoVarWithLifetime - Does the setup required for an automatic
/// variable with lifetime.
static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
                                    Address addr,
                                    Qualifiers::ObjCLifetime lifetime) {
  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_ExplicitNone:
    // nothing to do
    break;

  case Qualifiers::OCL_Strong: {
    CodeGenFunction::Destroyer *destroyer =
      (var.hasAttr<ObjCPreciseLifetimeAttr>()
       ? CodeGenFunction::destroyARCStrongPrecise
       : CodeGenFunction::destroyARCStrongImprecise);

    CleanupKind cleanupKind = CGF.getARCCleanupKind();
    CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
                    cleanupKind & EHCleanup);
    break;
  }
  case Qualifiers::OCL_Autoreleasing:
    // nothing to do
    break;

  case Qualifiers::OCL_Weak:
    // __weak objects always get EH cleanups; otherwise, exceptions
    // could cause really nasty crashes instead of mere leaks.
    CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
                    CodeGenFunction::destroyARCWeak,
                    /*useEHCleanup*/ true);
    break;
  }
}

static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
  if (const Expr *e = dyn_cast<Expr>(s)) {
    // Skip the most common kinds of expressions that make
    // hierarchy-walking expensive.
    s = e = e->IgnoreParenCasts();

    if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
      return (ref->getDecl() == &var);
    if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
      const BlockDecl *block = be->getBlockDecl();
      for (const auto &I : block->captures()) {
        if (I.getVariable() == &var)
          return true;
      }
    }
  }

  for (const Stmt *SubStmt : s->children())
    // SubStmt might be null, e.g. a missing decl or the conditional of an
    // if-stmt.
    if (SubStmt && isAccessedBy(var, SubStmt))
      return true;

  return false;
}

static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
  if (!decl) return false;
  if (!isa<VarDecl>(decl)) return false;
  const VarDecl *var = cast<VarDecl>(decl);
  return isAccessedBy(*var, e);
}
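
// For example (a sketch): in
//   __block void (^recurse)(int) = ^(int n) { if (n) recurse(n - 1); };
// the initializer's block captures 'recurse' itself, so isAccessedBy returns
// true and the variable must be zero-initialized before the block is formed.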

static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
                                   const LValue &destLV, const Expr *init) {
  bool needsCast = false;

  while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
    switch (castExpr->getCastKind()) {
    // Look through casts that don't require representation changes.
    case CK_NoOp:
    case CK_BitCast:
    case CK_BlockPointerToObjCPointerCast:
      needsCast = true;
      break;

    // If we find an l-value to r-value cast from a __weak variable,
    // emit this operation as a copy or move.
    case CK_LValueToRValue: {
      const Expr *srcExpr = castExpr->getSubExpr();
      if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
        return false;

      // Emit the source l-value.
      LValue srcLV = CGF.EmitLValue(srcExpr);

      // Handle a formal type change to avoid asserting.
      auto srcAddr = srcLV.getAddress(CGF);
      if (needsCast) {
        srcAddr = CGF.Builder.CreateElementBitCast(
            srcAddr, destLV.getAddress(CGF).getElementType());
      }

      // If it was an l-value, use objc_copyWeak.
      if (srcExpr->getValueKind() == VK_LValue) {
        CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
      } else {
        assert(srcExpr->getValueKind() == VK_XValue);
        CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
      }
      return true;
    }

    // Stop at anything else.
    default:
      return false;
    }

    init = castExpr->getSubExpr();
  }
  return false;
}
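
// For example (a sketch): given "__weak id w = strongObj;" nothing here
// fires, but for "__weak id w2 = w;" the CK_LValueToRValue case above emits
// objc_copyWeak(&w2, &w) directly instead of loading (and briefly retaining)
// the weak value.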

static void drillIntoBlockVariable(CodeGenFunction &CGF,
                                   LValue &lvalue,
                                   const VarDecl *var) {
  lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
}

void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
                                           SourceLocation Loc) {
  if (!SanOpts.has(SanitizerKind::NullabilityAssign))
    return;

  auto Nullability = LHS.getType()->getNullability(getContext());
  if (!Nullability || *Nullability != NullabilityKind::NonNull)
    return;

  // If the left-hand side must be nonnull, check that the right-hand side of
  // the assignment is nonnull.
  SanitizerScope SanScope(this);
  llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
      llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
      llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
  EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
            SanitizerHandler::TypeMismatch, StaticData, RHS);
}
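
// For example (a sketch): with -fsanitize=nullability-assign,
//   id _Nonnull x;
//   x = maybeNil();   // checked: branch to the TypeMismatch handler if nil
// emits the IsNotNull test above before the store.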

void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
                                     LValue lvalue, bool capturedByInit) {
  Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
  if (!lifetime) {
    llvm::Value *value = EmitScalarExpr(init);
    if (capturedByInit)
      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    EmitNullabilityCheck(lvalue, value, init->getExprLoc());
    EmitStoreThroughLValue(RValue::get(value), lvalue, true);
    return;
  }

  if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
    init = DIE->getExpr();

  // If we're emitting a value with lifetime, we have to do the
  // initialization *before* we leave the cleanup scopes.
  if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
    enterFullExpression(fe);
    init = fe->getSubExpr();
  }
  CodeGenFunction::RunCleanupsScope Scope(*this);

  // We have to maintain the illusion that the variable is
  // zero-initialized.  If the variable might be accessed in its
  // initializer, zero-initialize before running the initializer, then
  // actually perform the initialization with an assign.
  bool accessedByInit = false;
  if (lifetime != Qualifiers::OCL_ExplicitNone)
    accessedByInit = (capturedByInit || isAccessedBy(D, init));
  if (accessedByInit) {
    LValue tempLV = lvalue;
    // Drill down to the __block object if necessary.
    if (capturedByInit) {
      // We can use a simple GEP for this because it can't have been
      // moved yet.
      tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
                                              cast<VarDecl>(D),
                                              /*follow*/ false));
    }

    auto ty =
        cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
    llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());

    // If __weak, we want to use a barrier under certain conditions.
    if (lifetime == Qualifiers::OCL_Weak)
      EmitARCInitWeak(tempLV.getAddress(*this), zero);

    // Otherwise just do a simple store.
    else
      EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
  }

  // Emit the initializer.
  llvm::Value *value = nullptr;

  switch (lifetime) {
  case Qualifiers::OCL_None:
    llvm_unreachable("present but none");

  case Qualifiers::OCL_Strong: {
    if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
      value = EmitARCRetainScalarExpr(init);
      break;
    }
    // If D is pseudo-strong, treat it like __unsafe_unretained here. This
    // means that we omit the retain, which causes non-autoreleased return
    // values to be released immediately.
    LLVM_FALLTHROUGH;
  }

  case Qualifiers::OCL_ExplicitNone:
    value = EmitARCUnsafeUnretainedScalarExpr(init);
    break;

  case Qualifiers::OCL_Weak: {
    // If it's not accessed by the initializer, try to emit the
    // initialization with a copy or move.
    if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
      return;
    }

    // No way to optimize a producing initializer into this.  It's not
    // worth optimizing for, because the value will immediately
    // disappear in the common case.
    value = EmitScalarExpr(init);

    if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
    if (accessedByInit)
      EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
    else
      EmitARCInitWeak(lvalue.getAddress(*this), value);
    return;
  }

  case Qualifiers::OCL_Autoreleasing:
    value = EmitARCRetainAutoreleaseScalarExpr(init);
    break;
  }

  if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));

  EmitNullabilityCheck(lvalue, value, init->getExprLoc());

  // If the variable might have been accessed by its initializer, we
  // might have to initialize with a barrier.  We have to do this for
  // both __weak and __strong, but __weak got filtered out above.
  if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
    llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
    EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
    EmitARCRelease(oldValue, ARCImpreciseLifetime);
    return;
  }

  EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
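
// For example (a sketch): in
//   __strong id x = [Factory makeWithFallback:x];
// 'x' is read by its own initializer, so it is zero-initialized first, the
// initializer runs, and the final store releases the old (null) value rather
// than doing a plain initializing store.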

/// Decide whether we can emit the non-zero parts of the specified initializer
/// with equal or fewer than NumStores scalar stores.
static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
                                               unsigned &NumStores) {
  // Zero and Undef never require any extra stores.
  if (isa<llvm::ConstantAggregateZero>(Init) ||
      isa<llvm::ConstantPointerNull>(Init) ||
      isa<llvm::UndefValue>(Init))
    return true;
  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init))
    return Init->isNullValue() || NumStores--;

  // See if we can emit each element.
  if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
    for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
      llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  if (llvm::ConstantDataSequential *CDS =
        dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);
      if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
        return false;
    }
    return true;
  }

  // Anything else is hard and scary.
  return false;
}

/// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
/// the scalar stores that would be required.
static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
                                        llvm::Constant *Init, Address Loc,
                                        bool isVolatile, CGBuilderTy &Builder) {
  assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
         "called emitStoresForInitAfterBZero for zero or undef value.");

  if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
      isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
      isa<llvm::ConstantExpr>(Init)) {
    Builder.CreateStore(Init, Loc, isVolatile);
    return;
  }

  if (llvm::ConstantDataSequential *CDS =
          dyn_cast<llvm::ConstantDataSequential>(Init)) {
    for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
      llvm::Constant *Elt = CDS->getElementAsConstant(i);

      // If necessary, get a pointer to the element and emit it.
      if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
        emitStoresForInitAfterBZero(
            CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
            Builder);
    }
    return;
  }

  assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
         "Unknown value type!");

  for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
    llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));

    // If necessary, get a pointer to the element and emit it.
    if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
      emitStoresForInitAfterBZero(CGM, Elt,
                                  Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
                                  isVolatile, Builder);
  }
}

/// Decide whether we should use bzero plus some stores to initialize a local
/// variable instead of using a memcpy from a constant global.  It is beneficial
/// to use bzero if the global is all zeros, or mostly zeros and large.
static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
                                                 uint64_t GlobalSize) {
  // If a global is all zeros, always use a bzero.
  if (isa<llvm::ConstantAggregateZero>(Init)) return true;

  // If a non-zero global is <= 32 bytes, always use a memcpy.  If it is large,
  // do it if it will require 6 or fewer scalar stores.
  // TODO: Should the budget depend on the size?  Avoiding a large global
  // warrants plopping in more stores.
  unsigned StoreBudget = 6;
  uint64_t SizeLimit = 32;

  return GlobalSize > SizeLimit &&
         canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
}
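
// For example (a sketch): for "int a[100] = {1, 2};" the 400-byte initializer
// is all zeros except two leading elements, so a bzero of the whole array
// plus two scalar stores beats a memcpy from a 400-byte constant global.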

/// Decide whether we should use memset to initialize a local variable instead
/// of using a memcpy from a constant global. Assumes we've already decided to
/// not use bzero.
/// FIXME We could be more clever, as we are for bzero above, and generate
///       memset followed by stores. It's unclear that's worth the effort.
static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
                                                uint64_t GlobalSize,
                                                const llvm::DataLayout &DL) {
  uint64_t SizeLimit = 32;
  if (GlobalSize <= SizeLimit)
    return nullptr;
  return llvm::isBytewiseValue(Init, DL);
}
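
// For example (a sketch): "char buf[64] = {[0 ... 63] = 0x2A};" (a GNU range
// designator) is a repeated-byte value, so isBytewiseValue returns the 0x2A
// byte and the variable is initialized with a single memset.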

/// Decide whether we want to split a constant structure or array store into a
/// sequence of its fields' stores. This may cost us code size and compilation
/// speed, but plays better with store optimizations.
static bool shouldSplitConstantStore(CodeGenModule &CGM,
                                     uint64_t GlobalByteSize) {
  // Don't break things that occupy more than one cacheline.
  uint64_t ByteSizeLimit = 64;
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return false;
  if (GlobalByteSize <= ByteSizeLimit)
    return true;
  return false;
}

enum class IsPattern { No, Yes };

/// Generate a constant filled with either a pattern or zeroes.
static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Type *Ty) {
  if (isPattern == IsPattern::Yes)
    return initializationPatternFor(CGM, Ty);
  else
    return llvm::Constant::getNullValue(Ty);
}

static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant);

/// Helper function for constWithPadding() to deal with padding in structures.
static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
                                              IsPattern isPattern,
                                              llvm::StructType *STy,
                                              llvm::Constant *constant) {
  const llvm::DataLayout &DL = CGM.getDataLayout();
  const llvm::StructLayout *Layout = DL.getStructLayout(STy);
  llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
  unsigned SizeSoFar = 0;
  SmallVector<llvm::Constant *, 8> Values;
  bool NestedIntact = true;
  for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
    unsigned CurOff = Layout->getElementOffset(i);
    if (SizeSoFar < CurOff) {
      assert(!STy->isPacked());
      auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
      Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
    }
    llvm::Constant *CurOp;
    if (constant->isZeroValue())
      CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
    else
      CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
    auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
    if (CurOp != NewOp)
      NestedIntact = false;
    Values.push_back(NewOp);
    SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
  }
  unsigned TotalSize = Layout->getSizeInBytes();
  if (SizeSoFar < TotalSize) {
    auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
    Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
  }
  if (NestedIntact && Values.size() == STy->getNumElements())
    return constant;
  return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
}
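
// For example (a sketch): for "struct S { char c; int i; };" the constant
// { i8 1, i32 2 } leaves 3 padding bytes after 'c' on a typical target, so
// this returns the anonymous struct { i8 1, [3 x i8] <filler>, i32 2 } where
// <filler> is zeroes or the target's pattern bytes depending on isPattern.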

/// Replace all padding bytes in a given constant with either a pattern byte or
/// 0x00.
static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
                                        llvm::Constant *constant) {
  llvm::Type *OrigTy = constant->getType();
  if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
    return constStructWithPadding(CGM, isPattern, STy, constant);
  if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
    llvm::SmallVector<llvm::Constant *, 8> Values;
    unsigned Size = STy->getNumElements();
    if (!Size)
      return constant;
    llvm::Type *ElemTy = STy->getElementType();
    bool ZeroInitializer = constant->isZeroValue();
    llvm::Constant *OpValue, *PaddedOp;
    if (ZeroInitializer) {
      OpValue = llvm::Constant::getNullValue(ElemTy);
      PaddedOp = constWithPadding(CGM, isPattern, OpValue);
    }
    for (unsigned Op = 0; Op != Size; ++Op) {
      if (!ZeroInitializer) {
        OpValue = constant->getAggregateElement(Op);
        PaddedOp = constWithPadding(CGM, isPattern, OpValue);
      }
      Values.push_back(PaddedOp);
    }
    auto *NewElemTy = Values[0]->getType();
    if (NewElemTy == ElemTy)
      return constant;
    if (OrigTy->isArrayTy()) {
      auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
      return llvm::ConstantArray::get(ArrayTy, Values);
    } else {
      return llvm::ConstantVector::get(Values);
    }
  }
  return constant;
}

Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
                                               llvm::Constant *Constant,
                                               CharUnits Align) {
  auto FunctionName = [&](const DeclContext *DC) -> std::string {
    if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
      if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
        return CC->getNameAsString();
      if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
        return CD->getNameAsString();
      return getMangledName(FD);
    } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
      return OM->getNameAsString();
    } else if (isa<BlockDecl>(DC)) {
      return "<block>";
    } else if (isa<CapturedDecl>(DC)) {
      return "<captured>";
    } else {
      llvm_unreachable("expected a function or method");
    }
  };

  // Form a simple per-variable cache of these values in case we find we
  // want to reuse them.
  llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
  if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
    auto *Ty = Constant->getType();
    bool isConstant = true;
    llvm::GlobalVariable *InsertBefore = nullptr;
    unsigned AS =
        getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
    std::string Name;
    if (D.hasGlobalStorage())
      Name = getMangledName(&D).str() + ".const";
    else if (const DeclContext *DC = D.getParentFunctionOrMethod())
      Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
    else
      llvm_unreachable("local variable has no parent function or method");
    llvm::GlobalVariable *GV = new llvm::GlobalVariable(
        getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
        Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
    GV->setAlignment(Align.getAsAlign());
    GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CacheEntry = GV;
  } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
    CacheEntry->setAlignment(Align.getAsAlign());
  }

  return Address(CacheEntry, Align);
}
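
// For example (a sketch): the constant for "int arr[4] = {1, 2, 3, 4};" in a
// function 'f' becomes a private unnamed_addr global named "__const.f.arr"
// that later memcpys can source from, and it is cached per variable so
// re-emitting 'f' reuses it.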

static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
                                                const VarDecl &D,
                                                CGBuilderTy &Builder,
                                                llvm::Constant *Constant,
                                                CharUnits Align) {
  Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
  llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
                                                   SrcPtr.getAddressSpace());
  if (SrcPtr.getType() != BP)
    SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
  return SrcPtr;
}

static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder,
                                  llvm::Constant *constant) {
  auto *Ty = constant->getType();
  uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
  if (!ConstantSize)
    return;

  bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
                          Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
  if (canDoSingleStore) {
    Builder.CreateStore(constant, Loc, isVolatile);
    return;
  }

  auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);

  // If the initializer is all or mostly the same, codegen with bzero / memset
  // then do a few stores afterward.
  if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
                         isVolatile);

    bool valueAlreadyCorrect =
        constant->isNullValue() || isa<llvm::UndefValue>(constant);
    if (!valueAlreadyCorrect) {
      Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
      emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
    }
    return;
  }

  // If the initializer is a repeated byte pattern, use memset.
  llvm::Value *Pattern =
      shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
  if (Pattern) {
    uint64_t Value = 0x00;
    if (!isa<llvm::UndefValue>(Pattern)) {
      const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
      assert(AP.getBitWidth() <= 8);
      Value = AP.getLimitedValue();
    }
    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value),
                         SizeVal, isVolatile);
    return;
  }

  // If the initializer is small, use a handful of stores.
  if (shouldSplitConstantStore(CGM, ConstantSize)) {
    if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
      // FIXME: handle the case when STy != Loc.getElementType().
      if (STy == Loc.getElementType()) {
        for (unsigned i = 0; i != constant->getNumOperands(); i++) {
          Address EltPtr = Builder.CreateStructGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
        }
        return;
      }
    } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      // FIXME: handle the case when ATy != Loc.getElementType().
      if (ATy == Loc.getElementType()) {
        for (unsigned i = 0; i != ATy->getNumElements(); i++) {
          Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
          emitStoresForConstant(
              CGM, D, EltPtr, isVolatile, Builder,
              cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
        }
        return;
      }
    }
  }

  // Copy from a global.
  Builder.CreateMemCpy(Loc,
                       createUnnamedGlobalForMemcpyFrom(
                           CGM, D, Builder, constant, Loc.getAlignment()),
                       SizeVal, isVolatile);
}

static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
                                  Address Loc, bool isVolatile,
                                  CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant =
      constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}

static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
                                     Address Loc, bool isVolatile,
                                     CGBuilderTy &Builder) {
  llvm::Type *ElTy = Loc.getElementType();
  llvm::Constant *constant = constWithPadding(
      CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
  assert(!isa<llvm::UndefValue>(constant));
  emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
}

static bool containsUndef(llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return true;
  if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
    for (llvm::Use &Op : constant->operands())
      if (containsUndef(cast<llvm::Constant>(Op)))
        return true;
  return false;
}

static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
                                    llvm::Constant *constant) {
  auto *Ty = constant->getType();
  if (isa<llvm::UndefValue>(constant))
    return patternOrZeroFor(CGM, isPattern, Ty);
  if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
    return constant;
  if (!containsUndef(constant))
    return constant;
  llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
  for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
    auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
    Values[Op] = replaceUndef(CGM, isPattern, OpValue);
  }
  if (Ty->isStructTy())
    return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
  if (Ty->isArrayTy())
    return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
  assert(Ty->isVectorTy());
  return llvm::ConstantVector::get(Values);
}

/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
  AutoVarEmission emission = EmitAutoVarAlloca(D);
  EmitAutoVarInit(emission);
  EmitAutoVarCleanups(emission);
}

/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
                                                llvm::Value *Addr) {
  if (!ShouldEmitLifetimeMarkers)
    return nullptr;

  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
  C->setDoesNotThrow();
  return SizeV;
}
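
// For example (a sketch): for a 4-byte local this emits
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %x.bc)
// and returns the i64 4 constant so the matching lifetime.end can reuse it.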

void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
  assert(Addr->getType()->getPointerAddressSpace() ==
             CGM.getDataLayout().getAllocaAddrSpace() &&
         "Pointer should be in alloca address space");
  Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
  llvm::CallInst *C =
      Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
  C->setDoesNotThrow();
}
1315
1316void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1317    CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
  // For each dimension, store its QualType and the corresponding
  // size-expression Value.
  SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
  SmallVector<IdentifierInfo *, 4> VLAExprNames;

  // Break down the array into individual dimensions.
  QualType Type1D = D.getType();
  while (getContext().getAsVariableArrayType(Type1D)) {
    auto VlaSize = getVLAElements1D(Type1D);
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
    else {
      // Generate a locally unique name for the size expression.
      Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
      SmallString<12> Buffer;
      StringRef NameRef = Name.toStringRef(Buffer);
      auto &Ident = getContext().Idents.getOwn(NameRef);
      VLAExprNames.push_back(&Ident);
      auto SizeExprAddr =
          CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
      Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
      Dimensions.emplace_back(SizeExprAddr.getPointer(),
                              Type1D.getUnqualifiedType());
    }
    Type1D = VlaSize.Type;
  }

  if (!EmitDebugInfo)
    return;

  // Register each dimension's size-expression with a DILocalVariable,
  // so that it can be used by CGDebugInfo when instantiating a DISubrange
  // to describe this array.
  unsigned NameIdx = 0;
  for (auto &VlaSize : Dimensions) {
    llvm::Metadata *MD;
    if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
      MD = llvm::ConstantAsMetadata::get(C);
    else {
      // Create an artificial VarDecl to generate debug info for.
      IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
      auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
      auto QT = getContext().getIntTypeForBitwidth(
          VlaExprTy->getScalarSizeInBits(), false);
      auto *ArtificialDecl = VarDecl::Create(
          getContext(), const_cast<DeclContext *>(D.getDeclContext()),
          D.getLocation(), D.getLocation(), NameIdent, QT,
          getContext().CreateTypeSourceInfo(QT), SC_Auto);
      ArtificialDecl->setImplicit();

      MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
                                         Builder);
    }
    assert(MD && "No Size expression debug node created");
    DI->registerVLASizeExpression(VlaSize.Type, MD);
  }
}
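
// For example (a sketch): for "void f(int m, int n) { int a[m][n]; }" the two
// non-constant bounds are stored into allocas named "__vla_expr0" and
// "__vla_expr1", and each gets an artificial DILocalVariable that the
// DISubranges describing 'a' can reference.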

/// EmitAutoVarAlloca - Emit the alloca and debug information for a
/// local variable.  Does not emit initialization or destruction.
CodeGenFunction::AutoVarEmission
CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
  QualType Ty = D.getType();
  assert(
      Ty.getAddressSpace() == LangAS::Default ||
      (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));

  AutoVarEmission emission(D);

  bool isEscapingByRef = D.isEscapingByref();
  emission.IsEscapingByRef = isEscapingByRef;

  CharUnits alignment = getContext().getDeclAlign(&D);

  // If the type is variably-modified, emit all the VLA sizes for it.
  if (Ty->isVariablyModifiedType())
    EmitVariablyModifiedType(Ty);

  auto *DI = getDebugInfo();
  bool EmitDebugInfo = DI && CGM.getCodeGenOpts().hasReducedDebugInfo();

  Address address = Address::invalid();
  Address AllocaAddr = Address::invalid();
  Address OpenMPLocalAddr =
      getLangOpts().OpenMP
          ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
          : Address::invalid();
  bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();

  if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
    address = OpenMPLocalAddr;
  } else if (Ty->isConstantSizeType()) {
    // If this value is an array or struct with a statically determinable
    // constant initializer, there are optimizations we can do.
    //
    // TODO: We should constant-evaluate the initializer of any variable,
    // as long as it is initialized by a constant expression. Currently,
    // isConstantInitializer produces wrong answers for structs with
    // reference or bitfield members, and a few other cases, and checking
    // for POD-ness protects us from some of these.
    if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
        (D.isConstexpr() ||
         ((Ty.isPODType(getContext()) ||
           getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
          D.getInit()->isConstantInitializer(getContext(), false)))) {

1424      // If the variable is of const type, is neither an NRVO candidate
1425      // nor a __block variable, and has no mutable members, emit it as
1426      // a global instead.
1427      // The exception is a variable located in a non-constant address
1428      // space in OpenCL.
1429      if ((!getLangOpts().OpenCL ||
1430           Ty.getAddressSpace() == LangAS::opencl_constant) &&
1431          (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1432           !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1433        EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1434
1435        // Signal this condition to later callbacks.
1436        emission.Addr = Address::invalid();
1437        assert(emission.wasEmittedAsGlobal());
1438        return emission;
1439      }
1440
1441      // Otherwise, tell the initialization code that we're in this case.
1442      emission.IsConstantAggregate = true;
1443    }
1444
1445    // A normal fixed sized variable becomes an alloca in the entry block,
1446    // unless:
1447    // - it's an NRVO variable.
1448    // - we are compiling OpenMP and it's an OpenMP local variable.
1449    if (NRVO) {
1450      // The named return value optimization: allocate this variable in the
1451      // return slot, so that we can elide the copy when returning this
1452      // variable (C++0x [class.copy]p34).
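      // Illustrative C++ (not from this file):
      //   Foo make() { Foo local; /*...*/ return local; }
      // 'local' is constructed directly in the caller's return slot, so the
      // copy/move on return is elided.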
1453      address = ReturnValue;
1454
1455      if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1456        const auto *RD = RecordTy->getDecl();
1457        const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1458        if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1459            RD->isNonTrivialToPrimitiveDestroy()) {
1460          // Create a flag that is used to indicate when the NRVO was applied
1461          // to this variable. Set it to zero to indicate that NRVO was not
1462          // applied.
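          // Sketch of what this flag guards (assumed cleanup shape): the
          // destructor cleanup pushed later loads this i1 flag and skips
          // the destructor call when NRVO actually fired, since the object
          // then belongs to the caller's frame.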
1463          llvm::Value *Zero = Builder.getFalse();
1464          Address NRVOFlag =
1465            CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1466          EnsureInsertPoint();
1467          Builder.CreateStore(Zero, NRVOFlag);
1468
1469          // Record the NRVO flag for this variable.
1470          NRVOFlags[&D] = NRVOFlag.getPointer();
1471          emission.NRVOFlag = NRVOFlag.getPointer();
1472        }
1473      }
1474    } else {
1475      CharUnits allocaAlignment;
1476      llvm::Type *allocaTy;
1477      if (isEscapingByRef) {
1478        auto &byrefInfo = getBlockByrefInfo(&D);
1479        allocaTy = byrefInfo.Type;
1480        allocaAlignment = byrefInfo.ByrefAlignment;
1481      } else {
1482        allocaTy = ConvertTypeForMem(Ty);
1483        allocaAlignment = alignment;
1484      }
1485
1486      // Create the alloca.  Note that we set the name separately from
1487      // building the instruction so that it's there even in no-asserts
1488      // builds.
1489      address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1490                                 /*ArraySize=*/nullptr, &AllocaAddr);
1491
1492      // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1493      // the catch parameter starts in the catchpad instruction, and we can't
1494      // insert code in those basic blocks.
1495      bool IsMSCatchParam =
1496          D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1497
1498      // Emit a lifetime intrinsic if meaningful; there's no point in
1499      // doing this if we don't have a valid insertion point.
1500      if (HaveInsertPoint() && !IsMSCatchParam) {
1501        // If there's a jump into the lifetime of this variable, its lifetime
1502        // gets broken up into several regions in IR, which requires more work
1503        // to handle correctly. For now, just omit the intrinsics; this is a
1504        // rare case, and it's better to just be conservatively correct.
1505        // PR28267.
1506        //
1507        // We have to do this in all language modes if there's a jump past the
1508        // declaration. We also have to do it in C if there's a jump to an
1509        // earlier point in the current block because non-VLA lifetimes begin as
1510        // soon as the containing block is entered, not when its variables
1511        // actually come into scope; suppressing the lifetime annotations
1512        // completely in this case is unnecessarily pessimistic, but again, this
1513        // is rare.
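        // Illustrative C source for the bypass case (hypothetical):
        //   goto skip;     // jumps past the declaration of 'x'
        //   int x = f();
        //   skip: use(&x);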
1514        if (!Bypasses.IsBypassed(&D) &&
1515            !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1516          uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1517          emission.SizeForLifetimeMarkers =
1518              EmitLifetimeStart(size, AllocaAddr.getPointer());
1519        }
1520      } else {
1521        assert(!emission.useLifetimeMarkers());
1522      }
1523    }
1524  } else {
1525    EnsureInsertPoint();
1526
1527    if (!DidCallStackSave) {
1528      // Save the stack.
1529      Address Stack =
1530        CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
1531
1532      llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
1533      llvm::Value *V = Builder.CreateCall(F);
1534      Builder.CreateStore(V, Stack);
1535
1536      DidCallStackSave = true;
1537
1538      // Push a cleanup block and restore the stack there.
1539      // FIXME: in general circumstances, this should be an EH cleanup.
1540      pushStackRestore(NormalCleanup, Stack);
1541    }
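    // Resulting IR pattern (sketch):
    //   %saved_stack = alloca i8*
    //   %sp = call i8* @llvm.stacksave()
    //   store i8* %sp, i8** %saved_stack
    //   ... VLA allocas ...
    //   ; on scope exit, the cleanup emits: call @llvm.stackrestore(i8* %sp)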
1542
1543    auto VlaSize = getVLASize(Ty);
1544    llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1545
1546    // Allocate memory for the array.
1547    address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1548                               &AllocaAddr);
1549
1550    // If debug info is enabled, properly describe this type's VLA
1551    // dimensions by registering the VLA size expression for each of
1552    // the dimensions.
1553    EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1554  }
1555
1556  setAddrOfLocalVar(&D, address);
1557  emission.Addr = address;
1558  emission.AllocaAddr = AllocaAddr;
1559
1560  // Emit debug info for local var declaration.
1561  if (EmitDebugInfo && HaveInsertPoint()) {
1562    Address DebugAddr = address;
1563    bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1564    DI->setLocation(D.getLocation());
1565
1566    // If NRVO, use a pointer to the return address.
1567    if (UsePointerValue)
1568      DebugAddr = ReturnValuePointer;
1569
1570    (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
1571                                        UsePointerValue);
1572  }
1573
1574  if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1575    EmitVarAnnotations(&D, address.getPointer());
1576
1577  // Make sure we call @llvm.lifetime.end.
1578  if (emission.useLifetimeMarkers())
1579    EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1580                                         emission.getOriginalAllocatedAddress(),
1581                                         emission.getSizeForLifetimeMarkers());
1582
1583  return emission;
1584}
1585
1586static bool isCapturedBy(const VarDecl &, const Expr *);
1587
1588/// Determines whether the given __block variable is potentially
1589/// captured by the given statement.
1590static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1591  if (const Expr *E = dyn_cast<Expr>(S))
1592    return isCapturedBy(Var, E);
1593  for (const Stmt *SubStmt : S->children())
1594    if (isCapturedBy(Var, SubStmt))
1595      return true;
1596  return false;
1597}
1598
1599/// Determines whether the given __block variable is potentially
1600/// captured by the given expression.
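/// For example (illustrative): in '__block int x = ^{ return x; }();' the
/// initializer's block captures 'x' itself.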
1601static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1602  // Skip the most common kinds of expressions that make
1603  // hierarchy-walking expensive.
1604  E = E->IgnoreParenCasts();
1605
1606  if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1607    const BlockDecl *Block = BE->getBlockDecl();
1608    for (const auto &I : Block->captures()) {
1609      if (I.getVariable() == &Var)
1610        return true;
1611    }
1612
1613    // No need to walk into the subexpressions.
1614    return false;
1615  }
1616
1617  if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1618    const CompoundStmt *CS = SE->getSubStmt();
1619    for (const auto *BI : CS->body()) {
1620      if (const auto *BIE = dyn_cast<Expr>(BI)) {
1621        if (isCapturedBy(Var, BIE))
1622          return true;
1623      } else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1624        // Special-case declarations: check their initializers.
1625        for (const auto *I : DS->decls()) {
1626          if (const auto *VD = dyn_cast<VarDecl>(I)) {
1627            const Expr *Init = VD->getInit();
1628            if (Init && isCapturedBy(Var, Init))
1629              return true;
1630          }
1631        }
1632      } else {
1633        // FIXME: Conservatively assume arbitrary statements cause
1634        // capturing; later, provide code to poke into statements for
1635        // capture analysis.
1636        return true;
1637      }
1638    }
1639    return false;
1640
1641  for (const Stmt *SubStmt : E->children())
1642    if (isCapturedBy(Var, SubStmt))
1643      return true;
1644
1645  return false;
1646}
1647
1648/// Determine whether the given initializer is trivial in the sense
1649/// that it requires no code to be generated.
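/// For example, 'struct S { int n; }; S s;' is trivial (no code emitted),
/// while 'S s{};' is not, because it requires zero-initialization.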
1650bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1651  if (!Init)
1652    return true;
1653
1654  if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1655    if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1656      if (Constructor->isTrivial() &&
1657          Constructor->isDefaultConstructor() &&
1658          !Construct->requiresZeroInitialization())
1659        return true;
1660
1661  return false;
1662}
1663
1664void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1665                                                      const VarDecl &D,
1666                                                      Address Loc) {
1667  auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1668  CharUnits Size = getContext().getTypeSizeInChars(type);
1669  bool isVolatile = type.isVolatileQualified();
1670  if (!Size.isZero()) {
1671    switch (trivialAutoVarInit) {
1672    case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1673      llvm_unreachable("Uninitialized handled by caller");
1674    case LangOptions::TrivialAutoVarInitKind::Zero:
1675      emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1676      break;
1677    case LangOptions::TrivialAutoVarInitKind::Pattern:
1678      emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1679      break;
1680    }
1681    return;
1682  }
1683
1684  // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1685  // them, so emit a memcpy with the VLA size to initialize each element.
1686  // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1687  // will catch that code, but there exists code which generates zero-sized
1688  // VLAs. Be nice and initialize whatever they requested.
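  // Conceptually, the pattern case below emits (sketch):
  //   for (char *p = begin; p != end; p += eltsize)
  //     memcpy(p, <pattern global>, eltsize);
  // guarded by a check that the VLA is not zero-sized.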
1689  const auto *VlaType = getContext().getAsVariableArrayType(type);
1690  if (!VlaType)
1691    return;
1692  auto VlaSize = getVLASize(VlaType);
1693  auto SizeVal = VlaSize.NumElts;
1694  CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1695  switch (trivialAutoVarInit) {
1696  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1697    llvm_unreachable("Uninitialized handled by caller");
1698
1699  case LangOptions::TrivialAutoVarInitKind::Zero:
1700    if (!EltSize.isOne())
1701      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1702    Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
1703                         isVolatile);
1704    break;
1705
1706  case LangOptions::TrivialAutoVarInitKind::Pattern: {
1707    llvm::Type *ElTy = Loc.getElementType();
1708    llvm::Constant *Constant = constWithPadding(
1709        CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1710    CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1711    llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1712    llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1713    llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1714    llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1715        SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1716        "vla.iszerosized");
1717    Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1718    EmitBlock(SetupBB);
1719    if (!EltSize.isOne())
1720      SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1721    llvm::Value *BaseSizeInChars =
1722        llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1723    Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
1724    llvm::Value *End =
1725        Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
1726    llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1727    EmitBlock(LoopBB);
1728    llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1729    Cur->addIncoming(Begin.getPointer(), OriginBB);
1730    CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1731    Builder.CreateMemCpy(Address(Cur, CurAlign),
1732                         createUnnamedGlobalForMemcpyFrom(
1733                             CGM, D, Builder, Constant, ConstantAlign),
1734                         BaseSizeInChars, isVolatile);
1735    llvm::Value *Next =
1736        Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1737    llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1738    Builder.CreateCondBr(Done, ContBB, LoopBB);
1739    Cur->addIncoming(Next, LoopBB);
1740    EmitBlock(ContBB);
1741  } break;
1742  }
1743}
1744
1745void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1746  assert(emission.Variable && "emission was not valid!");
1747
1748  // If this was emitted as a global constant, we're done.
1749  if (emission.wasEmittedAsGlobal()) return;
1750
1751  const VarDecl &D = *emission.Variable;
1752  auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1753  QualType type = D.getType();
1754
1755  // If this local has an initializer, emit it now.
1756  const Expr *Init = D.getInit();
1757
1758  // If we are at an unreachable point, we don't need to emit the initializer
1759  // unless it contains a label.
1760  if (!HaveInsertPoint()) {
1761    if (!Init || !ContainsLabel(Init)) return;
1762    EnsureInsertPoint();
1763  }
1764
1765  // Initialize the structure of a __block variable.
1766  if (emission.IsEscapingByRef)
1767    emitByrefStructureInit(emission);
1768
1769  // Initialize the variable here if it doesn't have an initializer and it
1770  // is a C struct that is non-trivial to initialize or an array containing
1771  // such a struct.
1772  if (!Init &&
1773      type.isNonTrivialToPrimitiveDefaultInitialize() ==
1774          QualType::PDIK_Struct) {
1775    LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1776    if (emission.IsEscapingByRef)
1777      drillIntoBlockVariable(*this, Dst, &D);
1778    defaultInitNonTrivialCStructVar(Dst);
1779    return;
1780  }
1781
1782  // Check whether this is a byref variable that's potentially
1783  // captured and moved by its own initializer.  If so, we'll need to
1784  // emit the initializer first, then copy into the variable.
1785  bool capturedByInit =
1786      Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1787
1788  bool locIsByrefHeader = !capturedByInit;
1789  const Address Loc =
1790      locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1791
1792  // Note: constexpr already initializes everything correctly.
1793  LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1794      (D.isConstexpr()
1795           ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1796           : (D.getAttr<UninitializedAttr>()
1797                  ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1798                  : getContext().getLangOpts().getTrivialAutoVarInit()));
1799
1800  auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1801    if (trivialAutoVarInit ==
1802        LangOptions::TrivialAutoVarInitKind::Uninitialized)
1803      return;
1804
1805    // Only initialize a __block's storage: we always initialize the header.
1806    if (emission.IsEscapingByRef && !locIsByrefHeader)
1807      Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1808
1809    return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1810  };
1811
1812  if (isTrivialInitializer(Init))
1813    return initializeWhatIsTechnicallyUninitialized(Loc);
1814
1815  llvm::Constant *constant = nullptr;
1816  if (emission.IsConstantAggregate ||
1817      D.mightBeUsableInConstantExpressions(getContext())) {
1818    assert(!capturedByInit && "constant init contains a capturing block?");
1819    constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1820    if (constant && !constant->isZeroValue() &&
1821        (trivialAutoVarInit !=
1822         LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1823      IsPattern isPattern =
1824          (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1825              ? IsPattern::Yes
1826              : IsPattern::No;
1827      // C guarantees that brace-init with fewer initializers than members in
1828      // the aggregate will initialize the rest of the aggregate as if by
1829      // static initialization. In turn, static initialization guarantees
1830      // that padding is initialized to zero bits. We could instead
1831      // pattern-init if D has any ImplicitValueInitExpr, but that seems
1832      // to be unintuitive behavior.
1833      constant = constWithPadding(CGM, IsPattern::No,
1834                                  replaceUndef(CGM, isPattern, constant));
1835    }
1836  }
1837
1838  if (!constant) {
1839    initializeWhatIsTechnicallyUninitialized(Loc);
1840    LValue lv = MakeAddrLValue(Loc, type);
1841    lv.setNonGC(true);
1842    return EmitExprAsInit(Init, &D, lv, capturedByInit);
1843  }
1844
1845  if (!emission.IsConstantAggregate) {
1846    // For simple scalar/complex initialization, store the value directly.
1847    LValue lv = MakeAddrLValue(Loc, type);
1848    lv.setNonGC(true);
1849    return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1850  }
1851
1852  llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
1853  emitStoresForConstant(
1854      CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
1855      type.isVolatileQualified(), Builder, constant);
1856}
1857
1858/// Emit an expression as an initializer for an object (variable, field, etc.)
1859/// at the given location.  The expression is not necessarily the normal
1860/// initializer for the object, and the address is not necessarily
1861/// its normal location.
1862///
1863/// \param init the initializing expression
1864/// \param D the object to act as if we're initializing
1865/// \param lvalue the lvalue to initialize; its address is a pointer to
1866///   the LLVM mapping of the object's type, and it also carries the
1867///   alignment of that address
1868/// \param capturedByInit true if \p D is a __block variable
1869///   whose address is potentially changed by the initializer
1870void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1871                                     LValue lvalue, bool capturedByInit) {
1872  QualType type = D->getType();
1873
1874  if (type->isReferenceType()) {
1875    RValue rvalue = EmitReferenceBindingToExpr(init);
1876    if (capturedByInit)
1877      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1878    EmitStoreThroughLValue(rvalue, lvalue, true);
1879    return;
1880  }
1881  switch (getEvaluationKind(type)) {
1882  case TEK_Scalar:
1883    EmitScalarInit(init, D, lvalue, capturedByInit);
1884    return;
1885  case TEK_Complex: {
1886    ComplexPairTy complex = EmitComplexExpr(init);
1887    if (capturedByInit)
1888      drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1889    EmitStoreOfComplex(complex, lvalue, /*init*/ true);
1890    return;
1891  }
1892  case TEK_Aggregate:
1893    if (type->isAtomicType()) {
1894      EmitAtomicInit(const_cast<Expr*>(init), lvalue);
1895    } else {
1896      AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
1897      if (isa<VarDecl>(D))
1898        Overlap = AggValueSlot::DoesNotOverlap;
1899      else if (auto *FD = dyn_cast<FieldDecl>(D))
1900        Overlap = getOverlapForFieldInit(FD);
1901      // TODO: how can we delay here if D is captured by its initializer?
1902      EmitAggExpr(init, AggValueSlot::forLValue(
1903                            lvalue, *this, AggValueSlot::IsDestructed,
1904                            AggValueSlot::DoesNotNeedGCBarriers,
1905                            AggValueSlot::IsNotAliased, Overlap));
1906    }
1907    return;
1908  }
1909  llvm_unreachable("bad evaluation kind");
1910}
1911
1912/// Enter a destroy cleanup for the given local variable.
1913void CodeGenFunction::emitAutoVarTypeCleanup(
1914                            const CodeGenFunction::AutoVarEmission &emission,
1915                            QualType::DestructionKind dtorKind) {
1916  assert(dtorKind != QualType::DK_none);
1917
1918  // Note that for __block variables, we want to destroy the
1919  // original stack object, not the possibly forwarded object.
1920  Address addr = emission.getObjectAddress(*this);
1921
1922  const VarDecl *var = emission.Variable;
1923  QualType type = var->getType();
1924
1925  CleanupKind cleanupKind = NormalAndEHCleanup;
1926  CodeGenFunction::Destroyer *destroyer = nullptr;
1927
1928  switch (dtorKind) {
1929  case QualType::DK_none:
1930    llvm_unreachable("no cleanup for trivially-destructible variable");
1931
1932  case QualType::DK_cxx_destructor:
1933    // If there's an NRVO flag on the emission, we need a different
1934    // cleanup.
1935    if (emission.NRVOFlag) {
1936      assert(!type->isArrayType());
1937      CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
1938      EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
1939                                                  emission.NRVOFlag);
1940      return;
1941    }
1942    break;
1943
1944  case QualType::DK_objc_strong_lifetime:
1945    // Suppress cleanups for pseudo-strong variables.
1946    if (var->isARCPseudoStrong()) return;
1947
1948    // Otherwise, consider whether to use an EH cleanup or not.
1949    cleanupKind = getARCCleanupKind();
1950
1951    // Use the imprecise destroyer by default.
1952    if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
1953      destroyer = CodeGenFunction::destroyARCStrongImprecise;
1954    break;
1955
1956  case QualType::DK_objc_weak_lifetime:
1957    break;
1958
1959  case QualType::DK_nontrivial_c_struct:
1960    destroyer = CodeGenFunction::destroyNonTrivialCStruct;
1961    if (emission.NRVOFlag) {
1962      assert(!type->isArrayType());
1963      EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
1964                                                emission.NRVOFlag, type);
1965      return;
1966    }
1967    break;
1968  }
1969
1970  // If we haven't chosen a more specific destroyer, use the default.
1971  if (!destroyer) destroyer = getDestroyer(dtorKind);
1972
1973  // Use an EH cleanup in array destructors iff the destructor itself
1974  // is being pushed as an EH cleanup.
1975  bool useEHCleanup = (cleanupKind & EHCleanup);
1976  EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
1977                                     useEHCleanup);
1978}
1979
1980void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
1981  assert(emission.Variable && "emission was not valid!");
1982
1983  // If this was emitted as a global constant, we're done.
1984  if (emission.wasEmittedAsGlobal()) return;
1985
1986  // If we don't have an insertion point, we're done.  Sema prevents
1987  // us from jumping into any of these scopes anyway.
1988  if (!HaveInsertPoint()) return;
1989
1990  const VarDecl &D = *emission.Variable;
1991
1992  // Check the type for a cleanup.
1993  if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
1994    emitAutoVarTypeCleanup(emission, dtorKind);
1995
1996  // In GC mode, honor objc_precise_lifetime.
1997  if (getLangOpts().getGC() != LangOptions::NonGC &&
1998      D.hasAttr<ObjCPreciseLifetimeAttr>()) {
1999    EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
2000  }
2001
2002  // Handle the cleanup attribute.
2003  if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2004    const FunctionDecl *FD = CA->getFunctionDecl();
2005
2006    llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2007    assert(F && "Could not find function!");
2008
2009    const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2010    EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2011  }
2012
2013  // If this is a block variable, call _Block_object_destroy
2014  // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2015  // mode.
2016  if (emission.IsEscapingByRef &&
2017      CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2018    BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2019    if (emission.Variable->getType().isObjCGCWeak())
2020      Flags |= BLOCK_FIELD_IS_WEAK;
2021    enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2022                      /*LoadBlockVarAddr*/ false,
2023                      cxxDestructorCanThrow(emission.Variable->getType()));
2024  }
2025}
2026
2027CodeGenFunction::Destroyer *
2028CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2029  switch (kind) {
2030  case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2031  case QualType::DK_cxx_destructor:
2032    return destroyCXXObject;
2033  case QualType::DK_objc_strong_lifetime:
2034    return destroyARCStrongPrecise;
2035  case QualType::DK_objc_weak_lifetime:
2036    return destroyARCWeak;
2037  case QualType::DK_nontrivial_c_struct:
2038    return destroyNonTrivialCStruct;
2039  }
2040  llvm_unreachable("Unknown DestructionKind");
2041}
2042
2043/// pushEHDestroy - Push the standard destructor for the given type as
2044/// an EH-only cleanup.
2045void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2046                                    Address addr, QualType type) {
2047  assert(dtorKind && "cannot push destructor for trivial type");
2048  assert(needsEHCleanup(dtorKind));
2049
2050  pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2051}
2052
2053/// pushDestroy - Push the standard destructor for the given type as
2054/// at least a normal cleanup.
2055void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2056                                  Address addr, QualType type) {
2057  assert(dtorKind && "cannot push destructor for trivial type");
2058
2059  CleanupKind cleanupKind = getCleanupKind(dtorKind);
2060  pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2061              cleanupKind & EHCleanup);
2062}
2063
2064void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2065                                  QualType type, Destroyer *destroyer,
2066                                  bool useEHCleanupForArray) {
2067  pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2068                                     destroyer, useEHCleanupForArray);
2069}
2070
2071void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2072  EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2073}
2074
2075void CodeGenFunction::pushLifetimeExtendedDestroy(
2076    CleanupKind cleanupKind, Address addr, QualType type,
2077    Destroyer *destroyer, bool useEHCleanupForArray) {
2078  // Push an EH-only cleanup for the object now.
2079  // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2080  // around in case a temporary's destructor throws an exception.
2081  if (cleanupKind & EHCleanup)
2082    EHStack.pushCleanup<DestroyObject>(
2083        static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
2084        destroyer, useEHCleanupForArray);
2085
2086  // Remember that we need to push a full cleanup for the object at the
2087  // end of the full-expression.
2088  pushCleanupAfterFullExpr<DestroyObject>(
2089      cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2090}
2091
2092/// emitDestroy - Immediately perform the destruction of the given
2093/// object.
2094///
2095/// \param addr - the address of the object; a type*
2096/// \param type - the type of the object; if an array type, all
2097///   objects are destroyed in reverse order
2098/// \param destroyer - the function to call to destroy individual
2099///   elements
2100/// \param useEHCleanupForArray - whether an EH cleanup should be
2101///   used when destroying array elements, in case one of the
2102///   destructions throws an exception
2103void CodeGenFunction::emitDestroy(Address addr, QualType type,
2104                                  Destroyer *destroyer,
2105                                  bool useEHCleanupForArray) {
2106  const ArrayType *arrayType = getContext().getAsArrayType(type);
2107  if (!arrayType)
2108    return destroyer(*this, addr, type);
2109
2110  llvm::Value *length = emitArrayLength(arrayType, type, addr);
2111
2112  CharUnits elementAlign =
2113    addr.getAlignment()
2114        .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2115
2116  // Normally we have to check whether the array is zero-length.
2117  bool checkZeroLength = true;
2118
2119  // But if the array length is constant, we can suppress that.
2120  if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2121    // ...and if it's constant zero, we can just skip the entire thing.
2122    if (constLength->isZero()) return;
2123    checkZeroLength = false;
2124  }
2125
2126  llvm::Value *begin = addr.getPointer();
2127  llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
2128  emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2129                   checkZeroLength, useEHCleanupForArray);
2130}
2131
2132/// emitArrayDestroy - Destroys all the elements of the given array,
2133/// beginning from last to first.  The array cannot be zero-length.
2134///
2135/// \param begin - a type* denoting the first element of the array
2136/// \param end - a type* denoting one past the end of the array
2137/// \param elementType - the element type of the array
2138/// \param destroyer - the function to call to destroy elements
2139/// \param useEHCleanup - whether to push an EH cleanup to destroy
2140///   the remaining elements in case the destruction of a single
2141///   element throws
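/// Roughly, the emitted control flow is (illustrative):
///   arraydestroy.body:
///     %element = gep %elementPast, -1   ; step back one element
///     <destroy %element>
///     br (%element == begin) ? arraydestroy.done : arraydestroy.body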
2142void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2143                                       llvm::Value *end,
2144                                       QualType elementType,
2145                                       CharUnits elementAlign,
2146                                       Destroyer *destroyer,
2147                                       bool checkZeroLength,
2148                                       bool useEHCleanup) {
2149  assert(!elementType->isArrayType());
2150
2151  // The basic structure here is a do-while loop, because we don't
2152  // need to check for the zero-element case.
2153  llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2154  llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2155
2156  if (checkZeroLength) {
2157    llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2158                                                "arraydestroy.isempty");
2159    Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2160  }
2161
2162  // Enter the loop body, making that address the current address.
2163  llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2164  EmitBlock(bodyBB);
2165  llvm::PHINode *elementPast =
2166    Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2167  elementPast->addIncoming(end, entryBB);
2168
2169  // Shift the address back by one element.
2170  llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2171  llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
2172                                                   "arraydestroy.element");
2173
2174  if (useEHCleanup)
2175    pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2176                                   destroyer);
2177
2178  // Perform the actual destruction there.
2179  destroyer(*this, Address(element, elementAlign), elementType);
2180
2181  if (useEHCleanup)
2182    PopCleanupBlock();
2183
2184  // Check whether we've reached the end.
2185  llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2186  Builder.CreateCondBr(done, doneBB, bodyBB);
2187  elementPast->addIncoming(element, Builder.GetInsertBlock());
2188
2189  // Done.
2190  EmitBlock(doneBB);
2191}
2192
2193/// Perform partial array destruction as if in an EH cleanup.  Unlike
2194/// emitArrayDestroy, the element type here may still be an array type.
2195static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2196                                    llvm::Value *begin, llvm::Value *end,
2197                                    QualType type, CharUnits elementAlign,
2198                                    CodeGenFunction::Destroyer *destroyer) {
2199  // If the element type is itself an array, drill down.
2200  unsigned arrayDepth = 0;
2201  while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2202    // VLAs don't require a GEP index to walk into.
2203    if (!isa<VariableArrayType>(arrayType))
2204      arrayDepth++;
2205    type = arrayType->getElementType();
2206  }
2207
2208  if (arrayDepth) {
2209    llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2210
2211    SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2212    begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
2213    end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
2214  }
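  // e.g. if 'begin' has type [4 x [6 x T]]*, arrayDepth is 2 and the GEPs
  // above use indices {0, 0, 0} to produce 'T*' bounds.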
2215
2216  // Destroy the array.  We don't ever need an EH cleanup because we
2217  // assume that we're in an EH cleanup ourselves, so a throwing
2218  // destructor causes an immediate terminate.
2219  CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2220                       /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2221}
2222
2223namespace {
2224  /// RegularPartialArrayDestroy - a cleanup which performs a partial
2225  /// array destroy where the end pointer is regularly determined and
2226  /// does not need to be loaded from a local.
2227  class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2228    llvm::Value *ArrayBegin;
2229    llvm::Value *ArrayEnd;
2230    QualType ElementType;
2231    CodeGenFunction::Destroyer *Destroyer;
2232    CharUnits ElementAlign;
2233  public:
2234    RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2235                               QualType elementType, CharUnits elementAlign,
2236                               CodeGenFunction::Destroyer *destroyer)
2237      : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2238        ElementType(elementType), Destroyer(destroyer),
2239        ElementAlign(elementAlign) {}
2240
2241    void Emit(CodeGenFunction &CGF, Flags flags) override {
2242      emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2243                              ElementType, ElementAlign, Destroyer);
2244    }
2245  };
2246
2247  /// IrregularPartialArrayDestroy - a cleanup which performs a
2248  /// partial array destroy where the end pointer is irregularly
2249  /// determined and must be loaded from a local.
2250  class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2251    llvm::Value *ArrayBegin;
2252    Address ArrayEndPointer;
2253    QualType ElementType;
2254    CodeGenFunction::Destroyer *Destroyer;
2255    CharUnits ElementAlign;
2256  public:
2257    IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2258                                 Address arrayEndPointer,
2259                                 QualType elementType,
2260                                 CharUnits elementAlign,
2261                                 CodeGenFunction::Destroyer *destroyer)
2262      : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2263        ElementType(elementType), Destroyer(destroyer),
2264        ElementAlign(elementAlign) {}
2265
2266    void Emit(CodeGenFunction &CGF, Flags flags) override {
2267      llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2268      emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2269                              ElementType, ElementAlign, Destroyer);
2270    }
2271  };
2272} // end anonymous namespace
2273
2274/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2275/// already-constructed elements of the given array.  The cleanup
2276/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2277///
2278/// \param elementType - the immediate element type of the array;
2279///   possibly still an array type
2280void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2281                                                       Address arrayEndPointer,
2282                                                       QualType elementType,
2283                                                       CharUnits elementAlign,
2284                                                       Destroyer *destroyer) {
2285  pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
2286                                                    arrayBegin, arrayEndPointer,
2287                                                    elementType, elementAlign,
2288                                                    destroyer);
2289}
2290
2291/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2292/// already-constructed elements of the given array.  The cleanup
2293/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2294///
2295/// \param elementType - the immediate element type of the array;
2296///   possibly still an array type
2297void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2298                                                     llvm::Value *arrayEnd,
2299                                                     QualType elementType,
2300                                                     CharUnits elementAlign,
2301                                                     Destroyer *destroyer) {
2302  pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2303                                                  arrayBegin, arrayEnd,
2304                                                  elementType, elementAlign,
2305                                                  destroyer);
2306}
2307
2308/// Lazily declare the @llvm.lifetime.start intrinsic.
2309llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2310  if (LifetimeStartFn)
2311    return LifetimeStartFn;
2312  LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2313    llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2314  return LifetimeStartFn;
2315}
2316
2317/// Lazily declare the @llvm.lifetime.end intrinsic.
2318llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2319  if (LifetimeEndFn)
2320    return LifetimeEndFn;
2321  LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2322    llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2323  return LifetimeEndFn;
2324}
2325
2326namespace {
2327  /// A cleanup to perform a release of an object at the end of a
2328  /// function.  This is used to balance out the incoming +1 of a
2329  /// ns_consumed argument when we can't reasonably do that just by
2330  /// not doing the initial retain for a __block argument.
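  /// For example (illustrative), 'void f(__attribute__((ns_consumed)) id x)'
  /// receives 'x' at +1; when the initial retain cannot simply be skipped,
  /// this cleanup emits the balancing release at function exit.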
2331  struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2332    ConsumeARCParameter(llvm::Value *param,
2333                        ARCPreciseLifetime_t precise)
2334      : Param(param), Precise(precise) {}
2335
2336    llvm::Value *Param;
2337    ARCPreciseLifetime_t Precise;
2338
2339    void Emit(CodeGenFunction &CGF, Flags flags) override {
2340      CGF.EmitARCRelease(Param, Precise);
2341    }
2342  };
2343} // end anonymous namespace
2344
2345/// Emit an alloca (or GlobalValue depending on target)
2346/// for the specified parameter and set up LocalDeclMap.
2347void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2348                                   unsigned ArgNo) {
2349  // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2350  assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2351         "Invalid argument to EmitParmDecl");
2352
2353  Arg.getAnyValue()->setName(D.getName());
2354
2355  QualType Ty = D.getType();
2356
2357  // Use better IR generation for certain implicit parameters.
2358  if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2359    // The only implicit argument a block has is its literal.
2360    // This may be passed as an inalloca'ed value on Windows x86.
2361    if (BlockInfo) {
2362      llvm::Value *V = Arg.isIndirect()
2363                           ? Builder.CreateLoad(Arg.getIndirectAddress())
2364                           : Arg.getDirectValue();
2365      setBlockContextParameter(IPD, ArgNo, V);
2366      return;
2367    }
2368  }
2369
2370  Address DeclPtr = Address::invalid();
2371  bool DoStore = false;
2372  bool IsScalar = hasScalarEvaluationKind(Ty);
2373  // If we already have a pointer to the argument, reuse the input pointer.
2374  if (Arg.isIndirect()) {
2375    DeclPtr = Arg.getIndirectAddress();
2376    // If we have a prettier pointer type at this point, bitcast to that.
2377    unsigned AS = DeclPtr.getType()->getAddressSpace();
2378    llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
2379    if (DeclPtr.getType() != IRTy)
2380      DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
2381    // Indirect argument is in alloca address space, which may be different
2382    // from the default address space.
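    // e.g. on amdgcn, allocas live in addrspace(5) while the default address
    // space is 0, so the parameter pointer needs an addrspacecast here.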
2383    auto AllocaAS = CGM.getASTAllocaAddressSpace();
2384    auto *V = DeclPtr.getPointer();
2385    auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2386    auto DestLangAS =
2387        getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2388    if (SrcLangAS != DestLangAS) {
2389      assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2390             CGM.getDataLayout().getAllocaAddrSpace());
2391      auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2392      auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
2393      DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
2394                            *this, V, SrcLangAS, DestLangAS, T, true),
2395                        DeclPtr.getAlignment());
2396    }
2397
2398    // Push a destructor cleanup for this parameter if the ABI requires it.
2399    // Don't push a cleanup in a thunk for a method that will also emit a
2400    // cleanup.
2401    if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
2402        Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2403      if (QualType::DestructionKind DtorKind =
2404              D.needsDestruction(getContext())) {
2405        assert((DtorKind == QualType::DK_cxx_destructor ||
2406                DtorKind == QualType::DK_nontrivial_c_struct) &&
2407               "unexpected destructor type");
2408        pushDestroy(DtorKind, DeclPtr, Ty);
2409        CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2410            EHStack.stable_begin();
2411      }
2412    }
2413  } else {
2414    // Check if the parameter address is controlled by the OpenMP runtime.
2415    Address OpenMPLocalAddr =
2416        getLangOpts().OpenMP
2417            ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2418            : Address::invalid();
2419    if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2420      DeclPtr = OpenMPLocalAddr;
2421    } else {
2422      // Otherwise, create a temporary to hold the value.
2423      DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2424                              D.getName() + ".addr");
2425    }
2426    DoStore = true;
2427  }
2428
2429  llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2430
2431  LValue lv = MakeAddrLValue(DeclPtr, Ty);
2432  if (IsScalar) {
2433    Qualifiers qs = Ty.getQualifiers();
2434    if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2435      // We honor __attribute__((ns_consumed)) for types with lifetime.
2436      // For __strong, it's handled by just skipping the initial retain;
2437      // otherwise we have to balance out the initial +1 with an extra
2438      // cleanup to do the release at the end of the function.
2439      bool isConsumed = D.hasAttr<NSConsumedAttr>();
2440
2441      // If a parameter is pseudo-strong then we can omit the implicit retain.
2442      if (D.isARCPseudoStrong()) {
2443        assert(lt == Qualifiers::OCL_Strong &&
2444               "pseudo-strong variable isn't strong?");
2445        assert(qs.hasConst() && "pseudo-strong variable should be const!");
2446        lt = Qualifiers::OCL_ExplicitNone;
2447      }
2448
2449      // Load objects passed indirectly.
2450      if (Arg.isIndirect() && !ArgVal)
2451        ArgVal = Builder.CreateLoad(DeclPtr);
2452
2453      if (lt == Qualifiers::OCL_Strong) {
2454        if (!isConsumed) {
2455          if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2456            // Use objc_storeStrong(&dest, value) to retain the object.
2457            // But first, store null into 'dest', because objc_storeStrong
2458            // attempts to release its old value.
2459            llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2460            EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2461            EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
2462            DoStore = false;
2463          }
2464          } else {
2465            // Don't use objc_retainBlock for block pointers, because we
2466            // don't want to Block_copy something just because we got it
2467            // as a parameter.
2468            ArgVal = EmitARCRetainNonBlock(ArgVal);
2469          }
2470      } else {
2471        // Push the cleanup for a consumed parameter.
2472        if (isConsumed) {
2473          ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2474                                ? ARCPreciseLifetime : ARCImpreciseLifetime);
2475          EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2476                                                   precise);
2477        }
2478
2479        if (lt == Qualifiers::OCL_Weak) {
2480          EmitARCInitWeak(DeclPtr, ArgVal);
2481          DoStore = false; // The weak init is a store, no need to do two.
2482        }
2483      }
2484
2485      // Enter the cleanup scope.
2486      EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2487    }
2488  }
2489
2490  // Store the initial value into the alloca.
2491  if (DoStore)
2492    EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2493
2494  setAddrOfLocalVar(&D, DeclPtr);
2495
2496  // Emit debug info for param declarations in non-thunk functions.
2497  if (CGDebugInfo *DI = getDebugInfo()) {
2498    if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
2499      DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
2500    }
2501  }
2502
2503  if (D.hasAttr<AnnotateAttr>())
2504    EmitVarAnnotations(&D, DeclPtr.getPointer());
2505
2506  // We can only check return value nullability if all arguments to the
2507  // function satisfy their nullability preconditions. This makes it necessary
2508  // to emit null checks for args in the function body itself.
2509  if (requiresReturnValueNullabilityCheck()) {
2510    auto Nullability = Ty->getNullability(getContext());
2511    if (Nullability && *Nullability == NullabilityKind::NonNull) {
2512      SanitizerScope SanScope(this);
2513      RetValNullabilityPrecondition =
2514          Builder.CreateAnd(RetValNullabilityPrecondition,
2515                            Builder.CreateIsNotNull(Arg.getAnyValue()));
2516    }
2517  }
2518}
2519
2520void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2521                                            CodeGenFunction *CGF) {
2522  if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2523    return;
2524  getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2525}
2526
2527void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2528                                         CodeGenFunction *CGF) {
2529  if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2530      (!LangOpts.EmitAllDecls && !D->isUsed()))
2531    return;
2532  getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2533}
2534
2535void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2536  getOpenMPRuntime().checkArchForUnifiedAddressing(D);
2537}
2538