CGDeclCXX.cpp revision 360784
//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with code generation of C++ declarations
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/Path.h"

using namespace clang;
using namespace CodeGen;

static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
                         ConstantAddress DeclPtr) {
  assert(
      (D.hasGlobalStorage() ||
       (D.hasLocalStorage() && CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
      "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D.getType()->isReferenceType() &&
         "Should not call EmitDeclInit on a reference!");

  QualType type = D.getType();
  LValue lv = CGF.MakeAddrLValue(DeclPtr, type);

  const Expr *Init = D.getInit();
  switch (CGF.getEvaluationKind(type)) {
  case TEK_Scalar: {
    CodeGenModule &CGM = CGF.CGM;
    if (lv.isObjCStrong())
      CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
                                                DeclPtr, D.getTLSKind());
    else if (lv.isObjCWeak())
      CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
                                              DeclPtr);
    else
      CGF.EmitScalarInit(Init, &D, lv, false);
    return;
  }
  case TEK_Complex:
    CGF.EmitComplexExprIntoLValue(Init, lv, /*isInit*/ true);
    return;
  case TEK_Aggregate:
    CGF.EmitAggExpr(Init,
                    AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  }
  llvm_unreachable("bad evaluation kind");
}
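
// Illustrative only (not part of the original source): roughly how the three
// evaluation kinds above map onto namespace-scope definitions; the names f, g,
// h, and S are hypothetical.
//
//   int i = f();                      // TEK_Scalar
//   _Complex double c = g();          // TEK_Complex
//   struct S { int a[16]; } s = h();  // TEK_Aggregate, initialized in place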

/// Emit code to cause the destruction of the given variable with
/// static storage duration.
static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
                            ConstantAddress Addr) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D.needsDestruction(CGF.getContext());

  // FIXME:  __attribute__((cleanup)) ?

  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D.getTLSKind() && "should have rejected this");
    return;
  }

  llvm::FunctionCallee Func;
  llvm::Constant *Argument;

  CodeGenModule &CGM = CGF.CGM;
  QualType Type = D.getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = Type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());
  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!Record->hasTrivialDestructor());
    CXXDestructorDecl *Dtor = Record->getDestructor();

    Func = CGM.getAddrAndTypeOfCXXStructor(GlobalDecl(Dtor, Dtor_Complete));
    if (CGF.getContext().getLangOpts().OpenCL) {
      auto DestAS =
          CGM.getTargetCodeGenInfo().getAddrSpaceOfCxaAtexitPtrParam();
      auto DestTy = CGF.getTypes().ConvertType(Type)->getPointerTo(
          CGM.getContext().getTargetAddressSpace(DestAS));
      auto SrcAS = D.getType().getQualifiers().getAddressSpace();
      if (DestAS == SrcAS)
        Argument = llvm::ConstantExpr::getBitCast(Addr.getPointer(), DestTy);
      else
        // FIXME: On addr space mismatch we are passing NULL. The generation
        // of the global destructor function should be adjusted accordingly.
        Argument = llvm::ConstantPointerNull::get(DestTy);
    } else {
      Argument = llvm::ConstantExpr::getBitCast(
          Addr.getPointer(), CGF.getTypes().ConvertType(Type)->getPointerTo());
    }
  // Otherwise, the standard logic requires a helper function.
  } else {
    Func = CodeGenFunction(CGM)
           .generateDestroyHelper(Addr, Type, CGF.getDestroyer(DtorKind),
                                  CGF.needsEHCleanup(DtorKind), &D);
    Argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
  }

  CGM.getCXXABI().registerGlobalDtor(CGF, D, Func, Argument);
}
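
// Illustrative only: the kind of definition this path registers teardown for
// (Widget and w are hypothetical names):
//
//   struct Widget { ~Widget(); };
//   Widget w;   // static storage duration, non-trivial destructor
//
// With __cxa_atexit available, the complete-object destructor and &w are
// typically registered directly; under -fno-use-cxa-atexit, an atexit stub is
// generated instead (see createAtExitStub below).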

/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
                              llvm::Constant *Addr) {
  return CGF.EmitInvariantStart(
      Addr, CGF.getContext().getTypeSizeInChars(D.getType()));
}

void CodeGenFunction::EmitInvariantStart(llvm::Constant *Addr, CharUnits Size) {
  // Do not emit the intrinsic if we're not optimizing.
  if (!CGM.getCodeGenOpts().OptimizationLevel)
    return;

  // Grab the llvm.invariant.start intrinsic.
  llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
  // Overloaded address space type.
  llvm::Type *ObjectPtr[1] = {Int8PtrTy};
  llvm::Function *InvariantStart = CGM.getIntrinsic(InvStartID, ObjectPtr);

  // Emit a call with the size in bytes of the object.
  uint64_t Width = Size.getQuantity();
  llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(Int64Ty, Width),
                           llvm::ConstantExpr::getBitCast(Addr, Int8PtrTy)};
  Builder.CreateCall(InvariantStart, Args);
}
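
// Illustrative only: at -O1 and above, for a 4-byte constant global @x
// (hypothetical), the call emitted above looks roughly like
//
//   call {}* @llvm.invariant.start.p0i8(i64 4, i8* bitcast (i32* @x to i8*))
//
// telling the optimizer that the memory no longer changes past this point.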

void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
                                               llvm::Constant *DeclPtr,
                                               bool PerformInit) {

  const Expr *Init = D.getInit();
  QualType T = D.getType();

  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(T);
  unsigned ActualAddrSpace = DeclPtr->getType()->getPointerAddressSpace();
  if (ActualAddrSpace != ExpectedAddrSpace) {
    llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(T);
    llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
    DeclPtr = llvm::ConstantExpr::getAddrSpaceCast(DeclPtr, PTy);
  }

  ConstantAddress DeclAddr(DeclPtr, getContext().getDeclAlign(&D));

  if (!T->isReferenceType()) {
    if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
        D.hasAttr<OMPThreadPrivateDeclAttr>()) {
      (void)CGM.getOpenMPRuntime().emitThreadPrivateVarDefinition(
          &D, DeclAddr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
          PerformInit, this);
    }
    if (PerformInit)
      EmitDeclInit(*this, D, DeclAddr);
    if (CGM.isTypeConstant(D.getType(), true))
      EmitDeclInvariant(*this, D, DeclPtr);
    else
      EmitDeclDestroy(*this, D, DeclAddr);
    return;
  }

  assert(PerformInit && "cannot have constant initializer which needs "
         "destruction for reference");
  RValue RV = EmitReferenceBindingToExpr(Init);
  EmitStoreOfScalar(RV.getScalarVal(), DeclAddr, false, T);
}
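
// Illustrative only: the reference branch at the end of the function above
// handles definitions like (r and make are hypothetical names)
//
//   int &r = make();
//
// where the bound object's address is produced by EmitReferenceBindingToExpr
// and then stored into r's global slot.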

/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
                                                  llvm::FunctionCallee dtor,
                                                  llvm::Constant *addr) {
  // Get the destructor function type, void(*)(void).
  llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
  }

  const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
      ty, FnName.str(), FI, VD.getLocation());

  CodeGenFunction CGF(CGM);

  CGF.StartFunction(GlobalDecl(&VD, DynamicInitKind::AtExit),
                    CGM.getContext().VoidTy, fn, FI, FunctionArgList());

  llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);

  // Make sure the call and the callee agree on calling convention.
  if (auto *dtorFn = dyn_cast<llvm::Function>(
          dtor.getCallee()->stripPointerCastsAndAliases()))
    call->setCallingConv(dtorFn->getCallingConv());

  CGF.FinishFunction();

  return fn;
}
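
// Illustrative only: conceptually, the stub built above is the pseudo-C++
// below (names hypothetical); atexit only accepts a void(*)() callback, so the
// destructor and the object's address have to be baked into a forwarder.
//
//   static void __dtor_stub_for_w() { w.~Widget(); }
//   atexit(__dtor_stub_for_w);   // done by registerGlobalDtorWithAtExit below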

/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
                                                   llvm::FunctionCallee dtor,
                                                   llvm::Constant *addr) {
  // Create a function which calls the destructor.
  llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
  registerGlobalDtorWithAtExit(dtorStub);
}

void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtorStub) {
  // extern "C" int atexit(void (*f)(void));
  llvm::FunctionType *atexitTy =
    llvm::FunctionType::get(IntTy, dtorStub->getType(), false);

  llvm::FunctionCallee atexit =
      CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
                                /*Local=*/true);
  if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit.getCallee()))
    atexitFn->setDoesNotThrow();

  EmitNounwindRuntimeCall(atexit, dtorStub);
}

void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
                                         llvm::GlobalVariable *DeclPtr,
                                         bool PerformInit) {
  // If we've been asked to forbid guard variables, emit an error now.
  // This diagnostic is hard-coded for Darwin's use case; we can find
  // better phrasing if someone else needs it.
  if (CGM.getCodeGenOpts().ForbidGuardVariables)
    CGM.Error(D.getLocation(),
              "this initialization requires a guard variable, which "
              "the kernel does not support");

  CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}

void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
                                               llvm::BasicBlock *InitBlock,
                                               llvm::BasicBlock *NoInitBlock,
                                               GuardKind Kind,
                                               const VarDecl *D) {
  assert((Kind == GuardKind::TlsGuard || D) && "no guarded variable");

  // A guess at how many times we will enter the initialization of a
  // variable, depending on the kind of variable.
  static const uint64_t InitsPerTLSVar = 1024;
  static const uint64_t InitsPerLocalVar = 1024 * 1024;

  llvm::MDNode *Weights;
  if (Kind == GuardKind::VariableGuard && !D->isLocalVarDecl()) {
    // For non-local variables, don't apply any weighting for now. Due to our
    // use of COMDATs, we expect there to be at most one initialization of the
    // variable per DSO, but we have no way to know how many DSOs will try to
    // initialize the variable.
    Weights = nullptr;
  } else {
    uint64_t NumInits;
    // FIXME: For the TLS case, collect and use profiling information to
    // determine a more accurate branch weight.
    if (Kind == GuardKind::TlsGuard || D->getTLSKind())
      NumInits = InitsPerTLSVar;
    else
      NumInits = InitsPerLocalVar;

    // The probability of us entering the initializer is
    //   1 / (total number of times we attempt to initialize the variable).
    llvm::MDBuilder MDHelper(CGM.getLLVMContext());
    Weights = MDHelper.createBranchWeights(1, NumInits - 1);
  }

  Builder.CreateCondBr(NeedsInit, InitBlock, NoInitBlock, Weights);
}
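
// Illustrative only: for the TLS guard emitted by GenerateCXXGlobalInitFunc
// below, the branch above ends up carrying profile metadata roughly like
// (weights follow from InitsPerTLSVar above; block names as created there):
//
//   br i1 %guard.uninitialized, label %init, label %exit, !prof !0
//   !0 = !{!"branch_weights", i32 1, i32 1023}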

llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
    llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
    SourceLocation Loc, bool TLS) {
  llvm::Function *Fn =
    llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
                           Name, &getModule());
  if (!getLangOpts().AppleKext && !TLS) {
    // Set the section if needed.
    if (const char *Section = getTarget().getStaticInitSectionSpecifier())
      Fn->setSection(Section);
  }

  SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);

  Fn->setCallingConv(getRuntimeCC());

  if (!getLangOpts().Exceptions)
    Fn->setDoesNotThrow();

  if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
      !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);

  if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
      !isInSanitizerBlacklist(SanitizerKind::MemTag, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);

  if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
      !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);

  if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
      !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
      !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);

  if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
      !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::SafeStack);

  if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
      !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  auto RASignKind = getCodeGenOpts().getSignReturnAddress();
  if (RASignKind != CodeGenOptions::SignReturnAddressScope::None) {
    Fn->addFnAttr("sign-return-address",
                  RASignKind == CodeGenOptions::SignReturnAddressScope::All
                      ? "all"
                      : "non-leaf");
    auto RASignKey = getCodeGenOpts().getSignReturnAddressKey();
    Fn->addFnAttr("sign-return-address-key",
                  RASignKey == CodeGenOptions::SignReturnAddressKeyValue::AKey
                      ? "a_key"
                      : "b_key");
  }

  if (getCodeGenOpts().BranchTargetEnforcement)
    Fn->addFnAttr("branch-target-enforcement");

  return Fn;
}

/// Create a global pointer to a function that will initialize a global
/// variable.  The user has requested that this pointer be emitted in a specific
/// section.
void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
                                          llvm::GlobalVariable *GV,
                                          llvm::Function *InitFunc,
                                          InitSegAttr *ISA) {
  llvm::GlobalVariable *PtrArray = new llvm::GlobalVariable(
      TheModule, InitFunc->getType(), /*isConstant=*/true,
      llvm::GlobalValue::PrivateLinkage, InitFunc, "__cxx_init_fn_ptr");
  PtrArray->setSection(ISA->getSection());
  addUsedGlobal(PtrArray);

  // If the GV is already in a comdat group, then we have to join it.
  if (llvm::Comdat *C = GV->getComdat())
    PtrArray->setComdat(C);
}
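
// Illustrative only: EmitPointerToInitFunc is reached for MSVC-style init_seg
// requests, e.g. (the section name ".mine" and global_obj are hypothetical):
//
//   #pragma init_seg(".mine")
//   SomeType global_obj;   // initializer pointer emitted into ".mine"
//
// rather than routing the initializer through llvm.global_ctors.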

void
CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
                                            llvm::GlobalVariable *Addr,
                                            bool PerformInit) {

  // According to E.2.3.1 in CUDA-7.5 Programming guide: __device__,
  // __constant__ and __shared__ variables defined in namespace scope,
  // that are of class type, cannot have a non-empty constructor. All
  // the checks have been done in Sema by now. Whatever initializers
  // are allowed are empty and we just need to ignore them here.
  if (getLangOpts().CUDAIsDevice && !getLangOpts().GPUAllowDeviceInit &&
      (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
       D->hasAttr<CUDASharedAttr>()))
    return;

  if (getLangOpts().OpenMP &&
      getOpenMPRuntime().emitDeclareTargetVarDefinition(D, Addr, PerformInit))
    return;

  // Check if we've already initialized this decl.
  auto I = DelayedCXXInitPosition.find(D);
  if (I != DelayedCXXInitPosition.end() && I->second == ~0U)
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  SmallString<256> FnName;
  {
    llvm::raw_svector_ostream Out(FnName);
    getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
  }

  // Create a variable initialization function.
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, FnName.str(),
                                         getTypes().arrangeNullaryFunction(),
                                         D->getLocation());

  auto *ISA = D->getAttr<InitSegAttr>();
  CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
                                                          PerformInit);

  llvm::GlobalVariable *COMDATKey =
      supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;

  if (D->getTLSKind()) {
    // FIXME: Should we support init_priority for thread_local?
    // FIXME: We only need to register one __cxa_thread_atexit function for the
    // entire TU.
    CXXThreadLocalInits.push_back(Fn);
    CXXThreadLocalInitVars.push_back(D);
  } else if (PerformInit && ISA) {
    EmitPointerToInitFunc(D, Addr, Fn, ISA);
  } else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
    OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
    PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
  } else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
             getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
    // C++ [basic.start.init]p2:
    //   Definitions of explicitly specialized class template static data
    //   members have ordered initialization. Other class template static data
    //   members (i.e., implicitly or explicitly instantiated specializations)
    //   have unordered initialization.
    //
    // As a consequence, we can put them into their own llvm.global_ctors entry.
    //
    // If the global is externally visible, put the initializer into a COMDAT
    // group with the global being initialized.  On most platforms, this is a
    // minor startup time optimization.  In the MS C++ ABI, there are no guard
    // variables, so this COMDAT key is required for correctness.
    AddGlobalCtor(Fn, 65535, COMDATKey);
    if (getTarget().getCXXABI().isMicrosoft() && COMDATKey) {
      // In the MS C++ ABI, MS adds template static data members to the linker
      // directive.
      addUsedGlobal(COMDATKey);
    }
  } else if (D->hasAttr<SelectAnyAttr>()) {
    // SelectAny globals will be comdat-folded. Put the initializer into a
    // COMDAT group associated with the global, so the initializers get folded
    // too.
    AddGlobalCtor(Fn, 65535, COMDATKey);
  } else {
    I = DelayedCXXInitPosition.find(D); // Re-do lookup in case of re-hash.
    if (I == DelayedCXXInitPosition.end()) {
      CXXGlobalInits.push_back(Fn);
    } else if (I->second != ~0U) {
      assert(I->second < CXXGlobalInits.size() &&
             CXXGlobalInits[I->second] == nullptr);
      CXXGlobalInits[I->second] = Fn;
    }
  }

  // Remember that we already emitted the initializer for this global.
  DelayedCXXInitPosition[D] = ~0U;
}
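
// Illustrative only: the GVA_DiscardableODR / template-instantiation branch in
// the function above covers definitions like (Trait and compute hypothetical)
//
//   template <typename T> struct Trait { static int value; };
//   template <typename T> int Trait<T>::value = compute();  // unordered init,
//                                                           // own ctor entry
//
// whereas a plain `int g = compute();` at namespace scope stays in
// CXXGlobalInits and keeps ordered (within-TU) initialization.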

void CodeGenModule::EmitCXXThreadLocalInitFunc() {
  getCXXABI().EmitThreadLocalInitFuncs(
      *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);

  CXXThreadLocalInits.clear();
  CXXThreadLocalInitVars.clear();
  CXXThreadLocals.clear();
}

void
CodeGenModule::EmitCXXGlobalInitFunc() {
  while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
    CXXGlobalInits.pop_back();

  if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();

  // Create our global initialization function.
  if (!PrioritizedCXXGlobalInits.empty()) {
    SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
    llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
                         PrioritizedCXXGlobalInits.end());
    // Iterate over "chunks" of ctors with the same priority and emit each
    // chunk into a separate function. Note that everything is sorted first by
    // priority and second by lexical order, so the ctor functions are emitted
    // in the proper order.
    for (SmallVectorImpl<GlobalInitData >::iterator
           I = PrioritizedCXXGlobalInits.begin(),
           E = PrioritizedCXXGlobalInits.end(); I != E; ) {
      SmallVectorImpl<GlobalInitData >::iterator
        PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());

      LocalCXXGlobalInits.clear();
      unsigned Priority = I->first.priority;
      // Compute the function suffix from priority. Prepend with zeroes to make
      // sure the function names are also ordered as priorities.
      std::string PrioritySuffix = llvm::utostr(Priority);
      // Priority is always <= 65535 (enforced by sema).
      PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
      llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
          FTy, "_GLOBAL__I_" + PrioritySuffix, FI);

      for (; I < PrioE; ++I)
        LocalCXXGlobalInits.push_back(I->second);

      CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, LocalCXXGlobalInits);
      AddGlobalCtor(Fn, Priority);
    }
    PrioritizedCXXGlobalInits.clear();
  }

  // Include the filename in the symbol name. Including "sub_" matches gcc and
  // makes sure these symbols appear lexicographically behind the symbols with
  // priority emitted above.
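  //
  // For example (illustrative only), a module named "foo.cpp" typically yields
  //
  //   _GLOBAL__sub_I_foo.cpp
  //
  // since letters, digits, '.', and '_' survive the sanitizing loop below and
  // everything else becomes '_'.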
  SmallString<128> FileName = llvm::sys::path::filename(getModule().getName());
  if (FileName.empty())
    FileName = "<null>";

  for (size_t i = 0; i < FileName.size(); ++i) {
    // Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
    // to be the set of C preprocessing numbers.
    if (!isPreprocessingNumberBody(FileName[i]))
      FileName[i] = '_';
  }

  llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
      FTy, llvm::Twine("_GLOBAL__sub_I_", FileName), FI);

  CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
  AddGlobalCtor(Fn);

  // In OpenCL, global init functions must be converted to kernels so that they
  // can be launched from the host.
  // FIXME: Some more work might be needed to handle destructors correctly.
  // The current initialization function relies on function-pointer callbacks,
  // and we can't support function pointers, especially between host and
  // device. However, global destruction has little meaning without dynamic
  // resource allocation on the device, and program-scope variables are
  // destroyed by the runtime when the program is released.
  if (getLangOpts().OpenCL) {
    GenOpenCLArgMetadata(Fn);
    Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
  }

  if (getLangOpts().HIP) {
    Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
    Fn->addFnAttr("device-init");
  }

  CXXGlobalInits.clear();
}

void CodeGenModule::EmitCXXGlobalDtorFunc() {
  if (CXXGlobalDtors.empty())
    return;

  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  // Create our global destructor function.
  const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
  llvm::Function *Fn =
      CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a", FI);

  CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
  AddGlobalDtor(Fn);
}

/// Emit the code necessary to initialize the given global variable.
void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
                                                       const VarDecl *D,
                                                 llvm::GlobalVariable *Addr,
                                                       bool PerformInit) {
  // Check if we need to emit debug info for variable initializer.
  if (D->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  CurEHLocation = D->getBeginLoc();

  StartFunction(GlobalDecl(D, DynamicInitKind::Initializer),
                getContext().VoidTy, Fn, getTypes().arrangeNullaryFunction(),
                FunctionArgList(), D->getLocation(),
                D->getInit()->getExprLoc());

  // Use guarded initialization if the global variable is weak. This
  // occurs for, e.g., instantiated static data members and
  // definitions explicitly marked weak.
  //
  // Also use guarded initialization for a variable with dynamic TLS and
  // unordered initialization. (If the initialization is ordered, the ABI
  // layer will guard the whole-TU initialization for us.)
  if (Addr->hasWeakLinkage() || Addr->hasLinkOnceLinkage() ||
      (D->getTLSKind() == VarDecl::TLS_Dynamic &&
       isTemplateInstantiation(D->getTemplateSpecializationKind()))) {
    EmitCXXGuardedInit(*D, Addr, PerformInit);
  } else {
    EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
  }

  FinishFunction();
}
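
// Illustrative only: a weak-linkage definition that takes the guarded path
// above (Holder and Widget are hypothetical names):
//
//   template <typename T> struct Holder { static Widget instance; };
//   template <typename T> Widget Holder<T>::instance;  // linkonce_odr; several
//                                                      // TUs may emit it, so a
//                                                      // guard ensures a single
//                                                      // initialization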

void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
                                           ArrayRef<llvm::Function *> Decls,
                                           ConstantAddress Guard) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    llvm::BasicBlock *ExitBlock = nullptr;
    if (Guard.isValid()) {
      // If we have a guard variable, check whether we've already performed
      // these initializations. This happens for TLS initialization functions.
      llvm::Value *GuardVal = Builder.CreateLoad(Guard);
      llvm::Value *Uninit = Builder.CreateIsNull(GuardVal,
                                                 "guard.uninitialized");
      llvm::BasicBlock *InitBlock = createBasicBlock("init");
      ExitBlock = createBasicBlock("exit");
      EmitCXXGuardedInitBranch(Uninit, InitBlock, ExitBlock,
                               GuardKind::TlsGuard, nullptr);
      EmitBlock(InitBlock);
      // Mark as initialized before initializing anything else. If the
      // initializers use previously-initialized thread_local vars, that's
      // probably supposed to be OK, but the standard doesn't say.
      Builder.CreateStore(llvm::ConstantInt::get(GuardVal->getType(),1), Guard);

      // The guard variable can't ever change again.
      EmitInvariantStart(
          Guard.getPointer(),
          CharUnits::fromQuantity(
              CGM.getDataLayout().getTypeAllocSize(GuardVal->getType())));
    }

    RunCleanupsScope Scope(*this);

    // When building in Objective-C++ ARC mode, create an autorelease pool
    // around the global initializers.
    if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
      llvm::Value *token = EmitObjCAutoreleasePoolPush();
      EmitObjCAutoreleasePoolCleanup(token);
    }

    for (unsigned i = 0, e = Decls.size(); i != e; ++i)
      if (Decls[i])
        EmitRuntimeCall(Decls[i]);

    Scope.ForceCleanup();

    if (ExitBlock) {
      Builder.CreateBr(ExitBlock);
      EmitBlock(ExitBlock);
    }
  }

  FinishFunction();
}

void CodeGenFunction::GenerateCXXGlobalDtorsFunc(
    llvm::Function *Fn,
    const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
                                 llvm::Constant *>> &DtorsAndObjects) {
  {
    auto NL = ApplyDebugLocation::CreateEmpty(*this);
    StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
                  getTypes().arrangeNullaryFunction(), FunctionArgList());
    // Emit an artificial location for this function.
    auto AL = ApplyDebugLocation::CreateArtificial(*this);

    // Emit the dtors, in reverse order from construction.
    for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
      llvm::FunctionType *CalleeTy;
      llvm::Value *Callee;
      llvm::Constant *Arg;
      std::tie(CalleeTy, Callee, Arg) = DtorsAndObjects[e - i - 1];
      llvm::CallInst *CI = Builder.CreateCall(CalleeTy, Callee, Arg);
      // Make sure the call and the callee agree on calling convention.
      if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
        CI->setCallingConv(F->getCallingConv());
    }
  }

  FinishFunction();
}

/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object.  The address of the object
/// should be in global memory.
llvm::Function *CodeGenFunction::generateDestroyHelper(
    Address addr, QualType type, Destroyer *destroyer,
    bool useEHCleanupForArray, const VarDecl *VD) {
  FunctionArgList args;
  ImplicitParamDecl Dst(getContext(), getContext().VoidPtrTy,
                        ImplicitParamDecl::Other);
  args.push_back(&Dst);

  const CGFunctionInfo &FI =
    CGM.getTypes().arrangeBuiltinFunctionDeclaration(getContext().VoidTy, args);
  llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
      FTy, "__cxx_global_array_dtor", FI, VD->getLocation());

  CurEHLocation = VD->getBeginLoc();

  StartFunction(VD, getContext().VoidTy, fn, FI, args);

  emitDestroy(addr, type, destroyer, useEHCleanupForArray);

  FinishFunction();

  return fn;
}
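
// Illustrative only: a case that needs the helper above rather than a direct
// __cxa_atexit registration of the destructor (Widget and ws hypothetical):
//
//   struct Widget { ~Widget(); };
//   Widget ws[8];   // array teardown must loop over the elements, so a
//                   // __cxx_global_array_dtor helper is emitted and registered
//                   // with a null argument instead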