//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"

using namespace clang;
using namespace CodeGen;

CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
  : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
    Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
    TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) {
  SkippedLayout = false;
  LongDoubleReferenced = false;
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::FoldingSet<CGFunctionInfo>::iterator
       I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
    delete &*I++;
}

const CodeGenOptions &CodeGenTypes::getCodeGenOpts() const {
  return CGM.getCodeGenOpts();
}

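/// Attach a descriptive name to the LLVM struct type being built for a
/// record, e.g. "struct.Foo" or "union.U", optionally with a suffix such as
/// ".base" for a base-class subobject layout.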
void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
                                     llvm::StructType *Ty,
                                     StringRef suffix) {
  SmallString<256> TypeName;
  llvm::raw_svector_ostream OS(TypeName);
  OS << RD->getKindName() << '.';

  // FIXME: We probably want to make more tweaks to the printing policy. For
  // example, we should probably enable PrintCanonicalTypes and
  // FullyQualifiedNames.
  PrintingPolicy Policy = RD->getASTContext().getPrintingPolicy();
  Policy.SuppressInlineNamespace = false;

  // Name the codegen type after the typedef name if there is no tag type
  // name available.
  if (RD->getIdentifier()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (RD->getDeclContext())
      RD->printQualifiedName(OS, Policy);
    else
      RD->printName(OS, Policy);
  } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
    // FIXME: We should not have to check for a null decl context here.
    // Right now we do it because the implicit Obj-C decls don't have one.
    if (TDD->getDeclContext())
      TDD->printQualifiedName(OS, Policy);
    else
      TDD->printName(OS);
  } else
    OS << "anon";

  if (!suffix.empty())
    OS << suffix;

  Ty->setName(OS.str());
}

/// ConvertTypeForMem - Convert type T into an llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
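///
/// A few representative examples (exact widths are target-dependent):
///   _Bool                                     scalar: i1, memory: i8
///   bool __attribute__((ext_vector_type(4)))  scalar: <4 x i1>, memory: i8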
llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
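  // Constant matrices are lowered to their flattened in-memory form: an
  // array with one element per matrix entry (e.g. a 4x4 float matrix is
  // stored as [16 x float]).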
  if (T->isConstantMatrixType()) {
    const Type *Ty = Context.getCanonicalType(T).getTypePtr();
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    return llvm::ArrayType::get(ConvertType(MT->getElementType()),
                                MT->getNumRows() * MT->getNumColumns());
  }

  llvm::Type *R = ConvertType(T);

  // Check for the boolean vector case.
  if (T->isExtVectorBoolType()) {
    auto *FixedVT = cast<llvm::FixedVectorType>(R);
    // Pad to at least one byte.
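    // e.g. a vector of four booleans is stored in an i8, and a vector of 16
    // booleans in an i16.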
    uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
    return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
  }

  // If this is a bool type, or a bit-precise integer type in a bitfield
  // representation, map this integer to the target-specified size.
  if ((ForBitField && T->isBitIntType()) ||
      (!T->isBitIntType() && R->isIntegerTy(1)))
    return llvm::IntegerType::get(getLLVMContext(),
                                  (unsigned)Context.getTypeSize(T));

  // Else, don't map it.
  return R;
}

/// isRecordLayoutComplete - Return true if the specified type is already
/// completely laid out.
bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
  llvm::DenseMap<const Type *, llvm::StructType *>::const_iterator I =
      RecordDeclTypes.find(Ty);
  return I != RecordDeclTypes.end() && !I->second->isOpaque();
}

/// isFuncParamTypeConvertible - Return true if the specified type in a
/// function parameter or result position can be converted to an IR type at
/// this point. This boils down to whether the type is complete.
bool CodeGenTypes::isFuncParamTypeConvertible(QualType Ty) {
  // Some ABIs cannot represent member pointers in IR until the relevant
  // details of the class they point into are known.
  if (const auto *MPT = Ty->getAs<MemberPointerType>())
    return getCXXABI().isMemberPointerConvertible(MPT);

  // If this isn't a tagged type, we can convert it!
  const TagType *TT = Ty->getAs<TagType>();
  if (!TT) return true;

  // Incomplete types cannot be converted.
  return !TT->isIncompleteType();
}

/// Verify that a given function type is complete, i.e. that its return type
/// and all of its parameter types are complete. If any of them is not, we
/// don't want to ask the ABI lowering code to handle a type that cannot be
/// converted to an IR type.
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  if (!isFuncParamTypeConvertible(FT->getReturnType()))
    return false;

  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
      if (!isFuncParamTypeConvertible(FPT->getParamType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache.  This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this.  We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    // If necessary, provide the full definition of a type only used with a
    // declaration so far.
    if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
      DI->completeType(ED);
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

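/// A class that previously forced member-pointer conversion to produce an
/// opaque placeholder may have become convertible; if so, drop any cached
/// types that were derived from that placeholder.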
void CodeGenTypes::RefreshTypeCacheForClass(const CXXRecordDecl *RD) {
  QualType T = Context.getRecordType(RD);
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();
  if (RecordsWithOpaqueMemberPointers.count(Ty)) {
    TypeCache.clear();
    RecordsWithOpaqueMemberPointers.clear();
  }
}

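/// Map an llvm::fltSemantics object to the corresponding LLVM IR floating
/// point type. IEEE half is special: on targets where half is storage-only,
/// it is lowered to i16 rather than the native half type.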
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf()) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::BFloat())
    return llvm::Type::getBFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEsingle())
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble())
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad())
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble())
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended())
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

llvm::Type *CodeGenTypes::ConvertFunctionTypeInternal(QualType QFT) {
  assert(QFT.isCanonical());
  const FunctionType *FT = cast<FunctionType>(QFT.getTypePtr());
  // First, check whether we can build the full function type.  If the
  // function type depends on an incomplete type (e.g. a struct or enum), we
  // cannot lower the function type.
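  // For example, given 'struct S; S f();', the type of f cannot be lowered
  // until S is defined.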
  if (!isFuncTypeConvertible(FT)) {
    // This function's type depends on an incomplete tag type.

    // Force conversion of all the relevant record types, to make sure
    // we re-convert the FunctionType when appropriate.
    if (const RecordType *RT = FT->getReturnType()->getAs<RecordType>())
      ConvertRecordDeclType(RT->getDecl());
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
      for (unsigned i = 0, e = FPT->getNumParams(); i != e; i++)
        if (const RecordType *RT = FPT->getParamType(i)->getAs<RecordType>())
          ConvertRecordDeclType(RT->getDecl());

    SkippedLayout = true;

    // Return a placeholder type.
    return llvm::StructType::get(getLLVMContext());
  }

  // The function type can be built; call the appropriate routines to
  // build it.
  const CGFunctionInfo *FI;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
  } else {
    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
    FI = &arrangeFreeFunctionType(
        CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
  }

  llvm::Type *ResultType = nullptr;
  // If there is something higher level prodding our CGFunctionInfo, then
  // don't recurse into it again.
  if (FunctionsBeingProcessed.count(FI)) {
    ResultType = llvm::StructType::get(getLLVMContext());
    SkippedLayout = true;
  } else {
    // Otherwise, we're good to go, go ahead and convert it.
    ResultType = GetFunctionType(*FI);
  }

  return ResultType;
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // For device-side compilation, CUDA device builtin surface/texture types
  // may be represented by different LLVM types.
  if (Context.getLangOpts().CUDAIsDevice) {
    if (T->isCUDADeviceBuiltinSurfaceType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinSurfaceDeviceType())
        return Ty;
    } else if (T->isCUDADeviceBuiltinTextureType()) {
      if (auto *Ty = CGM.getTargetCodeGenInfo()
                         .getCUDADeviceBuiltinTextureDeviceType())
        return Ty;
    }
  }

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  llvm::Type *CachedType = nullptr;
  auto TCI = TypeCache.find(Ty);
  if (TCI != TypeCache.end())
    CachedType = TCI->second;
  // With expensive checks enabled, don't return early: fall through so the
  // type we compute is checked against the cached type by the assert at the
  // bottom of this function.
#ifndef EXPENSIVE_CHECKS
  if (CachedType)
    return CachedType;
#endif

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = nullptr;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.inc"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call.  Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char8:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
    case BuiltinType::ShortAccum:
    case BuiltinType::Accum:
    case BuiltinType::LongAccum:
    case BuiltinType::UShortAccum:
    case BuiltinType::UAccum:
    case BuiltinType::ULongAccum:
    case BuiltinType::ShortFract:
    case BuiltinType::Fract:
    case BuiltinType::LongFract:
    case BuiltinType::UShortFract:
    case BuiltinType::UFract:
    case BuiltinType::ULongFract:
    case BuiltinType::SatShortAccum:
    case BuiltinType::SatAccum:
    case BuiltinType::SatLongAccum:
    case BuiltinType::SatUShortAccum:
    case BuiltinType::SatUAccum:
    case BuiltinType::SatULongAccum:
    case BuiltinType::SatShortFract:
    case BuiltinType::SatFract:
    case BuiltinType::SatLongFract:
    case BuiltinType::SatUShortFract:
    case BuiltinType::SatUFract:
    case BuiltinType::SatULongFract:
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Float16:
      ResultType =
          getTypeForFormat(getLLVMContext(), Context.getFloatTypeSemantics(T),
                           /* UseNativeHalf = */ true);
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(
          getLLVMContext(), Context.getFloatTypeSemantics(T),
          Context.getLangOpts().NativeHalfType ||
              !Context.getTargetInfo().useFP16ConversionIntrinsics());
      break;
    case BuiltinType::LongDouble:
      LongDoubleReferenced = true;
      LLVM_FALLTHROUGH;
    case BuiltinType::BFloat16:
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::Float128:
    case BuiltinType::Ibm128:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::PointerType::getUnqual(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLImageTypes.def"
#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
    case BuiltinType::Id:
#include "clang/Basic/OpenCLExtensionTypes.def"
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
    case BuiltinType::OCLClkEvent:
    case BuiltinType::OCLQueue:
    case BuiltinType::OCLReserveID:
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;
    case BuiltinType::SveInt8:
    case BuiltinType::SveUint8:
    case BuiltinType::SveInt8x2:
    case BuiltinType::SveUint8x2:
    case BuiltinType::SveInt8x3:
    case BuiltinType::SveUint8x3:
    case BuiltinType::SveInt8x4:
    case BuiltinType::SveUint8x4:
    case BuiltinType::SveInt16:
    case BuiltinType::SveUint16:
    case BuiltinType::SveInt16x2:
    case BuiltinType::SveUint16x2:
    case BuiltinType::SveInt16x3:
    case BuiltinType::SveUint16x3:
    case BuiltinType::SveInt16x4:
    case BuiltinType::SveUint16x4:
    case BuiltinType::SveInt32:
    case BuiltinType::SveUint32:
    case BuiltinType::SveInt32x2:
    case BuiltinType::SveUint32x2:
    case BuiltinType::SveInt32x3:
    case BuiltinType::SveUint32x3:
    case BuiltinType::SveInt32x4:
    case BuiltinType::SveUint32x4:
    case BuiltinType::SveInt64:
    case BuiltinType::SveUint64:
    case BuiltinType::SveInt64x2:
    case BuiltinType::SveUint64x2:
    case BuiltinType::SveInt64x3:
    case BuiltinType::SveUint64x3:
    case BuiltinType::SveInt64x4:
    case BuiltinType::SveUint64x4:
    case BuiltinType::SveBool:
    case BuiltinType::SveBoolx2:
    case BuiltinType::SveBoolx4:
    case BuiltinType::SveFloat16:
    case BuiltinType::SveFloat16x2:
    case BuiltinType::SveFloat16x3:
    case BuiltinType::SveFloat16x4:
    case BuiltinType::SveFloat32:
    case BuiltinType::SveFloat32x2:
    case BuiltinType::SveFloat32x3:
    case BuiltinType::SveFloat32x4:
    case BuiltinType::SveFloat64:
    case BuiltinType::SveFloat64x2:
    case BuiltinType::SveFloat64x3:
    case BuiltinType::SveFloat64x4:
    case BuiltinType::SveBFloat16:
    case BuiltinType::SveBFloat16x2:
    case BuiltinType::SveBFloat16x3:
    case BuiltinType::SveBFloat16x4: {
      ASTContext::BuiltinVectorTypeInfo Info =
          Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
      return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                           Info.EC.getKnownMinValue() *
                                               Info.NumVectors);
    }
    case BuiltinType::SveCount:
      return llvm::TargetExtType::get(getLLVMContext(), "aarch64.svcount");
#define PPC_VECTOR_TYPE(Name, Id, Size) \
    case BuiltinType::Id: \
      ResultType = \
        llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
      break;
#include "clang/Basic/PPCTypes.def"
#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/RISCVVTypes.def"
      {
        ASTContext::BuiltinVectorTypeInfo Info =
            Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
        // Tuple types are expressed as aggregate types of the same scalable
        // vector type (e.g. vint32m1x2_t is two vint32m1_t, which is
        // {<vscale x 2 x i32>, <vscale x 2 x i32>}).
        if (Info.NumVectors != 1) {
          llvm::Type *EltTy = llvm::ScalableVectorType::get(
              ConvertType(Info.ElementType), Info.EC.getKnownMinValue());
          llvm::SmallVector<llvm::Type *, 4> EltTys(Info.NumVectors, EltTy);
          return llvm::StructType::get(getLLVMContext(), EltTys);
        }
        return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
                                             Info.EC.getKnownMinValue() *
                                                 Info.NumVectors);
      }
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS)                  \
  case BuiltinType::Id: {                                                      \
    if (BuiltinType::Id == BuiltinType::WasmExternRef)                         \
      ResultType = CGM.getTargetCodeGenInfo().getWasmExternrefReferenceType(); \
    else                                                                       \
      llvm_unreachable("Unexpected wasm reference builtin type!");             \
  } break;
#include "clang/Basic/WebAssemblyReferenceTypes.def"
    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
  case Type::DeducedTemplateSpecialization:
    llvm_unreachable("Unexpected undeduced type!");
  case Type::Complex: {
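    // A complex type lowers to a pair of its element type, e.g.
    // _Complex float becomes { float, float }.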
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    unsigned AS = getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const auto *VT = cast<VectorType>(Ty);
    // An ext_vector_type of Bool is really a vector of bits.
    llvm::Type *IRElemTy = VT->isExtVectorBoolType()
                               ? llvm::Type::getInt1Ty(getLLVMContext())
                               : ConvertType(VT->getElementType());
    ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
    break;
  }
  case Type::ConstantMatrix: {
    const ConstantMatrixType *MT = cast<ConstantMatrixType>(Ty);
    ResultType =
        llvm::FixedVectorType::get(ConvertType(MT->getElementType()),
                                   MT->getNumRows() * MT->getNumColumns());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto:
    ResultType = ConvertFunctionTypeInternal(T);
    break;
  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer:
    ResultType = llvm::PointerType::getUnqual(getLLVMContext());
    break;

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    // The pointee of a block pointer is a function type. For function types,
    // getTargetAddressSpace() returns the default address space for function
    // pointers, i.e. the program address space. Therefore, for block pointers
    // we pass the pointee's AST address space when calling
    // getTargetAddressSpace(), to ensure that we get the LLVM IR address
    // space used for data pointers and not the one used for function pointers.
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    unsigned AS = Context.getTargetAddressSpace(FTy.getAddressSpace());
    ResultType = llvm::PointerType::get(getLLVMContext(), AS);
    break;
  }

  case Type::MemberPointer: {
    auto *MPTy = cast<MemberPointerType>(Ty);
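    // Some ABIs cannot lower a member pointer yet; in the Microsoft C++ ABI,
    // for instance, the representation depends on the class's inheritance
    // model. Hand back an opaque placeholder struct and remember the class so
    // the cache can be refreshed once conversion becomes possible (see
    // RefreshTypeCacheForClass).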
    if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
      auto *C = MPTy->getClass();
      auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
      if (Insertion.second)
        Insertion.first->second = llvm::StructType::create(getLLVMContext());
      ResultType = Insertion.first->second;
    } else {
      ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
    }
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
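    // e.g. an _Atomic struct holding 3 bytes of data on a target whose atomic
    // operations need a 4-byte operand is padded out to
    // { %struct.S, [1 x i8] } (the struct name here is illustrative).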
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType =
          llvm::StructType::get(getLLVMContext(), llvm::ArrayRef(elts));
    }
    break;
  }
  case Type::Pipe: {
    ResultType = CGM.getOpenCLRuntime().getPipeType(cast<PipeType>(Ty));
    break;
  }
  case Type::BitInt: {
    const auto &EIT = cast<BitIntType>(Ty);
    ResultType = llvm::Type::getIntNTy(getLLVMContext(), EIT->getNumBits());
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");
  assert((!CachedType || CachedType == ResultType) &&
         "Cached type doesn't match computed type");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

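/// Return true if the given atomic type had to be inflated beyond the size of
/// its value type and therefore carries trailing padding (see the Atomic case
/// in ConvertType above).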
bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecls are not necessarily unique; instead, key on the (clang) type
  // connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (!Entry) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (!RD || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CRD->bases()) {
      if (I.isVirtual()) continue;
      ConvertRecordDeclType(I.getType()->castAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  std::unique_ptr<CGRecordLayout> Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = std::move(Layout);

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  auto I = CGRecordLayouts.find(Key);
  if (I != CGRecordLayouts.end())
    return *I->second;
  // Compute the type information.
  ConvertRecordDeclType(RD);

  // Now try again.
  I = CGRecordLayouts.find(Key);

  assert(I != CGRecordLayouts.end() &&
         "Unable to find record layout information for type");
  return *I->second;
}

bool CodeGenTypes::isPointerZeroInitializable(QualType T) {
  assert((T->isAnyPointerType() || T->isBlockPointerType()) && "Invalid type");
  return isZeroInitializable(T);
}

bool CodeGenTypes::isZeroInitializable(QualType T) {
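  // A pointer is only zero-initializable if the target's null pointer value
  // for this address space is the all-zero bit pattern; some targets use a
  // non-zero representation for null in particular address spaces.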
  if (T->getAs<PointerType>())
    return Context.getTargetNullPointerValue(T) == 0;

  if (const auto *AT = Context.getAsArrayType(T)) {
    if (isa<IncompleteArrayType>(AT))
      return true;
    if (const auto *CAT = dyn_cast<ConstantArrayType>(AT))
      if (Context.getConstantArrayElementCount(CAT) == 0)
        return true;
    T = Context.getBaseElementType(T);
  }

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const RecordDecl *RD) {
  return getCGRecordLayout(RD).isZeroInitializable();
}

unsigned CodeGenTypes::getTargetAddressSpace(QualType T) const {
  // Return the address space for the type. If the type is a
  // function type without an address space qualifier, the
  // program address space is used. Otherwise, the target picks
  // the best address space based on the type information.
  return T->isFunctionType() && !T.hasAddressSpace()
             ? getDataLayout().getProgramAddressSpace()
             : getContext().getTargetAddressSpace(T.getAddressSpace());
}