1193326Sed//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// 2193326Sed// 3193326Sed// The LLVM Compiler Infrastructure 4193326Sed// 5193326Sed// This file is distributed under the University of Illinois Open Source 6193326Sed// License. See LICENSE.TXT for details. 7193326Sed// 8193326Sed//===----------------------------------------------------------------------===// 9193326Sed// 10198092Srdivacky// This is the code that handles AST -> LLVM type lowering. 11193326Sed// 12193326Sed//===----------------------------------------------------------------------===// 13193326Sed 14193326Sed#include "CodeGenTypes.h" 15249423Sdim#include "CGCXXABI.h" 16206084Srdivacky#include "CGCall.h" 17249423Sdim#include "CGOpenCLRuntime.h" 18206084Srdivacky#include "CGRecordLayout.h" 19234353Sdim#include "TargetInfo.h" 20193326Sed#include "clang/AST/ASTContext.h" 21249423Sdim#include "clang/AST/DeclCXX.h" 22193326Sed#include "clang/AST/DeclObjC.h" 23193326Sed#include "clang/AST/Expr.h" 24193326Sed#include "clang/AST/RecordLayout.h" 25263508Sdim#include "clang/CodeGen/CGFunctionInfo.h" 26249423Sdim#include "llvm/IR/DataLayout.h" 27249423Sdim#include "llvm/IR/DerivedTypes.h" 28249423Sdim#include "llvm/IR/Module.h" 29193326Sedusing namespace clang; 30193326Sedusing namespace CodeGen; 31193326Sed 32251662SdimCodeGenTypes::CodeGenTypes(CodeGenModule &cgm) 33251662Sdim : CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()), 34251662Sdim TheDataLayout(cgm.getDataLayout()), 35251662Sdim Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()), 36251662Sdim TheABIInfo(cgm.getTargetCodeGenInfo().getABIInfo()) { 37224145Sdim SkippedLayout = false; 38193326Sed} 39193326Sed 40193326SedCodeGenTypes::~CodeGenTypes() { 41198092Srdivacky for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator 42198092Srdivacky I = CGRecordLayouts.begin(), E = CGRecordLayouts.end(); 43193326Sed I != E; ++I) 44193326Sed delete I->second; 45202379Srdivacky 46202379Srdivacky for 
(llvm::FoldingSet<CGFunctionInfo>::iterator 47202379Srdivacky I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; ) 48202379Srdivacky delete &*I++; 49193326Sed} 50193326Sed 51224145Sdimvoid CodeGenTypes::addRecordTypeName(const RecordDecl *RD, 52224145Sdim llvm::StructType *Ty, 53226633Sdim StringRef suffix) { 54234353Sdim SmallString<256> TypeName; 55221345Sdim llvm::raw_svector_ostream OS(TypeName); 56221345Sdim OS << RD->getKindName() << '.'; 57221345Sdim 58221345Sdim // Name the codegen type after the typedef name 59221345Sdim // if there is no tag type name available 60221345Sdim if (RD->getIdentifier()) { 61221345Sdim // FIXME: We should not have to check for a null decl context here. 62221345Sdim // Right now we do it because the implicit Obj-C decls don't have one. 63221345Sdim if (RD->getDeclContext()) 64249423Sdim RD->printQualifiedName(OS); 65221345Sdim else 66221345Sdim RD->printName(OS); 67221345Sdim } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) { 68221345Sdim // FIXME: We should not have to check for a null decl context here. 69221345Sdim // Right now we do it because the implicit Obj-C decls don't have one. 70221345Sdim if (TDD->getDeclContext()) 71249423Sdim TDD->printQualifiedName(OS); 72221345Sdim else 73221345Sdim TDD->printName(OS); 74221345Sdim } else 75221345Sdim OS << "anon"; 76210299Sed 77221345Sdim if (!suffix.empty()) 78221345Sdim OS << suffix; 79221345Sdim 80224145Sdim Ty->setName(OS.str()); 81221345Sdim} 82221345Sdim 83193326Sed/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from 84193326Sed/// ConvertType in that it is used to convert to the memory representation for 85193326Sed/// a type. For example, the scalar representation for _Bool is i1, but the 86193326Sed/// memory representation is usually i8 or i32, depending on the target. 
87224145Sdimllvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){ 88224145Sdim llvm::Type *R = ConvertType(T); 89198092Srdivacky 90193326Sed // If this is a non-bool type, don't map it. 91203955Srdivacky if (!R->isIntegerTy(1)) 92193326Sed return R; 93198092Srdivacky 94193326Sed // Otherwise, return an integer of the target-specified size. 95198092Srdivacky return llvm::IntegerType::get(getLLVMContext(), 96198092Srdivacky (unsigned)Context.getTypeSize(T)); 97224145Sdim} 98198092Srdivacky 99224145Sdim 100224145Sdim/// isRecordLayoutComplete - Return true if the specified type is already 101224145Sdim/// completely laid out. 102224145Sdimbool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const { 103224145Sdim llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I = 104224145Sdim RecordDeclTypes.find(Ty); 105224145Sdim return I != RecordDeclTypes.end() && !I->second->isOpaque(); 106193326Sed} 107193326Sed 108224145Sdimstatic bool 109224145SdimisSafeToConvert(QualType T, CodeGenTypes &CGT, 110224145Sdim llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked); 111224145Sdim 112224145Sdim 113224145Sdim/// isSafeToConvert - Return true if it is safe to convert the specified record 114224145Sdim/// decl to IR and lay it out, false if doing so would cause us to get into a 115224145Sdim/// recursive compilation mess. 116224145Sdimstatic bool 117224145SdimisSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT, 118224145Sdim llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { 119224145Sdim // If we have already checked this type (maybe the same type is used by-value 120224145Sdim // multiple times in multiple structure fields, don't check again. 121224145Sdim if (!AlreadyChecked.insert(RD)) return true; 122224145Sdim 123224145Sdim const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr(); 124224145Sdim 125224145Sdim // If this type is already laid out, converting it is a noop. 
126224145Sdim if (CGT.isRecordLayoutComplete(Key)) return true; 127224145Sdim 128224145Sdim // If this type is currently being laid out, we can't recursively compile it. 129224145Sdim if (CGT.isRecordBeingLaidOut(Key)) 130224145Sdim return false; 131224145Sdim 132224145Sdim // If this type would require laying out bases that are currently being laid 133224145Sdim // out, don't do it. This includes virtual base classes which get laid out 134224145Sdim // when a class is translated, even though they aren't embedded by-value into 135224145Sdim // the class. 136224145Sdim if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) { 137224145Sdim for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(), 138224145Sdim E = CRD->bases_end(); I != E; ++I) 139224145Sdim if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(), 140224145Sdim CGT, AlreadyChecked)) 141224145Sdim return false; 142224145Sdim } 143224145Sdim 144224145Sdim // If this type would require laying out members that are currently being laid 145224145Sdim // out, don't do it. 146224145Sdim for (RecordDecl::field_iterator I = RD->field_begin(), 147224145Sdim E = RD->field_end(); I != E; ++I) 148224145Sdim if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked)) 149224145Sdim return false; 150224145Sdim 151224145Sdim // If there are no problems, lets do it. 152224145Sdim return true; 153193326Sed} 154193326Sed 155224145Sdim/// isSafeToConvert - Return true if it is safe to convert this field type, 156224145Sdim/// which requires the structure elements contained by-value to all be 157224145Sdim/// recursively safe to convert. 158224145Sdimstatic bool 159224145SdimisSafeToConvert(QualType T, CodeGenTypes &CGT, 160224145Sdim llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) { 161224145Sdim T = T.getCanonicalType(); 162224145Sdim 163224145Sdim // If this is a record, check it. 
164224145Sdim if (const RecordType *RT = dyn_cast<RecordType>(T)) 165224145Sdim return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked); 166224145Sdim 167224145Sdim // If this is an array, check the elements, which are embedded inline. 168224145Sdim if (const ArrayType *AT = dyn_cast<ArrayType>(T)) 169224145Sdim return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked); 170198092Srdivacky 171224145Sdim // Otherwise, there is no concern about transforming this. We only care about 172224145Sdim // things that are contained by-value in a structure that can have another 173224145Sdim // structure as a member. 174224145Sdim return true; 175224145Sdim} 176198092Srdivacky 177193326Sed 178224145Sdim/// isSafeToConvert - Return true if it is safe to convert the specified record 179224145Sdim/// decl to IR and lay it out, false if doing so would cause us to get into a 180224145Sdim/// recursive compilation mess. 181224145Sdimstatic bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) { 182224145Sdim // If no structs are being laid out, we can certainly do this one. 183224145Sdim if (CGT.noRecordsBeingLaidOut()) return true; 184224145Sdim 185224145Sdim llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked; 186224145Sdim return isSafeToConvert(RD, CGT, AlreadyChecked); 187224145Sdim} 188193326Sed 189193326Sed 190224145Sdim/// isFuncTypeArgumentConvertible - Return true if the specified type in a 191224145Sdim/// function argument or result position can be converted to an IR type at this 192224145Sdim/// point. This boils down to being whether it is complete, as well as whether 193224145Sdim/// we've temporarily deferred expanding the type because we're in a recursive 194224145Sdim/// context. 195224145Sdimbool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) { 196224145Sdim // If this isn't a tagged type, we can convert it! 
197224145Sdim const TagType *TT = Ty->getAs<TagType>(); 198224145Sdim if (TT == 0) return true; 199234353Sdim 200234353Sdim // Incomplete types cannot be converted. 201234353Sdim if (TT->isIncompleteType()) 202224145Sdim return false; 203224145Sdim 204224145Sdim // If this is an enum, then it is always safe to convert. 205224145Sdim const RecordType *RT = dyn_cast<RecordType>(TT); 206224145Sdim if (RT == 0) return true; 207224145Sdim 208224145Sdim // Otherwise, we have to be careful. If it is a struct that we're in the 209224145Sdim // process of expanding, then we can't convert the function type. That's ok 210224145Sdim // though because we must be in a pointer context under the struct, so we can 211224145Sdim // just convert it to a dummy type. 212224145Sdim // 213224145Sdim // We decide this by checking whether ConvertRecordDeclType returns us an 214224145Sdim // opaque type for a struct that we know is defined. 215224145Sdim return isSafeToConvert(RT->getDecl(), *this); 216224145Sdim} 217224145Sdim 218224145Sdim 219224145Sdim/// Code to verify a given function type is complete, i.e. the return type 220224145Sdim/// and all of the argument types are complete. Also check to see if we are in 221224145Sdim/// a RS_StructPointer context, and if so whether any struct types have been 222224145Sdim/// pended. If so, we don't want to ask the ABI lowering code to handle a type 223224145Sdim/// that cannot be converted to an IR type. 
bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
  // The return type must itself be convertible.
  if (!isFuncTypeArgumentConvertible(FT->getResultType()))
    return false;

  // For a prototyped function, every declared argument type must be
  // convertible too.
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
    for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
      if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
        return false;

  return true;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  // If this is an enum being completed, then we flush all non-struct types from
  // the cache. This allows function types and other things that may be derived
  // from the enum to be recomputed.
  if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
    // Only flush the cache if we've actually already converted this type.
    if (TypeCache.count(ED->getTypeForDecl())) {
      // Okay, we formed some types based on this. We speculated that the enum
      // would be lowered to i32, so we only need to flush the cache if this
      // didn't happen.
      if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
        TypeCache.clear();
    }
    return;
  }

  // If we completed a RecordDecl that we previously used and converted to an
  // anonymous type, then go ahead and complete it now.
  const RecordDecl *RD = cast<RecordDecl>(TD);
  if (RD->isDependentType()) return;

  // Only complete it if we converted it already.  If we haven't converted it
  // yet, we'll just do it lazily.
  if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
    ConvertRecordDeclType(RD);

  // If necessary, provide the full definition of a type only used with a
  // declaration so far.
  if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
    DI->completeType(RD);
}

/// getTypeForFormat - Map an APFloat semantics object to the LLVM IR float
/// type of the same format.  IEEE half is special: it lowers to the native
/// 'half' type only when UseNativeHalf is set, and to i16 storage otherwise.
static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
                                    const llvm::fltSemantics &format,
                                    bool UseNativeHalf = false) {
  if (&format == &llvm::APFloat::IEEEhalf) {
    if (UseNativeHalf)
      return llvm::Type::getHalfTy(VMContext);
    else
      return llvm::Type::getInt16Ty(VMContext);
  }
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::getFloatTy(VMContext);
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::getDoubleTy(VMContext);
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::getFP128Ty(VMContext);
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::getPPC_FP128Ty(VMContext);
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::getX86_FP80Ty(VMContext);
  llvm_unreachable("Unknown float format!");
}

/// ConvertType - Convert the specified type to its LLVM form.
llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  T = Context.getCanonicalType(T);

  const Type *Ty = T.getTypePtr();

  // RecordTypes are cached and processed specially.
  if (const RecordType *RT = dyn_cast<RecordType>(Ty))
    return ConvertRecordDeclType(RT->getDecl());

  // See if type is already cached.
  llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
  // If type is found in map then use it. Otherwise, convert type T.
  if (TCI != TypeCache.end())
    return TCI->second;

  // If we don't have it in the cache, convert it now.
  llvm::Type *ResultType = 0;
  switch (Ty->getTypeClass()) {
  case Type::Record: // Handled above.
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    llvm_unreachable("Non-canonical or dependent types aren't possible.");

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty)->getKind()) {
    case BuiltinType::Void:
    case BuiltinType::ObjCId:
    case BuiltinType::ObjCClass:
    case BuiltinType::ObjCSel:
      // LLVM void type can only be used as the result of a function call. Just
      // map to the same as char.
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
      break;

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      ResultType = llvm::Type::getInt1Ty(getLLVMContext());
      break;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar_S:
    case BuiltinType::WChar_U:
    case BuiltinType::Char16:
    case BuiltinType::Char32:
      // All integer builtins map to an iN of the target-specified width.
      ResultType = llvm::IntegerType::get(getLLVMContext(),
                                 static_cast<unsigned>(Context.getTypeSize(T)));
      break;

    case BuiltinType::Half:
      // Half FP can either be storage-only (lowered to i16) or native.
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    Context.getLangOpts().NativeHalfType);
      break;
    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      ResultType = getTypeForFormat(getLLVMContext(),
                                    Context.getFloatTypeSemantics(T),
                                    /* UseNativeHalf = */ false);
      break;

    case BuiltinType::NullPtr:
      // Model std::nullptr_t as i8*
      ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
      break;

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
      break;

    case BuiltinType::OCLImage1d:
    case BuiltinType::OCLImage1dArray:
    case BuiltinType::OCLImage1dBuffer:
    case BuiltinType::OCLImage2d:
    case BuiltinType::OCLImage2dArray:
    case BuiltinType::OCLImage3d:
    case BuiltinType::OCLSampler:
    case BuiltinType::OCLEvent:
      // OpenCL builtin types are lowered by the OpenCL runtime support code.
      ResultType = CGM.getOpenCLRuntime().convertOpenCLSpecificType(Ty);
      break;

    case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
    case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
      llvm_unreachable("Unexpected placeholder builtin type!");
    }
    break;
  }
  case Type::Auto:
    llvm_unreachable("Unexpected undeduced auto type!");
  case Type::Complex: {
    // _Complex T lowers to a two-element struct {T, T}.
    llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
    ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
    break;
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    // References lower to pointers to the pointee's memory representation,
    // in the pointee's address space.
    const ReferenceType *RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }
  case Type::Pointer: {
    const PointerType *PTy = cast<PointerType>(Ty);
    QualType ETy = PTy->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(ETy);
    // LLVM has no 'void*'; use i8* for pointers to void.
    if (PointeeType->isVoidTy())
      PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
    unsigned AS = Context.getTargetAddressSpace(ETy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::VariableArray: {
    const VariableArrayType *A = cast<VariableArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
    ResultType = ConvertTypeForMem(A->getElementType());
    break;
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
    assert(A->getIndexTypeCVRQualifiers() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int], unless the element type is not sized.  If it is
    // unsized (e.g. an incomplete struct) just use [0 x i8].
    ResultType = ConvertTypeForMem(A->getElementType());
    if (!ResultType->isSized()) {
      SkippedLayout = true;
      ResultType = llvm::Type::getInt8Ty(getLLVMContext());
    }
    ResultType = llvm::ArrayType::get(ResultType, 0);
    break;
  }
  case Type::ConstantArray: {
    const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
    llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());

    // Lower arrays of undefined struct type to arrays of i8 just to have a
    // concrete type.
    if (!EltTy->isSized()) {
      SkippedLayout = true;
      EltTy = llvm::Type::getInt8Ty(getLLVMContext());
    }

    ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
    break;
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType *VT = cast<VectorType>(Ty);
    ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
                                       VT->getNumElements());
    break;
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    const FunctionType *FT = cast<FunctionType>(Ty);
    // First, check whether we can build the full function type.  If the
    // function type depends on an incomplete type (e.g. a struct or enum), we
    // cannot lower the function type.
    if (!isFuncTypeConvertible(FT)) {
      // This function's type depends on an incomplete tag type.

      // Force conversion of all the relevant record types, to make sure
      // we re-convert the FunctionType when appropriate.
      if (const RecordType *RT = FT->getResultType()->getAs<RecordType>())
        ConvertRecordDeclType(RT->getDecl());
      if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
        for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
          if (const RecordType *RT = FPT->getArgType(i)->getAs<RecordType>())
            ConvertRecordDeclType(RT->getDecl());

      // Return a placeholder type.
      ResultType = llvm::StructType::get(getLLVMContext());

      SkippedLayout = true;
      break;
    }

    // While we're converting the argument types for a function, we don't want
    // to recursively convert any pointed-to structs.  Converting directly-used
    // structs is ok though.
    if (!RecordsBeingLaidOut.insert(Ty)) {
      // This function type is already being converted higher on the stack;
      // hand back a placeholder rather than recursing.
      ResultType = llvm::StructType::get(getLLVMContext());

      SkippedLayout = true;
      break;
    }

    // The function type can be built; call the appropriate routines to
    // build it.
    const CGFunctionInfo *FI;
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
      FI = &arrangeFreeFunctionType(
                   CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
    } else {
      const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
      FI = &arrangeFreeFunctionType(
                 CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
    }

    // If there is something higher level prodding our CGFunctionInfo, then
    // don't recurse into it again.
    if (FunctionsBeingProcessed.count(FI)) {

      ResultType = llvm::StructType::get(getLLVMContext());
      SkippedLayout = true;
    } else {

      // Otherwise, we're good to go, go ahead and convert it.
      ResultType = GetFunctionType(*FI);
    }

    RecordsBeingLaidOut.erase(Ty);

    // If any placeholder was handed out above, cached entries derived from it
    // may be stale; drop the whole cache.
    if (SkippedLayout)
      TypeCache.clear();

    // Once the outermost conversion finishes, convert any records whose
    // conversion was deferred to break recursion.
    if (RecordsBeingLaidOut.empty())
      while (!DeferredRecords.empty())
        ConvertRecordDeclType(DeferredRecords.pop_back_val());
    break;
  }

  case Type::ObjCObject:
    ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
    break;

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
    if (!T)
      T = llvm::StructType::create(getLLVMContext());
    ResultType = T;
    break;
  }

  case Type::ObjCObjectPointer: {
    // Protocol qualifications do not influence the LLVM type, we just return a
    // pointer to the underlying interface type. We don't need to worry about
    // recursive conversion.
    llvm::Type *T =
      ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
    ResultType = T->getPointerTo();
    break;
  }

  case Type::Enum: {
    const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
    if (ED->isCompleteDefinition() || ED->isFixed())
      return ConvertType(ED->getIntegerType());
    // Return a placeholder 'i32' type.  This can be changed later when the
    // type is defined (see UpdateCompletedType), but is likely to be the
    // "right" answer.
    ResultType = llvm::Type::getInt32Ty(getLLVMContext());
    break;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
    llvm::Type *PointeeType = ConvertTypeForMem(FTy);
    unsigned AS = Context.getTargetAddressSpace(FTy);
    ResultType = llvm::PointerType::get(PointeeType, AS);
    break;
  }

  case Type::MemberPointer: {
    // Member pointer representation is ABI-specific; delegate to the C++ ABI.
    ResultType =
      getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
    break;
  }

  case Type::Atomic: {
    QualType valueType = cast<AtomicType>(Ty)->getValueType();
    ResultType = ConvertTypeForMem(valueType);

    // Pad out to the inflated size if necessary.
    uint64_t valueSize = Context.getTypeSize(valueType);
    uint64_t atomicSize = Context.getTypeSize(Ty);
    if (valueSize != atomicSize) {
      assert(valueSize < atomicSize);
      llvm::Type *elts[] = {
        ResultType,
        llvm::ArrayType::get(CGM.Int8Ty, (atomicSize - valueSize) / 8)
      };
      ResultType = llvm::StructType::get(getLLVMContext(),
                                         llvm::makeArrayRef(elts));
    }
    break;
  }
  }

  assert(ResultType && "Didn't convert a type?");

  TypeCache[Ty] = ResultType;
  return ResultType;
}

bool CodeGenModule::isPaddedAtomicType(QualType type) {
  return isPaddedAtomicType(type->castAs<AtomicType>());
}

// An atomic type is "padded" when its total size exceeds that of its value
// type (see the Type::Atomic case in ConvertType above).
bool CodeGenModule::isPaddedAtomicType(const AtomicType *type) {
  return Context.getTypeSize(type) != Context.getTypeSize(type->getValueType());
}

/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
  // TagDecl's are not necessarily unique, instead use the (clang)
  // type connected to the decl.
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  llvm::StructType *&Entry = RecordDeclTypes[Key];

  // If we don't have a StructType at all yet, create the forward declaration.
  if (Entry == 0) {
    Entry = llvm::StructType::create(getLLVMContext());
    addRecordTypeName(RD, Entry, "");
  }
  llvm::StructType *Ty = Entry;

  // If this is still a forward declaration, or the LLVM type is already
  // complete, there's nothing more to do.
  RD = RD->getDefinition();
  if (RD == 0 || !RD->isCompleteDefinition() || !Ty->isOpaque())
    return Ty;

  // If converting this type would cause us to infinitely loop, don't do it!
  // Defer it until the outermost record conversion finishes.
  if (!isSafeToConvert(RD, *this)) {
    DeferredRecords.push_back(RD);
    return Ty;
  }

  // Okay, this is a definition of a type.  Compile the implementation now.
  bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
  assert(InsertResult && "Recursively compiling a struct?");

  // Force conversion of non-virtual base classes recursively.
  if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
         e = CRD->bases_end(); i != e; ++i) {
      if (i->isVirtual()) continue;

      ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
    }
  }

  // Layout fields.
  CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
  CGRecordLayouts[Key] = Layout;

  // We're done laying out this struct.
  bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
  assert(EraseResult && "struct not in RecordsBeingLaidOut set?");

  // If this struct blocked a FunctionType conversion, then recompute whatever
  // was derived from that.
  // FIXME: This is hugely overconservative.
  if (SkippedLayout)
    TypeCache.clear();

  // If we're done converting the outer-most record, then convert any deferred
  // structs as well.
  if (RecordsBeingLaidOut.empty())
    while (!DeferredRecords.empty())
      ConvertRecordDeclType(DeferredRecords.pop_back_val());

  return Ty;
}

/// getCGRecordLayout - Return record layout info for the given record decl.
const CGRecordLayout &
CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
  const Type *Key = Context.getTagDeclType(RD).getTypePtr();

  const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
  if (!Layout) {
    // Compute the type information.
    ConvertRecordDeclType(RD);

    // Now try again.
    Layout = CGRecordLayouts.lookup(Key);
  }

  assert(Layout && "Unable to find record layout information for type");
  return *Layout;
}

/// isZeroInitializable - Return whether a type can be zero-initialized (filled
/// with zero bytes).  Member pointers, and records containing them, may use a
/// different null representation chosen by the C++ ABI.
bool CodeGenTypes::isZeroInitializable(QualType T) {
  // No need to check for member pointers when not compiling C++.
  if (!Context.getLangOpts().CPlusPlus)
    return true;

  T = Context.getBaseElementType(T);

  // Records are non-zero-initializable if they contain any
  // non-zero-initializable subobjects.
  if (const RecordType *RT = T->getAs<RecordType>()) {
    const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
    return isZeroInitializable(RD);
  }

  // We have to ask the ABI about member pointers.
  if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
    return getCXXABI().isZeroInitializable(MPT);

  // Everything else is okay.
  return true;
}

bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
  // The record layout computed during conversion records this property.
  return getCGRecordLayout(RD).isZeroInitializable();
}