CodeGenTypes.cpp revision 193326
//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the code that handles AST -> LLVM type lowering.
//
//===----------------------------------------------------------------------===//

#include "CodeGenTypes.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"

#include "CGCall.h"

using namespace clang;
using namespace CodeGen;

namespace {
  /// RecordOrganizer - This helper class, used by CGRecordLayout, lays out
  /// structs and unions. It manages transient information used during layout.
  /// FIXME: Handle field alignments. Handle packed structs.
  class RecordOrganizer {
  public:
    explicit RecordOrganizer(CodeGenTypes &Types, const RecordDecl& Record) :
      CGT(Types), RD(Record), STy(NULL) {}

    /// layoutStructFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type.  This should be invoked only after
    /// all fields are added.
    void layoutStructFields(const ASTRecordLayout &RL);

    /// layoutUnionFields - Do the actual work and lay out all fields. Create
    /// corresponding llvm struct type.  This should be invoked only after
    /// all fields are added.
    void layoutUnionFields(const ASTRecordLayout &RL);

    /// getLLVMType - Return associated llvm struct type. This may be NULL
    /// if fields are not laid out.
    llvm::Type *getLLVMType() const {
      return STy;
    }

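    /// getPaddingFields - Return the set of llvm struct field indices that
    /// were recorded as padding during layout, if any.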
    llvm::SmallSet<unsigned, 8> &getPaddingFields() {
      return PaddingFields;
    }

  private:
    CodeGenTypes &CGT;
    const RecordDecl& RD;
    llvm::Type *STy;
    llvm::SmallSet<unsigned, 8> PaddingFields;
  };
}

CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M,
                           const llvm::TargetData &TD)
  : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD),
    TheABIInfo(0) {
}

CodeGenTypes::~CodeGenTypes() {
  for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
         I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
       I != E; ++I)
    delete I->second;
  CGRecordLayouts.clear();
}

/// ConvertType - Convert the specified type to its LLVM form.
const llvm::Type *CodeGenTypes::ConvertType(QualType T) {
  llvm::PATypeHolder Result = ConvertTypeRecursive(T);

  // Any pointers that were converted deferred evaluation of their pointee
  // type, creating an opaque type instead.  This avoids problems with
  // circular types.  Loop through all these deferred pointees, if any, and
  // resolve them now.
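  // For example, 'struct S { struct S *next; };' refers to itself through a
  // pointer; deferring the pointee keeps the conversion from recursing
  // forever.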
  while (!PointersToResolve.empty()) {
    std::pair<QualType, llvm::OpaqueType*> P = PointersToResolve.back();
    PointersToResolve.pop_back();
    // We can handle bare pointers here because we know that the only pointers
    // to the Opaque type are P.second and from other types.  Refining the
    // opaque type away will invalidate P.second, but we don't mind :).
    const llvm::Type *NT = ConvertTypeForMemRecursive(P.first);
    P.second->refineAbstractTypeTo(NT);
  }

  return Result;
}

const llvm::Type *CodeGenTypes::ConvertTypeRecursive(QualType T) {
  T = Context.getCanonicalType(T);

  // See if the type is already cached; if so, just reuse it.
  llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator
    I = TypeCache.find(T.getTypePtr());
  if (I != TypeCache.end())
    return I->second.get();

  // Otherwise, convert the type and cache the result.
  const llvm::Type *ResultType = ConvertNewType(T);
  TypeCache.insert(std::make_pair(T.getTypePtr(),
                                  llvm::PATypeHolder(ResultType)));
  return ResultType;
}

const llvm::Type *CodeGenTypes::ConvertTypeForMemRecursive(QualType T) {
  const llvm::Type *ResultType = ConvertTypeRecursive(T);
  if (ResultType == llvm::Type::Int1Ty)
    return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
  return ResultType;
}

/// ConvertTypeForMem - Convert type T into a llvm::Type.  This differs from
/// ConvertType in that it is used to convert to the memory representation for
/// a type.  For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i32, depending on the target.
const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) {
  const llvm::Type *R = ConvertType(T);

  // If this is a non-bool type, don't map it.
  if (R != llvm::Type::Int1Ty)
    return R;

  // Otherwise, return an integer of the target-specified size.
  return llvm::IntegerType::get((unsigned)Context.getTypeSize(T));
}

// Code to verify that a given function type is complete, i.e. that the return
// type and all of the argument types are complete.  Returns the first
// incomplete TagType found, or null if the function type is complete.
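// For example, given 'struct S; struct S f(void);', the return type of 'f'
// names a tag that has not been defined yet, so the function type cannot be
// built.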
static const TagType *VerifyFuncTypeComplete(const Type* T) {
  const FunctionType *FT = cast<FunctionType>(T);
  if (const TagType* TT = FT->getResultType()->getAsTagType())
    if (!TT->getDecl()->isDefinition())
      return TT;
  if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(T))
    for (unsigned i = 0; i < FPT->getNumArgs(); i++)
      if (const TagType* TT = FPT->getArgType(i)->getAsTagType())
        if (!TT->getDecl()->isDefinition())
          return TT;
  return 0;
}

/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);
  if (TDTI == TagDeclTypes.end()) return;

  // Remember the opaque LLVM type for this TagDecl.
  llvm::PATypeHolder OpaqueHolder = TDTI->second;
  assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) &&
         "Updating an already non-opaque type?");

  // Remove it from TagDeclTypes so that it will be regenerated.
  TagDeclTypes.erase(TDTI);

  // Generate the new type.
  const llvm::Type *NT = ConvertTagDeclType(TD);

  // Refine the old opaque type to its new definition.
  cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT);

  // Since we just completed a tag type, check to see if any function types
  // were completed along with the tag type.
  // FIXME: This is very inefficient; if we track which function types depend
  // on which tag types, though, it should be reasonably efficient.
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator i;
  for (i = FunctionTypes.begin(); i != FunctionTypes.end();) {
    if (const TagType* TT = VerifyFuncTypeComplete(i->first)) {
      // This function type still depends on an incomplete tag type; make sure
      // that tag type has an associated opaque type.
      ConvertTagDeclType(TT->getDecl());
      ++i;
    } else {
      // This function no longer depends on an incomplete tag type; create the
      // function type, and refine the opaque type to the new function type.
      // Grab the key and holder before erasing: erasing invalidates the
      // iterator, and ConvertNewType may itself mutate FunctionTypes.
      llvm::PATypeHolder OpaqueHolder = i->second;
      const Type *FnKey = i->first;
      FunctionTypes.erase(i);
      const llvm::Type *NFT = ConvertNewType(QualType(FnKey, 0));
      cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NFT);
      // Rescan from the beginning, since the map may have been modified.
      i = FunctionTypes.begin();
    }
  }
}

static const llvm::Type* getTypeForFormat(const llvm::fltSemantics &format) {
  if (&format == &llvm::APFloat::IEEEsingle)
    return llvm::Type::FloatTy;
  if (&format == &llvm::APFloat::IEEEdouble)
    return llvm::Type::DoubleTy;
  if (&format == &llvm::APFloat::IEEEquad)
    return llvm::Type::FP128Ty;
  if (&format == &llvm::APFloat::PPCDoubleDouble)
    return llvm::Type::PPC_FP128Ty;
  if (&format == &llvm::APFloat::x87DoubleExtended)
    return llvm::Type::X86_FP80Ty;
  assert(0 && "Unknown float format!");
  return 0;
}

const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
  const clang::Type &Ty = *Context.getCanonicalType(T);

  switch (Ty.getTypeClass()) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
    assert(false && "Non-canonical or dependent types aren't possible.");
    break;

  case Type::Builtin: {
    switch (cast<BuiltinType>(Ty).getKind()) {
    default: assert(0 && "Unknown builtin type!");
    case BuiltinType::Void:
      // LLVM void type can only be used as the result of a function call.
      // Just map to the same as char.
      return llvm::IntegerType::get(8);

    case BuiltinType::Bool:
      // Note that we always return bool as i1 for use as a scalar type.
      return llvm::Type::Int1Ty;

    case BuiltinType::Char_S:
    case BuiltinType::Char_U:
    case BuiltinType::SChar:
    case BuiltinType::UChar:
    case BuiltinType::Short:
    case BuiltinType::UShort:
    case BuiltinType::Int:
    case BuiltinType::UInt:
    case BuiltinType::Long:
    case BuiltinType::ULong:
    case BuiltinType::LongLong:
    case BuiltinType::ULongLong:
    case BuiltinType::WChar:
      return llvm::IntegerType::get(
        static_cast<unsigned>(Context.getTypeSize(T)));

    case BuiltinType::Float:
    case BuiltinType::Double:
    case BuiltinType::LongDouble:
      return getTypeForFormat(Context.getFloatTypeSemantics(T));

    case BuiltinType::UInt128:
    case BuiltinType::Int128:
      return llvm::IntegerType::get(128);
    }
    break;
  }
  case Type::FixedWidthInt:
    return llvm::IntegerType::get(cast<FixedWidthIntType>(T)->getWidth());
  case Type::Complex: {
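    // A complex type lowers to a two-element struct of its element type,
    // e.g. '_Complex float' becomes '{ float, float }'.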
    const llvm::Type *EltTy =
      ConvertTypeRecursive(cast<ComplexType>(Ty).getElementType());
    return llvm::StructType::get(EltTy, EltTy, NULL);
  }
  case Type::LValueReference:
  case Type::RValueReference: {
    const ReferenceType &RTy = cast<ReferenceType>(Ty);
    QualType ETy = RTy.getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }
  case Type::Pointer: {
    const PointerType &PTy = cast<PointerType>(Ty);
    QualType ETy = PTy.getPointeeType();
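    // Defer conversion of the pointee: record an opaque placeholder and let
    // ConvertType resolve it after the outer conversion finishes, so
    // self-referential types terminate.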
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(ETy, PointeeType));
    return llvm::PointerType::get(PointeeType, ETy.getAddressSpace());
  }

  case Type::VariableArray: {
    const VariableArrayType &A = cast<VariableArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // VLAs resolve to the innermost element type; this matches
    // the return of alloca, and there isn't any obviously better choice.
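    // e.g. 'int A[n]' lowers to plain 'i32'; the dynamic element count is
    // handled separately when the storage is allocated.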
    return ConvertTypeForMemRecursive(A.getElementType());
  }
  case Type::IncompleteArray: {
    const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty);
    assert(A.getIndexTypeQualifier() == 0 &&
           "FIXME: We only handle trivial array types so far!");
    // int X[] -> [0 x int]
    return llvm::ArrayType::get(ConvertTypeForMemRecursive(A.getElementType()),
                                0);
  }
  case Type::ConstantArray: {
    const ConstantArrayType &A = cast<ConstantArrayType>(Ty);
    const llvm::Type *EltTy = ConvertTypeForMemRecursive(A.getElementType());
    return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue());
  }
  case Type::ExtVector:
  case Type::Vector: {
    const VectorType &VT = cast<VectorType>(Ty);
    return llvm::VectorType::get(ConvertTypeRecursive(VT.getElementType()),
                                 VT.getNumElements());
  }
  case Type::FunctionNoProto:
  case Type::FunctionProto: {
    // First, check whether we can build the full function type.
    if (const TagType* TT = VerifyFuncTypeComplete(&Ty)) {
      // This function's type depends on an incomplete tag type; make sure
      // we have an opaque type corresponding to the tag type.
      ConvertTagDeclType(TT->getDecl());
      // Create an opaque type for this function type, save it, and return it.
      llvm::Type *ResultType = llvm::OpaqueType::get();
      FunctionTypes.insert(std::make_pair(&Ty, ResultType));
      return ResultType;
    }
    // The function type can be built; call the appropriate routines to
    // build it.
    if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
      return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());

    const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
    return GetFunctionType(getFunctionInfo(FNPT), true);
  }

  case Type::ExtQual:
    return
      ConvertTypeRecursive(QualType(cast<ExtQualType>(Ty).getBaseType(), 0));

  case Type::ObjCQualifiedInterface: {
    // Lower foo<P1,P2> just like foo.
    ObjCInterfaceDecl *ID = cast<ObjCQualifiedInterfaceType>(Ty).getDecl();
    return ConvertTypeRecursive(Context.getObjCInterfaceType(ID));
  }

  case Type::ObjCInterface: {
    // Objective-C interfaces are always opaque (outside of the
    // runtime, which can do whatever it likes); we never refine
    // these.
    const llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(&Ty)];
    if (!T)
      T = llvm::OpaqueType::get();
    return T;
  }

  case Type::ObjCQualifiedId:
    // Protocols don't influence the LLVM type.
    return ConvertTypeRecursive(Context.getObjCIdType());

  case Type::Record:
  case Type::Enum: {
    const TagDecl *TD = cast<TagType>(Ty).getDecl();
    const llvm::Type *Res = ConvertTagDeclType(TD);

    std::string TypeName(TD->getKindName());
    TypeName += '.';

    // Name the codegen type after the typedef name
    // if there is no tag type name available.
    if (TD->getIdentifier())
      TypeName += TD->getNameAsString();
    else if (const TypedefType *TdT = dyn_cast<TypedefType>(T))
      TypeName += TdT->getDecl()->getNameAsString();
    else
      TypeName += "anon";

    TheModule.addTypeName(TypeName, Res);
    return Res;
  }

  case Type::BlockPointer: {
    const QualType FTy = cast<BlockPointerType>(Ty).getPointeeType();
    llvm::OpaqueType *PointeeType = llvm::OpaqueType::get();
    PointersToResolve.push_back(std::make_pair(FTy, PointeeType));
    return llvm::PointerType::get(PointeeType, FTy.getAddressSpace());
  }

  case Type::MemberPointer: {
    // FIXME: This is ABI dependent. We use the Itanium C++ ABI.
    // http://www.codesourcery.com/public/cxx-abi/abi.html#member-pointers
    // If we ever want to support other ABIs this needs to be abstracted.
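    // Under this ABI, a pointer to member function is a pair of ptrdiff_t
    // values, while a pointer to a data member is a single ptrdiff_t offset;
    // both representations are built below from getPointerDiffType().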

    QualType ETy = cast<MemberPointerType>(Ty).getPointeeType();
    if (ETy->isFunctionType()) {
      return llvm::StructType::get(ConvertType(Context.getPointerDiffType()),
                                   ConvertType(Context.getPointerDiffType()),
                                   NULL);
    } else
      return ConvertType(Context.getPointerDiffType());
  }

  case Type::TemplateSpecialization:
    assert(false && "Dependent types can't get here");
  }

  // FIXME: implement.
  return llvm::OpaqueType::get();
}

/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
  // TagDecls are not necessarily unique; instead, use the (clang)
  // type connected to the decl.
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, llvm::PATypeHolder>::iterator TDTI =
    TagDeclTypes.find(Key);

  // If we've already compiled this tag type, use the previous definition.
  if (TDTI != TagDeclTypes.end())
    return TDTI->second;

  // If this is still a forward declaration, just define an opaque type to use
  // for this tagged decl.
  if (!TD->isDefinition()) {
    llvm::Type *ResultType = llvm::OpaqueType::get();
    TagDeclTypes.insert(std::make_pair(Key, ResultType));
    return ResultType;
  }


  // Okay, this is a definition of a type.  Compile the implementation now.

  if (TD->isEnum()) {
    // Don't bother storing enums in TagDeclTypes.
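    // An enum lowers directly to its underlying integer type; e.g. an enum
    // whose values fit in 'int' typically becomes i32.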
    return ConvertTypeRecursive(cast<EnumDecl>(TD)->getIntegerType());
  }

  // This decl could well be recursive.  In this case, insert an opaque
  // definition of this type, which the recursive uses will get.  We will then
  // refine this opaque version later.

  // Create new OpaqueType now for later use in case this is a recursive
  // type.  This will later be refined to the actual type.
  llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get();
  TagDeclTypes.insert(std::make_pair(Key, ResultHolder));

  const llvm::Type *ResultType;
  const RecordDecl *RD = cast<RecordDecl>(TD);

  // There isn't any extra information for empty structures/unions.
  if (RD->field_empty(getContext())) {
    ResultType = llvm::StructType::get(std::vector<const llvm::Type*>());
  } else {
    // Layout fields.
    RecordOrganizer RO(*this, *RD);

    if (TD->isStruct() || TD->isClass())
      RO.layoutStructFields(Context.getASTRecordLayout(RD));
    else {
      assert(TD->isUnion() && "unknown tag decl kind!");
      RO.layoutUnionFields(Context.getASTRecordLayout(RD));
    }

    // Record the layout, keyed by the clang type computed above.
    CGRecordLayouts[Key] = new CGRecordLayout(RO.getLLVMType(),
                                              RO.getPaddingFields());
    ResultType = RO.getLLVMType();
  }

  // Refine our Opaque type to ResultType.  This can invalidate ResultType, so
  // make sure to read the result out of the holder.
  cast<llvm::OpaqueType>(ResultHolder.get())
    ->refineAbstractTypeTo(ResultType);

  return ResultHolder.get();
}

/// getLLVMFieldNo - Return llvm::StructType element number
/// that corresponds to the field FD.
unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
  llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
  assert(I != FieldInfo.end() && "Unable to find field info");
  return I->second;
}

/// addFieldInfo - Assign field number to field FD.
void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
  FieldInfo[FD] = No;
}

/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
  llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
    I = BitFields.find(FD);
  assert(I != BitFields.end() && "Unable to find bitfield info");
  return I->second;
}

/// addBitFieldInfo - Assign a start bit and a size to field FD.
void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
                                   unsigned Size) {
  BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
}

/// getCGRecordLayout - Return record layout info for the given tag decl.
const CGRecordLayout *
CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
  const Type *Key =
    Context.getTagDeclType(const_cast<TagDecl*>(TD)).getTypePtr();
  llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I
    = CGRecordLayouts.find(Key);
  assert(I != CGRecordLayouts.end()
         && "Unable to find record layout information for type");
  return I->second;
}

/// layoutStructFields - Do the actual work and lay out all fields. Create
/// corresponding llvm struct type.
/// Note that this doesn't actually try to do struct layout; it depends on
/// the layout built by the AST.  (We have to do struct layout to do Sema,
/// and there's no point in duplicating the work.)
void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
  // FIXME: This code currently always generates packed structures.
  // Unpacked structures are more readable, and sometimes more efficient!
  // (But note that any changes here are likely to impact CGExprConstant,
  // which makes some messy assumptions.)
  uint64_t llvmSize = 0;
  // FIXME: Make this a SmallVector
  std::vector<const llvm::Type*> LLVMFields;

  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
                               FieldEnd = RD.field_end(CGT.getContext());
       Field != FieldEnd; ++Field) {
    uint64_t offset = RL.getFieldOffset(curField);
    const llvm::Type *Ty = CGT.ConvertTypeForMemRecursive(Field->getType());
    uint64_t size = CGT.getTargetData().getTypeAllocSizeInBits(Ty);

    if (Field->isBitField()) {
      uint64_t BitFieldSize =
          Field->getBitWidth()->EvaluateAsInt(CGT.getContext()).getZExtValue();

      // Bitfield field info is different from other field info;
      // it actually ignores the underlying LLVM struct because
      // there isn't any convenient mapping.
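      // The field number recorded here is the index of a 'size'-bit storage
      // unit within the record, and the start bit is the offset into that
      // unit, rather than an llvm struct field index.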
      CGT.addFieldInfo(*Field, offset / size);
      CGT.addBitFieldInfo(*Field, offset % size, BitFieldSize);
    } else {
      // Put the element into the struct. This would be simpler
      // if we didn't bother, but it seems a bit too strange to
      // allocate all structs as i8 arrays.
      while (llvmSize < offset) {
        LLVMFields.push_back(llvm::Type::Int8Ty);
        llvmSize += 8;
      }

      llvmSize += size;
      CGT.addFieldInfo(*Field, LLVMFields.size());
      LLVMFields.push_back(Ty);
    }
    ++curField;
  }

  while (llvmSize < RL.getSize()) {
    LLVMFields.push_back(llvm::Type::Int8Ty);
    llvmSize += 8;
  }

  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}


/// layoutUnionFields - Do the actual work and lay out all fields. Create
/// corresponding llvm struct type.  This should be invoked only after
/// all fields are added.
void RecordOrganizer::layoutUnionFields(const ASTRecordLayout &RL) {
  unsigned curField = 0;
  for (RecordDecl::field_iterator Field = RD.field_begin(CGT.getContext()),
                               FieldEnd = RD.field_end(CGT.getContext());
       Field != FieldEnd; ++Field) {
    // The offset should usually be zero, but bitfields could be strange.
    uint64_t offset = RL.getFieldOffset(curField);
    CGT.ConvertTypeRecursive(Field->getType());

    if (Field->isBitField()) {
      Expr *BitWidth = Field->getBitWidth();
      uint64_t BitFieldSize =
        BitWidth->EvaluateAsInt(CGT.getContext()).getZExtValue();

      CGT.addFieldInfo(*Field, 0);
      CGT.addBitFieldInfo(*Field, offset, BitFieldSize);
    } else {
      CGT.addFieldInfo(*Field, 0);
    }
    ++curField;
  }

  // This looks stupid, but it is correct in the sense that
  // it works no matter how complicated the sizes and alignments
  // of the union elements are. The natural alignment
  // of the result doesn't matter because anyone allocating
  // structures should be aligning them appropriately anyway.
  // FIXME: We can be a bit more intuitive in a lot of cases.
  // FIXME: Make this a struct type to work around PR2399; the
  // C backend doesn't like structs using array types.
  std::vector<const llvm::Type*> LLVMFields;
  LLVMFields.push_back(llvm::ArrayType::get(llvm::Type::Int8Ty,
                                            RL.getSize() / 8));
  STy = llvm::StructType::get(LLVMFields, true);
  assert(CGT.getTargetData().getTypeAllocSizeInBits(STy) == RL.getSize());
}
615