//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

/// ComputeLinearIndex - Given an LLVM IR aggregate type and a sequence
/// of insertvalue or extractvalue indices that identify a member, return
/// the linearized index of the start of the member.
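///
/// For example, given the (hypothetical) aggregate type
/// { i32, { float, double }, [2 x i8] }, the member at indices {1, 1}
/// (the double) has linearized index 2, since the flattened scalar
/// sequence is i32, float, double, i8, i8.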
///
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
        EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
      if (Indices && *Indices == i)
        return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
    }
    return CurIndex;
  }
  // Base case: Ty is a non-aggregate leaf type, which occupies a single slot.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
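///
/// For example, the (hypothetical) type { i32, [2 x float] } would yield
/// ValueVTs = { i32, f32, f32 } and Offsets = { 0, 4, 8 }, assuming a
/// target data layout with 4-byte, unpadded fields.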
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
                           SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
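///
/// For example, a (hypothetical) typeinfo operand such as
///   i8* bitcast (i8** @_ZTIi to i8*)
/// yields @_ZTIi, while a null pointer (a catch-all) yields null.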
GlobalVariable *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalVariable *GV = dyn_cast<GlobalVariable>(V);

  if (GV && GV->getName() == "llvm.eh.catch.all.value") {
    assert(GV->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = GV->getInitializer();
    GV = dyn_cast<GlobalVariable>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
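///
/// For example, a (hypothetical) inline asm call such as
///   call void asm "movl $$0, $0", "=*m"(i32* %p)
/// uses a memory constraint, as does any constraint marked indirect.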
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This is a direct
/// one-to-one mapping; use getFCmpCodeWithoutNaN to relax the
/// ordered/unordered distinction when NaNs can be ignored.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

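/// getFCmpCodeWithoutNaN - Given an ISD floating-point condition code,
/// return the equivalent code with the ordered/unordered (NaN) distinction
/// removed; for example, both SETOLT and SETULT map to SETLT. Codes with
/// no such pairing are returned unchanged.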
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
    case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
    case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
    case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
    case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
    case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
    case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
    default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

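/// isNoopBitcast - Return true if a bitcast between these two types would
/// lower to no machine code: identical types, pointer-to-pointer casts, and
/// bitcasts between legal vector types are all free.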
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLowering& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// sameNoopInput - Return true if V1 == V2, else if either V1 or V2 is a noop
/// (i.e., lowers to no machine code), look through it (and any transitive noop
/// operands to it) and check if it has the same noop input value.  This is
/// used to determine if a tail call can be formed.
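///
/// For example, given the (hypothetical) IR
///   %p = call i8* @f()
///   %q = bitcast i8* %p to i32*
/// the values %q and %p compare equal here, since a pointer-to-pointer
/// bitcast lowers to no machine code.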
static bool sameNoopInput(const Value *V1, const Value *V2,
                          SmallVectorImpl<unsigned> &Els1,
                          SmallVectorImpl<unsigned> &Els2,
                          const TargetLowering &TLI) {
  using std::swap;
  bool swapParity = false;
  bool equalEls = Els1 == Els2;
  while (true) {
    if ((equalEls && V1 == V2) || isa<UndefValue>(V1) || isa<UndefValue>(V2)) {
      if (swapParity)
        // Revert to original Els1 and Els2 to avoid confusing recursive calls
        swap(Els1, Els2);
      return true;
    }

    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V1);
    const Value *NoopInput = 0;
    if (I != 0 && I->getNumOperands() > 0) {
      Value *Op = I->getOperand(0);
      if (isa<TruncInst>(I)) {
        // Look through truly no-op truncates.
        if (TLI.isTruncateFree(Op->getType(), I->getType()))
          NoopInput = Op;
      } else if (isa<BitCastInst>(I)) {
        // Look through truly no-op bitcasts.
        if (isNoopBitcast(Op->getType(), I->getType(), TLI))
          NoopInput = Op;
      } else if (isa<GetElementPtrInst>(I)) {
        // Look through getelementptr
        if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
          NoopInput = Op;
      } else if (isa<IntToPtrInst>(I)) {
        // Look through inttoptr.
        // Make sure this isn't a truncating or extending cast.  We could
        // support this eventually, but don't bother for now.
        if (!isa<VectorType>(I->getType()) &&
            TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
          NoopInput = Op;
      } else if (isa<PtrToIntInst>(I)) {
        // Look through ptrtoint.
        // Make sure this isn't a truncating or extending cast.  We could
        // support this eventually, but don't bother for now.
        if (!isa<VectorType>(I->getType()) &&
            TLI.getPointerTy().getSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
          NoopInput = Op;
      } else if (isa<CallInst>(I)) {
        // Look through call
        for (User::const_op_iterator i = I->op_begin(),
                                     // Skip Callee
                                     e = I->op_end() - 1;
             i != e; ++i) {
          unsigned attrInd = i - I->op_begin() + 1;
          if (cast<CallInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
              isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
            NoopInput = *i;
            break;
          }
        }
      } else if (isa<InvokeInst>(I)) {
        // Look through invoke
        for (User::const_op_iterator i = I->op_begin(),
                                     // Skip BB, BB, Callee
                                     e = I->op_end() - 3;
             i != e; ++i) {
          unsigned attrInd = i - I->op_begin() + 1;
          if (cast<InvokeInst>(I)->paramHasAttr(attrInd, Attribute::Returned) &&
              isNoopBitcast((*i)->getType(), I->getType(), TLI)) {
            NoopInput = *i;
            break;
          }
        }
      }
    }

    if (NoopInput) {
      V1 = NoopInput;
      continue;
    }

    // If we already swapped, avoid infinite loop
    if (swapParity)
      break;

    // Otherwise, swap V1<->V2, Els1<->Els2
    swap(V1, V2);
    swap(Els1, Els2);
    swapParity = !swapParity;
  }

  for (unsigned n = 0; n < 2; ++n) {
    if (isa<InsertValueInst>(V1)) {
      if (isa<StructType>(V1->getType())) {
        // Look through insertvalue
        unsigned i, e;
        for (i = 0, e = cast<StructType>(V1->getType())->getNumElements();
             i != e; ++i) {
          const Value *InScalar = FindInsertedValue(const_cast<Value*>(V1), i);
          if (InScalar == 0)
            break;
          Els1.push_back(i);
          if (!sameNoopInput(InScalar, V2, Els1, Els2, TLI)) {
            Els1.pop_back();
            break;
          }
          Els1.pop_back();
        }
        if (i == e) {
          if (swapParity)
            swap(Els1, Els2);
          return true;
        }
      }
    } else if (!Els1.empty() && isa<ExtractValueInst>(V1)) {
      const ExtractValueInst *EVI = cast<ExtractValueInst>(V1);
      unsigned i = Els1.back();
      // If the scalar value being inserted is an extractvalue of the right
      // index from the call, then everything is good.
      if (isa<StructType>(EVI->getOperand(0)->getType()) &&
          EVI->getNumIndices() == 1 && EVI->getIndices()[0] == i) {
        // Look through extractvalue
        Els1.pop_back();
        if (sameNoopInput(EVI->getOperand(0), V2, Els1, Els2, TLI)) {
          Els1.push_back(i);
          if (swapParity)
            swap(Els1, Els2);
          return true;
        }
        Els1.push_back(i);
      }
    }

    swap(V1, V2);
    swap(Els1, Els2);
    swapParity = !swapParity;
  }

  if (swapParity)
    swap(Els1, Els2);
  return false;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
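///
/// For example, in the (hypothetical) IR
///   %ret = call i32 @callee(i32 %x)
///   ret i32 %ret
/// the call is in tail call position, whereas a call followed by another
/// side-effecting instruction, or by a return of an unrelated value, is not.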
bool llvm::isInTailCallPosition(ImmutableCallSite CS,
                                const TargetLowering &TLI) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const TerminatorInst *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TLI.getTargetMachine().Options.GuaranteedTailCallOpt ||
       !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = prior(prior(ExitBB->end())); ;
         --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(BBI))
        return false;
    }

  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore noalias because it doesn't affect the call sequence.
  const Function *F = ExitBB->getParent();
  AttributeSet CallerAttrs = F->getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias) !=
      AttrBuilder(CallerAttrs, AttributeSet::ReturnIndex).
        removeAttribute(Attribute::NoAlias))
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeSet::ReturnIndex, Attribute::SExt))
    return false;
  // Otherwise, make sure the value being returned is the call's result,
  // looking through any no-op conversions.
  SmallVector<unsigned, 4> Els1, Els2;
  return sameNoopInput(Ret->getOperand(0), I, Els1, Els2, TLI);
}