BasicTargetTransformInfo.cpp revision 263508
//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "basictti"
#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Target/TargetLowering.h"
#include <utility>

using namespace llvm;

namespace {

class BasicTTI : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }

public:
  BasicTTI() : ImmutablePass(ID), TM(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  virtual bool hasBranchDivergence() const;

  /// \name Scalar TTI Implementations
  /// @{

  virtual bool isLegalAddImmediate(int64_t imm) const;
  virtual bool isLegalICmpImmediate(int64_t imm) const;
  virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const;
  virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const;
  virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
  virtual bool isTypeLegal(Type *Ty) const;
  virtual unsigned getJumpBufAlignment() const;
  virtual unsigned getJumpBufSize() const;
  virtual bool shouldBuildLookupTables() const;
  virtual bool haveFastSqrt(Type *Ty) const;
  virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const;
  virtual unsigned getMaximumUnrollFactor() const;
  virtual unsigned getRegisterBitWidth(bool Vector) const;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const;
  virtual unsigned getCFInstrCost(unsigned Opcode) const;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const;
  virtual unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                         ArrayRef<Type*> Tys) const;
  virtual unsigned getNumberOfParts(Type *Tp) const;
  virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const;
  virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const;

  /// @}
};

}

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}

bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
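  // The addressing mode being tested has the form
  //   BaseGV + BaseOffset + (HasBaseReg ? BaseReg : 0) + Scale * IndexReg.
  // Fill in a TargetLoweringBase::AddrMode and let the target decide whether
  // it can fold an address of that shape into a memory operation.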
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(Loop *, UnrollingPreferences &) const { }

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

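// Charge one insertelement per lane when the scalarized result must be
// reassembled into a vector, and one extractelement per lane when vector
// operands must be taken apart. For example, with both Insert and Extract
// set, a <4 x float> value is charged four inserts plus four extracts at
// whatever cost TopTTI assigns to those vector instructions.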
unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
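  // LT.first is the number of registers the type is split into during
  // legalization and LT.second is the legalized machine type. For example, on
  // a target with 128-bit vector registers an <8 x i32> would legalize to two
  // v4i32 parts, giving LT.first == 2.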

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);
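  // The cast is costed on the legalized types: when the source and destination
  // legalize to the same number of equally sized registers, several of the
  // conversions below turn out to be free.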

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcasts between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true) : 0) +
           (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false) : 0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }
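  // For example, a select whose condition is <4 x i1> is costed as a vector
  // select on the legalized value type rather than as a scalar select.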

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting the results.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  return 1;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);
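  // For example, an <8 x i32> access on a target with 128-bit registers
  // legalizes to two v4i32 operations, so LT.first (and thus the returned
  // cost) is 2.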

  // Assume that all loads of legal types cost 1.
  return LT.first;
}

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

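    // Cost is one unit per scalar call plus the insert/extract overhead
    // accumulated above for the vector result and operands.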
    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:    ISD = ISD::FSQRT;  break;
  case Intrinsic::sin:     ISD = ISD::FSIN;   break;
  case Intrinsic::cos:     ISD = ISD::FCOS;   break;
  case Intrinsic::exp:     ISD = ISD::FEXP;   break;
  case Intrinsic::exp2:    ISD = ISD::FEXP2;  break;
  case Intrinsic::log:     ISD = ISD::FLOG;   break;
  case Intrinsic::log10:   ISD = ISD::FLOG10; break;
  case Intrinsic::log2:    ISD = ISD::FLOG2;  break;
  case Intrinsic::fabs:    ISD = ISD::FABS;   break;
  case Intrinsic::copysign: ISD = ISD::FCOPYSIGN; break;
  case Intrinsic::floor:   ISD = ISD::FFLOOR; break;
  case Intrinsic::ceil:    ISD = ISD::FCEIL;  break;
  case Intrinsic::trunc:   ISD = ISD::FTRUNC; break;
  case Intrinsic::nearbyint:
                           ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:    ISD = ISD::FRINT;  break;
  case Intrinsic::round:   ISD = ISD::FROUND; break;
  case Intrinsic::pow:     ISD = ISD::FPOW;   break;
  case Intrinsic::fma:     ISD = ISD::FMA;    break;
  case Intrinsic::fmuladd: ISD = ISD::FMA;    break; // FIXME: mul + add?
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2;
  }

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}

unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
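  // A tree reduction halves the vector on each level, so reducing, e.g., an
  // <8 x float> takes Log2_32(8) == 3 rounds of shuffles and arithmetic.
  // Pairwise reductions are modelled with two shuffles per level, hence the
  // (IsPairwise + 1) factor below.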
  unsigned ArithCost = NumReduxLevels *
    TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}