1//===- CodeGenPrepare.cpp - Prepare a function for code generation --------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass munges the code in the input function to better prepare it for
10// SelectionDAG-based code generation. This works around limitations in it's
11// basic-block-at-a-time approach. It should eventually be removed.
12//
13//===----------------------------------------------------------------------===//
14
15#include "llvm/CodeGen/CodeGenPrepare.h"
16#include "llvm/ADT/APInt.h"
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/DenseMap.h"
19#include "llvm/ADT/MapVector.h"
20#include "llvm/ADT/PointerIntPair.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallPtrSet.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Statistic.h"
25#include "llvm/Analysis/BlockFrequencyInfo.h"
26#include "llvm/Analysis/BranchProbabilityInfo.h"
27#include "llvm/Analysis/InstructionSimplify.h"
28#include "llvm/Analysis/LoopInfo.h"
29#include "llvm/Analysis/ProfileSummaryInfo.h"
30#include "llvm/Analysis/TargetLibraryInfo.h"
31#include "llvm/Analysis/TargetTransformInfo.h"
32#include "llvm/Analysis/ValueTracking.h"
33#include "llvm/Analysis/VectorUtils.h"
34#include "llvm/CodeGen/Analysis.h"
35#include "llvm/CodeGen/BasicBlockSectionsProfileReader.h"
36#include "llvm/CodeGen/ISDOpcodes.h"
37#include "llvm/CodeGen/MachineValueType.h"
38#include "llvm/CodeGen/SelectionDAGNodes.h"
39#include "llvm/CodeGen/TargetLowering.h"
40#include "llvm/CodeGen/TargetPassConfig.h"
41#include "llvm/CodeGen/TargetSubtargetInfo.h"
42#include "llvm/CodeGen/ValueTypes.h"
43#include "llvm/Config/llvm-config.h"
44#include "llvm/IR/Argument.h"
45#include "llvm/IR/Attributes.h"
46#include "llvm/IR/BasicBlock.h"
47#include "llvm/IR/Constant.h"
48#include "llvm/IR/Constants.h"
49#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/DebugInfo.h"
51#include "llvm/IR/DerivedTypes.h"
52#include "llvm/IR/Dominators.h"
53#include "llvm/IR/Function.h"
54#include "llvm/IR/GetElementPtrTypeIterator.h"
55#include "llvm/IR/GlobalValue.h"
56#include "llvm/IR/GlobalVariable.h"
57#include "llvm/IR/IRBuilder.h"
58#include "llvm/IR/InlineAsm.h"
59#include "llvm/IR/InstrTypes.h"
60#include "llvm/IR/Instruction.h"
61#include "llvm/IR/Instructions.h"
62#include "llvm/IR/IntrinsicInst.h"
63#include "llvm/IR/Intrinsics.h"
64#include "llvm/IR/IntrinsicsAArch64.h"
65#include "llvm/IR/LLVMContext.h"
66#include "llvm/IR/MDBuilder.h"
67#include "llvm/IR/Module.h"
68#include "llvm/IR/Operator.h"
69#include "llvm/IR/PatternMatch.h"
70#include "llvm/IR/ProfDataUtils.h"
71#include "llvm/IR/Statepoint.h"
72#include "llvm/IR/Type.h"
73#include "llvm/IR/Use.h"
74#include "llvm/IR/User.h"
75#include "llvm/IR/Value.h"
76#include "llvm/IR/ValueHandle.h"
77#include "llvm/IR/ValueMap.h"
78#include "llvm/InitializePasses.h"
79#include "llvm/Pass.h"
80#include "llvm/Support/BlockFrequency.h"
81#include "llvm/Support/BranchProbability.h"
82#include "llvm/Support/Casting.h"
83#include "llvm/Support/CommandLine.h"
84#include "llvm/Support/Compiler.h"
85#include "llvm/Support/Debug.h"
86#include "llvm/Support/ErrorHandling.h"
87#include "llvm/Support/MathExtras.h"
88#include "llvm/Support/raw_ostream.h"
89#include "llvm/Target/TargetMachine.h"
90#include "llvm/Target/TargetOptions.h"
91#include "llvm/Transforms/Utils/BasicBlockUtils.h"
92#include "llvm/Transforms/Utils/BypassSlowDivision.h"
93#include "llvm/Transforms/Utils/Local.h"
94#include "llvm/Transforms/Utils/SimplifyLibCalls.h"
95#include "llvm/Transforms/Utils/SizeOpts.h"
96#include <algorithm>
97#include <cassert>
98#include <cstdint>
99#include <iterator>
100#include <limits>
101#include <memory>
102#include <optional>
103#include <utility>
104#include <vector>
105
106using namespace llvm;
107using namespace llvm::PatternMatch;
108
109#define DEBUG_TYPE "codegenprepare"
110
111STATISTIC(NumBlocksElim, "Number of blocks eliminated");
112STATISTIC(NumPHIsElim, "Number of trivial PHIs eliminated");
113STATISTIC(NumGEPsElim, "Number of GEPs converted to casts");
114STATISTIC(NumCmpUses, "Number of uses of Cmp expressions replaced with uses of "
115                      "sunken Cmps");
116STATISTIC(NumCastUses, "Number of uses of Cast expressions replaced with uses "
117                       "of sunken Casts");
118STATISTIC(NumMemoryInsts, "Number of memory instructions whose address "
119                          "computations were sunk");
120STATISTIC(NumMemoryInstsPhiCreated,
121          "Number of phis created when address "
122          "computations were sunk to memory instructions");
123STATISTIC(NumMemoryInstsSelectCreated,
124          "Number of select created when address "
125          "computations were sunk to memory instructions");
126STATISTIC(NumExtsMoved, "Number of [s|z]ext instructions combined with loads");
127STATISTIC(NumExtUses, "Number of uses of [s|z]ext instructions optimized");
128STATISTIC(NumAndsAdded,
129          "Number of and mask instructions added to form ext loads");
130STATISTIC(NumAndUses, "Number of uses of and mask instructions optimized");
131STATISTIC(NumRetsDup, "Number of return instructions duplicated");
132STATISTIC(NumDbgValueMoved, "Number of debug value instructions moved");
133STATISTIC(NumSelectsExpanded, "Number of selects turned into branches");
134STATISTIC(NumStoreExtractExposed, "Number of store(extractelement) exposed");
135
136static cl::opt<bool> DisableBranchOpts(
137    "disable-cgp-branch-opts", cl::Hidden, cl::init(false),
138    cl::desc("Disable branch optimizations in CodeGenPrepare"));
139
140static cl::opt<bool>
141    DisableGCOpts("disable-cgp-gc-opts", cl::Hidden, cl::init(false),
142                  cl::desc("Disable GC optimizations in CodeGenPrepare"));
143
144static cl::opt<bool>
145    DisableSelectToBranch("disable-cgp-select2branch", cl::Hidden,
146                          cl::init(false),
147                          cl::desc("Disable select to branch conversion."));
148
149static cl::opt<bool>
150    AddrSinkUsingGEPs("addr-sink-using-gep", cl::Hidden, cl::init(true),
151                      cl::desc("Address sinking in CGP using GEPs."));
152
153static cl::opt<bool>
154    EnableAndCmpSinking("enable-andcmp-sinking", cl::Hidden, cl::init(true),
155                        cl::desc("Enable sinkinig and/cmp into branches."));
156
157static cl::opt<bool> DisableStoreExtract(
158    "disable-cgp-store-extract", cl::Hidden, cl::init(false),
159    cl::desc("Disable store(extract) optimizations in CodeGenPrepare"));
160
161static cl::opt<bool> StressStoreExtract(
162    "stress-cgp-store-extract", cl::Hidden, cl::init(false),
163    cl::desc("Stress test store(extract) optimizations in CodeGenPrepare"));
164
165static cl::opt<bool> DisableExtLdPromotion(
166    "disable-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
167    cl::desc("Disable ext(promotable(ld)) -> promoted(ext(ld)) optimization in "
168             "CodeGenPrepare"));
169
170static cl::opt<bool> StressExtLdPromotion(
171    "stress-cgp-ext-ld-promotion", cl::Hidden, cl::init(false),
172    cl::desc("Stress test ext(promotable(ld)) -> promoted(ext(ld)) "
173             "optimization in CodeGenPrepare"));
174
175static cl::opt<bool> DisablePreheaderProtect(
176    "disable-preheader-prot", cl::Hidden, cl::init(false),
177    cl::desc("Disable protection against removing loop preheaders"));
178
179static cl::opt<bool> ProfileGuidedSectionPrefix(
180    "profile-guided-section-prefix", cl::Hidden, cl::init(true),
181    cl::desc("Use profile info to add section prefix for hot/cold functions"));
182
183static cl::opt<bool> ProfileUnknownInSpecialSection(
184    "profile-unknown-in-special-section", cl::Hidden,
185    cl::desc("In profiling mode like sampleFDO, if a function doesn't have "
186             "profile, we cannot tell the function is cold for sure because "
187             "it may be a function newly added without ever being sampled. "
188             "With the flag enabled, compiler can put such profile unknown "
189             "functions into a special section, so runtime system can choose "
190             "to handle it in a different way than .text section, to save "
191             "RAM for example. "));
192
193static cl::opt<bool> BBSectionsGuidedSectionPrefix(
194    "bbsections-guided-section-prefix", cl::Hidden, cl::init(true),
195    cl::desc("Use the basic-block-sections profile to determine the text "
196             "section prefix for hot functions. Functions with "
197             "basic-block-sections profile will be placed in `.text.hot` "
198             "regardless of their FDO profile info. Other functions won't be "
199             "impacted, i.e., their prefixes will be decided by FDO/sampleFDO "
200             "profiles."));
201
202static cl::opt<uint64_t> FreqRatioToSkipMerge(
203    "cgp-freq-ratio-to-skip-merge", cl::Hidden, cl::init(2),
204    cl::desc("Skip merging empty blocks if (frequency of empty block) / "
205             "(frequency of destination block) is greater than this ratio"));
206
207static cl::opt<bool> ForceSplitStore(
208    "force-split-store", cl::Hidden, cl::init(false),
209    cl::desc("Force store splitting no matter what the target query says."));
210
211static cl::opt<bool> EnableTypePromotionMerge(
212    "cgp-type-promotion-merge", cl::Hidden,
213    cl::desc("Enable merging of redundant sexts when one is dominating"
214             " the other."),
215    cl::init(true));
216
217static cl::opt<bool> DisableComplexAddrModes(
218    "disable-complex-addr-modes", cl::Hidden, cl::init(false),
219    cl::desc("Disables combining addressing modes with different parts "
220             "in optimizeMemoryInst."));
221
222static cl::opt<bool>
223    AddrSinkNewPhis("addr-sink-new-phis", cl::Hidden, cl::init(false),
224                    cl::desc("Allow creation of Phis in Address sinking."));
225
226static cl::opt<bool> AddrSinkNewSelects(
227    "addr-sink-new-select", cl::Hidden, cl::init(true),
228    cl::desc("Allow creation of selects in Address sinking."));
229
230static cl::opt<bool> AddrSinkCombineBaseReg(
231    "addr-sink-combine-base-reg", cl::Hidden, cl::init(true),
232    cl::desc("Allow combining of BaseReg field in Address sinking."));
233
234static cl::opt<bool> AddrSinkCombineBaseGV(
235    "addr-sink-combine-base-gv", cl::Hidden, cl::init(true),
236    cl::desc("Allow combining of BaseGV field in Address sinking."));
237
238static cl::opt<bool> AddrSinkCombineBaseOffs(
239    "addr-sink-combine-base-offs", cl::Hidden, cl::init(true),
240    cl::desc("Allow combining of BaseOffs field in Address sinking."));
241
242static cl::opt<bool> AddrSinkCombineScaledReg(
243    "addr-sink-combine-scaled-reg", cl::Hidden, cl::init(true),
244    cl::desc("Allow combining of ScaledReg field in Address sinking."));
245
246static cl::opt<bool>
247    EnableGEPOffsetSplit("cgp-split-large-offset-gep", cl::Hidden,
248                         cl::init(true),
249                         cl::desc("Enable splitting large offset of GEP."));
250
251static cl::opt<bool> EnableICMP_EQToICMP_ST(
252    "cgp-icmp-eq2icmp-st", cl::Hidden, cl::init(false),
253    cl::desc("Enable ICMP_EQ to ICMP_S(L|G)T conversion."));
254
255static cl::opt<bool>
256    VerifyBFIUpdates("cgp-verify-bfi-updates", cl::Hidden, cl::init(false),
257                     cl::desc("Enable BFI update verification for "
258                              "CodeGenPrepare."));
259
260static cl::opt<bool>
261    OptimizePhiTypes("cgp-optimize-phi-types", cl::Hidden, cl::init(true),
262                     cl::desc("Enable converting phi types in CodeGenPrepare"));
263
264static cl::opt<unsigned>
265    HugeFuncThresholdInCGPP("cgpp-huge-func", cl::init(10000), cl::Hidden,
266                            cl::desc("Least BB number of huge function."));
267
268static cl::opt<unsigned>
269    MaxAddressUsersToScan("cgp-max-address-users-to-scan", cl::init(100),
270                          cl::Hidden,
271                          cl::desc("Max number of address users to look at"));
272
273static cl::opt<bool>
274    DisableDeletePHIs("disable-cgp-delete-phis", cl::Hidden, cl::init(false),
275                      cl::desc("Disable elimination of dead PHI nodes."));
276
277namespace {
278
279enum ExtType {
280  ZeroExtension, // Zero extension has been seen.
281  SignExtension, // Sign extension has been seen.
282  BothExtension  // This extension type is used if we saw sext after
283                 // ZeroExtension had been set, or if we saw zext after
284                 // SignExtension had been set. It makes the type
285                 // information of a promoted instruction invalid.
286};
287
288enum ModifyDT {
289  NotModifyDT, // Not Modify any DT.
290  ModifyBBDT,  // Modify the Basic Block Dominator Tree.
291  ModifyInstDT // Modify the Instruction Dominator in a Basic Block,
292               // This usually means we move/delete/insert instruction
293               // in a Basic Block. So we should re-iterate instructions
294               // in such Basic Block.
295};
296
297using SetOfInstrs = SmallPtrSet<Instruction *, 16>;
298using TypeIsSExt = PointerIntPair<Type *, 2, ExtType>;
299using InstrToOrigTy = DenseMap<Instruction *, TypeIsSExt>;
300using SExts = SmallVector<Instruction *, 16>;
301using ValueToSExts = MapVector<Value *, SExts>;
302
303class TypePromotionTransaction;
304
305class CodeGenPrepare {
306  friend class CodeGenPrepareLegacyPass;
307  const TargetMachine *TM = nullptr;
308  const TargetSubtargetInfo *SubtargetInfo = nullptr;
309  const TargetLowering *TLI = nullptr;
310  const TargetRegisterInfo *TRI = nullptr;
311  const TargetTransformInfo *TTI = nullptr;
312  const BasicBlockSectionsProfileReader *BBSectionsProfileReader = nullptr;
313  const TargetLibraryInfo *TLInfo = nullptr;
314  LoopInfo *LI = nullptr;
315  std::unique_ptr<BlockFrequencyInfo> BFI;
316  std::unique_ptr<BranchProbabilityInfo> BPI;
317  ProfileSummaryInfo *PSI = nullptr;
318
319  /// As we scan instructions optimizing them, this is the next instruction
320  /// to optimize. Transforms that can invalidate this should update it.
321  BasicBlock::iterator CurInstIterator;
322
323  /// Keeps track of non-local addresses that have been sunk into a block.
324  /// This allows us to avoid inserting duplicate code for blocks with
325  /// multiple load/stores of the same address. The usage of WeakTrackingVH
326  /// enables SunkAddrs to be treated as a cache whose entries can be
327  /// invalidated if a sunken address computation has been erased.
328  ValueMap<Value *, WeakTrackingVH> SunkAddrs;
329
330  /// Keeps track of all instructions inserted for the current function.
331  SetOfInstrs InsertedInsts;
332
333  /// Keeps track of the type of the related instruction before their
334  /// promotion for the current function.
335  InstrToOrigTy PromotedInsts;
336
337  /// Keep track of instructions removed during promotion.
338  SetOfInstrs RemovedInsts;
339
340  /// Keep track of sext chains based on their initial value.
341  DenseMap<Value *, Instruction *> SeenChainsForSExt;
342
343  /// Keep track of GEPs accessing the same data structures such as structs or
344  /// arrays that are candidates to be split later because of their large
345  /// size.
346  MapVector<AssertingVH<Value>,
347            SmallVector<std::pair<AssertingVH<GetElementPtrInst>, int64_t>, 32>>
348      LargeOffsetGEPMap;
349
350  /// Keep track of new GEP base after splitting the GEPs having large offset.
351  SmallSet<AssertingVH<Value>, 2> NewGEPBases;
352
353  /// Map serial numbers to Large offset GEPs.
354  DenseMap<AssertingVH<GetElementPtrInst>, int> LargeOffsetGEPID;
355
356  /// Keep track of SExt promoted.
357  ValueToSExts ValToSExtendedUses;
358
359  /// True if the function has the OptSize attribute.
360  bool OptSize;
361
362  /// DataLayout for the Function being processed.
363  const DataLayout *DL = nullptr;
364
365  /// Building the dominator tree can be expensive, so we only build it
366  /// lazily and update it when required.
367  std::unique_ptr<DominatorTree> DT;
368
369public:
370  CodeGenPrepare(){};
371  CodeGenPrepare(const TargetMachine *TM) : TM(TM){};
372  /// If encounter huge function, we need to limit the build time.
373  bool IsHugeFunc = false;
374
375  /// FreshBBs is like worklist, it collected the updated BBs which need
376  /// to be optimized again.
377  /// Note: Consider building time in this pass, when a BB updated, we need
378  /// to insert such BB into FreshBBs for huge function.
379  SmallSet<BasicBlock *, 32> FreshBBs;
380
381  void releaseMemory() {
382    // Clear per function information.
383    InsertedInsts.clear();
384    PromotedInsts.clear();
385    FreshBBs.clear();
386    BPI.reset();
387    BFI.reset();
388  }
389
390  bool run(Function &F, FunctionAnalysisManager &AM);
391
392private:
393  template <typename F>
394  void resetIteratorIfInvalidatedWhileCalling(BasicBlock *BB, F f) {
395    // Substituting can cause recursive simplifications, which can invalidate
396    // our iterator.  Use a WeakTrackingVH to hold onto it in case this
397    // happens.
398    Value *CurValue = &*CurInstIterator;
399    WeakTrackingVH IterHandle(CurValue);
400
401    f();
402
403    // If the iterator instruction was recursively deleted, start over at the
404    // start of the block.
405    if (IterHandle != CurValue) {
406      CurInstIterator = BB->begin();
407      SunkAddrs.clear();
408    }
409  }
410
411  // Get the DominatorTree, building if necessary.
412  DominatorTree &getDT(Function &F) {
413    if (!DT)
414      DT = std::make_unique<DominatorTree>(F);
415    return *DT;
416  }
417
418  void removeAllAssertingVHReferences(Value *V);
419  bool eliminateAssumptions(Function &F);
420  bool eliminateFallThrough(Function &F, DominatorTree *DT = nullptr);
421  bool eliminateMostlyEmptyBlocks(Function &F);
422  BasicBlock *findDestBlockOfMergeableEmptyBlock(BasicBlock *BB);
423  bool canMergeBlocks(const BasicBlock *BB, const BasicBlock *DestBB) const;
424  void eliminateMostlyEmptyBlock(BasicBlock *BB);
425  bool isMergingEmptyBlockProfitable(BasicBlock *BB, BasicBlock *DestBB,
426                                     bool isPreheader);
427  bool makeBitReverse(Instruction &I);
428  bool optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT);
429  bool optimizeInst(Instruction *I, ModifyDT &ModifiedDT);
430  bool optimizeMemoryInst(Instruction *MemoryInst, Value *Addr, Type *AccessTy,
431                          unsigned AddrSpace);
432  bool optimizeGatherScatterInst(Instruction *MemoryInst, Value *Ptr);
433  bool optimizeInlineAsmInst(CallInst *CS);
434  bool optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT);
435  bool optimizeExt(Instruction *&I);
436  bool optimizeExtUses(Instruction *I);
437  bool optimizeLoadExt(LoadInst *Load);
438  bool optimizeShiftInst(BinaryOperator *BO);
439  bool optimizeFunnelShift(IntrinsicInst *Fsh);
440  bool optimizeSelectInst(SelectInst *SI);
441  bool optimizeShuffleVectorInst(ShuffleVectorInst *SVI);
442  bool optimizeSwitchType(SwitchInst *SI);
443  bool optimizeSwitchPhiConstants(SwitchInst *SI);
444  bool optimizeSwitchInst(SwitchInst *SI);
445  bool optimizeExtractElementInst(Instruction *Inst);
446  bool dupRetToEnableTailCallOpts(BasicBlock *BB, ModifyDT &ModifiedDT);
447  bool fixupDbgValue(Instruction *I);
448  bool fixupDPValue(DPValue &I);
449  bool fixupDPValuesOnInst(Instruction &I);
450  bool placeDbgValues(Function &F);
451  bool placePseudoProbes(Function &F);
452  bool canFormExtLd(const SmallVectorImpl<Instruction *> &MovedExts,
453                    LoadInst *&LI, Instruction *&Inst, bool HasPromoted);
454  bool tryToPromoteExts(TypePromotionTransaction &TPT,
455                        const SmallVectorImpl<Instruction *> &Exts,
456                        SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
457                        unsigned CreatedInstsCost = 0);
458  bool mergeSExts(Function &F);
459  bool splitLargeGEPOffsets();
460  bool optimizePhiType(PHINode *Inst, SmallPtrSetImpl<PHINode *> &Visited,
461                       SmallPtrSetImpl<Instruction *> &DeletedInstrs);
462  bool optimizePhiTypes(Function &F);
463  bool performAddressTypePromotion(
464      Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
465      bool HasPromoted, TypePromotionTransaction &TPT,
466      SmallVectorImpl<Instruction *> &SpeculativelyMovedExts);
467  bool splitBranchCondition(Function &F, ModifyDT &ModifiedDT);
468  bool simplifyOffsetableRelocate(GCStatepointInst &I);
469
470  bool tryToSinkFreeOperands(Instruction *I);
471  bool replaceMathCmpWithIntrinsic(BinaryOperator *BO, Value *Arg0, Value *Arg1,
472                                   CmpInst *Cmp, Intrinsic::ID IID);
473  bool optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT);
474  bool combineToUSubWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
475  bool combineToUAddWithOverflow(CmpInst *Cmp, ModifyDT &ModifiedDT);
476  void verifyBFIUpdates(Function &F);
477  bool _run(Function &F);
478};
479
480class CodeGenPrepareLegacyPass : public FunctionPass {
481public:
482  static char ID; // Pass identification, replacement for typeid
483
484  CodeGenPrepareLegacyPass() : FunctionPass(ID) {
485    initializeCodeGenPrepareLegacyPassPass(*PassRegistry::getPassRegistry());
486  }
487
488  bool runOnFunction(Function &F) override;
489
490  StringRef getPassName() const override { return "CodeGen Prepare"; }
491
492  void getAnalysisUsage(AnalysisUsage &AU) const override {
493    // FIXME: When we can selectively preserve passes, preserve the domtree.
494    AU.addRequired<ProfileSummaryInfoWrapperPass>();
495    AU.addRequired<TargetLibraryInfoWrapperPass>();
496    AU.addRequired<TargetPassConfig>();
497    AU.addRequired<TargetTransformInfoWrapperPass>();
498    AU.addRequired<LoopInfoWrapperPass>();
499    AU.addUsedIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
500  }
501};
502
503} // end anonymous namespace
504
505char CodeGenPrepareLegacyPass::ID = 0;
506
507bool CodeGenPrepareLegacyPass::runOnFunction(Function &F) {
508  if (skipFunction(F))
509    return false;
510  auto TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
511  CodeGenPrepare CGP(TM);
512  CGP.DL = &F.getParent()->getDataLayout();
513  CGP.SubtargetInfo = TM->getSubtargetImpl(F);
514  CGP.TLI = CGP.SubtargetInfo->getTargetLowering();
515  CGP.TRI = CGP.SubtargetInfo->getRegisterInfo();
516  CGP.TLInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
517  CGP.TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
518  CGP.LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
519  CGP.BPI.reset(new BranchProbabilityInfo(F, *CGP.LI));
520  CGP.BFI.reset(new BlockFrequencyInfo(F, *CGP.BPI, *CGP.LI));
521  CGP.PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
522  auto BBSPRWP =
523      getAnalysisIfAvailable<BasicBlockSectionsProfileReaderWrapperPass>();
524  CGP.BBSectionsProfileReader = BBSPRWP ? &BBSPRWP->getBBSPR() : nullptr;
525
526  return CGP._run(F);
527}
528
529INITIALIZE_PASS_BEGIN(CodeGenPrepareLegacyPass, DEBUG_TYPE,
530                      "Optimize for code generation", false, false)
531INITIALIZE_PASS_DEPENDENCY(BasicBlockSectionsProfileReaderWrapperPass)
532INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
533INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
534INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
535INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
536INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
537INITIALIZE_PASS_END(CodeGenPrepareLegacyPass, DEBUG_TYPE,
538                    "Optimize for code generation", false, false)
539
540FunctionPass *llvm::createCodeGenPrepareLegacyPass() {
541  return new CodeGenPrepareLegacyPass();
542}
543
544PreservedAnalyses CodeGenPreparePass::run(Function &F,
545                                          FunctionAnalysisManager &AM) {
546  CodeGenPrepare CGP(TM);
547
548  bool Changed = CGP.run(F, AM);
549  if (!Changed)
550    return PreservedAnalyses::all();
551
552  PreservedAnalyses PA;
553  PA.preserve<TargetLibraryAnalysis>();
554  PA.preserve<TargetIRAnalysis>();
555  PA.preserve<LoopAnalysis>();
556  return PA;
557}
558
559bool CodeGenPrepare::run(Function &F, FunctionAnalysisManager &AM) {
560  DL = &F.getParent()->getDataLayout();
561  SubtargetInfo = TM->getSubtargetImpl(F);
562  TLI = SubtargetInfo->getTargetLowering();
563  TRI = SubtargetInfo->getRegisterInfo();
564  TLInfo = &AM.getResult<TargetLibraryAnalysis>(F);
565  TTI = &AM.getResult<TargetIRAnalysis>(F);
566  LI = &AM.getResult<LoopAnalysis>(F);
567  BPI.reset(new BranchProbabilityInfo(F, *LI));
568  BFI.reset(new BlockFrequencyInfo(F, *BPI, *LI));
569  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
570  PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
571  BBSectionsProfileReader =
572      AM.getCachedResult<BasicBlockSectionsProfileReaderAnalysis>(F);
573  return _run(F);
574}
575
576bool CodeGenPrepare::_run(Function &F) {
577  bool EverMadeChange = false;
578
579  OptSize = F.hasOptSize();
580  // Use the basic-block-sections profile to promote hot functions to .text.hot
581  // if requested.
582  if (BBSectionsGuidedSectionPrefix && BBSectionsProfileReader &&
583      BBSectionsProfileReader->isFunctionHot(F.getName())) {
584    F.setSectionPrefix("hot");
585  } else if (ProfileGuidedSectionPrefix) {
586    // The hot attribute overwrites profile count based hotness while profile
587    // counts based hotness overwrite the cold attribute.
588    // This is a conservative behabvior.
589    if (F.hasFnAttribute(Attribute::Hot) ||
590        PSI->isFunctionHotInCallGraph(&F, *BFI))
591      F.setSectionPrefix("hot");
592    // If PSI shows this function is not hot, we will placed the function
593    // into unlikely section if (1) PSI shows this is a cold function, or
594    // (2) the function has a attribute of cold.
595    else if (PSI->isFunctionColdInCallGraph(&F, *BFI) ||
596             F.hasFnAttribute(Attribute::Cold))
597      F.setSectionPrefix("unlikely");
598    else if (ProfileUnknownInSpecialSection && PSI->hasPartialSampleProfile() &&
599             PSI->isFunctionHotnessUnknown(F))
600      F.setSectionPrefix("unknown");
601  }
602
603  /// This optimization identifies DIV instructions that can be
604  /// profitably bypassed and carried out with a shorter, faster divide.
605  if (!OptSize && !PSI->hasHugeWorkingSetSize() && TLI->isSlowDivBypassed()) {
606    const DenseMap<unsigned int, unsigned int> &BypassWidths =
607        TLI->getBypassSlowDivWidths();
608    BasicBlock *BB = &*F.begin();
609    while (BB != nullptr) {
610      // bypassSlowDivision may create new BBs, but we don't want to reapply the
611      // optimization to those blocks.
612      BasicBlock *Next = BB->getNextNode();
613      // F.hasOptSize is already checked in the outer if statement.
614      if (!llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
615        EverMadeChange |= bypassSlowDivision(BB, BypassWidths);
616      BB = Next;
617    }
618  }
619
620  // Get rid of @llvm.assume builtins before attempting to eliminate empty
621  // blocks, since there might be blocks that only contain @llvm.assume calls
622  // (plus arguments that we can get rid of).
623  EverMadeChange |= eliminateAssumptions(F);
624
625  // Eliminate blocks that contain only PHI nodes and an
626  // unconditional branch.
627  EverMadeChange |= eliminateMostlyEmptyBlocks(F);
628
629  ModifyDT ModifiedDT = ModifyDT::NotModifyDT;
630  if (!DisableBranchOpts)
631    EverMadeChange |= splitBranchCondition(F, ModifiedDT);
632
633  // Split some critical edges where one of the sources is an indirect branch,
634  // to help generate sane code for PHIs involving such edges.
635  EverMadeChange |=
636      SplitIndirectBrCriticalEdges(F, /*IgnoreBlocksWithoutPHI=*/true);
637
638  // If we are optimzing huge function, we need to consider the build time.
639  // Because the basic algorithm's complex is near O(N!).
640  IsHugeFunc = F.size() > HugeFuncThresholdInCGPP;
641
642  // Transformations above may invalidate dominator tree and/or loop info.
643  DT.reset();
644  LI->releaseMemory();
645  LI->analyze(getDT(F));
646
647  bool MadeChange = true;
648  bool FuncIterated = false;
649  while (MadeChange) {
650    MadeChange = false;
651
652    for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
653      if (FuncIterated && !FreshBBs.contains(&BB))
654        continue;
655
656      ModifyDT ModifiedDTOnIteration = ModifyDT::NotModifyDT;
657      bool Changed = optimizeBlock(BB, ModifiedDTOnIteration);
658
659      if (ModifiedDTOnIteration == ModifyDT::ModifyBBDT)
660        DT.reset();
661
662      MadeChange |= Changed;
663      if (IsHugeFunc) {
664        // If the BB is updated, it may still has chance to be optimized.
665        // This usually happen at sink optimization.
666        // For example:
667        //
668        // bb0���
669        // %and = and i32 %a, 4
670        // %cmp = icmp eq i32 %and, 0
671        //
672        // If the %cmp sink to other BB, the %and will has chance to sink.
673        if (Changed)
674          FreshBBs.insert(&BB);
675        else if (FuncIterated)
676          FreshBBs.erase(&BB);
677      } else {
678        // For small/normal functions, we restart BB iteration if the dominator
679        // tree of the Function was changed.
680        if (ModifiedDTOnIteration != ModifyDT::NotModifyDT)
681          break;
682      }
683    }
684    // We have iterated all the BB in the (only work for huge) function.
685    FuncIterated = IsHugeFunc;
686
687    if (EnableTypePromotionMerge && !ValToSExtendedUses.empty())
688      MadeChange |= mergeSExts(F);
689    if (!LargeOffsetGEPMap.empty())
690      MadeChange |= splitLargeGEPOffsets();
691    MadeChange |= optimizePhiTypes(F);
692
693    if (MadeChange)
694      eliminateFallThrough(F, DT.get());
695
696#ifndef NDEBUG
697    if (MadeChange && VerifyLoopInfo)
698      LI->verify(getDT(F));
699#endif
700
701    // Really free removed instructions during promotion.
702    for (Instruction *I : RemovedInsts)
703      I->deleteValue();
704
705    EverMadeChange |= MadeChange;
706    SeenChainsForSExt.clear();
707    ValToSExtendedUses.clear();
708    RemovedInsts.clear();
709    LargeOffsetGEPMap.clear();
710    LargeOffsetGEPID.clear();
711  }
712
713  NewGEPBases.clear();
714  SunkAddrs.clear();
715
716  if (!DisableBranchOpts) {
717    MadeChange = false;
718    // Use a set vector to get deterministic iteration order. The order the
719    // blocks are removed may affect whether or not PHI nodes in successors
720    // are removed.
721    SmallSetVector<BasicBlock *, 8> WorkList;
722    for (BasicBlock &BB : F) {
723      SmallVector<BasicBlock *, 2> Successors(successors(&BB));
724      MadeChange |= ConstantFoldTerminator(&BB, true);
725      if (!MadeChange)
726        continue;
727
728      for (BasicBlock *Succ : Successors)
729        if (pred_empty(Succ))
730          WorkList.insert(Succ);
731    }
732
733    // Delete the dead blocks and any of their dead successors.
734    MadeChange |= !WorkList.empty();
735    while (!WorkList.empty()) {
736      BasicBlock *BB = WorkList.pop_back_val();
737      SmallVector<BasicBlock *, 2> Successors(successors(BB));
738
739      DeleteDeadBlock(BB);
740
741      for (BasicBlock *Succ : Successors)
742        if (pred_empty(Succ))
743          WorkList.insert(Succ);
744    }
745
746    // Merge pairs of basic blocks with unconditional branches, connected by
747    // a single edge.
748    if (EverMadeChange || MadeChange)
749      MadeChange |= eliminateFallThrough(F);
750
751    EverMadeChange |= MadeChange;
752  }
753
754  if (!DisableGCOpts) {
755    SmallVector<GCStatepointInst *, 2> Statepoints;
756    for (BasicBlock &BB : F)
757      for (Instruction &I : BB)
758        if (auto *SP = dyn_cast<GCStatepointInst>(&I))
759          Statepoints.push_back(SP);
760    for (auto &I : Statepoints)
761      EverMadeChange |= simplifyOffsetableRelocate(*I);
762  }
763
764  // Do this last to clean up use-before-def scenarios introduced by other
765  // preparatory transforms.
766  EverMadeChange |= placeDbgValues(F);
767  EverMadeChange |= placePseudoProbes(F);
768
769#ifndef NDEBUG
770  if (VerifyBFIUpdates)
771    verifyBFIUpdates(F);
772#endif
773
774  return EverMadeChange;
775}
776
777bool CodeGenPrepare::eliminateAssumptions(Function &F) {
778  bool MadeChange = false;
779  for (BasicBlock &BB : F) {
780    CurInstIterator = BB.begin();
781    while (CurInstIterator != BB.end()) {
782      Instruction *I = &*(CurInstIterator++);
783      if (auto *Assume = dyn_cast<AssumeInst>(I)) {
784        MadeChange = true;
785        Value *Operand = Assume->getOperand(0);
786        Assume->eraseFromParent();
787
788        resetIteratorIfInvalidatedWhileCalling(&BB, [&]() {
789          RecursivelyDeleteTriviallyDeadInstructions(Operand, TLInfo, nullptr);
790        });
791      }
792    }
793  }
794  return MadeChange;
795}
796
797/// An instruction is about to be deleted, so remove all references to it in our
798/// GEP-tracking data strcutures.
799void CodeGenPrepare::removeAllAssertingVHReferences(Value *V) {
800  LargeOffsetGEPMap.erase(V);
801  NewGEPBases.erase(V);
802
803  auto GEP = dyn_cast<GetElementPtrInst>(V);
804  if (!GEP)
805    return;
806
807  LargeOffsetGEPID.erase(GEP);
808
809  auto VecI = LargeOffsetGEPMap.find(GEP->getPointerOperand());
810  if (VecI == LargeOffsetGEPMap.end())
811    return;
812
813  auto &GEPVector = VecI->second;
814  llvm::erase_if(GEPVector, [=](auto &Elt) { return Elt.first == GEP; });
815
816  if (GEPVector.empty())
817    LargeOffsetGEPMap.erase(VecI);
818}
819
820// Verify BFI has been updated correctly by recomputing BFI and comparing them.
821void LLVM_ATTRIBUTE_UNUSED CodeGenPrepare::verifyBFIUpdates(Function &F) {
822  DominatorTree NewDT(F);
823  LoopInfo NewLI(NewDT);
824  BranchProbabilityInfo NewBPI(F, NewLI, TLInfo);
825  BlockFrequencyInfo NewBFI(F, NewBPI, NewLI);
826  NewBFI.verifyMatch(*BFI);
827}
828
829/// Merge basic blocks which are connected by a single edge, where one of the
830/// basic blocks has a single successor pointing to the other basic block,
831/// which has a single predecessor.
832bool CodeGenPrepare::eliminateFallThrough(Function &F, DominatorTree *DT) {
833  bool Changed = false;
834  // Scan all of the blocks in the function, except for the entry block.
835  // Use a temporary array to avoid iterator being invalidated when
836  // deleting blocks.
837  SmallVector<WeakTrackingVH, 16> Blocks;
838  for (auto &Block : llvm::drop_begin(F))
839    Blocks.push_back(&Block);
840
841  SmallSet<WeakTrackingVH, 16> Preds;
842  for (auto &Block : Blocks) {
843    auto *BB = cast_or_null<BasicBlock>(Block);
844    if (!BB)
845      continue;
846    // If the destination block has a single pred, then this is a trivial
847    // edge, just collapse it.
848    BasicBlock *SinglePred = BB->getSinglePredecessor();
849
850    // Don't merge if BB's address is taken.
851    if (!SinglePred || SinglePred == BB || BB->hasAddressTaken())
852      continue;
853
854    // Make an effort to skip unreachable blocks.
855    if (DT && !DT->isReachableFromEntry(BB))
856      continue;
857
858    BranchInst *Term = dyn_cast<BranchInst>(SinglePred->getTerminator());
859    if (Term && !Term->isConditional()) {
860      Changed = true;
861      LLVM_DEBUG(dbgs() << "To merge:\n" << *BB << "\n\n\n");
862
863      // Merge BB into SinglePred and delete it.
864      MergeBlockIntoPredecessor(BB, /* DTU */ nullptr, LI, /* MSSAU */ nullptr,
865                                /* MemDep */ nullptr,
866                                /* PredecessorWithTwoSuccessors */ false, DT);
867      Preds.insert(SinglePred);
868
869      if (IsHugeFunc) {
870        // Update FreshBBs to optimize the merged BB.
871        FreshBBs.insert(SinglePred);
872        FreshBBs.erase(BB);
873      }
874    }
875  }
876
877  // (Repeatedly) merging blocks into their predecessors can create redundant
878  // debug intrinsics.
879  for (const auto &Pred : Preds)
880    if (auto *BB = cast_or_null<BasicBlock>(Pred))
881      RemoveRedundantDbgInstrs(BB);
882
883  return Changed;
884}
885
886/// Find a destination block from BB if BB is mergeable empty block.
887BasicBlock *CodeGenPrepare::findDestBlockOfMergeableEmptyBlock(BasicBlock *BB) {
888  // If this block doesn't end with an uncond branch, ignore it.
889  BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
890  if (!BI || !BI->isUnconditional())
891    return nullptr;
892
893  // If the instruction before the branch (skipping debug info) isn't a phi
894  // node, then other stuff is happening here.
895  BasicBlock::iterator BBI = BI->getIterator();
896  if (BBI != BB->begin()) {
897    --BBI;
898    while (isa<DbgInfoIntrinsic>(BBI)) {
899      if (BBI == BB->begin())
900        break;
901      --BBI;
902    }
903    if (!isa<DbgInfoIntrinsic>(BBI) && !isa<PHINode>(BBI))
904      return nullptr;
905  }
906
907  // Do not break infinite loops.
908  BasicBlock *DestBB = BI->getSuccessor(0);
909  if (DestBB == BB)
910    return nullptr;
911
912  if (!canMergeBlocks(BB, DestBB))
913    DestBB = nullptr;
914
915  return DestBB;
916}
917
918/// Eliminate blocks that contain only PHI nodes, debug info directives, and an
919/// unconditional branch. Passes before isel (e.g. LSR/loopsimplify) often split
920/// edges in ways that are non-optimal for isel. Start by eliminating these
921/// blocks so we can split them the way we want them.
922bool CodeGenPrepare::eliminateMostlyEmptyBlocks(Function &F) {
923  SmallPtrSet<BasicBlock *, 16> Preheaders;
924  SmallVector<Loop *, 16> LoopList(LI->begin(), LI->end());
925  while (!LoopList.empty()) {
926    Loop *L = LoopList.pop_back_val();
927    llvm::append_range(LoopList, *L);
928    if (BasicBlock *Preheader = L->getLoopPreheader())
929      Preheaders.insert(Preheader);
930  }
931
932  bool MadeChange = false;
933  // Copy blocks into a temporary array to avoid iterator invalidation issues
934  // as we remove them.
935  // Note that this intentionally skips the entry block.
936  SmallVector<WeakTrackingVH, 16> Blocks;
937  for (auto &Block : llvm::drop_begin(F)) {
938    // Delete phi nodes that could block deleting other empty blocks.
939    if (!DisableDeletePHIs)
940      MadeChange |= DeleteDeadPHIs(&Block, TLInfo);
941    Blocks.push_back(&Block);
942  }
943
944  for (auto &Block : Blocks) {
945    BasicBlock *BB = cast_or_null<BasicBlock>(Block);
946    if (!BB)
947      continue;
948    BasicBlock *DestBB = findDestBlockOfMergeableEmptyBlock(BB);
949    if (!DestBB ||
950        !isMergingEmptyBlockProfitable(BB, DestBB, Preheaders.count(BB)))
951      continue;
952
953    eliminateMostlyEmptyBlock(BB);
954    MadeChange = true;
955  }
956  return MadeChange;
957}
958
959bool CodeGenPrepare::isMergingEmptyBlockProfitable(BasicBlock *BB,
960                                                   BasicBlock *DestBB,
961                                                   bool isPreheader) {
962  // Do not delete loop preheaders if doing so would create a critical edge.
963  // Loop preheaders can be good locations to spill registers. If the
964  // preheader is deleted and we create a critical edge, registers may be
965  // spilled in the loop body instead.
966  if (!DisablePreheaderProtect && isPreheader &&
967      !(BB->getSinglePredecessor() &&
968        BB->getSinglePredecessor()->getSingleSuccessor()))
969    return false;
970
971  // Skip merging if the block's successor is also a successor to any callbr
972  // that leads to this block.
973  // FIXME: Is this really needed? Is this a correctness issue?
974  for (BasicBlock *Pred : predecessors(BB)) {
975    if (auto *CBI = dyn_cast<CallBrInst>((Pred)->getTerminator()))
976      for (unsigned i = 0, e = CBI->getNumSuccessors(); i != e; ++i)
977        if (DestBB == CBI->getSuccessor(i))
978          return false;
979  }
980
981  // Try to skip merging if the unique predecessor of BB is terminated by a
982  // switch or indirect branch instruction, and BB is used as an incoming block
983  // of PHIs in DestBB. In such case, merging BB and DestBB would cause ISel to
984  // add COPY instructions in the predecessor of BB instead of BB (if it is not
985  // merged). Note that the critical edge created by merging such blocks wont be
986  // split in MachineSink because the jump table is not analyzable. By keeping
987  // such empty block (BB), ISel will place COPY instructions in BB, not in the
988  // predecessor of BB.
989  BasicBlock *Pred = BB->getUniquePredecessor();
990  if (!Pred || !(isa<SwitchInst>(Pred->getTerminator()) ||
991                 isa<IndirectBrInst>(Pred->getTerminator())))
992    return true;
993
994  if (BB->getTerminator() != BB->getFirstNonPHIOrDbg())
995    return true;
996
997  // We use a simple cost heuristic which determine skipping merging is
998  // profitable if the cost of skipping merging is less than the cost of
999  // merging : Cost(skipping merging) < Cost(merging BB), where the
1000  // Cost(skipping merging) is Freq(BB) * (Cost(Copy) + Cost(Branch)), and
1001  // the Cost(merging BB) is Freq(Pred) * Cost(Copy).
1002  // Assuming Cost(Copy) == Cost(Branch), we could simplify it to :
1003  //   Freq(Pred) / Freq(BB) > 2.
1004  // Note that if there are multiple empty blocks sharing the same incoming
1005  // value for the PHIs in the DestBB, we consider them together. In such
1006  // case, Cost(merging BB) will be the sum of their frequencies.
1007
1008  if (!isa<PHINode>(DestBB->begin()))
1009    return true;
1010
1011  SmallPtrSet<BasicBlock *, 16> SameIncomingValueBBs;
1012
1013  // Find all other incoming blocks from which incoming values of all PHIs in
1014  // DestBB are the same as the ones from BB.
1015  for (BasicBlock *DestBBPred : predecessors(DestBB)) {
1016    if (DestBBPred == BB)
1017      continue;
1018
1019    if (llvm::all_of(DestBB->phis(), [&](const PHINode &DestPN) {
1020          return DestPN.getIncomingValueForBlock(BB) ==
1021                 DestPN.getIncomingValueForBlock(DestBBPred);
1022        }))
1023      SameIncomingValueBBs.insert(DestBBPred);
1024  }
1025
1026  // See if all BB's incoming values are same as the value from Pred. In this
1027  // case, no reason to skip merging because COPYs are expected to be place in
1028  // Pred already.
1029  if (SameIncomingValueBBs.count(Pred))
1030    return true;
1031
1032  BlockFrequency PredFreq = BFI->getBlockFreq(Pred);
1033  BlockFrequency BBFreq = BFI->getBlockFreq(BB);
1034
1035  for (auto *SameValueBB : SameIncomingValueBBs)
1036    if (SameValueBB->getUniquePredecessor() == Pred &&
1037        DestBB == findDestBlockOfMergeableEmptyBlock(SameValueBB))
1038      BBFreq += BFI->getBlockFreq(SameValueBB);
1039
1040  std::optional<BlockFrequency> Limit = BBFreq.mul(FreqRatioToSkipMerge);
1041  return !Limit || PredFreq <= *Limit;
1042}
1043
1044/// Return true if we can merge BB into DestBB if there is a single
1045/// unconditional branch between them, and BB contains no other non-phi
1046/// instructions.
1047bool CodeGenPrepare::canMergeBlocks(const BasicBlock *BB,
1048                                    const BasicBlock *DestBB) const {
1049  // We only want to eliminate blocks whose phi nodes are used by phi nodes in
1050  // the successor.  If there are more complex condition (e.g. preheaders),
1051  // don't mess around with them.
1052  for (const PHINode &PN : BB->phis()) {
1053    for (const User *U : PN.users()) {
1054      const Instruction *UI = cast<Instruction>(U);
1055      if (UI->getParent() != DestBB || !isa<PHINode>(UI))
1056        return false;
1057      // If User is inside DestBB block and it is a PHINode then check
1058      // incoming value. If incoming value is not from BB then this is
1059      // a complex condition (e.g. preheaders) we want to avoid here.
1060      if (UI->getParent() == DestBB) {
1061        if (const PHINode *UPN = dyn_cast<PHINode>(UI))
1062          for (unsigned I = 0, E = UPN->getNumIncomingValues(); I != E; ++I) {
1063            Instruction *Insn = dyn_cast<Instruction>(UPN->getIncomingValue(I));
1064            if (Insn && Insn->getParent() == BB &&
1065                Insn->getParent() != UPN->getIncomingBlock(I))
1066              return false;
1067          }
1068      }
1069    }
1070  }
1071
1072  // If BB and DestBB contain any common predecessors, then the phi nodes in BB
1073  // and DestBB may have conflicting incoming values for the block.  If so, we
1074  // can't merge the block.
1075  const PHINode *DestBBPN = dyn_cast<PHINode>(DestBB->begin());
1076  if (!DestBBPN)
1077    return true; // no conflict.
1078
1079  // Collect the preds of BB.
1080  SmallPtrSet<const BasicBlock *, 16> BBPreds;
1081  if (const PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1082    // It is faster to get preds from a PHI than with pred_iterator.
1083    for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1084      BBPreds.insert(BBPN->getIncomingBlock(i));
1085  } else {
1086    BBPreds.insert(pred_begin(BB), pred_end(BB));
1087  }
1088
1089  // Walk the preds of DestBB.
1090  for (unsigned i = 0, e = DestBBPN->getNumIncomingValues(); i != e; ++i) {
1091    BasicBlock *Pred = DestBBPN->getIncomingBlock(i);
1092    if (BBPreds.count(Pred)) { // Common predecessor?
1093      for (const PHINode &PN : DestBB->phis()) {
1094        const Value *V1 = PN.getIncomingValueForBlock(Pred);
1095        const Value *V2 = PN.getIncomingValueForBlock(BB);
1096
1097        // If V2 is a phi node in BB, look up what the mapped value will be.
1098        if (const PHINode *V2PN = dyn_cast<PHINode>(V2))
1099          if (V2PN->getParent() == BB)
1100            V2 = V2PN->getIncomingValueForBlock(Pred);
1101
1102        // If there is a conflict, bail out.
1103        if (V1 != V2)
1104          return false;
1105      }
1106    }
1107  }
1108
1109  return true;
1110}
1111
1112/// Replace all old uses with new ones, and push the updated BBs into FreshBBs.
1113static void replaceAllUsesWith(Value *Old, Value *New,
1114                               SmallSet<BasicBlock *, 32> &FreshBBs,
1115                               bool IsHuge) {
1116  auto *OldI = dyn_cast<Instruction>(Old);
1117  if (OldI) {
1118    for (Value::user_iterator UI = OldI->user_begin(), E = OldI->user_end();
1119         UI != E; ++UI) {
1120      Instruction *User = cast<Instruction>(*UI);
1121      if (IsHuge)
1122        FreshBBs.insert(User->getParent());
1123    }
1124  }
1125  Old->replaceAllUsesWith(New);
1126}
1127
1128/// Eliminate a basic block that has only phi's and an unconditional branch in
1129/// it.
1130void CodeGenPrepare::eliminateMostlyEmptyBlock(BasicBlock *BB) {
1131  BranchInst *BI = cast<BranchInst>(BB->getTerminator());
1132  BasicBlock *DestBB = BI->getSuccessor(0);
1133
1134  LLVM_DEBUG(dbgs() << "MERGING MOSTLY EMPTY BLOCKS - BEFORE:\n"
1135                    << *BB << *DestBB);
1136
1137  // If the destination block has a single pred, then this is a trivial edge,
1138  // just collapse it.
1139  if (BasicBlock *SinglePred = DestBB->getSinglePredecessor()) {
1140    if (SinglePred != DestBB) {
1141      assert(SinglePred == BB &&
1142             "Single predecessor not the same as predecessor");
1143      // Merge DestBB into SinglePred/BB and delete it.
1144      MergeBlockIntoPredecessor(DestBB);
1145      // Note: BB(=SinglePred) will not be deleted on this path.
1146      // DestBB(=its single successor) is the one that was deleted.
1147      LLVM_DEBUG(dbgs() << "AFTER:\n" << *SinglePred << "\n\n\n");
1148
1149      if (IsHugeFunc) {
1150        // Update FreshBBs to optimize the merged BB.
1151        FreshBBs.insert(SinglePred);
1152        FreshBBs.erase(DestBB);
1153      }
1154      return;
1155    }
1156  }
1157
1158  // Otherwise, we have multiple predecessors of BB.  Update the PHIs in DestBB
1159  // to handle the new incoming edges it is about to have.
1160  for (PHINode &PN : DestBB->phis()) {
1161    // Remove the incoming value for BB, and remember it.
1162    Value *InVal = PN.removeIncomingValue(BB, false);
1163
1164    // Two options: either the InVal is a phi node defined in BB or it is some
1165    // value that dominates BB.
1166    PHINode *InValPhi = dyn_cast<PHINode>(InVal);
1167    if (InValPhi && InValPhi->getParent() == BB) {
1168      // Add all of the input values of the input PHI as inputs of this phi.
1169      for (unsigned i = 0, e = InValPhi->getNumIncomingValues(); i != e; ++i)
1170        PN.addIncoming(InValPhi->getIncomingValue(i),
1171                       InValPhi->getIncomingBlock(i));
1172    } else {
1173      // Otherwise, add one instance of the dominating value for each edge that
1174      // we will be adding.
1175      if (PHINode *BBPN = dyn_cast<PHINode>(BB->begin())) {
1176        for (unsigned i = 0, e = BBPN->getNumIncomingValues(); i != e; ++i)
1177          PN.addIncoming(InVal, BBPN->getIncomingBlock(i));
1178      } else {
1179        for (BasicBlock *Pred : predecessors(BB))
1180          PN.addIncoming(InVal, Pred);
1181      }
1182    }
1183  }
1184
1185  // The PHIs are now updated, change everything that refers to BB to use
1186  // DestBB and remove BB.
1187  BB->replaceAllUsesWith(DestBB);
1188  BB->eraseFromParent();
1189  ++NumBlocksElim;
1190
1191  LLVM_DEBUG(dbgs() << "AFTER:\n" << *DestBB << "\n\n\n");
1192}
1193
1194// Computes a map of base pointer relocation instructions to corresponding
1195// derived pointer relocation instructions given a vector of all relocate calls
1196static void computeBaseDerivedRelocateMap(
1197    const SmallVectorImpl<GCRelocateInst *> &AllRelocateCalls,
1198    DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>>
1199        &RelocateInstMap) {
1200  // Collect information in two maps: one primarily for locating the base object
1201  // while filling the second map; the second map is the final structure holding
1202  // a mapping between Base and corresponding Derived relocate calls
1203  DenseMap<std::pair<unsigned, unsigned>, GCRelocateInst *> RelocateIdxMap;
1204  for (auto *ThisRelocate : AllRelocateCalls) {
1205    auto K = std::make_pair(ThisRelocate->getBasePtrIndex(),
1206                            ThisRelocate->getDerivedPtrIndex());
1207    RelocateIdxMap.insert(std::make_pair(K, ThisRelocate));
1208  }
1209  for (auto &Item : RelocateIdxMap) {
1210    std::pair<unsigned, unsigned> Key = Item.first;
1211    if (Key.first == Key.second)
1212      // Base relocation: nothing to insert
1213      continue;
1214
1215    GCRelocateInst *I = Item.second;
1216    auto BaseKey = std::make_pair(Key.first, Key.first);
1217
1218    // We're iterating over RelocateIdxMap so we cannot modify it.
1219    auto MaybeBase = RelocateIdxMap.find(BaseKey);
1220    if (MaybeBase == RelocateIdxMap.end())
1221      // TODO: We might want to insert a new base object relocate and gep off
1222      // that, if there are enough derived object relocates.
1223      continue;
1224
1225    RelocateInstMap[MaybeBase->second].push_back(I);
1226  }
1227}
1228
1229// Accepts a GEP and extracts the operands into a vector provided they're all
1230// small integer constants
1231static bool getGEPSmallConstantIntOffsetV(GetElementPtrInst *GEP,
1232                                          SmallVectorImpl<Value *> &OffsetV) {
1233  for (unsigned i = 1; i < GEP->getNumOperands(); i++) {
1234    // Only accept small constant integer operands
1235    auto *Op = dyn_cast<ConstantInt>(GEP->getOperand(i));
1236    if (!Op || Op->getZExtValue() > 20)
1237      return false;
1238  }
1239
1240  for (unsigned i = 1; i < GEP->getNumOperands(); i++)
1241    OffsetV.push_back(GEP->getOperand(i));
1242  return true;
1243}
1244
1245// Takes a RelocatedBase (base pointer relocation instruction) and Targets to
1246// replace, computes a replacement, and affects it.
1247static bool
1248simplifyRelocatesOffABase(GCRelocateInst *RelocatedBase,
1249                          const SmallVectorImpl<GCRelocateInst *> &Targets) {
1250  bool MadeChange = false;
1251  // We must ensure the relocation of derived pointer is defined after
1252  // relocation of base pointer. If we find a relocation corresponding to base
1253  // defined earlier than relocation of base then we move relocation of base
1254  // right before found relocation. We consider only relocation in the same
1255  // basic block as relocation of base. Relocations from other basic block will
1256  // be skipped by optimization and we do not care about them.
1257  for (auto R = RelocatedBase->getParent()->getFirstInsertionPt();
1258       &*R != RelocatedBase; ++R)
1259    if (auto *RI = dyn_cast<GCRelocateInst>(R))
1260      if (RI->getStatepoint() == RelocatedBase->getStatepoint())
1261        if (RI->getBasePtrIndex() == RelocatedBase->getBasePtrIndex()) {
1262          RelocatedBase->moveBefore(RI);
1263          MadeChange = true;
1264          break;
1265        }
1266
1267  for (GCRelocateInst *ToReplace : Targets) {
1268    assert(ToReplace->getBasePtrIndex() == RelocatedBase->getBasePtrIndex() &&
1269           "Not relocating a derived object of the original base object");
1270    if (ToReplace->getBasePtrIndex() == ToReplace->getDerivedPtrIndex()) {
1271      // A duplicate relocate call. TODO: coalesce duplicates.
1272      continue;
1273    }
1274
1275    if (RelocatedBase->getParent() != ToReplace->getParent()) {
1276      // Base and derived relocates are in different basic blocks.
1277      // In this case transform is only valid when base dominates derived
1278      // relocate. However it would be too expensive to check dominance
1279      // for each such relocate, so we skip the whole transformation.
1280      continue;
1281    }
1282
1283    Value *Base = ToReplace->getBasePtr();
1284    auto *Derived = dyn_cast<GetElementPtrInst>(ToReplace->getDerivedPtr());
1285    if (!Derived || Derived->getPointerOperand() != Base)
1286      continue;
1287
1288    SmallVector<Value *, 2> OffsetV;
1289    if (!getGEPSmallConstantIntOffsetV(Derived, OffsetV))
1290      continue;
1291
1292    // Create a Builder and replace the target callsite with a gep
1293    assert(RelocatedBase->getNextNode() &&
1294           "Should always have one since it's not a terminator");
1295
1296    // Insert after RelocatedBase
1297    IRBuilder<> Builder(RelocatedBase->getNextNode());
1298    Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());
1299
1300    // If gc_relocate does not match the actual type, cast it to the right type.
1301    // In theory, there must be a bitcast after gc_relocate if the type does not
1302    // match, and we should reuse it to get the derived pointer. But it could be
1303    // cases like this:
1304    // bb1:
1305    //  ...
1306    //  %g1 = call coldcc i8 addrspace(1)*
1307    //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1308    //
1309    // bb2:
1310    //  ...
1311    //  %g2 = call coldcc i8 addrspace(1)*
1312    //  @llvm.experimental.gc.relocate.p1i8(...) br label %merge
1313    //
1314    // merge:
1315    //  %p1 = phi i8 addrspace(1)* [ %g1, %bb1 ], [ %g2, %bb2 ]
1316    //  %cast = bitcast i8 addrspace(1)* %p1 in to i32 addrspace(1)*
1317    //
1318    // In this case, we can not find the bitcast any more. So we insert a new
1319    // bitcast no matter there is already one or not. In this way, we can handle
1320    // all cases, and the extra bitcast should be optimized away in later
1321    // passes.
1322    Value *ActualRelocatedBase = RelocatedBase;
1323    if (RelocatedBase->getType() != Base->getType()) {
1324      ActualRelocatedBase =
1325          Builder.CreateBitCast(RelocatedBase, Base->getType());
1326    }
1327    Value *Replacement =
1328        Builder.CreateGEP(Derived->getSourceElementType(), ActualRelocatedBase,
1329                          ArrayRef(OffsetV));
1330    Replacement->takeName(ToReplace);
1331    // If the newly generated derived pointer's type does not match the original
1332    // derived pointer's type, cast the new derived pointer to match it. Same
1333    // reasoning as above.
1334    Value *ActualReplacement = Replacement;
1335    if (Replacement->getType() != ToReplace->getType()) {
1336      ActualReplacement =
1337          Builder.CreateBitCast(Replacement, ToReplace->getType());
1338    }
1339    ToReplace->replaceAllUsesWith(ActualReplacement);
1340    ToReplace->eraseFromParent();
1341
1342    MadeChange = true;
1343  }
1344  return MadeChange;
1345}
1346
1347// Turns this:
1348//
1349// %base = ...
1350// %ptr = gep %base + 15
1351// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1352// %base' = relocate(%tok, i32 4, i32 4)
1353// %ptr' = relocate(%tok, i32 4, i32 5)
1354// %val = load %ptr'
1355//
1356// into this:
1357//
1358// %base = ...
1359// %ptr = gep %base + 15
1360// %tok = statepoint (%fun, i32 0, i32 0, i32 0, %base, %ptr)
1361// %base' = gc.relocate(%tok, i32 4, i32 4)
1362// %ptr' = gep %base' + 15
1363// %val = load %ptr'
1364bool CodeGenPrepare::simplifyOffsetableRelocate(GCStatepointInst &I) {
1365  bool MadeChange = false;
1366  SmallVector<GCRelocateInst *, 2> AllRelocateCalls;
1367  for (auto *U : I.users())
1368    if (GCRelocateInst *Relocate = dyn_cast<GCRelocateInst>(U))
1369      // Collect all the relocate calls associated with a statepoint
1370      AllRelocateCalls.push_back(Relocate);
1371
1372  // We need at least one base pointer relocation + one derived pointer
1373  // relocation to mangle
1374  if (AllRelocateCalls.size() < 2)
1375    return false;
1376
1377  // RelocateInstMap is a mapping from the base relocate instruction to the
1378  // corresponding derived relocate instructions
1379  DenseMap<GCRelocateInst *, SmallVector<GCRelocateInst *, 2>> RelocateInstMap;
1380  computeBaseDerivedRelocateMap(AllRelocateCalls, RelocateInstMap);
1381  if (RelocateInstMap.empty())
1382    return false;
1383
1384  for (auto &Item : RelocateInstMap)
1385    // Item.first is the RelocatedBase to offset against
1386    // Item.second is the vector of Targets to replace
1387    MadeChange = simplifyRelocatesOffABase(Item.first, Item.second);
1388  return MadeChange;
1389}
1390
1391/// Sink the specified cast instruction into its user blocks.
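/// For example (a sketch; names are illustrative), a cast defined in one block
/// but used in two others is re-created in each user block:
///   DefBB:   %c  = trunc i64 %x to i32   ; erased once all uses are sunk
///   UserBB1: %c1 = trunc i64 %x to i32   ; feeds the use in UserBB1
///   UserBB2: %c2 = trunc i64 %x to i32   ; feeds the use in UserBB2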
1392static bool SinkCast(CastInst *CI) {
1393  BasicBlock *DefBB = CI->getParent();
1394
1395  /// InsertedCasts - Only insert a cast in each block once.
1396  DenseMap<BasicBlock *, CastInst *> InsertedCasts;
1397
1398  bool MadeChange = false;
1399  for (Value::user_iterator UI = CI->user_begin(), E = CI->user_end();
1400       UI != E;) {
1401    Use &TheUse = UI.getUse();
1402    Instruction *User = cast<Instruction>(*UI);
1403
1404    // Figure out which BB this cast is used in.  For PHI's this is the
1405    // appropriate predecessor block.
1406    BasicBlock *UserBB = User->getParent();
1407    if (PHINode *PN = dyn_cast<PHINode>(User)) {
1408      UserBB = PN->getIncomingBlock(TheUse);
1409    }
1410
1411    // Preincrement use iterator so we don't invalidate it.
1412    ++UI;
1413
1414    // The first insertion point of a block containing an EH pad is after the
1415    // pad.  If the pad is the user, we cannot sink the cast past the pad.
1416    if (User->isEHPad())
1417      continue;
1418
1419    // If the block selected to receive the cast is an EH pad that does not
1420    // allow non-PHI instructions before the terminator, we can't sink the
1421    // cast.
1422    if (UserBB->getTerminator()->isEHPad())
1423      continue;
1424
1425    // If this user is in the same block as the cast, don't change the cast.
1426    if (UserBB == DefBB)
1427      continue;
1428
1429    // If we have already inserted a cast into this block, use it.
1430    CastInst *&InsertedCast = InsertedCasts[UserBB];
1431
1432    if (!InsertedCast) {
1433      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1434      assert(InsertPt != UserBB->end());
1435      InsertedCast = CastInst::Create(CI->getOpcode(), CI->getOperand(0),
1436                                      CI->getType(), "");
1437      InsertedCast->insertBefore(*UserBB, InsertPt);
1438      InsertedCast->setDebugLoc(CI->getDebugLoc());
1439    }
1440
1441    // Replace a use of the cast with a use of the new cast.
1442    TheUse = InsertedCast;
1443    MadeChange = true;
1444    ++NumCastUses;
1445  }
1446
1447  // If we removed all uses, nuke the cast.
1448  if (CI->use_empty()) {
1449    salvageDebugInfo(*CI);
1450    CI->eraseFromParent();
1451    MadeChange = true;
1452  }
1453
1454  return MadeChange;
1455}
1456
1457/// If the specified cast instruction is a noop copy (e.g. it's casting from
1458/// one pointer type to another, i32->i8 on PPC), sink it into user blocks to
1459/// reduce the number of virtual registers that must be created and coalesced.
1460///
1461/// Return true if any changes are made.
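/// For example, on a target where i8 is promoted to i32, a 'trunc i32 to i8'
/// has the same promoted source and destination types, so it is treated as a
/// noop copy and sunk into its user blocks.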
1462static bool OptimizeNoopCopyExpression(CastInst *CI, const TargetLowering &TLI,
1463                                       const DataLayout &DL) {
1464  // Sink only "cheap" (or nop) address-space casts.  This is a weaker condition
1465  // than sinking only nop casts, but is helpful on some platforms.
1466  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(CI)) {
1467    if (!TLI.isFreeAddrSpaceCast(ASC->getSrcAddressSpace(),
1468                                 ASC->getDestAddressSpace()))
1469      return false;
1470  }
1471
1472  // If this is a noop copy,
1473  EVT SrcVT = TLI.getValueType(DL, CI->getOperand(0)->getType());
1474  EVT DstVT = TLI.getValueType(DL, CI->getType());
1475
1476  // An fp<->int conversion is never a noop copy.
1477  if (SrcVT.isInteger() != DstVT.isInteger())
1478    return false;
1479
1480  // If this is an extension, it will be a zero or sign extension, which
1481  // isn't a noop.
1482  if (SrcVT.bitsLT(DstVT))
1483    return false;
1484
1485  // If these values will be promoted, find out what they will be promoted
1486  // to.  This helps us consider truncates on PPC as noop copies when they
1487  // are.
1488  if (TLI.getTypeAction(CI->getContext(), SrcVT) ==
1489      TargetLowering::TypePromoteInteger)
1490    SrcVT = TLI.getTypeToTransformTo(CI->getContext(), SrcVT);
1491  if (TLI.getTypeAction(CI->getContext(), DstVT) ==
1492      TargetLowering::TypePromoteInteger)
1493    DstVT = TLI.getTypeToTransformTo(CI->getContext(), DstVT);
1494
1495  // If, after promotion, these are the same types, this is a noop copy.
1496  if (SrcVT != DstVT)
1497    return false;
1498
1499  return SinkCast(CI);
1500}
1501
1502// Match a simple increment by constant operation.  Note that if a sub is
1503// matched, the step is negated (as if the step had been canonicalized to
1504// an add, even though we leave the instruction alone.)
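// For example, matching '%iv.next = sub i32 %iv, 4' binds LHS to %iv and sets
// Step to -4, as if the increment had been written '%iv.next = add i32 %iv, -4'.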
1505bool matchIncrement(const Instruction *IVInc, Instruction *&LHS,
1506                    Constant *&Step) {
1507  if (match(IVInc, m_Add(m_Instruction(LHS), m_Constant(Step))) ||
1508      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::uadd_with_overflow>(
1509                       m_Instruction(LHS), m_Constant(Step)))))
1510    return true;
1511  if (match(IVInc, m_Sub(m_Instruction(LHS), m_Constant(Step))) ||
1512      match(IVInc, m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
1513                       m_Instruction(LHS), m_Constant(Step))))) {
1514    Step = ConstantExpr::getNeg(Step);
1515    return true;
1516  }
1517  return false;
1518}
1519
1520/// If the given \p PN is an induction variable whose value IVInc comes from
1521/// the backedge, and it is increased by Step on each iteration, return the
1522/// pair <IVInc, Step>. Otherwise, return std::nullopt.
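/// For a canonical loop such as (a sketch):
///   header:
///     %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
///     ...
///   latch:
///     %iv.next = add i32 %iv, 1
///     br label %header
/// this returns the pair <%iv.next, i32 1>.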
1523static std::optional<std::pair<Instruction *, Constant *>>
1524getIVIncrement(const PHINode *PN, const LoopInfo *LI) {
1525  const Loop *L = LI->getLoopFor(PN->getParent());
1526  if (!L || L->getHeader() != PN->getParent() || !L->getLoopLatch())
1527    return std::nullopt;
1528  auto *IVInc =
1529      dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch()));
1530  if (!IVInc || LI->getLoopFor(IVInc->getParent()) != L)
1531    return std::nullopt;
1532  Instruction *LHS = nullptr;
1533  Constant *Step = nullptr;
1534  if (matchIncrement(IVInc, LHS, Step) && LHS == PN)
1535    return std::make_pair(IVInc, Step);
1536  return std::nullopt;
1537}
1538
1539static bool isIVIncrement(const Value *V, const LoopInfo *LI) {
1540  auto *I = dyn_cast<Instruction>(V);
1541  if (!I)
1542    return false;
1543  Instruction *LHS = nullptr;
1544  Constant *Step = nullptr;
1545  if (!matchIncrement(I, LHS, Step))
1546    return false;
1547  if (auto *PN = dyn_cast<PHINode>(LHS))
1548    if (auto IVInc = getIVIncrement(PN, LI))
1549      return IVInc->first == I;
1550  return false;
1551}
1552
1553bool CodeGenPrepare::replaceMathCmpWithIntrinsic(BinaryOperator *BO,
1554                                                 Value *Arg0, Value *Arg1,
1555                                                 CmpInst *Cmp,
1556                                                 Intrinsic::ID IID) {
1557  auto IsReplacableIVIncrement = [this, &Cmp](BinaryOperator *BO) {
1558    if (!isIVIncrement(BO, LI))
1559      return false;
1560    const Loop *L = LI->getLoopFor(BO->getParent());
1561    assert(L && "L should not be null after isIVIncrement()");
1562    // Do not risk moving the increment into a child loop.
1563    if (LI->getLoopFor(Cmp->getParent()) != L)
1564      return false;
1565
1566    // Finally, we need to ensure that the insert point will dominate all
1567    // existing uses of the increment.
1568
1569    auto &DT = getDT(*BO->getParent()->getParent());
1570    if (DT.dominates(Cmp->getParent(), BO->getParent()))
1571      // If we're moving up the dom tree, all uses are trivially dominated.
1572      // (This is the common case for code produced by LSR.)
1573      return true;
1574
1575    // Otherwise, special case the single use in the phi recurrence.
1576    return BO->hasOneUse() && DT.dominates(Cmp->getParent(), L->getLoopLatch());
1577  };
1578  if (BO->getParent() != Cmp->getParent() && !IsReplacableIVIncrement(BO)) {
1579    // We used to use a dominator tree here to allow multi-block optimization.
1580    // But that was problematic because:
1581    // 1. It could cause a perf regression by hoisting the math op into the
1582    //    critical path.
1583    // 2. It could cause a perf regression by creating a value that was live
1584    //    across multiple blocks and increasing register pressure.
1585    // 3. Use of a dominator tree could cause large compile-time regression.
1586    //    This is because we recompute the DT on every change in the main CGP
1587    //    run-loop. The recomputing is probably unnecessary in many cases, so if
1588    //    that was fixed, using a DT here would be ok.
1589    //
1590    // There is one important particular case we still want to handle: if BO is
1591    // the IV increment. Important properties that make it profitable:
1592    // - We can speculate IV increment anywhere in the loop (as long as the
1593    //   indvar Phi is its only user);
1594    // - Upon computing Cmp, we effectively compute something equivalent to the
1595    //   IV increment (even though it may look different in the IR). So moving it
1596    //   up to the cmp point does not really increase register pressure.
1597    return false;
1598  }
1599
1600  // We allow matching the canonical IR (add X, C) back to (usubo X, -C).
1601  if (BO->getOpcode() == Instruction::Add &&
1602      IID == Intrinsic::usub_with_overflow) {
1603    assert(isa<Constant>(Arg1) && "Unexpected input for usubo");
1604    Arg1 = ConstantExpr::getNeg(cast<Constant>(Arg1));
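    // E.g. a matched 'add %x, -42' paired with 'icmp ult %x, 42' becomes
    // usub.with.overflow(%x, 42) here, since -(-42) == 42.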
1605  }
1606
1607  // Insert at the first instruction of the pair.
1608  Instruction *InsertPt = nullptr;
1609  for (Instruction &Iter : *Cmp->getParent()) {
1610    // If BO is an XOR, it is not guaranteed that it comes after both inputs to
1611    // the overflow intrinsic are defined.
1612    if ((BO->getOpcode() != Instruction::Xor && &Iter == BO) || &Iter == Cmp) {
1613      InsertPt = &Iter;
1614      break;
1615    }
1616  }
1617  assert(InsertPt != nullptr && "Parent block did not contain cmp or binop");
1618
1619  IRBuilder<> Builder(InsertPt);
1620  Value *MathOV = Builder.CreateBinaryIntrinsic(IID, Arg0, Arg1);
1621  if (BO->getOpcode() != Instruction::Xor) {
1622    Value *Math = Builder.CreateExtractValue(MathOV, 0, "math");
1623    replaceAllUsesWith(BO, Math, FreshBBs, IsHugeFunc);
1624  } else
1625    assert(BO->hasOneUse() &&
1626           "Patterns with XOr should use the BO only in the compare");
1627  Value *OV = Builder.CreateExtractValue(MathOV, 1, "ov");
1628  replaceAllUsesWith(Cmp, OV, FreshBBs, IsHugeFunc);
1629  Cmp->eraseFromParent();
1630  BO->eraseFromParent();
1631  return true;
1632}
1633
1634/// Match special-case patterns that check for unsigned add overflow.
1635static bool matchUAddWithOverflowConstantEdgeCases(CmpInst *Cmp,
1636                                                   BinaryOperator *&Add) {
1637  // Add = add A, 1; Cmp = icmp eq A,-1 (overflow if A is max val)
1638  // Add = add A,-1; Cmp = icmp ne A, 0 (overflow if A is non-zero)
1639  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1640
1641  // We are not expecting non-canonical/degenerate code. Just bail out.
1642  if (isa<Constant>(A))
1643    return false;
1644
1645  ICmpInst::Predicate Pred = Cmp->getPredicate();
1646  if (Pred == ICmpInst::ICMP_EQ && match(B, m_AllOnes()))
1647    B = ConstantInt::get(B->getType(), 1);
1648  else if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt()))
1649    B = ConstantInt::get(B->getType(), -1);
1650  else
1651    return false;
1652
1653  // Check the users of the variable operand of the compare looking for an add
1654  // with the adjusted constant.
1655  for (User *U : A->users()) {
1656    if (match(U, m_Add(m_Specific(A), m_Specific(B)))) {
1657      Add = cast<BinaryOperator>(U);
1658      return true;
1659    }
1660  }
1661  return false;
1662}
1663
1664/// Try to combine the compare into a call to the llvm.uadd.with.overflow
1665/// intrinsic. Return true if any changes were made.
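/// For example (a sketch of the canonical pattern):
///   %add = add i32 %x, %y
///   %ov  = icmp ult i32 %add, %x        ; unsigned overflow check
/// becomes
///   %m   = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
///   %add = extractvalue { i32, i1 } %m, 0
///   %ov  = extractvalue { i32, i1 } %m, 1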
1666bool CodeGenPrepare::combineToUAddWithOverflow(CmpInst *Cmp,
1667                                               ModifyDT &ModifiedDT) {
1668  bool EdgeCase = false;
1669  Value *A, *B;
1670  BinaryOperator *Add;
1671  if (!match(Cmp, m_UAddWithOverflow(m_Value(A), m_Value(B), m_BinOp(Add)))) {
1672    if (!matchUAddWithOverflowConstantEdgeCases(Cmp, Add))
1673      return false;
1674    // Set A and B in case we match matchUAddWithOverflowConstantEdgeCases.
1675    A = Add->getOperand(0);
1676    B = Add->getOperand(1);
1677    EdgeCase = true;
1678  }
1679
1680  if (!TLI->shouldFormOverflowOp(ISD::UADDO,
1681                                 TLI->getValueType(*DL, Add->getType()),
1682                                 Add->hasNUsesOrMore(EdgeCase ? 1 : 2)))
1683    return false;
1684
1685  // We don't want to move around uses of condition values this late, so we
1686  // check if it is legal to create the call to the intrinsic in the basic
1687  // block containing the icmp.
1688  if (Add->getParent() != Cmp->getParent() && !Add->hasOneUse())
1689    return false;
1690
1691  if (!replaceMathCmpWithIntrinsic(Add, A, B, Cmp,
1692                                   Intrinsic::uadd_with_overflow))
1693    return false;
1694
1695  // Reset callers - do not crash by iterating over a dead instruction.
1696  ModifiedDT = ModifyDT::ModifyInstDT;
1697  return true;
1698}
1699
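/// Try to combine a compare and subtract into a call to the
/// llvm.usub.with.overflow intrinsic. For example (a sketch):
///   %sub = sub i32 %x, %y
///   %ov  = icmp ult i32 %x, %y          ; unsigned borrow check
/// becomes
///   %m   = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
///   %sub = extractvalue { i32, i1 } %m, 0
///   %ov  = extractvalue { i32, i1 } %m, 1
/// Return true if any changes were made.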
1700bool CodeGenPrepare::combineToUSubWithOverflow(CmpInst *Cmp,
1701                                               ModifyDT &ModifiedDT) {
1702  // We are not expecting non-canonical/degenerate code. Just bail out.
1703  Value *A = Cmp->getOperand(0), *B = Cmp->getOperand(1);
1704  if (isa<Constant>(A) && isa<Constant>(B))
1705    return false;
1706
1707  // Convert (A u> B) to (A u< B) to simplify pattern matching.
1708  ICmpInst::Predicate Pred = Cmp->getPredicate();
1709  if (Pred == ICmpInst::ICMP_UGT) {
1710    std::swap(A, B);
1711    Pred = ICmpInst::ICMP_ULT;
1712  }
1713  // Convert special-case: (A == 0) is the same as (A u< 1).
1714  if (Pred == ICmpInst::ICMP_EQ && match(B, m_ZeroInt())) {
1715    B = ConstantInt::get(B->getType(), 1);
1716    Pred = ICmpInst::ICMP_ULT;
1717  }
1718  // Convert special-case: (A != 0) is the same as (0 u< A).
1719  if (Pred == ICmpInst::ICMP_NE && match(B, m_ZeroInt())) {
1720    std::swap(A, B);
1721    Pred = ICmpInst::ICMP_ULT;
1722  }
1723  if (Pred != ICmpInst::ICMP_ULT)
1724    return false;
1725
1726  // Walk the users of a variable operand of a compare looking for a subtract or
1727  // add with that same operand. Also match the 2nd operand of the compare to
1728  // the add/sub, but that may be a negated constant operand of an add.
1729  Value *CmpVariableOperand = isa<Constant>(A) ? B : A;
1730  BinaryOperator *Sub = nullptr;
1731  for (User *U : CmpVariableOperand->users()) {
1732    // A - B, A u< B --> usubo(A, B)
1733    if (match(U, m_Sub(m_Specific(A), m_Specific(B)))) {
1734      Sub = cast<BinaryOperator>(U);
1735      break;
1736    }
1737
1738    // A + (-C), A u< C (canonicalized form of (sub A, C))
1739    const APInt *CmpC, *AddC;
1740    if (match(U, m_Add(m_Specific(A), m_APInt(AddC))) &&
1741        match(B, m_APInt(CmpC)) && *AddC == -(*CmpC)) {
1742      Sub = cast<BinaryOperator>(U);
1743      break;
1744    }
1745  }
1746  if (!Sub)
1747    return false;
1748
1749  if (!TLI->shouldFormOverflowOp(ISD::USUBO,
1750                                 TLI->getValueType(*DL, Sub->getType()),
1751                                 Sub->hasNUsesOrMore(1)))
1752    return false;
1753
1754  if (!replaceMathCmpWithIntrinsic(Sub, Sub->getOperand(0), Sub->getOperand(1),
1755                                   Cmp, Intrinsic::usub_with_overflow))
1756    return false;
1757
1758  // Reset callers - do not crash by iterating over a dead instruction.
1759  ModifiedDT = ModifyDT::ModifyInstDT;
1760  return true;
1761}
1762
1763/// Sink the given CmpInst into user blocks to reduce the number of virtual
1764/// registers that must be created and coalesced. This is a clear win except on
1765/// targets with multiple condition code registers (PowerPC), where it might
1766/// lose; some adjustment may be wanted there.
1767///
1768/// Return true if any changes are made.
1769static bool sinkCmpExpression(CmpInst *Cmp, const TargetLowering &TLI) {
1770  if (TLI.hasMultipleConditionRegisters())
1771    return false;
1772
1773  // Avoid sinking soft-FP comparisons, since this can move them into a loop.
1774  if (TLI.useSoftFloat() && isa<FCmpInst>(Cmp))
1775    return false;
1776
1777  // Only insert a cmp in each block once.
1778  DenseMap<BasicBlock *, CmpInst *> InsertedCmps;
1779
1780  bool MadeChange = false;
1781  for (Value::user_iterator UI = Cmp->user_begin(), E = Cmp->user_end();
1782       UI != E;) {
1783    Use &TheUse = UI.getUse();
1784    Instruction *User = cast<Instruction>(*UI);
1785
1786    // Preincrement use iterator so we don't invalidate it.
1787    ++UI;
1788
1789    // Don't bother for PHI nodes.
1790    if (isa<PHINode>(User))
1791      continue;
1792
1793    // Figure out which BB this cmp is used in.
1794    BasicBlock *UserBB = User->getParent();
1795    BasicBlock *DefBB = Cmp->getParent();
1796
1797    // If this user is in the same block as the cmp, don't change the cmp.
1798    if (UserBB == DefBB)
1799      continue;
1800
1801    // If we have already inserted a cmp into this block, use it.
1802    CmpInst *&InsertedCmp = InsertedCmps[UserBB];
1803
1804    if (!InsertedCmp) {
1805      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
1806      assert(InsertPt != UserBB->end());
1807      InsertedCmp = CmpInst::Create(Cmp->getOpcode(), Cmp->getPredicate(),
1808                                    Cmp->getOperand(0), Cmp->getOperand(1), "");
1809      InsertedCmp->insertBefore(*UserBB, InsertPt);
1810      // Propagate the debug info.
1811      InsertedCmp->setDebugLoc(Cmp->getDebugLoc());
1812    }
1813
1814    // Replace a use of the cmp with a use of the new cmp.
1815    TheUse = InsertedCmp;
1816    MadeChange = true;
1817    ++NumCmpUses;
1818  }
1819
1820  // If we removed all uses, nuke the cmp.
1821  if (Cmp->use_empty()) {
1822    Cmp->eraseFromParent();
1823    MadeChange = true;
1824  }
1825
1826  return MadeChange;
1827}
1828
1829/// For pattern like:
1830///
1831///   DomCond = icmp sgt/slt CmpOp0, CmpOp1 (might not be in DomBB)
1832///   ...
1833/// DomBB:
1834///   ...
1835///   br DomCond, TrueBB, CmpBB
1836/// CmpBB: (with DomBB being the single predecessor)
1837///   ...
1838///   Cmp = icmp eq CmpOp0, CmpOp1
1839///   ...
1840///
1841/// This would use two comparisons on targets where the lowering of icmp
1842/// sgt/slt differs from that of icmp eq (PowerPC). This function tries to
1843/// convert 'Cmp = icmp eq CmpOp0, CmpOp1' to 'Cmp = icmp slt/sgt CmpOp0,
1844/// CmpOp1'. After that, DomCond and Cmp can use the same comparison, saving
1845/// one comparison.
1846///
1847/// Return true if any changes are made.
1848static bool foldICmpWithDominatingICmp(CmpInst *Cmp,
1849                                       const TargetLowering &TLI) {
1850  if (!EnableICMP_EQToICMP_ST && TLI.isEqualityCmpFoldedWithSignedCmp())
1851    return false;
1852
1853  ICmpInst::Predicate Pred = Cmp->getPredicate();
1854  if (Pred != ICmpInst::ICMP_EQ)
1855    return false;
1856
1857  // If icmp eq has users other than BranchInst and SelectInst, converting it to
1858  // icmp slt/sgt would introduce more redundant LLVM IR.
1859  for (User *U : Cmp->users()) {
1860    if (isa<BranchInst>(U))
1861      continue;
1862    if (isa<SelectInst>(U) && cast<SelectInst>(U)->getCondition() == Cmp)
1863      continue;
1864    return false;
1865  }
1866
1867  // This is a cheap/incomplete check for dominance - just match a single
1868  // predecessor with a conditional branch.
1869  BasicBlock *CmpBB = Cmp->getParent();
1870  BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1871  if (!DomBB)
1872    return false;
1873
1874  // We want to ensure that the only way control gets to the comparison of
1875  // interest is that a less/greater than comparison on the same operands is
1876  // false.
1877  Value *DomCond;
1878  BasicBlock *TrueBB, *FalseBB;
1879  if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1880    return false;
1881  if (CmpBB != FalseBB)
1882    return false;
1883
1884  Value *CmpOp0 = Cmp->getOperand(0), *CmpOp1 = Cmp->getOperand(1);
1885  ICmpInst::Predicate DomPred;
1886  if (!match(DomCond, m_ICmp(DomPred, m_Specific(CmpOp0), m_Specific(CmpOp1))))
1887    return false;
1888  if (DomPred != ICmpInst::ICMP_SGT && DomPred != ICmpInst::ICMP_SLT)
1889    return false;
1890
1891  // Convert the equality comparison to the opposite of the dominating
1892  // comparison and swap the direction for all branch/select users.
1893  // We have conceptually converted:
1894  // Res = (a < b) ? <LT_RES> : (a == b) ? <EQ_RES> : <GT_RES>;
1895  // to
1896  // Res = (a < b) ? <LT_RES> : (a > b)  ? <GT_RES> : <EQ_RES>;
1897  // And similarly for branches.
1898  for (User *U : Cmp->users()) {
1899    if (auto *BI = dyn_cast<BranchInst>(U)) {
1900      assert(BI->isConditional() && "Must be conditional");
1901      BI->swapSuccessors();
1902      continue;
1903    }
1904    if (auto *SI = dyn_cast<SelectInst>(U)) {
1905      // Swap operands
1906      SI->swapValues();
1907      SI->swapProfMetadata();
1908      continue;
1909    }
1910    llvm_unreachable("Must be a branch or a select");
1911  }
1912  Cmp->setPredicate(CmpInst::getSwappedPredicate(DomPred));
1913  return true;
1914}
1915
1916/// Many architectures use the same instruction for both subtract and cmp. Try
1917/// to swap cmp operands to match subtract operations to allow for CSE.
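/// For example (a sketch), given
///   %sub = sub i32 %b, %a
///   %cmp = icmp ult i32 %a, %b
/// swapping the compare operands to 'icmp ugt i32 %b, %a' lets the backend
/// reuse the flags produced by the subtract.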
1918static bool swapICmpOperandsToExposeCSEOpportunities(CmpInst *Cmp) {
1919  Value *Op0 = Cmp->getOperand(0);
1920  Value *Op1 = Cmp->getOperand(1);
1921  if (!Op0->getType()->isIntegerTy() || isa<Constant>(Op0) ||
1922      isa<Constant>(Op1) || Op0 == Op1)
1923    return false;
1924
1925  // If a subtract already has the same operands as a compare, swapping would be
1926  // bad. If a subtract has the same operands as a compare but in reverse order,
1927  // then swapping is good.
1928  int GoodToSwap = 0;
1929  unsigned NumInspected = 0;
1930  for (const User *U : Op0->users()) {
1931    // Avoid walking many users.
1932    if (++NumInspected > 128)
1933      return false;
1934    if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
1935      GoodToSwap++;
1936    else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
1937      GoodToSwap--;
1938  }
1939
1940  if (GoodToSwap > 0) {
1941    Cmp->swapOperands();
1942    return true;
1943  }
1944  return false;
1945}
1946
1947bool CodeGenPrepare::optimizeCmp(CmpInst *Cmp, ModifyDT &ModifiedDT) {
1948  if (sinkCmpExpression(Cmp, *TLI))
1949    return true;
1950
1951  if (combineToUAddWithOverflow(Cmp, ModifiedDT))
1952    return true;
1953
1954  if (combineToUSubWithOverflow(Cmp, ModifiedDT))
1955    return true;
1956
1957  if (foldICmpWithDominatingICmp(Cmp, *TLI))
1958    return true;
1959
1960  if (swapICmpOperandsToExposeCSEOpportunities(Cmp))
1961    return true;
1962
1963  return false;
1964}
1965
1966/// Duplicate and sink the given 'and' instruction into user blocks where it is
1967/// used in a compare to allow isel to generate better code for targets where
1968/// this operation can be combined.
1969///
1970/// Return true if any changes are made.
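/// For example (a sketch), an 'and' whose only users are 'icmp eq ..., 0' in
/// other blocks is duplicated next to each compare:
///   DefBB:  %m = and i64 %x, 255
///   UseBB:  %c = icmp eq i64 %m, 0
/// becomes
///   UseBB:  %m1 = and i64 %x, 255
///           %c  = icmp eq i64 %m1, 0
/// and the original 'and' is erased.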
1971static bool sinkAndCmp0Expression(Instruction *AndI, const TargetLowering &TLI,
1972                                  SetOfInstrs &InsertedInsts) {
1973  // Double-check that we're not trying to optimize an instruction that was
1974  // already optimized by some other part of this pass.
1975  assert(!InsertedInsts.count(AndI) &&
1976         "Attempting to optimize already optimized and instruction");
1977  (void)InsertedInsts;
1978
1979  // Nothing to do for single use in same basic block.
1980  if (AndI->hasOneUse() &&
1981      AndI->getParent() == cast<Instruction>(*AndI->user_begin())->getParent())
1982    return false;
1983
1984  // Try to avoid cases where sinking/duplicating is likely to increase register
1985  // pressure.
1986  if (!isa<ConstantInt>(AndI->getOperand(0)) &&
1987      !isa<ConstantInt>(AndI->getOperand(1)) &&
1988      AndI->getOperand(0)->hasOneUse() && AndI->getOperand(1)->hasOneUse())
1989    return false;
1990
1991  for (auto *U : AndI->users()) {
1992    Instruction *User = cast<Instruction>(U);
1993
1994    // Only sink 'and' feeding icmp with 0.
1995    if (!isa<ICmpInst>(User))
1996      return false;
1997
1998    auto *CmpC = dyn_cast<ConstantInt>(User->getOperand(1));
1999    if (!CmpC || !CmpC->isZero())
2000      return false;
2001  }
2002
2003  if (!TLI.isMaskAndCmp0FoldingBeneficial(*AndI))
2004    return false;
2005
2006  LLVM_DEBUG(dbgs() << "found 'and' feeding only icmp 0;\n");
2007  LLVM_DEBUG(AndI->getParent()->dump());
2008
2009  // Push the 'and' into the same block as the icmp 0.  There should only be
2010  // one (icmp (and, 0)) in each block, since CSE/GVN should have removed any
2011  // others, so we don't need to keep track of which BBs we insert into.
2012  for (Value::user_iterator UI = AndI->user_begin(), E = AndI->user_end();
2013       UI != E;) {
2014    Use &TheUse = UI.getUse();
2015    Instruction *User = cast<Instruction>(*UI);
2016
2017    // Preincrement use iterator so we don't invalidate it.
2018    ++UI;
2019
2020    LLVM_DEBUG(dbgs() << "sinking 'and' use: " << *User << "\n");
2021
2022    // Keep the 'and' in the same place if the use is already in the same block.
2023    Instruction *InsertPt =
2024        User->getParent() == AndI->getParent() ? AndI : User;
2025    Instruction *InsertedAnd =
2026        BinaryOperator::Create(Instruction::And, AndI->getOperand(0),
2027                               AndI->getOperand(1), "", InsertPt);
2028    // Propagate the debug info.
2029    InsertedAnd->setDebugLoc(AndI->getDebugLoc());
2030
2031    // Replace a use of the 'and' with a use of the new 'and'.
2032    TheUse = InsertedAnd;
2033    ++NumAndUses;
2034    LLVM_DEBUG(User->getParent()->dump());
2035  }
2036
2037  // We removed all uses, nuke the and.
2038  AndI->eraseFromParent();
2039  return true;
2040}
2041
2042/// Check if the candidate uses could be combined with a shift instruction,
2043/// which includes:
2044/// 1. Truncate instruction
2045/// 2. And instruction whose immediate is a mask of the low bits:
2046///    imm & (imm+1) == 0
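/// For example, 0x0000ffff is such a mask (0xffff & 0x10000 == 0), while
/// 0x0000ff00 is not (0xff00 & 0xff01 != 0).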
2047static bool isExtractBitsCandidateUse(Instruction *User) {
2048  if (!isa<TruncInst>(User)) {
2049    if (User->getOpcode() != Instruction::And ||
2050        !isa<ConstantInt>(User->getOperand(1)))
2051      return false;
2052
2053    const APInt &Cimm = cast<ConstantInt>(User->getOperand(1))->getValue();
2054
2055    if ((Cimm & (Cimm + 1)).getBoolValue())
2056      return false;
2057  }
2058  return true;
2059}
2060
2061/// Sink both the shift and truncate instructions to the BB of the truncate's use.
2062static bool
2063SinkShiftAndTruncate(BinaryOperator *ShiftI, Instruction *User, ConstantInt *CI,
2064                     DenseMap<BasicBlock *, BinaryOperator *> &InsertedShifts,
2065                     const TargetLowering &TLI, const DataLayout &DL) {
2066  BasicBlock *UserBB = User->getParent();
2067  DenseMap<BasicBlock *, CastInst *> InsertedTruncs;
2068  auto *TruncI = cast<TruncInst>(User);
2069  bool MadeChange = false;
2070
2071  for (Value::user_iterator TruncUI = TruncI->user_begin(),
2072                            TruncE = TruncI->user_end();
2073       TruncUI != TruncE;) {
2074
2075    Use &TruncTheUse = TruncUI.getUse();
2076    Instruction *TruncUser = cast<Instruction>(*TruncUI);
2077    // Preincrement use iterator so we don't invalidate it.
2078
2079    ++TruncUI;
2080
2081    int ISDOpcode = TLI.InstructionOpcodeToISD(TruncUser->getOpcode());
2082    if (!ISDOpcode)
2083      continue;
2084
2085    // If the use is actually a legal node, there will not be an
2086    // implicit truncate.
2087    // FIXME: always querying the result type is just an
2088    // approximation; some nodes' legality is determined by the
2089    // operand or other means. There's no good way to find out though.
2090    if (TLI.isOperationLegalOrCustom(
2091            ISDOpcode, TLI.getValueType(DL, TruncUser->getType(), true)))
2092      continue;
2093
2094    // Don't bother for PHI nodes.
2095    if (isa<PHINode>(TruncUser))
2096      continue;
2097
2098    BasicBlock *TruncUserBB = TruncUser->getParent();
2099
2100    if (UserBB == TruncUserBB)
2101      continue;
2102
2103    BinaryOperator *&InsertedShift = InsertedShifts[TruncUserBB];
2104    CastInst *&InsertedTrunc = InsertedTruncs[TruncUserBB];
2105
2106    if (!InsertedShift && !InsertedTrunc) {
2107      BasicBlock::iterator InsertPt = TruncUserBB->getFirstInsertionPt();
2108      assert(InsertPt != TruncUserBB->end());
2109      // Sink the shift
2110      if (ShiftI->getOpcode() == Instruction::AShr)
2111        InsertedShift =
2112            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2113      else
2114        InsertedShift =
2115            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2116      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2117      InsertedShift->insertBefore(*TruncUserBB, InsertPt);
2118
2119      // Sink the trunc
2120      BasicBlock::iterator TruncInsertPt = TruncUserBB->getFirstInsertionPt();
2121      TruncInsertPt++;
2122      // It will go ahead of any debug-info.
2123      TruncInsertPt.setHeadBit(true);
2124      assert(TruncInsertPt != TruncUserBB->end());
2125
2126      InsertedTrunc = CastInst::Create(TruncI->getOpcode(), InsertedShift,
2127                                       TruncI->getType(), "");
2128      InsertedTrunc->insertBefore(*TruncUserBB, TruncInsertPt);
2129      InsertedTrunc->setDebugLoc(TruncI->getDebugLoc());
2130
2131      MadeChange = true;
2132
2133      TruncTheUse = InsertedTrunc;
2134    }
2135  }
2136  return MadeChange;
2137}
2138
2139/// Sink the shift *right* instruction into user blocks if the uses could
2140/// potentially be combined with this shift instruction to generate a
2141/// BitExtract instruction. It is only applied if the architecture supports
2142/// BitExtract instructions. Here is an example:
2143/// BB1:
2144///   %x.extract.shift = lshr i64 %arg1, 32
2145/// BB2:
2146///   %x.extract.trunc = trunc i64 %x.extract.shift to i16
2147/// ==>
2148///
2149/// BB2:
2150///   %x.extract.shift.1 = lshr i64 %arg1, 32
2151///   %x.extract.trunc = trunc i64 %x.extract.shift.1 to i16
2152///
2153/// CodeGen will recognize the pattern in BB2 and generate BitExtract
2154/// instruction.
2155/// Return true if any changes are made.
2156static bool OptimizeExtractBits(BinaryOperator *ShiftI, ConstantInt *CI,
2157                                const TargetLowering &TLI,
2158                                const DataLayout &DL) {
2159  BasicBlock *DefBB = ShiftI->getParent();
2160
2161  /// Only insert instructions in each block once.
2162  DenseMap<BasicBlock *, BinaryOperator *> InsertedShifts;
2163
2164  bool shiftIsLegal = TLI.isTypeLegal(TLI.getValueType(DL, ShiftI->getType()));
2165
2166  bool MadeChange = false;
2167  for (Value::user_iterator UI = ShiftI->user_begin(), E = ShiftI->user_end();
2168       UI != E;) {
2169    Use &TheUse = UI.getUse();
2170    Instruction *User = cast<Instruction>(*UI);
2171    // Preincrement use iterator so we don't invalidate it.
2172    ++UI;
2173
2174    // Don't bother for PHI nodes.
2175    if (isa<PHINode>(User))
2176      continue;
2177
2178    if (!isExtractBitsCandidateUse(User))
2179      continue;
2180
2181    BasicBlock *UserBB = User->getParent();
2182
2183    if (UserBB == DefBB) {
2184      // If the shift and truncate instructions are in the same BB, the use of
2185      // the truncate (TruncUse) may still introduce another truncate if its
2186      // type is not legal. In this case, we would like to sink both the shift
2187      // and the truncate to the BB of TruncUse.
2188      // for example:
2189      // BB1:
2190      // i64 shift.result = lshr i64 opnd, imm
2191      // trunc.result = trunc shift.result to i16
2192      //
2193      // BB2:
2194      //   ----> We will have an implicit truncate here if the architecture does
2195      //   not have an i16 compare.
2196      // cmp i16 trunc.result, opnd2
2197      //
2198      if (isa<TruncInst>(User) &&
2199          shiftIsLegal
2200          // If the type of the truncate is legal, no truncate will be
2201          // introduced in other basic blocks.
2202          && (!TLI.isTypeLegal(TLI.getValueType(DL, User->getType()))))
2203        MadeChange =
2204            SinkShiftAndTruncate(ShiftI, User, CI, InsertedShifts, TLI, DL);
2205
2206      continue;
2207    }
2208    // If we have already inserted a shift into this block, use it.
2209    BinaryOperator *&InsertedShift = InsertedShifts[UserBB];
2210
2211    if (!InsertedShift) {
2212      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
2213      assert(InsertPt != UserBB->end());
2214
2215      if (ShiftI->getOpcode() == Instruction::AShr)
2216        InsertedShift =
2217            BinaryOperator::CreateAShr(ShiftI->getOperand(0), CI, "");
2218      else
2219        InsertedShift =
2220            BinaryOperator::CreateLShr(ShiftI->getOperand(0), CI, "");
2221      InsertedShift->insertBefore(*UserBB, InsertPt);
2222      InsertedShift->setDebugLoc(ShiftI->getDebugLoc());
2223
2224      MadeChange = true;
2225    }
2226
2227    // Replace a use of the shift with a use of the new shift.
2228    TheUse = InsertedShift;
2229  }
2230
2231  // If we removed all uses, or there are none, nuke the shift.
2232  if (ShiftI->use_empty()) {
2233    salvageDebugInfo(*ShiftI);
2234    ShiftI->eraseFromParent();
2235    MadeChange = true;
2236  }
2237
2238  return MadeChange;
2239}
2240
2241/// If counting leading or trailing zeros is an expensive operation and a zero
2242/// input is defined, add a check for zero to avoid calling the intrinsic.
2243///
2244/// We want to transform:
2245///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 false)
2246///
2247/// into:
2248///   entry:
2249///     %cmpz = icmp eq i64 %A, 0
2250///     br i1 %cmpz, label %cond.end, label %cond.false
2251///   cond.false:
2252///     %z = call i64 @llvm.cttz.i64(i64 %A, i1 true)
2253///     br label %cond.end
2254///   cond.end:
2255///     %ctz = phi i64 [ 64, %entry ], [ %z, %cond.false ]
2256///
2257/// If the transform is performed, return true and set ModifiedDT to true.
2258static bool despeculateCountZeros(IntrinsicInst *CountZeros,
2259                                  LoopInfo &LI,
2260                                  const TargetLowering *TLI,
2261                                  const DataLayout *DL, ModifyDT &ModifiedDT,
2262                                  SmallSet<BasicBlock *, 32> &FreshBBs,
2263                                  bool IsHugeFunc) {
2264  // If a zero input is undefined, it doesn't make sense to despeculate that.
2265  if (match(CountZeros->getOperand(1), m_One()))
2266    return false;
2267
2268  // If it's cheap to speculate, there's nothing to do.
2269  Type *Ty = CountZeros->getType();
2270  auto IntrinsicID = CountZeros->getIntrinsicID();
2271  if ((IntrinsicID == Intrinsic::cttz && TLI->isCheapToSpeculateCttz(Ty)) ||
2272      (IntrinsicID == Intrinsic::ctlz && TLI->isCheapToSpeculateCtlz(Ty)))
2273    return false;
2274
2275  // Only handle legal scalar cases. Anything else requires too much work.
2276  unsigned SizeInBits = Ty->getScalarSizeInBits();
2277  if (Ty->isVectorTy() || SizeInBits > DL->getLargestLegalIntTypeSizeInBits())
2278    return false;
2279
2280  // Bail if the value is never zero.
2281  Use &Op = CountZeros->getOperandUse(0);
2282  if (isKnownNonZero(Op, *DL))
2283    return false;
2284
2285  // The intrinsic will be sunk behind a compare against zero and branch.
2286  BasicBlock *StartBlock = CountZeros->getParent();
2287  BasicBlock *CallBlock = StartBlock->splitBasicBlock(CountZeros, "cond.false");
2288  if (IsHugeFunc)
2289    FreshBBs.insert(CallBlock);
2290
2291  // Create another block after the count zero intrinsic. A PHI will be added
2292  // in this block to select the result of the intrinsic or the bit-width
2293  // constant if the input to the intrinsic is zero.
2294  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(CountZeros));
2295  // Any debug-info after CountZeros should not be included.
2296  SplitPt.setHeadBit(true);
2297  BasicBlock *EndBlock = CallBlock->splitBasicBlock(SplitPt, "cond.end");
2298  if (IsHugeFunc)
2299    FreshBBs.insert(EndBlock);
2300
2301  // Update the LoopInfo. The new blocks are in the same loop as the start
2302  // block.
2303  if (Loop *L = LI.getLoopFor(StartBlock)) {
2304    L->addBasicBlockToLoop(CallBlock, LI);
2305    L->addBasicBlockToLoop(EndBlock, LI);
2306  }
2307
2308  // Set up a builder to create a compare, conditional branch, and PHI.
2309  IRBuilder<> Builder(CountZeros->getContext());
2310  Builder.SetInsertPoint(StartBlock->getTerminator());
2311  Builder.SetCurrentDebugLocation(CountZeros->getDebugLoc());
2312
2313  // Replace the unconditional branch that was created by the first split with
2314  // a compare against zero and a conditional branch.
2315  Value *Zero = Constant::getNullValue(Ty);
2316  // Avoid introducing branch on poison. This also replaces the ctz operand.
2317  if (!isGuaranteedNotToBeUndefOrPoison(Op))
2318    Op = Builder.CreateFreeze(Op, Op->getName() + ".fr");
2319  Value *Cmp = Builder.CreateICmpEQ(Op, Zero, "cmpz");
2320  Builder.CreateCondBr(Cmp, EndBlock, CallBlock);
2321  StartBlock->getTerminator()->eraseFromParent();
2322
2323  // Create a PHI in the end block to select either the output of the intrinsic
2324  // or the bit width of the operand.
2325  Builder.SetInsertPoint(EndBlock, EndBlock->begin());
2326  PHINode *PN = Builder.CreatePHI(Ty, 2, "ctz");
2327  replaceAllUsesWith(CountZeros, PN, FreshBBs, IsHugeFunc);
2328  Value *BitWidth = Builder.getInt(APInt(SizeInBits, SizeInBits));
2329  PN->addIncoming(BitWidth, StartBlock);
2330  PN->addIncoming(CountZeros, CallBlock);
2331
2332  // We are explicitly handling the zero case, so we can set the intrinsic's
2333  // undefined zero argument to 'true'. This will also prevent reprocessing the
2334  // intrinsic; we only despeculate when a zero input is defined.
2335  CountZeros->setArgOperand(1, Builder.getTrue());
2336  ModifiedDT = ModifyDT::ModifyBBDT;
2337  return true;
2338}
2339
2340bool CodeGenPrepare::optimizeCallInst(CallInst *CI, ModifyDT &ModifiedDT) {
2341  BasicBlock *BB = CI->getParent();
2342
2343  // Lower inline assembly if we can.
2344  // If we found an inline asm expression, and if the target knows how to
2345  // lower it to normal LLVM code, do so now.
2346  if (CI->isInlineAsm()) {
2347    if (TLI->ExpandInlineAsm(CI)) {
2348      // Avoid invalidating the iterator.
2349      CurInstIterator = BB->begin();
2350      // Avoid processing instructions out of order, which could cause
2351      // reuse before a value is defined.
2352      SunkAddrs.clear();
2353      return true;
2354    }
2355    // Sink address computing for memory operands into the block.
2356    if (optimizeInlineAsmInst(CI))
2357      return true;
2358  }
2359
2360  // Align the pointer arguments to this call if the target thinks it's a good
2361  // idea
2362  unsigned MinSize;
2363  Align PrefAlign;
2364  if (TLI->shouldAlignPointerArgs(CI, MinSize, PrefAlign)) {
2365    for (auto &Arg : CI->args()) {
2366      // We want to align both objects whose address is used directly and
2367      // objects whose address is used in casts and GEPs, though it only makes
2368      // sense for GEPs if the offset is a multiple of the desired alignment and
2369      // if size - offset meets the size threshold.
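      // E.g. with PrefAlign = 16 and MinSize = 32, a pointer at offset 16 into
      // a 64-byte alloca qualifies: 16 is 16-aligned and 64 >= 32 + 16.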
2370      if (!Arg->getType()->isPointerTy())
2371        continue;
2372      APInt Offset(DL->getIndexSizeInBits(
2373                       cast<PointerType>(Arg->getType())->getAddressSpace()),
2374                   0);
2375      Value *Val = Arg->stripAndAccumulateInBoundsConstantOffsets(*DL, Offset);
2376      uint64_t Offset2 = Offset.getLimitedValue();
2377      if (!isAligned(PrefAlign, Offset2))
2378        continue;
2379      AllocaInst *AI;
2380      if ((AI = dyn_cast<AllocaInst>(Val)) && AI->getAlign() < PrefAlign &&
2381          DL->getTypeAllocSize(AI->getAllocatedType()) >= MinSize + Offset2)
2382        AI->setAlignment(PrefAlign);
2383      // Global variables can only be aligned if they are defined in this
2384      // object (i.e. they are uniquely initialized in this object), and
2385      // over-aligning global variables that have an explicit section is
2386      // forbidden.
2387      GlobalVariable *GV;
2388      if ((GV = dyn_cast<GlobalVariable>(Val)) && GV->canIncreaseAlignment() &&
2389          GV->getPointerAlignment(*DL) < PrefAlign &&
2390          DL->getTypeAllocSize(GV->getValueType()) >= MinSize + Offset2)
2391        GV->setAlignment(PrefAlign);
2392    }
2393  }
2394  // If this is a memcpy (or similar) then we may be able to improve the
2395  // alignment.
2396  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(CI)) {
2397    Align DestAlign = getKnownAlignment(MI->getDest(), *DL);
2398    MaybeAlign MIDestAlign = MI->getDestAlign();
2399    if (!MIDestAlign || DestAlign > *MIDestAlign)
2400      MI->setDestAlignment(DestAlign);
2401    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
2402      MaybeAlign MTISrcAlign = MTI->getSourceAlign();
2403      Align SrcAlign = getKnownAlignment(MTI->getSource(), *DL);
2404      if (!MTISrcAlign || SrcAlign > *MTISrcAlign)
2405        MTI->setSourceAlignment(SrcAlign);
2406    }
2407  }
2408
2409  // If we have a cold call site, try to sink addressing computation into the
2410  // cold block.  This interacts with our handling for loads and stores to
2411  // ensure that we can fold all uses of a potential addressing computation
2412  // into their uses.  TODO: generalize this to work over profiling data
2413  if (CI->hasFnAttr(Attribute::Cold) && !OptSize &&
2414      !llvm::shouldOptimizeForSize(BB, PSI, BFI.get()))
2415    for (auto &Arg : CI->args()) {
2416      if (!Arg->getType()->isPointerTy())
2417        continue;
2418      unsigned AS = Arg->getType()->getPointerAddressSpace();
2419      if (optimizeMemoryInst(CI, Arg, Arg->getType(), AS))
2420        return true;
2421    }
2422
2423  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
2424  if (II) {
2425    switch (II->getIntrinsicID()) {
2426    default:
2427      break;
2428    case Intrinsic::assume:
2429      llvm_unreachable("llvm.assume should have been removed already");
2430    case Intrinsic::experimental_widenable_condition: {
2431      // Give up on future widening opportunities so that we can fold away dead
2432      // paths and merge blocks before going into block-local instruction
2433      // selection.
2434      if (II->use_empty()) {
2435        II->eraseFromParent();
2436        return true;
2437      }
2438      Constant *RetVal = ConstantInt::getTrue(II->getContext());
2439      resetIteratorIfInvalidatedWhileCalling(BB, [&]() {
2440        replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
2441      });
2442      return true;
2443    }
2444    case Intrinsic::objectsize:
2445      llvm_unreachable("llvm.objectsize.* should have been lowered already");
2446    case Intrinsic::is_constant:
2447      llvm_unreachable("llvm.is.constant.* should have been lowered already");
2448    case Intrinsic::aarch64_stlxr:
2449    case Intrinsic::aarch64_stxr: {
2450      ZExtInst *ExtVal = dyn_cast<ZExtInst>(CI->getArgOperand(0));
2451      if (!ExtVal || !ExtVal->hasOneUse() ||
2452          ExtVal->getParent() == CI->getParent())
2453        return false;
2454      // Sink a zext feeding stlxr/stxr before it, so it can be folded into it.
2455      ExtVal->moveBefore(CI);
2456      // Mark this instruction as "inserted by CGP", so that other
2457      // optimizations don't touch it.
2458      InsertedInsts.insert(ExtVal);
2459      return true;
2460    }
2461
2462    case Intrinsic::launder_invariant_group:
2463    case Intrinsic::strip_invariant_group: {
2464      Value *ArgVal = II->getArgOperand(0);
2465      auto it = LargeOffsetGEPMap.find(II);
2466      if (it != LargeOffsetGEPMap.end()) {
2467        // Merge entries in LargeOffsetGEPMap to reflect the RAUW.
2468        // Make sure not to have to deal with iterator invalidation
2469        // after possibly adding ArgVal to LargeOffsetGEPMap.
2470        auto GEPs = std::move(it->second);
2471        LargeOffsetGEPMap[ArgVal].append(GEPs.begin(), GEPs.end());
2472        LargeOffsetGEPMap.erase(II);
2473      }
2474
2475      replaceAllUsesWith(II, ArgVal, FreshBBs, IsHugeFunc);
2476      II->eraseFromParent();
2477      return true;
2478    }
2479    case Intrinsic::cttz:
2480    case Intrinsic::ctlz:
2481      // If counting zeros is expensive, try to avoid it.
2482      return despeculateCountZeros(II, *LI, TLI, DL, ModifiedDT, FreshBBs,
2483                                   IsHugeFunc);
2484    case Intrinsic::fshl:
2485    case Intrinsic::fshr:
2486      return optimizeFunnelShift(II);
2487    case Intrinsic::dbg_assign:
2488    case Intrinsic::dbg_value:
2489      return fixupDbgValue(II);
2490    case Intrinsic::masked_gather:
2491      return optimizeGatherScatterInst(II, II->getArgOperand(0));
2492    case Intrinsic::masked_scatter:
2493      return optimizeGatherScatterInst(II, II->getArgOperand(1));
2494    }
2495
2496    SmallVector<Value *, 2> PtrOps;
2497    Type *AccessTy;
2498    if (TLI->getAddrModeArguments(II, PtrOps, AccessTy))
2499      while (!PtrOps.empty()) {
2500        Value *PtrVal = PtrOps.pop_back_val();
2501        unsigned AS = PtrVal->getType()->getPointerAddressSpace();
2502        if (optimizeMemoryInst(II, PtrVal, AccessTy, AS))
2503          return true;
2504      }
2505  }
2506
2507  // From here on out we're working with named functions.
2508  if (!CI->getCalledFunction())
2509    return false;
2510
2511  // Lower all default uses of _chk calls.  This is very similar
2512  // to what InstCombineCalls does, but here we are only lowering calls
2513  // to fortified library functions (e.g. __memcpy_chk) that have the default
2514  // "don't know" as the objectsize.  Anything else should be left alone.
2515  FortifiedLibCallSimplifier Simplifier(TLInfo, true);
2516  IRBuilder<> Builder(CI);
2517  if (Value *V = Simplifier.optimizeCall(CI, Builder)) {
2518    replaceAllUsesWith(CI, V, FreshBBs, IsHugeFunc);
2519    CI->eraseFromParent();
2520    return true;
2521  }
2522
2523  return false;
2524}
2525
2526/// Look for opportunities to duplicate return instructions to the predecessor
2527/// to enable tail call optimizations. The case it is currently looking for is:
2528/// @code
2529/// bb0:
2530///   %tmp0 = tail call i32 @f0()
2531///   br label %return
2532/// bb1:
2533///   %tmp1 = tail call i32 @f1()
2534///   br label %return
2535/// bb2:
2536///   %tmp2 = tail call i32 @f2()
2537///   br label %return
2538/// return:
2539///   %retval = phi i32 [ %tmp0, %bb0 ], [ %tmp1, %bb1 ], [ %tmp2, %bb2 ]
2540///   ret i32 %retval
2541/// @endcode
2542///
2543/// =>
2544///
2545/// @code
2546/// bb0:
2547///   %tmp0 = tail call i32 @f0()
2548///   ret i32 %tmp0
2549/// bb1:
2550///   %tmp1 = tail call i32 @f1()
2551///   ret i32 %tmp1
2552/// bb2:
2553///   %tmp2 = tail call i32 @f2()
2554///   ret i32 %tmp2
2555/// @endcode
2556bool CodeGenPrepare::dupRetToEnableTailCallOpts(BasicBlock *BB,
2557                                                ModifyDT &ModifiedDT) {
2558  if (!BB->getTerminator())
2559    return false;
2560
2561  ReturnInst *RetI = dyn_cast<ReturnInst>(BB->getTerminator());
2562  if (!RetI)
2563    return false;
2564
2565  assert(LI->getLoopFor(BB) == nullptr && "A return block cannot be in a loop");
2566
2567  PHINode *PN = nullptr;
2568  ExtractValueInst *EVI = nullptr;
2569  BitCastInst *BCI = nullptr;
2570  Value *V = RetI->getReturnValue();
2571  if (V) {
2572    BCI = dyn_cast<BitCastInst>(V);
2573    if (BCI)
2574      V = BCI->getOperand(0);
2575
2576    EVI = dyn_cast<ExtractValueInst>(V);
2577    if (EVI) {
2578      V = EVI->getOperand(0);
2579      if (!llvm::all_of(EVI->indices(), [](unsigned idx) { return idx == 0; }))
2580        return false;
2581    }
2582
2583    PN = dyn_cast<PHINode>(V);
2584    if (!PN)
2585      return false;
2586  }
2587
2588  if (PN && PN->getParent() != BB)
2589    return false;
2590
2591  auto isLifetimeEndOrBitCastFor = [](const Instruction *Inst) {
2592    const BitCastInst *BC = dyn_cast<BitCastInst>(Inst);
2593    if (BC && BC->hasOneUse())
2594      Inst = BC->user_back();
2595
2596    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
2597      return II->getIntrinsicID() == Intrinsic::lifetime_end;
2598    return false;
2599  };
2600
2601  // Make sure there are no instructions between the first instruction
2602  // and return.
2603  const Instruction *BI = BB->getFirstNonPHI();
2604  // Skip over debug and the bitcast.
2605  while (isa<DbgInfoIntrinsic>(BI) || BI == BCI || BI == EVI ||
2606         isa<PseudoProbeInst>(BI) || isLifetimeEndOrBitCastFor(BI))
2607    BI = BI->getNextNode();
2608  if (BI != RetI)
2609    return false;
2610
2611  /// Only dup the ReturnInst if the CallInst is likely to be emitted as a tail
2612  /// call.
2613  const Function *F = BB->getParent();
2614  SmallVector<BasicBlock *, 4> TailCallBBs;
2615  if (PN) {
2616    for (unsigned I = 0, E = PN->getNumIncomingValues(); I != E; ++I) {
2617      // Look through bitcasts.
2618      Value *IncomingVal = PN->getIncomingValue(I)->stripPointerCasts();
2619      CallInst *CI = dyn_cast<CallInst>(IncomingVal);
2620      BasicBlock *PredBB = PN->getIncomingBlock(I);
2621      // Make sure the phi value is indeed produced by the tail call.
2622      if (CI && CI->hasOneUse() && CI->getParent() == PredBB &&
2623          TLI->mayBeEmittedAsTailCall(CI) &&
2624          attributesPermitTailCall(F, CI, RetI, *TLI))
2625        TailCallBBs.push_back(PredBB);
2626    }
2627  } else {
2628    SmallPtrSet<BasicBlock *, 4> VisitedBBs;
2629    for (BasicBlock *Pred : predecessors(BB)) {
2630      if (!VisitedBBs.insert(Pred).second)
2631        continue;
2632      if (Instruction *I = Pred->rbegin()->getPrevNonDebugInstruction(true)) {
2633        CallInst *CI = dyn_cast<CallInst>(I);
2634        if (CI && CI->use_empty() && TLI->mayBeEmittedAsTailCall(CI) &&
2635            attributesPermitTailCall(F, CI, RetI, *TLI))
2636          TailCallBBs.push_back(Pred);
2637      }
2638    }
2639  }
2640
2641  bool Changed = false;
2642  for (auto const &TailCallBB : TailCallBBs) {
2643    // Make sure the call instruction is followed by an unconditional branch to
2644    // the return block.
2645    BranchInst *BI = dyn_cast<BranchInst>(TailCallBB->getTerminator());
2646    if (!BI || !BI->isUnconditional() || BI->getSuccessor(0) != BB)
2647      continue;
2648
2649    // Duplicate the return into TailCallBB.
2650    (void)FoldReturnIntoUncondBranch(RetI, BB, TailCallBB);
2651    assert(!VerifyBFIUpdates ||
2652           BFI->getBlockFreq(BB) >= BFI->getBlockFreq(TailCallBB));
2653    BFI->setBlockFreq(BB,
2654                      (BFI->getBlockFreq(BB) - BFI->getBlockFreq(TailCallBB)));
2655    ModifiedDT = ModifyDT::ModifyBBDT;
2656    Changed = true;
2657    ++NumRetsDup;
2658  }
2659
2660  // If we eliminated all predecessors of the block, delete the block now.
2661  if (Changed && !BB->hasAddressTaken() && pred_empty(BB))
2662    BB->eraseFromParent();
2663
2664  return Changed;
2665}
2666
2667//===----------------------------------------------------------------------===//
2668// Memory Optimization
2669//===----------------------------------------------------------------------===//
2670
2671namespace {
2672
2673/// This is an extended version of TargetLowering::AddrMode
2674/// which holds actual Value*'s for register values.
2675struct ExtAddrMode : public TargetLowering::AddrMode {
2676  Value *BaseReg = nullptr;
2677  Value *ScaledReg = nullptr;
2678  Value *OriginalValue = nullptr;
2679  bool InBounds = true;
2680
2681  enum FieldName {
2682    NoField = 0x00,
2683    BaseRegField = 0x01,
2684    BaseGVField = 0x02,
2685    BaseOffsField = 0x04,
2686    ScaledRegField = 0x08,
2687    ScaleField = 0x10,
2688    MultipleFields = 0xff
2689  };
2690
2691  ExtAddrMode() = default;
2692
2693  void print(raw_ostream &OS) const;
2694  void dump() const;
2695
2696  FieldName compare(const ExtAddrMode &other) {
2697    // First check that the types are the same on each field, as differing types
2698    // are something we can't cope with later on.
2699    if (BaseReg && other.BaseReg &&
2700        BaseReg->getType() != other.BaseReg->getType())
2701      return MultipleFields;
2702    if (BaseGV && other.BaseGV && BaseGV->getType() != other.BaseGV->getType())
2703      return MultipleFields;
2704    if (ScaledReg && other.ScaledReg &&
2705        ScaledReg->getType() != other.ScaledReg->getType())
2706      return MultipleFields;
2707
2708    // Conservatively reject 'inbounds' mismatches.
2709    if (InBounds != other.InBounds)
2710      return MultipleFields;
2711
2712    // Check each field to see if it differs.
2713    unsigned Result = NoField;
2714    if (BaseReg != other.BaseReg)
2715      Result |= BaseRegField;
2716    if (BaseGV != other.BaseGV)
2717      Result |= BaseGVField;
2718    if (BaseOffs != other.BaseOffs)
2719      Result |= BaseOffsField;
2720    if (ScaledReg != other.ScaledReg)
2721      Result |= ScaledRegField;
2722    // Don't count 0 as being a different scale, because that actually means
2723    // unscaled (which will already be counted by having no ScaledReg).
2724    if (Scale && other.Scale && Scale != other.Scale)
2725      Result |= ScaleField;
2726
2727    if (llvm::popcount(Result) > 1)
2728      return MultipleFields;
2729    else
2730      return static_cast<FieldName>(Result);
2731  }
2732
2733  // An AddrMode is trivial if it involves no calculation i.e. it is just a base
2734  // with no offset.
2735  bool isTrivial() {
2736    // An AddrMode is (BaseGV + BaseReg + BaseOffs + ScaleReg * Scale) so it is
2737    // trivial if at most one of these terms is nonzero, except that BaseGV and
2738    // BaseReg both being zero actually means a null pointer value, which we
2739    // consider to be 'non-zero' here.
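    // E.g. an AddrMode with only a BaseReg (or only a BaseGV) is trivial, while
    // one that also has a nonzero BaseOffs or Scale, or both BaseGV and
    // BaseReg, is not.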
2740    return !BaseOffs && !Scale && !(BaseGV && BaseReg);
2741  }
2742
2743  Value *GetFieldAsValue(FieldName Field, Type *IntPtrTy) {
2744    switch (Field) {
2745    default:
2746      return nullptr;
2747    case BaseRegField:
2748      return BaseReg;
2749    case BaseGVField:
2750      return BaseGV;
2751    case ScaledRegField:
2752      return ScaledReg;
2753    case BaseOffsField:
2754      return ConstantInt::get(IntPtrTy, BaseOffs);
2755    }
2756  }
2757
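  /// Install the combined value \p V for the differing field \p Field,
  /// adjusting the other fields accordingly. Illustrative sketch
  /// (hypothetical values): when the modes differ only in BaseOffs, the
  /// combined value (e.g. a phi of the two offsets) is installed as ScaledReg
  /// with Scale = 1 and BaseOffs = 0.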
2758  void SetCombinedField(FieldName Field, Value *V,
2759                        const SmallVectorImpl<ExtAddrMode> &AddrModes) {
2760    switch (Field) {
2761    default:
2762      llvm_unreachable("Unhandled fields are expected to be rejected earlier");
2763      break;
2764    case ExtAddrMode::BaseRegField:
2765      BaseReg = V;
2766      break;
2767    case ExtAddrMode::BaseGVField:
2768      // A combined BaseGV is an Instruction, not a GlobalValue, so it goes
2769      // in the BaseReg field.
2770      assert(BaseReg == nullptr);
2771      BaseReg = V;
2772      BaseGV = nullptr;
2773      break;
2774    case ExtAddrMode::ScaledRegField:
2775      ScaledReg = V;
2776      // If we have a mix of scaled and unscaled addrmodes then we want scale
2777      // to be the scale and not zero.
2778      if (!Scale)
2779        for (const ExtAddrMode &AM : AddrModes)
2780          if (AM.Scale) {
2781            Scale = AM.Scale;
2782            break;
2783          }
2784      break;
2785    case ExtAddrMode::BaseOffsField:
2786      // The offset is no longer a constant, so it goes in ScaledReg with a
2787      // scale of 1.
2788      assert(ScaledReg == nullptr);
2789      ScaledReg = V;
2790      Scale = 1;
2791      BaseOffs = 0;
2792      break;
2793    }
2794  }
2795};
2796
2797#ifndef NDEBUG
2798static inline raw_ostream &operator<<(raw_ostream &OS, const ExtAddrMode &AM) {
2799  AM.print(OS);
2800  return OS;
2801}
2802#endif
2803
2804#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2805void ExtAddrMode::print(raw_ostream &OS) const {
2806  bool NeedPlus = false;
2807  OS << "[";
2808  if (InBounds)
2809    OS << "inbounds ";
2810  if (BaseGV) {
2811    OS << "GV:";
2812    BaseGV->printAsOperand(OS, /*PrintType=*/false);
2813    NeedPlus = true;
2814  }
2815
2816  if (BaseOffs) {
2817    OS << (NeedPlus ? " + " : "") << BaseOffs;
2818    NeedPlus = true;
2819  }
2820
2821  if (BaseReg) {
2822    OS << (NeedPlus ? " + " : "") << "Base:";
2823    BaseReg->printAsOperand(OS, /*PrintType=*/false);
2824    NeedPlus = true;
2825  }
2826  if (Scale) {
2827    OS << (NeedPlus ? " + " : "") << Scale << "*";
2828    ScaledReg->printAsOperand(OS, /*PrintType=*/false);
2829  }
2830
2831  OS << ']';
2832}
2833
2834LLVM_DUMP_METHOD void ExtAddrMode::dump() const {
2835  print(dbgs());
2836  dbgs() << '\n';
2837}
2838#endif
2839
2840} // end anonymous namespace
2841
2842namespace {
2843
2844/// This class provides transaction-based operations on the IR.
2845/// Every change made through this class is recorded in the internal state and
2846/// can be undone (rollback) until commit is called.
2847/// CGP does not check if instructions could be speculatively executed when
2848/// moved. Preserving the original location would pessimize the debugging
2849/// experience, as well as negatively impact the quality of sample PGO.
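///
/// A minimal usage sketch (illustrative only; names are hypothetical):
/// \code
///   TypePromotionTransaction TPT(RemovedInsts);
///   auto RestorePt = TPT.getRestorationPoint();
///   TPT.mutateType(Inst, NewTy);   // recorded so that it can be undone
///   if (!Profitable)
///     TPT.rollback(RestorePt);     // undo everything made after RestorePt
///   else
///     TPT.commit();                // keep all recorded changes
/// \endcode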
2850class TypePromotionTransaction {
2851  /// This represents the common interface of the individual transaction.
2852  /// Each class implements the logic for doing one specific modification on
2853  /// the IR via the TypePromotionTransaction.
2854  class TypePromotionAction {
2855  protected:
2856    /// The Instruction modified.
2857    Instruction *Inst;
2858
2859  public:
2860    /// Constructor of the action.
2861    /// The constructor performs the related action on the IR.
2862    TypePromotionAction(Instruction *Inst) : Inst(Inst) {}
2863
2864    virtual ~TypePromotionAction() = default;
2865
2866    /// Undo the modification done by this action.
2867    /// When this method is called, the IR must be in the same state as it was
2868    /// before this action was applied.
2869    /// \pre Undoing the action works if and only if the IR is in the exact same
2870    /// state as it was directly after this action was applied.
2871    virtual void undo() = 0;
2872
2873    /// Commit every change made by this action.
2874    /// When the results of the action on the IR are to be kept, it is important
2875    /// to call this function; otherwise hidden information may be kept forever.
2876    virtual void commit() {
2877      // Nothing to be done, this action is not doing anything.
2878    }
2879  };
2880
2881  /// Utility to remember the position of an instruction.
2882  class InsertionHandler {
2883    /// Position of an instruction.
2884    /// Either an instruction:
2885    /// - Is the first in a basic block: BB is used.
2886    /// - Has a previous instruction: PrevInst is used.
2887    union {
2888      Instruction *PrevInst;
2889      BasicBlock *BB;
2890    } Point;
2891    std::optional<DPValue::self_iterator> BeforeDPValue = std::nullopt;
2892
2893    /// Remember whether or not the instruction had a previous instruction.
2894    bool HasPrevInstruction;
2895
2896  public:
2897    /// Record the position of \p Inst.
2898    InsertionHandler(Instruction *Inst) {
2899      HasPrevInstruction = (Inst != &*(Inst->getParent()->begin()));
2900      BasicBlock *BB = Inst->getParent();
2901
2902      // Record where we would have to re-insert the instruction in the sequence
2903      // of DPValues, if we ended up reinserting.
2904      if (BB->IsNewDbgInfoFormat)
2905        BeforeDPValue = Inst->getDbgReinsertionPosition();
2906
2907      if (HasPrevInstruction) {
2908        Point.PrevInst = &*std::prev(Inst->getIterator());
2909      } else {
2910        Point.BB = BB;
2911      }
2912    }
2913
2914    /// Insert \p Inst at the recorded position.
2915    void insert(Instruction *Inst) {
2916      if (HasPrevInstruction) {
2917        if (Inst->getParent())
2918          Inst->removeFromParent();
2919        Inst->insertAfter(&*Point.PrevInst);
2920      } else {
2921        BasicBlock::iterator Position = Point.BB->getFirstInsertionPt();
2922        if (Inst->getParent())
2923          Inst->moveBefore(*Point.BB, Position);
2924        else
2925          Inst->insertBefore(*Point.BB, Position);
2926      }
2927
2928      Inst->getParent()->reinsertInstInDPValues(Inst, BeforeDPValue);
2929    }
2930  };
2931
2932  /// Move an instruction before another.
2933  class InstructionMoveBefore : public TypePromotionAction {
2934    /// Original position of the instruction.
2935    InsertionHandler Position;
2936
2937  public:
2938    /// Move \p Inst before \p Before.
2939    InstructionMoveBefore(Instruction *Inst, Instruction *Before)
2940        : TypePromotionAction(Inst), Position(Inst) {
2941      LLVM_DEBUG(dbgs() << "Do: move: " << *Inst << "\nbefore: " << *Before
2942                        << "\n");
2943      Inst->moveBefore(Before);
2944    }
2945
2946    /// Move the instruction back to its original position.
2947    void undo() override {
2948      LLVM_DEBUG(dbgs() << "Undo: moveBefore: " << *Inst << "\n");
2949      Position.insert(Inst);
2950    }
2951  };
2952
2953  /// Set the operand of an instruction with a new value.
2954  class OperandSetter : public TypePromotionAction {
2955    /// Original operand of the instruction.
2956    Value *Origin;
2957
2958    /// Index of the modified operand of the instruction.
2959    unsigned Idx;
2960
2961  public:
2962    /// Set operand \p Idx of \p Inst to \p NewVal.
2963    OperandSetter(Instruction *Inst, unsigned Idx, Value *NewVal)
2964        : TypePromotionAction(Inst), Idx(Idx) {
2965      LLVM_DEBUG(dbgs() << "Do: setOperand: " << Idx << "\n"
2966                        << "for:" << *Inst << "\n"
2967                        << "with:" << *NewVal << "\n");
2968      Origin = Inst->getOperand(Idx);
2969      Inst->setOperand(Idx, NewVal);
2970    }
2971
2972    /// Restore the original operand of the instruction.
2973    void undo() override {
2974      LLVM_DEBUG(dbgs() << "Undo: setOperand:" << Idx << "\n"
2975                        << "for: " << *Inst << "\n"
2976                        << "with: " << *Origin << "\n");
2977      Inst->setOperand(Idx, Origin);
2978    }
2979  };
2980
2981  /// Hide the operands of an instruction.
2982  /// Do as if this instruction was not using any of its operands.
2983  class OperandsHider : public TypePromotionAction {
2984    /// The list of original operands.
2985    SmallVector<Value *, 4> OriginalValues;
2986
2987  public:
2988    /// Remove \p Inst from the use lists of its operands.
2989    OperandsHider(Instruction *Inst) : TypePromotionAction(Inst) {
2990      LLVM_DEBUG(dbgs() << "Do: OperandsHider: " << *Inst << "\n");
2991      unsigned NumOpnds = Inst->getNumOperands();
2992      OriginalValues.reserve(NumOpnds);
2993      for (unsigned It = 0; It < NumOpnds; ++It) {
2994        // Save the current operand.
2995        Value *Val = Inst->getOperand(It);
2996        OriginalValues.push_back(Val);
2997        // Set a dummy one.
2998        // We could use OperandSetter here, but that would imply an overhead
2999        // that we are not willing to pay.
3000        Inst->setOperand(It, UndefValue::get(Val->getType()));
3001      }
3002    }
3003
3004    /// Restore the original list of uses.
3005    void undo() override {
3006      LLVM_DEBUG(dbgs() << "Undo: OperandsHider: " << *Inst << "\n");
3007      for (unsigned It = 0, EndIt = OriginalValues.size(); It != EndIt; ++It)
3008        Inst->setOperand(It, OriginalValues[It]);
3009    }
3010  };
3011
3012  /// Build a truncate instruction.
3013  class TruncBuilder : public TypePromotionAction {
3014    Value *Val;
3015
3016  public:
3017    /// Build a truncate instruction of \p Opnd producing a \p Ty
3018    /// result.
3019    /// trunc Opnd to Ty.
3020    TruncBuilder(Instruction *Opnd, Type *Ty) : TypePromotionAction(Opnd) {
3021      IRBuilder<> Builder(Opnd);
3022      Builder.SetCurrentDebugLocation(DebugLoc());
3023      Val = Builder.CreateTrunc(Opnd, Ty, "promoted");
3024      LLVM_DEBUG(dbgs() << "Do: TruncBuilder: " << *Val << "\n");
3025    }
3026
3027    /// Get the built value.
3028    Value *getBuiltValue() { return Val; }
3029
3030    /// Remove the built instruction.
3031    void undo() override {
3032      LLVM_DEBUG(dbgs() << "Undo: TruncBuilder: " << *Val << "\n");
3033      if (Instruction *IVal = dyn_cast<Instruction>(Val))
3034        IVal->eraseFromParent();
3035    }
3036  };
3037
3038  /// Build a sign extension instruction.
3039  class SExtBuilder : public TypePromotionAction {
3040    Value *Val;
3041
3042  public:
3043    /// Build a sign extension instruction of \p Opnd producing a \p Ty
3044    /// result.
3045    /// sext Opnd to Ty.
3046    SExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3047        : TypePromotionAction(InsertPt) {
3048      IRBuilder<> Builder(InsertPt);
3049      Val = Builder.CreateSExt(Opnd, Ty, "promoted");
3050      LLVM_DEBUG(dbgs() << "Do: SExtBuilder: " << *Val << "\n");
3051    }
3052
3053    /// Get the built value.
3054    Value *getBuiltValue() { return Val; }
3055
3056    /// Remove the built instruction.
3057    void undo() override {
3058      LLVM_DEBUG(dbgs() << "Undo: SExtBuilder: " << *Val << "\n");
3059      if (Instruction *IVal = dyn_cast<Instruction>(Val))
3060        IVal->eraseFromParent();
3061    }
3062  };
3063
3064  /// Build a zero extension instruction.
3065  class ZExtBuilder : public TypePromotionAction {
3066    Value *Val;
3067
3068  public:
3069    /// Build a zero extension instruction of \p Opnd producing a \p Ty
3070    /// result.
3071    /// zext Opnd to Ty.
3072    ZExtBuilder(Instruction *InsertPt, Value *Opnd, Type *Ty)
3073        : TypePromotionAction(InsertPt) {
3074      IRBuilder<> Builder(InsertPt);
3075      Builder.SetCurrentDebugLocation(DebugLoc());
3076      Val = Builder.CreateZExt(Opnd, Ty, "promoted");
3077      LLVM_DEBUG(dbgs() << "Do: ZExtBuilder: " << *Val << "\n");
3078    }
3079
3080    /// Get the built value.
3081    Value *getBuiltValue() { return Val; }
3082
3083    /// Remove the built instruction.
3084    void undo() override {
3085      LLVM_DEBUG(dbgs() << "Undo: ZExtBuilder: " << *Val << "\n");
3086      if (Instruction *IVal = dyn_cast<Instruction>(Val))
3087        IVal->eraseFromParent();
3088    }
3089  };
3090
3091  /// Mutate an instruction to another type.
3092  class TypeMutator : public TypePromotionAction {
3093    /// Record the original type.
3094    Type *OrigTy;
3095
3096  public:
3097    /// Mutate the type of \p Inst into \p NewTy.
3098    TypeMutator(Instruction *Inst, Type *NewTy)
3099        : TypePromotionAction(Inst), OrigTy(Inst->getType()) {
3100      LLVM_DEBUG(dbgs() << "Do: MutateType: " << *Inst << " with " << *NewTy
3101                        << "\n");
3102      Inst->mutateType(NewTy);
3103    }
3104
3105    /// Mutate the instruction back to its original type.
3106    void undo() override {
3107      LLVM_DEBUG(dbgs() << "Undo: MutateType: " << *Inst << " with " << *OrigTy
3108                        << "\n");
3109      Inst->mutateType(OrigTy);
3110    }
3111  };
3112
3113  /// Replace the uses of an instruction by another instruction.
3114  class UsesReplacer : public TypePromotionAction {
3115    /// Helper structure to keep track of the replaced uses.
3116    struct InstructionAndIdx {
3117      /// The instruction that uses the replaced instruction.
3118      Instruction *Inst;
3119
3120      /// The operand index at which the replaced instruction is used by Inst.
3121      unsigned Idx;
3122
3123      InstructionAndIdx(Instruction *Inst, unsigned Idx)
3124          : Inst(Inst), Idx(Idx) {}
3125    };
3126
3127    /// Keep track of the original uses (pair Instruction, Index).
3128    SmallVector<InstructionAndIdx, 4> OriginalUses;
3129    /// Keep track of the debug users.
3130    SmallVector<DbgValueInst *, 1> DbgValues;
3131    /// And non-instruction debug-users too.
3132    SmallVector<DPValue *, 1> DPValues;
3133
3134    /// Keep track of the new value so that we can undo it by replacing
3135    /// instances of the new value with the original value.
3136    Value *New;
3137
3138    using use_iterator = SmallVectorImpl<InstructionAndIdx>::iterator;
3139
3140  public:
3141    /// Replace all the uses of \p Inst with \p New.
3142    UsesReplacer(Instruction *Inst, Value *New)
3143        : TypePromotionAction(Inst), New(New) {
3144      LLVM_DEBUG(dbgs() << "Do: UsersReplacer: " << *Inst << " with " << *New
3145                        << "\n");
3146      // Record the original uses.
3147      for (Use &U : Inst->uses()) {
3148        Instruction *UserI = cast<Instruction>(U.getUser());
3149        OriginalUses.push_back(InstructionAndIdx(UserI, U.getOperandNo()));
3150      }
3151      // Record the debug uses separately. They are not in the instruction's
3152      // use list, but they are replaced by RAUW.
3153      findDbgValues(DbgValues, Inst, &DPValues);
3154
3155      // Now, we can replace the uses.
3156      Inst->replaceAllUsesWith(New);
3157    }
3158
3159    /// Reassign the original uses of Inst to Inst.
3160    void undo() override {
3161      LLVM_DEBUG(dbgs() << "Undo: UsersReplacer: " << *Inst << "\n");
3162      for (InstructionAndIdx &Use : OriginalUses)
3163        Use.Inst->setOperand(Use.Idx, Inst);
3164      // RAUW has replaced all original uses with references to the new value,
3165      // including the debug uses. Since we are undoing the replacements,
3166      // the original debug uses must also be reinstated to maintain the
3167      // correctness and utility of debug value instructions.
3168      for (auto *DVI : DbgValues)
3169        DVI->replaceVariableLocationOp(New, Inst);
3170      // Similar story with DPValues, the non-instruction representation of
3171      // dbg.values.
3172      for (DPValue *DPV : DPValues)
3173        DPV->replaceVariableLocationOp(New, Inst);
3174    }
3175  };
3176
3177  /// Remove an instruction from the IR.
3178  class InstructionRemover : public TypePromotionAction {
3179    /// Original position of the instruction.
3180    InsertionHandler Inserter;
3181
3182    /// Helper structure to hide all the links to the instruction. In other
3183    /// words, this helps pretend that the instruction has been removed.
3184    OperandsHider Hider;
3185
3186    /// Keep track of the uses replaced, if any.
3187    UsesReplacer *Replacer = nullptr;
3188
3189    /// Keep track of instructions removed.
3190    SetOfInstrs &RemovedInsts;
3191
3192  public:
3193    /// Remove all references to \p Inst and optionally replace all its
3194    /// uses with New.
3195    /// \p RemovedInsts Keep track of the instructions removed by this Action.
3196    /// \pre If !Inst->use_empty(), then New != nullptr
3197    InstructionRemover(Instruction *Inst, SetOfInstrs &RemovedInsts,
3198                       Value *New = nullptr)
3199        : TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
3200          RemovedInsts(RemovedInsts) {
3201      if (New)
3202        Replacer = new UsesReplacer(Inst, New);
3203      LLVM_DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
3204      RemovedInsts.insert(Inst);
3205      /// The instructions removed here will be freed after completing
3206      /// optimizeBlock() for all blocks as we need to keep track of the
3207      /// removed instructions during promotion.
3208      Inst->removeFromParent();
3209    }
3210
3211    ~InstructionRemover() override { delete Replacer; }
3212
3213    InstructionRemover &operator=(const InstructionRemover &other) = delete;
3214    InstructionRemover(const InstructionRemover &other) = delete;
3215
3216    /// Resurrect the instruction and reassign it to the proper uses if a
3217    /// new value was provided when building this action.
3218    void undo() override {
3219      LLVM_DEBUG(dbgs() << "Undo: InstructionRemover: " << *Inst << "\n");
3220      Inserter.insert(Inst);
3221      if (Replacer)
3222        Replacer->undo();
3223      Hider.undo();
3224      RemovedInsts.erase(Inst);
3225    }
3226  };
3227
3228public:
3229  /// Restoration point.
3230  /// The restoration point is a pointer to an action instead of an iterator
3231  /// because the iterator may be invalidated but not the pointer.
3232  using ConstRestorationPt = const TypePromotionAction *;
3233
3234  TypePromotionTransaction(SetOfInstrs &RemovedInsts)
3235      : RemovedInsts(RemovedInsts) {}
3236
3237  /// Commit all changes made in this transaction. Return true if any change
3238  /// happened.
3239  bool commit();
3240
3241  /// Undo all the changes made after the given point.
3242  void rollback(ConstRestorationPt Point);
3243
3244  /// Get the current restoration point.
3245  ConstRestorationPt getRestorationPoint() const;
3246
3247  /// \name API for IR modification with state keeping to support rollback.
3248  /// @{
3249  /// Same as Instruction::setOperand.
3250  void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
3251
3252  /// Same as Instruction::eraseFromParent.
3253  void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
3254
3255  /// Same as Value::replaceAllUsesWith.
3256  void replaceAllUsesWith(Instruction *Inst, Value *New);
3257
3258  /// Same as Value::mutateType.
3259  void mutateType(Instruction *Inst, Type *NewTy);
3260
3261  /// Same as IRBuilder::createTrunc.
3262  Value *createTrunc(Instruction *Opnd, Type *Ty);
3263
3264  /// Same as IRBuilder::createSExt.
3265  Value *createSExt(Instruction *Inst, Value *Opnd, Type *Ty);
3266
3267  /// Same as IRBuilder::createZExt.
3268  Value *createZExt(Instruction *Inst, Value *Opnd, Type *Ty);
3269
3270private:
3271  /// The ordered list of actions made so far.
3272  SmallVector<std::unique_ptr<TypePromotionAction>, 16> Actions;
3273
3274  using CommitPt =
3275      SmallVectorImpl<std::unique_ptr<TypePromotionAction>>::iterator;
3276
3277  SetOfInstrs &RemovedInsts;
3278};
3279
3280} // end anonymous namespace
3281
3282void TypePromotionTransaction::setOperand(Instruction *Inst, unsigned Idx,
3283                                          Value *NewVal) {
3284  Actions.push_back(std::make_unique<TypePromotionTransaction::OperandSetter>(
3285      Inst, Idx, NewVal));
3286}
3287
3288void TypePromotionTransaction::eraseInstruction(Instruction *Inst,
3289                                                Value *NewVal) {
3290  Actions.push_back(
3291      std::make_unique<TypePromotionTransaction::InstructionRemover>(
3292          Inst, RemovedInsts, NewVal));
3293}
3294
3295void TypePromotionTransaction::replaceAllUsesWith(Instruction *Inst,
3296                                                  Value *New) {
3297  Actions.push_back(
3298      std::make_unique<TypePromotionTransaction::UsesReplacer>(Inst, New));
3299}
3300
3301void TypePromotionTransaction::mutateType(Instruction *Inst, Type *NewTy) {
3302  Actions.push_back(
3303      std::make_unique<TypePromotionTransaction::TypeMutator>(Inst, NewTy));
3304}
3305
3306Value *TypePromotionTransaction::createTrunc(Instruction *Opnd, Type *Ty) {
3307  std::unique_ptr<TruncBuilder> Ptr(new TruncBuilder(Opnd, Ty));
3308  Value *Val = Ptr->getBuiltValue();
3309  Actions.push_back(std::move(Ptr));
3310  return Val;
3311}
3312
3313Value *TypePromotionTransaction::createSExt(Instruction *Inst, Value *Opnd,
3314                                            Type *Ty) {
3315  std::unique_ptr<SExtBuilder> Ptr(new SExtBuilder(Inst, Opnd, Ty));
3316  Value *Val = Ptr->getBuiltValue();
3317  Actions.push_back(std::move(Ptr));
3318  return Val;
3319}
3320
3321Value *TypePromotionTransaction::createZExt(Instruction *Inst, Value *Opnd,
3322                                            Type *Ty) {
3323  std::unique_ptr<ZExtBuilder> Ptr(new ZExtBuilder(Inst, Opnd, Ty));
3324  Value *Val = Ptr->getBuiltValue();
3325  Actions.push_back(std::move(Ptr));
3326  return Val;
3327}
3328
3329TypePromotionTransaction::ConstRestorationPt
3330TypePromotionTransaction::getRestorationPoint() const {
3331  return !Actions.empty() ? Actions.back().get() : nullptr;
3332}
3333
3334bool TypePromotionTransaction::commit() {
3335  for (std::unique_ptr<TypePromotionAction> &Action : Actions)
3336    Action->commit();
3337  bool Modified = !Actions.empty();
3338  Actions.clear();
3339  return Modified;
3340}
3341
3342void TypePromotionTransaction::rollback(
3343    TypePromotionTransaction::ConstRestorationPt Point) {
3344  while (!Actions.empty() && Point != Actions.back().get()) {
3345    std::unique_ptr<TypePromotionAction> Curr = Actions.pop_back_val();
3346    Curr->undo();
3347  }
3348}
3349
3350namespace {
3351
3352/// A helper class for matching addressing modes.
3353///
3354/// This encapsulates the logic for matching the target-legal addressing modes.
3355class AddressingModeMatcher {
3356  SmallVectorImpl<Instruction *> &AddrModeInsts;
3357  const TargetLowering &TLI;
3358  const TargetRegisterInfo &TRI;
3359  const DataLayout &DL;
3360  const LoopInfo &LI;
3361  const std::function<const DominatorTree &()> getDTFn;
3362
3363  /// AccessTy/MemoryInst - This is the type for the access (e.g. double) and
3364  /// the memory instruction that we're computing this address for.
3365  Type *AccessTy;
3366  unsigned AddrSpace;
3367  Instruction *MemoryInst;
3368
3369  /// This is the addressing mode that we're building up. This is
3370  /// part of the return value of this addressing mode matching stuff.
3371  ExtAddrMode &AddrMode;
3372
3373  /// The instructions inserted by other CodeGenPrepare optimizations.
3374  const SetOfInstrs &InsertedInsts;
3375
3376  /// A map from the instructions to their type before promotion.
3377  InstrToOrigTy &PromotedInsts;
3378
3379  /// The ongoing transaction where every action should be registered.
3380  TypePromotionTransaction &TPT;
3381
3382  // A GEP whose offset is too large to be folded into the addressing mode.
3383  std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP;
3384
3385  /// This is set to true when we should not do profitability checks.
3386  /// When true, IsProfitableToFoldIntoAddressingMode always returns true.
3387  bool IgnoreProfitability;
3388
3389  /// True if we are optimizing for size.
3390  bool OptSize = false;
3391
3392  ProfileSummaryInfo *PSI;
3393  BlockFrequencyInfo *BFI;
3394
3395  AddressingModeMatcher(
3396      SmallVectorImpl<Instruction *> &AMI, const TargetLowering &TLI,
3397      const TargetRegisterInfo &TRI, const LoopInfo &LI,
3398      const std::function<const DominatorTree &()> getDTFn, Type *AT,
3399      unsigned AS, Instruction *MI, ExtAddrMode &AM,
3400      const SetOfInstrs &InsertedInsts, InstrToOrigTy &PromotedInsts,
3401      TypePromotionTransaction &TPT,
3402      std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3403      bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI)
3404      : AddrModeInsts(AMI), TLI(TLI), TRI(TRI),
3405        DL(MI->getModule()->getDataLayout()), LI(LI), getDTFn(getDTFn),
3406        AccessTy(AT), AddrSpace(AS), MemoryInst(MI), AddrMode(AM),
3407        InsertedInsts(InsertedInsts), PromotedInsts(PromotedInsts), TPT(TPT),
3408        LargeOffsetGEP(LargeOffsetGEP), OptSize(OptSize), PSI(PSI), BFI(BFI) {
3409    IgnoreProfitability = false;
3410  }
3411
3412public:
3413  /// Find the maximal addressing mode that a load/store of V can fold,
3414  /// given an access type of AccessTy.  This returns a list of involved
3415  /// instructions in AddrModeInsts.
3416  /// \p InsertedInsts The instructions inserted by other CodeGenPrepare
3417  /// optimizations.
3418  /// \p PromotedInsts maps the instructions to their type before promotion.
3419  /// \p TPT The ongoing transaction where every action should be registered.
3420  static ExtAddrMode
3421  Match(Value *V, Type *AccessTy, unsigned AS, Instruction *MemoryInst,
3422        SmallVectorImpl<Instruction *> &AddrModeInsts,
3423        const TargetLowering &TLI, const LoopInfo &LI,
3424        const std::function<const DominatorTree &()> getDTFn,
3425        const TargetRegisterInfo &TRI, const SetOfInstrs &InsertedInsts,
3426        InstrToOrigTy &PromotedInsts, TypePromotionTransaction &TPT,
3427        std::pair<AssertingVH<GetElementPtrInst>, int64_t> &LargeOffsetGEP,
3428        bool OptSize, ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
3429    ExtAddrMode Result;
3430
3431    bool Success = AddressingModeMatcher(AddrModeInsts, TLI, TRI, LI, getDTFn,
3432                                         AccessTy, AS, MemoryInst, Result,
3433                                         InsertedInsts, PromotedInsts, TPT,
3434                                         LargeOffsetGEP, OptSize, PSI, BFI)
3435                       .matchAddr(V, 0);
3436    (void)Success;
3437    assert(Success && "Couldn't select *anything*?");
3438    return Result;
3439  }
3440
3441private:
3442  bool matchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
3443  bool matchAddr(Value *Addr, unsigned Depth);
3444  bool matchOperationAddr(User *AddrInst, unsigned Opcode, unsigned Depth,
3445                          bool *MovedAway = nullptr);
3446  bool isProfitableToFoldIntoAddressingMode(Instruction *I,
3447                                            ExtAddrMode &AMBefore,
3448                                            ExtAddrMode &AMAfter);
3449  bool valueAlreadyLiveAtInst(Value *Val, Value *KnownLive1, Value *KnownLive2);
3450  bool isPromotionProfitable(unsigned NewCost, unsigned OldCost,
3451                             Value *PromotedOperand) const;
3452};
3453
3454class PhiNodeSet;
3455
3456/// An iterator for PhiNodeSet.
3457class PhiNodeSetIterator {
3458  PhiNodeSet *const Set;
3459  size_t CurrentIndex = 0;
3460
3461public:
3462  /// The constructor. Start should point to either a valid element, or be equal
3463  /// to the size of the underlying SmallVector of the PhiNodeSet.
3464  PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start);
3465  PHINode *operator*() const;
3466  PhiNodeSetIterator &operator++();
3467  bool operator==(const PhiNodeSetIterator &RHS) const;
3468  bool operator!=(const PhiNodeSetIterator &RHS) const;
3469};
3470
3471/// Keeps a set of PHINodes.
3472///
3473/// This is a minimal set implementation for a specific use case:
3474/// It is very fast when there are very few elements, but also provides good
3475/// performance when there are many. It is similar to SmallPtrSet, but also
3476/// provides iteration by insertion order, which is deterministic and stable
3477/// across runs. It is also similar to SmallSetVector, but provides removal of
3478/// elements in O(1) time. This is achieved by not actually removing the element
3479/// from the underlying vector, which comes at the cost of using more memory, but
3480/// that is fine, since PhiNodeSets are used as short-lived objects.
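///
/// Illustrative sketch (hypothetical values): after insert(P1), insert(P2)
/// and erase(P1), iteration visits only P2, even though NodeList still
/// physically holds both entries; NodeMap remains the source of truth.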
3481class PhiNodeSet {
3482  friend class PhiNodeSetIterator;
3483
3484  using MapType = SmallDenseMap<PHINode *, size_t, 32>;
3485  using iterator = PhiNodeSetIterator;
3486
3487  /// Keeps the elements in the order of their insertion in the underlying
3488  /// vector. To achieve constant time removal, it never deletes any element.
3489  SmallVector<PHINode *, 32> NodeList;
3490
3491  /// Keeps the elements in the underlying set implementation. This (and not the
3492  /// NodeList defined above) is the source of truth on whether an element
3493  /// is actually in the collection.
3494  MapType NodeMap;
3495
3496  /// Points to the first valid (not deleted) element when the set is not empty
3497  /// and the value is not zero. Equals to the size of the underlying vector
3498  /// when the set is empty. When the value is 0, as in the beginning, the
3499  /// first element may or may not be valid.
3500  size_t FirstValidElement = 0;
3501
3502public:
3503  /// Inserts a new element to the collection.
3504  /// \returns true if the element is actually added, i.e. was not in the
3505  /// collection before the operation.
3506  bool insert(PHINode *Ptr) {
3507    if (NodeMap.insert(std::make_pair(Ptr, NodeList.size())).second) {
3508      NodeList.push_back(Ptr);
3509      return true;
3510    }
3511    return false;
3512  }
3513
3514  /// Removes the element from the collection.
3515  /// \returns whether the element is actually removed, i.e. was in the
3516  /// collection before the operation.
3517  bool erase(PHINode *Ptr) {
3518    if (NodeMap.erase(Ptr)) {
3519      SkipRemovedElements(FirstValidElement);
3520      return true;
3521    }
3522    return false;
3523  }
3524
3525  /// Removes all elements and clears the collection.
3526  void clear() {
3527    NodeMap.clear();
3528    NodeList.clear();
3529    FirstValidElement = 0;
3530  }
3531
3532  /// \returns an iterator that will iterate the elements in the order of
3533  /// insertion.
3534  iterator begin() {
3535    if (FirstValidElement == 0)
3536      SkipRemovedElements(FirstValidElement);
3537    return PhiNodeSetIterator(this, FirstValidElement);
3538  }
3539
3540  /// \returns an iterator that points to the end of the collection.
3541  iterator end() { return PhiNodeSetIterator(this, NodeList.size()); }
3542
3543  /// Returns the number of elements in the collection.
3544  size_t size() const { return NodeMap.size(); }
3545
3546  /// \returns 1 if the given element is in the collection, and 0 otherwise.
3547  size_t count(PHINode *Ptr) const { return NodeMap.count(Ptr); }
3548
3549private:
3550  /// Updates the CurrentIndex so that it will point to a valid element.
3551  ///
3552  /// If the element of NodeList at CurrentIndex is valid, it does not
3553  /// change it. If there are no more valid elements, it updates CurrentIndex
3554  /// to point to the end of the NodeList.
3555  void SkipRemovedElements(size_t &CurrentIndex) {
3556    while (CurrentIndex < NodeList.size()) {
3557      auto it = NodeMap.find(NodeList[CurrentIndex]);
3558      // If the element has been deleted and added again later, NodeMap will
3559      // point to a different index, so CurrentIndex will still be invalid.
3560      if (it != NodeMap.end() && it->second == CurrentIndex)
3561        break;
3562      ++CurrentIndex;
3563    }
3564  }
3565};
3566
3567PhiNodeSetIterator::PhiNodeSetIterator(PhiNodeSet *const Set, size_t Start)
3568    : Set(Set), CurrentIndex(Start) {}
3569
3570PHINode *PhiNodeSetIterator::operator*() const {
3571  assert(CurrentIndex < Set->NodeList.size() &&
3572         "PhiNodeSet access out of range");
3573  return Set->NodeList[CurrentIndex];
3574}
3575
3576PhiNodeSetIterator &PhiNodeSetIterator::operator++() {
3577  assert(CurrentIndex < Set->NodeList.size() &&
3578         "PhiNodeSet access out of range");
3579  ++CurrentIndex;
3580  Set->SkipRemovedElements(CurrentIndex);
3581  return *this;
3582}
3583
3584bool PhiNodeSetIterator::operator==(const PhiNodeSetIterator &RHS) const {
3585  return CurrentIndex == RHS.CurrentIndex;
3586}
3587
3588bool PhiNodeSetIterator::operator!=(const PhiNodeSetIterator &RHS) const {
3589  return !((*this) == RHS);
3590}
3591
3592/// Keeps track of the simplification of Phi nodes.
3593/// Accepts the set of all phi nodes and erases a phi node from this set
3594/// if it is simplified.
3595class SimplificationTracker {
3596  DenseMap<Value *, Value *> Storage;
3597  const SimplifyQuery &SQ;
3598  // Tracks newly created Phi nodes. The elements are iterated by insertion
3599  // order.
3600  PhiNodeSet AllPhiNodes;
3601  // Tracks newly created Select nodes.
3602  SmallPtrSet<SelectInst *, 32> AllSelectNodes;
3603
3604public:
3605  SimplificationTracker(const SimplifyQuery &sq) : SQ(sq) {}
3606
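  /// Follow the chain of replacements recorded via Put and return the final
  /// value. Illustrative sketch: after Put(A, B) and Put(B, C), Get(A)
  /// returns C.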
3607  Value *Get(Value *V) {
3608    do {
3609      auto SV = Storage.find(V);
3610      if (SV == Storage.end())
3611        return V;
3612      V = SV->second;
3613    } while (true);
3614  }
3615
3616  Value *Simplify(Value *Val) {
3617    SmallVector<Value *, 32> WorkList;
3618    SmallPtrSet<Value *, 32> Visited;
3619    WorkList.push_back(Val);
3620    while (!WorkList.empty()) {
3621      auto *P = WorkList.pop_back_val();
3622      if (!Visited.insert(P).second)
3623        continue;
3624      if (auto *PI = dyn_cast<Instruction>(P))
3625        if (Value *V = simplifyInstruction(cast<Instruction>(PI), SQ)) {
3626          for (auto *U : PI->users())
3627            WorkList.push_back(cast<Value>(U));
3628          Put(PI, V);
3629          PI->replaceAllUsesWith(V);
3630          if (auto *PHI = dyn_cast<PHINode>(PI))
3631            AllPhiNodes.erase(PHI);
3632          if (auto *Select = dyn_cast<SelectInst>(PI))
3633            AllSelectNodes.erase(Select);
3634          PI->eraseFromParent();
3635        }
3636    }
3637    return Get(Val);
3638  }
3639
3640  void Put(Value *From, Value *To) { Storage.insert({From, To}); }
3641
3642  void ReplacePhi(PHINode *From, PHINode *To) {
3643    Value *OldReplacement = Get(From);
3644    while (OldReplacement != From) {
3645      From = To;
3646      To = dyn_cast<PHINode>(OldReplacement);
3647      OldReplacement = Get(From);
3648    }
3649    assert(To && Get(To) == To && "Replacement PHI node is already replaced.");
3650    Put(From, To);
3651    From->replaceAllUsesWith(To);
3652    AllPhiNodes.erase(From);
3653    From->eraseFromParent();
3654  }
3655
3656  PhiNodeSet &newPhiNodes() { return AllPhiNodes; }
3657
3658  void insertNewPhi(PHINode *PN) { AllPhiNodes.insert(PN); }
3659
3660  void insertNewSelect(SelectInst *SI) { AllSelectNodes.insert(SI); }
3661
3662  unsigned countNewPhiNodes() const { return AllPhiNodes.size(); }
3663
3664  unsigned countNewSelectNodes() const { return AllSelectNodes.size(); }
3665
3666  void destroyNewNodes(Type *CommonType) {
3667    // For safe erasing, replace the uses with dummy value first.
3668    auto *Dummy = PoisonValue::get(CommonType);
3669    for (auto *I : AllPhiNodes) {
3670      I->replaceAllUsesWith(Dummy);
3671      I->eraseFromParent();
3672    }
3673    AllPhiNodes.clear();
3674    for (auto *I : AllSelectNodes) {
3675      I->replaceAllUsesWith(Dummy);
3676      I->eraseFromParent();
3677    }
3678    AllSelectNodes.clear();
3679  }
3680};
3681
3682/// A helper class for combining addressing modes.
3683class AddressingModeCombiner {
3684  typedef DenseMap<Value *, Value *> FoldAddrToValueMapping;
3685  typedef std::pair<PHINode *, PHINode *> PHIPair;
3686
3687private:
3688  /// The addressing modes we've collected.
3689  SmallVector<ExtAddrMode, 16> AddrModes;
3690
3691  /// The field in which the AddrModes differ, when we have more than one.
3692  ExtAddrMode::FieldName DifferentField = ExtAddrMode::NoField;
3693
3694  /// Are the AddrModes that we have all just equal to their original values?
3695  bool AllAddrModesTrivial = true;
3696
3697  /// Common Type for all different fields in addressing modes.
3698  Type *CommonType = nullptr;
3699
3700  /// SimplifyQuery for simplifyInstruction utility.
3701  const SimplifyQuery &SQ;
3702
3703  /// Original Address.
3704  Value *Original;
3705
3706  /// Common value among addresses
3707  Value *CommonValue = nullptr;
3708
3709public:
3710  AddressingModeCombiner(const SimplifyQuery &_SQ, Value *OriginalValue)
3711      : SQ(_SQ), Original(OriginalValue) {}
3712
3713  ~AddressingModeCombiner() { eraseCommonValueIfDead(); }
3714
3715  /// Get the combined AddrMode
3716  const ExtAddrMode &getAddrMode() const { return AddrModes[0]; }
3717
3718  /// Add a new AddrMode if it's compatible with the AddrModes we already
3719  /// have.
3720  /// \return True iff we succeeded in doing so.
3721  bool addNewAddrMode(ExtAddrMode &NewAddrMode) {
3722    // Take note of whether we have any non-trivial AddrModes, as we need to
3723    // detect when all AddrModes are trivial, since then we would introduce a phi
3724    // or select which just duplicates what's already there.
3725    AllAddrModesTrivial = AllAddrModesTrivial && NewAddrMode.isTrivial();
3726
3727    // If this is the first addrmode then everything is fine.
3728    if (AddrModes.empty()) {
3729      AddrModes.emplace_back(NewAddrMode);
3730      return true;
3731    }
3732
3733    // Figure out how different this is from the other address modes, which we
3734    // can do just by comparing against the first one given that we only care
3735    // about the cumulative difference.
3736    ExtAddrMode::FieldName ThisDifferentField =
3737        AddrModes[0].compare(NewAddrMode);
3738    if (DifferentField == ExtAddrMode::NoField)
3739      DifferentField = ThisDifferentField;
3740    else if (DifferentField != ThisDifferentField)
3741      DifferentField = ExtAddrMode::MultipleFields;
3742
3743    // If NewAddrMode differs in more than one dimension we cannot handle it.
3744    bool CanHandle = DifferentField != ExtAddrMode::MultipleFields;
3745
3746    // If Scale Field is different then we reject.
3747    CanHandle = CanHandle && DifferentField != ExtAddrMode::ScaleField;
3748
3749    // We must also reject the case where the base offset differs and the
3750    // scaled register is not null: we cannot handle it, because the merge of
3751    // the different offsets would have to be used as the ScaledReg.
3752    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseOffsField ||
3753                              !NewAddrMode.ScaledReg);
3754
3755    // We must also reject the case where the GV differs and a BaseReg is set,
3756    // because we want to use the base register as the merge of the GV values.
3757    CanHandle = CanHandle && (DifferentField != ExtAddrMode::BaseGVField ||
3758                              !NewAddrMode.HasBaseReg);
3759
3760    // Even if NewAddrMode is the same, we still need to collect it, because
3761    // its original value is different, and later we will need all original
3762    // values as anchors when finding the common Phi node.
3763    if (CanHandle)
3764      AddrModes.emplace_back(NewAddrMode);
3765    else
3766      AddrModes.clear();
3767
3768    return CanHandle;
3769  }
3770
3771  /// Combine the addressing modes we've collected into a single
3772  /// addressing mode.
3773  /// \return True iff we successfully combined them or we only had one so
3774  /// didn't need to combine them anyway.
3775  bool combineAddrModes() {
3776    // If we have no AddrModes then they can't be combined.
3777    if (AddrModes.size() == 0)
3778      return false;
3779
3780    // A single AddrMode can trivially be combined.
3781    if (AddrModes.size() == 1 || DifferentField == ExtAddrMode::NoField)
3782      return true;
3783
3784    // If the AddrModes we collected are all just equal to the value they are
3785    // derived from then combining them wouldn't do anything useful.
3786    if (AllAddrModesTrivial)
3787      return false;
3788
3789    if (!addrModeCombiningAllowed())
3790      return false;
3791
3792    // Build a map from each original address value to the value of the
3793    // differing field for that address.
3794    // Bail out if there is no common type.
3795    FoldAddrToValueMapping Map;
3796    if (!initializeMap(Map))
3797      return false;
3798
3799    CommonValue = findCommon(Map);
3800    if (CommonValue)
3801      AddrModes[0].SetCombinedField(DifferentField, CommonValue, AddrModes);
3802    return CommonValue != nullptr;
3803  }
3804
3805private:
3806  /// `CommonValue` may be a placeholder inserted by us.
3807  /// If the placeholder is not used, we should remove this dead instruction.
3808  void eraseCommonValueIfDead() {
3809    if (CommonValue && CommonValue->getNumUses() == 0)
3810      if (Instruction *CommonInst = dyn_cast<Instruction>(CommonValue))
3811        CommonInst->eraseFromParent();
3812  }
3813
3814  /// Initialize Map with anchor values. For each address seen,
3815  /// we record the value of the differing field in that address.
3816  /// At the same time we find a common type for the differing field that we
3817  /// will use to create new Phi/Select nodes. Keep it in the CommonType field.
3818  /// Return false if no common type is found.
3819  bool initializeMap(FoldAddrToValueMapping &Map) {
3820    // Keep track of keys where the value is null. We will need to replace it
3821    // with constant null when we know the common type.
3822    SmallVector<Value *, 2> NullValue;
3823    Type *IntPtrTy = SQ.DL.getIntPtrType(AddrModes[0].OriginalValue->getType());
3824    for (auto &AM : AddrModes) {
3825      Value *DV = AM.GetFieldAsValue(DifferentField, IntPtrTy);
3826      if (DV) {
3827        auto *Type = DV->getType();
3828        if (CommonType && CommonType != Type)
3829          return false;
3830        CommonType = Type;
3831        Map[AM.OriginalValue] = DV;
3832      } else {
3833        NullValue.push_back(AM.OriginalValue);
3834      }
3835    }
3836    assert(CommonType && "At least one non-null value must be!");
3837    for (auto *V : NullValue)
3838      Map[V] = Constant::getNullValue(CommonType);
3839    return true;
3840  }
3841
3842  /// We have a mapping from a value A to another value B, where B was a field
3843  /// in the addressing mode represented by A. We also have an original value C
3844  /// representing the address we start with. Traversing from C through phis and
3845  /// selects, we ended up with the A's in the map. This utility tries to find a
3846  /// value V which is a field in the addressing mode of C such that, traversing
3847  /// through phi nodes and selects, we end up at the corresponding values B in
3848  /// the map. The utility will create new Phis/Selects if needed.
3849  // The simple example looks as follows:
3850  // BB1:
3851  //   p1 = b1 + 40
3852  //   br cond BB2, BB3
3853  // BB2:
3854  //   p2 = b2 + 40
3855  //   br BB3
3856  // BB3:
3857  //   p = phi [p1, BB1], [p2, BB2]
3858  //   v = load p
3859  // Map is
3860  //   p1 -> b1
3861  //   p2 -> b2
3862  // Request is
3863  //   p -> ?
3864  // The function tries to find or build phi [b1, BB1], [b2, BB2] in BB3.
3865  Value *findCommon(FoldAddrToValueMapping &Map) {
3866    // Tracks the simplification of newly created phi nodes. The reason we use
3867    // this mapping is that we will add newly created Phi nodes to AddrToBase.
3868    // Simplification of Phi nodes is recursive, so some Phi nodes may
3869    // be simplified after we added them to AddrToBase. In practice this
3870    // simplification is possible only if the original phis/selects were not
3871    // simplified yet.
3872    // Using this mapping we can find the current value in AddrToBase.
3873    SimplificationTracker ST(SQ);
3874
3875    // First step, DFS to create PHI nodes for all intermediate blocks.
3876    // Also fill traverse order for the second step.
3877    SmallVector<Value *, 32> TraverseOrder;
3878    InsertPlaceholders(Map, TraverseOrder, ST);
3879
3880    // Second Step, fill new nodes by merged values and simplify if possible.
3881    FillPlaceholders(Map, TraverseOrder, ST);
3882
3883    if (!AddrSinkNewSelects && ST.countNewSelectNodes() > 0) {
3884      ST.destroyNewNodes(CommonType);
3885      return nullptr;
3886    }
3887
3888    // Now we'd like to match the new Phi nodes to existing ones.
3889    unsigned PhiNotMatchedCount = 0;
3890    if (!MatchPhiSet(ST, AddrSinkNewPhis, PhiNotMatchedCount)) {
3891      ST.destroyNewNodes(CommonType);
3892      return nullptr;
3893    }
3894
3895    auto *Result = ST.Get(Map.find(Original)->second);
3896    if (Result) {
3897      NumMemoryInstsPhiCreated += ST.countNewPhiNodes() + PhiNotMatchedCount;
3898      NumMemoryInstsSelectCreated += ST.countNewSelectNodes();
3899    }
3900    return Result;
3901  }
3902
3903  /// Try to match PHI node to Candidate.
3904  /// Matcher tracks the matched Phi nodes.
3905  bool MatchPhiNode(PHINode *PHI, PHINode *Candidate,
3906                    SmallSetVector<PHIPair, 8> &Matcher,
3907                    PhiNodeSet &PhiNodesToMatch) {
3908    SmallVector<PHIPair, 8> WorkList;
3909    Matcher.insert({PHI, Candidate});
3910    SmallSet<PHINode *, 8> MatchedPHIs;
3911    MatchedPHIs.insert(PHI);
3912    WorkList.push_back({PHI, Candidate});
3913    SmallSet<PHIPair, 8> Visited;
3914    while (!WorkList.empty()) {
3915      auto Item = WorkList.pop_back_val();
3916      if (!Visited.insert(Item).second)
3917        continue;
3918      // We iterate over all incoming values of the Phi to compare them.
3919      // If the values differ, both of them are Phis, the first one is a
3920      // Phi we added (subject to match), and both are in the same basic
3921      // block, then we can match our pair if their values match. So we state
3922      // that these values match and add the pair to the work list to verify it.
3923      for (auto *B : Item.first->blocks()) {
3924        Value *FirstValue = Item.first->getIncomingValueForBlock(B);
3925        Value *SecondValue = Item.second->getIncomingValueForBlock(B);
3926        if (FirstValue == SecondValue)
3927          continue;
3928
3929        PHINode *FirstPhi = dyn_cast<PHINode>(FirstValue);
3930        PHINode *SecondPhi = dyn_cast<PHINode>(SecondValue);
3931
3932        // If one of them is not a Phi, or
3933        // the first one is not a Phi node from the set we'd like to match, or
3934        // the Phi nodes are from different basic blocks, then
3935        // we will not be able to match.
3936        if (!FirstPhi || !SecondPhi || !PhiNodesToMatch.count(FirstPhi) ||
3937            FirstPhi->getParent() != SecondPhi->getParent())
3938          return false;
3939
3940        // If we already matched them then continue.
3941        if (Matcher.count({FirstPhi, SecondPhi}))
3942          continue;
3943        // So the values are different and do not match. So we need them to
3944        // match. (But we register no more than one match per PHI node, so that
3945        // we won't later try to replace them twice.)
3946        if (MatchedPHIs.insert(FirstPhi).second)
3947          Matcher.insert({FirstPhi, SecondPhi});
3948        // But we must check it.
3949        WorkList.push_back({FirstPhi, SecondPhi});
3950      }
3951    }
3952    return true;
3953  }
3954
3955  /// For the given set of PHI nodes (in the SimplificationTracker) try
3956  /// to find their equivalents.
3957  /// Returns false if this matching fails and creation of new Phi is disabled.
3958  bool MatchPhiSet(SimplificationTracker &ST, bool AllowNewPhiNodes,
3959                   unsigned &PhiNotMatchedCount) {
3960    // Matched and PhiNodesToMatch iterate their elements in a deterministic
3961    // order, so the replacements (ReplacePhi) are also done in a deterministic
3962    // order.
3963    SmallSetVector<PHIPair, 8> Matched;
3964    SmallPtrSet<PHINode *, 8> WillNotMatch;
3965    PhiNodeSet &PhiNodesToMatch = ST.newPhiNodes();
3966    while (PhiNodesToMatch.size()) {
3967      PHINode *PHI = *PhiNodesToMatch.begin();
3968
3969      // Add ourselves: if no Phi node in the basic block matches, we do not match.
3970      WillNotMatch.clear();
3971      WillNotMatch.insert(PHI);
3972
3973      // Traverse all Phis until we find an equivalent one or fail to do so.
3974      bool IsMatched = false;
3975      for (auto &P : PHI->getParent()->phis()) {
3976        // Skip new Phi nodes.
3977        if (PhiNodesToMatch.count(&P))
3978          continue;
3979        if ((IsMatched = MatchPhiNode(PHI, &P, Matched, PhiNodesToMatch)))
3980          break;
3981        // If it does not match, collect all Phi nodes from the matcher;
3982        // if we end up with no match, then all these Phi nodes will not match
3983        // later either.
3984        for (auto M : Matched)
3985          WillNotMatch.insert(M.first);
3986        Matched.clear();
3987      }
3988      if (IsMatched) {
3989        // Replace all matched values and erase them.
3990        for (auto MV : Matched)
3991          ST.ReplacePhi(MV.first, MV.second);
3992        Matched.clear();
3993        continue;
3994      }
3995      // If we are not allowed to create new nodes then bail out.
3996      if (!AllowNewPhiNodes)
3997        return false;
3998      // Just remove all seen values in matcher. They will not match anything.
3999      PhiNotMatchedCount += WillNotMatch.size();
4000      for (auto *P : WillNotMatch)
4001        PhiNodesToMatch.erase(P);
4002    }
4003    return true;
4004  }
4005  /// Fill the placeholders with values from predecessors and simplify them.
4006  void FillPlaceholders(FoldAddrToValueMapping &Map,
4007                        SmallVectorImpl<Value *> &TraverseOrder,
4008                        SimplificationTracker &ST) {
4009    while (!TraverseOrder.empty()) {
4010      Value *Current = TraverseOrder.pop_back_val();
4011      assert(Map.contains(Current) && "No node to fill!!!");
4012      Value *V = Map[Current];
4013
4014      if (SelectInst *Select = dyn_cast<SelectInst>(V)) {
4015        // CurrentValue also must be Select.
4016        auto *CurrentSelect = cast<SelectInst>(Current);
4017        auto *TrueValue = CurrentSelect->getTrueValue();
4018        assert(Map.contains(TrueValue) && "No True Value!");
4019        Select->setTrueValue(ST.Get(Map[TrueValue]));
4020        auto *FalseValue = CurrentSelect->getFalseValue();
4021        assert(Map.contains(FalseValue) && "No False Value!");
4022        Select->setFalseValue(ST.Get(Map[FalseValue]));
4023      } else {
4024        // Must be a Phi node then.
4025        auto *PHI = cast<PHINode>(V);
4026        // Fill the Phi node with values from predecessors.
4027        for (auto *B : predecessors(PHI->getParent())) {
4028          Value *PV = cast<PHINode>(Current)->getIncomingValueForBlock(B);
4029          assert(Map.contains(PV) && "No predecessor Value!");
4030          PHI->addIncoming(ST.Get(Map[PV]), B);
4031        }
4032      }
4033      Map[Current] = ST.Simplify(V);
4034    }
4035  }
4036
4037  /// Starting from the original value, recursively iterates over the def-use
4038  /// chain up to the known ending values represented in the map. For each
4039  /// traversed phi/select, inserts a placeholder Phi or Select.
4040  /// Reports all newly created Phi/Select nodes by adding them to the set.
4041  /// Also reports the order in which the values have been traversed.
4042  void InsertPlaceholders(FoldAddrToValueMapping &Map,
4043                          SmallVectorImpl<Value *> &TraverseOrder,
4044                          SimplificationTracker &ST) {
4045    SmallVector<Value *, 32> Worklist;
4046    assert((isa<PHINode>(Original) || isa<SelectInst>(Original)) &&
4047           "Address must be a Phi or Select node");
4048    auto *Dummy = PoisonValue::get(CommonType);
4049    Worklist.push_back(Original);
4050    while (!Worklist.empty()) {
4051      Value *Current = Worklist.pop_back_val();
4052      // If it has already been visited or it is an ending value, then skip it.
4053      if (Map.contains(Current))
4054        continue;
4055      TraverseOrder.push_back(Current);
4056
4057      // CurrentValue must be a Phi node or select. All others must be covered
4058      // by anchors.
4059      if (SelectInst *CurrentSelect = dyn_cast<SelectInst>(Current)) {
4060        // Is it OK to get metadata from OrigSelect?!
4061        // Create a Select placeholder with dummy value.
4062        SelectInst *Select = SelectInst::Create(
4063            CurrentSelect->getCondition(), Dummy, Dummy,
4064            CurrentSelect->getName(), CurrentSelect, CurrentSelect);
4065        Map[Current] = Select;
4066        ST.insertNewSelect(Select);
4067        // We are interested in True and False values.
4068        Worklist.push_back(CurrentSelect->getTrueValue());
4069        Worklist.push_back(CurrentSelect->getFalseValue());
4070      } else {
4071        // It must be a Phi node then.
4072        PHINode *CurrentPhi = cast<PHINode>(Current);
4073        unsigned PredCount = CurrentPhi->getNumIncomingValues();
4074        PHINode *PHI =
4075            PHINode::Create(CommonType, PredCount, "sunk_phi", CurrentPhi);
4076        Map[Current] = PHI;
4077        ST.insertNewPhi(PHI);
4078        append_range(Worklist, CurrentPhi->incoming_values());
4079      }
4080    }
4081  }
4082
4083  bool addrModeCombiningAllowed() {
4084    if (DisableComplexAddrModes)
4085      return false;
4086    switch (DifferentField) {
4087    default:
4088      return false;
4089    case ExtAddrMode::BaseRegField:
4090      return AddrSinkCombineBaseReg;
4091    case ExtAddrMode::BaseGVField:
4092      return AddrSinkCombineBaseGV;
4093    case ExtAddrMode::BaseOffsField:
4094      return AddrSinkCombineBaseOffs;
4095    case ExtAddrMode::ScaledRegField:
4096      return AddrSinkCombineScaledReg;
4097    }
4098  }
4099};
4100} // end anonymous namespace
4101
4102/// Try adding ScaleReg*Scale to the current addressing mode.
4103/// Return true and update AddrMode if this addr mode is legal for the target,
4104/// false if not.
4105bool AddressingModeMatcher::matchScaledValue(Value *ScaleReg, int64_t Scale,
4106                                             unsigned Depth) {
4107  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
4108  // mode.  Just process that directly.
4109  if (Scale == 1)
4110    return matchAddr(ScaleReg, Depth);
4111
4112  // If the scale is 0, it takes nothing to add this.
4113  if (Scale == 0)
4114    return true;
4115
4116  // If we already have a scale of this value, we can add to it, otherwise, we
4117  // need an available scale field.
4118  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
4119    return false;
4120
4121  ExtAddrMode TestAddrMode = AddrMode;
4122
4123  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
4124  // [A+B + A*7] -> [B+A*8].
4125  TestAddrMode.Scale += Scale;
4126  TestAddrMode.ScaledReg = ScaleReg;
4127
4128  // If the new address isn't legal, bail out.
4129  if (!TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace))
4130    return false;
4131
4132  // It was legal, so commit it.
4133  AddrMode = TestAddrMode;
4134
4135  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
4136  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
4137  // X*Scale + C*Scale to the addr mode. If we found an available IV increment,
4138  // do not go any further: we can reuse it and cannot eliminate it.
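  // Illustrative sketch (hypothetical values): with ScaleReg = (add i64 %x, 4)
  // and Scale = 2, the mode becomes ScaledReg = %x, Scale = 2, and BaseOffs is
  // increased by 8, provided the target accepts the resulting mode.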
4139  ConstantInt *CI = nullptr;
4140  Value *AddLHS = nullptr;
4141  if (isa<Instruction>(ScaleReg) && // not a constant expr.
4142      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI))) &&
4143      !isIVIncrement(ScaleReg, &LI) && CI->getValue().isSignedIntN(64)) {
4144    TestAddrMode.InBounds = false;
4145    TestAddrMode.ScaledReg = AddLHS;
4146    TestAddrMode.BaseOffs += CI->getSExtValue() * TestAddrMode.Scale;
4147
4148    // If this addressing mode is legal, commit it and remember that we folded
4149    // this instruction.
4150    if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace)) {
4151      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
4152      AddrMode = TestAddrMode;
4153      return true;
4154    }
4155    // Restore status quo.
4156    TestAddrMode = AddrMode;
4157  }
4158
4159  // If this is an add recurrence with a constant step, return the increment
4160  // instruction and the canonicalized step.
4161  auto GetConstantStep =
4162      [this](const Value *V) -> std::optional<std::pair<Instruction *, APInt>> {
4163    auto *PN = dyn_cast<PHINode>(V);
4164    if (!PN)
4165      return std::nullopt;
4166    auto IVInc = getIVIncrement(PN, &LI);
4167    if (!IVInc)
4168      return std::nullopt;
    // TODO: The result of the intrinsics above is two's complement. However,
    // when the IV increment is expressed as an add or sub, iv.next is
    // potentially a poison value. If it has nuw or nsw flags, we need to make
    // sure that these flags are inferrable at the point of the memory
    // instruction. Otherwise we would be replacing a well-defined two's
    // complement computation with poison. Currently, to avoid the potentially
    // complex analysis needed to prove this, we reject such cases.
4175    if (auto *OIVInc = dyn_cast<OverflowingBinaryOperator>(IVInc->first))
4176      if (OIVInc->hasNoSignedWrap() || OIVInc->hasNoUnsignedWrap())
4177        return std::nullopt;
4178    if (auto *ConstantStep = dyn_cast<ConstantInt>(IVInc->second))
4179      return std::make_pair(IVInc->first, ConstantStep->getValue());
4180    return std::nullopt;
4181  };
4182
  // Try to account for the following special case:
  // 1. ScaleReg is an induction variable;
  // 2. We use it with a non-zero offset;
  // 3. The IV's increment is available at the point of the memory instruction.
  //
  // In this case, we may reuse the IV increment instead of the IV Phi to
  // achieve the following advantages:
  // 1. If the IV step matches the offset, we will have no need for the offset;
  // 2. Even if they don't match, we will reduce the overlap of the live ranges
  //    of the IV and the IV increment, which will potentially lead to better
  //    register assignment.
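  // For illustration (hypothetical IR): with
  //   %iv      = phi i64 [ 0, %ph ], [ %iv.next, %loop ]
  //   %iv.next = add i64 %iv, 1
  // and an access at [%base + %iv * 8 + 8], rewriting the scaled register to
  // %iv.next gives [%base + %iv.next * 8], eliminating the offset entirely.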
4194  if (AddrMode.BaseOffs) {
4195    if (auto IVStep = GetConstantStep(ScaleReg)) {
4196      Instruction *IVInc = IVStep->first;
      // The following assert is important to ensure a lack of infinite loops.
      // This transform is (intentionally) the inverse of the one just above.
      // If they don't agree on the definition of an increment, we'd alternate
      // back and forth indefinitely.
4201      assert(isIVIncrement(IVInc, &LI) && "implied by GetConstantStep");
4202      APInt Step = IVStep->second;
4203      APInt Offset = Step * AddrMode.Scale;
4204      if (Offset.isSignedIntN(64)) {
4205        TestAddrMode.InBounds = false;
4206        TestAddrMode.ScaledReg = IVInc;
4207        TestAddrMode.BaseOffs -= Offset.getLimitedValue();
        // If this addressing mode is legal, commit it.
        // (Note that we defer the (expensive) domtree-based legality check
        // to the very last possible point.)
4211        if (TLI.isLegalAddressingMode(DL, TestAddrMode, AccessTy, AddrSpace) &&
4212            getDTFn().dominates(IVInc, MemoryInst)) {
4213          AddrModeInsts.push_back(cast<Instruction>(IVInc));
4214          AddrMode = TestAddrMode;
4215          return true;
4216        }
4217        // Restore status quo.
4218        TestAddrMode = AddrMode;
4219      }
4220    }
4221  }
4222
4223  // Otherwise, just return what we have.
4224  return true;
4225}
4226
/// This is a little filter, which returns true if an addressing computation
/// involving I might be folded into a load/store accessing it.
/// This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that matchOperationAddr can.
4231static bool MightBeFoldableInst(Instruction *I) {
4232  switch (I->getOpcode()) {
4233  case Instruction::BitCast:
4234  case Instruction::AddrSpaceCast:
4235    // Don't touch identity bitcasts.
4236    if (I->getType() == I->getOperand(0)->getType())
4237      return false;
4238    return I->getType()->isIntOrPtrTy();
4239  case Instruction::PtrToInt:
4240    // PtrToInt is always a noop, as we know that the int type is pointer sized.
4241    return true;
4242  case Instruction::IntToPtr:
4243    // We know the input is intptr_t, so this is foldable.
4244    return true;
4245  case Instruction::Add:
4246    return true;
4247  case Instruction::Mul:
4248  case Instruction::Shl:
4249    // Can only handle X*C and X << C.
4250    return isa<ConstantInt>(I->getOperand(1));
4251  case Instruction::GetElementPtr:
4252    return true;
4253  default:
4254    return false;
4255  }
4256}
4257
4258/// Check whether or not \p Val is a legal instruction for \p TLI.
4259/// \note \p Val is assumed to be the product of some type promotion.
4260/// Therefore if \p Val has an undefined state in \p TLI, this is assumed
4261/// to be legal, as the non-promoted value would have had the same state.
4262static bool isPromotedInstructionLegal(const TargetLowering &TLI,
4263                                       const DataLayout &DL, Value *Val) {
4264  Instruction *PromotedInst = dyn_cast<Instruction>(Val);
4265  if (!PromotedInst)
4266    return false;
4267  int ISDOpcode = TLI.InstructionOpcodeToISD(PromotedInst->getOpcode());
4268  // If the ISDOpcode is undefined, it was undefined before the promotion.
4269  if (!ISDOpcode)
4270    return true;
4271  // Otherwise, check if the promoted instruction is legal or not.
4272  return TLI.isOperationLegalOrCustom(
4273      ISDOpcode, TLI.getValueType(DL, PromotedInst->getType()));
4274}
4275
4276namespace {
4277
/// Helper class to perform type promotion.
4279class TypePromotionHelper {
4280  /// Utility function to add a promoted instruction \p ExtOpnd to
4281  /// \p PromotedInsts and record the type of extension we have seen.
4282  static void addPromotedInst(InstrToOrigTy &PromotedInsts,
4283                              Instruction *ExtOpnd, bool IsSExt) {
4284    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4285    InstrToOrigTy::iterator It = PromotedInsts.find(ExtOpnd);
4286    if (It != PromotedInsts.end()) {
      // If the new extension is the same as the original, the information in
      // PromotedInsts[ExtOpnd] is still correct.
4289      if (It->second.getInt() == ExtTy)
4290        return;
4291
      // The new extension is different from the old one, so we invalidate the
      // type information by setting the extension type to BothExtension.
4295      ExtTy = BothExtension;
4296    }
4297    PromotedInsts[ExtOpnd] = TypeIsSExt(ExtOpnd->getType(), ExtTy);
4298  }
4299
4300  /// Utility function to query the original type of instruction \p Opnd
4301  /// with a matched extension type. If the extension doesn't match, we
4302  /// cannot use the information we had on the original type.
4303  /// BothExtension doesn't match any extension type.
4304  static const Type *getOrigType(const InstrToOrigTy &PromotedInsts,
4305                                 Instruction *Opnd, bool IsSExt) {
4306    ExtType ExtTy = IsSExt ? SignExtension : ZeroExtension;
4307    InstrToOrigTy::const_iterator It = PromotedInsts.find(Opnd);
4308    if (It != PromotedInsts.end() && It->second.getInt() == ExtTy)
4309      return It->second.getPointer();
4310    return nullptr;
4311  }
4312
4313  /// Utility function to check whether or not a sign or zero extension
4314  /// of \p Inst with \p ConsideredExtType can be moved through \p Inst by
4315  /// either using the operands of \p Inst or promoting \p Inst.
4316  /// The type of the extension is defined by \p IsSExt.
4317  /// In other words, check if:
4318  /// ext (Ty Inst opnd1 opnd2 ... opndN) to ConsideredExtType.
4319  /// #1 Promotion applies:
4320  /// ConsideredExtType Inst (ext opnd1 to ConsideredExtType, ...).
4321  /// #2 Operand reuses:
4322  /// ext opnd1 to ConsideredExtType.
4323  /// \p PromotedInsts maps the instructions to their type before promotion.
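  /// For illustration (a hypothetical instance of #2): in
  ///   sext i32 (trunc i64 (sext i16 %v to i64) to i32) to i64
  /// the truncate only drops bits that were sign-extended from %v, so the
  /// whole chain can be folded back into the existing sext i16 %v to i64.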
4324  static bool canGetThrough(const Instruction *Inst, Type *ConsideredExtType,
4325                            const InstrToOrigTy &PromotedInsts, bool IsSExt);
4326
4327  /// Utility function to determine if \p OpIdx should be promoted when
4328  /// promoting \p Inst.
4329  static bool shouldExtOperand(const Instruction *Inst, int OpIdx) {
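    // Operand 0 of a select is its boolean condition; it must keep its type.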
4330    return !(isa<SelectInst>(Inst) && OpIdx == 0);
4331  }
4332
4333  /// Utility function to promote the operand of \p Ext when this
4334  /// operand is a promotable trunc or sext or zext.
4335  /// \p PromotedInsts maps the instructions to their type before promotion.
4336  /// \p CreatedInstsCost[out] contains the cost of all instructions
4337  /// created to promote the operand of Ext.
4338  /// Newly added extensions are inserted in \p Exts.
4339  /// Newly added truncates are inserted in \p Truncs.
4340  /// Should never be called directly.
4341  /// \return The promoted value which is used instead of Ext.
4342  static Value *promoteOperandForTruncAndAnyExt(
4343      Instruction *Ext, TypePromotionTransaction &TPT,
4344      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4345      SmallVectorImpl<Instruction *> *Exts,
4346      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI);
4347
4348  /// Utility function to promote the operand of \p Ext when this
4349  /// operand is promotable and is not a supported trunc or sext.
4350  /// \p PromotedInsts maps the instructions to their type before promotion.
4351  /// \p CreatedInstsCost[out] contains the cost of all the instructions
4352  /// created to promote the operand of Ext.
4353  /// Newly added extensions are inserted in \p Exts.
4354  /// Newly added truncates are inserted in \p Truncs.
4355  /// Should never be called directly.
4356  /// \return The promoted value which is used instead of Ext.
4357  static Value *promoteOperandForOther(Instruction *Ext,
4358                                       TypePromotionTransaction &TPT,
4359                                       InstrToOrigTy &PromotedInsts,
4360                                       unsigned &CreatedInstsCost,
4361                                       SmallVectorImpl<Instruction *> *Exts,
4362                                       SmallVectorImpl<Instruction *> *Truncs,
4363                                       const TargetLowering &TLI, bool IsSExt);
4364
4365  /// \see promoteOperandForOther.
4366  static Value *signExtendOperandForOther(
4367      Instruction *Ext, TypePromotionTransaction &TPT,
4368      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4369      SmallVectorImpl<Instruction *> *Exts,
4370      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4371    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4372                                  Exts, Truncs, TLI, true);
4373  }
4374
4375  /// \see promoteOperandForOther.
4376  static Value *zeroExtendOperandForOther(
4377      Instruction *Ext, TypePromotionTransaction &TPT,
4378      InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4379      SmallVectorImpl<Instruction *> *Exts,
4380      SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4381    return promoteOperandForOther(Ext, TPT, PromotedInsts, CreatedInstsCost,
4382                                  Exts, Truncs, TLI, false);
4383  }
4384
4385public:
4386  /// Type for the utility function that promotes the operand of Ext.
4387  using Action = Value *(*)(Instruction *Ext, TypePromotionTransaction &TPT,
4388                            InstrToOrigTy &PromotedInsts,
4389                            unsigned &CreatedInstsCost,
4390                            SmallVectorImpl<Instruction *> *Exts,
4391                            SmallVectorImpl<Instruction *> *Truncs,
4392                            const TargetLowering &TLI);
4393
4394  /// Given a sign/zero extend instruction \p Ext, return the appropriate
4395  /// action to promote the operand of \p Ext instead of using Ext.
4396  /// \return NULL if no promotable action is possible with the current
4397  /// sign extension.
4398  /// \p InsertedInsts keeps track of all the instructions inserted by the
4399  /// other CodeGenPrepare optimizations. This information is important
  /// because we do not want to promote these instructions, as CodeGenPrepare
  /// would reinsert them later, creating an infinite create/remove loop.
4402  /// \p PromotedInsts maps the instructions to their type before promotion.
4403  static Action getAction(Instruction *Ext, const SetOfInstrs &InsertedInsts,
4404                          const TargetLowering &TLI,
4405                          const InstrToOrigTy &PromotedInsts);
4406};
4407
4408} // end anonymous namespace
4409
4410bool TypePromotionHelper::canGetThrough(const Instruction *Inst,
4411                                        Type *ConsideredExtType,
4412                                        const InstrToOrigTy &PromotedInsts,
4413                                        bool IsSExt) {
4414  // The promotion helper does not know how to deal with vector types yet.
4415  // To be able to fix that, we would need to fix the places where we
4416  // statically extend, e.g., constants and such.
4417  if (Inst->getType()->isVectorTy())
4418    return false;
4419
4420  // We can always get through zext.
4421  if (isa<ZExtInst>(Inst))
4422    return true;
4423
4424  // sext(sext) is ok too.
4425  if (IsSExt && isa<SExtInst>(Inst))
4426    return true;
4427
  // We can get through a binary operator if the promotion is legal, i.e., if
  // the operator has the appropriate nuw (for zext) or nsw (for sext) flag.
4430  if (const auto *BinOp = dyn_cast<BinaryOperator>(Inst))
4431    if (isa<OverflowingBinaryOperator>(BinOp) &&
4432        ((!IsSExt && BinOp->hasNoUnsignedWrap()) ||
4433         (IsSExt && BinOp->hasNoSignedWrap())))
4434      return true;
4435
  // ext(and/or(opnd, cst)) --> and/or(ext(opnd), ext(cst))
4437  if ((Inst->getOpcode() == Instruction::And ||
4438       Inst->getOpcode() == Instruction::Or))
4439    return true;
4440
4441  // ext(xor(opnd, cst)) --> xor(ext(opnd), ext(cst))
4442  if (Inst->getOpcode() == Instruction::Xor) {
4443    // Make sure it is not a NOT.
4444    if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))
4445      if (!Cst->getValue().isAllOnes())
4446        return true;
4447  }
4448
  // zext(lshr(opnd, cst)) --> lshr(zext(opnd), zext(cst))
  // This may turn a poison value into a well-defined one, e.g.
  //     zext i32 (lshr i8 %val, 12)  -->  lshr i32 (zext i8 %val), 12
  //          poison value                    regular value
  // which is fine: replacing poison with a regular value is always allowed.
4454  if (Inst->getOpcode() == Instruction::LShr && !IsSExt)
4455    return true;
4456
  // and(ext(shl(opnd, cst)), cst) --> and(shl(ext(opnd), ext(cst)), cst)
  // This may turn a poison value into a well-defined one, e.g.
  //     zext i32 (shl i8 %val, 12)  -->  shl i32 (zext i8 %val), 12
  //          poison value                    regular value
  // which is fine: replacing poison with a regular value is always allowed.
4462  if (Inst->getOpcode() == Instruction::Shl && Inst->hasOneUse()) {
4463    const auto *ExtInst = cast<const Instruction>(*Inst->user_begin());
4464    if (ExtInst->hasOneUse()) {
4465      const auto *AndInst = dyn_cast<const Instruction>(*ExtInst->user_begin());
4466      if (AndInst && AndInst->getOpcode() == Instruction::And) {
4467        const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));
4468        if (Cst &&
4469            Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))
4470          return true;
4471      }
4472    }
4473  }
4474
4475  // Check if we can do the following simplification.
4476  // ext(trunc(opnd)) --> ext(opnd)
4477  if (!isa<TruncInst>(Inst))
4478    return false;
4479
4480  Value *OpndVal = Inst->getOperand(0);
4481  // Check if we can use this operand in the extension.
4482  // If the type is larger than the result type of the extension, we cannot.
4483  if (!OpndVal->getType()->isIntegerTy() ||
4484      OpndVal->getType()->getIntegerBitWidth() >
4485          ConsideredExtType->getIntegerBitWidth())
4486    return false;
4487
4488  // If the operand of the truncate is not an instruction, we will not have
4489  // any information on the dropped bits.
  // (Actually we could for constants, but it is not worth the extra logic.)
4491  Instruction *Opnd = dyn_cast<Instruction>(OpndVal);
4492  if (!Opnd)
4493    return false;
4494
  // Check if the source of the truncate is narrow enough.
  // I.e., check that the trunc just drops extended bits of the same kind as
  // the extension.
4498  // #1 get the type of the operand and check the kind of the extended bits.
4499  const Type *OpndType = getOrigType(PromotedInsts, Opnd, IsSExt);
  if (!OpndType) {
    if ((IsSExt && isa<SExtInst>(Opnd)) || (!IsSExt && isa<ZExtInst>(Opnd)))
      OpndType = Opnd->getOperand(0)->getType();
    else
      return false;
  }
4506
4507  // #2 check that the truncate just drops extended bits.
4508  return Inst->getType()->getIntegerBitWidth() >=
4509         OpndType->getIntegerBitWidth();
4510}
4511
4512TypePromotionHelper::Action TypePromotionHelper::getAction(
4513    Instruction *Ext, const SetOfInstrs &InsertedInsts,
4514    const TargetLowering &TLI, const InstrToOrigTy &PromotedInsts) {
4515  assert((isa<SExtInst>(Ext) || isa<ZExtInst>(Ext)) &&
4516         "Unexpected instruction type");
4517  Instruction *ExtOpnd = dyn_cast<Instruction>(Ext->getOperand(0));
4518  Type *ExtTy = Ext->getType();
4519  bool IsSExt = isa<SExtInst>(Ext);
  // If the operand of the extension is not an instruction, we cannot
  // get through.
  // If it is, check whether we can get through it.
4523  if (!ExtOpnd || !canGetThrough(ExtOpnd, ExtTy, PromotedInsts, IsSExt))
4524    return nullptr;
4525
  // Do not promote if the operand has been added by codegenprepare.
  // Otherwise, it means we are undoing an optimization that is likely to be
  // redone, thus causing a potential infinite loop.
4529  if (isa<TruncInst>(ExtOpnd) && InsertedInsts.count(ExtOpnd))
4530    return nullptr;
4531
  // Sext, zext or trunc instructions.
  // Return the related handler.
4534  if (isa<SExtInst>(ExtOpnd) || isa<TruncInst>(ExtOpnd) ||
4535      isa<ZExtInst>(ExtOpnd))
4536    return promoteOperandForTruncAndAnyExt;
4537
4538  // Regular instruction.
4539  // Abort early if we will have to insert non-free instructions.
4540  if (!ExtOpnd->hasOneUse() && !TLI.isTruncateFree(ExtTy, ExtOpnd->getType()))
4541    return nullptr;
4542  return IsSExt ? signExtendOperandForOther : zeroExtendOperandForOther;
4543}
4544
4545Value *TypePromotionHelper::promoteOperandForTruncAndAnyExt(
4546    Instruction *SExt, TypePromotionTransaction &TPT,
4547    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4548    SmallVectorImpl<Instruction *> *Exts,
4549    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI) {
4550  // By construction, the operand of SExt is an instruction. Otherwise we cannot
4551  // get through it and this method should not be called.
4552  Instruction *SExtOpnd = cast<Instruction>(SExt->getOperand(0));
4553  Value *ExtVal = SExt;
4554  bool HasMergedNonFreeExt = false;
4555  if (isa<ZExtInst>(SExtOpnd)) {
4556    // Replace s|zext(zext(opnd))
4557    // => zext(opnd).
4558    HasMergedNonFreeExt = !TLI.isExtFree(SExtOpnd);
4559    Value *ZExt =
4560        TPT.createZExt(SExt, SExtOpnd->getOperand(0), SExt->getType());
4561    TPT.replaceAllUsesWith(SExt, ZExt);
4562    TPT.eraseInstruction(SExt);
4563    ExtVal = ZExt;
4564  } else {
4565    // Replace z|sext(trunc(opnd)) or sext(sext(opnd))
4566    // => z|sext(opnd).
4567    TPT.setOperand(SExt, 0, SExtOpnd->getOperand(0));
4568  }
4569  CreatedInstsCost = 0;
4570
4571  // Remove dead code.
4572  if (SExtOpnd->use_empty())
4573    TPT.eraseInstruction(SExtOpnd);
4574
4575  // Check if the extension is still needed.
4576  Instruction *ExtInst = dyn_cast<Instruction>(ExtVal);
4577  if (!ExtInst || ExtInst->getType() != ExtInst->getOperand(0)->getType()) {
4578    if (ExtInst) {
4579      if (Exts)
4580        Exts->push_back(ExtInst);
4581      CreatedInstsCost = !TLI.isExtFree(ExtInst) && !HasMergedNonFreeExt;
4582    }
4583    return ExtVal;
4584  }
4585
4586  // At this point we have: ext ty opnd to ty.
4587  // Reassign the uses of ExtInst to the opnd and remove ExtInst.
4588  Value *NextVal = ExtInst->getOperand(0);
4589  TPT.eraseInstruction(ExtInst, NextVal);
4590  return NextVal;
4591}
4592
4593Value *TypePromotionHelper::promoteOperandForOther(
4594    Instruction *Ext, TypePromotionTransaction &TPT,
4595    InstrToOrigTy &PromotedInsts, unsigned &CreatedInstsCost,
4596    SmallVectorImpl<Instruction *> *Exts,
4597    SmallVectorImpl<Instruction *> *Truncs, const TargetLowering &TLI,
4598    bool IsSExt) {
4599  // By construction, the operand of Ext is an instruction. Otherwise we cannot
4600  // get through it and this method should not be called.
4601  Instruction *ExtOpnd = cast<Instruction>(Ext->getOperand(0));
4602  CreatedInstsCost = 0;
4603  if (!ExtOpnd->hasOneUse()) {
    // ExtOpnd will be promoted.
    // All its uses except Ext will need to use a truncated value of the
    // promoted version.
    // Create the truncate now.
4608    Value *Trunc = TPT.createTrunc(Ext, ExtOpnd->getType());
4609    if (Instruction *ITrunc = dyn_cast<Instruction>(Trunc)) {
4610      // Insert it just after the definition.
4611      ITrunc->moveAfter(ExtOpnd);
4612      if (Truncs)
4613        Truncs->push_back(ITrunc);
4614    }
4615
4616    TPT.replaceAllUsesWith(ExtOpnd, Trunc);
4617    // Restore the operand of Ext (which has been replaced by the previous call
4618    // to replaceAllUsesWith) to avoid creating a cycle trunc <-> sext.
4619    TPT.setOperand(Ext, 0, ExtOpnd);
4620  }
4621
4622  // Get through the Instruction:
4623  // 1. Update its type.
4624  // 2. Replace the uses of Ext by Inst.
4625  // 3. Extend each operand that needs to be extended.
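  // For illustration (hypothetical IR): promoting the operand of
  //   %a = add i8 %x, 1
  //   %e = sext i8 %a to i32
  // mutates %a into an i32 add of (sext i8 %x to i32) and 1, and rewires all
  // uses of %e to %a, after which %e can be erased.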
4626
  // Remember the original type of the instruction before promotion.
  // This is useful to later know whether the high bits are sign- or
  // zero-extended bits.
4629  addPromotedInst(PromotedInsts, ExtOpnd, IsSExt);
4630  // Step #1.
4631  TPT.mutateType(ExtOpnd, Ext->getType());
4632  // Step #2.
4633  TPT.replaceAllUsesWith(Ext, ExtOpnd);
4634  // Step #3.
4635  LLVM_DEBUG(dbgs() << "Propagate Ext to operands\n");
4636  for (int OpIdx = 0, EndOpIdx = ExtOpnd->getNumOperands(); OpIdx != EndOpIdx;
4637       ++OpIdx) {
4638    LLVM_DEBUG(dbgs() << "Operand:\n" << *(ExtOpnd->getOperand(OpIdx)) << '\n');
4639    if (ExtOpnd->getOperand(OpIdx)->getType() == Ext->getType() ||
4640        !shouldExtOperand(ExtOpnd, OpIdx)) {
4641      LLVM_DEBUG(dbgs() << "No need to propagate\n");
4642      continue;
4643    }
4644    // Check if we can statically extend the operand.
4645    Value *Opnd = ExtOpnd->getOperand(OpIdx);
4646    if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {
4647      LLVM_DEBUG(dbgs() << "Statically extend\n");
4648      unsigned BitWidth = Ext->getType()->getIntegerBitWidth();
4649      APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)
4650                            : Cst->getValue().zext(BitWidth);
4651      TPT.setOperand(ExtOpnd, OpIdx, ConstantInt::get(Ext->getType(), CstVal));
4652      continue;
4653    }
    // UndefValues are typed, so we have to statically extend them.
4655    if (isa<UndefValue>(Opnd)) {
4656      LLVM_DEBUG(dbgs() << "Statically extend\n");
4657      TPT.setOperand(ExtOpnd, OpIdx, UndefValue::get(Ext->getType()));
4658      continue;
4659    }
4660
    // Otherwise we have to explicitly extend the operand.
4662    Value *ValForExtOpnd = IsSExt
4663                               ? TPT.createSExt(ExtOpnd, Opnd, Ext->getType())
4664                               : TPT.createZExt(ExtOpnd, Opnd, Ext->getType());
4665    TPT.setOperand(ExtOpnd, OpIdx, ValForExtOpnd);
4666    Instruction *InstForExtOpnd = dyn_cast<Instruction>(ValForExtOpnd);
4667    if (!InstForExtOpnd)
4668      continue;
4669
4670    if (Exts)
4671      Exts->push_back(InstForExtOpnd);
4672
4673    CreatedInstsCost += !TLI.isExtFree(InstForExtOpnd);
4674  }
4675  LLVM_DEBUG(dbgs() << "Extension is useless now\n");
4676  TPT.eraseInstruction(Ext);
4677  return ExtOpnd;
4678}
4679
4680/// Check whether or not promoting an instruction to a wider type is profitable.
4681/// \p NewCost gives the cost of extension instructions created by the
4682/// promotion.
/// \p OldCost gives the cost of extension instructions before the promotion
/// plus the number of instructions that have been matched in the addressing
/// mode thanks to the promotion.
4686/// \p PromotedOperand is the value that has been promoted.
4687/// \return True if the promotion is profitable, false otherwise.
4688bool AddressingModeMatcher::isPromotionProfitable(
4689    unsigned NewCost, unsigned OldCost, Value *PromotedOperand) const {
4690  LLVM_DEBUG(dbgs() << "OldCost: " << OldCost << "\tNewCost: " << NewCost
4691                    << '\n');
  // If the cost of the new extensions is greater than the cost of the old
  // extension plus what we folded, this is not profitable.
4695  if (NewCost > OldCost)
4696    return false;
4697  if (NewCost < OldCost)
4698    return true;
4699  // The promotion is neutral but it may help folding the sign extension in
4700  // loads for instance.
4701  // Check that we did not create an illegal instruction.
4702  return isPromotedInstructionLegal(TLI, DL, PromotedOperand);
4703}
4704
4705/// Given an instruction or constant expr, see if we can fold the operation
4706/// into the addressing mode. If so, update the addressing mode and return
4707/// true, otherwise return false without modifying AddrMode.
/// If \p MovedAway is not NULL, it is set to indicate whether or not
/// \p AddrInst has to be folded into the addressing mode on success.
/// If \p MovedAway == true, \p AddrInst will not be part of the addressing
/// mode because it has been moved away.
/// Thus AddrInst must not be added to the matched instructions.
4713/// This state can happen when AddrInst is a sext, since it may be moved away.
4714/// Therefore, AddrInst may not be valid when MovedAway is true and it must
4715/// not be referenced anymore.
4716bool AddressingModeMatcher::matchOperationAddr(User *AddrInst, unsigned Opcode,
4717                                               unsigned Depth,
4718                                               bool *MovedAway) {
4719  // Avoid exponential behavior on extremely deep expression trees.
4720  if (Depth >= 5)
4721    return false;
4722
4723  // By default, all matched instructions stay in place.
4724  if (MovedAway)
4725    *MovedAway = false;
4726
4727  switch (Opcode) {
4728  case Instruction::PtrToInt:
4729    // PtrToInt is always a noop, as we know that the int type is pointer sized.
4730    return matchAddr(AddrInst->getOperand(0), Depth);
4731  case Instruction::IntToPtr: {
4732    auto AS = AddrInst->getType()->getPointerAddressSpace();
4733    auto PtrTy = MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
4734    // This inttoptr is a no-op if the integer type is pointer sized.
4735    if (TLI.getValueType(DL, AddrInst->getOperand(0)->getType()) == PtrTy)
4736      return matchAddr(AddrInst->getOperand(0), Depth);
4737    return false;
4738  }
4739  case Instruction::BitCast:
4740    // BitCast is always a noop, and we can handle it as long as it is
4741    // int->int or pointer->pointer (we don't want int<->fp or something).
4742    if (AddrInst->getOperand(0)->getType()->isIntOrPtrTy() &&
4743        // Don't touch identity bitcasts.  These were probably put here by LSR,
4744        // and we don't want to mess around with them.  Assume it knows what it
4745        // is doing.
4746        AddrInst->getOperand(0)->getType() != AddrInst->getType())
4747      return matchAddr(AddrInst->getOperand(0), Depth);
4748    return false;
4749  case Instruction::AddrSpaceCast: {
4750    unsigned SrcAS =
4751        AddrInst->getOperand(0)->getType()->getPointerAddressSpace();
4752    unsigned DestAS = AddrInst->getType()->getPointerAddressSpace();
4753    if (TLI.getTargetMachine().isNoopAddrSpaceCast(SrcAS, DestAS))
4754      return matchAddr(AddrInst->getOperand(0), Depth);
4755    return false;
4756  }
4757  case Instruction::Add: {
4758    // Check to see if we can merge in one operand, then the other.  If so, we
4759    // win.
4760    ExtAddrMode BackupAddrMode = AddrMode;
4761    unsigned OldSize = AddrModeInsts.size();
4762    // Start a transaction at this point.
    // The LHS may match but not the RHS.
    // Therefore, we need a higher level restoration point to undo a partially
    // matched operation.
4766    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4767        TPT.getRestorationPoint();
4768
4769    // Try to match an integer constant second to increase its chance of ending
4770    // up in `BaseOffs`, resp. decrease its chance of ending up in `BaseReg`.
    int First = 0, Second = 1;
    if (isa<ConstantInt>(AddrInst->getOperand(First)) &&
        !isa<ConstantInt>(AddrInst->getOperand(Second)))
      std::swap(First, Second);
4775    AddrMode.InBounds = false;
4776    if (matchAddr(AddrInst->getOperand(First), Depth + 1) &&
4777        matchAddr(AddrInst->getOperand(Second), Depth + 1))
4778      return true;
4779
4780    // Restore the old addr mode info.
4781    AddrMode = BackupAddrMode;
4782    AddrModeInsts.resize(OldSize);
4783    TPT.rollback(LastKnownGood);
4784
4785    // Otherwise this was over-aggressive.  Try merging operands in the opposite
4786    // order.
4787    if (matchAddr(AddrInst->getOperand(Second), Depth + 1) &&
4788        matchAddr(AddrInst->getOperand(First), Depth + 1))
4789      return true;
4790
4791    // Otherwise we definitely can't merge the ADD in.
4792    AddrMode = BackupAddrMode;
4793    AddrModeInsts.resize(OldSize);
4794    TPT.rollback(LastKnownGood);
4795    break;
4796  }
4797  // case Instruction::Or:
4798  //  TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
4799  // break;
4800  case Instruction::Mul:
4801  case Instruction::Shl: {
4802    // Can only handle X*C and X << C.
4803    AddrMode.InBounds = false;
4804    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
4805    if (!RHS || RHS->getBitWidth() > 64)
4806      return false;
4807    int64_t Scale = Opcode == Instruction::Shl
4808                        ? 1LL << RHS->getLimitedValue(RHS->getBitWidth() - 1)
4809                        : RHS->getSExtValue();
4810
4811    return matchScaledValue(AddrInst->getOperand(0), Scale, Depth);
4812  }
4813  case Instruction::GetElementPtr: {
    // Scan the GEP. We handle it only if it contains constant offsets and at
    // most one variable offset.
4816    int VariableOperand = -1;
4817    unsigned VariableScale = 0;
4818
4819    int64_t ConstantOffset = 0;
4820    gep_type_iterator GTI = gep_type_begin(AddrInst);
4821    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
4822      if (StructType *STy = GTI.getStructTypeOrNull()) {
4823        const StructLayout *SL = DL.getStructLayout(STy);
4824        unsigned Idx =
4825            cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
4826        ConstantOffset += SL->getElementOffset(Idx);
4827      } else {
4828        TypeSize TS = GTI.getSequentialElementStride(DL);
4829        if (TS.isNonZero()) {
4830          // The optimisations below currently only work for fixed offsets.
4831          if (TS.isScalable())
4832            return false;
4833          int64_t TypeSize = TS.getFixedValue();
4834          if (ConstantInt *CI =
4835                  dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
4836            const APInt &CVal = CI->getValue();
4837            if (CVal.getSignificantBits() <= 64) {
4838              ConstantOffset += CVal.getSExtValue() * TypeSize;
4839              continue;
4840            }
4841          }
4842          // We only allow one variable index at the moment.
4843          if (VariableOperand != -1)
4844            return false;
4845
4846          // Remember the variable index.
4847          VariableOperand = i;
4848          VariableScale = TypeSize;
4849        }
4850      }
4851    }
4852
4853    // A common case is for the GEP to only do a constant offset.  In this case,
4854    // just add it to the disp field and check validity.
4855    if (VariableOperand == -1) {
4856      AddrMode.BaseOffs += ConstantOffset;
      if (matchAddr(AddrInst->getOperand(0), Depth + 1)) {
        if (!cast<GEPOperator>(AddrInst)->isInBounds())
          AddrMode.InBounds = false;
        return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;

      if (EnableGEPOffsetSplit && isa<GetElementPtrInst>(AddrInst) &&
          TLI.shouldConsiderGEPOffsetSplit() && Depth == 0 &&
          ConstantOffset > 0) {
        // Record GEPs with non-zero offsets as candidates for splitting in
        // the event that the offset cannot fit into the r+i addressing mode.
        // Simple and common case that only one GEP is used in calculating the
        // address for the memory access.
        Value *Base = AddrInst->getOperand(0);
        auto *BaseI = dyn_cast<Instruction>(Base);
        auto *GEP = cast<GetElementPtrInst>(AddrInst);
        if (isa<Argument>(Base) || isa<GlobalValue>(Base) ||
            (BaseI && !isa<CastInst>(BaseI) &&
             !isa<GetElementPtrInst>(BaseI))) {
          // Make sure the parent block allows inserting non-PHI instructions
          // before the terminator.
          BasicBlock *Parent = BaseI ? BaseI->getParent()
                                     : &GEP->getFunction()->getEntryBlock();
          if (!Parent->getTerminator()->isEHPad())
            LargeOffsetGEP = std::make_pair(GEP, ConstantOffset);
        }
      }
4885
4886      return false;
4887    }
4888
4889    // Save the valid addressing mode in case we can't match.
4890    ExtAddrMode BackupAddrMode = AddrMode;
4891    unsigned OldSize = AddrModeInsts.size();
4892
4893    // See if the scale and offset amount is valid for this target.
4894    AddrMode.BaseOffs += ConstantOffset;
4895    if (!cast<GEPOperator>(AddrInst)->isInBounds())
4896      AddrMode.InBounds = false;
4897
4898    // Match the base operand of the GEP.
4899    if (!matchAddr(AddrInst->getOperand(0), Depth + 1)) {
4900      // If it couldn't be matched, just stuff the value in a register.
4901      if (AddrMode.HasBaseReg) {
4902        AddrMode = BackupAddrMode;
4903        AddrModeInsts.resize(OldSize);
4904        return false;
4905      }
4906      AddrMode.HasBaseReg = true;
4907      AddrMode.BaseReg = AddrInst->getOperand(0);
4908    }
4909
4910    // Match the remaining variable portion of the GEP.
4911    if (!matchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
4912                          Depth)) {
4913      // If it couldn't be matched, try stuffing the base into a register
4914      // instead of matching it, and retrying the match of the scale.
4915      AddrMode = BackupAddrMode;
4916      AddrModeInsts.resize(OldSize);
4917      if (AddrMode.HasBaseReg)
4918        return false;
4919      AddrMode.HasBaseReg = true;
4920      AddrMode.BaseReg = AddrInst->getOperand(0);
4921      AddrMode.BaseOffs += ConstantOffset;
4922      if (!matchScaledValue(AddrInst->getOperand(VariableOperand),
4923                            VariableScale, Depth)) {
4924        // If even that didn't work, bail.
4925        AddrMode = BackupAddrMode;
4926        AddrModeInsts.resize(OldSize);
4927        return false;
4928      }
4929    }
4930
4931    return true;
4932  }
4933  case Instruction::SExt:
4934  case Instruction::ZExt: {
4935    Instruction *Ext = dyn_cast<Instruction>(AddrInst);
4936    if (!Ext)
4937      return false;
4938
4939    // Try to move this ext out of the way of the addressing mode.
4940    // Ask for a method for doing so.
4941    TypePromotionHelper::Action TPH =
4942        TypePromotionHelper::getAction(Ext, InsertedInsts, TLI, PromotedInsts);
4943    if (!TPH)
4944      return false;
4945
4946    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
4947        TPT.getRestorationPoint();
4948    unsigned CreatedInstsCost = 0;
4949    unsigned ExtCost = !TLI.isExtFree(Ext);
4950    Value *PromotedOperand =
4951        TPH(Ext, TPT, PromotedInsts, CreatedInstsCost, nullptr, nullptr, TLI);
    // The extension has been moved away.
    // Thus either it will be rematched later in the recursive calls or it is
    // gone. Anyway, we must not fold it into the addressing mode at this point.
4955    // E.g.,
4956    // op = add opnd, 1
4957    // idx = ext op
4958    // addr = gep base, idx
4959    // is now:
4960    // promotedOpnd = ext opnd            <- no match here
4961    // op = promoted_add promotedOpnd, 1  <- match (later in recursive calls)
4962    // addr = gep base, op                <- match
4963    if (MovedAway)
4964      *MovedAway = true;
4965
4966    assert(PromotedOperand &&
4967           "TypePromotionHelper should have filtered out those cases");
4968
4969    ExtAddrMode BackupAddrMode = AddrMode;
4970    unsigned OldSize = AddrModeInsts.size();
4971
4972    if (!matchAddr(PromotedOperand, Depth) ||
4973        // The total of the new cost is equal to the cost of the created
4974        // instructions.
4975        // The total of the old cost is equal to the cost of the extension plus
4976        // what we have saved in the addressing mode.
4977        !isPromotionProfitable(CreatedInstsCost,
4978                               ExtCost + (AddrModeInsts.size() - OldSize),
4979                               PromotedOperand)) {
4980      AddrMode = BackupAddrMode;
4981      AddrModeInsts.resize(OldSize);
4982      LLVM_DEBUG(dbgs() << "Sign extension does not pay off: rollback\n");
4983      TPT.rollback(LastKnownGood);
4984      return false;
4985    }
4986    return true;
4987  }
4988  }
4989  return false;
4990}
4991
4992/// If we can, try to add the value of 'Addr' into the current addressing mode.
4993/// If Addr can't be added to AddrMode this returns false and leaves AddrMode
4994/// unmodified. This assumes that Addr is either a pointer type or intptr_t
4995/// for the target.
4996///
4997bool AddressingModeMatcher::matchAddr(Value *Addr, unsigned Depth) {
4998  // Start a transaction at this point that we will rollback if the matching
4999  // fails.
5000  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5001      TPT.getRestorationPoint();
5002  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
5003    if (CI->getValue().isSignedIntN(64)) {
5004      // Fold in immediates if legal for the target.
5005      AddrMode.BaseOffs += CI->getSExtValue();
5006      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5007        return true;
5008      AddrMode.BaseOffs -= CI->getSExtValue();
5009    }
5010  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
5011    // If this is a global variable, try to fold it into the addressing mode.
5012    if (!AddrMode.BaseGV) {
5013      AddrMode.BaseGV = GV;
5014      if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5015        return true;
5016      AddrMode.BaseGV = nullptr;
5017    }
5018  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
5019    ExtAddrMode BackupAddrMode = AddrMode;
5020    unsigned OldSize = AddrModeInsts.size();
5021
5022    // Check to see if it is possible to fold this operation.
5023    bool MovedAway = false;
5024    if (matchOperationAddr(I, I->getOpcode(), Depth, &MovedAway)) {
5025      // This instruction may have been moved away. If so, there is nothing
5026      // to check here.
5027      if (MovedAway)
5028        return true;
5029      // Okay, it's possible to fold this.  Check to see if it is actually
5030      // *profitable* to do so.  We use a simple cost model to avoid increasing
5031      // register pressure too much.
5032      if (I->hasOneUse() ||
5033          isProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
5034        AddrModeInsts.push_back(I);
5035        return true;
5036      }
5037
5038      // It isn't profitable to do this, roll back.
5039      AddrMode = BackupAddrMode;
5040      AddrModeInsts.resize(OldSize);
5041      TPT.rollback(LastKnownGood);
5042    }
5043  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
5044    if (matchOperationAddr(CE, CE->getOpcode(), Depth))
5045      return true;
5046    TPT.rollback(LastKnownGood);
5047  } else if (isa<ConstantPointerNull>(Addr)) {
5048    // Null pointer gets folded without affecting the addressing mode.
5049    return true;
5050  }
5051
  // Worst case, the target should support [reg] addressing modes. :)
5053  if (!AddrMode.HasBaseReg) {
5054    AddrMode.HasBaseReg = true;
5055    AddrMode.BaseReg = Addr;
5056    // Still check for legality in case the target supports [imm] but not [i+r].
5057    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5058      return true;
5059    AddrMode.HasBaseReg = false;
5060    AddrMode.BaseReg = nullptr;
5061  }
5062
5063  // If the base register is already taken, see if we can do [r+r].
5064  if (AddrMode.Scale == 0) {
5065    AddrMode.Scale = 1;
5066    AddrMode.ScaledReg = Addr;
5067    if (TLI.isLegalAddressingMode(DL, AddrMode, AccessTy, AddrSpace))
5068      return true;
5069    AddrMode.Scale = 0;
5070    AddrMode.ScaledReg = nullptr;
5071  }
5072  // Couldn't match.
5073  TPT.rollback(LastKnownGood);
5074  return false;
5075}
5076
5077/// Check to see if all uses of OpVal by the specified inline asm call are due
5078/// to memory operands. If so, return true, otherwise return false.
5079static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
5080                                    const TargetLowering &TLI,
5081                                    const TargetRegisterInfo &TRI) {
5082  const Function *F = CI->getFunction();
5083  TargetLowering::AsmOperandInfoVector TargetConstraints =
5084      TLI.ParseConstraints(F->getParent()->getDataLayout(), &TRI, *CI);
5085
5086  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5087    // Compute the constraint code and ConstraintType to use.
5088    TLI.ComputeConstraintToUse(OpInfo, SDValue());
5089
5090    // If this asm operand is our Value*, and if it isn't an indirect memory
5091    // operand, we can't fold it!  TODO: Also handle C_Address?
5092    if (OpInfo.CallOperandVal == OpVal &&
5093        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
5094         !OpInfo.isIndirect))
5095      return false;
5096  }
5097
5098  return true;
5099}
5100
5101/// Recursively walk all the uses of I until we find a memory use.
5102/// If we find an obviously non-foldable instruction, return true.
5103/// Add accessed addresses and types to MemoryUses.
5104static bool FindAllMemoryUses(
5105    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5106    SmallPtrSetImpl<Instruction *> &ConsideredInsts, const TargetLowering &TLI,
5107    const TargetRegisterInfo &TRI, bool OptSize, ProfileSummaryInfo *PSI,
5108    BlockFrequencyInfo *BFI, unsigned &SeenInsts) {
5109  // If we already considered this instruction, we're done.
5110  if (!ConsideredInsts.insert(I).second)
5111    return false;
5112
5113  // If this is an obviously unfoldable instruction, bail out.
5114  if (!MightBeFoldableInst(I))
5115    return true;
5116
5117  // Loop over all the uses, recursively processing them.
5118  for (Use &U : I->uses()) {
5119    // Conservatively return true if we're seeing a large number or a deep chain
5120    // of users. This avoids excessive compilation times in pathological cases.
5121    if (SeenInsts++ >= MaxAddressUsersToScan)
5122      return true;
5123
5124    Instruction *UserI = cast<Instruction>(U.getUser());
5125    if (LoadInst *LI = dyn_cast<LoadInst>(UserI)) {
5126      MemoryUses.push_back({&U, LI->getType()});
5127      continue;
5128    }
5129
5130    if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
5131      if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
5132        return true; // Storing addr, not into addr.
5133      MemoryUses.push_back({&U, SI->getValueOperand()->getType()});
5134      continue;
5135    }
5136
5137    if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UserI)) {
5138      if (U.getOperandNo() != AtomicRMWInst::getPointerOperandIndex())
5139        return true; // Storing addr, not into addr.
5140      MemoryUses.push_back({&U, RMW->getValOperand()->getType()});
5141      continue;
5142    }
5143
5144    if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(UserI)) {
5145      if (U.getOperandNo() != AtomicCmpXchgInst::getPointerOperandIndex())
5146        return true; // Storing addr, not into addr.
5147      MemoryUses.push_back({&U, CmpX->getCompareOperand()->getType()});
5148      continue;
5149    }
5150
5151    if (CallInst *CI = dyn_cast<CallInst>(UserI)) {
5152      if (CI->hasFnAttr(Attribute::Cold)) {
5153        // If this is a cold call, we can sink the addressing calculation into
5154        // the cold path.  See optimizeCallInst
5155        bool OptForSize =
5156            OptSize || llvm::shouldOptimizeForSize(CI->getParent(), PSI, BFI);
5157        if (!OptForSize)
5158          continue;
5159      }
5160
5161      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledOperand());
5162      if (!IA)
5163        return true;
5164
5165      // If this is a memory operand, we're cool, otherwise bail out.
5166      if (!IsOperandAMemoryOperand(CI, IA, I, TLI, TRI))
5167        return true;
5168      continue;
5169    }
5170
5171    if (FindAllMemoryUses(UserI, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5172                          PSI, BFI, SeenInsts))
5173      return true;
5174  }
5175
5176  return false;
5177}
5178
5179static bool FindAllMemoryUses(
5180    Instruction *I, SmallVectorImpl<std::pair<Use *, Type *>> &MemoryUses,
5181    const TargetLowering &TLI, const TargetRegisterInfo &TRI, bool OptSize,
5182    ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
5183  unsigned SeenInsts = 0;
5184  SmallPtrSet<Instruction *, 16> ConsideredInsts;
5185  return FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI, TRI, OptSize,
5186                           PSI, BFI, SeenInsts);
5187}
5188
5189
5190/// Return true if Val is already known to be live at the use site that we're
5191/// folding it into. If so, there is no cost to include it in the addressing
5192/// mode. KnownLive1 and KnownLive2 are two values that we know are live at the
5193/// instruction already.
5194bool AddressingModeMatcher::valueAlreadyLiveAtInst(Value *Val,
5195                                                   Value *KnownLive1,
5196                                                   Value *KnownLive2) {
5197  // If Val is either of the known-live values, we know it is live!
5198  if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
5199    return true;
5200
5201  // All values other than instructions and arguments (e.g. constants) are live.
5202  if (!isa<Instruction>(Val) && !isa<Argument>(Val))
5203    return true;
5204
  // If Val is a constant-sized alloca in the entry block, it is live; this is
  // true because it is just a reference to the stack/frame pointer, which is
  // live for the whole function.
5208  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
5209    if (AI->isStaticAlloca())
5210      return true;
5211
5212  // Check to see if this value is already used in the memory instruction's
5213  // block.  If so, it's already live into the block at the very least, so we
5214  // can reasonably fold it.
5215  return Val->isUsedInBasicBlock(MemoryInst->getParent());
5216}
5217
5218/// It is possible for the addressing mode of the machine to fold the specified
5219/// instruction into a load or store that ultimately uses it.
5220/// However, the specified instruction has multiple uses.
5221/// Given this, it may actually increase register pressure to fold it
5222/// into the load. For example, consider this code:
5223///
5224///     X = ...
5225///     Y = X+1
5226///     use(Y)   -> nonload/store
5227///     Z = Y+1
5228///     load Z
5229///
5230/// In this case, Y has multiple uses, and can be folded into the load of Z
5231/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
5232/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
5233/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
5234/// number of computations either.
5235///
5236/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
5237/// X was live across 'load Z' for other reasons, we actually *would* want to
5238/// fold the addressing mode in the Z case.  This would make Y die earlier.
5239bool AddressingModeMatcher::isProfitableToFoldIntoAddressingMode(
5240    Instruction *I, ExtAddrMode &AMBefore, ExtAddrMode &AMAfter) {
5241  if (IgnoreProfitability)
5242    return true;
5243
5244  // AMBefore is the addressing mode before this instruction was folded into it,
5245  // and AMAfter is the addressing mode after the instruction was folded.  Get
5246  // the set of registers referenced by AMAfter and subtract out those
5247  // referenced by AMBefore: this is the set of values which folding in this
5248  // address extends the lifetime of.
5249  //
5250  // Note that there are only two potential values being referenced here,
5251  // BaseReg and ScaleReg (global addresses are always available, as are any
5252  // folded immediates).
5253  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;
5254
5255  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
5256  // lifetime wasn't extended by adding this instruction.
5257  if (valueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5258    BaseReg = nullptr;
5259  if (valueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
5260    ScaledReg = nullptr;
5261
  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
5264  if (!BaseReg && !ScaledReg)
5265    return true;
5266
  // If all uses of this instruction can have the address mode sunk into them,
  // we can remove the addressing mode and effectively trade one live register
  // for another (at worst).  In this context, folding an addressing mode into
  // the use is just a particularly nice way of sinking it.
5271  SmallVector<std::pair<Use *, Type *>, 16> MemoryUses;
5272  if (FindAllMemoryUses(I, MemoryUses, TLI, TRI, OptSize, PSI, BFI))
5273    return false; // Has a non-memory, non-foldable use!
5274
  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these memory operation uses and see
  // if they could *actually* fold the instruction.  The assumption is that
  // addressing modes are cheap and that duplicating the computation involved
  // many times is worthwhile, even on a fast path. For sinking candidates
  // (i.e. cold call sites), this serves as a way to prevent excessive code
  // growth since most architectures have some reasonably small and fast way to
  // compute an effective address (e.g., LEA on x86).
5284  SmallVector<Instruction *, 32> MatchedAddrModeInsts;
5285  for (const std::pair<Use *, Type *> &Pair : MemoryUses) {
5286    Value *Address = Pair.first->get();
5287    Instruction *UserI = cast<Instruction>(Pair.first->getUser());
5288    Type *AddressAccessTy = Pair.second;
5289    unsigned AS = Address->getType()->getPointerAddressSpace();
5290
5291    // Do a match against the root of this address, ignoring profitability. This
5292    // will tell us if the addressing mode for the memory operation will
5293    // *actually* cover the shared instruction.
5294    ExtAddrMode Result;
5295    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5296                                                                      0);
5297    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5298        TPT.getRestorationPoint();
5299    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, TRI, LI, getDTFn,
5300                                  AddressAccessTy, AS, UserI, Result,
5301                                  InsertedInsts, PromotedInsts, TPT,
5302                                  LargeOffsetGEP, OptSize, PSI, BFI);
5303    Matcher.IgnoreProfitability = true;
5304    bool Success = Matcher.matchAddr(Address, 0);
5305    (void)Success;
5306    assert(Success && "Couldn't select *anything*?");
5307
    // The match was only done to check profitability, so the changes made are
    // not part of the original matcher. Therefore, they should be dropped;
    // otherwise the original matcher will not present the right state.
5311    TPT.rollback(LastKnownGood);
5312
5313    // If the match didn't cover I, then it won't be shared by it.
5314    if (!is_contained(MatchedAddrModeInsts, I))
5315      return false;
5316
5317    MatchedAddrModeInsts.clear();
5318  }
5319
5320  return true;
5321}
5322
5323/// Return true if the specified values are defined in a
5324/// different basic block than BB.
5325static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
5326  if (Instruction *I = dyn_cast<Instruction>(V))
5327    return I->getParent() != BB;
5328  return false;
5329}
5330
/// Sink addressing mode computation immediately before MemoryInst if doing so
5332/// can be done without increasing register pressure.  The need for the
5333/// register pressure constraint means this can end up being an all or nothing
5334/// decision for all uses of the same addressing computation.
5335///
5336/// Load and Store Instructions often have addressing modes that can do
5337/// significant amounts of computation. As such, instruction selection will try
5338/// to get the load or store to do as much computation as possible for the
5339/// program. The problem is that isel can only see within a single block. As
5340/// such, we sink as much legal addressing mode work into the block as possible.
5341///
5342/// This method is used to optimize both load/store and inline asms with memory
5343/// operands.  It's also used to sink addressing computations feeding into cold
5344/// call sites into their (cold) basic block.
5345///
5346/// The motivation for handling sinking into cold blocks is that doing so can
5347/// both enable other address mode sinking (by satisfying the register pressure
/// constraint above) and reduce register pressure globally (by removing the
/// addressing mode computation from the fast path entirely).
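///
/// For illustration (hypothetical IR): an address such as
///   %p = getelementptr i8, ptr %base, i64 %idx
/// that is defined in another block and only used by a load in this block can
/// be re-materialized next to the load, so instruction selection can fold the
/// computation into the load's addressing mode.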
5350bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
5351                                        Type *AccessTy, unsigned AddrSpace) {
5352  Value *Repl = Addr;
5353
5354  // Try to collapse single-value PHI nodes.  This is necessary to undo
5355  // unprofitable PRE transformations.
5356  SmallVector<Value *, 8> worklist;
5357  SmallPtrSet<Value *, 16> Visited;
5358  worklist.push_back(Addr);
5359
  // Use a worklist to iteratively look through PHI and select nodes, and
  // ensure that the addressing modes obtained from the non-PHI/select roots of
  // the graph are compatible.
5363  bool PhiOrSelectSeen = false;
5364  SmallVector<Instruction *, 16> AddrModeInsts;
5365  const SimplifyQuery SQ(*DL, TLInfo);
5366  AddressingModeCombiner AddrModes(SQ, Addr);
5367  TypePromotionTransaction TPT(RemovedInsts);
5368  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
5369      TPT.getRestorationPoint();
5370  while (!worklist.empty()) {
5371    Value *V = worklist.pop_back_val();
5372
5373    // We allow traversing cyclic Phi nodes.
5374    // If this loop succeeds, every path through the Phi nodes is guaranteed
5375    // to compute an address of the form
5376    //    BaseGV + Base + Scale * Index + Offset
5377    // where Scale and Offset are constants and BaseGV, Base and Index
5378    // are exactly the same Values in all cases.
5379    // This means BaseGV, Scale and Offset dominate our memory instruction
5380    // and have the same value as they had in the address computation
5381    // represented as a Phi, so we can safely sink it to the memory instruction.
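    // As an illustrative sketch (hypothetical names): two incoming values
    //   %a1 = getelementptr i32, ptr %base, i64 %idx
    //   %a2 = getelementptr i32, ptr %base, i64 %idx
    // reaching the Phi both decompose to Base = %base, Index = %idx,
    // Scale = 4 and Offset = 0, so one combined computation can be
    // rematerialized next to MemoryInst.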
5382    if (!Visited.insert(V).second)
5383      continue;
5384
5385    // For a PHI node, push all of its incoming values.
5386    if (PHINode *P = dyn_cast<PHINode>(V)) {
5387      append_range(worklist, P->incoming_values());
5388      PhiOrSelectSeen = true;
5389      continue;
5390    }
5391    // Similar for select.
5392    if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
5393      worklist.push_back(SI->getFalseValue());
5394      worklist.push_back(SI->getTrueValue());
5395      PhiOrSelectSeen = true;
5396      continue;
5397    }
5398
5399    // For non-PHIs, determine the addressing mode being computed.  Note that
5400    // the result may differ depending on what other uses our candidate
5401    // addressing instructions might have.
5402    AddrModeInsts.clear();
5403    std::pair<AssertingVH<GetElementPtrInst>, int64_t> LargeOffsetGEP(nullptr,
5404                                                                      0);
5405    // Defer the query (and possible computation) of the dom tree to the
5406    // point of actual use.  It's expected that most address matches don't
5407    // actually need the domtree.
5408    auto getDTFn = [MemoryInst, this]() -> const DominatorTree & {
5409      Function *F = MemoryInst->getParent()->getParent();
5410      return this->getDT(*F);
5411    };
5412    ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
5413        V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *LI, getDTFn,
5414        *TRI, InsertedInsts, PromotedInsts, TPT, LargeOffsetGEP, OptSize, PSI,
5415        BFI.get());
5416
5417    GetElementPtrInst *GEP = LargeOffsetGEP.first;
5418    if (GEP && !NewGEPBases.count(GEP)) {
5419      // If splitting the underlying data structure can reduce the offset of a
5420      // GEP, collect the GEP.  Skip the GEPs that are the new bases of
5421      // previously split data structures.
5422      LargeOffsetGEPMap[GEP->getPointerOperand()].push_back(LargeOffsetGEP);
5423      LargeOffsetGEPID.insert(std::make_pair(GEP, LargeOffsetGEPID.size()));
5424    }
5425
5426    NewAddrMode.OriginalValue = V;
5427    if (!AddrModes.addNewAddrMode(NewAddrMode))
5428      break;
5429  }
5430
5431  // Try to combine the AddrModes we've collected. If we couldn't collect any,
5432  // or we have multiple but either couldn't combine them or combining them
5433  // wouldn't do anything useful, bail out now.
5434  if (!AddrModes.combineAddrModes()) {
5435    TPT.rollback(LastKnownGood);
5436    return false;
5437  }
5438  bool Modified = TPT.commit();
5439
5440  // Get the combined AddrMode (or the only AddrMode, if we only had one).
5441  ExtAddrMode AddrMode = AddrModes.getAddrMode();
5442
5443  // If all the instructions matched are already in this BB, don't do anything.
5444  // If we saw a Phi node then it is definitely not local, and if we saw a
5445  // select then we want to push the address calculation past it even if it's
5446  // already in this BB.
5447  if (!PhiOrSelectSeen && none_of(AddrModeInsts, [&](Value *V) {
5448        return IsNonLocalValue(V, MemoryInst->getParent());
5449      })) {
5450    LLVM_DEBUG(dbgs() << "CGP: Found      local addrmode: " << AddrMode
5451                      << "\n");
5452    return Modified;
5453  }
5454
5455  // Insert this computation right after this user.  Since our caller is
5456  // scanning from the top of the BB to the bottom, any reuses of the
5457  // expression are guaranteed to happen later.
5458  IRBuilder<> Builder(MemoryInst);
5459
5460  // Now that we've determined the addressing expression we want to use and
5461  // know that we have to sink it into this block, check whether we have
5462  // already done this for some other load/store in this block.  If so, reuse
5463  // the computation.  Before attempting reuse, check that the address is
5464  // still valid, as it may have been erased.
5465
5466  WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];
5467
5468  Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
5469  Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5470  if (SunkAddr) {
5471    LLVM_DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode
5472                      << " for " << *MemoryInst << "\n");
5473    if (SunkAddr->getType() != Addr->getType()) {
5474      if (SunkAddr->getType()->getPointerAddressSpace() !=
5475              Addr->getType()->getPointerAddressSpace() &&
5476          !DL->isNonIntegralPointerType(Addr->getType())) {
5477        // There are two reasons the address spaces might not match: a no-op
5478        // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5479        // ptrtoint/inttoptr pair to ensure we match the original semantics.
5480        // TODO: allow bitcast between different address space pointers with the
5481        // same size.
5482        SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5483        SunkAddr =
5484            Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5485      } else
5486        SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5487    }
5488  } else if (AddrSinkUsingGEPs || (!AddrSinkUsingGEPs.getNumOccurrences() &&
5489                                   SubtargetInfo->addrSinkUsingGEPs())) {
5490    // By default, we use the GEP-based method when AA is used later. This
5491    // prevents new inttoptr/ptrtoint pairs from degrading AA capabilities.
5492    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5493                      << " for " << *MemoryInst << "\n");
5494    Value *ResultPtr = nullptr, *ResultIndex = nullptr;
5495
5496    // First, find the pointer.
5497    if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
5498      ResultPtr = AddrMode.BaseReg;
5499      AddrMode.BaseReg = nullptr;
5500    }
5501
5502    if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
5503      // We can't add more than one pointer together, nor can we scale a
5504      // pointer (both of which seem meaningless).
5505      if (ResultPtr || AddrMode.Scale != 1)
5506        return Modified;
5507
5508      ResultPtr = AddrMode.ScaledReg;
5509      AddrMode.Scale = 0;
5510    }
5511
5512    // It is only safe to sign extend the BaseReg if we know that the math
5513    // required to create it did not overflow before we extend it. Since
5514    // the original IR value was tossed in favor of a constant back when
5515    // the AddrMode was created we need to bail out gracefully if widths
5516    // do not match instead of extending it.
5517    //
5518    // (See below for code to add the scale.)
5519    if (AddrMode.Scale) {
5520      Type *ScaledRegTy = AddrMode.ScaledReg->getType();
5521      if (cast<IntegerType>(IntPtrTy)->getBitWidth() >
5522          cast<IntegerType>(ScaledRegTy)->getBitWidth())
5523        return Modified;
5524    }
5525
5526    if (AddrMode.BaseGV) {
5527      if (ResultPtr)
5528        return Modified;
5529
5530      ResultPtr = AddrMode.BaseGV;
5531    }
5532
5533    // If the real base value actually came from an inttoptr, then the matcher
5534    // will look through it and provide only the integer value. In that case,
5535    // use it here.
5536    if (!DL->isNonIntegralPointerType(Addr->getType())) {
5537      if (!ResultPtr && AddrMode.BaseReg) {
5538        ResultPtr = Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(),
5539                                           "sunkaddr");
5540        AddrMode.BaseReg = nullptr;
5541      } else if (!ResultPtr && AddrMode.Scale == 1) {
5542        ResultPtr = Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(),
5543                                           "sunkaddr");
5544        AddrMode.Scale = 0;
5545      }
5546    }
5547
5548    if (!ResultPtr && !AddrMode.BaseReg && !AddrMode.Scale &&
5549        !AddrMode.BaseOffs) {
5550      SunkAddr = Constant::getNullValue(Addr->getType());
5551    } else if (!ResultPtr) {
5552      return Modified;
5553    } else {
5554      Type *I8PtrTy =
5555          Builder.getPtrTy(Addr->getType()->getPointerAddressSpace());
5556
5557      // Start with the base register. Do this first so that subsequent address
5558      // matching finds it last, which will prevent it from trying to match it
5559      // as the scaled value in case it happens to be a mul. That would be
5560      // problematic if we've sunk a different mul for the scale, because then
5561      // we'd end up sinking both muls.
5562      if (AddrMode.BaseReg) {
5563        Value *V = AddrMode.BaseReg;
5564        if (V->getType() != IntPtrTy)
5565          V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5566
5567        ResultIndex = V;
5568      }
5569
5570      // Add the scale value.
5571      if (AddrMode.Scale) {
5572        Value *V = AddrMode.ScaledReg;
5573        if (V->getType() == IntPtrTy) {
5574          // done.
5575        } else {
5576          assert(cast<IntegerType>(IntPtrTy)->getBitWidth() <
5577                     cast<IntegerType>(V->getType())->getBitWidth() &&
5578                 "We can't transform if ScaledReg is too narrow");
5579          V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5580        }
5581
5582        if (AddrMode.Scale != 1)
5583          V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5584                                "sunkaddr");
5585        if (ResultIndex)
5586          ResultIndex = Builder.CreateAdd(ResultIndex, V, "sunkaddr");
5587        else
5588          ResultIndex = V;
5589      }
5590
5591      // Add in the Base Offset if present.
5592      if (AddrMode.BaseOffs) {
5593        Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5594        if (ResultIndex) {
5595          // We need to add this separately from the scale above to help with
5596          // SDAG consecutive load/store merging.
5597          if (ResultPtr->getType() != I8PtrTy)
5598            ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5599          ResultPtr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5600                                           AddrMode.InBounds);
5601        }
5602
5603        ResultIndex = V;
5604      }
5605
5606      if (!ResultIndex) {
5607        SunkAddr = ResultPtr;
5608      } else {
5609        if (ResultPtr->getType() != I8PtrTy)
5610          ResultPtr = Builder.CreatePointerCast(ResultPtr, I8PtrTy);
5611        SunkAddr = Builder.CreatePtrAdd(ResultPtr, ResultIndex, "sunkaddr",
5612                                        AddrMode.InBounds);
5613      }
5614
5615      if (SunkAddr->getType() != Addr->getType()) {
5616        if (SunkAddr->getType()->getPointerAddressSpace() !=
5617                Addr->getType()->getPointerAddressSpace() &&
5618            !DL->isNonIntegralPointerType(Addr->getType())) {
5619          // There are two reasons the address spaces might not match: a no-op
5620          // addrspacecast, or a ptrtoint/inttoptr pair. Either way, we emit a
5621          // ptrtoint/inttoptr pair to ensure we match the original semantics.
5622          // TODO: allow bitcast between different address space pointers with
5623          // the same size.
5624          SunkAddr = Builder.CreatePtrToInt(SunkAddr, IntPtrTy, "sunkaddr");
5625          SunkAddr =
5626              Builder.CreateIntToPtr(SunkAddr, Addr->getType(), "sunkaddr");
5627        } else
5628          SunkAddr = Builder.CreatePointerCast(SunkAddr, Addr->getType());
5629      }
5630    }
5631  } else {
5632    // We'd require a ptrtoint/inttoptr down the line, which we can't do for
5633    // non-integral pointers, so in that case bail out now.
5634    Type *BaseTy = AddrMode.BaseReg ? AddrMode.BaseReg->getType() : nullptr;
5635    Type *ScaleTy = AddrMode.Scale ? AddrMode.ScaledReg->getType() : nullptr;
5636    PointerType *BasePtrTy = dyn_cast_or_null<PointerType>(BaseTy);
5637    PointerType *ScalePtrTy = dyn_cast_or_null<PointerType>(ScaleTy);
5638    if (DL->isNonIntegralPointerType(Addr->getType()) ||
5639        (BasePtrTy && DL->isNonIntegralPointerType(BasePtrTy)) ||
5640        (ScalePtrTy && DL->isNonIntegralPointerType(ScalePtrTy)) ||
5641        (AddrMode.BaseGV &&
5642         DL->isNonIntegralPointerType(AddrMode.BaseGV->getType())))
5643      return Modified;
5644
5645    LLVM_DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode
5646                      << " for " << *MemoryInst << "\n");
5647    Type *IntPtrTy = DL->getIntPtrType(Addr->getType());
5648    Value *Result = nullptr;
5649
5650    // Start with the base register. Do this first so that subsequent address
5651    // matching finds it last, which will prevent it from trying to match it
5652    // as the scaled value in case it happens to be a mul. That would be
5653    // problematic if we've sunk a different mul for the scale, because then
5654    // we'd end up sinking both muls.
5655    if (AddrMode.BaseReg) {
5656      Value *V = AddrMode.BaseReg;
5657      if (V->getType()->isPointerTy())
5658        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5659      if (V->getType() != IntPtrTy)
5660        V = Builder.CreateIntCast(V, IntPtrTy, /*isSigned=*/true, "sunkaddr");
5661      Result = V;
5662    }
5663
5664    // Add the scale value.
5665    if (AddrMode.Scale) {
5666      Value *V = AddrMode.ScaledReg;
5667      if (V->getType() == IntPtrTy) {
5668        // done.
5669      } else if (V->getType()->isPointerTy()) {
5670        V = Builder.CreatePtrToInt(V, IntPtrTy, "sunkaddr");
5671      } else if (cast<IntegerType>(IntPtrTy)->getBitWidth() <
5672                 cast<IntegerType>(V->getType())->getBitWidth()) {
5673        V = Builder.CreateTrunc(V, IntPtrTy, "sunkaddr");
5674      } else {
5675        // It is only safe to sign extend the BaseReg if we know that the math
5676        // required to create it did not overflow before we extend it. Since
5677        // the original IR value was tossed in favor of a constant back when
5678        // the AddrMode was created we need to bail out gracefully if widths
5679        // do not match instead of extending it.
5680        Instruction *I = dyn_cast_or_null<Instruction>(Result);
5681        if (I && (Result != AddrMode.BaseReg))
5682          I->eraseFromParent();
5683        return Modified;
5684      }
5685      if (AddrMode.Scale != 1)
5686        V = Builder.CreateMul(V, ConstantInt::get(IntPtrTy, AddrMode.Scale),
5687                              "sunkaddr");
5688      if (Result)
5689        Result = Builder.CreateAdd(Result, V, "sunkaddr");
5690      else
5691        Result = V;
5692    }
5693
5694    // Add in the BaseGV if present.
5695    if (AddrMode.BaseGV) {
5696      Value *V = Builder.CreatePtrToInt(AddrMode.BaseGV, IntPtrTy, "sunkaddr");
5697      if (Result)
5698        Result = Builder.CreateAdd(Result, V, "sunkaddr");
5699      else
5700        Result = V;
5701    }
5702
5703    // Add in the Base Offset if present.
5704    if (AddrMode.BaseOffs) {
5705      Value *V = ConstantInt::get(IntPtrTy, AddrMode.BaseOffs);
5706      if (Result)
5707        Result = Builder.CreateAdd(Result, V, "sunkaddr");
5708      else
5709        Result = V;
5710    }
5711
5712    if (!Result)
5713      SunkAddr = Constant::getNullValue(Addr->getType());
5714    else
5715      SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
5716  }
5717
5718  MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
5719  // Store the newly computed address into the cache. If we reused a value,
5720  // this should be idempotent.
5721  SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);
5722
5723  // If we have no uses, recursively delete the value and all dead instructions
5724  // using it.
5725  if (Repl->use_empty()) {
5726    resetIteratorIfInvalidatedWhileCalling(CurInstIterator->getParent(), [&]() {
5727      RecursivelyDeleteTriviallyDeadInstructions(
5728          Repl, TLInfo, nullptr,
5729          [&](Value *V) { removeAllAssertingVHReferences(V); });
5730    });
5731  }
5732  ++NumMemoryInsts;
5733  return true;
5734}
5735
5736/// Rewrite the GEP input to a gather/scatter so that SelectionDAGBuilder can
5737/// find a uniform base to use for ISD::MGATHER/MSCATTER. SelectionDAGBuilder
5738/// can only handle a 2-operand GEP in the same basic block or a splat constant
5739/// vector. The 2 operands of the GEP must be a scalar pointer and a vector
5740/// index.
5741///
5742/// If the existing GEP has a vector base pointer that is splat, we can look
5743/// through the splat to find the scalar pointer. If we can't find a scalar
5744/// pointer, there's nothing we can do.
5745///
5746/// If we have a GEP with more than 2 indices where the middle indices are all
5747/// zeroes, we can replace it with 2 GEPs where the second has 2 operands.
5748///
5749/// If the final index isn't a vector or is a splat, we can emit a scalar GEP
5750/// followed by a GEP with an all zeroes vector index. This will enable
5751/// SelectionDAGBuilder to use the scalar GEP as the uniform base and have a
5752/// zero index.
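///
/// As a purely illustrative sketch (hypothetical IR, not from a real test):
/// \code
///   %ptrs = getelementptr [16 x i32], ptr %base, i64 0, <4 x i64> %idx
///   %g = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs,
///                                  i32 4, <4 x i1> %m, <4 x i32> poison)
/// \endcode
/// can be rewritten so that a scalar GEP covers the leading zero indices and a
/// trailing GEP carries the vector index, giving SelectionDAGBuilder a uniform
/// base:
/// \code
///   %base2 = getelementptr [16 x i32], ptr %base, i64 0, i64 0
///   %ptrs  = getelementptr i32, ptr %base2, <4 x i64> %idx
/// \endcode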
5753bool CodeGenPrepare::optimizeGatherScatterInst(Instruction *MemoryInst,
5754                                               Value *Ptr) {
5755  Value *NewAddr;
5756
5757  if (const auto *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
5758    // Don't optimize GEPs that don't have indices.
5759    if (!GEP->hasIndices())
5760      return false;
5761
5762    // If the GEP and the gather/scatter aren't in the same BB, don't optimize.
5763    // FIXME: We should support this by sinking the GEP.
5764    if (MemoryInst->getParent() != GEP->getParent())
5765      return false;
5766
5767    SmallVector<Value *, 2> Ops(GEP->operands());
5768
5769    bool RewriteGEP = false;
5770
5771    if (Ops[0]->getType()->isVectorTy()) {
5772      Ops[0] = getSplatValue(Ops[0]);
5773      if (!Ops[0])
5774        return false;
5775      RewriteGEP = true;
5776    }
5777
5778    unsigned FinalIndex = Ops.size() - 1;
5779
5780    // Ensure all but the last index are 0.
5781    // FIXME: This isn't strictly required. All that's required is that they
5782    // are all scalars or splats.
5783    for (unsigned i = 1; i < FinalIndex; ++i) {
5784      auto *C = dyn_cast<Constant>(Ops[i]);
5785      if (!C)
5786        return false;
5787      if (isa<VectorType>(C->getType()))
5788        C = C->getSplatValue();
5789      auto *CI = dyn_cast_or_null<ConstantInt>(C);
5790      if (!CI || !CI->isZero())
5791        return false;
5792      // Scalarize the index if needed.
5793      Ops[i] = CI;
5794    }
5795
5796    // Try to scalarize the final index.
5797    if (Ops[FinalIndex]->getType()->isVectorTy()) {
5798      if (Value *V = getSplatValue(Ops[FinalIndex])) {
5799        auto *C = dyn_cast<ConstantInt>(V);
5800        // Don't scalarize all zeros vector.
5801        if (!C || !C->isZero()) {
5802          Ops[FinalIndex] = V;
5803          RewriteGEP = true;
5804        }
5805      }
5806    }
5807
5808    // If we made any changes or we have extra operands, we need to generate
5809    // new instructions.
5810    if (!RewriteGEP && Ops.size() == 2)
5811      return false;
5812
5813    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5814
5815    IRBuilder<> Builder(MemoryInst);
5816
5817    Type *SourceTy = GEP->getSourceElementType();
5818    Type *ScalarIndexTy = DL->getIndexType(Ops[0]->getType()->getScalarType());
5819
5820    // If the final index isn't a vector, emit a scalar GEP containing all ops
5821    // and a vector GEP with all zeroes final index.
5822    if (!Ops[FinalIndex]->getType()->isVectorTy()) {
5823      NewAddr = Builder.CreateGEP(SourceTy, Ops[0], ArrayRef(Ops).drop_front());
5824      auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5825      auto *SecondTy = GetElementPtrInst::getIndexedType(
5826          SourceTy, ArrayRef(Ops).drop_front());
5827      NewAddr =
5828          Builder.CreateGEP(SecondTy, NewAddr, Constant::getNullValue(IndexTy));
5829    } else {
5830      Value *Base = Ops[0];
5831      Value *Index = Ops[FinalIndex];
5832
5833      // Create a scalar GEP if there are more than 2 operands.
5834      if (Ops.size() != 2) {
5835        // Replace the last index with 0.
5836        Ops[FinalIndex] =
5837            Constant::getNullValue(Ops[FinalIndex]->getType()->getScalarType());
5838        Base = Builder.CreateGEP(SourceTy, Base, ArrayRef(Ops).drop_front());
5839        SourceTy = GetElementPtrInst::getIndexedType(
5840            SourceTy, ArrayRef(Ops).drop_front());
5841      }
5842
5843      // Now create the GEP with scalar pointer and vector index.
5844      NewAddr = Builder.CreateGEP(SourceTy, Base, Index);
5845    }
5846  } else if (!isa<Constant>(Ptr)) {
5847    // Not a GEP; maybe it's a splat and we can create a GEP to enable
5848    // SelectionDAGBuilder to use it as a uniform base.
5849    Value *V = getSplatValue(Ptr);
5850    if (!V)
5851      return false;
5852
5853    auto NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
5854
5855    IRBuilder<> Builder(MemoryInst);
5856
5857    // Emit a vector GEP with a scalar pointer and all 0s vector index.
5858    Type *ScalarIndexTy = DL->getIndexType(V->getType()->getScalarType());
5859    auto *IndexTy = VectorType::get(ScalarIndexTy, NumElts);
5860    Type *ScalarTy;
5861    if (cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5862        Intrinsic::masked_gather) {
5863      ScalarTy = MemoryInst->getType()->getScalarType();
5864    } else {
5865      assert(cast<IntrinsicInst>(MemoryInst)->getIntrinsicID() ==
5866             Intrinsic::masked_scatter);
5867      ScalarTy = MemoryInst->getOperand(0)->getType()->getScalarType();
5868    }
5869    NewAddr = Builder.CreateGEP(ScalarTy, V, Constant::getNullValue(IndexTy));
5870  } else {
5871    // Constant; SelectionDAGBuilder knows to check whether it's a splat.
5872    return false;
5873  }
5874
5875  MemoryInst->replaceUsesOfWith(Ptr, NewAddr);
5876
5877  // If we have no uses, recursively delete the value and all dead instructions
5878  // using it.
5879  if (Ptr->use_empty())
5880    RecursivelyDeleteTriviallyDeadInstructions(
5881        Ptr, TLInfo, nullptr,
5882        [&](Value *V) { removeAllAssertingVHReferences(V); });
5883
5884  return true;
5885}
5886
5887/// If there are any memory operands, use optimizeMemoryInst to sink their
5888/// address computations into the block when possible / profitable.
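///
/// As an illustrative sketch only (hypothetical IR; the exact constraint
/// string is an assumption): for an indirect memory operand such as
/// \code
///   call void asm "movl $1, $0", "=*m,r"(ptr elementtype(i32) %addr, i32 %v)
/// \endcode
/// the pointer %addr is treated like the address of a store, so the
/// computation feeding it may be sunk next to the call.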
5889bool CodeGenPrepare::optimizeInlineAsmInst(CallInst *CS) {
5890  bool MadeChange = false;
5891
5892  const TargetRegisterInfo *TRI =
5893      TM->getSubtargetImpl(*CS->getFunction())->getRegisterInfo();
5894  TargetLowering::AsmOperandInfoVector TargetConstraints =
5895      TLI->ParseConstraints(*DL, TRI, *CS);
5896  unsigned ArgNo = 0;
5897  for (TargetLowering::AsmOperandInfo &OpInfo : TargetConstraints) {
5898    // Compute the constraint code and ConstraintType to use.
5899    TLI->ComputeConstraintToUse(OpInfo, SDValue());
5900
5901    // TODO: Also handle C_Address?
5902    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
5903        OpInfo.isIndirect) {
5904      Value *OpVal = CS->getArgOperand(ArgNo++);
5905      MadeChange |= optimizeMemoryInst(CS, OpVal, OpVal->getType(), ~0u);
5906    } else if (OpInfo.Type == InlineAsm::isInput)
5907      ArgNo++;
5908  }
5909
5910  return MadeChange;
5911}
5912
5913/// Check if all the uses of \p Val are equivalent (or free) zero or
5914/// sign extensions.
5915static bool hasSameExtUse(Value *Val, const TargetLowering &TLI) {
5916  assert(!Val->use_empty() && "Input must have at least one use");
5917  const Instruction *FirstUser = cast<Instruction>(*Val->user_begin());
5918  bool IsSExt = isa<SExtInst>(FirstUser);
5919  Type *ExtTy = FirstUser->getType();
5920  for (const User *U : Val->users()) {
5921    const Instruction *UI = cast<Instruction>(U);
5922    if ((IsSExt && !isa<SExtInst>(UI)) || (!IsSExt && !isa<ZExtInst>(UI)))
5923      return false;
5924    Type *CurTy = UI->getType();
5925    // Same input and output types: Same instruction after CSE.
5926    if (CurTy == ExtTy)
5927      continue;
5928
5929    // If IsSExt is true, we are in this situation:
5930    // a = Val
5931    // b = sext ty1 a to ty2
5932    // c = sext ty1 a to ty3
5933    // Assuming ty2 is shorter than ty3, this could be turned into:
5934    // a = Val
5935    // b = sext ty1 a to ty2
5936    // c = sext ty2 b to ty3
5937    // However, the last sext is not free.
5938    if (IsSExt)
5939      return false;
5940
5941    // This is a ZExt; extending from the narrower type to the wider one may
5942    // be free, in which case the differing use adds no extra cost.
5943    Type *NarrowTy;
5944    Type *LargeTy;
5945    if (ExtTy->getScalarType()->getIntegerBitWidth() >
5946        CurTy->getScalarType()->getIntegerBitWidth()) {
5947      NarrowTy = CurTy;
5948      LargeTy = ExtTy;
5949    } else {
5950      NarrowTy = ExtTy;
5951      LargeTy = CurTy;
5952    }
5953
5954    if (!TLI.isZExtFree(NarrowTy, LargeTy))
5955      return false;
5956  }
5957  // All uses are the same or can be derived from one another for free.
5958  return true;
5959}
5960
5961/// Try to speculatively promote extensions in \p Exts and continue
5962/// promoting through newly promoted operands recursively as far as doing so is
5963/// profitable. Save the extensions profitably moved up in \p ProfitablyMovedExts.
5964/// When some promotion happened, \p TPT contains the proper state to revert
5965/// them.
5966///
5967/// \return true if some promotion happened, false otherwise.
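///
/// An illustrative sketch (hypothetical IR): promoting through an add,
/// \code
///   %a = add nsw i32 %b, 1
///   %s = sext i32 %a to i64
/// \endcode
/// becomes
/// \code
///   %pb = sext i32 %b to i64
///   %s  = add nsw i64 %pb, 1
/// \endcode
/// which may let the new ext reach a load and later be folded into an
/// ext(load).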
5968bool CodeGenPrepare::tryToPromoteExts(
5969    TypePromotionTransaction &TPT, const SmallVectorImpl<Instruction *> &Exts,
5970    SmallVectorImpl<Instruction *> &ProfitablyMovedExts,
5971    unsigned CreatedInstsCost) {
5972  bool Promoted = false;
5973
5974  // Iterate over all the extensions to try to promote them.
5975  for (auto *I : Exts) {
5976    // Early check if we directly have ext(load).
5977    if (isa<LoadInst>(I->getOperand(0))) {
5978      ProfitablyMovedExts.push_back(I);
5979      continue;
5980    }
5981
5982    // Check whether we want to do any promotion.  The reason we have this
5983    // check inside the for loop is to catch the case where an extension is
5984    // directly fed by a load, because in that case the extension can be moved
5985    // up without any promotion on its operands.
5986    if (!TLI->enableExtLdPromotion() || DisableExtLdPromotion)
5987      return false;
5988
5989    // Get the action to perform the promotion.
5990    TypePromotionHelper::Action TPH =
5991        TypePromotionHelper::getAction(I, InsertedInsts, *TLI, PromotedInsts);
5992    // Check if we can promote.
5993    if (!TPH) {
5994      // Save the current extension as we cannot move up through its operand.
5995      ProfitablyMovedExts.push_back(I);
5996      continue;
5997    }
5998
5999    // Save the current state.
6000    TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6001        TPT.getRestorationPoint();
6002    SmallVector<Instruction *, 4> NewExts;
6003    unsigned NewCreatedInstsCost = 0;
6004    unsigned ExtCost = !TLI->isExtFree(I);
6005    // Promote.
6006    Value *PromotedVal = TPH(I, TPT, PromotedInsts, NewCreatedInstsCost,
6007                             &NewExts, nullptr, *TLI);
6008    assert(PromotedVal &&
6009           "TypePromotionHelper should have filtered out those cases");
6010
6011    // We would be able to merge only one extension into a load.
6012    // Therefore, if we have more than 1 new extension we heuristically
6013    // cut this search path, because it means we degrade the code quality.
6014    // With exactly 2, the transformation is neutral, because we will merge
6015    // one extension but leave one. However, we optimistically keep going,
6016    // because the new extension may be removed too. Also avoid replacing a
6017    // single free extension with multiple extensions, as this increases the
6018    // number of IR instructions while not providing any savings.
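    // A worked example of the bookkeeping below (illustrative numbers): with
    // CreatedInstsCost = 0, a promotion that creates two new exts
    // (NewCreatedInstsCost = 2) while removing one non-free ext (ExtCost = 1)
    // yields TotalCreatedInstsCost = max(0, 2 - 1) = 1, which is still within
    // the threshold; a value of 2 or more would make it unprofitable.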
6019    long long TotalCreatedInstsCost = CreatedInstsCost + NewCreatedInstsCost;
6020    // FIXME: It would be possible to propagate a negative value instead of
6021    // conservatively ceiling it to 0.
6022    TotalCreatedInstsCost =
6023        std::max((long long)0, (TotalCreatedInstsCost - ExtCost));
6024    if (!StressExtLdPromotion &&
6025        (TotalCreatedInstsCost > 1 ||
6026         !isPromotedInstructionLegal(*TLI, *DL, PromotedVal) ||
6027         (ExtCost == 0 && NewExts.size() > 1))) {
6028      // This promotion is not profitable; roll back to the previous state, and
6029      // save the current extension in ProfitablyMovedExts as the latest
6030      // speculative promotion turned out to be unprofitable.
6031      TPT.rollback(LastKnownGood);
6032      ProfitablyMovedExts.push_back(I);
6033      continue;
6034    }
6035    // Continue promoting NewExts as far as doing so is profitable.
6036    SmallVector<Instruction *, 2> NewlyMovedExts;
6037    (void)tryToPromoteExts(TPT, NewExts, NewlyMovedExts, TotalCreatedInstsCost);
6038    bool NewPromoted = false;
6039    for (auto *ExtInst : NewlyMovedExts) {
6040      Instruction *MovedExt = cast<Instruction>(ExtInst);
6041      Value *ExtOperand = MovedExt->getOperand(0);
6042      // If we have reached a load, we need this extra profitability check
6043      // as it could potentially be merged into an ext(load).
6044      if (isa<LoadInst>(ExtOperand) &&
6045          !(StressExtLdPromotion || NewCreatedInstsCost <= ExtCost ||
6046            (ExtOperand->hasOneUse() || hasSameExtUse(ExtOperand, *TLI))))
6047        continue;
6048
6049      ProfitablyMovedExts.push_back(MovedExt);
6050      NewPromoted = true;
6051    }
6052
6053    // If none of the speculative promotions for NewExts is profitable, roll
6054    // back and save the current extension (I) as the last profitable extension.
6055    if (!NewPromoted) {
6056      TPT.rollback(LastKnownGood);
6057      ProfitablyMovedExts.push_back(I);
6058      continue;
6059    }
6060    // The promotion is profitable.
6061    Promoted = true;
6062  }
6063  return Promoted;
6064}
6065
6066/// Merge redundant sexts when one dominates the other.
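///
/// An illustrative sketch (hypothetical IR): if
/// \code
///   %s1 = sext i32 %v to i64    ; dominates %s2
///   ...
///   %s2 = sext i32 %v to i64
/// \endcode
/// then all uses of %s2 are rewritten to use %s1 and %s2 is removed.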
6067bool CodeGenPrepare::mergeSExts(Function &F) {
6068  bool Changed = false;
6069  for (auto &Entry : ValToSExtendedUses) {
6070    SExts &Insts = Entry.second;
6071    SExts CurPts;
6072    for (Instruction *Inst : Insts) {
6073      if (RemovedInsts.count(Inst) || !isa<SExtInst>(Inst) ||
6074          Inst->getOperand(0) != Entry.first)
6075        continue;
6076      bool inserted = false;
6077      for (auto &Pt : CurPts) {
6078        if (getDT(F).dominates(Inst, Pt)) {
6079          replaceAllUsesWith(Pt, Inst, FreshBBs, IsHugeFunc);
6080          RemovedInsts.insert(Pt);
6081          Pt->removeFromParent();
6082          Pt = Inst;
6083          inserted = true;
6084          Changed = true;
6085          break;
6086        }
6087        if (!getDT(F).dominates(Pt, Inst))
6088          // Give up if we need to merge in a common dominator, as
6089          // experiments show it is not profitable.
6090          continue;
6091        replaceAllUsesWith(Inst, Pt, FreshBBs, IsHugeFunc);
6092        RemovedInsts.insert(Inst);
6093        Inst->removeFromParent();
6094        inserted = true;
6095        Changed = true;
6096        break;
6097      }
6098      if (!inserted)
6099        CurPts.push_back(Inst);
6100    }
6101  }
6102  return Changed;
6103}
6104
6105// Split large data structures so that the GEPs accessing them can have
6106// smaller offsets, allowing them to be sunk to the same blocks as their users.
6107// For example, a large struct starting from %base is split into two parts
6108// where the second part starts from %new_base.
6109//
6110// Before:
6111// BB0:
6112//   %base     =
6113//
6114// BB1:
6115//   %gep0     = gep %base, off0
6116//   %gep1     = gep %base, off1
6117//   %gep2     = gep %base, off2
6118//
6119// BB2:
6120//   %load1    = load %gep0
6121//   %load2    = load %gep1
6122//   %load3    = load %gep2
6123//
6124// After:
6125// BB0:
6126//   %base     =
6127//   %new_base = gep %base, off0
6128//
6129// BB1:
6130//   %new_gep0 = %new_base
6131//   %new_gep1 = gep %new_base, off1 - off0
6132//   %new_gep2 = gep %new_base, off2 - off0
6133//
6134// BB2:
6135//   %load1    = load i32, i32* %new_gep0
6136//   %load2    = load i32, i32* %new_gep1
6137//   %load3    = load i32, i32* %new_gep2
6138//
6139// After the splitting, %new_gep1 and %new_gep2 can be sunk to BB2 because
6140// their offsets are small enough to fit into the addressing mode.
6141bool CodeGenPrepare::splitLargeGEPOffsets() {
6142  bool Changed = false;
6143  for (auto &Entry : LargeOffsetGEPMap) {
6144    Value *OldBase = Entry.first;
6145    SmallVectorImpl<std::pair<AssertingVH<GetElementPtrInst>, int64_t>>
6146        &LargeOffsetGEPs = Entry.second;
6147    auto compareGEPOffset =
6148        [&](const std::pair<GetElementPtrInst *, int64_t> &LHS,
6149            const std::pair<GetElementPtrInst *, int64_t> &RHS) {
6150          if (LHS.first == RHS.first)
6151            return false;
6152          if (LHS.second != RHS.second)
6153            return LHS.second < RHS.second;
6154          return LargeOffsetGEPID[LHS.first] < LargeOffsetGEPID[RHS.first];
6155        };
6156    // Sort all the GEPs of the same data structure based on their offsets.
6157    llvm::sort(LargeOffsetGEPs, compareGEPOffset);
6158    LargeOffsetGEPs.erase(
6159        std::unique(LargeOffsetGEPs.begin(), LargeOffsetGEPs.end()),
6160        LargeOffsetGEPs.end());
6161    // Skip if all the GEPs have the same offsets.
6162    if (LargeOffsetGEPs.front().second == LargeOffsetGEPs.back().second)
6163      continue;
6164    GetElementPtrInst *BaseGEP = LargeOffsetGEPs.begin()->first;
6165    int64_t BaseOffset = LargeOffsetGEPs.begin()->second;
6166    Value *NewBaseGEP = nullptr;
6167
6168    auto createNewBase = [&](int64_t BaseOffset, Value *OldBase,
6169                             GetElementPtrInst *GEP) {
6170      LLVMContext &Ctx = GEP->getContext();
6171      Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6172      Type *I8PtrTy =
6173          PointerType::get(Ctx, GEP->getType()->getPointerAddressSpace());
6174
6175      BasicBlock::iterator NewBaseInsertPt;
6176      BasicBlock *NewBaseInsertBB;
6177      if (auto *BaseI = dyn_cast<Instruction>(OldBase)) {
6178        // If the base of the struct is an instruction, the new base will be
6179        // inserted close to it.
6180        NewBaseInsertBB = BaseI->getParent();
6181        if (isa<PHINode>(BaseI))
6182          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6183        else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(BaseI)) {
6184          NewBaseInsertBB =
6185              SplitEdge(NewBaseInsertBB, Invoke->getNormalDest(), DT.get(), LI);
6186          NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6187        } else
6188          NewBaseInsertPt = std::next(BaseI->getIterator());
6189      } else {
6190        // If the current base is an argument or global value, the new base
6191        // will be inserted into the entry block.
6192        NewBaseInsertBB = &BaseGEP->getFunction()->getEntryBlock();
6193        NewBaseInsertPt = NewBaseInsertBB->getFirstInsertionPt();
6194      }
6195      IRBuilder<> NewBaseBuilder(NewBaseInsertBB, NewBaseInsertPt);
6196      // Create a new base.
6197      Value *BaseIndex = ConstantInt::get(PtrIdxTy, BaseOffset);
6198      NewBaseGEP = OldBase;
6199      if (NewBaseGEP->getType() != I8PtrTy)
6200        NewBaseGEP = NewBaseBuilder.CreatePointerCast(NewBaseGEP, I8PtrTy);
6201      NewBaseGEP =
6202          NewBaseBuilder.CreatePtrAdd(NewBaseGEP, BaseIndex, "splitgep");
6203      NewGEPBases.insert(NewBaseGEP);
6204      return;
6205    };
6206
6207    // Check whether all the offsets can be encoded with the preferred common base.
6208    if (int64_t PreferBase = TLI->getPreferredLargeGEPBaseOffset(
6209            LargeOffsetGEPs.front().second, LargeOffsetGEPs.back().second)) {
6210      BaseOffset = PreferBase;
6211      // Create a new base if the offset of the BaseGEP can be decoded with one
6212      // instruction.
6213      createNewBase(BaseOffset, OldBase, BaseGEP);
6214    }
6215
6216    auto *LargeOffsetGEP = LargeOffsetGEPs.begin();
6217    while (LargeOffsetGEP != LargeOffsetGEPs.end()) {
6218      GetElementPtrInst *GEP = LargeOffsetGEP->first;
6219      int64_t Offset = LargeOffsetGEP->second;
6220      if (Offset != BaseOffset) {
6221        TargetLowering::AddrMode AddrMode;
6222        AddrMode.HasBaseReg = true;
6223        AddrMode.BaseOffs = Offset - BaseOffset;
6224        // The result type of the GEP might not be the type of the memory
6225        // access.
6226        if (!TLI->isLegalAddressingMode(*DL, AddrMode,
6227                                        GEP->getResultElementType(),
6228                                        GEP->getAddressSpace())) {
6229          // We need to create a new base if the offset to the current base is
6230          // too large to fit into the addressing mode. So, a very large struct
6231          // may be split into several parts.
6232          BaseGEP = GEP;
6233          BaseOffset = Offset;
6234          NewBaseGEP = nullptr;
6235        }
6236      }
6237
6238      // Generate a new GEP to replace the current one.
6239      Type *PtrIdxTy = DL->getIndexType(GEP->getType());
6240
6241      if (!NewBaseGEP) {
6242        // Create a new base if we don't have one yet.  Find the insertion
6243        // point for the new base first.
6244        createNewBase(BaseOffset, OldBase, GEP);
6245      }
6246
6247      IRBuilder<> Builder(GEP);
6248      Value *NewGEP = NewBaseGEP;
6249      if (Offset != BaseOffset) {
6250        // Calculate the new offset for the new GEP.
6251        Value *Index = ConstantInt::get(PtrIdxTy, Offset - BaseOffset);
6252        NewGEP = Builder.CreatePtrAdd(NewBaseGEP, Index);
6253      }
6254      replaceAllUsesWith(GEP, NewGEP, FreshBBs, IsHugeFunc);
6255      LargeOffsetGEPID.erase(GEP);
6256      LargeOffsetGEP = LargeOffsetGEPs.erase(LargeOffsetGEP);
6257      GEP->eraseFromParent();
6258      Changed = true;
6259    }
6260  }
6261  return Changed;
6262}
6263
6264bool CodeGenPrepare::optimizePhiType(
6265    PHINode *I, SmallPtrSetImpl<PHINode *> &Visited,
6266    SmallPtrSetImpl<Instruction *> &DeletedInstrs) {
6267  // We are looking for a collection of interconnected phi nodes that together
6268  // only use loads/bitcasts and are used by stores/bitcasts, and the bitcasts
6269  // are of the same type. Convert the whole set of nodes to the type of the
6270  // bitcast.
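  // An illustrative sketch (hypothetical IR): an i32 phi fed by bitcasts of
  // float loads and consumed through a bitcast back to float, e.g.
  //   %b1 = bitcast float %ld1 to i32
  //   %b2 = bitcast float %ld2 to i32
  //   %p  = phi i32 [ %b1, %bb1 ], [ %b2, %bb2 ]
  //   %f  = bitcast i32 %p to float
  // can become a float phi over %ld1/%ld2, removing all three bitcasts,
  // provided the target's shouldConvertPhiType hook agrees.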
6271  Type *PhiTy = I->getType();
6272  Type *ConvertTy = nullptr;
6273  if (Visited.count(I) ||
6274      (!I->getType()->isIntegerTy() && !I->getType()->isFloatingPointTy()))
6275    return false;
6276
6277  SmallVector<Instruction *, 4> Worklist;
6278  Worklist.push_back(cast<Instruction>(I));
6279  SmallPtrSet<PHINode *, 4> PhiNodes;
6280  SmallPtrSet<ConstantData *, 4> Constants;
6281  PhiNodes.insert(I);
6282  Visited.insert(I);
6283  SmallPtrSet<Instruction *, 4> Defs;
6284  SmallPtrSet<Instruction *, 4> Uses;
6285  // This works by adding extra bitcasts between load/stores and removing
6286  // existing bitcasts. If we have a phi(bitcast(load)) or a store(bitcast(phi)),
6287  // we can get in the situation where we remove a bitcast in one iteration
6288  // just to add it again in the next. We need to ensure that at least one
6289  // bitcast we remove is anchored to something that will not change back.
6290  bool AnyAnchored = false;
6291
6292  while (!Worklist.empty()) {
6293    Instruction *II = Worklist.pop_back_val();
6294
6295    if (auto *Phi = dyn_cast<PHINode>(II)) {
6296      // Handle Defs, which might also be PHIs.
6297      for (Value *V : Phi->incoming_values()) {
6298        if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6299          if (!PhiNodes.count(OpPhi)) {
6300            if (!Visited.insert(OpPhi).second)
6301              return false;
6302            PhiNodes.insert(OpPhi);
6303            Worklist.push_back(OpPhi);
6304          }
6305        } else if (auto *OpLoad = dyn_cast<LoadInst>(V)) {
6306          if (!OpLoad->isSimple())
6307            return false;
6308          if (Defs.insert(OpLoad).second)
6309            Worklist.push_back(OpLoad);
6310        } else if (auto *OpEx = dyn_cast<ExtractElementInst>(V)) {
6311          if (Defs.insert(OpEx).second)
6312            Worklist.push_back(OpEx);
6313        } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6314          if (!ConvertTy)
6315            ConvertTy = OpBC->getOperand(0)->getType();
6316          if (OpBC->getOperand(0)->getType() != ConvertTy)
6317            return false;
6318          if (Defs.insert(OpBC).second) {
6319            Worklist.push_back(OpBC);
6320            AnyAnchored |= !isa<LoadInst>(OpBC->getOperand(0)) &&
6321                           !isa<ExtractElementInst>(OpBC->getOperand(0));
6322          }
6323        } else if (auto *OpC = dyn_cast<ConstantData>(V))
6324          Constants.insert(OpC);
6325        else
6326          return false;
6327      }
6328    }
6329
6330    // Handle uses, which might also be phis.
6331    for (User *V : II->users()) {
6332      if (auto *OpPhi = dyn_cast<PHINode>(V)) {
6333        if (!PhiNodes.count(OpPhi)) {
6334          if (Visited.count(OpPhi))
6335            return false;
6336          PhiNodes.insert(OpPhi);
6337          Visited.insert(OpPhi);
6338          Worklist.push_back(OpPhi);
6339        }
6340      } else if (auto *OpStore = dyn_cast<StoreInst>(V)) {
6341        if (!OpStore->isSimple() || OpStore->getOperand(0) != II)
6342          return false;
6343        Uses.insert(OpStore);
6344      } else if (auto *OpBC = dyn_cast<BitCastInst>(V)) {
6345        if (!ConvertTy)
6346          ConvertTy = OpBC->getType();
6347        if (OpBC->getType() != ConvertTy)
6348          return false;
6349        Uses.insert(OpBC);
6350        AnyAnchored |=
6351            any_of(OpBC->users(), [](User *U) { return !isa<StoreInst>(U); });
6352      } else {
6353        return false;
6354      }
6355    }
6356  }
6357
6358  if (!ConvertTy || !AnyAnchored ||
6359      !TLI->shouldConvertPhiType(PhiTy, ConvertTy))
6360    return false;
6361
6362  LLVM_DEBUG(dbgs() << "Converting " << *I << "\n  and connected nodes to "
6363                    << *ConvertTy << "\n");
6364
6365  // Create all the new phi nodes of the new type, and bitcast any loads to the
6366  // correct type.
6367  ValueToValueMap ValMap;
6368  for (ConstantData *C : Constants)
6369    ValMap[C] = ConstantExpr::getBitCast(C, ConvertTy);
6370  for (Instruction *D : Defs) {
6371    if (isa<BitCastInst>(D)) {
6372      ValMap[D] = D->getOperand(0);
6373      DeletedInstrs.insert(D);
6374    } else {
6375      ValMap[D] =
6376          new BitCastInst(D, ConvertTy, D->getName() + ".bc", D->getNextNode());
6377    }
6378  }
6379  for (PHINode *Phi : PhiNodes)
6380    ValMap[Phi] = PHINode::Create(ConvertTy, Phi->getNumIncomingValues(),
6381                                  Phi->getName() + ".tc", Phi);
6382  // Pipe together all the PhiNodes.
6383  for (PHINode *Phi : PhiNodes) {
6384    PHINode *NewPhi = cast<PHINode>(ValMap[Phi]);
6385    for (int i = 0, e = Phi->getNumIncomingValues(); i < e; i++)
6386      NewPhi->addIncoming(ValMap[Phi->getIncomingValue(i)],
6387                          Phi->getIncomingBlock(i));
6388    Visited.insert(NewPhi);
6389  }
6390  // And finally pipe up the stores and bitcasts
6391  for (Instruction *U : Uses) {
6392    if (isa<BitCastInst>(U)) {
6393      DeletedInstrs.insert(U);
6394      replaceAllUsesWith(U, ValMap[U->getOperand(0)], FreshBBs, IsHugeFunc);
6395    } else {
6396      U->setOperand(0,
6397                    new BitCastInst(ValMap[U->getOperand(0)], PhiTy, "bc", U));
6398    }
6399  }
6400
6401  // Save the removed phis to be deleted later.
6402  for (PHINode *Phi : PhiNodes)
6403    DeletedInstrs.insert(Phi);
6404  return true;
6405}
6406
6407bool CodeGenPrepare::optimizePhiTypes(Function &F) {
6408  if (!OptimizePhiTypes)
6409    return false;
6410
6411  bool Changed = false;
6412  SmallPtrSet<PHINode *, 4> Visited;
6413  SmallPtrSet<Instruction *, 4> DeletedInstrs;
6414
6415  // Attempt to optimize all the phis in the function to the correct type.
6416  for (auto &BB : F)
6417    for (auto &Phi : BB.phis())
6418      Changed |= optimizePhiType(&Phi, Visited, DeletedInstrs);
6419
6420  // Remove any old phis that have been converted.
6421  for (auto *I : DeletedInstrs) {
6422    replaceAllUsesWith(I, PoisonValue::get(I->getType()), FreshBBs, IsHugeFunc);
6423    I->eraseFromParent();
6424  }
6425
6426  return Changed;
6427}
6428
6429/// Return true if an ext(load) can be formed from an extension in
6430/// \p MovedExts.
6431bool CodeGenPrepare::canFormExtLd(
6432    const SmallVectorImpl<Instruction *> &MovedExts, LoadInst *&LI,
6433    Instruction *&Inst, bool HasPromoted) {
6434  for (auto *MovedExtInst : MovedExts) {
6435    if (isa<LoadInst>(MovedExtInst->getOperand(0))) {
6436      LI = cast<LoadInst>(MovedExtInst->getOperand(0));
6437      Inst = MovedExtInst;
6438      break;
6439    }
6440  }
6441  if (!LI)
6442    return false;
6443
6444  // If they're already in the same block, there's nothing to do.
6445  // Make the cheap checks first if we did not promote.
6446  // If we promoted, we need to check if it is indeed profitable.
6447  if (!HasPromoted && LI->getParent() == Inst->getParent())
6448    return false;
6449
6450  return TLI->isExtLoad(LI, Inst, *DL);
6451}
6452
6453/// Move a zext or sext fed by a load into the same basic block as the load,
6454/// unless conditions are unfavorable. This allows SelectionDAG to fold the
6455/// extend into the load.
6456///
6457/// E.g.,
6458/// \code
6459/// %ld = load i32* %addr
6460/// %add = add nuw i32 %ld, 4
6461/// %zext = zext i32 %add to i64
6462/// \endcode
6463/// =>
6464/// \code
6465/// %ld = load i32* %addr
6466/// %zext = zext i32 %ld to i64
6467/// %add = add nuw i64 %zext, 4
6468/// \endcode
6469/// Note that the promotion of %add to i64 is done in tryToPromoteExts(), which
6470/// allows us to match zext(load i32*) to i64.
6471///
6472/// Also, try to promote the computations used to obtain a sign extended
6473/// value used in memory accesses.
6474/// E.g.,
6475/// \code
6476/// a = add nsw i32 b, 3
6477/// d = sext i32 a to i64
6478/// e = getelementptr ..., i64 d
6479/// \endcode
6480/// =>
6481/// \code
6482/// f = sext i32 b to i64
6483/// a = add nsw i64 f, 3
6484/// e = getelementptr ..., i64 a
6485/// \endcode
6486///
6487/// \p Inst[in/out] the extension may be modified during the process if some
6488/// promotions apply.
6489bool CodeGenPrepare::optimizeExt(Instruction *&Inst) {
6490  bool AllowPromotionWithoutCommonHeader = false;
6491  /// See if it is an interesting sext operation for the address type
6492  /// promotion before trying to promote it, e.g., one with the right
6493  /// type and used in memory accesses.
6494  bool ATPConsiderable = TTI->shouldConsiderAddressTypePromotion(
6495      *Inst, AllowPromotionWithoutCommonHeader);
6496  TypePromotionTransaction TPT(RemovedInsts);
6497  TypePromotionTransaction::ConstRestorationPt LastKnownGood =
6498      TPT.getRestorationPoint();
6499  SmallVector<Instruction *, 1> Exts;
6500  SmallVector<Instruction *, 2> SpeculativelyMovedExts;
6501  Exts.push_back(Inst);
6502
6503  bool HasPromoted = tryToPromoteExts(TPT, Exts, SpeculativelyMovedExts);
6504
6505  // Look for a load being extended.
6506  LoadInst *LI = nullptr;
6507  Instruction *ExtFedByLoad;
6508
6509  // Try to promote a chain of computation if doing so allows forming an
6510  // extended load.
6511  if (canFormExtLd(SpeculativelyMovedExts, LI, ExtFedByLoad, HasPromoted)) {
6512    assert(LI && ExtFedByLoad && "Expect a valid load and extension");
6513    TPT.commit();
6514    // Move the extend into the same block as the load.
6515    ExtFedByLoad->moveAfter(LI);
6516    ++NumExtsMoved;
6517    Inst = ExtFedByLoad;
6518    return true;
6519  }
6520
6521  // Continue promoting SExts if the target considers doing so worthwhile.
6522  if (ATPConsiderable &&
6523      performAddressTypePromotion(Inst, AllowPromotionWithoutCommonHeader,
6524                                  HasPromoted, TPT, SpeculativelyMovedExts))
6525    return true;
6526
6527  TPT.rollback(LastKnownGood);
6528  return false;
6529}
6530
6531// Perform address type promotion if doing so is profitable.
6532// If AllowPromotionWithoutCommonHeader == false, we should find other sext
6533// instructions that sign extended the same initial value. However, if
6534// AllowPromotionWithoutCommonHeader == true, we assume that promoting the
6535// extension is profitable on its own.
6536bool CodeGenPrepare::performAddressTypePromotion(
6537    Instruction *&Inst, bool AllowPromotionWithoutCommonHeader,
6538    bool HasPromoted, TypePromotionTransaction &TPT,
6539    SmallVectorImpl<Instruction *> &SpeculativelyMovedExts) {
6540  bool Promoted = false;
6541  SmallPtrSet<Instruction *, 1> UnhandledExts;
6542  bool AllSeenFirst = true;
6543  for (auto *I : SpeculativelyMovedExts) {
6544    Value *HeadOfChain = I->getOperand(0);
6545    DenseMap<Value *, Instruction *>::iterator AlreadySeen =
6546        SeenChainsForSExt.find(HeadOfChain);
6547    // If there is an unhandled SExt which has the same header, try to promote
6548    // it as well.
6549    if (AlreadySeen != SeenChainsForSExt.end()) {
6550      if (AlreadySeen->second != nullptr)
6551        UnhandledExts.insert(AlreadySeen->second);
6552      AllSeenFirst = false;
6553    }
6554  }
6555
6556  if (!AllSeenFirst || (AllowPromotionWithoutCommonHeader &&
6557                        SpeculativelyMovedExts.size() == 1)) {
6558    TPT.commit();
6559    if (HasPromoted)
6560      Promoted = true;
6561    for (auto *I : SpeculativelyMovedExts) {
6562      Value *HeadOfChain = I->getOperand(0);
6563      SeenChainsForSExt[HeadOfChain] = nullptr;
6564      ValToSExtendedUses[HeadOfChain].push_back(I);
6565    }
6566    // Update Inst as promotion happened.
6567    Inst = SpeculativelyMovedExts.pop_back_val();
6568  } else {
6569    // This is the first chain visited from the header; keep the current chain
6570    // as unhandled. Defer promoting it until we encounter another SExt
6571    // chain derived from the same header.
6572    for (auto *I : SpeculativelyMovedExts) {
6573      Value *HeadOfChain = I->getOperand(0);
6574      SeenChainsForSExt[HeadOfChain] = Inst;
6575    }
6576    return false;
6577  }
6578
6579  if (!AllSeenFirst && !UnhandledExts.empty())
6580    for (auto *VisitedSExt : UnhandledExts) {
6581      if (RemovedInsts.count(VisitedSExt))
6582        continue;
6583      TypePromotionTransaction TPT(RemovedInsts);
6584      SmallVector<Instruction *, 1> Exts;
6585      SmallVector<Instruction *, 2> Chains;
6586      Exts.push_back(VisitedSExt);
6587      bool HasPromoted = tryToPromoteExts(TPT, Exts, Chains);
6588      TPT.commit();
6589      if (HasPromoted)
6590        Promoted = true;
6591      for (auto *I : Chains) {
6592        Value *HeadOfChain = I->getOperand(0);
6593        // Mark this as handled.
6594        SeenChainsForSExt[HeadOfChain] = nullptr;
6595        ValToSExtendedUses[HeadOfChain].push_back(I);
6596      }
6597    }
6598  return Promoted;
6599}
6600
6601bool CodeGenPrepare::optimizeExtUses(Instruction *I) {
6602  BasicBlock *DefBB = I->getParent();
6603
6604  // If the result of a {s|z}ext and its source are both live out, rewrite all
6605  // other uses of the source with the result of the extension.
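  // An illustrative sketch (hypothetical IR): with
  //   BB0: %x   = ...
  //        %ext = zext i32 %x to i64
  //   BB1: use of %x
  // and a free truncate, the use in BB1 is rewritten to
  //   BB1: %t = trunc i64 %ext to i32
  //        use of %t
  // so that only %ext, and not %x, is live out of BB0.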
6606  Value *Src = I->getOperand(0);
6607  if (Src->hasOneUse())
6608    return false;
6609
6610  // Only do this xform if truncating is free.
6611  if (!TLI->isTruncateFree(I->getType(), Src->getType()))
6612    return false;
6613
6614  // Only safe to perform the optimization if the source is also defined in
6615  // this block.
6616  if (!isa<Instruction>(Src) || DefBB != cast<Instruction>(Src)->getParent())
6617    return false;
6618
6619  bool DefIsLiveOut = false;
6620  for (User *U : I->users()) {
6621    Instruction *UI = cast<Instruction>(U);
6622
6623    // Figure out which BB this ext is used in.
6624    BasicBlock *UserBB = UI->getParent();
6625    if (UserBB == DefBB)
6626      continue;
6627    DefIsLiveOut = true;
6628    break;
6629  }
6630  if (!DefIsLiveOut)
6631    return false;
6632
6633  // Make sure none of the uses are PHI nodes.
6634  for (User *U : Src->users()) {
6635    Instruction *UI = cast<Instruction>(U);
6636    BasicBlock *UserBB = UI->getParent();
6637    if (UserBB == DefBB)
6638      continue;
6639    // Be conservative. We don't want this xform to end up introducing
6640    // reloads just before load / store instructions.
6641    if (isa<PHINode>(UI) || isa<LoadInst>(UI) || isa<StoreInst>(UI))
6642      return false;
6643  }
6644
6645  // InsertedTruncs - Only insert one trunc in each block.
6646  DenseMap<BasicBlock *, Instruction *> InsertedTruncs;
6647
6648  bool MadeChange = false;
6649  for (Use &U : Src->uses()) {
6650    Instruction *User = cast<Instruction>(U.getUser());
6651
6652    // Figure out which BB this ext is used in.
6653    BasicBlock *UserBB = User->getParent();
6654    if (UserBB == DefBB)
6655      continue;
6656
6657    // Both src and def are live in this block. Rewrite the use.
6658    Instruction *&InsertedTrunc = InsertedTruncs[UserBB];
6659
6660    if (!InsertedTrunc) {
6661      BasicBlock::iterator InsertPt = UserBB->getFirstInsertionPt();
6662      assert(InsertPt != UserBB->end());
6663      InsertedTrunc = new TruncInst(I, Src->getType(), "");
6664      InsertedTrunc->insertBefore(*UserBB, InsertPt);
6665      InsertedInsts.insert(InsertedTrunc);
6666    }
6667
6668    // Replace a use of the {s|z}ext source with a use of the result.
6669    U = InsertedTrunc;
6670    ++NumExtUses;
6671    MadeChange = true;
6672  }
6673
6674  return MadeChange;
6675}
6676
6677// Find loads whose uses only use some of the loaded value's bits.  Add an "and"
6678// just after the load if the target can fold this into one extload instruction,
6679// with the hope of eliminating some of the other later "and" instructions using
6680// the loaded value.  "and"s that are made trivially redundant by the insertion
6681// of the new "and" are removed by this function, while others (e.g. those whose
6682// path from the load goes through a phi) are left for isel to potentially
6683// remove.
6684//
6685// For example:
6686//
6687// b0:
6688//   x = load i32
6689//   ...
6690// b1:
6691//   y = and x, 0xff
6692//   z = use y
6693//
6694// becomes:
6695//
6696// b0:
6697//   x = load i32
6698//   x' = and x, 0xff
6699//   ...
6700// b1:
6701//   z = use x'
6702//
6703// whereas:
6704//
6705// b0:
6706//   x1 = load i32
6707//   ...
6708// b1:
6709//   x2 = load i32
6710//   ...
6711// b2:
6712//   x = phi x1, x2
6713//   y = and x, 0xff
6714//
6715// becomes (after a call to optimizeLoadExt for each load):
6716//
6717// b0:
6718//   x1 = load i32
6719//   x1' = and x1, 0xff
6720//   ...
6721// b1:
6722//   x2 = load i32
6723//   x2' = and x2, 0xff
6724//   ...
6725// b2:
6726//   x = phi x1', x2'
6727//   y = and x, 0xff
6728bool CodeGenPrepare::optimizeLoadExt(LoadInst *Load) {
6729  if (!Load->isSimple() || !Load->getType()->isIntOrPtrTy())
6730    return false;
6731
6732  // Skip loads we've already transformed.
6733  if (Load->hasOneUse() &&
6734      InsertedInsts.count(cast<Instruction>(*Load->user_begin())))
6735    return false;
6736
6737  // Look at all uses of Load, looking through phis, to determine how many bits
6738  // of the loaded value are needed.
6739  SmallVector<Instruction *, 8> WorkList;
6740  SmallPtrSet<Instruction *, 16> Visited;
6741  SmallVector<Instruction *, 8> AndsToMaybeRemove;
6742  for (auto *U : Load->users())
6743    WorkList.push_back(cast<Instruction>(U));
6744
6745  EVT LoadResultVT = TLI->getValueType(*DL, Load->getType());
6746  unsigned BitWidth = LoadResultVT.getSizeInBits();
6747  // If the BitWidth is 0, do not try to optimize the type
6748  if (BitWidth == 0)
6749    return false;
6750
6751  APInt DemandBits(BitWidth, 0);
6752  APInt WidestAndBits(BitWidth, 0);
6753
6754  while (!WorkList.empty()) {
6755    Instruction *I = WorkList.pop_back_val();
6756
6757    // Break use-def graph loops.
6758    if (!Visited.insert(I).second)
6759      continue;
6760
6761    // For a PHI node, push all of its users.
6762    if (auto *Phi = dyn_cast<PHINode>(I)) {
6763      for (auto *U : Phi->users())
6764        WorkList.push_back(cast<Instruction>(U));
6765      continue;
6766    }
6767
6768    switch (I->getOpcode()) {
6769    case Instruction::And: {
6770      auto *AndC = dyn_cast<ConstantInt>(I->getOperand(1));
6771      if (!AndC)
6772        return false;
6773      APInt AndBits = AndC->getValue();
6774      DemandBits |= AndBits;
      // Keep track of the widest 'and' mask we see.
6776      if (AndBits.ugt(WidestAndBits))
6777        WidestAndBits = AndBits;
6778      if (AndBits == WidestAndBits && I->getOperand(0) == Load)
6779        AndsToMaybeRemove.push_back(I);
6780      break;
6781    }
6782
6783    case Instruction::Shl: {
6784      auto *ShlC = dyn_cast<ConstantInt>(I->getOperand(1));
6785      if (!ShlC)
6786        return false;
6787      uint64_t ShiftAmt = ShlC->getLimitedValue(BitWidth - 1);
6788      DemandBits.setLowBits(BitWidth - ShiftAmt);
6789      break;
6790    }
6791
6792    case Instruction::Trunc: {
6793      EVT TruncVT = TLI->getValueType(*DL, I->getType());
6794      unsigned TruncBitWidth = TruncVT.getSizeInBits();
6795      DemandBits.setLowBits(TruncBitWidth);
6796      break;
6797    }
6798
6799    default:
6800      return false;
6801    }
6802  }
6803
6804  uint32_t ActiveBits = DemandBits.getActiveBits();
6805  // Avoid hoisting (and (load x) 1) since it is unlikely to be folded by the
6806  // target even if isLoadExtLegal says an i1 EXTLOAD is valid.  For example,
6807  // for the AArch64 target isLoadExtLegal(ZEXTLOAD, i32, i1) returns true, but
6808  // (and (load x) 1) is not matched as a single instruction, rather as a LDR
6809  // followed by an AND.
6810  // TODO: Look into removing this restriction by fixing backends to either
6811  // return false for isLoadExtLegal for i1 or have them select this pattern to
6812  // a single instruction.
6813  //
6814  // Also avoid hoisting if we didn't see any ands with the exact DemandBits
6815  // mask, since these are the only ands that will be removed by isel.
6816  if (ActiveBits <= 1 || !DemandBits.isMask(ActiveBits) ||
6817      WidestAndBits != DemandBits)
6818    return false;
6819
6820  LLVMContext &Ctx = Load->getType()->getContext();
6821  Type *TruncTy = Type::getIntNTy(Ctx, ActiveBits);
6822  EVT TruncVT = TLI->getValueType(*DL, TruncTy);
6823
6824  // Reject cases that won't be matched as extloads.
6825  if (!LoadResultVT.bitsGT(TruncVT) || !TruncVT.isRound() ||
6826      !TLI->isLoadExtLegal(ISD::ZEXTLOAD, LoadResultVT, TruncVT))
6827    return false;
6828
6829  IRBuilder<> Builder(Load->getNextNonDebugInstruction());
6830  auto *NewAnd = cast<Instruction>(
6831      Builder.CreateAnd(Load, ConstantInt::get(Ctx, DemandBits)));
6832  // Mark this instruction as "inserted by CGP", so that other
6833  // optimizations don't touch it.
6834  InsertedInsts.insert(NewAnd);
6835
6836  // Replace all uses of load with new and (except for the use of load in the
6837  // new and itself).
6838  replaceAllUsesWith(Load, NewAnd, FreshBBs, IsHugeFunc);
6839  NewAnd->setOperand(0, Load);
6840
6841  // Remove any and instructions that are now redundant.
6842  for (auto *And : AndsToMaybeRemove)
6843    // Check that the and mask is the same as the one we decided to put on the
6844    // new and.
6845    if (cast<ConstantInt>(And->getOperand(1))->getValue() == DemandBits) {
6846      replaceAllUsesWith(And, NewAnd, FreshBBs, IsHugeFunc);
6847      if (&*CurInstIterator == And)
6848        CurInstIterator = std::next(And->getIterator());
6849      And->eraseFromParent();
6850      ++NumAndUses;
6851    }
6852
6853  ++NumAndsAdded;
6854  return true;
6855}
6856
6857/// Check if V (an operand of a select instruction) is an expensive instruction
6858/// that is only used once.
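/// For example, a single-use fdiv feeding only the select's true value is a
/// typical candidate: it is safe to speculate but usually expensive, so sinking
/// it behind a branch avoids paying for it on the other path.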
6859static bool sinkSelectOperand(const TargetTransformInfo *TTI, Value *V) {
6860  auto *I = dyn_cast<Instruction>(V);
6861  // If it's safe to speculatively execute, then it should not have side
6862  // effects; therefore, it's safe to sink and possibly *not* execute.
6863  return I && I->hasOneUse() && isSafeToSpeculativelyExecute(I) &&
6864         TTI->isExpensiveToSpeculativelyExecute(I);
6865}
6866
6867/// Returns true if a SelectInst should be turned into an explicit branch.
6868static bool isFormingBranchFromSelectProfitable(const TargetTransformInfo *TTI,
6869                                                const TargetLowering *TLI,
6870                                                SelectInst *SI) {
6871  // If even a predictable select is cheap, then a branch can't be cheaper.
6872  if (!TLI->isPredictableSelectExpensive())
6873    return false;
6874
6875  // FIXME: This should use the same heuristics as IfConversion to determine
6876  // whether a select is better represented as a branch.
6877
6878  // If metadata tells us that the select condition is obviously predictable,
6879  // then we want to replace the select with a branch.
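  // For example, branch weights of {2000, 1} give a probability of roughly
  // 99.95% for the likely side, which typically exceeds the target's
  // predictable-branch threshold.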
6880  uint64_t TrueWeight, FalseWeight;
6881  if (extractBranchWeights(*SI, TrueWeight, FalseWeight)) {
6882    uint64_t Max = std::max(TrueWeight, FalseWeight);
6883    uint64_t Sum = TrueWeight + FalseWeight;
6884    if (Sum != 0) {
6885      auto Probability = BranchProbability::getBranchProbability(Max, Sum);
6886      if (Probability > TTI->getPredictableBranchThreshold())
6887        return true;
6888    }
6889  }
6890
6891  CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
6892
6893  // If a branch is predictable, an out-of-order CPU can avoid blocking on its
6894  // comparison condition. If the compare has more than one use, there's
6895  // probably another cmov or setcc around, so it's not worth emitting a branch.
6896  if (!Cmp || !Cmp->hasOneUse())
6897    return false;
6898
6899  // If either operand of the select is expensive and only needed on one side
6900  // of the select, we should form a branch.
6901  if (sinkSelectOperand(TTI, SI->getTrueValue()) ||
6902      sinkSelectOperand(TTI, SI->getFalseValue()))
6903    return true;
6904
6905  return false;
6906}
6907
/// If \p isTrue is true, return the true value of \p SI, otherwise return the
/// false value of \p SI. If the true/false value of \p SI is defined by any
6910/// select instructions in \p Selects, look through the defining select
6911/// instruction until the true/false value is not defined in \p Selects.
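/// For example (illustrative IR), with Selects = {%s0, %s1} and isTrue == true:
///   %s0 = select i1 %c, i32 %a, i32 %b
///   %s1 = select i1 %c, i32 %s0, i32 %d
/// getTrueOrFalseValue(%s1, true, Selects) looks through %s0 and returns %a.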
6912static Value *
6913getTrueOrFalseValue(SelectInst *SI, bool isTrue,
6914                    const SmallPtrSet<const Instruction *, 2> &Selects) {
6915  Value *V = nullptr;
6916
6917  for (SelectInst *DefSI = SI; DefSI != nullptr && Selects.count(DefSI);
6918       DefSI = dyn_cast<SelectInst>(V)) {
6919    assert(DefSI->getCondition() == SI->getCondition() &&
6920           "The condition of DefSI does not match with SI");
6921    V = (isTrue ? DefSI->getTrueValue() : DefSI->getFalseValue());
6922  }
6923
6924  assert(V && "Failed to get select true/false value");
6925  return V;
6926}
6927
6928bool CodeGenPrepare::optimizeShiftInst(BinaryOperator *Shift) {
6929  assert(Shift->isShift() && "Expected a shift");
6930
6931  // If this is (1) a vector shift, (2) shifts by scalars are cheaper than
6932  // general vector shifts, and (3) the shift amount is a select-of-splatted
6933  // values, hoist the shifts before the select:
6934  //   shift Op0, (select Cond, TVal, FVal) -->
6935  //   select Cond, (shift Op0, TVal), (shift Op0, FVal)
6936  //
6937  // This is inverting a generic IR transform when we know that the cost of a
6938  // general vector shift is more than the cost of 2 shift-by-scalars.
6939  // We can't do this effectively in SDAG because we may not be able to
6940  // determine if the select operands are splats from within a basic block.
6941  Type *Ty = Shift->getType();
6942  if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6943    return false;
6944  Value *Cond, *TVal, *FVal;
6945  if (!match(Shift->getOperand(1),
6946             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6947    return false;
6948  if (!isSplatValue(TVal) || !isSplatValue(FVal))
6949    return false;
6950
6951  IRBuilder<> Builder(Shift);
6952  BinaryOperator::BinaryOps Opcode = Shift->getOpcode();
6953  Value *NewTVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), TVal);
6954  Value *NewFVal = Builder.CreateBinOp(Opcode, Shift->getOperand(0), FVal);
6955  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6956  replaceAllUsesWith(Shift, NewSel, FreshBBs, IsHugeFunc);
6957  Shift->eraseFromParent();
6958  return true;
6959}
6960
6961bool CodeGenPrepare::optimizeFunnelShift(IntrinsicInst *Fsh) {
6962  Intrinsic::ID Opcode = Fsh->getIntrinsicID();
6963  assert((Opcode == Intrinsic::fshl || Opcode == Intrinsic::fshr) &&
6964         "Expected a funnel shift");
6965
6966  // If this is (1) a vector funnel shift, (2) shifts by scalars are cheaper
6967  // than general vector shifts, and (3) the shift amount is select-of-splatted
6968  // values, hoist the funnel shifts before the select:
6969  //   fsh Op0, Op1, (select Cond, TVal, FVal) -->
6970  //   select Cond, (fsh Op0, Op1, TVal), (fsh Op0, Op1, FVal)
6971  //
6972  // This is inverting a generic IR transform when we know that the cost of a
6973  // general vector shift is more than the cost of 2 shift-by-scalars.
6974  // We can't do this effectively in SDAG because we may not be able to
6975  // determine if the select operands are splats from within a basic block.
6976  Type *Ty = Fsh->getType();
6977  if (!Ty->isVectorTy() || !TLI->isVectorShiftByScalarCheap(Ty))
6978    return false;
6979  Value *Cond, *TVal, *FVal;
6980  if (!match(Fsh->getOperand(2),
6981             m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
6982    return false;
6983  if (!isSplatValue(TVal) || !isSplatValue(FVal))
6984    return false;
6985
6986  IRBuilder<> Builder(Fsh);
6987  Value *X = Fsh->getOperand(0), *Y = Fsh->getOperand(1);
6988  Value *NewTVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, TVal});
6989  Value *NewFVal = Builder.CreateIntrinsic(Opcode, Ty, {X, Y, FVal});
6990  Value *NewSel = Builder.CreateSelect(Cond, NewTVal, NewFVal);
6991  replaceAllUsesWith(Fsh, NewSel, FreshBBs, IsHugeFunc);
6992  Fsh->eraseFromParent();
6993  return true;
6994}
6995
6996/// If we have a SelectInst that will likely profit from branch prediction,
6997/// turn it into a branch.
6998bool CodeGenPrepare::optimizeSelectInst(SelectInst *SI) {
6999  if (DisableSelectToBranch)
7000    return false;
7001
7002  // If the SelectOptimize pass is enabled, selects have already been optimized.
7003  if (!getCGPassBuilderOption().DisableSelectOptimize)
7004    return false;
7005
7006  // Find all consecutive select instructions that share the same condition.
7007  SmallVector<SelectInst *, 2> ASI;
7008  ASI.push_back(SI);
7009  for (BasicBlock::iterator It = ++BasicBlock::iterator(SI);
7010       It != SI->getParent()->end(); ++It) {
7011    SelectInst *I = dyn_cast<SelectInst>(&*It);
7012    if (I && SI->getCondition() == I->getCondition()) {
7013      ASI.push_back(I);
7014    } else {
7015      break;
7016    }
7017  }
7018
7019  SelectInst *LastSI = ASI.back();
  // Increment the current iterator to skip the rest of the select
  // instructions, because they will either all be lowered to branches or none
  // of them will be.
7022  CurInstIterator = std::next(LastSI->getIterator());
  // Examine debug-info attached to the consecutive select instructions. They
  // won't be individually optimised by optimizeInst, so we need to perform
  // DPValue maintenance here instead.
7026  for (SelectInst *SI : ArrayRef(ASI).drop_front())
7027    fixupDPValuesOnInst(*SI);
7028
7029  bool VectorCond = !SI->getCondition()->getType()->isIntegerTy(1);
7030
  // Can we convert the 'select' to control flow?
7032  if (VectorCond || SI->getMetadata(LLVMContext::MD_unpredictable))
7033    return false;
7034
7035  TargetLowering::SelectSupportKind SelectKind;
7036  if (SI->getType()->isVectorTy())
7037    SelectKind = TargetLowering::ScalarCondVectorVal;
7038  else
7039    SelectKind = TargetLowering::ScalarValSelect;
7040
7041  if (TLI->isSelectSupported(SelectKind) &&
7042      (!isFormingBranchFromSelectProfitable(TTI, TLI, SI) || OptSize ||
7043       llvm::shouldOptimizeForSize(SI->getParent(), PSI, BFI.get())))
7044    return false;
7045
7046  // The DominatorTree needs to be rebuilt by any consumers after this
7047  // transformation. We simply reset here rather than setting the ModifiedDT
7048  // flag to avoid restarting the function walk in runOnFunction for each
7049  // select optimized.
7050  DT.reset();
7051
7052  // Transform a sequence like this:
7053  //    start:
7054  //       %cmp = cmp uge i32 %a, %b
7055  //       %sel = select i1 %cmp, i32 %c, i32 %d
7056  //
7057  // Into:
7058  //    start:
7059  //       %cmp = cmp uge i32 %a, %b
7060  //       %cmp.frozen = freeze %cmp
7061  //       br i1 %cmp.frozen, label %select.true, label %select.false
7062  //    select.true:
7063  //       br label %select.end
7064  //    select.false:
7065  //       br label %select.end
7066  //    select.end:
7067  //       %sel = phi i32 [ %c, %select.true ], [ %d, %select.false ]
7068  //
7069  // %cmp should be frozen, otherwise it may introduce undefined behavior.
7070  // In addition, we may sink instructions that produce %c or %d from
7071  // the entry block into the destination(s) of the new branch.
  // If the true or false blocks do not contain a sunk instruction, that
7073  // block and its branch may be optimized away. In that case, one side of the
7074  // first branch will point directly to select.end, and the corresponding PHI
7075  // predecessor block will be the start block.
7076
7077  // Collect values that go on the true side and the values that go on the false
7078  // side.
7079  SmallVector<Instruction *> TrueInstrs, FalseInstrs;
7080  for (SelectInst *SI : ASI) {
7081    if (Value *V = SI->getTrueValue(); sinkSelectOperand(TTI, V))
7082      TrueInstrs.push_back(cast<Instruction>(V));
7083    if (Value *V = SI->getFalseValue(); sinkSelectOperand(TTI, V))
7084      FalseInstrs.push_back(cast<Instruction>(V));
7085  }
7086
7087  // Split the select block, according to how many (if any) values go on each
7088  // side.
7089  BasicBlock *StartBlock = SI->getParent();
7090  BasicBlock::iterator SplitPt = std::next(BasicBlock::iterator(LastSI));
7091  // We should split before any debug-info.
7092  SplitPt.setHeadBit(true);
7093
7094  IRBuilder<> IB(SI);
7095  auto *CondFr = IB.CreateFreeze(SI->getCondition(), SI->getName() + ".frozen");
7096
7097  BasicBlock *TrueBlock = nullptr;
7098  BasicBlock *FalseBlock = nullptr;
7099  BasicBlock *EndBlock = nullptr;
7100  BranchInst *TrueBranch = nullptr;
7101  BranchInst *FalseBranch = nullptr;
7102  if (TrueInstrs.size() == 0) {
7103    FalseBranch = cast<BranchInst>(SplitBlockAndInsertIfElse(
7104        CondFr, SplitPt, false, nullptr, nullptr, LI));
7105    FalseBlock = FalseBranch->getParent();
7106    EndBlock = cast<BasicBlock>(FalseBranch->getOperand(0));
7107  } else if (FalseInstrs.size() == 0) {
7108    TrueBranch = cast<BranchInst>(SplitBlockAndInsertIfThen(
7109        CondFr, SplitPt, false, nullptr, nullptr, LI));
7110    TrueBlock = TrueBranch->getParent();
7111    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7112  } else {
7113    Instruction *ThenTerm = nullptr;
7114    Instruction *ElseTerm = nullptr;
7115    SplitBlockAndInsertIfThenElse(CondFr, SplitPt, &ThenTerm, &ElseTerm,
7116                                  nullptr, nullptr, LI);
7117    TrueBranch = cast<BranchInst>(ThenTerm);
7118    FalseBranch = cast<BranchInst>(ElseTerm);
7119    TrueBlock = TrueBranch->getParent();
7120    FalseBlock = FalseBranch->getParent();
7121    EndBlock = cast<BasicBlock>(TrueBranch->getOperand(0));
7122  }
7123
7124  EndBlock->setName("select.end");
7125  if (TrueBlock)
7126    TrueBlock->setName("select.true.sink");
7127  if (FalseBlock)
7128    FalseBlock->setName(FalseInstrs.size() == 0 ? "select.false"
7129                                                : "select.false.sink");
7130
7131  if (IsHugeFunc) {
7132    if (TrueBlock)
7133      FreshBBs.insert(TrueBlock);
7134    if (FalseBlock)
7135      FreshBBs.insert(FalseBlock);
7136    FreshBBs.insert(EndBlock);
7137  }
7138
7139  BFI->setBlockFreq(EndBlock, BFI->getBlockFreq(StartBlock));
7140
7141  static const unsigned MD[] = {
7142      LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
7143      LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
7144  StartBlock->getTerminator()->copyMetadata(*SI, MD);
7145
7146  // Sink expensive instructions into the conditional blocks to avoid executing
7147  // them speculatively.
7148  for (Instruction *I : TrueInstrs)
7149    I->moveBefore(TrueBranch);
7150  for (Instruction *I : FalseInstrs)
7151    I->moveBefore(FalseBranch);
7152
7153  // If we did not create a new block for one of the 'true' or 'false' paths
7154  // of the condition, it means that side of the branch goes to the end block
7155  // directly and the path originates from the start block from the point of
7156  // view of the new PHI.
7157  if (TrueBlock == nullptr)
7158    TrueBlock = StartBlock;
7159  else if (FalseBlock == nullptr)
7160    FalseBlock = StartBlock;
7161
7162  SmallPtrSet<const Instruction *, 2> INS;
7163  INS.insert(ASI.begin(), ASI.end());
  // Use a reverse iterator because a later select may use the value of an
  // earlier select, and we need to propagate the value through the earlier
  // select to get the PHI operand.
7167  for (SelectInst *SI : llvm::reverse(ASI)) {
7168    // The select itself is replaced with a PHI Node.
7169    PHINode *PN = PHINode::Create(SI->getType(), 2, "");
7170    PN->insertBefore(EndBlock->begin());
7171    PN->takeName(SI);
7172    PN->addIncoming(getTrueOrFalseValue(SI, true, INS), TrueBlock);
7173    PN->addIncoming(getTrueOrFalseValue(SI, false, INS), FalseBlock);
7174    PN->setDebugLoc(SI->getDebugLoc());
7175
7176    replaceAllUsesWith(SI, PN, FreshBBs, IsHugeFunc);
7177    SI->eraseFromParent();
7178    INS.erase(SI);
7179    ++NumSelectsExpanded;
7180  }
7181
7182  // Instruct OptimizeBlock to skip to the next block.
7183  CurInstIterator = StartBlock->end();
7184  return true;
7185}
7186
/// Some targets only accept certain types for splat inputs. For example a VDUP
/// in MVE takes a GPR (integer) register, and instructions that incorporate a
/// VDUP (such as a VADD qd, qm, rm) also require a GPR register.
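/// An illustrative IR sketch (assuming shouldConvertSplatType returns i32 for
/// a <4 x float> splat): the splat is rewritten to go through an integer
/// scalar:
///   %ins   = insertelement <4 x float> poison, float %f, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///                          <4 x i32> zeroinitializer
/// becomes
///   %bc     = bitcast float %f to i32
///   %ins.i  = insertelement <4 x i32> poison, i32 %bc, i64 0
///   %spl.i  = shufflevector <4 x i32> %ins.i, <4 x i32> poison,
///                           <4 x i32> zeroinitializer
///   %splat  = bitcast <4 x i32> %spl.i to <4 x float>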
7190bool CodeGenPrepare::optimizeShuffleVectorInst(ShuffleVectorInst *SVI) {
7191  // Accept shuf(insertelem(undef/poison, val, 0), undef/poison, <0,0,..>) only
7192  if (!match(SVI, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
7193                            m_Undef(), m_ZeroMask())))
7194    return false;
7195  Type *NewType = TLI->shouldConvertSplatType(SVI);
7196  if (!NewType)
7197    return false;
7198
7199  auto *SVIVecType = cast<FixedVectorType>(SVI->getType());
7200  assert(!NewType->isVectorTy() && "Expected a scalar type!");
7201  assert(NewType->getScalarSizeInBits() == SVIVecType->getScalarSizeInBits() &&
7202         "Expected a type of the same size!");
7203  auto *NewVecType =
7204      FixedVectorType::get(NewType, SVIVecType->getNumElements());
7205
7206  // Create a bitcast (shuffle (insert (bitcast(..))))
7207  IRBuilder<> Builder(SVI->getContext());
7208  Builder.SetInsertPoint(SVI);
7209  Value *BC1 = Builder.CreateBitCast(
7210      cast<Instruction>(SVI->getOperand(0))->getOperand(1), NewType);
7211  Value *Shuffle = Builder.CreateVectorSplat(NewVecType->getNumElements(), BC1);
7212  Value *BC2 = Builder.CreateBitCast(Shuffle, SVIVecType);
7213
7214  replaceAllUsesWith(SVI, BC2, FreshBBs, IsHugeFunc);
7215  RecursivelyDeleteTriviallyDeadInstructions(
7216      SVI, TLInfo, nullptr,
7217      [&](Value *V) { removeAllAssertingVHReferences(V); });
7218
  // Also hoist the bitcast up to its operand if they are not in the same
  // block.
7221  if (auto *BCI = dyn_cast<Instruction>(BC1))
7222    if (auto *Op = dyn_cast<Instruction>(BCI->getOperand(0)))
7223      if (BCI->getParent() != Op->getParent() && !isa<PHINode>(Op) &&
7224          !Op->isTerminator() && !Op->isEHPad())
7225        BCI->moveAfter(Op);
7226
7227  return true;
7228}
7229
7230bool CodeGenPrepare::tryToSinkFreeOperands(Instruction *I) {
7231  // If the operands of I can be folded into a target instruction together with
7232  // I, duplicate and sink them.
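  // For example (illustrative IR), if the target reports that the zext below
  // can be folded into its user, e.g. as part of a widening multiply:
  //   bb0:  %z = zext <8 x i8> %v to <8 x i16>
  //   bb1:  %m = mul <8 x i16> %z, %w
  // the zext is duplicated next to the multiply in bb1, and the original is
  // erased once it becomes dead.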
7233  SmallVector<Use *, 4> OpsToSink;
7234  if (!TLI->shouldSinkOperands(I, OpsToSink))
7235    return false;
7236
7237  // OpsToSink can contain multiple uses in a use chain (e.g.
7238  // (%u1 with %u1 = shufflevector), (%u2 with %u2 = zext %u1)). The dominating
7239  // uses must come first, so we process the ops in reverse order so as to not
7240  // create invalid IR.
7241  BasicBlock *TargetBB = I->getParent();
7242  bool Changed = false;
7243  SmallVector<Use *, 4> ToReplace;
7244  Instruction *InsertPoint = I;
7245  DenseMap<const Instruction *, unsigned long> InstOrdering;
7246  unsigned long InstNumber = 0;
7247  for (const auto &I : *TargetBB)
7248    InstOrdering[&I] = InstNumber++;
7249
7250  for (Use *U : reverse(OpsToSink)) {
7251    auto *UI = cast<Instruction>(U->get());
7252    if (isa<PHINode>(UI))
7253      continue;
7254    if (UI->getParent() == TargetBB) {
7255      if (InstOrdering[UI] < InstOrdering[InsertPoint])
7256        InsertPoint = UI;
7257      continue;
7258    }
7259    ToReplace.push_back(U);
7260  }
7261
7262  SetVector<Instruction *> MaybeDead;
7263  DenseMap<Instruction *, Instruction *> NewInstructions;
7264  for (Use *U : ToReplace) {
7265    auto *UI = cast<Instruction>(U->get());
7266    Instruction *NI = UI->clone();
7267
7268    if (IsHugeFunc) {
      // Now that we have cloned the instruction, its operands' defs may become
      // sinkable into this BB, so add those defs' BBs to FreshBBs for further
      // optimization.
7271      for (unsigned I = 0; I < NI->getNumOperands(); ++I) {
7272        auto *OpDef = dyn_cast<Instruction>(NI->getOperand(I));
7273        if (!OpDef)
7274          continue;
7275        FreshBBs.insert(OpDef->getParent());
7276      }
7277    }
7278
7279    NewInstructions[UI] = NI;
7280    MaybeDead.insert(UI);
7281    LLVM_DEBUG(dbgs() << "Sinking " << *UI << " to user " << *I << "\n");
7282    NI->insertBefore(InsertPoint);
7283    InsertPoint = NI;
7284    InsertedInsts.insert(NI);
7285
7286    // Update the use for the new instruction, making sure that we update the
7287    // sunk instruction uses, if it is part of a chain that has already been
7288    // sunk.
7289    Instruction *OldI = cast<Instruction>(U->getUser());
7290    if (NewInstructions.count(OldI))
7291      NewInstructions[OldI]->setOperand(U->getOperandNo(), NI);
7292    else
7293      U->set(NI);
7294    Changed = true;
7295  }
7296
7297  // Remove instructions that are dead after sinking.
7298  for (auto *I : MaybeDead) {
7299    if (!I->hasNUsesOrMore(1)) {
7300      LLVM_DEBUG(dbgs() << "Removing dead instruction: " << *I << "\n");
7301      I->eraseFromParent();
7302    }
7303  }
7304
7305  return Changed;
7306}
7307
7308bool CodeGenPrepare::optimizeSwitchType(SwitchInst *SI) {
7309  Value *Cond = SI->getCondition();
7310  Type *OldType = Cond->getType();
7311  LLVMContext &Context = Cond->getContext();
7312  EVT OldVT = TLI->getValueType(*DL, OldType);
7313  MVT RegType = TLI->getPreferredSwitchConditionType(Context, OldVT);
7314  unsigned RegWidth = RegType.getSizeInBits();
7315
7316  if (RegWidth <= cast<IntegerType>(OldType)->getBitWidth())
7317    return false;
7318
7319  // If the register width is greater than the type width, expand the condition
7320  // of the switch instruction and each case constant to the width of the
7321  // register. By widening the type of the switch condition, subsequent
7322  // comparisons (for case comparisons) will not need to be extended to the
7323  // preferred register width, so we will potentially eliminate N-1 extends,
7324  // where N is the number of cases in the switch.
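  // For example (illustrative IR, assuming a preferred 64-bit condition type):
  //   switch i8 %c, label %def [ i8 1, label %a
  //                              i8 2, label %b ]
  // becomes
  //   %c.ext = zext i8 %c to i64
  //   switch i64 %c.ext, label %def [ i64 1, label %a
  //                                   i64 2, label %b ]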
7325  auto *NewType = Type::getIntNTy(Context, RegWidth);
7326
7327  // Extend the switch condition and case constants using the target preferred
7328  // extend unless the switch condition is a function argument with an extend
7329  // attribute. In that case, we can avoid an unnecessary mask/extension by
7330  // matching the argument extension instead.
7331  Instruction::CastOps ExtType = Instruction::ZExt;
7332  // Some targets prefer SExt over ZExt.
7333  if (TLI->isSExtCheaperThanZExt(OldVT, RegType))
7334    ExtType = Instruction::SExt;
7335
7336  if (auto *Arg = dyn_cast<Argument>(Cond)) {
7337    if (Arg->hasSExtAttr())
7338      ExtType = Instruction::SExt;
7339    if (Arg->hasZExtAttr())
7340      ExtType = Instruction::ZExt;
7341  }
7342
7343  auto *ExtInst = CastInst::Create(ExtType, Cond, NewType);
7344  ExtInst->insertBefore(SI);
7345  ExtInst->setDebugLoc(SI->getDebugLoc());
7346  SI->setCondition(ExtInst);
7347  for (auto Case : SI->cases()) {
7348    const APInt &NarrowConst = Case.getCaseValue()->getValue();
7349    APInt WideConst = (ExtType == Instruction::ZExt)
7350                          ? NarrowConst.zext(RegWidth)
7351                          : NarrowConst.sext(RegWidth);
7352    Case.setValue(ConstantInt::get(Context, WideConst));
7353  }
7354
7355  return true;
7356}
7357
7358bool CodeGenPrepare::optimizeSwitchPhiConstants(SwitchInst *SI) {
7359  // The SCCP optimization tends to produce code like this:
7360  //   switch(x) { case 42: phi(42, ...) }
  // Materializing the constant for the phi-argument needs instructions, so we
7362  // change the code to:
7363  //   switch(x) { case 42: phi(x, ...) }
7364
7365  Value *Condition = SI->getCondition();
7366  // Avoid endless loop in degenerate case.
7367  if (isa<ConstantInt>(*Condition))
7368    return false;
7369
7370  bool Changed = false;
7371  BasicBlock *SwitchBB = SI->getParent();
7372  Type *ConditionType = Condition->getType();
7373
7374  for (const SwitchInst::CaseHandle &Case : SI->cases()) {
7375    ConstantInt *CaseValue = Case.getCaseValue();
7376    BasicBlock *CaseBB = Case.getCaseSuccessor();
7377    // Set to true if we previously checked that `CaseBB` is only reached by
7378    // a single case from this switch.
7379    bool CheckedForSinglePred = false;
7380    for (PHINode &PHI : CaseBB->phis()) {
7381      Type *PHIType = PHI.getType();
7382      // If ZExt is free then we can also catch patterns like this:
7383      //   switch((i32)x) { case 42: phi((i64)42, ...); }
7384      // and replace `(i64)42` with `zext i32 %x to i64`.
7385      bool TryZExt =
7386          PHIType->isIntegerTy() &&
7387          PHIType->getIntegerBitWidth() > ConditionType->getIntegerBitWidth() &&
7388          TLI->isZExtFree(ConditionType, PHIType);
7389      if (PHIType == ConditionType || TryZExt) {
7390        // Set to true to skip this case because of multiple preds.
7391        bool SkipCase = false;
7392        Value *Replacement = nullptr;
7393        for (unsigned I = 0, E = PHI.getNumIncomingValues(); I != E; I++) {
7394          Value *PHIValue = PHI.getIncomingValue(I);
7395          if (PHIValue != CaseValue) {
7396            if (!TryZExt)
7397              continue;
7398            ConstantInt *PHIValueInt = dyn_cast<ConstantInt>(PHIValue);
7399            if (!PHIValueInt ||
7400                PHIValueInt->getValue() !=
7401                    CaseValue->getValue().zext(PHIType->getIntegerBitWidth()))
7402              continue;
7403          }
7404          if (PHI.getIncomingBlock(I) != SwitchBB)
7405            continue;
7406          // We cannot optimize if there are multiple case labels jumping to
7407          // this block.  This check may get expensive when there are many
7408          // case labels so we test for it last.
7409          if (!CheckedForSinglePred) {
7410            CheckedForSinglePred = true;
7411            if (SI->findCaseDest(CaseBB) == nullptr) {
7412              SkipCase = true;
7413              break;
7414            }
7415          }
7416
7417          if (Replacement == nullptr) {
7418            if (PHIValue == CaseValue) {
7419              Replacement = Condition;
7420            } else {
7421              IRBuilder<> Builder(SI);
7422              Replacement = Builder.CreateZExt(Condition, PHIType);
7423            }
7424          }
7425          PHI.setIncomingValue(I, Replacement);
7426          Changed = true;
7427        }
7428        if (SkipCase)
7429          break;
7430      }
7431    }
7432  }
7433  return Changed;
7434}
7435
7436bool CodeGenPrepare::optimizeSwitchInst(SwitchInst *SI) {
7437  bool Changed = optimizeSwitchType(SI);
7438  Changed |= optimizeSwitchPhiConstants(SI);
7439  return Changed;
7440}
7441
7442namespace {
7443
7444/// Helper class to promote a scalar operation to a vector one.
/// This class is used to move an extractelement transition downward.
7446/// E.g.,
7447/// a = vector_op <2 x i32>
7448/// b = extractelement <2 x i32> a, i32 0
7449/// c = scalar_op b
7450/// store c
7451///
7452/// =>
7453/// a = vector_op <2 x i32>
7454/// c = vector_op a (equivalent to scalar_op on the related lane)
7455/// * d = extractelement <2 x i32> c, i32 0
7456/// * store d
/// Assuming both the extractelement and the store can be combined, we get rid
/// of the transition.
7459class VectorPromoteHelper {
7460  /// DataLayout associated with the current module.
7461  const DataLayout &DL;
7462
7463  /// Used to perform some checks on the legality of vector operations.
7464  const TargetLowering &TLI;
7465
  /// Used to estimate the cost of the promoted chain.
7467  const TargetTransformInfo &TTI;
7468
7469  /// The transition being moved downwards.
7470  Instruction *Transition;
7471
7472  /// The sequence of instructions to be promoted.
7473  SmallVector<Instruction *, 4> InstsToBePromoted;
7474
7475  /// Cost of combining a store and an extract.
7476  unsigned StoreExtractCombineCost;
7477
7478  /// Instruction that will be combined with the transition.
7479  Instruction *CombineInst = nullptr;
7480
7481  /// The instruction that represents the current end of the transition.
7482  /// Since we are faking the promotion until we reach the end of the chain
7483  /// of computation, we need a way to get the current end of the transition.
7484  Instruction *getEndOfTransition() const {
7485    if (InstsToBePromoted.empty())
7486      return Transition;
7487    return InstsToBePromoted.back();
7488  }
7489
7490  /// Return the index of the original value in the transition.
7491  /// E.g., for "extractelement <2 x i32> c, i32 1" the original value,
7492  /// c, is at index 0.
7493  unsigned getTransitionOriginalValueIdx() const {
7494    assert(isa<ExtractElementInst>(Transition) &&
7495           "Other kind of transitions are not supported yet");
7496    return 0;
7497  }
7498
7499  /// Return the index of the index in the transition.
7500  /// E.g., for "extractelement <2 x i32> c, i32 0" the index
7501  /// is at index 1.
7502  unsigned getTransitionIdx() const {
7503    assert(isa<ExtractElementInst>(Transition) &&
7504           "Other kind of transitions are not supported yet");
7505    return 1;
7506  }
7507
7508  /// Get the type of the transition.
7509  /// This is the type of the original value.
7510  /// E.g., for "extractelement <2 x i32> c, i32 1" the type of the
7511  /// transition is <2 x i32>.
7512  Type *getTransitionType() const {
7513    return Transition->getOperand(getTransitionOriginalValueIdx())->getType();
7514  }
7515
7516  /// Promote \p ToBePromoted by moving \p Def downward through.
7517  /// I.e., we have the following sequence:
7518  /// Def = Transition <ty1> a to <ty2>
7519  /// b = ToBePromoted <ty2> Def, ...
7520  /// =>
7521  /// b = ToBePromoted <ty1> a, ...
7522  /// Def = Transition <ty1> ToBePromoted to <ty2>
7523  void promoteImpl(Instruction *ToBePromoted);
7524
7525  /// Check whether or not it is profitable to promote all the
7526  /// instructions enqueued to be promoted.
7527  bool isProfitableToPromote() {
7528    Value *ValIdx = Transition->getOperand(getTransitionOriginalValueIdx());
7529    unsigned Index = isa<ConstantInt>(ValIdx)
7530                         ? cast<ConstantInt>(ValIdx)->getZExtValue()
7531                         : -1;
7532    Type *PromotedType = getTransitionType();
7533
7534    StoreInst *ST = cast<StoreInst>(CombineInst);
7535    unsigned AS = ST->getPointerAddressSpace();
7536    // Check if this store is supported.
7537    if (!TLI.allowsMisalignedMemoryAccesses(
7538            TLI.getValueType(DL, ST->getValueOperand()->getType()), AS,
7539            ST->getAlign())) {
7540      // If this is not supported, there is no way we can combine
7541      // the extract with the store.
7542      return false;
7543    }
7544
7545    // The scalar chain of computation has to pay for the transition
7546    // scalar to vector.
7547    // The vector chain has to account for the combining cost.
7548    enum TargetTransformInfo::TargetCostKind CostKind =
7549        TargetTransformInfo::TCK_RecipThroughput;
7550    InstructionCost ScalarCost =
7551        TTI.getVectorInstrCost(*Transition, PromotedType, CostKind, Index);
7552    InstructionCost VectorCost = StoreExtractCombineCost;
7553    for (const auto &Inst : InstsToBePromoted) {
7554      // Compute the cost.
7555      // By construction, all instructions being promoted are arithmetic ones.
7556      // Moreover, one argument is a constant that can be viewed as a splat
7557      // constant.
7558      Value *Arg0 = Inst->getOperand(0);
7559      bool IsArg0Constant = isa<UndefValue>(Arg0) || isa<ConstantInt>(Arg0) ||
7560                            isa<ConstantFP>(Arg0);
7561      TargetTransformInfo::OperandValueInfo Arg0Info, Arg1Info;
7562      if (IsArg0Constant)
7563        Arg0Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7564      else
7565        Arg1Info.Kind = TargetTransformInfo::OK_UniformConstantValue;
7566
7567      ScalarCost += TTI.getArithmeticInstrCost(
7568          Inst->getOpcode(), Inst->getType(), CostKind, Arg0Info, Arg1Info);
7569      VectorCost += TTI.getArithmeticInstrCost(Inst->getOpcode(), PromotedType,
7570                                               CostKind, Arg0Info, Arg1Info);
7571    }
7572    LLVM_DEBUG(
7573        dbgs() << "Estimated cost of computation to be promoted:\nScalar: "
7574               << ScalarCost << "\nVector: " << VectorCost << '\n');
7575    return ScalarCost > VectorCost;
7576  }
7577
7578  /// Generate a constant vector with \p Val with the same
7579  /// number of elements as the transition.
7580  /// \p UseSplat defines whether or not \p Val should be replicated
7581  /// across the whole vector.
7582  /// In other words, if UseSplat == true, we generate <Val, Val, ..., Val>,
7583  /// otherwise we generate a vector with as many undef as possible:
7584  /// <undef, ..., undef, Val, undef, ..., undef> where \p Val is only
7585  /// used at the index of the extract.
7586  Value *getConstantVector(Constant *Val, bool UseSplat) const {
7587    unsigned ExtractIdx = std::numeric_limits<unsigned>::max();
7588    if (!UseSplat) {
7589      // If we cannot determine where the constant must be, we have to
7590      // use a splat constant.
7591      Value *ValExtractIdx = Transition->getOperand(getTransitionIdx());
7592      if (ConstantInt *CstVal = dyn_cast<ConstantInt>(ValExtractIdx))
7593        ExtractIdx = CstVal->getSExtValue();
7594      else
7595        UseSplat = true;
7596    }
7597
7598    ElementCount EC = cast<VectorType>(getTransitionType())->getElementCount();
7599    if (UseSplat)
7600      return ConstantVector::getSplat(EC, Val);
7601
7602    if (!EC.isScalable()) {
7603      SmallVector<Constant *, 4> ConstVec;
7604      UndefValue *UndefVal = UndefValue::get(Val->getType());
7605      for (unsigned Idx = 0; Idx != EC.getKnownMinValue(); ++Idx) {
7606        if (Idx == ExtractIdx)
7607          ConstVec.push_back(Val);
7608        else
7609          ConstVec.push_back(UndefVal);
7610      }
7611      return ConstantVector::get(ConstVec);
7612    } else
7613      llvm_unreachable(
7614          "Generate scalable vector for non-splat is unimplemented");
7615  }
7616
  /// Check if promoting the operand at \p OperandIdx in \p Use to a vector
  /// type can trigger undefined behavior.
7619  static bool canCauseUndefinedBehavior(const Instruction *Use,
7620                                        unsigned OperandIdx) {
    // It is not safe to introduce undef when the operand is on the
    // right-hand side of a division-like instruction.
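    // E.g., promoting the divisor of "sdiv i32 %a, %x" would fill the other
    // vector lanes with undef, which could introduce a division by zero or
    // undef in those lanes.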
7623    if (OperandIdx != 1)
7624      return false;
7625    switch (Use->getOpcode()) {
7626    default:
7627      return false;
7628    case Instruction::SDiv:
7629    case Instruction::UDiv:
7630    case Instruction::SRem:
7631    case Instruction::URem:
7632      return true;
7633    case Instruction::FDiv:
7634    case Instruction::FRem:
7635      return !Use->hasNoNaNs();
7636    }
7637    llvm_unreachable(nullptr);
7638  }
7639
7640public:
7641  VectorPromoteHelper(const DataLayout &DL, const TargetLowering &TLI,
7642                      const TargetTransformInfo &TTI, Instruction *Transition,
7643                      unsigned CombineCost)
7644      : DL(DL), TLI(TLI), TTI(TTI), Transition(Transition),
7645        StoreExtractCombineCost(CombineCost) {
7646    assert(Transition && "Do not know how to promote null");
7647  }
7648
7649  /// Check if we can promote \p ToBePromoted to \p Type.
7650  bool canPromote(const Instruction *ToBePromoted) const {
7651    // We could support CastInst too.
7652    return isa<BinaryOperator>(ToBePromoted);
7653  }
7654
  /// Check if it is profitable to promote \p ToBePromoted
  /// by moving the transition downward through it.
7657  bool shouldPromote(const Instruction *ToBePromoted) const {
7658    // Promote only if all the operands can be statically expanded.
7659    // Indeed, we do not want to introduce any new kind of transitions.
7660    for (const Use &U : ToBePromoted->operands()) {
7661      const Value *Val = U.get();
7662      if (Val == getEndOfTransition()) {
7663        // If the use is a division and the transition is on the rhs,
7664        // we cannot promote the operation, otherwise we may create a
7665        // division by zero.
7666        if (canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()))
7667          return false;
7668        continue;
7669      }
7670      if (!isa<ConstantInt>(Val) && !isa<UndefValue>(Val) &&
7671          !isa<ConstantFP>(Val))
7672        return false;
7673    }
7674    // Check that the resulting operation is legal.
7675    int ISDOpcode = TLI.InstructionOpcodeToISD(ToBePromoted->getOpcode());
7676    if (!ISDOpcode)
7677      return false;
7678    return StressStoreExtract ||
7679           TLI.isOperationLegalOrCustom(
7680               ISDOpcode, TLI.getValueType(DL, getTransitionType(), true));
7681  }
7682
7683  /// Check whether or not \p Use can be combined
7684  /// with the transition.
7685  /// I.e., is it possible to do Use(Transition) => AnotherUse?
7686  bool canCombine(const Instruction *Use) { return isa<StoreInst>(Use); }
7687
7688  /// Record \p ToBePromoted as part of the chain to be promoted.
7689  void enqueueForPromotion(Instruction *ToBePromoted) {
7690    InstsToBePromoted.push_back(ToBePromoted);
7691  }
7692
7693  /// Set the instruction that will be combined with the transition.
7694  void recordCombineInstruction(Instruction *ToBeCombined) {
7695    assert(canCombine(ToBeCombined) && "Unsupported instruction to combine");
7696    CombineInst = ToBeCombined;
7697  }
7698
  /// Promote all the instructions enqueued for promotion if it is profitable.
7701  /// \return True if the promotion happened, false otherwise.
7702  bool promote() {
7703    // Check if there is something to promote.
7704    // Right now, if we do not have anything to combine with,
7705    // we assume the promotion is not profitable.
7706    if (InstsToBePromoted.empty() || !CombineInst)
7707      return false;
7708
7709    // Check cost.
7710    if (!StressStoreExtract && !isProfitableToPromote())
7711      return false;
7712
7713    // Promote.
7714    for (auto &ToBePromoted : InstsToBePromoted)
7715      promoteImpl(ToBePromoted);
7716    InstsToBePromoted.clear();
7717    return true;
7718  }
7719};
7720
7721} // end anonymous namespace
7722
7723void VectorPromoteHelper::promoteImpl(Instruction *ToBePromoted) {
7724  // At this point, we know that all the operands of ToBePromoted but Def
7725  // can be statically promoted.
7726  // For Def, we need to use its parameter in ToBePromoted:
7727  // b = ToBePromoted ty1 a
7728  // Def = Transition ty1 b to ty2
7729  // Move the transition down.
7730  // 1. Replace all uses of the promoted operation by the transition.
7731  // = ... b => = ... Def.
7732  assert(ToBePromoted->getType() == Transition->getType() &&
7733         "The type of the result of the transition does not match "
7734         "the final type");
7735  ToBePromoted->replaceAllUsesWith(Transition);
7736  // 2. Update the type of the uses.
7737  // b = ToBePromoted ty2 Def => b = ToBePromoted ty1 Def.
7738  Type *TransitionTy = getTransitionType();
7739  ToBePromoted->mutateType(TransitionTy);
7740  // 3. Update all the operands of the promoted operation with promoted
7741  // operands.
7742  // b = ToBePromoted ty1 Def => b = ToBePromoted ty1 a.
7743  for (Use &U : ToBePromoted->operands()) {
7744    Value *Val = U.get();
7745    Value *NewVal = nullptr;
7746    if (Val == Transition)
7747      NewVal = Transition->getOperand(getTransitionOriginalValueIdx());
7748    else if (isa<UndefValue>(Val) || isa<ConstantInt>(Val) ||
7749             isa<ConstantFP>(Val)) {
7750      // Use a splat constant if it is not safe to use undef.
7751      NewVal = getConstantVector(
7752          cast<Constant>(Val),
7753          isa<UndefValue>(Val) ||
7754              canCauseUndefinedBehavior(ToBePromoted, U.getOperandNo()));
7755    } else
7756      llvm_unreachable("Did you modified shouldPromote and forgot to update "
7757                       "this?");
7758    ToBePromoted->setOperand(U.getOperandNo(), NewVal);
7759  }
7760  Transition->moveAfter(ToBePromoted);
7761  Transition->setOperand(getTransitionOriginalValueIdx(), ToBePromoted);
7762}
7763
7764/// Some targets can do store(extractelement) with one instruction.
7765/// Try to push the extractelement towards the stores when the target
7766/// has this feature and this is profitable.
7767bool CodeGenPrepare::optimizeExtractElementInst(Instruction *Inst) {
7768  unsigned CombineCost = std::numeric_limits<unsigned>::max();
7769  if (DisableStoreExtract ||
7770      (!StressStoreExtract &&
7771       !TLI->canCombineStoreAndExtract(Inst->getOperand(0)->getType(),
7772                                       Inst->getOperand(1), CombineCost)))
7773    return false;
7774
7775  // At this point we know that Inst is a vector to scalar transition.
7776  // Try to move it down the def-use chain, until:
7777  // - We can combine the transition with its single use
7778  //   => we got rid of the transition.
7779  // - We escape the current basic block
7780  //   => we would need to check that we are moving it at a cheaper place and
7781  //      we do not do that for now.
7782  BasicBlock *Parent = Inst->getParent();
7783  LLVM_DEBUG(dbgs() << "Found an interesting transition: " << *Inst << '\n');
7784  VectorPromoteHelper VPH(*DL, *TLI, *TTI, Inst, CombineCost);
7785  // If the transition has more than one use, assume this is not going to be
7786  // beneficial.
7787  while (Inst->hasOneUse()) {
7788    Instruction *ToBePromoted = cast<Instruction>(*Inst->user_begin());
7789    LLVM_DEBUG(dbgs() << "Use: " << *ToBePromoted << '\n');
7790
7791    if (ToBePromoted->getParent() != Parent) {
7792      LLVM_DEBUG(dbgs() << "Instruction to promote is in a different block ("
7793                        << ToBePromoted->getParent()->getName()
7794                        << ") than the transition (" << Parent->getName()
7795                        << ").\n");
7796      return false;
7797    }
7798
7799    if (VPH.canCombine(ToBePromoted)) {
7800      LLVM_DEBUG(dbgs() << "Assume " << *Inst << '\n'
7801                        << "will be combined with: " << *ToBePromoted << '\n');
7802      VPH.recordCombineInstruction(ToBePromoted);
7803      bool Changed = VPH.promote();
7804      NumStoreExtractExposed += Changed;
7805      return Changed;
7806    }
7807
7808    LLVM_DEBUG(dbgs() << "Try promoting.\n");
7809    if (!VPH.canPromote(ToBePromoted) || !VPH.shouldPromote(ToBePromoted))
7810      return false;
7811
7812    LLVM_DEBUG(dbgs() << "Promoting is possible... Enqueue for promotion!\n");
7813
7814    VPH.enqueueForPromotion(ToBePromoted);
7815    Inst = ToBePromoted;
7816  }
7817  return false;
7818}
7819
7820/// For the instruction sequence of store below, F and I values
7821/// are bundled together as an i64 value before being stored into memory.
7822/// Sometimes it is more efficient to generate separate stores for F and I,
7823/// which can remove the bitwise instructions or sink them to colder places.
7824///
7825///   (store (or (zext (bitcast F to i32) to i64),
7826///              (shl (zext I to i64), 32)), addr)  -->
7827///   (store F, addr) and (store I, addr+4)
7828///
/// Similarly, splitting other merged stores can also be beneficial, for example:
7830/// For pair of {i32, i32}, i64 store --> two i32 stores.
7831/// For pair of {i32, i16}, i64 store --> two i32 stores.
7832/// For pair of {i16, i16}, i32 store --> two i16 stores.
7833/// For pair of {i16, i8},  i32 store --> two i16 stores.
7834/// For pair of {i8, i8},   i16 store --> two i8 stores.
7835///
7836/// We allow each target to determine specifically which kind of splitting is
7837/// supported.
7838///
7839/// The store patterns are commonly seen from the simple code snippet below
/// if only std::make_pair(...) is SROA-transformed before being inlined into
/// hoo.
7841///   void goo(const std::pair<int, float> &);
7842///   hoo() {
7843///     ...
7844///     goo(std::make_pair(tmp, ftmp));
7845///     ...
7846///   }
7847///
7848/// Although we already have similar splitting in DAG Combine, we duplicate
/// it in CodeGenPrepare to catch the case in which the pattern spans
/// multiple BBs. The logic in DAG Combine is kept to catch cases generated
7851/// during code expansion.
7852static bool splitMergedValStore(StoreInst &SI, const DataLayout &DL,
7853                                const TargetLowering &TLI) {
7854  // Handle simple but common cases only.
7855  Type *StoreType = SI.getValueOperand()->getType();
7856
  // The code below assumes shifting a value by <number of bits>,
  // whereas scalable vectors would have to be shifted by
  // <log2(vscale) + number of bits> in order to store the
  // low/high parts. Bailing out for now.
7861  if (StoreType->isScalableTy())
7862    return false;
7863
7864  if (!DL.typeSizeEqualsStoreSize(StoreType) ||
7865      DL.getTypeSizeInBits(StoreType) == 0)
7866    return false;
7867
7868  unsigned HalfValBitSize = DL.getTypeSizeInBits(StoreType) / 2;
7869  Type *SplitStoreType = Type::getIntNTy(SI.getContext(), HalfValBitSize);
7870  if (!DL.typeSizeEqualsStoreSize(SplitStoreType))
7871    return false;
7872
7873  // Don't split the store if it is volatile.
7874  if (SI.isVolatile())
7875    return false;
7876
  // Match the following patterns:
  // (store (or (zext LValue to i64),
  //            (shl (zext HValue to i64), HalfValBitSize)), addr)
  //  or
  // (store (or (shl (zext HValue to i64), HalfValBitSize),
  //            (zext LValue to i64)), addr)
  // Expect both operands of the OR and the first operand of the SHL to have
  // only one use.
7885  Value *LValue, *HValue;
7886  if (!match(SI.getValueOperand(),
7887             m_c_Or(m_OneUse(m_ZExt(m_Value(LValue))),
7888                    m_OneUse(m_Shl(m_OneUse(m_ZExt(m_Value(HValue))),
7889                                   m_SpecificInt(HalfValBitSize))))))
7890    return false;
7891
  // Check that LValue and HValue are integers whose size is at most
  // HalfValBitSize bits.
7893  if (!LValue->getType()->isIntegerTy() ||
7894      DL.getTypeSizeInBits(LValue->getType()) > HalfValBitSize ||
7895      !HValue->getType()->isIntegerTy() ||
7896      DL.getTypeSizeInBits(HValue->getType()) > HalfValBitSize)
7897    return false;
7898
7899  // If LValue/HValue is a bitcast instruction, use the EVT before bitcast
7900  // as the input of target query.
7901  auto *LBC = dyn_cast<BitCastInst>(LValue);
7902  auto *HBC = dyn_cast<BitCastInst>(HValue);
7903  EVT LowTy = LBC ? EVT::getEVT(LBC->getOperand(0)->getType())
7904                  : EVT::getEVT(LValue->getType());
7905  EVT HighTy = HBC ? EVT::getEVT(HBC->getOperand(0)->getType())
7906                   : EVT::getEVT(HValue->getType());
7907  if (!ForceSplitStore && !TLI.isMultiStoresCheaperThanBitsMerge(LowTy, HighTy))
7908    return false;
7909
7910  // Start to split store.
7911  IRBuilder<> Builder(SI.getContext());
7912  Builder.SetInsertPoint(&SI);
7913
  // If LValue/HValue is a bitcast in another BB, create a new one in the
  // current BB so it may be merged with the split stores by the DAG combiner.
7916  if (LBC && LBC->getParent() != SI.getParent())
7917    LValue = Builder.CreateBitCast(LBC->getOperand(0), LBC->getType());
7918  if (HBC && HBC->getParent() != SI.getParent())
7919    HValue = Builder.CreateBitCast(HBC->getOperand(0), HBC->getType());
7920
7921  bool IsLE = SI.getModule()->getDataLayout().isLittleEndian();
7922  auto CreateSplitStore = [&](Value *V, bool Upper) {
7923    V = Builder.CreateZExtOrBitCast(V, SplitStoreType);
7924    Value *Addr = SI.getPointerOperand();
7925    Align Alignment = SI.getAlign();
7926    const bool IsOffsetStore = (IsLE && Upper) || (!IsLE && !Upper);
7927    if (IsOffsetStore) {
7928      Addr = Builder.CreateGEP(
7929          SplitStoreType, Addr,
7930          ConstantInt::get(Type::getInt32Ty(SI.getContext()), 1));
7931
7932      // When splitting the store in half, naturally one half will retain the
7933      // alignment of the original wider store, regardless of whether it was
7934      // over-aligned or not, while the other will require adjustment.
7935      Alignment = commonAlignment(Alignment, HalfValBitSize / 8);
7936    }
7937    Builder.CreateAlignedStore(V, Addr, Alignment);
7938  };
7939
7940  CreateSplitStore(LValue, false);
7941  CreateSplitStore(HValue, true);
7942
7943  // Delete the old store.
7944  SI.eraseFromParent();
7945  return true;
7946}
7947
7948// Return true if the GEP has two operands, the first operand is of a sequential
7949// type, and the second operand is a constant.
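// For example, "getelementptr i32, ptr %p, i64 4" qualifies, whereas a GEP
// with more than one index or with a non-constant index does not.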
7950static bool GEPSequentialConstIndexed(GetElementPtrInst *GEP) {
7951  gep_type_iterator I = gep_type_begin(*GEP);
7952  return GEP->getNumOperands() == 2 && I.isSequential() &&
7953         isa<ConstantInt>(GEP->getOperand(1));
7954}
7955
7956// Try unmerging GEPs to reduce liveness interference (register pressure) across
7957// IndirectBr edges. Since IndirectBr edges tend to touch on many blocks,
7958// reducing liveness interference across those edges benefits global register
7959// allocation. Currently handles only certain cases.
7960//
7961// For example, unmerge %GEPI and %UGEPI as below.
7962//
7963// ---------- BEFORE ----------
7964// SrcBlock:
7965//   ...
7966//   %GEPIOp = ...
7967//   ...
7968//   %GEPI = gep %GEPIOp, Idx
7969//   ...
7970//   indirectbr ... [ label %DstB0, label %DstB1, ... label %DstBi ... ]
7971//   (* %GEPI is alive on the indirectbr edges due to other uses ahead)
//   (* %GEPIOp is alive on the indirectbr edges only because it's used by
7973//   %UGEPI)
7974//
7975// DstB0: ... (there may be a gep similar to %UGEPI to be unmerged)
7976// DstB1: ... (there may be a gep similar to %UGEPI to be unmerged)
7977// ...
7978//
7979// DstBi:
7980//   ...
7981//   %UGEPI = gep %GEPIOp, UIdx
7982// ...
7983// ---------------------------
7984//
7985// ---------- AFTER ----------
7986// SrcBlock:
7987//   ... (same as above)
7988//    (* %GEPI is still alive on the indirectbr edges)
7989//    (* %GEPIOp is no longer alive on the indirectbr edges as a result of the
7990//    unmerging)
7991// ...
7992//
7993// DstBi:
7994//   ...
7995//   %UGEPI = gep %GEPI, (UIdx-Idx)
7996//   ...
7997// ---------------------------
7998//
7999// The register pressure on the IndirectBr edges is reduced because %GEPIOp is
8000// no longer alive on them.
8001//
// We try to unmerge GEPs here in CodeGenPrepare, as opposed to limiting merging
// of GEPs in the first place in InstCombiner::visitGetElementPtrInst(), so as
// not to disable further simplifications and optimizations as a result of GEP
8005// merging.
8006//
8007// Note this unmerging may increase the length of the data flow critical path
8008// (the path from %GEPIOp to %UGEPI would go through %GEPI), which is a tradeoff
8009// between the register pressure and the length of data-flow critical
8010// path. Restricting this to the uncommon IndirectBr case would minimize the
8011// impact of potentially longer critical path, if any, and the impact on compile
8012// time.
8013static bool tryUnmergingGEPsAcrossIndirectBr(GetElementPtrInst *GEPI,
8014                                             const TargetTransformInfo *TTI) {
8015  BasicBlock *SrcBlock = GEPI->getParent();
8016  // Check that SrcBlock ends with an IndirectBr. If not, give up. The common
8017  // (non-IndirectBr) cases exit early here.
8018  if (!isa<IndirectBrInst>(SrcBlock->getTerminator()))
8019    return false;
8020  // Check that GEPI is a simple gep with a single constant index.
8021  if (!GEPSequentialConstIndexed(GEPI))
8022    return false;
8023  ConstantInt *GEPIIdx = cast<ConstantInt>(GEPI->getOperand(1));
8024  // Check that GEPI is a cheap one.
8025  if (TTI->getIntImmCost(GEPIIdx->getValue(), GEPIIdx->getType(),
8026                         TargetTransformInfo::TCK_SizeAndLatency) >
8027      TargetTransformInfo::TCC_Basic)
8028    return false;
8029  Value *GEPIOp = GEPI->getOperand(0);
8030  // Check that GEPIOp is an instruction that's also defined in SrcBlock.
8031  if (!isa<Instruction>(GEPIOp))
8032    return false;
8033  auto *GEPIOpI = cast<Instruction>(GEPIOp);
8034  if (GEPIOpI->getParent() != SrcBlock)
8035    return false;
  // Check that GEPI is used outside the block, meaning it's alive on the
8037  // IndirectBr edge(s).
8038  if (llvm::none_of(GEPI->users(), [&](User *Usr) {
8039        if (auto *I = dyn_cast<Instruction>(Usr)) {
8040          if (I->getParent() != SrcBlock) {
8041            return true;
8042          }
8043        }
8044        return false;
8045      }))
8046    return false;
8047  // The second elements of the GEP chains to be unmerged.
8048  std::vector<GetElementPtrInst *> UGEPIs;
  // Check each user of GEPIOp to see whether unmerging would make GEPIOp dead
  // on the IndirectBr edges.
8051  for (User *Usr : GEPIOp->users()) {
8052    if (Usr == GEPI)
8053      continue;
8054    // Check if Usr is an Instruction. If not, give up.
8055    if (!isa<Instruction>(Usr))
8056      return false;
8057    auto *UI = cast<Instruction>(Usr);
    // If Usr is in the same block as GEPIOp, that's fine; skip it.
8059    if (UI->getParent() == SrcBlock)
8060      continue;
8061    // Check if Usr is a GEP. If not, give up.
8062    if (!isa<GetElementPtrInst>(Usr))
8063      return false;
8064    auto *UGEPI = cast<GetElementPtrInst>(Usr);
8065    // Check if UGEPI is a simple gep with a single constant index and GEPIOp is
8066    // the pointer operand to it. If so, record it in the vector. If not, give
8067    // up.
8068    if (!GEPSequentialConstIndexed(UGEPI))
8069      return false;
8070    if (UGEPI->getOperand(0) != GEPIOp)
8071      return false;
8072    if (UGEPI->getSourceElementType() != GEPI->getSourceElementType())
8073      return false;
8074    if (GEPIIdx->getType() !=
8075        cast<ConstantInt>(UGEPI->getOperand(1))->getType())
8076      return false;
8077    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
8078    if (TTI->getIntImmCost(UGEPIIdx->getValue(), UGEPIIdx->getType(),
8079                           TargetTransformInfo::TCK_SizeAndLatency) >
8080        TargetTransformInfo::TCC_Basic)
8081      return false;
8082    UGEPIs.push_back(UGEPI);
8083  }
8084  if (UGEPIs.size() == 0)
8085    return false;
  // Check the materializing cost of (Uidx-Idx).
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    APInt NewIdx = UGEPIIdx->getValue() - GEPIIdx->getValue();
    InstructionCost ImmCost = TTI->getIntImmCost(
        NewIdx, GEPIIdx->getType(), TargetTransformInfo::TCK_SizeAndLatency);
    if (ImmCost > TargetTransformInfo::TCC_Basic)
      return false;
  }
  // Now unmerge between GEPI and UGEPIs.
  for (GetElementPtrInst *UGEPI : UGEPIs) {
    UGEPI->setOperand(0, GEPI);
    ConstantInt *UGEPIIdx = cast<ConstantInt>(UGEPI->getOperand(1));
    Constant *NewUGEPIIdx = ConstantInt::get(
        GEPIIdx->getType(), UGEPIIdx->getValue() - GEPIIdx->getValue());
    UGEPI->setOperand(1, NewUGEPIIdx);
    // If GEPI is not inbounds but UGEPI is inbounds, change UGEPI to not
    // inbounds to avoid UB.
    if (!GEPI->isInBounds()) {
      UGEPI->setIsInBounds(false);
    }
  }
  // After unmerging, verify that GEPIOp is actually only used in SrcBlock (not
  // alive on IndirectBr edges).
  assert(llvm::none_of(GEPIOp->users(),
                       [&](User *Usr) {
                         return cast<Instruction>(Usr)->getParent() != SrcBlock;
                       }) &&
         "GEPIOp is used outside SrcBlock");
  return true;
}

static bool optimizeBranch(BranchInst *Branch, const TargetLowering &TLI,
                           SmallSet<BasicBlock *, 32> &FreshBBs,
                           bool IsHugeFunc) {
  // Try and convert
  //  %c = icmp ult %x, 8
  //  br %c, bla, blb
  //  %tc = lshr %x, 3
  // to
  //  %tc = lshr %x, 3
  //  %c = icmp eq %tc, 0
  //  br %c, bla, blb
  // Creating the cmp to zero can be better for the backend, especially if the
  // lshr produces flags that can be used automatically.
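  //
  // An analogous rewrite (sketched here with made-up value names) applies to
  // an equality compare against a constant C when an add/sub of C on the same
  // value is available:
  //  %c = icmp eq %x, 5
  //  br %c, bla, blb
  //  %s = sub %x, 5
  // becomes
  //  %s = sub %x, 5
  //  %c = icmp eq %s, 0
  //  br %c, bla, blb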
  if (!TLI.preferZeroCompareBranch() || !Branch->isConditional())
    return false;

  ICmpInst *Cmp = dyn_cast<ICmpInst>(Branch->getCondition());
  if (!Cmp || !isa<ConstantInt>(Cmp->getOperand(1)) || !Cmp->hasOneUse())
    return false;

  Value *X = Cmp->getOperand(0);
  APInt CmpC = cast<ConstantInt>(Cmp->getOperand(1))->getValue();

  for (auto *U : X->users()) {
    Instruction *UI = dyn_cast<Instruction>(U);
    // A quick dominance check
    if (!UI ||
        (UI->getParent() != Branch->getParent() &&
         UI->getParent() != Branch->getSuccessor(0) &&
         UI->getParent() != Branch->getSuccessor(1)) ||
        (UI->getParent() != Branch->getParent() &&
         !UI->getParent()->getSinglePredecessor()))
      continue;

    if (CmpC.isPowerOf2() && Cmp->getPredicate() == ICmpInst::ICMP_ULT &&
        match(UI, m_Shr(m_Specific(X), m_SpecificInt(CmpC.logBase2())))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(ICmpInst::ICMP_EQ, UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
    if (Cmp->isEquality() &&
        (match(UI, m_Add(m_Specific(X), m_SpecificInt(-CmpC))) ||
         match(UI, m_Sub(m_Specific(X), m_SpecificInt(CmpC))))) {
      IRBuilder<> Builder(Branch);
      if (UI->getParent() != Branch->getParent())
        UI->moveBefore(Branch);
      UI->dropPoisonGeneratingFlags();
      Value *NewCmp = Builder.CreateCmp(Cmp->getPredicate(), UI,
                                        ConstantInt::get(UI->getType(), 0));
      LLVM_DEBUG(dbgs() << "Converting " << *Cmp << "\n");
      LLVM_DEBUG(dbgs() << " to compare on zero: " << *NewCmp << "\n");
      replaceAllUsesWith(Cmp, NewCmp, FreshBBs, IsHugeFunc);
      return true;
    }
  }
  return false;
}

bool CodeGenPrepare::optimizeInst(Instruction *I, ModifyDT &ModifiedDT) {
  bool AnyChange = fixupDPValuesOnInst(*I);

  // Bail out if we inserted the instruction to prevent optimizations from
  // stepping on each other's toes.
  if (InsertedInsts.count(I))
    return AnyChange;

  // TODO: Move into the switch on opcode below here.
  if (PHINode *P = dyn_cast<PHINode>(I)) {
    // It is possible for very late stage optimizations (such as SimplifyCFG)
    // to introduce PHI nodes too late to be cleaned up.  If we detect such a
    // trivial PHI, go ahead and zap it here.
    if (Value *V = simplifyInstruction(P, {*DL, TLInfo})) {
      LargeOffsetGEPMap.erase(P);
      replaceAllUsesWith(P, V, FreshBBs, IsHugeFunc);
      P->eraseFromParent();
      ++NumPHIsElim;
      return true;
    }
    return AnyChange;
  }

  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    // If the source of the cast is a constant, then this should have
    // already been constant folded.  The only reason NOT to constant fold
    // it is if something (e.g. LSR) was careful to place the constant
    // evaluation in a block other than the one that uses it (e.g. to hoist
    // the address of globals out of a loop).  If this is the case, we don't
    // want to forward-subst the cast.
    if (isa<Constant>(CI->getOperand(0)))
      return AnyChange;

    if (OptimizeNoopCopyExpression(CI, *TLI, *DL))
      return true;

    if ((isa<UIToFPInst>(I) || isa<FPToUIInst>(I) || isa<TruncInst>(I)) &&
        TLI->optimizeExtendOrTruncateConversion(
            I, LI->getLoopFor(I->getParent()), *TTI))
      return true;

    if (isa<ZExtInst>(I) || isa<SExtInst>(I)) {
      // Sink a zext or sext into its user blocks if the target type doesn't
      // fit in one register.
      if (TLI->getTypeAction(CI->getContext(),
                             TLI->getValueType(*DL, CI->getType())) ==
          TargetLowering::TypeExpandInteger) {
        return SinkCast(CI);
      } else {
        if (TLI->optimizeExtendOrTruncateConversion(
                I, LI->getLoopFor(I->getParent()), *TTI))
          return true;

        bool MadeChange = optimizeExt(I);
        return MadeChange | optimizeExtUses(I);
      }
    }
    return AnyChange;
  }

  if (auto *Cmp = dyn_cast<CmpInst>(I))
    if (optimizeCmp(Cmp, ModifiedDT))
      return true;

  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    LI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    bool Modified = optimizeLoadExt(LI);
    unsigned AS = LI->getPointerAddressSpace();
    Modified |= optimizeMemoryInst(I, I->getOperand(0), LI->getType(), AS);
    return Modified;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (splitMergedValStore(*SI, *DL, *TLI))
      return true;
    SI->setMetadata(LLVMContext::MD_invariant_group, nullptr);
    unsigned AS = SI->getPointerAddressSpace();
    return optimizeMemoryInst(I, SI->getOperand(1),
                              SI->getOperand(0)->getType(), AS);
  }

  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    unsigned AS = RMW->getPointerAddressSpace();
    return optimizeMemoryInst(I, RMW->getPointerOperand(), RMW->getType(), AS);
  }

  if (AtomicCmpXchgInst *CmpX = dyn_cast<AtomicCmpXchgInst>(I)) {
    unsigned AS = CmpX->getPointerAddressSpace();
    return optimizeMemoryInst(I, CmpX->getPointerOperand(),
                              CmpX->getCompareOperand()->getType(), AS);
  }

  BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I);

  if (BinOp && BinOp->getOpcode() == Instruction::And && EnableAndCmpSinking &&
      sinkAndCmp0Expression(BinOp, *TLI, InsertedInsts))
    return true;

  // TODO: Move this into the switch on opcode - it handles shifts already.
  if (BinOp && (BinOp->getOpcode() == Instruction::AShr ||
                BinOp->getOpcode() == Instruction::LShr)) {
    ConstantInt *CI = dyn_cast<ConstantInt>(BinOp->getOperand(1));
    if (CI && TLI->hasExtractBitsInsn())
      if (OptimizeExtractBits(BinOp, CI, *TLI, *DL))
        return true;
  }

  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    if (GEPI->hasAllZeroIndices()) {
      // The GEP operand must be a pointer, and so must its result -> BitCast.
      Instruction *NC = new BitCastInst(GEPI->getOperand(0), GEPI->getType(),
                                        GEPI->getName(), GEPI);
      NC->setDebugLoc(GEPI->getDebugLoc());
      replaceAllUsesWith(GEPI, NC, FreshBBs, IsHugeFunc);
      RecursivelyDeleteTriviallyDeadInstructions(
          GEPI, TLInfo, nullptr,
          [&](Value *V) { removeAllAssertingVHReferences(V); });
      ++NumGEPsElim;
      optimizeInst(NC, ModifiedDT);
      return true;
    }
    if (tryUnmergingGEPsAcrossIndirectBr(GEPI, TTI)) {
      return true;
    }
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    // freeze(icmp a, const) -> icmp (freeze a), const
    // This helps generate efficient conditional jumps.
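    // For example (an illustrative sketch; value names are made up):
    //   %c = icmp eq i32 %x, 0
    //   %f = freeze i1 %c
    // becomes
    //   %x.fr = freeze i32 %x
    //   %c = icmp eq i32 %x.fr, 0
    // and all uses of %f are replaced with %c.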
    Instruction *CmpI = nullptr;
    if (ICmpInst *II = dyn_cast<ICmpInst>(FI->getOperand(0)))
      CmpI = II;
    else if (FCmpInst *F = dyn_cast<FCmpInst>(FI->getOperand(0)))
      CmpI = F->getFastMathFlags().none() ? F : nullptr;

    if (CmpI && CmpI->hasOneUse()) {
      auto Op0 = CmpI->getOperand(0), Op1 = CmpI->getOperand(1);
      bool Const0 = isa<ConstantInt>(Op0) || isa<ConstantFP>(Op0) ||
                    isa<ConstantPointerNull>(Op0);
      bool Const1 = isa<ConstantInt>(Op1) || isa<ConstantFP>(Op1) ||
                    isa<ConstantPointerNull>(Op1);
      if (Const0 || Const1) {
        if (!Const0 || !Const1) {
          auto *F = new FreezeInst(Const0 ? Op1 : Op0, "", CmpI);
          F->takeName(FI);
          CmpI->setOperand(Const0 ? 1 : 0, F);
        }
        replaceAllUsesWith(FI, CmpI, FreshBBs, IsHugeFunc);
        FI->eraseFromParent();
        return true;
      }
    }
    return AnyChange;
  }

  if (tryToSinkFreeOperands(I))
    return true;

  switch (I->getOpcode()) {
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return optimizeShiftInst(cast<BinaryOperator>(I));
  case Instruction::Call:
    return optimizeCallInst(cast<CallInst>(I), ModifiedDT);
  case Instruction::Select:
    return optimizeSelectInst(cast<SelectInst>(I));
  case Instruction::ShuffleVector:
    return optimizeShuffleVectorInst(cast<ShuffleVectorInst>(I));
  case Instruction::Switch:
    return optimizeSwitchInst(cast<SwitchInst>(I));
  case Instruction::ExtractElement:
    return optimizeExtractElementInst(cast<ExtractElementInst>(I));
  case Instruction::Br:
    return optimizeBranch(cast<BranchInst>(I), *TLI, FreshBBs, IsHugeFunc);
  }

  return AnyChange;
}

/// Given an OR instruction, check to see if this is a bitreverse
/// idiom. If so, insert the new intrinsic and return true.
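///
/// A minimal illustrative idiom (i2 is used only for brevity; real code
/// usually assembles wider reversals from shifts, masks and ors) is:
///   %hi = shl i2 %x, 1
///   %lo = lshr i2 %x, 1
///   %r  = or i2 %hi, %lo
/// which would be replaced by a single llvm.bitreverse call, assuming the
/// BITREVERSE operation is legal or custom for that type on the target.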
bool CodeGenPrepare::makeBitReverse(Instruction &I) {
  if (!I.getType()->isIntegerTy() ||
      !TLI->isOperationLegalOrCustom(ISD::BITREVERSE,
                                     TLI->getValueType(*DL, I.getType(), true)))
    return false;

  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, false, true, Insts))
    return false;
  Instruction *LastInst = Insts.back();
  replaceAllUsesWith(&I, LastInst, FreshBBs, IsHugeFunc);
  RecursivelyDeleteTriviallyDeadInstructions(
      &I, TLInfo, nullptr,
      [&](Value *V) { removeAllAssertingVHReferences(V); });
  return true;
}

// In this pass we look for GEP and cast instructions that are used
// across basic blocks and rewrite them to improve basic-block-at-a-time
// selection.
bool CodeGenPrepare::optimizeBlock(BasicBlock &BB, ModifyDT &ModifiedDT) {
  SunkAddrs.clear();
  bool MadeChange = false;

  do {
    CurInstIterator = BB.begin();
    ModifiedDT = ModifyDT::NotModifyDT;
    while (CurInstIterator != BB.end()) {
      MadeChange |= optimizeInst(&*CurInstIterator++, ModifiedDT);
      if (ModifiedDT != ModifyDT::NotModifyDT) {
        // For huge functions we tend to quickly go through the inner
        // optimization opportunities in the BB, so we go back to the BB head
        // to re-optimize each instruction instead of going back to the
        // function head.
        if (IsHugeFunc) {
          DT.reset();
          getDT(*BB.getParent());
          break;
        } else {
          return true;
        }
      }
    }
  } while (ModifiedDT == ModifyDT::ModifyInstDT);

  bool MadeBitReverse = true;
  while (MadeBitReverse) {
    MadeBitReverse = false;
    for (auto &I : reverse(BB)) {
      if (makeBitReverse(I)) {
        MadeBitReverse = MadeChange = true;
        break;
      }
    }
  }
  MadeChange |= dupRetToEnableTailCallOpts(&BB, ModifiedDT);

  return MadeChange;
}

// Some CGP optimizations may move or alter what's computed in a block. Check
// whether a dbg.value intrinsic could be pointed at a more appropriate operand.
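//
// For example (illustrative only; names are made up), if address-mode sinking
// rematerialized %addr in this block as %sunkaddr, then
//   call void @llvm.dbg.value(metadata ptr %addr, ...)
// can be retargeted to
//   call void @llvm.dbg.value(metadata ptr %sunkaddr, ...)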
bool CodeGenPrepare::fixupDbgValue(Instruction *I) {
  assert(isa<DbgValueInst>(I));
  DbgValueInst &DVI = *cast<DbgValueInst>(I);

  // Does this dbg.value refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DVI.location_ops().begin(),
                                     DVI.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the best
      // opportunity to be accurately lowered. This update may change the type
      // of pointer being referred to; however this makes no difference to
      // debugging information, and we can't generate bitcasts that may affect
      // codegen.
      DVI.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}

bool CodeGenPrepare::fixupDPValuesOnInst(Instruction &I) {
  bool AnyChange = false;
  for (DPValue &DPV : I.getDbgValueRange())
    AnyChange |= fixupDPValue(DPV);
  return AnyChange;
}

// FIXME: should updating debug-info really cause the "changed" flag to fire,
// which can cause a function to be reprocessed?
bool CodeGenPrepare::fixupDPValue(DPValue &DPV) {
  if (DPV.Type != DPValue::LocationType::Value)
    return false;

  // Does this DPValue refer to a sunk address calculation?
  bool AnyChange = false;
  SmallDenseSet<Value *> LocationOps(DPV.location_ops().begin(),
                                     DPV.location_ops().end());
  for (Value *Location : LocationOps) {
    WeakTrackingVH SunkAddrVH = SunkAddrs[Location];
    Value *SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
    if (SunkAddr) {
      // Point dbg.value at locally computed address, which should give the best
      // opportunity to be accurately lowered. This update may change the type
      // of pointer being referred to; however this makes no difference to
      // debugging information, and we can't generate bitcasts that may affect
      // codegen.
      DPV.replaceVariableLocationOp(Location, SunkAddr);
      AnyChange = true;
    }
  }
  return AnyChange;
}

static void DbgInserterHelper(DbgValueInst *DVI, Instruction *VI) {
  DVI->removeFromParent();
  if (isa<PHINode>(VI))
    DVI->insertBefore(&*VI->getParent()->getFirstInsertionPt());
  else
    DVI->insertAfter(VI);
}

static void DbgInserterHelper(DPValue *DPV, Instruction *VI) {
  DPV->removeFromParent();
  BasicBlock *VIBB = VI->getParent();
  if (isa<PHINode>(VI))
    VIBB->insertDPValueBefore(DPV, VIBB->getFirstInsertionPt());
  else
    VIBB->insertDPValueAfter(DPV, VI);
}

// A llvm.dbg.value may be using a value before its definition, due to
// optimizations in this pass and others. Scan for such dbg.values, and rescue
// them by moving the dbg.value to immediately after the value definition.
// FIXME: Ideally this should never be necessary, and this has the potential
// to re-order dbg.value intrinsics.
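//
// For example (an illustrative sketch with made-up names), given
//   call void @llvm.dbg.value(metadata i32 %v, ...)
//   %v = add i32 %a, %b
// the dbg.value is moved to immediately after the definition of %v:
//   %v = add i32 %a, %b
//   call void @llvm.dbg.value(metadata i32 %v, ...)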
bool CodeGenPrepare::placeDbgValues(Function &F) {
  bool MadeChange = false;
  DominatorTree DT(F);

  auto DbgProcessor = [&](auto *DbgItem, Instruction *Position) {
    SmallVector<Instruction *, 4> VIs;
    for (Value *V : DbgItem->location_ops())
      if (Instruction *VI = dyn_cast_or_null<Instruction>(V))
        VIs.push_back(VI);

    // This item may depend on multiple instructions, complicating any
    // potential sink. This block takes the defensive approach, opting to
    // "undef" the item if it depends on more than one instruction and any of
    // them does not dominate it.
    for (Instruction *VI : VIs) {
      if (VI->isTerminator())
        continue;

      // If VI is a phi in a block with an EHPad terminator, we can't insert
      // after it.
      if (isa<PHINode>(VI) && VI->getParent()->getTerminator()->isEHPad())
        continue;

      // If the defining instruction dominates the dbg.value, we do not need
      // to move the dbg.value.
      if (DT.dominates(VI, Position))
        continue;

      // If we depend on multiple instructions and any of them doesn't
      // dominate this DVI, we probably can't salvage it: moving it to
      // after any of the instructions could cause us to lose the others.
      if (VIs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Unable to find valid location for Debug Value, undefing:\n"
            << *DbgItem);
        DbgItem->setKillLocation();
        break;
      }

      LLVM_DEBUG(dbgs() << "Moving Debug Value before :\n"
                        << *DbgItem << ' ' << *VI);
      DbgInserterHelper(DbgItem, VI);
      MadeChange = true;
      ++NumDbgValueMoved;
    }
  };

  for (BasicBlock &BB : F) {
    for (Instruction &Insn : llvm::make_early_inc_range(BB)) {
      // Process dbg.value intrinsics.
      DbgValueInst *DVI = dyn_cast<DbgValueInst>(&Insn);
      if (DVI) {
        DbgProcessor(DVI, DVI);
        continue;
      }

      // If this isn't a dbg.value, process any DPValue records attached to
      // this instruction.
      for (DPValue &DPV : llvm::make_early_inc_range(Insn.getDbgValueRange())) {
        if (DPV.Type != DPValue::LocationType::Value)
          continue;
        DbgProcessor(&DPV, &Insn);
      }
    }
  }

  return MadeChange;
}

// Group scattered pseudo probes in a block to favor SelectionDAG. Scattered
// probes can be chained dependencies of other regular DAG nodes and block DAG
// combine optimizations.
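//
// For example (illustrative only), a block such as
//   %a = add ...
//   call void @llvm.pseudoprobe(...)
//   %b = mul ...
// is rewritten so the probe joins the leading debug/pseudo instructions:
//   call void @llvm.pseudoprobe(...)
//   %a = add ...
//   %b = mul ...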
bool CodeGenPrepare::placePseudoProbes(Function &F) {
  bool MadeChange = false;
  for (auto &Block : F) {
    // Move the remaining probes to the beginning of the block.
    auto FirstInst = Block.getFirstInsertionPt();
    while (FirstInst != Block.end() && FirstInst->isDebugOrPseudoInst())
      ++FirstInst;
    BasicBlock::iterator I(FirstInst);
    I++;
    while (I != Block.end()) {
      if (auto *II = dyn_cast<PseudoProbeInst>(I++)) {
        II->moveBefore(&*FirstInst);
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}

/// Scale down both weights to fit into uint32_t.
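///
/// For example, NewTrue = 2^33 and NewFalse = 2^31 give Scale = 3, scaling
/// the weights down to 2863311530 and 715827882, both of which fit in
/// uint32_t.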
static void scaleWeights(uint64_t &NewTrue, uint64_t &NewFalse) {
  uint64_t NewMax = (NewTrue > NewFalse) ? NewTrue : NewFalse;
  uint32_t Scale = (NewMax / std::numeric_limits<uint32_t>::max()) + 1;
  NewTrue = NewTrue / Scale;
  NewFalse = NewFalse / Scale;
}

/// Some targets prefer to split a conditional branch like:
/// \code
///   %0 = icmp ne i32 %a, 0
///   %1 = icmp ne i32 %b, 0
///   %or.cond = or i1 %0, %1
///   br i1 %or.cond, label %TrueBB, label %FalseBB
/// \endcode
/// into multiple branch instructions like:
/// \code
///   bb1:
///     %0 = icmp ne i32 %a, 0
///     br i1 %0, label %TrueBB, label %bb2
///   bb2:
///     %1 = icmp ne i32 %b, 0
///     br i1 %1, label %TrueBB, label %FalseBB
/// \endcode
/// This usually allows instruction selection to do even further optimizations
/// and combine the compare with the branch instruction. Currently this is
/// applied for targets which have "cheap" jump instructions.
///
/// FIXME: Remove the (equivalent?) implementation in SelectionDAG.
///
bool CodeGenPrepare::splitBranchCondition(Function &F, ModifyDT &ModifiedDT) {
  if (!TM->Options.EnableFastISel || TLI->isJumpExpensive())
    return false;

  bool MadeChange = false;
  for (auto &BB : F) {
    // Does this BB end with the following?
    //   %cond1 = icmp|fcmp|binary instruction ...
    //   %cond2 = icmp|fcmp|binary instruction ...
    //   %cond.or = or|and i1 %cond1, %cond2
    //   br i1 %cond.or, label %dest1, label %dest2
    Instruction *LogicOp;
    BasicBlock *TBB, *FBB;
    if (!match(BB.getTerminator(),
               m_Br(m_OneUse(m_Instruction(LogicOp)), TBB, FBB)))
      continue;

    auto *Br1 = cast<BranchInst>(BB.getTerminator());
    if (Br1->getMetadata(LLVMContext::MD_unpredictable))
      continue;

    // The merging of mostly empty BB can cause a degenerate branch.
    if (TBB == FBB)
      continue;

    unsigned Opc;
    Value *Cond1, *Cond2;
    if (match(LogicOp,
              m_LogicalAnd(m_OneUse(m_Value(Cond1)), m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::And;
    else if (match(LogicOp, m_LogicalOr(m_OneUse(m_Value(Cond1)),
                                        m_OneUse(m_Value(Cond2)))))
      Opc = Instruction::Or;
    else
      continue;

    auto IsGoodCond = [](Value *Cond) {
      return match(
          Cond,
          m_CombineOr(m_Cmp(), m_CombineOr(m_LogicalAnd(m_Value(), m_Value()),
                                           m_LogicalOr(m_Value(), m_Value()))));
    };
    if (!IsGoodCond(Cond1) || !IsGoodCond(Cond2))
      continue;

    LLVM_DEBUG(dbgs() << "Before branch condition splitting\n"; BB.dump());

    // Create a new BB.
    auto *TmpBB =
        BasicBlock::Create(BB.getContext(), BB.getName() + ".cond.split",
                           BB.getParent(), BB.getNextNode());
    if (IsHugeFunc)
      FreshBBs.insert(TmpBB);

    // Update the original basic block to use the first condition directly in
    // its branch instruction and remove the no longer needed and/or
    // instruction.
    Br1->setCondition(Cond1);
    LogicOp->eraseFromParent();

    // Depending on the condition we have to either replace the true or the
    // false successor of the original branch instruction.
    if (Opc == Instruction::And)
      Br1->setSuccessor(0, TmpBB);
    else
      Br1->setSuccessor(1, TmpBB);

    // Fill in the new basic block.
    auto *Br2 = IRBuilder<>(TmpBB).CreateCondBr(Cond2, TBB, FBB);
    if (auto *I = dyn_cast<Instruction>(Cond2)) {
      I->removeFromParent();
      I->insertBefore(Br2);
    }

    // Update PHI nodes in both successors. The original BB needs to be
    // replaced in one successor's PHI nodes, because the branch comes now from
    // the newly generated BB (TmpBB). In the other successor we need to add one
    // incoming edge to the PHI nodes, because both branch instructions target
    // now the same successor. Depending on the original branch condition
    // (and/or) we have to swap the successors (TrueDest, FalseDest), so that
    // we perform the correct update for the PHI nodes.
    // This doesn't change the successor order of the just created branch
    // instruction (or any other instruction).
    if (Opc == Instruction::Or)
      std::swap(TBB, FBB);

    // Replace the old BB with the new BB.
    TBB->replacePhiUsesWith(&BB, TmpBB);

    // Add another incoming edge from the new BB.
    for (PHINode &PN : FBB->phis()) {
      auto *Val = PN.getIncomingValueForBlock(&BB);
      PN.addIncoming(Val, TmpBB);
    }

    // Update the branch weights (from SelectionDAGBuilder::
    // FindMergedConditions).
    if (Opc == Instruction::Or) {
      // Codegen X | Y as:
      // BB1:
      //   jmp_if_X TBB
      //   jmp TmpBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
      //     = TrueProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to A and A+2B, and set TmpBB's weights to A and 2B. This choice
      // assumes that
      //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
      // Another choice is to assume TrueProb for BB1 equals to TrueProb for
      // TmpBB, but the math is more complicated.
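      //
      // As a worked example, with original weights A = 1 and B = 3, BB1 gets
      // weights {1, 7} and TmpBB gets {1, 6}; the original TrueProb is then
      // recovered as 1/8 + (7/8)*(1/7) = 1/4 = A/(A+B).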
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = TrueWeight;
        uint64_t NewFalseWeight = TrueWeight + 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = TrueWeight;
        NewFalseWeight = 2 * FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    } else {
      // Codegen X & Y as:
      // BB1:
      //   jmp_if_X TmpBB
      //   jmp FBB
      // TmpBB:
      //   jmp_if_Y TBB
      //   jmp FBB
      //
      //  This requires creation of TmpBB after CurBB.

      // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
      // The requirement is that
      //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
      //     = FalseProb for original BB.
      // Assuming the original weights are A and B, one choice is to set BB1's
      // weights to 2A+B and B, and set TmpBB's weights to 2A and B. This choice
      // assumes that
      //   FalseProb for BB1 == TrueProb for BB1 * FalseProb for TmpBB.
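      //
      // As a worked example, with original weights A = 1 and B = 3, BB1 gets
      // weights {5, 3} and TmpBB gets {2, 3}; the original FalseProb is then
      // recovered as 3/8 + (5/8)*(3/5) = 3/4 = B/(A+B).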
      uint64_t TrueWeight, FalseWeight;
      if (extractBranchWeights(*Br1, TrueWeight, FalseWeight)) {
        uint64_t NewTrueWeight = 2 * TrueWeight + FalseWeight;
        uint64_t NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br1->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br1->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));

        NewTrueWeight = 2 * TrueWeight;
        NewFalseWeight = FalseWeight;
        scaleWeights(NewTrueWeight, NewFalseWeight);
        Br2->setMetadata(LLVMContext::MD_prof,
                         MDBuilder(Br2->getContext())
                             .createBranchWeights(NewTrueWeight,
                                                  NewFalseWeight));
      }
    }

    ModifiedDT = ModifyDT::ModifyBBDT;
    MadeChange = true;

    LLVM_DEBUG(dbgs() << "After branch condition splitting\n"; BB.dump();
               TmpBB->dump());
  }
  return MadeChange;
}
