MemorySSA.cpp revision 360784
//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm
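
// Illustrative usage sketch (editor's note, not part of the original file):
// a client that has built MemorySSA can emit the annotated IR with the writer
// above; `F` and `MSSA` are assumed to be in scope:
//
//   MemorySSAAnnotatedWriter Writer(&MSSA);
//   F.print(errs(), &Writer);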

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
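
// With the DenseMapInfo above, MemoryLocOrCall can key a DenseMap directly.
// A minimal sketch (hypothetical map name, mirroring how the use optimizer
// below keys its per-location stack info):
//
//   DenseMap<MemoryLocOrCall, unsigned> LastWalkEnd;
//   LastWalkEnd[MemoryLocOrCall(MU)] = StackPosition;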

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}
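
// For example (illustrative IR, not from this file): an unordered load
//   %use = load atomic i32, i32* %q unordered, align 4
// may be hoisted above a monotonic load, but not above
//   %acq = load atomic i32, i32* %p acquire, align 4
// and a seq_cst Use may not be hoisted above any load at all.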

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}
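
// Example of the query above (illustrative): for a MemoryDef wrapping
//   store i32 0, i32* %p
// and UseLoc == MemoryLocation(%p), getModRefInfo reports a must-alias mod,
// so this returns {true, MustAlias}; for an unrelated location it reports
// NoModRef and the result is {false, MayAlias}.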

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}
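
// For example (illustrative IR): a MemoryDef wrapping
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
// ends the lifetime at Loc only if %p must-aliases Loc; a may-alias match is
// not enough.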

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}
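
// For example (illustrative IR): a load marked !invariant.load, or a load of
// memory AA can prove is constant, e.g.
//   %v = load i32, i32* %p, align 4, !invariant.load !0
// can have its defining access set straight to liveOnEntry.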

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't free, so don't use `|=`, since it
          // won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // The third argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};
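
// Illustrative client-side sketch (not in this file): walkers are normally
// reached through MemorySSA::getWalker(). Assuming an Instruction &I that has
// an associated memory access:
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *MA = MSSA.getMemoryAccess(&I);
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);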

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void) ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable.  We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}
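
// Illustrative construction sketch (hypothetical call site, not in this
// file): analyses and passes typically build MemorySSA like this, with AA
// and DT taken from the pass manager:
//
//   DominatorTree DT(F);
//   MemorySSA MSSA(F, &AA, &DT); // also builds the walker, see above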

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUses in the program, and points
/// their defining access at the thing that actually clobbers them.  Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers.  This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache.  This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm
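
// Conceptually (a simplified sketch, not the code that follows): optimizeUses
// seeds the version stack with liveOnEntry and visits the dominator tree in
// preorder, calling optimizeUsesInBlock on each block:
//
//   SmallVector<MemoryAccess *, 16> VersionStack;
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   VersionStack.push_back(MSSA->getLiveOnEntryDef());
//   unsigned long StackEpoch = 1, PopEpoch = 1;
//   for (const auto *DomNode : depth_first(DT->getRootNode()))
//     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch,
//                         VersionStack, LocStackInfo);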

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at.  This is because we only want to check the
/// things that changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data)
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us.  This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before.  In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
1456      --UpperBound;
1457    }
1458
1459    // Note: Phis always have AliasResult AR set to MayAlias ATM.
1460
1461    // At the end of this loop, UpperBound is either a clobber or the lower
1462    // bound. PHI walking may cause it to be < LowerBound, and in fact < LastKill.
1463    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1464      // We were last killed by the access we stopped at.
1465      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1466        LocInfo.AR = None;
1467      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1468      LocInfo.LastKill = UpperBound;
1469    } else {
1470      // Otherwise, we checked all the new ones, and now we know we can get to
1471      // LastKill.
1472      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1473    }
1474    LocInfo.LowerBound = VersionStack.size() - 1;
1475    LocInfo.LowerBoundBlock = BB;
1476  }
1477}
1478
1479/// Optimize uses to point to their actual clobbering definitions.
1480void MemorySSA::OptimizeUses::optimizeUses() {
1481  SmallVector<MemoryAccess *, 16> VersionStack;
1482  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1483  VersionStack.push_back(MSSA->getLiveOnEntryDef());
1484
1485  unsigned long StackEpoch = 1;
1486  unsigned long PopEpoch = 1;
1487  // We perform a non-recursive top-down dominator tree walk.
1488  for (const auto *DomNode : depth_first(DT->getRootNode()))
1489    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1490                        LocStackInfo);
1491}
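
// For illustration: buildMemorySSA() below drives this as
//   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
// so VersionStack and LocStackInfo live for exactly one top-down walk of the
// dominator tree.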
1492
1493void MemorySSA::placePHINodes(
1494    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1495  // Determine where our MemoryPhis should go.
1496  ForwardIDFCalculator IDFs(*DT);
1497  IDFs.setDefiningBlocks(DefiningBlocks);
1498  SmallVector<BasicBlock *, 32> IDFBlocks;
1499  IDFs.calculate(IDFBlocks);
1500
1501  // Now place MemoryPhi nodes.
1502  for (auto &BB : IDFBlocks)
1503    createMemoryPhi(BB);
1504}
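
// Worked example (hypothetical CFG, for intuition only): in a diamond
//   entry -> {then, else} -> merge
// where both then and else contain stores, DefiningBlocks = {then, else},
// the iterated dominance frontier is {merge}, and merge therefore gets the
// MemoryPhi.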
1505
1506void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1507  // We create an access to represent "live on entry", for things like
1508  // arguments or users of globals, where the memory they use is defined before
1509  // the beginning of the function. We do not actually insert it into the IR.
1510  // We do not define a live on exit for the immediate uses, and thus our
1511  // semantics do *not* imply that something with no immediate uses can simply
1512  // be removed.
1513  BasicBlock &StartingPoint = F.getEntryBlock();
1514  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1515                                     &StartingPoint, NextID++));
1516
1517  // We maintain lists of memory accesses per block, trading memory for time;
1518  // the alternative would be to look up the memory access for every possible
1519  // instruction in the stream.
1520  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1521  // Go through each block, figure out where defs occur, and chain together all
1522  // the accesses.
1523  for (BasicBlock &B : F) {
1524    bool InsertIntoDef = false;
1525    AccessList *Accesses = nullptr;
1526    DefsList *Defs = nullptr;
1527    for (Instruction &I : B) {
1528      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1529      if (!MUD)
1530        continue;
1531
1532      if (!Accesses)
1533        Accesses = getOrCreateAccessList(&B);
1534      Accesses->push_back(MUD);
1535      if (isa<MemoryDef>(MUD)) {
1536        InsertIntoDef = true;
1537        if (!Defs)
1538          Defs = getOrCreateDefsList(&B);
1539        Defs->push_back(*MUD);
1540      }
1541    }
1542    if (InsertIntoDef)
1543      DefiningBlocks.insert(&B);
1544  }
1545  placePHINodes(DefiningBlocks);
1546
1547  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1548  // filled in with all reachable blocks.
1549  SmallPtrSet<BasicBlock *, 16> Visited;
1550  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1551
1552  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1553  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1554  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1555
1556  // Mark the uses in unreachable blocks as live on entry, so that they go
1557  // somewhere.
1558  for (auto &BB : F)
1559    if (!Visited.count(&BB))
1560      markUnreachableAsLiveOnEntry(&BB);
1561}
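
// For intuition, a hypothetical block after construction would print (via the
// annotated writer used by print()) roughly as:
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p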
1562
1563MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1564
1565MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1566  if (Walker)
1567    return Walker.get();
1568
1569  if (!WalkerBase)
1570    WalkerBase =
1571        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1572
1573  Walker =
1574      std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1575  return Walker.get();
1576}
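
// Caller-side sketch (assumed client code, not from this file):
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
// Repeated getWalker() calls return the same lazily-created, cached walker.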
1577
1578MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1579  if (SkipWalker)
1580    return SkipWalker.get();
1581
1582  if (!WalkerBase)
1583    WalkerBase =
1584        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1585
1586  SkipWalker =
1587      std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1588  return SkipWalker.get();
1589}
1590
1592// This is a helper function used by the creation routines. It places NewAccess
1593// into the access and defs lists for a given basic block, at the given
1594// insertion point.
1595void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1596                                        const BasicBlock *BB,
1597                                        InsertionPlace Point) {
1598  auto *Accesses = getOrCreateAccessList(BB);
1599  if (Point == Beginning) {
1600    // If it's a phi node, it goes first, otherwise, it goes after any phi
1601    // nodes.
1602    if (isa<MemoryPhi>(NewAccess)) {
1603      Accesses->push_front(NewAccess);
1604      auto *Defs = getOrCreateDefsList(BB);
1605      Defs->push_front(*NewAccess);
1606    } else {
1607      auto AI = find_if_not(
1608          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1609      Accesses->insert(AI, NewAccess);
1610      if (!isa<MemoryUse>(NewAccess)) {
1611        auto *Defs = getOrCreateDefsList(BB);
1612        auto DI = find_if_not(
1613            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1614        Defs->insert(DI, *NewAccess);
1615      }
1616    }
1617  } else {
1618    Accesses->push_back(NewAccess);
1619    if (!isa<MemoryUse>(NewAccess)) {
1620      auto *Defs = getOrCreateDefsList(BB);
1621      Defs->push_back(*NewAccess);
1622    }
1623  }
1624  BlockNumberingValid.erase(BB);
1625}
1626
1627void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1628                                      AccessList::iterator InsertPt) {
1629  auto *Accesses = getWritableBlockAccesses(BB);
1630  bool WasEnd = InsertPt == Accesses->end();
1631  Accesses->insert(AccessList::iterator(InsertPt), What);
1632  if (!isa<MemoryUse>(What)) {
1633    auto *Defs = getOrCreateDefsList(BB);
1634    // If we got asked to insert at the end, we have an easy job, just shove it
1635    // at the end. If we got asked to insert before an existing def, we also get
1636    // an iterator. If we got asked to insert before a use, we have to hunt for
1637    // the next def.
1638    if (WasEnd) {
1639      Defs->push_back(*What);
1640    } else if (isa<MemoryDef>(InsertPt)) {
1641      Defs->insert(InsertPt->getDefsIterator(), *What);
1642    } else {
1643      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1644        ++InsertPt;
1645      // Either we found a def, or we are inserting at the end
1646      if (InsertPt == Accesses->end())
1647        Defs->push_back(*What);
1648      else
1649        Defs->insert(InsertPt->getDefsIterator(), *What);
1650    }
1651  }
1652  BlockNumberingValid.erase(BB);
1653}
1654
1655void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1656  // Keep it in the lookup tables, remove from the lists
1657  removeFromLists(What, false);
1658
1659  // Note that moving should implicitly invalidate the optimized state of a
1660  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1661  // MemoryDef.
1662  if (auto *MD = dyn_cast<MemoryDef>(What))
1663    MD->resetOptimized();
1664  What->setBlock(BB);
1665}
1666
1667// Move What before Where in the IR.  The end result is that What will belong to
1668// the right lists and have the right Block set, but will not otherwise be
1669// correct. It will not have the right defining access, and if it is a def,
1670// things below it will not properly be updated.
1671void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1672                       AccessList::iterator Where) {
1673  prepareForMoveTo(What, BB);
1674  insertIntoListsBefore(What, BB, Where);
1675}
1676
1677void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1678                       InsertionPlace Point) {
1679  if (isa<MemoryPhi>(What)) {
1680    assert(Point == Beginning &&
1681           "Can only move a Phi at the beginning of the block");
1682    // Update lookup table entry
1683    ValueToMemoryAccess.erase(What->getBlock());
1684    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1685    (void)Inserted;
1686    assert(Inserted && "Cannot move a Phi to a block that already has one");
1687  }
1688
1689  prepareForMoveTo(What, BB);
1690  insertIntoListsForBlock(What, BB, Point);
1691}
1692
1693MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1694  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1695  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1696  // Phis are always placed at the front of the block.
1697  insertIntoListsForBlock(Phi, BB, Beginning);
1698  ValueToMemoryAccess[BB] = Phi;
1699  return Phi;
1700}
1701
1702MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1703                                               MemoryAccess *Definition,
1704                                               const MemoryUseOrDef *Template,
1705                                               bool CreationMustSucceed) {
1706  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1707  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1708  if (CreationMustSucceed)
1709    assert(NewAccess != nullptr && "Tried to create a memory access for a "
1710                                   "non-memory touching instruction");
1711  if (NewAccess)
1712    NewAccess->setDefiningAccess(Definition);
1713  return NewAccess;
1714}
1715
1716// Return true if the instruction has ordering constraints.
1717// Note specifically that this only considers stores and loads
1718// because others are still considered ModRef by getModRefInfo.
1719static inline bool isOrdered(const Instruction *I) {
1720  if (auto *SI = dyn_cast<StoreInst>(I)) {
1721    if (!SI->isUnordered())
1722      return true;
1723  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1724    if (!LI->isUnordered())
1725      return true;
1726  }
1727  return false;
1728}
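
// Illustrative classifications (assumed IR, not from this file):
//   store atomic i32 0, i32* %p seq_cst, align 4  ; isOrdered -> true
//   store volatile i32 0, i32* %p                 ; isOrdered -> true
//   store i32 0, i32* %p                          ; isOrdered -> false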
1729
1730/// Helper function to create new memory accesses
1731template <typename AliasAnalysisType>
1732MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1733                                           AliasAnalysisType *AAP,
1734                                           const MemoryUseOrDef *Template) {
1735  // The assume intrinsic has a control dependency which we model by claiming
1736  // that it writes arbitrarily. Debuginfo intrinsics may be considered
1737  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1738  // dependencies here.
1739  // FIXME: Replace this special casing with a more accurate modelling of
1740  // assume's control dependency.
1741  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1742    if (II->getIntrinsicID() == Intrinsic::assume)
1743      return nullptr;
1744
1745  // Using a nonstandard AA pipeline might leave us with unexpected modref
1746  // results for I, so add a check to not model instructions that may not read
1747  // from or write to memory. This is necessary for correctness.
1748  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1749    return nullptr;
1750
1751  bool Def, Use;
1752  if (Template) {
1753    Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1754    Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1755#if !defined(NDEBUG)
1756    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1757    bool DefCheck, UseCheck;
1758    DefCheck = isModSet(ModRef) || isOrdered(I);
1759    UseCheck = isRefSet(ModRef);
1760    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1761#endif
1762  } else {
1763    // Find out what effect this instruction has on memory.
1764    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1765    // The isOrdered check is used to ensure that volatiles end up as defs
1766    // (atomics end up as ModRef right now anyway).  Until we separate the
1767    // ordering chain from the memory chain, this enables people to see at least
1768    // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1769    // will still give an answer that bypasses other volatile loads.  TODO:
1770    // Separate memory aliasing and ordering into two different chains so that
1771    // we can precisely represent both "what memory will this read/write/is
1772    // clobbered by" and "what instructions can I move this past".
1773    Def = isModSet(ModRef) || isOrdered(I);
1774    Use = isRefSet(ModRef);
1775  }
1776
1777  // It's possible for an instruction to not touch memory at all. During
1778  // construction, we ignore such instructions.
1779  if (!Def && !Use)
1780    return nullptr;
1781
1782  MemoryUseOrDef *MUD;
1783  if (Def)
1784    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1785  else
1786    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1787  ValueToMemoryAccess[I] = MUD;
1788  return MUD;
1789}
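
// For intuition (hypothetical instructions): a plain load becomes a MemoryUse,
// a store or a volatile/atomic load (via isOrdered) becomes a MemoryDef, and
// an instruction that both reads and writes, e.g. a memcpy call, becomes a
// MemoryDef, since Def takes precedence when both bits are set.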
1790
1791/// Returns true if \p Replacer dominates \p Replacee.
1792bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1793                             const MemoryAccess *Replacee) const {
1794  if (isa<MemoryUseOrDef>(Replacee))
1795    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1796  const auto *MP = cast<MemoryPhi>(Replacee);
1797  // For a phi node, the use occurs in the predecessor block of the phi node.
1798  // Since Replacee may occur multiple times in the phi node, we have to
1799  // check each operand to ensure Replacer dominates it wherever Replacee occurs.
1800  for (const Use &Arg : MP->operands()) {
1801    if (Arg.get() != Replacee &&
1802        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1803      return false;
1804  }
1805  return true;
1806}
1807
1808/// Properly remove \p MA from all of MemorySSA's lookup tables.
1809void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1810  assert(MA->use_empty() &&
1811         "Trying to remove memory access that still has uses");
1812  BlockNumbering.erase(MA);
1813  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1814    MUD->setDefiningAccess(nullptr);
1815  // Invalidate our walker's cache if necessary
1816  if (!isa<MemoryUse>(MA))
1817    getWalker()->invalidateInfo(MA);
1818
1819  Value *MemoryInst;
1820  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1821    MemoryInst = MUD->getMemoryInst();
1822  else
1823    MemoryInst = MA->getBlock();
1824
1825  auto VMA = ValueToMemoryAccess.find(MemoryInst);
1826  if (VMA->second == MA)
1827    ValueToMemoryAccess.erase(VMA);
1828}
1829
1830/// Properly remove \p MA from all of MemorySSA's lists.
1831///
1832/// Because of the way the intrusive list and use lists work, it is important to
1833/// do removal in the right order.
1834/// ShouldDelete defaults to true, and will cause the memory access to also be
1835/// deleted, not just removed.
1836void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1837  BasicBlock *BB = MA->getBlock();
1838  // The access list owns the reference, so we erase it from the non-owning list
1839  // first.
1840  if (!isa<MemoryUse>(MA)) {
1841    auto DefsIt = PerBlockDefs.find(BB);
1842    std::unique_ptr<DefsList> &Defs = DefsIt->second;
1843    Defs->remove(*MA);
1844    if (Defs->empty())
1845      PerBlockDefs.erase(DefsIt);
1846  }
1847
1848  // The erase call here will delete it. If we don't want it deleted, we call
1849  // remove instead.
1850  auto AccessIt = PerBlockAccesses.find(BB);
1851  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1852  if (ShouldDelete)
1853    Accesses->erase(MA);
1854  else
1855    Accesses->remove(MA);
1856
1857  if (Accesses->empty()) {
1858    PerBlockAccesses.erase(AccessIt);
1859    BlockNumberingValid.erase(BB);
1860  }
1861}
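
// Caller-side sketch (assumed update sequence): code retiring an access first
// redirects its uses, then calls
//   MSSA.removeFromLookups(MA);  // asserts MA->use_empty()
//   MSSA.removeFromLists(MA);    // unlinks and deletes MA
// in that order, because the access list owns the allocation.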
1862
1863void MemorySSA::print(raw_ostream &OS) const {
1864  MemorySSAAnnotatedWriter Writer(this);
1865  F.print(OS, &Writer);
1866}
1867
1868#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1869LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1870#endif
1871
1872void MemorySSA::verifyMemorySSA() const {
1873  verifyOrderingDominationAndDefUses(F);
1874  verifyDominationNumbers(F);
1875  verifyPrevDefInPhis(F);
1876  // Previously, the verification used to also check that the clobberingAccess
1877  // cached by MemorySSA is the same as the clobberingAccess found at a later
1878  // query to AA. This does not hold true in general due to the current fragility
1879  // of BasicAA, which has arbitrary caps on the things it analyzes before giving
1880  // up. As a result, transformations that are correct will lead to BasicAA
1881  // returning different alias answers before and after the transformation.
1882  // Invalidating MemorySSA is not an option, because BasicAA's results can be so
1883  // unstable that, in the worst case, we'd need to rebuild MemorySSA from
1884  // scratch after every transformation, which defeats the purpose of using it.
1885  // For such an example, see test4 added in D51960.
1886}
1887
1888void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1889#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1890  for (const BasicBlock &BB : F) {
1891    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1892      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1893        auto *Pred = Phi->getIncomingBlock(I);
1894        auto *IncAcc = Phi->getIncomingValue(I);
1895        // If Pred has no unreachable predecessors, get last def looking at
1896        // IDoms. If, while walking IDoms, any of these has an unreachable
1897        // predecessor, then the incoming def can be any access.
1898        if (auto *DTNode = DT->getNode(Pred)) {
1899          while (DTNode) {
1900            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1901              auto *LastAcc = &*(--DefList->end());
1902              assert(LastAcc == IncAcc &&
1903                     "Incorrect incoming access into phi.");
1904              break;
1905            }
1906            DTNode = DTNode->getIDom();
1907          }
1908        } else {
1909          // If Pred has unreachable predecessors, but has at least a Def, the
1910          // incoming access can be the last Def in Pred, or it could have been
1911          // optimized to LoE. After an update, though, the LoE may have been
1912          // replaced by another access, so IncAcc may be any access.
1913          // If Pred has unreachable predecessors and no Defs, incoming access
1914          // should be LoE; However, after an update, it may be any access.
1915        }
1916      }
1917    }
1918  }
1919#endif
1920}
1921
1922/// Verify that all of the blocks we believe to have valid domination numbers
1923/// actually have valid domination numbers.
1924void MemorySSA::verifyDominationNumbers(const Function &F) const {
1925#ifndef NDEBUG
1926  if (BlockNumberingValid.empty())
1927    return;
1928
1929  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1930  for (const BasicBlock &BB : F) {
1931    if (!ValidBlocks.count(&BB))
1932      continue;
1933
1934    ValidBlocks.erase(&BB);
1935
1936    const AccessList *Accesses = getBlockAccesses(&BB);
1937    // It's correct to say an empty block has valid numbering.
1938    if (!Accesses)
1939      continue;
1940
1941    // Block numbering starts at 1.
1942    unsigned long LastNumber = 0;
1943    for (const MemoryAccess &MA : *Accesses) {
1944      auto ThisNumberIter = BlockNumbering.find(&MA);
1945      assert(ThisNumberIter != BlockNumbering.end() &&
1946             "MemoryAccess has no domination number in a valid block!");
1947
1948      unsigned long ThisNumber = ThisNumberIter->second;
1949      assert(ThisNumber > LastNumber &&
1950             "Domination numbers should be strictly increasing!");
1951      LastNumber = ThisNumber;
1952    }
1953  }
1954
1955  assert(ValidBlocks.empty() &&
1956         "All valid BasicBlocks should exist in F -- dangling pointers?");
1957#endif
1958}
1959
1960/// Verify ordering: the order and existence of MemoryAccesses matches the
1961/// order and existence of memory affecting instructions.
1962/// Verify domination: each definition dominates all of its uses.
1963/// Verify def-uses: the immediate use information - walk all the memory
1964/// accesses, verifying that, for each use, it appears in the appropriate
1965/// def's use list.
1966void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1967#if !defined(NDEBUG)
1968  // Walk all the blocks, comparing what the lookups think and what the access
1969  // lists think, as well as the order in the blocks vs the order in the access
1970  // lists.
1971  SmallVector<MemoryAccess *, 32> ActualAccesses;
1972  SmallVector<MemoryAccess *, 32> ActualDefs;
1973  for (BasicBlock &B : F) {
1974    const AccessList *AL = getBlockAccesses(&B);
1975    const auto *DL = getBlockDefs(&B);
1976    MemoryPhi *Phi = getMemoryAccess(&B);
1977    if (Phi) {
1978      // Verify ordering.
1979      ActualAccesses.push_back(Phi);
1980      ActualDefs.push_back(Phi);
1981      // Verify domination
1982      for (const Use &U : Phi->uses())
1983        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1984#if defined(EXPENSIVE_CHECKS)
1985      // Verify def-uses.
1986      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1987                                          pred_begin(&B), pred_end(&B))) &&
1988             "Incomplete MemoryPhi Node");
1989      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1990        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1991        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1992                   pred_end(&B) &&
1993               "Incoming phi block not a block predecessor");
1994      }
1995#endif
1996    }
1997
1998    for (Instruction &I : B) {
1999      MemoryUseOrDef *MA = getMemoryAccess(&I);
2000      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2001             "We have memory affecting instructions "
2002             "in this block but they are not in the "
2003             "access list or defs list");
2004      if (MA) {
2005        // Verify ordering.
2006        ActualAccesses.push_back(MA);
2007        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2008          // Verify ordering.
2009          ActualDefs.push_back(MA);
2010          // Verify domination.
2011          for (const Use &U : MD->uses())
2012            assert(dominates(MD, U) &&
2013                   "Memory Def does not dominate it's uses");
2014        }
2015#if defined(EXPENSIVE_CHECKS)
2016        // Verify def-uses.
2017        verifyUseInDefs(MA->getDefiningAccess(), MA);
2018#endif
2019      }
2020    }
2021    // Either we hit the assert, really have no accesses, or we have both
2022    // accesses and an access list. Same with defs.
2023    if (!AL && !DL)
2024      continue;
2025    // Verify ordering.
2026    assert(AL->size() == ActualAccesses.size() &&
2027           "We don't have the same number of accesses in the block as on the "
2028           "access list");
2029    assert((DL || ActualDefs.size() == 0) &&
2030           "Either we should have a defs list, or we should have no defs");
2031    assert((!DL || DL->size() == ActualDefs.size()) &&
2032           "We don't have the same number of defs in the block as on the "
2033           "def list");
2034    auto ALI = AL->begin();
2035    auto AAI = ActualAccesses.begin();
2036    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2037      assert(&*ALI == *AAI && "Not the same accesses in the same order");
2038      ++ALI;
2039      ++AAI;
2040    }
2041    ActualAccesses.clear();
2042    if (DL) {
2043      auto DLI = DL->begin();
2044      auto ADI = ActualDefs.begin();
2045      while (DLI != DL->end() && ADI != ActualDefs.end()) {
2046        assert(&*DLI == *ADI && "Not the same defs in the same order");
2047        ++DLI;
2048        ++ADI;
2049      }
2050    }
2051    ActualDefs.clear();
2052  }
2053#endif
2054}
2055
2056/// Verify the def-use lists in MemorySSA, by verifying that \p Use
2057/// appears in the use list of \p Def.
2058void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2059#ifndef NDEBUG
2060  // The live on entry use may cause us to get a NULL def here
2061  if (!Def)
2062    assert(isLiveOnEntryDef(Use) &&
2063           "Null def but use not point to live on entry def");
2064  else
2065    assert(is_contained(Def->users(), Use) &&
2066           "Did not find use in def's use list");
2067#endif
2068}
2069
2070/// Perform a local numbering on blocks so that instruction ordering can be
2071/// determined in constant time.
2072/// TODO: We currently just number in order.  If we numbered by N, we could
2073/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2074/// log2(N) sequences of mixed before and after) without needing to invalidate
2075/// the numbering.
2076void MemorySSA::renumberBlock(const BasicBlock *B) const {
2077  // The pre-increment ensures the numbers really start at 1.
2078  unsigned long CurrentNumber = 0;
2079  const AccessList *AL = getBlockAccesses(B);
2080  assert(AL != nullptr && "Asking to renumber an empty block");
2081  for (const auto &I : *AL)
2082    BlockNumbering[&I] = ++CurrentNumber;
2083  BlockNumberingValid.insert(B);
2084}
2085
2086/// Determine, for two memory accesses in the same block,
2087/// whether \p Dominator dominates \p Dominatee.
2088/// \returns True if \p Dominator dominates \p Dominatee.
2089bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2090                                 const MemoryAccess *Dominatee) const {
2091  const BasicBlock *DominatorBlock = Dominator->getBlock();
2092
2093  assert((DominatorBlock == Dominatee->getBlock()) &&
2094         "Asking for local domination when accesses are in different blocks!");
2095  // A node dominates itself.
2096  if (Dominatee == Dominator)
2097    return true;
2098
2099  // When Dominatee is defined on function entry, it is not dominated by another
2100  // memory access.
2101  if (isLiveOnEntryDef(Dominatee))
2102    return false;
2103
2104  // When Dominator is defined on function entry, it dominates the other memory
2105  // access.
2106  if (isLiveOnEntryDef(Dominator))
2107    return true;
2108
2109  if (!BlockNumberingValid.count(DominatorBlock))
2110    renumberBlock(DominatorBlock);
2111
2112  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2113  // All numbers start at 1.
2114  assert(DominatorNum != 0 && "Block was not numbered properly");
2115  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2116  assert(DominateeNum != 0 && "Block was not numbered properly");
2117  return DominatorNum < DominateeNum;
2118}
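
// Illustrative numbering (assumed access list): for a block whose list is
//   MemoryPhi, MemoryDef, MemoryUse
// renumberBlock() assigns 1, 2, and 3, so local domination reduces to an
// integer comparison.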
2119
2120bool MemorySSA::dominates(const MemoryAccess *Dominator,
2121                          const MemoryAccess *Dominatee) const {
2122  if (Dominator == Dominatee)
2123    return true;
2124
2125  if (isLiveOnEntryDef(Dominatee))
2126    return false;
2127
2128  if (Dominator->getBlock() != Dominatee->getBlock())
2129    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2130  return locallyDominates(Dominator, Dominatee);
2131}
2132
2133bool MemorySSA::dominates(const MemoryAccess *Dominator,
2134                          const Use &Dominatee) const {
2135  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2136    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2137    // The def must dominate the incoming block of the phi.
2138    if (UseBB != Dominator->getBlock())
2139      return DT->dominates(Dominator->getBlock(), UseBB);
2140    // If the UseBB and the DefBB are the same, compare locally.
2141    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2142  }
2143  // If it's not a PHI node use, the normal dominates can already handle it.
2144  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2145}
2146
2147static const char LiveOnEntryStr[] = "liveOnEntry";
2148
2149void MemoryAccess::print(raw_ostream &OS) const {
2150  switch (getValueID()) {
2151  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2152  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2153  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2154  }
2155  llvm_unreachable("invalid value id");
2156}
2157
2158void MemoryDef::print(raw_ostream &OS) const {
2159  MemoryAccess *UO = getDefiningAccess();
2160
2161  auto printID = [&OS](MemoryAccess *A) {
2162    if (A && A->getID())
2163      OS << A->getID();
2164    else
2165      OS << LiveOnEntryStr;
2166  };
2167
2168  OS << getID() << " = MemoryDef(";
2169  printID(UO);
2170  OS << ")";
2171
2172  if (isOptimized()) {
2173    OS << "->";
2174    printID(getOptimized());
2175
2176    if (Optional<AliasResult> AR = getOptimizedAccessType())
2177      OS << " " << *AR;
2178  }
2179}
2180
2181void MemoryPhi::print(raw_ostream &OS) const {
2182  bool First = true;
2183  OS << getID() << " = MemoryPhi(";
2184  for (const auto &Op : operands()) {
2185    BasicBlock *BB = getIncomingBlock(Op);
2186    MemoryAccess *MA = cast<MemoryAccess>(Op);
2187    if (!First)
2188      OS << ',';
2189    else
2190      First = false;
2191
2192    OS << '{';
2193    if (BB->hasName())
2194      OS << BB->getName();
2195    else
2196      BB->printAsOperand(OS, false);
2197    OS << ',';
2198    if (unsigned ID = MA->getID())
2199      OS << ID;
2200    else
2201      OS << LiveOnEntryStr;
2202    OS << '}';
2203  }
2204  OS << ')';
2205}
2206
2207void MemoryUse::print(raw_ostream &OS) const {
2208  MemoryAccess *UO = getDefiningAccess();
2209  OS << "MemoryUse(";
2210  if (UO && UO->getID())
2211    OS << UO->getID();
2212  else
2213    OS << LiveOnEntryStr;
2214  OS << ')';
2215
2216  if (Optional<AliasResult> AR = getOptimizedAccessType())
2217    OS << " " << *AR;
2218}
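
// Example renderings (hypothetical IDs and block names):
//   2 = MemoryDef(1)                            ; unoptimized def
//   3 = MemoryPhi({entry,liveOnEntry},{loop,2}) ; one operand per predecessor
//   MemoryUse(2) MustAlias                      ; optimized use with alias info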
2219
2220void MemoryAccess::dump() const {
2221// Cannot completely remove virtual function even in release mode.
2222#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2223  print(dbgs());
2224  dbgs() << "\n";
2225#endif
2226}
2227
2228char MemorySSAPrinterLegacyPass::ID = 0;
2229
2230MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2231  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2232}
2233
2234void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2235  AU.setPreservesAll();
2236  AU.addRequired<MemorySSAWrapperPass>();
2237}
2238
2239bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2240  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2241  MSSA.print(dbgs());
2242  if (VerifyMemorySSA)
2243    MSSA.verifyMemorySSA();
2244  return false;
2245}
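
// Usage note (approximate; exact driver flags vary by LLVM version): this is
// the pass behind something like
//   opt -print-memoryssa -disable-output foo.ll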
2246
2247AnalysisKey MemorySSAAnalysis::Key;
2248
2249MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2250                                                 FunctionAnalysisManager &AM) {
2251  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2252  auto &AA = AM.getResult<AAManager>(F);
2253  return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2254}
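
// Caller-side sketch (hypothetical new-PM client): with MemorySSAAnalysis
// registered in a FunctionAnalysisManager FAM,
//   MemorySSA &MSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MemorySSAWalker *W = MSSA.getWalker();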
2255
2256bool MemorySSAAnalysis::Result::invalidate(
2257    Function &F, const PreservedAnalyses &PA,
2258    FunctionAnalysisManager::Invalidator &Inv) {
2259  auto PAC = PA.getChecker<MemorySSAAnalysis>();
2260  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2261         Inv.invalidate<AAManager>(F, PA) ||
2262         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2263}
2264
2265PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2266                                            FunctionAnalysisManager &AM) {
2267  OS << "MemorySSA for function: " << F.getName() << "\n";
2268  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2269
2270  return PreservedAnalyses::all();
2271}
2272
2273PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2274                                             FunctionAnalysisManager &AM) {
2275  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2276
2277  return PreservedAnalyses::all();
2278}
2279
2280char MemorySSAWrapperPass::ID = 0;
2281
2282MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2283  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2284}
2285
2286void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2287
2288void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2289  AU.setPreservesAll();
2290  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2291  AU.addRequiredTransitive<AAResultsWrapperPass>();
2292}
2293
2294bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2295  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2296  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2297  MSSA.reset(new MemorySSA(F, &AA, &DT));
2298  return false;
2299}
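
// Client sketch (assumed legacy-PM pass): a pass consuming this analysis does
//   AU.addRequired<MemorySSAWrapperPass>();             // getAnalysisUsage
//   MemorySSA &MSSA =
//       getAnalysis<MemorySSAWrapperPass>().getMSSA();  // runOnFunction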
2300
2301void MemorySSAWrapperPass::verifyAnalysis() const {
2302  if (VerifyMemorySSA)
2303    MSSA->verifyMemorySSA();
2304}
2305
2306void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2307  MSSA->print(OS);
2308}
2309
2310MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2311
2312/// Walk the use-def chains starting at \p StartingAccess and find
2313/// the MemoryAccess that actually clobbers Loc.
2314///
2315/// \returns our clobbering memory access
2316template <typename AliasAnalysisType>
2317MemoryAccess *
2318MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2319    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2320    unsigned &UpwardWalkLimit) {
2321  if (isa<MemoryPhi>(StartingAccess))
2322    return StartingAccess;
2323
2324  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2325  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2326    return StartingUseOrDef;
2327
2328  Instruction *I = StartingUseOrDef->getMemoryInst();
2329
2330  // Conservatively, fences are always clobbers, so don't perform the walk if we
2331  // hit a fence.
2332  if (!isa<CallBase>(I) && I->isFenceLike())
2333    return StartingUseOrDef;
2334
2335  UpwardsMemoryQuery Q;
2336  Q.OriginalAccess = StartingUseOrDef;
2337  Q.StartingLoc = Loc;
2338  Q.Inst = I;
2339  Q.IsCall = false;
2340
2341  // Unlike the other function, do not walk to the def of a def, because we are
2342  // handed something we already believe is the clobbering access.
2343  // We never set SkipSelf to true in Q in this method.
2344  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2345                                     ? StartingUseOrDef->getDefiningAccess()
2346                                     : StartingUseOrDef;
2347
2348  MemoryAccess *Clobber =
2349      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2350  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2351  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2352  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2353  LLVM_DEBUG(dbgs() << *Clobber << "\n");
2354  return Clobber;
2355}
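
// Caller-side sketch (hypothetical): the location-based variant answers "what
// clobbers Loc, starting the walk at this access", e.g.
//   MemoryLocation Loc = MemoryLocation::get(LI); // LI: some LoadInst*
//   MemoryAccess *C = Walker->getClobberingMemoryAccess(StartAccess, Loc);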
2356
2357template <typename AliasAnalysisType>
2358MemoryAccess *
2359MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2360    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2361  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2362  // If this is a MemoryPhi, we can't do anything.
2363  if (!StartingAccess)
2364    return MA;
2365
2366  bool IsOptimized = false;
2367
2368  // If this is an already optimized use or def, return the optimized result.
2369  // Note: Currently, we store the optimized def result in a separate field,
2370  // since we can't use the defining access.
2371  if (StartingAccess->isOptimized()) {
2372    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2373      return StartingAccess->getOptimized();
2374    IsOptimized = true;
2375  }
2376
2377  const Instruction *I = StartingAccess->getMemoryInst();
2378  // We can't sanely do anything with a fence, since fences conservatively
2379  // clobber all memory and have no locations from which to get pointers to
2380  // try to disambiguate.
2381  if (!isa<CallBase>(I) && I->isFenceLike())
2382    return StartingAccess;
2383
2384  UpwardsMemoryQuery Q(I, StartingAccess);
2385
2386  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2387    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2388    StartingAccess->setOptimized(LiveOnEntry);
2389    StartingAccess->setOptimizedAccessType(None);
2390    return LiveOnEntry;
2391  }
2392
2393  MemoryAccess *OptimizedAccess;
2394  if (!IsOptimized) {
2395    // Start with the thing we already think clobbers this location
2396    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2397
2398    // At this point, DefiningAccess may be the live on entry def.
2399    // If it is, we will not get a better result.
2400    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2401      StartingAccess->setOptimized(DefiningAccess);
2402      StartingAccess->setOptimizedAccessType(None);
2403      return DefiningAccess;
2404    }
2405
2406    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2407    StartingAccess->setOptimized(OptimizedAccess);
2408    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2409      StartingAccess->setOptimizedAccessType(None);
2410    else if (Q.AR == MustAlias)
2411      StartingAccess->setOptimizedAccessType(MustAlias);
2412  } else
2413    OptimizedAccess = StartingAccess->getOptimized();
2414
2415  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2416  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2417  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2418  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2419
2420  MemoryAccess *Result;
2421  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2422      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2423    assert(isa<MemoryDef>(Q.OriginalAccess));
2424    Q.SkipSelfAccess = true;
2425    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2426  } else
2427    Result = OptimizedAccess;
2428
2429  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2430  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2431
2432  return Result;
2433}
2434
2435MemoryAccess *
2436DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2437  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2438    return Use->getDefiningAccess();
2439  return MA;
2440}
2441
2442MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2443    MemoryAccess *StartingAccess, const MemoryLocation &) {
2444  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2445    return Use->getDefiningAccess();
2446  return StartingAccess;
2447}
2448
2449void MemoryPhi::deleteMe(DerivedUser *Self) {
2450  delete static_cast<MemoryPhi *>(Self);
2451}
2452
2453void MemoryDef::deleteMe(DerivedUser *Self) {
2454  delete static_cast<MemoryDef *>(Self);
2455}
2456
2457void MemoryUse::deleteMe(DerivedUser *Self) {
2458  delete static_cast<MemoryUse *>(Self);
2459}
2460