LazyCallGraph.cpp revision 360784
1//===- LazyCallGraph.cpp - Analysis of a Module's call graph --------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Analysis/LazyCallGraph.h"
10#include "llvm/ADT/ArrayRef.h"
11#include "llvm/ADT/STLExtras.h"
12#include "llvm/ADT/ScopeExit.h"
13#include "llvm/ADT/Sequence.h"
14#include "llvm/ADT/SmallPtrSet.h"
15#include "llvm/ADT/SmallVector.h"
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Analysis/TargetLibraryInfo.h"
18#include "llvm/Config/llvm-config.h"
19#include "llvm/IR/CallSite.h"
20#include "llvm/IR/Function.h"
21#include "llvm/IR/GlobalVariable.h"
22#include "llvm/IR/Instruction.h"
23#include "llvm/IR/Module.h"
24#include "llvm/IR/PassManager.h"
25#include "llvm/Support/Casting.h"
26#include "llvm/Support/Compiler.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/GraphWriter.h"
29#include "llvm/Support/raw_ostream.h"
30#include <algorithm>
31#include <cassert>
32#include <cstddef>
33#include <iterator>
34#include <string>
35#include <tuple>
36#include <utility>
37
38using namespace llvm;
39
40#define DEBUG_TYPE "lcg"
41
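// Append a new edge to TargetN of kind EK and record its index so that later
// lookups by target node are cheap.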
42void LazyCallGraph::EdgeSequence::insertEdgeInternal(Node &TargetN,
43                                                     Edge::Kind EK) {
44  EdgeIndexMap.insert({&TargetN, Edges.size()});
45  Edges.emplace_back(TargetN, EK);
46}
47
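// Update the kind of an existing edge to TargetN; the edge must already be
// present in this sequence.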
48void LazyCallGraph::EdgeSequence::setEdgeKind(Node &TargetN, Edge::Kind EK) {
49  Edges[EdgeIndexMap.find(&TargetN)->second].setKind(EK);
50}
51
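// Remove the edge to TargetN if present, returning whether anything was
// removed. The slot is left as a null Edge so the indices of the remaining
// edges stay stable.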
52bool LazyCallGraph::EdgeSequence::removeEdgeInternal(Node &TargetN) {
53  auto IndexMapI = EdgeIndexMap.find(&TargetN);
54  if (IndexMapI == EdgeIndexMap.end())
55    return false;
56
57  Edges[IndexMapI->second] = Edge();
58  EdgeIndexMap.erase(IndexMapI);
59  return true;
60}
61
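// Helper used while populating edge sequences: add an edge to N of kind EK
// unless one has already been recorded in EdgeIndexMap.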
62static void addEdge(SmallVectorImpl<LazyCallGraph::Edge> &Edges,
63                    DenseMap<LazyCallGraph::Node *, int> &EdgeIndexMap,
64                    LazyCallGraph::Node &N, LazyCallGraph::Edge::Kind EK) {
65  if (!EdgeIndexMap.insert({&N, Edges.size()}).second)
66    return;
67
68  LLVM_DEBUG(dbgs() << "    Added callable function: " << N.getName() << "\n");
69  Edges.emplace_back(LazyCallGraph::Edge(N, EK));
70}
71
72LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
73  assert(!Edges && "Must not have already populated the edges for this node!");
74
75  LLVM_DEBUG(dbgs() << "  Adding functions called by '" << getName()
76                    << "' to the graph.\n");
77
78  Edges = EdgeSequence();
79
80  SmallVector<Constant *, 16> Worklist;
81  SmallPtrSet<Function *, 4> Callees;
82  SmallPtrSet<Constant *, 16> Visited;
83
84  // Find all the potential call graph edges in this function. We track both
85  // actual call edges and indirect references to functions. The direct calls
86  // are trivially added, but to accumulate the latter we walk the instructions
87  // and add every operand which is a constant to the worklist to process
88  // afterward.
89  //
90  // Note that we consider *any* function with a definition to be a viable
91  // edge. Even if the function's definition is subject to replacement by
92  // some other module (say, a weak definition) there may still be
93  // optimizations which essentially speculate based on the definition and
94  // a way to check that the specific definition is in fact the one being
95  // used. For example, this could be done by moving the weak definition to
96  // a strong (internal) definition and making the weak definition be an
97  // alias. Then a test of the address of the weak function against the new
98  // strong definition's address would be an effective way to determine the
99  // safety of optimizing a direct call edge.
100  for (BasicBlock &BB : *F)
101    for (Instruction &I : BB) {
102      if (auto CS = CallSite(&I))
103        if (Function *Callee = CS.getCalledFunction())
104          if (!Callee->isDeclaration())
105            if (Callees.insert(Callee).second) {
106              Visited.insert(Callee);
107              addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*Callee),
108                      LazyCallGraph::Edge::Call);
109            }
110
111      for (Value *Op : I.operand_values())
112        if (Constant *C = dyn_cast<Constant>(Op))
113          if (Visited.insert(C).second)
114            Worklist.push_back(C);
115    }
116
117  // We've collected all the constant (and thus potentially function or
118  // function containing) operands to all of the instructions in the function.
119  // Process them (recursively) collecting every function found.
120  visitReferences(Worklist, Visited, [&](Function &F) {
121    addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(F),
122            LazyCallGraph::Edge::Ref);
123  });
124
125  // Add implicit reference edges to any defined libcall functions (if we
126  // haven't found an explicit edge).
127  for (auto *F : G->LibFunctions)
128    if (!Visited.count(F))
129      addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
130              LazyCallGraph::Edge::Ref);
131
132  return *Edges;
133}
134
135void LazyCallGraph::Node::replaceFunction(Function &NewF) {
136  assert(F != &NewF && "Must not replace a function with itself!");
137  F = &NewF;
138}
139
140#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
141LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
142  dbgs() << *this << '\n';
143}
144#endif
145
146static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
147  LibFunc LF;
148
149  // Either this is a normal library function or a "vectorizable" function.
150  return TLI.getLibFunc(F, LF) || TLI.isFunctionVectorizable(F.getName());
151}
152
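// Illustrative usage sketch (added for exposition, not part of the original
// file): with the new pass manager this constructor is normally reached
// through LazyCallGraphAnalysis rather than being called directly. Assuming
// the usual analyses have been registered on a ModuleAnalysisManager `MAM`:
//
//   LazyCallGraph &CG = MAM.getResult<LazyCallGraphAnalysis>(M);
//   for (LazyCallGraph::RefSCC &RC : CG.postorder_ref_sccs())
//     for (LazyCallGraph::SCC &C : RC)
//       (void)C; // visit each SCC in bottom-up (postorder) order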
153LazyCallGraph::LazyCallGraph(
154    Module &M, function_ref<TargetLibraryInfo &(Function &)> GetTLI) {
155  LLVM_DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
156                    << "\n");
157  for (Function &F : M) {
158    if (F.isDeclaration())
159      continue;
160    // If this function is known to LLVM as a library function then we want to
161    // synthesize reference edges to it to model the fact that LLVM can turn
162    // arbitrary code into a library function call.
163    if (isKnownLibFunction(F, GetTLI(F)))
164      LibFunctions.insert(&F);
165
166    if (F.hasLocalLinkage())
167      continue;
168
169    // External linkage defined functions have edges to them from other
170    // modules.
171    LLVM_DEBUG(dbgs() << "  Adding '" << F.getName()
172                      << "' to entry set of the graph.\n");
173    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
174  }
175
176  // Externally visible aliases of internal functions are also viable entry
177  // edges to the module.
178  for (auto &A : M.aliases()) {
179    if (A.hasLocalLinkage())
180      continue;
181    if (Function *F = dyn_cast<Function>(A.getAliasee())) {
182      LLVM_DEBUG(dbgs() << "  Adding '" << F->getName()
183                        << "' with alias '" << A.getName()
184                        << "' to entry set of the graph.\n");
185      addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(*F), Edge::Ref);
186    }
187  }
188
189  // Now add entry nodes for functions reachable via initializers to globals.
190  SmallVector<Constant *, 16> Worklist;
191  SmallPtrSet<Constant *, 16> Visited;
192  for (GlobalVariable &GV : M.globals())
193    if (GV.hasInitializer())
194      if (Visited.insert(GV.getInitializer()).second)
195        Worklist.push_back(GV.getInitializer());
196
197  LLVM_DEBUG(
198      dbgs() << "  Adding functions referenced by global initializers to the "
199                "entry set.\n");
200  visitReferences(Worklist, Visited, [&](Function &F) {
201    addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F),
202            LazyCallGraph::Edge::Ref);
203  });
204}
205
206LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
207    : BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
208      EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
209      SCCMap(std::move(G.SCCMap)),
210      LibFunctions(std::move(G.LibFunctions)) {
211  updateGraphPtrs();
212}
213
214LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
215  BPA = std::move(G.BPA);
216  NodeMap = std::move(G.NodeMap);
217  EntryEdges = std::move(G.EntryEdges);
218  SCCBPA = std::move(G.SCCBPA);
219  SCCMap = std::move(G.SCCMap);
220  LibFunctions = std::move(G.LibFunctions);
221  updateGraphPtrs();
222  return *this;
223}
224
225#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
226LLVM_DUMP_METHOD void LazyCallGraph::SCC::dump() const {
227  dbgs() << *this << '\n';
228}
229#endif
230
231#ifndef NDEBUG
232void LazyCallGraph::SCC::verify() {
233  assert(OuterRefSCC && "Can't have a null RefSCC!");
234  assert(!Nodes.empty() && "Can't have an empty SCC!");
235
236  for (Node *N : Nodes) {
237    assert(N && "Can't have a null node!");
238    assert(OuterRefSCC->G->lookupSCC(*N) == this &&
239           "Node does not map to this SCC!");
240    assert(N->DFSNumber == -1 &&
241           "Must set DFS numbers to -1 when adding a node to an SCC!");
242    assert(N->LowLink == -1 &&
243           "Must set low link to -1 when adding a node to an SCC!");
244    for (Edge &E : **N)
245      assert(E.getNode().isPopulated() && "Can't have an unpopulated node!");
246  }
247}
248#endif
249
250bool LazyCallGraph::SCC::isParentOf(const SCC &C) const {
251  if (this == &C)
252    return false;
253
254  for (Node &N : *this)
255    for (Edge &E : N->calls())
256      if (OuterRefSCC->G->lookupSCC(E.getNode()) == &C)
257        return true;
258
259  // No edges found.
260  return false;
261}
262
263bool LazyCallGraph::SCC::isAncestorOf(const SCC &TargetC) const {
264  if (this == &TargetC)
265    return false;
266
267  LazyCallGraph &G = *OuterRefSCC->G;
268
269  // Start with this SCC.
270  SmallPtrSet<const SCC *, 16> Visited = {this};
271  SmallVector<const SCC *, 16> Worklist = {this};
272
273  // Walk down the graph until we run out of edges or find a path to TargetC.
274  do {
275    const SCC &C = *Worklist.pop_back_val();
276    for (Node &N : C)
277      for (Edge &E : N->calls()) {
278        SCC *CalleeC = G.lookupSCC(E.getNode());
279        if (!CalleeC)
280          continue;
281
282        // If the callee's SCC is the TargetC, we're done.
283        if (CalleeC == &TargetC)
284          return true;
285
286        // If this is the first time we've reached this SCC, put it on the
287        // worklist to recurse through.
288        if (Visited.insert(CalleeC).second)
289          Worklist.push_back(CalleeC);
290      }
291  } while (!Worklist.empty());
292
293  // No paths found.
294  return false;
295}
296
297LazyCallGraph::RefSCC::RefSCC(LazyCallGraph &G) : G(&G) {}
298
299#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
300LLVM_DUMP_METHOD void LazyCallGraph::RefSCC::dump() const {
301  dbgs() << *this << '\n';
302}
303#endif
304
305#ifndef NDEBUG
306void LazyCallGraph::RefSCC::verify() {
307  assert(G && "Can't have a null graph!");
308  assert(!SCCs.empty() && "Can't have an empty SCC!");
309
310  // Verify basic properties of the SCCs.
311  SmallPtrSet<SCC *, 4> SCCSet;
312  for (SCC *C : SCCs) {
313    assert(C && "Can't have a null SCC!");
314    C->verify();
315    assert(&C->getOuterRefSCC() == this &&
316           "SCC doesn't think it is inside this RefSCC!");
317    bool Inserted = SCCSet.insert(C).second;
318    assert(Inserted && "Found a duplicate SCC!");
319    auto IndexIt = SCCIndices.find(C);
320    assert(IndexIt != SCCIndices.end() &&
321           "Found an SCC that doesn't have an index!");
322  }
323
324  // Check that our indices map correctly.
325  for (auto &SCCIndexPair : SCCIndices) {
326    SCC *C = SCCIndexPair.first;
327    int i = SCCIndexPair.second;
328    assert(C && "Can't have a null SCC in the indices!");
329    assert(SCCSet.count(C) && "Found an index for an SCC not in the RefSCC!");
330    assert(SCCs[i] == C && "Index doesn't point to SCC!");
331  }
332
333  // Check that the SCCs are in fact in post-order.
334  for (int i = 0, Size = SCCs.size(); i < Size; ++i) {
335    SCC &SourceSCC = *SCCs[i];
336    for (Node &N : SourceSCC)
337      for (Edge &E : *N) {
338        if (!E.isCall())
339          continue;
340        SCC &TargetSCC = *G->lookupSCC(E.getNode());
341        if (&TargetSCC.getOuterRefSCC() == this) {
342          assert(SCCIndices.find(&TargetSCC)->second <= i &&
343                 "Edge between SCCs violates post-order relationship.");
344          continue;
345        }
346      }
347  }
348}
349#endif
350
351bool LazyCallGraph::RefSCC::isParentOf(const RefSCC &RC) const {
352  if (&RC == this)
353    return false;
354
355  // Search all edges to see if this is a parent.
356  for (SCC &C : *this)
357    for (Node &N : C)
358      for (Edge &E : *N)
359        if (G->lookupRefSCC(E.getNode()) == &RC)
360          return true;
361
362  return false;
363}
364
365bool LazyCallGraph::RefSCC::isAncestorOf(const RefSCC &RC) const {
366  if (&RC == this)
367    return false;
368
369  // For each descendant of this RefSCC, see if one of its children is the
370  // argument. If not, add that descendant to the worklist and continue
371  // searching.
372  SmallVector<const RefSCC *, 4> Worklist;
373  SmallPtrSet<const RefSCC *, 4> Visited;
374  Worklist.push_back(this);
375  Visited.insert(this);
376  do {
377    const RefSCC &DescendantRC = *Worklist.pop_back_val();
378    for (SCC &C : DescendantRC)
379      for (Node &N : C)
380        for (Edge &E : *N) {
381          auto *ChildRC = G->lookupRefSCC(E.getNode());
382          if (ChildRC == &RC)
383            return true;
384          if (!ChildRC || !Visited.insert(ChildRC).second)
385            continue;
386          Worklist.push_back(ChildRC);
387        }
388  } while (!Worklist.empty());
389
390  return false;
391}
392
393/// Generic helper that updates a postorder sequence of SCCs for a potentially
394/// cycle-introducing edge insertion.
395///
396/// A postorder sequence of SCCs of a directed graph has one fundamental
397/// property: all edges in the DAG of SCCs point "up" the sequence. That is,
398/// all edges in the SCC DAG point to prior SCCs in the sequence.
399///
400/// This routine both updates a postorder sequence and uses that sequence to
401/// compute the set of SCCs connected into a cycle. It should only be called to
402/// insert a "downward" edge which will require changing the sequence to
403/// restore it to a postorder.
404///
405/// When inserting an edge from an earlier SCC to a later SCC in some postorder
406/// sequence, all of the SCCs which may be impacted are in the closed range of
407/// those two within the postorder sequence. The algorithm used here to restore
408/// the state is as follows:
409///
410/// 1) Starting from the source SCC, construct a set of SCCs which reach the
411///    source SCC consisting of just the source SCC. Then scan toward the
412///    target SCC in postorder and for each SCC, if it has an edge to an SCC
413///    in the set, add it to the set. Otherwise, it does not reach the
414///    source SCC; move it in the postorder sequence to immediately before
415///    the source SCC, shifting the source SCC and all SCCs in the set one
416///    position toward the target SCC. Stop scanning after processing the
417///    target SCC.
418/// 2) If the source SCC is now past the target SCC in the postorder sequence,
419///    and thus the new edge will flow toward the start, we are done.
420/// 3) Otherwise, starting from the target SCC, walk all edges which reach an
421///    SCC between the source and the target, and add them to the set of
422///    connected SCCs, then recurse through them. Once a complete set of the
423///    SCCs the target connects to is known, hoist the remaining SCCs between
424///    the source and the target to be above the target. Note that there is no
425///    need to process the source SCC, it is already known to connect.
426/// 4) At this point, all of the SCCs in the closed range between the source
427///    SCC and the target SCC in the postorder sequence are connected,
428///    including the target SCC and the source SCC. Inserting the edge from
429///    the source SCC to the target SCC will form a cycle out of precisely
430///    these SCCs. Thus we can merge all of the SCCs in this closed range into
431///    a single SCC.
432///
433/// This process has various important properties:
434/// - Only mutates the SCCs when adding the edge actually changes the SCC
435///   structure.
436/// - Never mutates SCCs which are unaffected by the change.
437/// - Updates the postorder sequence to correctly satisfy the postorder
438///   constraint after the edge is inserted.
439/// - Only reorders SCCs in the closed postorder sequence from the source to
440///   the target, so it is easy to bound how much has changed even in the ordering.
441/// - Big-O is the number of edges in the closed postorder range of SCCs from
442///   source to target.
443///
444/// This helper routine, in addition to updating the postorder sequence itself
445/// will also update a map from SCCs to indices within that sequence.
446///
447/// The sequence and the map must operate on pointers to the SCC type.
448///
449/// Two callbacks must be provided. The first computes the subset of SCCs in
450/// the postorder closed range from the source to the target which connect to
451/// the source SCC via some (transitive) set of edges. The second computes the
452/// subset of the same range which the target SCC connects to via some
453/// (transitive) set of edges. Both callbacks should populate the set argument
454/// provided.
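///
/// As a small illustrative example (added for exposition, not exhaustive):
/// given the postorder sequence [S, A, B, T] with existing edges A -> S and
/// T -> A, inserting the edge S -> T first moves B (which does not reach S)
/// to just before S, giving [B, S, A, T]. The target T does reach S, so the
/// SCCs reachable from T within the range ({T, A}) are kept between S and T,
/// and the returned range covers S and A, which together with T form the new
/// cycle S -> T -> A -> S.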
455template <typename SCCT, typename PostorderSequenceT, typename SCCIndexMapT,
456          typename ComputeSourceConnectedSetCallableT,
457          typename ComputeTargetConnectedSetCallableT>
458static iterator_range<typename PostorderSequenceT::iterator>
459updatePostorderSequenceForEdgeInsertion(
460    SCCT &SourceSCC, SCCT &TargetSCC, PostorderSequenceT &SCCs,
461    SCCIndexMapT &SCCIndices,
462    ComputeSourceConnectedSetCallableT ComputeSourceConnectedSet,
463    ComputeTargetConnectedSetCallableT ComputeTargetConnectedSet) {
464  int SourceIdx = SCCIndices[&SourceSCC];
465  int TargetIdx = SCCIndices[&TargetSCC];
466  assert(SourceIdx < TargetIdx && "Cannot have equal indices here!");
467
468  SmallPtrSet<SCCT *, 4> ConnectedSet;
469
470  // Compute the SCCs which (transitively) reach the source.
471  ComputeSourceConnectedSet(ConnectedSet);
472
473  // Partition the SCCs in this part of the postorder sequence so only SCCs
474  // connecting to the source remain between it and the target. This is
475  // a benign partition as it preserves postorder.
476  auto SourceI = std::stable_partition(
477      SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx + 1,
478      [&ConnectedSet](SCCT *C) { return !ConnectedSet.count(C); });
479  for (int i = SourceIdx, e = TargetIdx + 1; i < e; ++i)
480    SCCIndices.find(SCCs[i])->second = i;
481
482  // If the target doesn't connect to the source, then we've corrected the
483  // post-order and there are no cycles formed.
484  if (!ConnectedSet.count(&TargetSCC)) {
485    assert(SourceI > (SCCs.begin() + SourceIdx) &&
486           "Must have moved the source to fix the post-order.");
487    assert(*std::prev(SourceI) == &TargetSCC &&
488           "Last SCC to move should have bene the target.");
489
490    // Return an empty range at the target SCC indicating there is nothing to
491    // merge.
492    return make_range(std::prev(SourceI), std::prev(SourceI));
493  }
494
495  assert(SCCs[TargetIdx] == &TargetSCC &&
496         "Should not have moved target if connected!");
497  SourceIdx = SourceI - SCCs.begin();
498  assert(SCCs[SourceIdx] == &SourceSCC &&
499         "Bad updated index computation for the source SCC!");
500
501
502  // See whether there are any remaining intervening SCCs between the source
503  // and target. If so, we need to make sure they are all reachable from the
504  // target.
505  if (SourceIdx + 1 < TargetIdx) {
506    ConnectedSet.clear();
507    ComputeTargetConnectedSet(ConnectedSet);
508
509    // Partition SCCs so that only SCCs reached from the target remain between
510    // the source and the target. This preserves postorder.
511    auto TargetI = std::stable_partition(
512        SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1,
513        [&ConnectedSet](SCCT *C) { return ConnectedSet.count(C); });
514    for (int i = SourceIdx + 1, e = TargetIdx + 1; i < e; ++i)
515      SCCIndices.find(SCCs[i])->second = i;
516    TargetIdx = std::prev(TargetI) - SCCs.begin();
517    assert(SCCs[TargetIdx] == &TargetSCC &&
518           "Should always end with the target!");
519  }
520
521  // At this point, we know that connecting source to target forms a cycle
522  // because target connects back to source, and we know that all of the SCCs
523  // between the source and target in the postorder sequence participate in that
524  // cycle.
525  return make_range(SCCs.begin() + SourceIdx, SCCs.begin() + TargetIdx);
526}
527
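// Illustrative sketch of a hypothetical caller (added for exposition, not part
// of the original file): the MergeCB hook lets callers act on the SCCs that
// are about to be folded into the target SCC, for example to invalidate any
// per-SCC analysis results they cache. Assuming `AM` is a CGSCCAnalysisManager
// available to the caller:
//
//   RC.switchInternalEdgeToCall(
//       SourceN, TargetN, [&](ArrayRef<LazyCallGraph::SCC *> Merged) {
//         for (LazyCallGraph::SCC *C : Merged)
//           AM.invalidate(*C, PreservedAnalyses::none());
//       });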
528bool
529LazyCallGraph::RefSCC::switchInternalEdgeToCall(
530    Node &SourceN, Node &TargetN,
531    function_ref<void(ArrayRef<SCC *> MergeSCCs)> MergeCB) {
532  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
533  SmallVector<SCC *, 1> DeletedSCCs;
534
535#ifndef NDEBUG
536  // In a debug build, verify the RefSCC is valid to start with and when this
537  // routine finishes.
538  verify();
539  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
540#endif
541
542  SCC &SourceSCC = *G->lookupSCC(SourceN);
543  SCC &TargetSCC = *G->lookupSCC(TargetN);
544
545  // If the two nodes are already part of the same SCC, we're also done as
546  // we've just added more connectivity.
547  if (&SourceSCC == &TargetSCC) {
548    SourceN->setEdgeKind(TargetN, Edge::Call);
549    return false; // No new cycle.
550  }
551
552  // At this point we leverage the postorder list of SCCs to detect when the
553  // insertion of an edge changes the SCC structure in any way.
554  //
555  // First and foremost, we can eliminate the need for any changes when the
556  // edge is toward the beginning of the postorder sequence because all edges
557  // flow in that direction already. Thus adding a new one cannot form a cycle.
558  int SourceIdx = SCCIndices[&SourceSCC];
559  int TargetIdx = SCCIndices[&TargetSCC];
560  if (TargetIdx < SourceIdx) {
561    SourceN->setEdgeKind(TargetN, Edge::Call);
562    return false; // No new cycle.
563  }
564
565  // Compute the SCCs which (transitively) reach the source.
566  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
567#ifndef NDEBUG
568    // Check that the RefSCC is still valid before computing this as the
569    // results will be nonsensical if we've broken its invariants.
570    verify();
571#endif
572    ConnectedSet.insert(&SourceSCC);
573    auto IsConnected = [&](SCC &C) {
574      for (Node &N : C)
575        for (Edge &E : N->calls())
576          if (ConnectedSet.count(G->lookupSCC(E.getNode())))
577            return true;
578
579      return false;
580    };
581
582    for (SCC *C :
583         make_range(SCCs.begin() + SourceIdx + 1, SCCs.begin() + TargetIdx + 1))
584      if (IsConnected(*C))
585        ConnectedSet.insert(C);
586  };
587
588  // Use a normal worklist to find which SCCs the target connects to. We still
589  // bound the search based on the range in the postorder list we care about,
590  // but because this is forward connectivity we just "recurse" through the
591  // edges.
592  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<SCC *> &ConnectedSet) {
593#ifndef NDEBUG
594    // Check that the RefSCC is still valid before computing this as the
595    // results will be nonsensical if we've broken its invariants.
596    verify();
597#endif
598    ConnectedSet.insert(&TargetSCC);
599    SmallVector<SCC *, 4> Worklist;
600    Worklist.push_back(&TargetSCC);
601    do {
602      SCC &C = *Worklist.pop_back_val();
603      for (Node &N : C)
604        for (Edge &E : *N) {
605          if (!E.isCall())
606            continue;
607          SCC &EdgeC = *G->lookupSCC(E.getNode());
608          if (&EdgeC.getOuterRefSCC() != this)
609            // Not in this RefSCC...
610            continue;
611          if (SCCIndices.find(&EdgeC)->second <= SourceIdx)
612            // Not in the postorder sequence between source and target.
613            continue;
614
615          if (ConnectedSet.insert(&EdgeC).second)
616            Worklist.push_back(&EdgeC);
617        }
618    } while (!Worklist.empty());
619  };
620
621  // Use a generic helper to update the postorder sequence of SCCs and return
622  // a range of any SCCs connected into a cycle by inserting this edge. This
623  // routine will also take care of updating the indices into the postorder
624  // sequence.
625  auto MergeRange = updatePostorderSequenceForEdgeInsertion(
626      SourceSCC, TargetSCC, SCCs, SCCIndices, ComputeSourceConnectedSet,
627      ComputeTargetConnectedSet);
628
629  // Run the user's callback on the merged SCCs before we actually merge them.
630  if (MergeCB)
631    MergeCB(makeArrayRef(MergeRange.begin(), MergeRange.end()));
632
633  // If the merge range is empty, then adding the edge didn't actually form any
634  // new cycles. We're done.
635  if (MergeRange.empty()) {
636    // Now that the SCC structure is finalized, flip the kind to call.
637    SourceN->setEdgeKind(TargetN, Edge::Call);
638    return false; // No new cycle.
639  }
640
641#ifndef NDEBUG
642  // Before merging, check that the RefSCC remains valid after all the
643  // postorder updates.
644  verify();
645#endif
646
647  // Otherwise we need to merge all of the SCCs in the cycle into a single
648  // result SCC.
649  //
650  // NB: We merge into the target because all of these functions were already
651  // reachable from the target, meaning any SCC-wide properties deduced about it
652  // other than the set of functions within it will not have changed.
653  for (SCC *C : MergeRange) {
654    assert(C != &TargetSCC &&
655           "We merge *into* the target and shouldn't process it here!");
656    SCCIndices.erase(C);
657    TargetSCC.Nodes.append(C->Nodes.begin(), C->Nodes.end());
658    for (Node *N : C->Nodes)
659      G->SCCMap[N] = &TargetSCC;
660    C->clear();
661    DeletedSCCs.push_back(C);
662  }
663
664  // Erase the merged SCCs from the list and update the indices of the
665  // remaining SCCs.
666  int IndexOffset = MergeRange.end() - MergeRange.begin();
667  auto EraseEnd = SCCs.erase(MergeRange.begin(), MergeRange.end());
668  for (SCC *C : make_range(EraseEnd, SCCs.end()))
669    SCCIndices[C] -= IndexOffset;
670
671  // Now that the SCC structure is finalized, flip the kind to call.
672  SourceN->setEdgeKind(TargetN, Edge::Call);
673
674  // And we're done, but we did form a new cycle.
675  return true;
676}
677
678void LazyCallGraph::RefSCC::switchTrivialInternalEdgeToRef(Node &SourceN,
679                                                           Node &TargetN) {
680  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
681
682#ifndef NDEBUG
683  // In a debug build, verify the RefSCC is valid to start with and when this
684  // routine finishes.
685  verify();
686  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
687#endif
688
689  assert(G->lookupRefSCC(SourceN) == this &&
690         "Source must be in this RefSCC.");
691  assert(G->lookupRefSCC(TargetN) == this &&
692         "Target must be in this RefSCC.");
693  assert(G->lookupSCC(SourceN) != G->lookupSCC(TargetN) &&
694         "Source and Target must be in separate SCCs for this to be trivial!");
695
696  // Set the edge kind.
697  SourceN->setEdgeKind(TargetN, Edge::Ref);
698}
699
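// Illustrative sketch of a hypothetical caller (added for exposition, not part
// of the original file): the returned range can be iterated to visit the
// freshly formed SCCs in postorder, e.g.:
//
//   for (LazyCallGraph::SCC &NewC : RC.switchInternalEdgeToRef(SourceN, TargetN))
//     ; // update any per-SCC state for NewC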
700iterator_range<LazyCallGraph::RefSCC::iterator>
701LazyCallGraph::RefSCC::switchInternalEdgeToRef(Node &SourceN, Node &TargetN) {
702  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
703
704#ifndef NDEBUG
705  // In a debug build, verify the RefSCC is valid to start with and when this
706  // routine finishes.
707  verify();
708  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
709#endif
710
711  assert(G->lookupRefSCC(SourceN) == this &&
712         "Source must be in this RefSCC.");
713  assert(G->lookupRefSCC(TargetN) == this &&
714         "Target must be in this RefSCC.");
715
716  SCC &TargetSCC = *G->lookupSCC(TargetN);
717  assert(G->lookupSCC(SourceN) == &TargetSCC && "Source and Target must be in "
718                                                "the same SCC to require the "
719                                                "full CG update.");
720
721  // Set the edge kind.
722  SourceN->setEdgeKind(TargetN, Edge::Ref);
723
724  // Otherwise we are removing a call edge from a single SCC. This may break
725  // the cycle. In order to compute the new set of SCCs, we need to do a small
726  // DFS over the nodes within the SCC to form any sub-cycles that remain as
727  // distinct SCCs and compute a postorder over the resulting SCCs.
728  //
729  // However, we specially handle the target node. The target node is known to
730  // reach all other nodes in the original SCC by definition. This means that
731  // we want the old SCC to be replaced with an SCC containing that node as it
732  // will be the root of whatever SCC DAG results from the DFS. Assumptions
733  // about an SCC such as the set of functions called will continue to hold,
734  // etc.
735
736  SCC &OldSCC = TargetSCC;
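  // DFSStack holds the active DFS path as (node, next call-edge iterator)
  // pairs, while PendingSCCStack accumulates finished nodes until an SCC root
  // is found and they are peeled off into a new SCC (or folded into the old
  // SCC via the shortcut described below).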
737  SmallVector<std::pair<Node *, EdgeSequence::call_iterator>, 16> DFSStack;
738  SmallVector<Node *, 16> PendingSCCStack;
739  SmallVector<SCC *, 4> NewSCCs;
740
741  // Prepare the nodes for a fresh DFS.
742  SmallVector<Node *, 16> Worklist;
743  Worklist.swap(OldSCC.Nodes);
744  for (Node *N : Worklist) {
745    N->DFSNumber = N->LowLink = 0;
746    G->SCCMap.erase(N);
747  }
748
749  // Force the target node to be in the old SCC. This also enables us to take
750  // a very significant short-cut in the standard Tarjan walk to re-form SCCs
751  // below: whenever we build an edge that reaches the target node, we know
752  // that the target node eventually connects back to all other nodes in our
753  // walk. As a consequence, we can detect and handle participants in that
754  // cycle without walking all the edges that form this connection, and instead
755  // by relying on the fundamental guarantee coming into this operation (all
756  // nodes are reachable from the target due to previously forming an SCC).
757  TargetN.DFSNumber = TargetN.LowLink = -1;
758  OldSCC.Nodes.push_back(&TargetN);
759  G->SCCMap[&TargetN] = &OldSCC;
760
761  // Scan down the stack and DFS across the call edges.
762  for (Node *RootN : Worklist) {
763    assert(DFSStack.empty() &&
764           "Cannot begin a new root with a non-empty DFS stack!");
765    assert(PendingSCCStack.empty() &&
766           "Cannot begin a new root with pending nodes for an SCC!");
767
768    // Skip any nodes we've already reached in the DFS.
769    if (RootN->DFSNumber != 0) {
770      assert(RootN->DFSNumber == -1 &&
771             "Shouldn't have any mid-DFS root nodes!");
772      continue;
773    }
774
775    RootN->DFSNumber = RootN->LowLink = 1;
776    int NextDFSNumber = 2;
777
778    DFSStack.push_back({RootN, (*RootN)->call_begin()});
779    do {
780      Node *N;
781      EdgeSequence::call_iterator I;
782      std::tie(N, I) = DFSStack.pop_back_val();
783      auto E = (*N)->call_end();
784      while (I != E) {
785        Node &ChildN = I->getNode();
786        if (ChildN.DFSNumber == 0) {
787          // We haven't yet visited this child, so descend, pushing the current
788          // node onto the stack.
789          DFSStack.push_back({N, I});
790
791          assert(!G->SCCMap.count(&ChildN) &&
792                 "Found a node with 0 DFS number but already in an SCC!");
793          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
794          N = &ChildN;
795          I = (*N)->call_begin();
796          E = (*N)->call_end();
797          continue;
798        }
799
800        // Check for the child already being part of some component.
801        if (ChildN.DFSNumber == -1) {
802          if (G->lookupSCC(ChildN) == &OldSCC) {
803            // If the child is part of the old SCC, we know that it can reach
804            // every other node, so we have formed a cycle. Pull the entire DFS
805            // and pending stacks into it. See the comment above about setting
806            // up the old SCC for why we do this.
807            int OldSize = OldSCC.size();
808            OldSCC.Nodes.push_back(N);
809            OldSCC.Nodes.append(PendingSCCStack.begin(), PendingSCCStack.end());
810            PendingSCCStack.clear();
811            while (!DFSStack.empty())
812              OldSCC.Nodes.push_back(DFSStack.pop_back_val().first);
813            for (Node &N : make_range(OldSCC.begin() + OldSize, OldSCC.end())) {
814              N.DFSNumber = N.LowLink = -1;
815              G->SCCMap[&N] = &OldSCC;
816            }
817            N = nullptr;
818            break;
819          }
820
821          // If the child has already been added to some child component, it
822          // couldn't impact the low-link of this parent because it isn't
823          // connected, and thus its low-link isn't relevant so skip it.
824          ++I;
825          continue;
826        }
827
828        // Track the lowest linked child as the lowest link for this node.
829        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
830        if (ChildN.LowLink < N->LowLink)
831          N->LowLink = ChildN.LowLink;
832
833        // Move to the next edge.
834        ++I;
835      }
836      if (!N)
837        // Cleared the DFS early, start another round.
838        break;
839
840      // We've finished processing N and its descendants, put it on our pending
841      // SCC stack to eventually get merged into an SCC of nodes.
842      PendingSCCStack.push_back(N);
843
844      // If this node is linked to some lower entry, continue walking up the
845      // stack.
846      if (N->LowLink != N->DFSNumber)
847        continue;
848
849      // Otherwise, we've completed an SCC. Append it to our post order list of
850      // SCCs.
851      int RootDFSNumber = N->DFSNumber;
852      // Find the range of the node stack by walking down until we pass the
853      // root DFS number.
854      auto SCCNodes = make_range(
855          PendingSCCStack.rbegin(),
856          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
857            return N->DFSNumber < RootDFSNumber;
858          }));
859
860      // Form a new SCC out of these nodes and then clear them off our pending
861      // stack.
862      NewSCCs.push_back(G->createSCC(*this, SCCNodes));
863      for (Node &N : *NewSCCs.back()) {
864        N.DFSNumber = N.LowLink = -1;
865        G->SCCMap[&N] = NewSCCs.back();
866      }
867      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
868    } while (!DFSStack.empty());
869  }
870
871  // Insert the remaining SCCs before the old one. The old SCC can reach all
872  // other SCCs we form because it contains the target node of the edge we
873  // just removed. This means that we will have edges into all of the new
874  // SCCs, which means the old one must come last for postorder.
875  int OldIdx = SCCIndices[&OldSCC];
876  SCCs.insert(SCCs.begin() + OldIdx, NewSCCs.begin(), NewSCCs.end());
877
878  // Update the mapping from SCC* to index to cover the new SCCs and every SCC
879  // whose position shifted, including the old one.
880  for (int Idx = OldIdx, Size = SCCs.size(); Idx < Size; ++Idx)
881    SCCIndices[SCCs[Idx]] = Idx;
882
883  return make_range(SCCs.begin() + OldIdx,
884                    SCCs.begin() + OldIdx + NewSCCs.size());
885}
886
887void LazyCallGraph::RefSCC::switchOutgoingEdgeToCall(Node &SourceN,
888                                                     Node &TargetN) {
889  assert(!(*SourceN)[TargetN].isCall() && "Must start with a ref edge!");
890
891  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
892  assert(G->lookupRefSCC(TargetN) != this &&
893         "Target must not be in this RefSCC.");
894#ifdef EXPENSIVE_CHECKS
895  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
896         "Target must be a descendant of the Source.");
897#endif
898
899  // Edges between RefSCCs are the same regardless of call or ref, so we can
900  // just flip the edge here.
901  SourceN->setEdgeKind(TargetN, Edge::Call);
902
903#ifndef NDEBUG
904  // Check that the RefSCC is still valid.
905  verify();
906#endif
907}
908
909void LazyCallGraph::RefSCC::switchOutgoingEdgeToRef(Node &SourceN,
910                                                    Node &TargetN) {
911  assert((*SourceN)[TargetN].isCall() && "Must start with a call edge!");
912
913  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
914  assert(G->lookupRefSCC(TargetN) != this &&
915         "Target must not be in this RefSCC.");
916#ifdef EXPENSIVE_CHECKS
917  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
918         "Target must be a descendant of the Source.");
919#endif
920
921  // Edges between RefSCCs are the same regardless of call or ref, so we can
922  // just flip the edge here.
923  SourceN->setEdgeKind(TargetN, Edge::Ref);
924
925#ifndef NDEBUG
926  // Check that the RefSCC is still valid.
927  verify();
928#endif
929}
930
931void LazyCallGraph::RefSCC::insertInternalRefEdge(Node &SourceN,
932                                                  Node &TargetN) {
933  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
934  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
935
936  SourceN->insertEdgeInternal(TargetN, Edge::Ref);
937
938#ifndef NDEBUG
939  // Check that the RefSCC is still valid.
940  verify();
941#endif
942}
943
944void LazyCallGraph::RefSCC::insertOutgoingEdge(Node &SourceN, Node &TargetN,
945                                               Edge::Kind EK) {
946  // First insert it into the caller.
947  SourceN->insertEdgeInternal(TargetN, EK);
948
949  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
950
951  assert(G->lookupRefSCC(TargetN) != this &&
952         "Target must not be in this RefSCC.");
953#ifdef EXPENSIVE_CHECKS
954  assert(G->lookupRefSCC(TargetN)->isDescendantOf(*this) &&
955         "Target must be a descendant of the Source.");
956#endif
957
958#ifndef NDEBUG
959  // Check that the RefSCC is still valid.
960  verify();
961#endif
962}
963
964SmallVector<LazyCallGraph::RefSCC *, 1>
965LazyCallGraph::RefSCC::insertIncomingRefEdge(Node &SourceN, Node &TargetN) {
966  assert(G->lookupRefSCC(TargetN) == this && "Target must be in this RefSCC.");
967  RefSCC &SourceC = *G->lookupRefSCC(SourceN);
968  assert(&SourceC != this && "Source must not be in this RefSCC.");
969#ifdef EXPENSIVE_CHECKS
970  assert(SourceC.isDescendantOf(*this) &&
971         "Source must be a descendant of the Target.");
972#endif
973
974  SmallVector<RefSCC *, 1> DeletedRefSCCs;
975
976#ifndef NDEBUG
977  // In a debug build, verify the RefSCC is valid to start with and when this
978  // routine finishes.
979  verify();
980  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
981#endif
982
983  int SourceIdx = G->RefSCCIndices[&SourceC];
984  int TargetIdx = G->RefSCCIndices[this];
985  assert(SourceIdx < TargetIdx &&
986         "Postorder list doesn't see edge as incoming!");
987
988  // Compute the RefSCCs which (transitively) reach the source. We do this by
989  // scanning each RefSCC in the postorder range between the source and this
990  // RefSCC (the target), adding it to the set whenever it has an edge into a
991  // RefSCC already known to reach the source (the set is seeded with the
992  // source itself). Only RefSCCs within the postorder range need to be
993  // examined, which bounds the work to the region of the sequence actually
994  // affected by the insertion.
995  auto ComputeSourceConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
996    Set.insert(&SourceC);
997    auto IsConnected = [&](RefSCC &RC) {
998      for (SCC &C : RC)
999        for (Node &N : C)
1000          for (Edge &E : *N)
1001            if (Set.count(G->lookupRefSCC(E.getNode())))
1002              return true;
1003
1004      return false;
1005    };
1006
1007    for (RefSCC *C : make_range(G->PostOrderRefSCCs.begin() + SourceIdx + 1,
1008                                G->PostOrderRefSCCs.begin() + TargetIdx + 1))
1009      if (IsConnected(*C))
1010        Set.insert(C);
1011  };
1012
1013  // Use a normal worklist to find which SCCs the target connects to. We still
1014  // bound the search based on the range in the postorder list we care about,
1015  // but because this is forward connectivity we just "recurse" through the
1016  // edges.
1017  auto ComputeTargetConnectedSet = [&](SmallPtrSetImpl<RefSCC *> &Set) {
1018    Set.insert(this);
1019    SmallVector<RefSCC *, 4> Worklist;
1020    Worklist.push_back(this);
1021    do {
1022      RefSCC &RC = *Worklist.pop_back_val();
1023      for (SCC &C : RC)
1024        for (Node &N : C)
1025          for (Edge &E : *N) {
1026            RefSCC &EdgeRC = *G->lookupRefSCC(E.getNode());
1027            if (G->getRefSCCIndex(EdgeRC) <= SourceIdx)
1028              // Not in the postorder sequence between source and target.
1029              continue;
1030
1031            if (Set.insert(&EdgeRC).second)
1032              Worklist.push_back(&EdgeRC);
1033          }
1034    } while (!Worklist.empty());
1035  };
1036
1037  // Use a generic helper to update the postorder sequence of RefSCCs and return
1038  // a range of any RefSCCs connected into a cycle by inserting this edge. This
1039  // routine will also take care of updating the indices into the postorder
1040  // sequence.
1041  iterator_range<SmallVectorImpl<RefSCC *>::iterator> MergeRange =
1042      updatePostorderSequenceForEdgeInsertion(
1043          SourceC, *this, G->PostOrderRefSCCs, G->RefSCCIndices,
1044          ComputeSourceConnectedSet, ComputeTargetConnectedSet);
1045
1046  // Build a set so we can do fast tests for whether a RefSCC will end up as
1047  // part of the merged RefSCC.
1048  SmallPtrSet<RefSCC *, 16> MergeSet(MergeRange.begin(), MergeRange.end());
1049
1050  // This RefSCC will always be part of that set, so just insert it here.
1051  MergeSet.insert(this);
1052
1053  // Now that we have identified all of the RefSCCs that need to be merged into
1054  // a connected set with the inserted edge, merge all of them into this RefSCC.
1055  SmallVector<SCC *, 16> MergedSCCs;
1056  int SCCIndex = 0;
1057  for (RefSCC *RC : MergeRange) {
1058    assert(RC != this && "We're merging into the target RefSCC, so it "
1059                         "shouldn't be in the range.");
1060
1061    // Walk the inner SCCs to update their up-pointer and their index within
1062    // this RefSCC, and refresh the node-to-SCC map entry for each of their
1063    // nodes.
1064    // FIXME: Walking every node of every merged RefSCC is rather expensive.
1065    for (SCC &InnerC : *RC) {
1066      InnerC.OuterRefSCC = this;
1067      SCCIndices[&InnerC] = SCCIndex++;
1068      for (Node &N : InnerC)
1069        G->SCCMap[&N] = &InnerC;
1070    }
1071
1072    // Now merge in the SCCs. We can actually move here so try to reuse storage
1073    // the first time through.
1074    if (MergedSCCs.empty())
1075      MergedSCCs = std::move(RC->SCCs);
1076    else
1077      MergedSCCs.append(RC->SCCs.begin(), RC->SCCs.end());
1078    RC->SCCs.clear();
1079    DeletedRefSCCs.push_back(RC);
1080  }
1081
1082  // Append our original SCCs to the merged list and move it into place.
1083  for (SCC &InnerC : *this)
1084    SCCIndices[&InnerC] = SCCIndex++;
1085  MergedSCCs.append(SCCs.begin(), SCCs.end());
1086  SCCs = std::move(MergedSCCs);
1087
1088  // Remove the merged away RefSCCs from the post order sequence.
1089  for (RefSCC *RC : MergeRange)
1090    G->RefSCCIndices.erase(RC);
1091  int IndexOffset = MergeRange.end() - MergeRange.begin();
1092  auto EraseEnd =
1093      G->PostOrderRefSCCs.erase(MergeRange.begin(), MergeRange.end());
1094  for (RefSCC *RC : make_range(EraseEnd, G->PostOrderRefSCCs.end()))
1095    G->RefSCCIndices[RC] -= IndexOffset;
1096
1097  // At this point we have a merged RefSCC with a post-order SCCs list, just
1098  // connect the nodes to form the new edge.
1099  SourceN->insertEdgeInternal(TargetN, Edge::Ref);
1100
1101  // We return the list of RefSCCs which were merged so that callers can
1102  // invalidate any data they have associated with them. Note that these
1103  // RefSCCs are no longer in an interesting state (they are totally empty)
1104  // but the pointers will remain stable for the life of the graph itself.
1105  return DeletedRefSCCs;
1106}
1107
1108void LazyCallGraph::RefSCC::removeOutgoingEdge(Node &SourceN, Node &TargetN) {
1109  assert(G->lookupRefSCC(SourceN) == this &&
1110         "The source must be a member of this RefSCC.");
1111  assert(G->lookupRefSCC(TargetN) != this &&
1112         "The target must not be a member of this RefSCC");
1113
1114#ifndef NDEBUG
1115  // In a debug build, verify the RefSCC is valid to start with and when this
1116  // routine finishes.
1117  verify();
1118  auto VerifyOnExit = make_scope_exit([&]() { verify(); });
1119#endif
1120
1121  // First remove it from the node.
1122  bool Removed = SourceN->removeEdgeInternal(TargetN);
1123  (void)Removed;
1124  assert(Removed && "Target not in the edge set for this caller?");
1125}
1126
1127SmallVector<LazyCallGraph::RefSCC *, 1>
1128LazyCallGraph::RefSCC::removeInternalRefEdge(Node &SourceN,
1129                                             ArrayRef<Node *> TargetNs) {
1130  // We return a list of the resulting *new* RefSCCs in post-order.
1131  SmallVector<RefSCC *, 1> Result;
1132
1133#ifndef NDEBUG
1134  // In a debug build, verify the RefSCC is valid to start with and that either
1135  // we return an empty list of result RefSCCs and this RefSCC remains valid,
1136  // or we return new RefSCCs and this RefSCC is dead.
1137  verify();
1138  auto VerifyOnExit = make_scope_exit([&]() {
1139    // If we didn't replace our RefSCC with new ones, check that this one
1140    // remains valid.
1141    if (G)
1142      verify();
1143  });
1144#endif
1145
1146  // First remove the actual edges.
1147  for (Node *TargetN : TargetNs) {
1148    assert(!(*SourceN)[*TargetN].isCall() &&
1149           "Cannot remove a call edge, it must first be made a ref edge");
1150
1151    bool Removed = SourceN->removeEdgeInternal(*TargetN);
1152    (void)Removed;
1153    assert(Removed && "Target not in the edge set for this caller?");
1154  }
1155
1156  // Direct self references don't impact the ref graph at all.
1157  if (llvm::all_of(TargetNs,
1158                   [&](Node *TargetN) { return &SourceN == TargetN; }))
1159    return Result;
1160
1161  // If all targets are in the same SCC as the source, there is no RefSCC
1162  // structure change: only ref edges were removed and that SCC stays intact.
1163  SCC &SourceC = *G->lookupSCC(SourceN);
1164  if (llvm::all_of(TargetNs, [&](Node *TargetN) {
1165        return G->lookupSCC(*TargetN) == &SourceC;
1166      }))
1167    return Result;
1168
1169  // We build somewhat synthetic new RefSCCs by providing a postorder mapping
1170  // for each inner SCC. We store these inside the low-link field of the nodes
1171  // rather than associated with SCCs because this saves a round-trip through
1172  // the node->SCC map and in the common case, SCCs are small. We will verify
1173  // that we always give the same number to every node in the SCC such that
1174  // these are equivalent.
1175  int PostOrderNumber = 0;
1176
1177  // Reset all the other nodes to prepare for a DFS over them, and add them to
1178  // our worklist.
1179  SmallVector<Node *, 8> Worklist;
1180  for (SCC *C : SCCs) {
1181    for (Node &N : *C)
1182      N.DFSNumber = N.LowLink = 0;
1183
1184    Worklist.append(C->Nodes.begin(), C->Nodes.end());
1185  }
1186
1187  // Track the number of nodes in this RefSCC so that we can quickly recognize
1188  // an important special case of the edge removal not breaking the cycle of
1189  // this RefSCC.
1190  const int NumRefSCCNodes = Worklist.size();
1191
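  // As in the SCC-forming walk above, run an iterative Tarjan-style DFS, but
  // here over *all* edges (both call and ref) so that the components formed
  // are RefSCCs. The postorder number of each new RefSCC is stashed in the
  // nodes' low-link fields as described above.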
1192  SmallVector<std::pair<Node *, EdgeSequence::iterator>, 4> DFSStack;
1193  SmallVector<Node *, 4> PendingRefSCCStack;
1194  do {
1195    assert(DFSStack.empty() &&
1196           "Cannot begin a new root with a non-empty DFS stack!");
1197    assert(PendingRefSCCStack.empty() &&
1198           "Cannot begin a new root with pending nodes for an SCC!");
1199
1200    Node *RootN = Worklist.pop_back_val();
1201    // Skip any nodes we've already reached in the DFS.
1202    if (RootN->DFSNumber != 0) {
1203      assert(RootN->DFSNumber == -1 &&
1204             "Shouldn't have any mid-DFS root nodes!");
1205      continue;
1206    }
1207
1208    RootN->DFSNumber = RootN->LowLink = 1;
1209    int NextDFSNumber = 2;
1210
1211    DFSStack.push_back({RootN, (*RootN)->begin()});
1212    do {
1213      Node *N;
1214      EdgeSequence::iterator I;
1215      std::tie(N, I) = DFSStack.pop_back_val();
1216      auto E = (*N)->end();
1217
1218      assert(N->DFSNumber != 0 && "We should always assign a DFS number "
1219                                  "before processing a node.");
1220
1221      while (I != E) {
1222        Node &ChildN = I->getNode();
1223        if (ChildN.DFSNumber == 0) {
1224          // Mark that we should start at this child the next time this
1225          // node is at the top of the stack. We don't start at the next
1226          // child so that this child's lowlink is reflected.
1227          DFSStack.push_back({N, I});
1228
1229          // Continue, resetting to the child node.
1230          ChildN.LowLink = ChildN.DFSNumber = NextDFSNumber++;
1231          N = &ChildN;
1232          I = ChildN->begin();
1233          E = ChildN->end();
1234          continue;
1235        }
1236        if (ChildN.DFSNumber == -1) {
1237          // If this child isn't currently in this RefSCC, no need to process
1238          // it.
1239          ++I;
1240          continue;
1241        }
1242
1243        // Track the lowest link of the children, if any are still in the stack.
1244        // Any child not on the stack will have a LowLink of -1.
1245        assert(ChildN.LowLink != 0 &&
1246               "Low-link must not be zero with a non-zero DFS number.");
1247        if (ChildN.LowLink >= 0 && ChildN.LowLink < N->LowLink)
1248          N->LowLink = ChildN.LowLink;
1249        ++I;
1250      }
1251
1252      // We've finished processing N and its descendants, put it on our pending
1253      // stack to eventually get merged into a RefSCC.
1254      PendingRefSCCStack.push_back(N);
1255
1256      // If this node is linked to some lower entry, continue walking up the
1257      // stack.
1258      if (N->LowLink != N->DFSNumber) {
1259        assert(!DFSStack.empty() &&
1260               "We never found a viable root for a RefSCC to pop off!");
1261        continue;
1262      }
1263
1264      // Otherwise, form a new RefSCC from the top of the pending node stack.
1265      int RefSCCNumber = PostOrderNumber++;
1266      int RootDFSNumber = N->DFSNumber;
1267
1268      // Find the range of the node stack by walking down until we pass the
1269      // root DFS number. Update the DFS numbers and low link numbers in the
1270      // process to avoid re-walking this list where possible.
1271      auto StackRI = find_if(reverse(PendingRefSCCStack), [&](Node *N) {
1272        if (N->DFSNumber < RootDFSNumber)
1273          // We've found the bottom.
1274          return true;
1275
1276        // Update this node and keep scanning.
1277        N->DFSNumber = -1;
1278        // Save the post-order number in the lowlink field so that we can use
1279        // it to map SCCs into new RefSCCs after we finish the DFS.
1280        N->LowLink = RefSCCNumber;
1281        return false;
1282      });
1283      auto RefSCCNodes = make_range(StackRI.base(), PendingRefSCCStack.end());
1284
1285      // If we find a cycle containing all nodes originally in this RefSCC then
1286      // the removal hasn't changed the structure at all. This is an important
1287      // special case and we can directly exit the entire routine more
1288      // efficiently as soon as we discover it.
1289      if (llvm::size(RefSCCNodes) == NumRefSCCNodes) {
1290        // Clear out the low link field as we won't need it.
1291        for (Node *N : RefSCCNodes)
1292          N->LowLink = -1;
1293        // Return the empty result immediately.
1294        return Result;
1295      }
1296
1297      // We've already marked the nodes internally with the RefSCC number so
1298      // just clear them off the stack and continue.
1299      PendingRefSCCStack.erase(RefSCCNodes.begin(), PendingRefSCCStack.end());
1300    } while (!DFSStack.empty());
1301
1302    assert(DFSStack.empty() && "Didn't flush the entire DFS stack!");
1303    assert(PendingRefSCCStack.empty() && "Didn't flush all pending nodes!");
1304  } while (!Worklist.empty());
1305
1306  assert(PostOrderNumber > 1 &&
1307         "Should never finish the DFS when the existing RefSCC remains valid!");
1308
1309  // Otherwise we create a collection of new RefSCCs and build
1310  // a radix-sort style map from postorder number to these new RefSCCs. We then
1311  // append SCCs to each of these RefSCCs in the order they occurred in the
1312  // original SCCs container.
1313  for (int i = 0; i < PostOrderNumber; ++i)
1314    Result.push_back(G->createRefSCC(*G));
1315
1316  // Insert the resulting postorder sequence into the global graph postorder
1317  // sequence before the current RefSCC in that sequence, and then remove the
1318  // current one.
1319  //
1320  // FIXME: It'd be nice to change the APIs so that we returned an iterator
1321  // range over the global postorder sequence and generally use that sequence
1322  // rather than building a separate result vector here.
1323  int Idx = G->getRefSCCIndex(*this);
1324  G->PostOrderRefSCCs.erase(G->PostOrderRefSCCs.begin() + Idx);
1325  G->PostOrderRefSCCs.insert(G->PostOrderRefSCCs.begin() + Idx, Result.begin(),
1326                             Result.end());
1327  for (int i : seq<int>(Idx, G->PostOrderRefSCCs.size()))
1328    G->RefSCCIndices[G->PostOrderRefSCCs[i]] = i;
1329
1330  for (SCC *C : SCCs) {
1331    // We store the SCC number in the node's low-link field above.
1332    int SCCNumber = C->begin()->LowLink;
1333    // Clear out all of the SCC's node's low-link fields now that we're done
1334    // using them as side-storage.
1335    for (Node &N : *C) {
1336      assert(N.LowLink == SCCNumber &&
1337             "Cannot have different numbers for nodes in the same SCC!");
1338      N.LowLink = -1;
1339    }
1340
1341    RefSCC &RC = *Result[SCCNumber];
1342    int SCCIndex = RC.SCCs.size();
1343    RC.SCCs.push_back(C);
1344    RC.SCCIndices[C] = SCCIndex;
1345    C->OuterRefSCC = &RC;
1346  }
1347
1348  // Now that we've moved things into the new RefSCCs, clear out our current
1349  // one.
1350  G = nullptr;
1351  SCCs.clear();
1352  SCCIndices.clear();
1353
1354#ifndef NDEBUG
1355  // Verify the new RefSCCs we've built.
1356  for (RefSCC *RC : Result)
1357    RC->verify();
1358#endif
1359
1360  // Return the new list of RefSCCs.
1361  return Result;
1362}
1363
1364void LazyCallGraph::RefSCC::handleTrivialEdgeInsertion(Node &SourceN,
1365                                                       Node &TargetN) {
1366  // The only trivial case that requires any graph updates is when we add a new
1367  // ref edge and may connect different RefSCCs along that path. This is only
1368  // because of the parents set. Every other part of the graph remains constant
1369  // after this edge insertion.
1370  assert(G->lookupRefSCC(SourceN) == this && "Source must be in this RefSCC.");
1371  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
1372  if (&TargetRC == this)
1373    return;
1374
1375#ifdef EXPENSIVE_CHECKS
1376  assert(TargetRC.isDescendantOf(*this) &&
1377         "Target must be a descendant of the Source.");
1378#endif
1379}
1380
1381void LazyCallGraph::RefSCC::insertTrivialCallEdge(Node &SourceN,
1382                                                  Node &TargetN) {
1383#ifndef NDEBUG
1384  // Check that the RefSCC is still valid when we finish.
1385  auto ExitVerifier = make_scope_exit([this] { verify(); });
1386
1387#ifdef EXPENSIVE_CHECKS
1388  // Check that we aren't breaking some invariants of the SCC graph. Note that
1389  // this is quadratic in the number of edges in the call graph!
1390  SCC &SourceC = *G->lookupSCC(SourceN);
1391  SCC &TargetC = *G->lookupSCC(TargetN);
1392  if (&SourceC != &TargetC)
1393    assert(SourceC.isAncestorOf(TargetC) &&
1394           "Call edge is not trivial in the SCC graph!");
1395#endif // EXPENSIVE_CHECKS
1396#endif // NDEBUG
1397
1398  // First insert it into the source or find the existing edge.
1399  auto InsertResult =
1400      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
1401  if (!InsertResult.second) {
1402    // Already an edge, just update it.
1403    Edge &E = SourceN->Edges[InsertResult.first->second];
1404    if (E.isCall())
1405      return; // Nothing to do!
1406    E.setKind(Edge::Call);
1407  } else {
1408    // Create the new edge.
1409    SourceN->Edges.emplace_back(TargetN, Edge::Call);
1410  }
1411
1412  // Now that we have the edge, handle the graph fallout.
1413  handleTrivialEdgeInsertion(SourceN, TargetN);
1414}
1415
1416void LazyCallGraph::RefSCC::insertTrivialRefEdge(Node &SourceN, Node &TargetN) {
1417#ifndef NDEBUG
1418  // Check that the RefSCC is still valid when we finish.
1419  auto ExitVerifier = make_scope_exit([this] { verify(); });
1420
1421#ifdef EXPENSIVE_CHECKS
1422  // Check that we aren't breaking some invariants of the RefSCC graph.
1423  RefSCC &SourceRC = *G->lookupRefSCC(SourceN);
1424  RefSCC &TargetRC = *G->lookupRefSCC(TargetN);
1425  if (&SourceRC != &TargetRC)
1426    assert(SourceRC.isAncestorOf(TargetRC) &&
1427           "Ref edge is not trivial in the RefSCC graph!");
1428#endif // EXPENSIVE_CHECKS
1429#endif // NDEBUG
1430
1431  // First insert it into the source or find the existing edge.
1432  auto InsertResult =
1433      SourceN->EdgeIndexMap.insert({&TargetN, SourceN->Edges.size()});
1434  if (!InsertResult.second)
1435    // Already an edge, we're done.
1436    return;
1437
1438  // Create the new edge.
1439  SourceN->Edges.emplace_back(TargetN, Edge::Ref);
1440
1441  // Now that we have the edge, handle the graph fallout.
1442  handleTrivialEdgeInsertion(SourceN, TargetN);
1443}
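
// A minimal usage sketch for the trivial-edge routines above, assuming the new
// edge is already known not to change the SCC or RefSCC structure (for
// example, a call devirtualized toward a function in a descendant SCC). `G`,
// `Caller`, and `Callee` are hypothetical names:
//
//   LazyCallGraph::Node &CallerN = G.get(Caller);
//   LazyCallGraph::Node &CalleeN = G.get(Callee);
//   LazyCallGraph::RefSCC &RC = *G.lookupRefSCC(CallerN);
//   RC.insertTrivialCallEdge(CallerN, CalleeN); // Or insertTrivialRefEdge.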
1444
1445void LazyCallGraph::RefSCC::replaceNodeFunction(Node &N, Function &NewF) {
1446  Function &OldF = N.getFunction();
1447
1448#ifndef NDEBUG
1449  // Check that the RefSCC is still valid when we finish.
1450  auto ExitVerifier = make_scope_exit([this] { verify(); });
1451
1452  assert(G->lookupRefSCC(N) == this &&
1453         "Cannot replace the function of a node outside this RefSCC.");
1454
1455  assert(G->NodeMap.find(&NewF) == G->NodeMap.end() &&
1456         "Must not have already walked the new function!");
1457
1458  // It is important that this replacement not introduce graph changes so we
1459  // insist that the caller has already removed every use of the original
1460  // function and that all uses of the new function correspond to existing
1461  // edges in the graph. The common and expected way to use this is when
1462  // replacing the function itself in the IR without changing the call graph
1463  // shape and just updating the analysis based on that.
1464  assert(&OldF != &NewF && "Cannot replace a function with itself!");
1465  assert(OldF.use_empty() &&
1466         "Must have moved all uses from the old function to the new!");
1467#endif
1468
1469  N.replaceFunction(NewF);
1470
1471  // Update various call graph maps.
1472  G->NodeMap.erase(&OldF);
1473  G->NodeMap[&NewF] = &N;
1474}
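
// A minimal usage sketch, assuming `NewF` is a freshly created replacement
// (e.g. a signature-changed clone) for the node's current function, and that
// every use of the old function has already been rewritten to `NewF`. `G`,
// `OldF`, and `NewF` are hypothetical names:
//
//   LazyCallGraph::Node &N = G.get(OldF);
//   G.lookupRefSCC(N)->replaceNodeFunction(N, NewF);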
1475
1476void LazyCallGraph::insertEdge(Node &SourceN, Node &TargetN, Edge::Kind EK) {
1477  assert(SCCMap.empty() &&
1478         "This method cannot be called after SCCs have been formed!");
1479
1480  return SourceN->insertEdgeInternal(TargetN, EK);
1481}
1482
1483void LazyCallGraph::removeEdge(Node &SourceN, Node &TargetN) {
1484  assert(SCCMap.empty() &&
1485         "This method cannot be called after SCCs have been formed!");
1486
1487  bool Removed = SourceN->removeEdgeInternal(TargetN);
1488  (void)Removed;
1489  assert(Removed && "Target not in the edge set for this caller?");
1490}
1491
1492void LazyCallGraph::removeDeadFunction(Function &F) {
1493  // FIXME: This is unnecessarily restrictive. We should be able to remove
1494  // functions which recursively call themselves.
1495  assert(F.use_empty() &&
1496         "This routine should only be called on trivially dead functions!");
1497
1498  // We shouldn't remove library functions as they are never really dead while
1499  // the call graph is in use -- every function definition refers to them.
1500  assert(!isLibFunction(F) &&
1501         "Must not remove lib functions from the call graph!");
1502
1503  auto NI = NodeMap.find(&F);
1504  if (NI == NodeMap.end())
1505    // Not in the graph at all!
1506    return;
1507
1508  Node &N = *NI->second;
1509  NodeMap.erase(NI);
1510
1511  // Remove this from the entry edges if present.
1512  EntryEdges.removeEdgeInternal(N);
1513
1514  if (SCCMap.empty()) {
1515    // No SCCs have been formed, so removing this is fine and there is nothing
1516    // else necessary at this point but clearing out the node.
1517    N.clear();
1518    return;
1519  }
1520
1521  // Cannot remove a function which has yet to be visited in the DFS walk, so
1522  // if we have a node at all then we must have an SCC and RefSCC.
1523  auto CI = SCCMap.find(&N);
1524  assert(CI != SCCMap.end() &&
1525         "Tried to remove a node without an SCC after DFS walk started!");
1526  SCC &C = *CI->second;
1527  SCCMap.erase(CI);
1528  RefSCC &RC = C.getOuterRefSCC();
1529
1530  // This node must be the only member of its SCC as it has no callers, and
1531  // that SCC must be the only member of a RefSCC as it has no references.
1532  // Validate these properties first.
1533  assert(C.size() == 1 && "Dead functions must be in a singular SCC");
1534  assert(RC.size() == 1 && "Dead functions must be in a singular RefSCC");
1535
1536  auto RCIndexI = RefSCCIndices.find(&RC);
1537  int RCIndex = RCIndexI->second;
1538  PostOrderRefSCCs.erase(PostOrderRefSCCs.begin() + RCIndex);
1539  RefSCCIndices.erase(RCIndexI);
1540  for (int i = RCIndex, Size = PostOrderRefSCCs.size(); i < Size; ++i)
1541    RefSCCIndices[PostOrderRefSCCs[i]] = i;
1542
1543  // Finally clear out all the data structures from the node down through the
1544  // components.
1545  N.clear();
1546  N.G = nullptr;
1547  N.F = nullptr;
1548  C.clear();
1549  RC.clear();
1550  RC.G = nullptr;
1551
1552  // Nothing to delete as all the objects are allocated in stable bump pointer
1553  // allocators.
1554}
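
// A minimal usage sketch, assuming a pass has found `DeadF` to be trivially
// dead (no uses, not a library function); the graph is updated before the IR
// is actually deleted. `G` and `DeadF` are hypothetical names:
//
//   if (DeadF.use_empty() && !G.isLibFunction(DeadF)) {
//     G.removeDeadFunction(DeadF);
//     DeadF.eraseFromParent();
//   }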
1555
1556LazyCallGraph::Node &LazyCallGraph::insertInto(Function &F, Node *&MappedN) {
1557  return *new (MappedN = BPA.Allocate()) Node(*this, F);
1558}
1559
1560void LazyCallGraph::updateGraphPtrs() {
1561  // Walk the node map to update the nodes' graph pointers. While this
1562  // iterates in an unstable order, the order has no effect on the result.
1563  for (auto &FunctionNodePair : NodeMap)
1564    FunctionNodePair.second->G = this;
1565
1566  for (auto *RC : PostOrderRefSCCs)
1567    RC->G = this;
1568}
1569
1570template <typename RootsT, typename GetBeginT, typename GetEndT,
1571          typename GetNodeT, typename FormSCCCallbackT>
1572void LazyCallGraph::buildGenericSCCs(RootsT &&Roots, GetBeginT &&GetBegin,
1573                                     GetEndT &&GetEnd, GetNodeT &&GetNode,
1574                                     FormSCCCallbackT &&FormSCC) {
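  // This is an iterative formulation of Tarjan's strongly connected component
  // algorithm: DFSNumber records discovery order, LowLink tracks the smallest
  // DFS number reachable from a node's DFS subtree, and a node is the root of
  // an SCC exactly when its LowLink equals its own DFSNumber. The accessor
  // callbacks select which edges (call or ref) the walk follows.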
1575  using EdgeItT = decltype(GetBegin(std::declval<Node &>()));
1576
1577  SmallVector<std::pair<Node *, EdgeItT>, 16> DFSStack;
1578  SmallVector<Node *, 16> PendingSCCStack;
1579
1580  // Scan down the stack and DFS across the edges.
1581  for (Node *RootN : Roots) {
1582    assert(DFSStack.empty() &&
1583           "Cannot begin a new root with a non-empty DFS stack!");
1584    assert(PendingSCCStack.empty() &&
1585           "Cannot begin a new root with pending nodes for an SCC!");
1586
1587    // Skip any nodes we've already reached in the DFS.
1588    if (RootN->DFSNumber != 0) {
1589      assert(RootN->DFSNumber == -1 &&
1590             "Shouldn't have any mid-DFS root nodes!");
1591      continue;
1592    }
1593
1594    RootN->DFSNumber = RootN->LowLink = 1;
1595    int NextDFSNumber = 2;
1596
1597    DFSStack.push_back({RootN, GetBegin(*RootN)});
1598    do {
1599      Node *N;
1600      EdgeItT I;
1601      std::tie(N, I) = DFSStack.pop_back_val();
1602      auto E = GetEnd(*N);
1603      while (I != E) {
1604        Node &ChildN = GetNode(I);
1605        if (ChildN.DFSNumber == 0) {
1606          // We haven't yet visited this child, so descend, pushing the current
1607          // node onto the stack.
1608          DFSStack.push_back({N, I});
1609
1610          ChildN.DFSNumber = ChildN.LowLink = NextDFSNumber++;
1611          N = &ChildN;
1612          I = GetBegin(*N);
1613          E = GetEnd(*N);
1614          continue;
1615        }
1616
1617        // If the child has already been added to some child component, it
1618        // couldn't impact the low-link of this parent because it isn't
1619        // connected, and thus its low-link isn't relevant so skip it.
1620        if (ChildN.DFSNumber == -1) {
1621          ++I;
1622          continue;
1623        }
1624
1625        // Track the lowest linked child as the lowest link for this node.
1626        assert(ChildN.LowLink > 0 && "Must have a positive low-link number!");
1627        if (ChildN.LowLink < N->LowLink)
1628          N->LowLink = ChildN.LowLink;
1629
1630        // Move to the next edge.
1631        ++I;
1632      }
1633
1634      // We've finished processing N and its descendants, put it on our pending
1635      // SCC stack to eventually get merged into an SCC of nodes.
1636      PendingSCCStack.push_back(N);
1637
1638      // If this node is linked to some lower entry, continue walking up the
1639      // stack.
1640      if (N->LowLink != N->DFSNumber)
1641        continue;
1642
1643      // Otherwise, we've completed an SCC. Append it to our post order list of
1644      // SCCs.
1645      int RootDFSNumber = N->DFSNumber;
1646      // Find the range of the node stack by walking down until we pass the
1647      // root DFS number.
1648      auto SCCNodes = make_range(
1649          PendingSCCStack.rbegin(),
1650          find_if(reverse(PendingSCCStack), [RootDFSNumber](const Node *N) {
1651            return N->DFSNumber < RootDFSNumber;
1652          }));
1653      // Form a new SCC out of these nodes and then clear them off our pending
1654      // stack.
1655      FormSCC(SCCNodes);
1656      PendingSCCStack.erase(SCCNodes.end().base(), PendingSCCStack.end());
1657    } while (!DFSStack.empty());
1658  }
1659}
1660
1661/// Build the internal SCCs for a RefSCC from a sequence of nodes.
1662///
1663/// Appends the SCCs to the provided vector and updates the map with their
1664/// indices. Both the vector and map must be empty when passed into this
1665/// routine.
1666void LazyCallGraph::buildSCCs(RefSCC &RC, node_stack_range Nodes) {
1667  assert(RC.SCCs.empty() && "Already built SCCs!");
1668  assert(RC.SCCIndices.empty() && "Already mapped SCC indices!");
1669
1670  for (Node *N : Nodes) {
1671    assert(N->LowLink >= (*Nodes.begin())->LowLink &&
1672           "We cannot have a low link in an SCC lower than its root on the "
1673           "stack!");
1674
1675    // This node will go into the next RefSCC; clear out its DFS and low link
1676    // as we scan.
1677    N->DFSNumber = N->LowLink = 0;
1678  }
1679
1680  // Each RefSCC contains a DAG of the call SCCs. To build these, we do
1681  // a direct walk of the call edges using Tarjan's algorithm. We reuse the
1682  // internal storage as we won't need it for the outer graph's DFS any longer.
1683  buildGenericSCCs(
1684      Nodes, [](Node &N) { return N->call_begin(); },
1685      [](Node &N) { return N->call_end(); },
1686      [](EdgeSequence::call_iterator I) -> Node & { return I->getNode(); },
1687      [this, &RC](node_stack_range Nodes) {
1688        RC.SCCs.push_back(createSCC(RC, Nodes));
1689        for (Node &N : *RC.SCCs.back()) {
1690          N.DFSNumber = N.LowLink = -1;
1691          SCCMap[&N] = RC.SCCs.back();
1692        }
1693      });
1694
1695  // Wire up the SCC indices.
1696  for (int i = 0, Size = RC.SCCs.size(); i < Size; ++i)
1697    RC.SCCIndices[RC.SCCs[i]] = i;
1698}
1699
1700void LazyCallGraph::buildRefSCCs() {
1701  if (EntryEdges.empty() || !PostOrderRefSCCs.empty())
1702    // RefSCCs are either non-existent or already built!
1703    return;
1704
1705  assert(RefSCCIndices.empty() && "Already mapped RefSCC indices!");
1706
1707  SmallVector<Node *, 16> Roots;
1708  for (Edge &E : *this)
1709    Roots.push_back(&E.getNode());
1710
1711  // The roots will be popped off a stack, so use reverse to get a less
1712  // surprising order. This doesn't change any of the semantics anywhere.
1713  std::reverse(Roots.begin(), Roots.end());
1714
1715  buildGenericSCCs(
1716      Roots,
1717      [](Node &N) {
1718        // We need to populate each node as we begin to walk its edges.
1719        N.populate();
1720        return N->begin();
1721      },
1722      [](Node &N) { return N->end(); },
1723      [](EdgeSequence::iterator I) -> Node & { return I->getNode(); },
1724      [this](node_stack_range Nodes) {
1725        RefSCC *NewRC = createRefSCC(*this);
1726        buildSCCs(*NewRC, Nodes);
1727
1728        // Push the new node into the postorder list and remember its position
1729        // in the index map.
1730        bool Inserted =
1731            RefSCCIndices.insert({NewRC, PostOrderRefSCCs.size()}).second;
1732        (void)Inserted;
1733        assert(Inserted && "Cannot already have this RefSCC in the index map!");
1734        PostOrderRefSCCs.push_back(NewRC);
1735#ifndef NDEBUG
1736        NewRC->verify();
1737#endif
1738      });
1739}
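
// A minimal sketch of the typical driver sequence, also exercised by the
// printer pass below (`G` is a hypothetical, already constructed graph):
//
//   G.buildRefSCCs();
//   for (LazyCallGraph::RefSCC &RC : G.postorder_ref_sccs())
//     for (LazyCallGraph::SCC &C : RC)
//       ; // Visit call SCCs bottom-up within each RefSCC.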
1740
1741AnalysisKey LazyCallGraphAnalysis::Key;
1742
1743LazyCallGraphPrinterPass::LazyCallGraphPrinterPass(raw_ostream &OS) : OS(OS) {}
1744
1745static void printNode(raw_ostream &OS, LazyCallGraph::Node &N) {
1746  OS << "  Edges in function: " << N.getFunction().getName() << "\n";
1747  for (LazyCallGraph::Edge &E : N.populate())
1748    OS << "    " << (E.isCall() ? "call" : "ref ") << " -> "
1749       << E.getFunction().getName() << "\n";
1750
1751  OS << "\n";
1752}
1753
1754static void printSCC(raw_ostream &OS, LazyCallGraph::SCC &C) {
1755  OS << "    SCC with " << C.size() << " functions:\n";
1756
1757  for (LazyCallGraph::Node &N : C)
1758    OS << "      " << N.getFunction().getName() << "\n";
1759}
1760
1761static void printRefSCC(raw_ostream &OS, LazyCallGraph::RefSCC &C) {
1762  OS << "  RefSCC with " << C.size() << " call SCCs:\n";
1763
1764  for (LazyCallGraph::SCC &InnerC : C)
1765    printSCC(OS, InnerC);
1766
1767  OS << "\n";
1768}
1769
1770PreservedAnalyses LazyCallGraphPrinterPass::run(Module &M,
1771                                                ModuleAnalysisManager &AM) {
1772  LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
1773
1774  OS << "Printing the call graph for module: " << M.getModuleIdentifier()
1775     << "\n\n";
1776
1777  for (Function &F : M)
1778    printNode(OS, G.get(F));
1779
1780  G.buildRefSCCs();
1781  for (LazyCallGraph::RefSCC &C : G.postorder_ref_sccs())
1782    printRefSCC(OS, C);
1783
1784  return PreservedAnalyses::all();
1785}
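
// For reference, a rough sketch of the output shape for a tiny module in
// which `a` calls `b` and takes the address of `c` (module and function names
// are hypothetical; the components for `b` and `c` are elided):
//
//   Printing the call graph for module: m.ll
//
//     Edges in function: a
//       call -> b
//       ref  -> c
//
//     RefSCC with 1 call SCCs:
//       SCC with 1 functions:
//         a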
1786
1787LazyCallGraphDOTPrinterPass::LazyCallGraphDOTPrinterPass(raw_ostream &OS)
1788    : OS(OS) {}
1789
1790static void printNodeDOT(raw_ostream &OS, LazyCallGraph::Node &N) {
1791  std::string Name = "\"" + DOT::EscapeString(N.getFunction().getName()) + "\"";
1792
1793  for (LazyCallGraph::Edge &E : N.populate()) {
1794    OS << "  " << Name << " -> \""
1795       << DOT::EscapeString(E.getFunction().getName()) << "\"";
1796    if (!E.isCall()) // It is a ref edge.
1797      OS << " [style=dashed,label=\"ref\"]";
1798    OS << ";\n";
1799  }
1800
1801  OS << "\n";
1802}
1803
1804PreservedAnalyses LazyCallGraphDOTPrinterPass::run(Module &M,
1805                                                   ModuleAnalysisManager &AM) {
1806  LazyCallGraph &G = AM.getResult<LazyCallGraphAnalysis>(M);
1807
1808  OS << "digraph \"" << DOT::EscapeString(M.getModuleIdentifier()) << "\" {\n";
1809
1810  for (Function &F : M)
1811    printNodeDOT(OS, G.get(F));
1812
1813  OS << "}\n";
1814
1815  return PreservedAnalyses::all();
1816}
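
// For reference, a rough sketch of the emitted DOT for the same tiny module
// (`a` calls `b` and takes the address of `c`; all names are hypothetical):
//
//   digraph "m.ll" {
//     "a" -> "b";
//     "a" -> "c" [style=dashed,label="ref"];
//   }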
1817