// DeadStoreElimination.cpp, revision 360784
//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OrderedBasicBlock.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;

/// Delete this instruction.  Before we do, go through and zero out all the
/// operands of this instruction.  If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL, OrderedBasicBlock &OBB,
                      MapVector<Instruction *, bool> &ThrowableInst,
                      SmallSetVector<const Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    // Mark the DeadInst as dead in the list of throwable instructions.
    auto It = ThrowableInst.find(DeadInst);
    if (It != ThrowableInst.end())
      ThrowableInst[It->first] = false;
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    IOL.erase(DeadInst);
    OBB.eraseInstruction(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
  // Pop dead entries from back of ThrowableInst till we find an alive entry.
  while (!ThrowableInst.empty() && !ThrowableInst.back().second)
    ThrowableInst.pop_back();
}

/// Does this instruction write some memory?  This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      LibFunc LF;
      if (TLI.getLibFunc(*F, LF) && TLI.has(LF)) {
        switch (LF) {
        case LibFunc_strcpy:
        case LibFunc_strncpy:
        case LibFunc_strcat:
        case LibFunc_strncat:
          return true;
        default:
          return false;
        }
      }
    }
  }
  return false;
}

/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}

/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}

/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // Note: We only get here for calls with analyzable writes, i.e. libcalls.
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}

/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
      default: return false;
      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memcpy_element_unordered_atomic:
      case Intrinsic::memset_element_unordered_atomic:
        // Do shorten memory intrinsics.
        // FIXME: Add memmove if it's also safe to transform.
        return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}

/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  // TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}

static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace

/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
  // get imprecise values here, though (except for unknown sizes).
  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
    return OW_Unknown;

  const uint64_t LaterSize = Later.Size.getValue();
  const uint64_t EarlierSize = Earlier.Size.getValue();

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was at least as large as the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (LaterSize >= EarlierSize)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument).  If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      LaterSize >= EarlierSize &&
      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is never called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + EarlierSize) &&
      int64_t(LaterOff + LaterSize) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + EarlierSize)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
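    // For example, with an earlier store covering [0, 8), later stores
    // covering [0, 4) and then [4, 8) get merged into the composite interval
    // [0, 8); it fully covers the earlier store, so the check below returns
    // OW_Complete.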
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + EarlierSize)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + EarlierSize) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + EarlierSize)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //      |--earlier--|
  //                |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //                |--earlier--|
  //      |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}

/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are no partial overlaps).  Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)  // Inst
    //
    // with Inst reading/writing a size greater than or equal to DepWrite's, we
    // can reason as follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst.  Since
    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}

/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the block containing SecondI.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the block of FirstI.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before FirstI if this is FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of
      // SecondBB.
      assert(B == SecondBB && "first block is not the block of SecondI");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
          "Should not hit the entry block because SecondI must be dominated by "
          "FirstI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}

/// Find all blocks that will unconditionally lead to the block BB and append
/// them to Blocks.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    Instruction *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}

/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL, OrderedBasicBlock &OBB,
                       MapVector<Instruction *, bool> &ThrowableInst) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon-to-be-freed memory:\n  DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, OBB,
                            ThrowableInst);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}

/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<const Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(UnderlyingPointer);
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](const Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}

/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL, OrderedBasicBlock &OBB,
                           MapVector<Instruction *, bool> &ThrowableInst) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<const Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyway.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();

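  // Invariant for the backward walk below: DeadStackObjects only contains
  // objects with no (potential) read between the current instruction and the
  // end of the function, so a store into one of them is dead.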
  // Scan the basic block backwards
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
      // See through pointer-to-pointer bitcasts
      SmallVector<const Value *, 4> Pointers;
      GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);

      // Stores to stack values are valid candidates for removal.
      bool AllDead = true;
      for (const Value *Pointer : Pointers)
        if (!DeadStackObjects.count(Pointer)) {
          AllDead = false;
          break;
        }

      if (AllDead) {
        Instruction *Dead = &*BBI;

        LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                          << *Dead << "\n  Objects: ";
                   for (SmallVectorImpl<const Value *>::iterator I =
                            Pointers.begin(),
                        E = Pointers.end();
                        I != E; ++I) {
                     dbgs() << **I;
                     if (std::next(I) != E)
                       dbgs() << ", ";
                   } dbgs()
                   << '\n');

        // DCE instructions only used to calculate that store.
        deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, OBB, ThrowableInst,
                              &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(&*BBI, TLI)) {
      LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n  DEAD: "
                        << *&*BBI << '\n');
      deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, OBB, ThrowableInst,
                            &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(Call))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](const Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(
            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of already
    // visible stores; they do not make a store visible to other threads. So,
    // skipping over a fence does not change a store from being dead.
    if (isa<FenceInst>(*BBI))
      continue;

    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory.  Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}

static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: Base this on the target vector size so that if the earlier store was
  // too small to get vector writes anyway, then it's likely a good idea to
  // shorten it. Power-of-2 vector writes are probably always a bad idea to
  // shorten, as any store/memset/memcpy is likely using vector instructions,
  // so shortening it to a non-vector size is likely to be slower.
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

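  // Only shorten if the new boundary stays well aligned: either the boundary
  // offset is a power of two at least as large as the original destination
  // alignment, or it is a multiple of that alignment. For example, an 8-byte
  // memset whose tail [4, 8) is overwritten by a later store can be trimmed
  // to a 4-byte memset.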
  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n  KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}

static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
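  // Intervals are keyed by end offset, so the last entry ends furthest right
  // and is the only candidate that can cover the earlier store's tail.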
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
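  // Symmetrically, the first entry ends furthest left; since touching
  // intervals are coalesced on insertion, it is the only candidate that can
  // cover the earlier store's head.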
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size.getValue());
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}

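/// Try to remove a store that writes a value already present in memory: either
/// a store of a value just loaded from the same pointer, or a store of zero
/// into memory freshly returned by calloc.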
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               OrderedBasicBlock &OBB,
                               MapVector<Instruction *, bool> &ThrowableInst) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n  LOAD: "
                 << *DepLoad << "\n  STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, OBB, ThrowableInst);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n  DEAD: "
                 << *Inst << "\n  OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, OBB, ThrowableInst);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}

static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

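  // OrderedBasicBlock lazily numbers the instructions in BB so that repeated
  // intra-block dominance queries stay cheap. ThrowableInst records may-throw
  // instructions in program order; deleteDeadInstruction flips an entry's
  // value to false when that instruction is removed.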
  OrderedBasicBlock OBB(&BB);
  MapVector<Instruction *, bool> ThrowableInst;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, OBB, ThrowableInst);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    if (Inst->mayThrow()) {
      ThrowableInst[Inst] = true;
      continue;
    }

    // Check to see if Inst writes to memory.  If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, OBB,
                           ThrowableInst)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst, &OBB);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;

    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Find the last throwable instruction not removed by call to
      // deleteDeadInstruction.
      Instruction *LastThrowing = nullptr;
      if (!ThrowableInst.empty())
        LastThrowing = ThrowableInst.back().first;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      if (LastThrowing && OBB.dominates(DepWrite, LastThrowing)) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function where the
          // allocation doesn't escape before the last throwing instruction;
          // PointerMayBeCaptured is a reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) one which we know
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DepWrite
                            << "\n  KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, OBB,
                                ThrowableInst);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst, &OBB);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          // The overwrite result is known, so these must be known, too.
          int64_t EarlierSize = DepLoc.Size.getValue();
          int64_t LaterSize = Loc.Size.getValue();
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                    InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              DL.typeSizeEqualsStoreSize(
                  Earlier->getValueOperand()->getType()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              DL.typeSizeEqualsStoreSize(
                  Later->getValueOperand()->getType()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //   a) partially overwritten by the store to 'Loc',
            //   b) the later store is fully contained in the earlier one,
            //   c) they both have a constant value, and
            //   d) none of the two stores need padding,
            // merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)

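            // Worked example (little-endian): an earlier i32 store of
            // 0x11223344 partially overwritten at byte offset 2 by a later
            // i16 store of 0xAABB gives BitOffsetDiff = 16, LShiftAmount = 16,
            // Mask = 0xFFFF0000, and Merged = 0xAABB3344.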
            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *DepWrite
                              << "\n  Later: " << *Inst
                              << "\n  Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false,
                MaybeAlign(Earlier->getAlignment()), Earlier->getOrdering(),
                Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove the earlier, wider store.
            OBB.replaceInstruction(DepWrite, SI);

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, OBB,
                                  ThrowableInst);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, OBB,
                                  ThrowableInst);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, OBB, ThrowableInst);

  return MadeChange;
}

static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks.  Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}