// HexagonFrameLowering.cpp revision 360784
1240116Smarcel//===- HexagonFrameLowering.cpp - Define frame lowering -------------------===//
2240116Smarcel//
3240116Smarcel// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4240116Smarcel// See https://llvm.org/LICENSE.txt for license information.
5240116Smarcel// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6240116Smarcel//
7240116Smarcel//
8240116Smarcel//===----------------------------------------------------------------------===//
9240116Smarcel
10240116Smarcel#include "HexagonFrameLowering.h"
11240116Smarcel#include "HexagonBlockRanges.h"
12240116Smarcel#include "HexagonInstrInfo.h"
13240116Smarcel#include "HexagonMachineFunctionInfo.h"
14240116Smarcel#include "HexagonRegisterInfo.h"
15240116Smarcel#include "HexagonSubtarget.h"
16240116Smarcel#include "HexagonTargetMachine.h"
17240116Smarcel#include "MCTargetDesc/HexagonBaseInfo.h"
18240116Smarcel#include "llvm/ADT/BitVector.h"
19240116Smarcel#include "llvm/ADT/DenseMap.h"
20240116Smarcel#include "llvm/ADT/None.h"
21240116Smarcel#include "llvm/ADT/Optional.h"
22240116Smarcel#include "llvm/ADT/PostOrderIterator.h"
23240116Smarcel#include "llvm/ADT/SetVector.h"
24240116Smarcel#include "llvm/ADT/SmallSet.h"
25240116Smarcel#include "llvm/ADT/SmallVector.h"
26240116Smarcel#include "llvm/CodeGen/LivePhysRegs.h"
27240116Smarcel#include "llvm/CodeGen/MachineBasicBlock.h"
28240116Smarcel#include "llvm/CodeGen/MachineDominators.h"
29240116Smarcel#include "llvm/CodeGen/MachineFrameInfo.h"
30240116Smarcel#include "llvm/CodeGen/MachineFunction.h"
31240116Smarcel#include "llvm/CodeGen/MachineFunctionPass.h"
32240116Smarcel#include "llvm/CodeGen/MachineInstr.h"
33240116Smarcel#include "llvm/CodeGen/MachineInstrBuilder.h"
34240116Smarcel#include "llvm/CodeGen/MachineMemOperand.h"
35240116Smarcel#include "llvm/CodeGen/MachineModuleInfo.h"
36240116Smarcel#include "llvm/CodeGen/MachineOperand.h"
37240116Smarcel#include "llvm/CodeGen/MachinePostDominators.h"
38240116Smarcel#include "llvm/CodeGen/MachineRegisterInfo.h"
39240116Smarcel#include "llvm/CodeGen/PseudoSourceValue.h"
40240116Smarcel#include "llvm/CodeGen/RegisterScavenging.h"
41240116Smarcel#include "llvm/CodeGen/TargetRegisterInfo.h"
42240116Smarcel#include "llvm/IR/Attributes.h"
43240116Smarcel#include "llvm/IR/DebugLoc.h"
44240116Smarcel#include "llvm/IR/Function.h"
45240116Smarcel#include "llvm/MC/MCDwarf.h"
46240116Smarcel#include "llvm/MC/MCRegisterInfo.h"
47240116Smarcel#include "llvm/Pass.h"
48240116Smarcel#include "llvm/Support/CodeGen.h"
49240116Smarcel#include "llvm/Support/CommandLine.h"
50240116Smarcel#include "llvm/Support/Compiler.h"
51240116Smarcel#include "llvm/Support/Debug.h"
52240116Smarcel#include "llvm/Support/ErrorHandling.h"
53240116Smarcel#include "llvm/Support/MathExtras.h"
54240116Smarcel#include "llvm/Support/raw_ostream.h"
55240116Smarcel#include "llvm/Target/TargetMachine.h"
56240116Smarcel#include "llvm/Target/TargetOptions.h"
57240116Smarcel#include <algorithm>
58240116Smarcel#include <cassert>
59240116Smarcel#include <cstdint>
60240116Smarcel#include <iterator>
61240116Smarcel#include <limits>
62240116Smarcel#include <map>
63240116Smarcel#include <utility>
64240116Smarcel#include <vector>
65240116Smarcel
66240116Smarcel#define DEBUG_TYPE "hexagon-pei"
67240116Smarcel
68240116Smarcel// Hexagon stack frame layout as defined by the ABI:
69240116Smarcel//
70240116Smarcel//                                                       Incoming arguments
71240116Smarcel//                                                       passed via stack
72240116Smarcel//                                                                      |
73240116Smarcel//                                                                      |
74240116Smarcel//        SP during function's                 FP during function's     |
75240116Smarcel//    +-- runtime (top of stack)               runtime (bottom) --+     |
76240116Smarcel//    |                                                           |     |
77240116Smarcel// --++---------------------+------------------+-----------------++-+-------
78240116Smarcel//   |  parameter area for  |  variable-size   |   fixed-size    |LR|  arg
79240116Smarcel//   |   called functions   |  local objects   |  local objects  |FP|
80240116Smarcel// --+----------------------+------------------+-----------------+--+-------
81240116Smarcel//    <-    size known    -> <- size unknown -> <- size known  ->
82240116Smarcel//
83240116Smarcel// Low address                                                 High address
84240116Smarcel//
85240116Smarcel// <--- stack growth
86240116Smarcel//
87240116Smarcel//
88240116Smarcel// - In any circumstances, the outgoing function arguments are always accessi-
89240116Smarcel//   ble using the SP, and the incoming arguments are accessible using the FP.
90240116Smarcel// - If the local objects are not aligned, they can always be accessed using
91240116Smarcel//   the FP.
92240116Smarcel// - If there are no variable-sized objects, the local objects can always be
93240116Smarcel//   accessed using the SP, regardless whether they are aligned or not. (The
94240116Smarcel//   alignment padding will be at the bottom of the stack (highest address),
95240116Smarcel//   and so the offset with respect to the SP will be known at the compile-
96240116Smarcel//   -time.)
97240116Smarcel//
98240116Smarcel// The only complication occurs if there are both, local aligned objects, and
99240116Smarcel// dynamically allocated (variable-sized) objects. The alignment pad will be
100240116Smarcel// placed between the FP and the local objects, thus preventing the use of the
101240116Smarcel// FP to access the local objects. At the same time, the variable-sized objects
102240116Smarcel// will be between the SP and the local objects, thus introducing an unknown
103240116Smarcel// distance from the SP to the locals.
104240116Smarcel//
105240116Smarcel// To avoid this problem, a new register is created that holds the aligned
106240116Smarcel// address of the bottom of the stack, referred in the sources as AP (aligned
107240116Smarcel// pointer). The AP will be equal to "FP-p", where "p" is the smallest pad
108240116Smarcel// that aligns AP to the required boundary (a maximum of the alignments of
109240116Smarcel// all stack objects, fixed- and variable-sized). All local objects[1] will
110240116Smarcel// then use AP as the base pointer.
111240116Smarcel// [1] The exception is with "fixed" stack objects. "Fixed" stack objects get
112240116Smarcel// their name from being allocated at fixed locations on the stack, relative
113240116Smarcel// to the FP. In the presence of dynamic allocation and local alignment, such
114240116Smarcel// objects can only be accessed through the FP.
115240116Smarcel//
116240116Smarcel// Illustration of the AP:
117240116Smarcel//                                                                FP --+
118240116Smarcel//                                                                     |
119240116Smarcel// ---------------+---------------------+-----+-----------------------++-+--
120240116Smarcel//   Rest of the  | Local stack objects | Pad |  Fixed stack objects  |LR|
121240116Smarcel//   stack frame  | (aligned)           |     |  (CSR, spills, etc.)  |FP|
122240116Smarcel// ---------------+---------------------+-----+-----------------+-----+--+--
123240116Smarcel//                                      |<-- Multiple of the -->|
124240116Smarcel//                                           stack alignment    +-- AP
125240116Smarcel//
126240116Smarcel// The AP is set up at the beginning of the function. Since it is not a dedi-
127240116Smarcel// cated (reserved) register, it needs to be kept live throughout the function
128240116Smarcel// to be available as the base register for local object accesses.
129240116Smarcel// Normally, an address of a stack objects is obtained by a pseudo-instruction
130240116Smarcel// PS_fi. To access local objects with the AP register present, a different
131240116Smarcel// pseudo-instruction needs to be used: PS_fia. The PS_fia takes one extra
132240116Smarcel// argument compared to PS_fi: the first input register is the AP register.
133240116Smarcel// This keeps the register live between its definition and its uses.
134240116Smarcel
135240116Smarcel// The AP register is originally set up using pseudo-instruction PS_aligna:
136240116Smarcel//   AP = PS_aligna A
137240116Smarcel// where
138240116Smarcel//   A  - required stack alignment
139240116Smarcel// The alignment value must be the maximum of all alignments required by
140240116Smarcel// any stack object.
141240116Smarcel
142240116Smarcel// The dynamic allocation uses a pseudo-instruction PS_alloca:
143240116Smarcel//   Rd = PS_alloca Rs, A
144240116Smarcel// where
145240116Smarcel//   Rd - address of the allocated space
146240116Smarcel//   Rs - minimum size (the actual allocated can be larger to accommodate
147240116Smarcel//        alignment)
148240116Smarcel//   A  - required alignment
149240116Smarcel
150240116Smarcelusing namespace llvm;
151240116Smarcel
152240116Smarcelstatic cl::opt<bool> DisableDeallocRet("disable-hexagon-dealloc-ret",
153240116Smarcel    cl::Hidden, cl::desc("Disable Dealloc Return for Hexagon target"));
154240116Smarcel
155240116Smarcelstatic cl::opt<unsigned> NumberScavengerSlots("number-scavenger-slots",
156240116Smarcel    cl::Hidden, cl::desc("Set the number of scavenger slots"), cl::init(2),
157240116Smarcel    cl::ZeroOrMore);
158240116Smarcel
159240116Smarcelstatic cl::opt<int> SpillFuncThreshold("spill-func-threshold",
160240116Smarcel    cl::Hidden, cl::desc("Specify O2(not Os) spill func threshold"),
161240116Smarcel    cl::init(6), cl::ZeroOrMore);
162240116Smarcel
163240116Smarcelstatic cl::opt<int> SpillFuncThresholdOs("spill-func-threshold-Os",
164240116Smarcel    cl::Hidden, cl::desc("Specify Os spill func threshold"),
165240116Smarcel    cl::init(1), cl::ZeroOrMore);
166240116Smarcel
167240116Smarcelstatic cl::opt<bool> EnableStackOVFSanitizer("enable-stackovf-sanitizer",
168240116Smarcel    cl::Hidden, cl::desc("Enable runtime checks for stack overflow."),
169240116Smarcel    cl::init(false), cl::ZeroOrMore);
170240116Smarcel
171240116Smarcelstatic cl::opt<bool> EnableShrinkWrapping("hexagon-shrink-frame",
172240116Smarcel    cl::init(true), cl::Hidden, cl::ZeroOrMore,
173240116Smarcel    cl::desc("Enable stack frame shrink wrapping"));
174240116Smarcel
175240116Smarcelstatic cl::opt<unsigned> ShrinkLimit("shrink-frame-limit",
176240116Smarcel    cl::init(std::numeric_limits<unsigned>::max()), cl::Hidden, cl::ZeroOrMore,
177240116Smarcel    cl::desc("Max count of stack frame shrink-wraps"));
178240116Smarcel
179240116Smarcelstatic cl::opt<bool> EnableSaveRestoreLong("enable-save-restore-long",
180240116Smarcel    cl::Hidden, cl::desc("Enable long calls for save-restore stubs."),
181240116Smarcel    cl::init(false), cl::ZeroOrMore);
182240116Smarcel
183240116Smarcelstatic cl::opt<bool> EliminateFramePointer("hexagon-fp-elim", cl::init(true),
184240116Smarcel    cl::Hidden, cl::desc("Refrain from using FP whenever possible"));
185240116Smarcel
186240116Smarcelstatic cl::opt<bool> OptimizeSpillSlots("hexagon-opt-spill", cl::Hidden,
187240116Smarcel    cl::init(true), cl::desc("Optimize spill slots"));
188240116Smarcel
189240116Smarcel#ifndef NDEBUG
190240116Smarcelstatic cl::opt<unsigned> SpillOptMax("spill-opt-max", cl::Hidden,
191240116Smarcel    cl::init(std::numeric_limits<unsigned>::max()));
192240116Smarcelstatic unsigned SpillOptCount = 0;
193240116Smarcel#endif
194240116Smarcel
195240116Smarcelnamespace llvm {
196240116Smarcel
197240116Smarcel  void initializeHexagonCallFrameInformationPass(PassRegistry&);
198240116Smarcel  FunctionPass *createHexagonCallFrameInformation();
199240116Smarcel
200240116Smarcel} // end namespace llvm
201240116Smarcel
202240116Smarcelnamespace {
203240116Smarcel
204240116Smarcel  class HexagonCallFrameInformation : public MachineFunctionPass {
205240116Smarcel  public:
206240116Smarcel    static char ID;
207240116Smarcel
208240116Smarcel    HexagonCallFrameInformation() : MachineFunctionPass(ID) {
209240116Smarcel      PassRegistry &PR = *PassRegistry::getPassRegistry();
210240116Smarcel      initializeHexagonCallFrameInformationPass(PR);
211240116Smarcel    }
212240116Smarcel
213240116Smarcel    bool runOnMachineFunction(MachineFunction &MF) override;
214240116Smarcel
215240116Smarcel    MachineFunctionProperties getRequiredProperties() const override {
216240116Smarcel      return MachineFunctionProperties().set(
217240116Smarcel          MachineFunctionProperties::Property::NoVRegs);
218240116Smarcel    }
219240116Smarcel  };
220240116Smarcel
221240116Smarcel  char HexagonCallFrameInformation::ID = 0;
222240116Smarcel
223240116Smarcel} // end anonymous namespace
224240116Smarcel
225240116Smarcelbool HexagonCallFrameInformation::runOnMachineFunction(MachineFunction &MF) {
226240116Smarcel  auto &HFI = *MF.getSubtarget<HexagonSubtarget>().getFrameLowering();
227240116Smarcel  bool NeedCFI = MF.needsFrameMoves();
228240116Smarcel
229240116Smarcel  if (!NeedCFI)
230240116Smarcel    return false;
231240116Smarcel  HFI.insertCFIInstructions(MF);
232240116Smarcel  return true;
233240116Smarcel}
234240116Smarcel
235240116SmarcelINITIALIZE_PASS(HexagonCallFrameInformation, "hexagon-cfi",
236240116Smarcel                "Hexagon call frame information", false, false)
237240116Smarcel
/// Factory function: creates the pass that inserts CFI instructions.
FunctionPass *llvm::createHexagonCallFrameInformation() {
  return new HexagonCallFrameInformation();
}
241240116Smarcel
242240116Smarcel/// Map a register pair Reg to the subregister that has the greater "number",
243240116Smarcel/// i.e. D3 (aka R7:6) will be mapped to R7, etc.
244240116Smarcelstatic unsigned getMax32BitSubRegister(unsigned Reg,
245240116Smarcel                                       const TargetRegisterInfo &TRI,
246240116Smarcel                                       bool hireg = true) {
247240116Smarcel    if (Reg < Hexagon::D0 || Reg > Hexagon::D15)
248240116Smarcel      return Reg;
249240116Smarcel
250240116Smarcel    unsigned RegNo = 0;
251240116Smarcel    for (MCSubRegIterator SubRegs(Reg, &TRI); SubRegs.isValid(); ++SubRegs) {
252240116Smarcel      if (hireg) {
253240116Smarcel        if (*SubRegs > RegNo)
254240116Smarcel          RegNo = *SubRegs;
255240116Smarcel      } else {
256240116Smarcel        if (!RegNo || *SubRegs < RegNo)
257240116Smarcel          RegNo = *SubRegs;
258240116Smarcel      }
259240116Smarcel    }
260240116Smarcel    return RegNo;
261240116Smarcel}
262240116Smarcel
263240116Smarcel/// Returns the callee saved register with the largest id in the vector.
264240116Smarcelstatic unsigned getMaxCalleeSavedReg(const std::vector<CalleeSavedInfo> &CSI,
265240116Smarcel                                     const TargetRegisterInfo &TRI) {
266240116Smarcel    static_assert(Hexagon::R1 > 0,
267240116Smarcel                  "Assume physical registers are encoded as positive integers");
268240116Smarcel    if (CSI.empty())
269240116Smarcel      return 0;
270240116Smarcel
271240116Smarcel    unsigned Max = getMax32BitSubRegister(CSI[0].getReg(), TRI);
272240116Smarcel    for (unsigned I = 1, E = CSI.size(); I < E; ++I) {
273240116Smarcel      unsigned Reg = getMax32BitSubRegister(CSI[I].getReg(), TRI);
274240116Smarcel      if (Reg > Max)
275240116Smarcel        Max = Reg;
276240116Smarcel    }
277240116Smarcel    return Max;
278240116Smarcel}
279240116Smarcel
280240116Smarcel/// Checks if the basic block contains any instruction that needs a stack
281240116Smarcel/// frame to be already in place.
282240116Smarcelstatic bool needsStackFrame(const MachineBasicBlock &MBB, const BitVector &CSR,
283240116Smarcel                            const HexagonRegisterInfo &HRI) {
284240116Smarcel    for (auto &I : MBB) {
285240116Smarcel      const MachineInstr *MI = &I;
286240116Smarcel      if (MI->isCall())
287240116Smarcel        return true;
288240116Smarcel      unsigned Opc = MI->getOpcode();
289240116Smarcel      switch (Opc) {
290240116Smarcel        case Hexagon::PS_alloca:
291240116Smarcel        case Hexagon::PS_aligna:
292240116Smarcel          return true;
293240116Smarcel        default:
294240116Smarcel          break;
295240116Smarcel      }
296240116Smarcel      // Check individual operands.
297240116Smarcel      for (const MachineOperand &MO : MI->operands()) {
298240116Smarcel        // While the presence of a frame index does not prove that a stack
299240116Smarcel        // frame will be required, all frame indexes should be within alloc-
300240116Smarcel        // frame/deallocframe. Otherwise, the code that translates a frame
301240116Smarcel        // index into an offset would have to be aware of the placement of
302240116Smarcel        // the frame creation/destruction instructions.
303240116Smarcel        if (MO.isFI())
304240116Smarcel          return true;
305240116Smarcel        if (MO.isReg()) {
306240116Smarcel          Register R = MO.getReg();
307240116Smarcel          // Virtual registers will need scavenging, which then may require
308240116Smarcel          // a stack slot.
309240116Smarcel          if (Register::isVirtualRegister(R))
310240116Smarcel            return true;
311240116Smarcel          for (MCSubRegIterator S(R, &HRI, true); S.isValid(); ++S)
312240116Smarcel            if (CSR[*S])
313240116Smarcel              return true;
314240116Smarcel          continue;
315240116Smarcel        }
316240116Smarcel        if (MO.isRegMask()) {
317240116Smarcel          // A regmask would normally have all callee-saved registers marked
318240116Smarcel          // as preserved, so this check would not be needed, but in case of
319240116Smarcel          // ever having other regmasks (for other calling conventions),
320240116Smarcel          // make sure they would be processed correctly.
321240116Smarcel          const uint32_t *BM = MO.getRegMask();
322240116Smarcel          for (int x = CSR.find_first(); x >= 0; x = CSR.find_next(x)) {
323240116Smarcel            unsigned R = x;
324240116Smarcel            // If this regmask does not preserve a CSR, a frame will be needed.
325240116Smarcel            if (!(BM[R/32] & (1u << (R%32))))
326240116Smarcel              return true;
327240116Smarcel          }
328240116Smarcel        }
329240116Smarcel      }
330240116Smarcel    }
331240116Smarcel    return false;
332240116Smarcel}
333240116Smarcel
334240116Smarcel  /// Returns true if MBB has a machine instructions that indicates a tail call
335240116Smarcel  /// in the block.
336240116Smarcelstatic bool hasTailCall(const MachineBasicBlock &MBB) {
337240116Smarcel    MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
338240116Smarcel    if (I == MBB.end())
339240116Smarcel      return false;
340240116Smarcel    unsigned RetOpc = I->getOpcode();
341240116Smarcel    return RetOpc == Hexagon::PS_tailcall_i || RetOpc == Hexagon::PS_tailcall_r;
342240116Smarcel}
343240116Smarcel
344240116Smarcel/// Returns true if MBB contains an instruction that returns.
345240116Smarcelstatic bool hasReturn(const MachineBasicBlock &MBB) {
346240116Smarcel    for (auto I = MBB.getFirstTerminator(), E = MBB.end(); I != E; ++I)
347240116Smarcel      if (I->isReturn())
348240116Smarcel        return true;
349240116Smarcel    return false;
350240116Smarcel}
351240116Smarcel
352240116Smarcel/// Returns the "return" instruction from this block, or nullptr if there
353240116Smarcel/// isn't any.
354240116Smarcelstatic MachineInstr *getReturn(MachineBasicBlock &MBB) {
355240116Smarcel    for (auto &I : MBB)
356240116Smarcel      if (I.isReturn())
357240116Smarcel        return &I;
358240116Smarcel    return nullptr;
359240116Smarcel}
360240116Smarcel
361240116Smarcelstatic bool isRestoreCall(unsigned Opc) {
362240116Smarcel    switch (Opc) {
363240116Smarcel      case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
364240116Smarcel      case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
365240116Smarcel      case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT:
366240116Smarcel      case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC:
367240116Smarcel      case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT:
368240116Smarcel      case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC:
369240116Smarcel      case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4:
370240116Smarcel      case Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC:
371240116Smarcel        return true;
372240116Smarcel    }
373240116Smarcel    return false;
374240116Smarcel}
375240116Smarcel
376240116Smarcelstatic inline bool isOptNone(const MachineFunction &MF) {
377240116Smarcel    return MF.getFunction().hasOptNone() ||
378240116Smarcel           MF.getTarget().getOptLevel() == CodeGenOpt::None;
379240116Smarcel}
380240116Smarcel
381240116Smarcelstatic inline bool isOptSize(const MachineFunction &MF) {
382240116Smarcel    const Function &F = MF.getFunction();
383240116Smarcel    return F.hasOptSize() && !F.hasMinSize();
384240116Smarcel}
385240116Smarcel
386240116Smarcelstatic inline bool isMinSize(const MachineFunction &MF) {
387240116Smarcel    return MF.getFunction().hasMinSize();
388240116Smarcel}
389240116Smarcel
390240116Smarcel/// Implements shrink-wrapping of the stack frame. By default, stack frame
391240116Smarcel/// is created in the function entry block, and is cleaned up in every block
392240116Smarcel/// that returns. This function finds alternate blocks: one for the frame
393240116Smarcel/// setup (prolog) and one for the cleanup (epilog).
void HexagonFrameLowering::findShrunkPrologEpilog(MachineFunction &MF,
      MachineBasicBlock *&PrologB, MachineBasicBlock *&EpilogB) const {
  // Debugging aid: when -shrink-frame-limit is given on the command line,
  // only shrink-wrap up to that many functions across the whole run.
  static unsigned ShrinkCounter = 0;

  if (ShrinkLimit.getPosition()) {
    if (ShrinkCounter >= ShrinkLimit)
      return;
    ShrinkCounter++;
  }

  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  // Build the (post-)dominator trees locally; this runs outside the pass
  // manager, so they cannot be obtained through analysis dependencies.
  MachineDominatorTree MDT;
  MDT.runOnMachineFunction(MF);
  MachinePostDominatorTree MPT;
  MPT.runOnMachineFunction(MF);

  using UnsignedMap = DenseMap<unsigned, unsigned>;
  using RPOTType = ReversePostOrderTraversal<const MachineFunction *>;

  // Number blocks in reverse post-order, so that a back-edge is simply an
  // edge to a block with a smaller-or-equal RPO number.
  UnsignedMap RPO;
  RPOTType RPOT(&MF);
  unsigned RPON = 0;
  for (RPOTType::rpo_iterator I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    RPO[(*I)->getNumber()] = RPON++;

  // Don't process functions that have loops, at least for now. Placement
  // of prolog and epilog must take loop structure into account. For simpli-
  // city don't do it right now.
  for (auto &I : MF) {
    unsigned BN = RPO[I.getNumber()];
    for (auto SI = I.succ_begin(), SE = I.succ_end(); SI != SE; ++SI) {
      // If found a back-edge, return.
      if (RPO[(*SI)->getNumber()] <= BN)
        return;
    }
  }

  // Collect the set of blocks that need a stack frame to execute. Scan
  // each block for uses/defs of callee-saved registers, calls, etc.
  // CSR holds every callee-saved register together with its subregisters.
  SmallVector<MachineBasicBlock*,16> SFBlocks;
  BitVector CSR(Hexagon::NUM_TARGET_REGS);
  for (const MCPhysReg *P = HRI.getCalleeSavedRegs(&MF); *P; ++P)
    for (MCSubRegIterator S(*P, &HRI, true); S.isValid(); ++S)
      CSR[*S] = true;

  for (auto &I : MF)
    if (needsStackFrame(I, CSR, HRI))
      SFBlocks.push_back(&I);

  LLVM_DEBUG({
    dbgs() << "Blocks needing SF: {";
    for (auto &B : SFBlocks)
      dbgs() << " " << printMBBReference(*B);
    dbgs() << " }\n";
  });
  // No frame needed?
  if (SFBlocks.empty())
    return;

  // Pick a common dominator and a common post-dominator.
  // The prolog must dominate every frame-needing block, the epilog must
  // post-dominate every one of them.
  MachineBasicBlock *DomB = SFBlocks[0];
  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
    DomB = MDT.findNearestCommonDominator(DomB, SFBlocks[i]);
    if (!DomB)
      break;
  }
  MachineBasicBlock *PDomB = SFBlocks[0];
  for (unsigned i = 1, n = SFBlocks.size(); i < n; ++i) {
    PDomB = MPT.findNearestCommonDominator(PDomB, SFBlocks[i]);
    if (!PDomB)
      break;
  }
  LLVM_DEBUG({
    dbgs() << "Computed dom block: ";
    if (DomB)
      dbgs() << printMBBReference(*DomB);
    else
      dbgs() << "<null>";
    dbgs() << ", computed pdom block: ";
    if (PDomB)
      dbgs() << printMBBReference(*PDomB);
    else
      dbgs() << "<null>";
    dbgs() << "\n";
  });
  if (!DomB || !PDomB)
    return;

  // Make sure that DomB dominates PDomB and PDomB post-dominates DomB.
  if (!MDT.dominates(DomB, PDomB)) {
    LLVM_DEBUG(dbgs() << "Dom block does not dominate pdom block\n");
    return;
  }
  if (!MPT.dominates(PDomB, DomB)) {
    LLVM_DEBUG(dbgs() << "PDom block does not post-dominate dom block\n");
    return;
  }

  // Finally, everything seems right.
  PrologB = DomB;
  EpilogB = PDomB;
}
497240116Smarcel
498240116Smarcel/// Perform most of the PEI work here:
499240116Smarcel/// - saving/restoring of the callee-saved registers,
500240116Smarcel/// - stack frame creation and destruction.
501240116Smarcel/// Normally, this work is distributed among various functions, but doing it
502240116Smarcel/// in one place allows shrink-wrapping of the stack frame.
void HexagonFrameLowering::emitPrologue(MachineFunction &MF,
                                        MachineBasicBlock &MBB) const {
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // By default the prologue goes into the entry block; shrink-wrapping may
  // select a different prolog block, and possibly a single epilog block.
  MachineBasicBlock *PrologB = &MF.front(), *EpilogB = nullptr;
  if (EnableShrinkWrapping)
    findShrunkPrologEpilog(MF, PrologB, EpilogB);

  // PrologueStubs is set by insertCSRSpillsInBlock when the spills are done
  // via a library stub call instead of inline stores.
  bool PrologueStubs = false;
  insertCSRSpillsInBlock(*PrologB, CSI, HRI, PrologueStubs);
  insertPrologueInBlock(*PrologB, PrologueStubs);
  updateEntryPaths(MF, *PrologB);

  if (EpilogB) {
    // Single epilog block chosen by shrink-wrapping.
    insertCSRRestoresInBlock(*EpilogB, CSI, HRI);
    insertEpilogueInBlock(*EpilogB);
  } else {
    // No dedicated epilog: restore the CSRs and tear the frame down in
    // every return block.
    for (auto &B : MF)
      if (B.isReturnBlock())
        insertCSRRestoresInBlock(B, CSI, HRI);

    for (auto &B : MF)
      if (B.isReturnBlock())
        insertEpilogueInBlock(B);

    // Attach the callee-saved registers as (implicit use) operands to each
    // return instruction, except for the restore-stub calls which handle
    // them on their own. This keeps the restored values live up to the
    // return.
    for (auto &B : MF) {
      if (B.empty())
        continue;
      MachineInstr *RetI = getReturn(B);
      if (!RetI || isRestoreCall(RetI->getOpcode()))
        continue;
      for (auto &R : CSI)
        RetI->addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    }
  }

  if (EpilogB) {
    // If there is an epilog block, it may not have a return instruction.
    // In such case, we need to add the callee-saved registers as live-ins
    // in all blocks on all paths from the epilog to any return block.
    unsigned MaxBN = MF.getNumBlockIDs();
    BitVector DoneT(MaxBN+1), DoneF(MaxBN+1), Path(MaxBN+1);
    updateExitPaths(*EpilogB, *EpilogB, DoneT, DoneF, Path);
  }
}
551240116Smarcel
552240116Smarcel/// Returns true if the target can safely skip saving callee-saved registers
553240116Smarcel/// for noreturn nounwind functions.
554240116Smarcelbool HexagonFrameLowering::enableCalleeSaveSkip(
555240116Smarcel    const MachineFunction &MF) const {
556240116Smarcel  const auto &F = MF.getFunction();
557240116Smarcel  assert(F.hasFnAttribute(Attribute::NoReturn) &&
558240116Smarcel         F.getFunction().hasFnAttribute(Attribute::NoUnwind) &&
559240116Smarcel         !F.getFunction().hasFnAttribute(Attribute::UWTable));
560240116Smarcel  (void)F;
561240116Smarcel
562240116Smarcel  // No need to save callee saved registers if the function does not return.
563240116Smarcel  return MF.getSubtarget<HexagonSubtarget>().noreturnStackElim();
564240116Smarcel}
565240116Smarcel
566240116Smarcel// Helper function used to determine when to eliminate the stack frame for
567240116Smarcel// functions marked as noreturn and when the noreturn-stack-elim options are
568240116Smarcel// specified. When both these conditions are true, then a FP may not be needed
569240116Smarcel// if the function makes a call. It is very similar to enableCalleeSaveSkip,
570240116Smarcel// but it used to check if the allocframe can be eliminated as well.
571240116Smarcelstatic bool enableAllocFrameElim(const MachineFunction &MF) {
572240116Smarcel  const auto &F = MF.getFunction();
573240116Smarcel  const auto &MFI = MF.getFrameInfo();
574240116Smarcel  const auto &HST = MF.getSubtarget<HexagonSubtarget>();
575240116Smarcel  assert(!MFI.hasVarSizedObjects() &&
576240116Smarcel         !HST.getRegisterInfo()->needsStackRealignment(MF));
577240116Smarcel  return F.hasFnAttribute(Attribute::NoReturn) &&
578240116Smarcel    F.hasFnAttribute(Attribute::NoUnwind) &&
579240116Smarcel    !F.hasFnAttribute(Attribute::UWTable) && HST.noreturnStackElim() &&
580240116Smarcel    MFI.getStackSize() == 0;
581240116Smarcel}
582240116Smarcel
583240116Smarcelvoid HexagonFrameLowering::insertPrologueInBlock(MachineBasicBlock &MBB,
584240116Smarcel      bool PrologueStubs) const {
585240116Smarcel  MachineFunction &MF = *MBB.getParent();
586240116Smarcel  MachineFrameInfo &MFI = MF.getFrameInfo();
587240116Smarcel  auto &HST = MF.getSubtarget<HexagonSubtarget>();
588240116Smarcel  auto &HII = *HST.getInstrInfo();
589240116Smarcel  auto &HRI = *HST.getRegisterInfo();
590240116Smarcel
591240116Smarcel  unsigned MaxAlign = std::max(MFI.getMaxAlignment(), getStackAlignment());
592240116Smarcel
593240116Smarcel  // Calculate the total stack frame size.
594240116Smarcel  // Get the number of bytes to allocate from the FrameInfo.
595240116Smarcel  unsigned FrameSize = MFI.getStackSize();
596240116Smarcel  // Round up the max call frame size to the max alignment on the stack.
597240116Smarcel  unsigned MaxCFA = alignTo(MFI.getMaxCallFrameSize(), MaxAlign);
598240116Smarcel  MFI.setMaxCallFrameSize(MaxCFA);
599240116Smarcel
600240116Smarcel  FrameSize = MaxCFA + alignTo(FrameSize, MaxAlign);
601240116Smarcel  MFI.setStackSize(FrameSize);
602240116Smarcel
603240116Smarcel  bool AlignStack = (MaxAlign > getStackAlignment());
604240116Smarcel
605240116Smarcel  // Get the number of bytes to allocate from the FrameInfo.
606240116Smarcel  unsigned NumBytes = MFI.getStackSize();
607240116Smarcel  unsigned SP = HRI.getStackRegister();
608240116Smarcel  unsigned MaxCF = MFI.getMaxCallFrameSize();
609240116Smarcel  MachineBasicBlock::iterator InsertPt = MBB.begin();
610240116Smarcel
611240116Smarcel  SmallVector<MachineInstr *, 4> AdjustRegs;
612240116Smarcel  for (auto &MBB : MF)
613240116Smarcel    for (auto &MI : MBB)
614240116Smarcel      if (MI.getOpcode() == Hexagon::PS_alloca)
615240116Smarcel        AdjustRegs.push_back(&MI);
616240116Smarcel
617240116Smarcel  for (auto MI : AdjustRegs) {
618240116Smarcel    assert((MI->getOpcode() == Hexagon::PS_alloca) && "Expected alloca");
619240116Smarcel    expandAlloca(MI, HII, SP, MaxCF);
620240116Smarcel    MI->eraseFromParent();
621240116Smarcel  }
622240116Smarcel
623240116Smarcel  DebugLoc dl = MBB.findDebugLoc(InsertPt);
624240116Smarcel
625240116Smarcel  if (hasFP(MF)) {
626240116Smarcel    insertAllocframe(MBB, InsertPt, NumBytes);
627240116Smarcel    if (AlignStack) {
628240116Smarcel      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_andir), SP)
629240116Smarcel          .addReg(SP)
630240116Smarcel          .addImm(-int64_t(MaxAlign));
631240116Smarcel    }
632240116Smarcel    // If the stack-checking is enabled, and we spilled the callee-saved
633240116Smarcel    // registers inline (i.e. did not use a spill function), then call
634240116Smarcel    // the stack checker directly.
635240116Smarcel    if (EnableStackOVFSanitizer && !PrologueStubs)
636240116Smarcel      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::PS_call_stk))
637240116Smarcel             .addExternalSymbol("__runtime_stack_check");
638  } else if (NumBytes > 0) {
639    assert(alignTo(NumBytes, 8) == NumBytes);
640    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
641      .addReg(SP)
642      .addImm(-int(NumBytes));
643  }
644}
645
// Insert the epilogue into MBB (a block that ends the function, either by a
// return or a tail call). When there is no FP, this undoes the prologue's SP
// decrement; otherwise it emits the matching deallocframe/dealloc_return,
// taking care not to duplicate the deallocation when a restore library call
// (which includes "deallocframe") is already present.
void HexagonFrameLowering::insertEpilogueInBlock(MachineBasicBlock &MBB) const {
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();
  unsigned SP = HRI.getStackRegister();

  MachineBasicBlock::iterator InsertPt = MBB.getFirstTerminator();
  DebugLoc dl = MBB.findDebugLoc(InsertPt);

  if (!hasFP(MF)) {
    // No allocframe was emitted: the prologue only subtracted the static
    // frame size from SP, so add it back here.
    MachineFrameInfo &MFI = MF.getFrameInfo();
    if (unsigned NumBytes = MFI.getStackSize()) {
      BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
        .addReg(SP)
        .addImm(NumBytes);
    }
    return;
  }

  MachineInstr *RetI = getReturn(MBB);
  unsigned RetOpc = RetI ? RetI->getOpcode() : 0;

  // Handle EH_RETURN.
  if (RetOpc == Hexagon::EH_RETURN_JMPR) {
    // Restore FP/LR (D15), then bump SP by the amount in R28.
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
        .addDef(Hexagon::D15)
        .addReg(Hexagon::R30);
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_add), SP)
        .addReg(SP)
        .addReg(Hexagon::R28);
    return;
  }

  // Check for RESTORE_DEALLOC_RET* tail call. Don't emit an extra dealloc-
  // frame instruction if we encounter it.
  if (RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4 ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT ||
      RetOpc == Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC) {
    MachineBasicBlock::iterator It = RetI;
    ++It;
    // Delete all instructions after the RESTORE (except labels).
    while (It != MBB.end()) {
      if (!It->isLabel())
        It = MBB.erase(It);
      else
        ++It;
    }
    return;
  }

  // It is possible that the restoring code is a call to a library function.
  // All of the restore* functions include "deallocframe", so we need to make
  // sure that we don't add an extra one.
  bool NeedsDeallocframe = true;
  if (!MBB.empty() && InsertPt != MBB.begin()) {
    MachineBasicBlock::iterator PrevIt = std::prev(InsertPt);
    unsigned COpc = PrevIt->getOpcode();
    if (COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4 ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT ||
        COpc == Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC ||
        COpc == Hexagon::PS_call_nr || COpc == Hexagon::PS_callr_nr)
      NeedsDeallocframe = false;
  }

  if (!NeedsDeallocframe)
    return;
  // If the returning instruction is PS_jmpret, replace it with dealloc_return,
  // otherwise just add deallocframe. The function could be returning via a
  // tail call.
  if (RetOpc != Hexagon::PS_jmpret || DisableDeallocRet) {
    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::L2_deallocframe))
      .addDef(Hexagon::D15)
      .addReg(Hexagon::R30);
    return;
  }
  // Fold the deallocframe and the return into a single dealloc_return.
  unsigned NewOpc = Hexagon::L4_return;
  MachineInstr *NewI = BuildMI(MBB, RetI, dl, HII.get(NewOpc))
      .addDef(Hexagon::D15)
      .addReg(Hexagon::R30);
  // Transfer the function live-out registers.
  NewI->copyImplicitOps(MF, *RetI);
  MBB.erase(RetI);
}
732
733void HexagonFrameLowering::insertAllocframe(MachineBasicBlock &MBB,
734      MachineBasicBlock::iterator InsertPt, unsigned NumBytes) const {
735  MachineFunction &MF = *MBB.getParent();
736  auto &HST = MF.getSubtarget<HexagonSubtarget>();
737  auto &HII = *HST.getInstrInfo();
738  auto &HRI = *HST.getRegisterInfo();
739
740  // Check for overflow.
741  // Hexagon_TODO: Ugh! hardcoding. Is there an API that can be used?
742  const unsigned int ALLOCFRAME_MAX = 16384;
743
744  // Create a dummy memory operand to avoid allocframe from being treated as
745  // a volatile memory reference.
746  auto *MMO = MF.getMachineMemOperand(MachinePointerInfo::getStack(MF, 0),
747                                      MachineMemOperand::MOStore, 4, 4);
748
749  DebugLoc dl = MBB.findDebugLoc(InsertPt);
750  unsigned SP = HRI.getStackRegister();
751
752  if (NumBytes >= ALLOCFRAME_MAX) {
753    // Emit allocframe(#0).
754    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
755      .addDef(SP)
756      .addReg(SP)
757      .addImm(0)
758      .addMemOperand(MMO);
759
760    // Subtract the size from the stack pointer.
761    unsigned SP = HRI.getStackRegister();
762    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::A2_addi), SP)
763      .addReg(SP)
764      .addImm(-int(NumBytes));
765  } else {
766    BuildMI(MBB, InsertPt, dl, HII.get(Hexagon::S2_allocframe))
767      .addDef(SP)
768      .addReg(SP)
769      .addImm(NumBytes)
770      .addMemOperand(MMO);
771  }
772}
773
774void HexagonFrameLowering::updateEntryPaths(MachineFunction &MF,
775      MachineBasicBlock &SaveB) const {
776  SetVector<unsigned> Worklist;
777
778  MachineBasicBlock &EntryB = MF.front();
779  Worklist.insert(EntryB.getNumber());
780
781  unsigned SaveN = SaveB.getNumber();
782  auto &CSI = MF.getFrameInfo().getCalleeSavedInfo();
783
784  for (unsigned i = 0; i < Worklist.size(); ++i) {
785    unsigned BN = Worklist[i];
786    MachineBasicBlock &MBB = *MF.getBlockNumbered(BN);
787    for (auto &R : CSI)
788      if (!MBB.isLiveIn(R.getReg()))
789        MBB.addLiveIn(R.getReg());
790    if (BN != SaveN)
791      for (auto &SB : MBB.successors())
792        Worklist.insert(SB->getNumber());
793  }
794}
795
// Recursive DFS from MBB toward the function exits. Returns true if some
// return instruction is reachable from MBB. On blocks (other than RestoreB)
// that can reach an exit, the callee-saved registers are added as live-ins,
// and reached return instructions get implicit uses of those registers.
// \param DoneT blocks already proven to reach an exit (memoized "true").
// \param DoneF blocks already proven not to reach an exit (memoized "false").
// \param Path  blocks on the current DFS stack, used to cut cycles.
bool HexagonFrameLowering::updateExitPaths(MachineBasicBlock &MBB,
      MachineBasicBlock &RestoreB, BitVector &DoneT, BitVector &DoneF,
      BitVector &Path) const {
  assert(MBB.getNumber() >= 0);
  unsigned BN = MBB.getNumber();
  // A block already on the DFS path (cycle) or known to not reach an exit
  // contributes nothing.
  if (Path[BN] || DoneF[BN])
    return false;
  if (DoneT[BN])
    return true;

  auto &CSI = MBB.getParent()->getFrameInfo().getCalleeSavedInfo();

  Path[BN] = true;
  bool ReachedExit = false;
  for (auto &SB : MBB.successors())
    ReachedExit |= updateExitPaths(*SB, RestoreB, DoneT, DoneF, Path);

  if (!MBB.empty() && MBB.back().isReturn()) {
    // Add implicit uses of all callee-saved registers to the reached
    // return instructions. This is to prevent the anti-dependency breaker
    // from renaming these registers.
    MachineInstr &RetI = MBB.back();
    if (!isRestoreCall(RetI.getOpcode()))
      for (auto &R : CSI)
        RetI.addOperand(MachineOperand::CreateReg(R.getReg(), false, true));
    ReachedExit = true;
  }

  // We don't want to add unnecessary live-ins to the restore block: since
  // the callee-saved registers are being defined in it, the entry of the
  // restore block cannot be on the path from the definitions to any exit.
  if (ReachedExit && &MBB != &RestoreB) {
    for (auto &R : CSI)
      if (!MBB.isLiveIn(R.getReg()))
        MBB.addLiveIn(R.getReg());
    DoneT[BN] = true;
  }
  if (!ReachedExit)
    DoneF[BN] = true;

  // Pop this block off the DFS path before returning.
  Path[BN] = false;
  return ReachedExit;
}
839
// Find the point in block B where CFI instructions should be inserted,
// or None if B contains no allocframe.
static Optional<MachineBasicBlock::iterator>
findCFILocation(MachineBasicBlock &B) {
    // The CFI instructions need to be inserted right after allocframe.
    // An exception to this is a situation where allocframe is bundled
    // with a call: then the CFI instructions need to be inserted before
    // the packet with the allocframe+call (in case the call throws an
    // exception).
    auto End = B.instr_end();

    for (MachineInstr &I : B) {
      // The outer range-for visits top-level instructions only, so I is
      // either a standalone instruction or a BUNDLE header.
      MachineBasicBlock::iterator It = I.getIterator();
      if (!I.isBundle()) {
        if (I.getOpcode() == Hexagon::S2_allocframe)
          return std::next(It);
        continue;
      }
      // I is a bundle.
      bool HasCall = false, HasAllocFrame = false;
      auto T = It.getInstrIterator();
      // Scan the bundled instructions that follow the BUNDLE header.
      while (++T != End && T->isBundled()) {
        if (T->getOpcode() == Hexagon::S2_allocframe)
          HasAllocFrame = true;
        else if (T->isCall())
          HasCall = true;
      }
      if (HasAllocFrame)
        return HasCall ? It : std::next(It);
    }
    return None;
}
870
871void HexagonFrameLowering::insertCFIInstructions(MachineFunction &MF) const {
872  for (auto &B : MF) {
873    auto At = findCFILocation(B);
874    if (At.hasValue())
875      insertCFIInstructionsAt(B, At.getValue());
876  }
877}
878
// Emit the CFI instructions describing this function's frame at the given
// point: the CFA definition and FP/LR save locations (when an allocframe is
// present), followed by one cfi_offset per spilled callee-saved register
// (double registers are split into their two 32-bit halves).
void HexagonFrameLowering::insertCFIInstructionsAt(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator At) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();
  auto &HRI = *HST.getRegisterInfo();

  // If CFI instructions have debug information attached, something goes
  // wrong with the final assembly generation: the prolog_end is placed
  // in a wrong location.
  DebugLoc DL;
  const MCInstrDesc &CFID = HII.get(TargetOpcode::CFI_INSTRUCTION);

  MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
  bool HasFP = hasFP(MF);

  if (HasFP) {
    unsigned DwFPReg = HRI.getDwarfRegNum(HRI.getFrameRegister(), true);
    unsigned DwRAReg = HRI.getDwarfRegNum(HRI.getRARegister(), true);

    // Define CFA via an offset from the value of FP.
    //
    //  -8   -4    0 (SP)
    // --+----+----+---------------------
    //   | FP | LR |          increasing addresses -->
    // --+----+----+---------------------
    //   |         +-- Old SP (before allocframe)
    //   +-- New FP (after allocframe)
    //
    // MCCFIInstruction::createDefCfa subtracts the offset from the register.
    // MCCFIInstruction::createOffset takes the offset without sign change.
    auto DefCfa = MCCFIInstruction::createDefCfa(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(DefCfa));
    // R31 (return addr) = CFA - 4
    auto OffR31 = MCCFIInstruction::createOffset(FrameLabel, DwRAReg, -4);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR31));
    // R30 (frame ptr) = CFA - 8
    auto OffR30 = MCCFIInstruction::createOffset(FrameLabel, DwFPReg, -8);
    BuildMI(MBB, At, DL, CFID)
        .addCFIIndex(MF.addFrameInst(OffR30));
  }

  // Candidate registers for cfi_offset directives, terminated by
  // NoRegister. Only those actually present in the callee-saved info
  // are emitted below.
  static unsigned int RegsToMove[] = {
    Hexagon::R1,  Hexagon::R0,  Hexagon::R3,  Hexagon::R2,
    Hexagon::R17, Hexagon::R16, Hexagon::R19, Hexagon::R18,
    Hexagon::R21, Hexagon::R20, Hexagon::R23, Hexagon::R22,
    Hexagon::R25, Hexagon::R24, Hexagon::R27, Hexagon::R26,
    Hexagon::D0,  Hexagon::D1,  Hexagon::D8,  Hexagon::D9,
    Hexagon::D10, Hexagon::D11, Hexagon::D12, Hexagon::D13,
    Hexagon::NoRegister
  };

  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  for (unsigned i = 0; RegsToMove[i] != Hexagon::NoRegister; ++i) {
    unsigned Reg = RegsToMove[i];
    // Skip registers that were not spilled in this function.
    auto IfR = [Reg] (const CalleeSavedInfo &C) -> bool {
      return C.getReg() == Reg;
    };
    auto F = find_if(CSI, IfR);
    if (F == CSI.end())
      continue;

    int64_t Offset;
    if (HasFP) {
      // If the function has a frame pointer (i.e. has an allocframe),
      // then the CFA has been defined in terms of FP. Any offsets in
      // the following CFI instructions have to be defined relative
      // to FP, which points to the bottom of the stack frame.
      // The function getFrameIndexReference can still choose to use SP
      // for the offset calculation, so we cannot simply call it here.
      // Instead, get the offset (relative to the FP) directly.
      Offset = MFI.getObjectOffset(F->getFrameIdx());
    } else {
      unsigned FrameReg;
      Offset = getFrameIndexReference(MF, F->getFrameIdx(), FrameReg);
    }
    // Subtract 8 to make room for R30 and R31, which are added above.
    Offset -= 8;

    if (Reg < Hexagon::D0 || Reg > Hexagon::D15) {
      // Single 32-bit register: one cfi_offset suffices.
      unsigned DwarfReg = HRI.getDwarfRegNum(Reg, true);
      auto OffReg = MCCFIInstruction::createOffset(FrameLabel, DwarfReg,
                                                   Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffReg));
    } else {
      // Split the double regs into subregs, and generate appropriate
      // cfi_offsets.
      // The only reason, we are split double regs is, llvm-mc does not
      // understand paired registers for cfi_offset.
      // Eg .cfi_offset r1:0, -64

      Register HiReg = HRI.getSubReg(Reg, Hexagon::isub_hi);
      Register LoReg = HRI.getSubReg(Reg, Hexagon::isub_lo);
      unsigned HiDwarfReg = HRI.getDwarfRegNum(HiReg, true);
      unsigned LoDwarfReg = HRI.getDwarfRegNum(LoReg, true);
      // The high half lives 4 bytes above the low half.
      auto OffHi = MCCFIInstruction::createOffset(FrameLabel, HiDwarfReg,
                                                  Offset+4);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffHi));
      auto OffLo = MCCFIInstruction::createOffset(FrameLabel, LoDwarfReg,
                                                  Offset);
      BuildMI(MBB, At, DL, CFID)
          .addCFIIndex(MF.addFrameInst(OffLo));
    }
  }
}
991
992bool HexagonFrameLowering::hasFP(const MachineFunction &MF) const {
993  if (MF.getFunction().hasFnAttribute(Attribute::Naked))
994    return false;
995
996  auto &MFI = MF.getFrameInfo();
997  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
998  bool HasExtraAlign = HRI.needsStackRealignment(MF);
999  bool HasAlloca = MFI.hasVarSizedObjects();
1000
1001  // Insert ALLOCFRAME if we need to or at -O0 for the debugger.  Think
1002  // that this shouldn't be required, but doing so now because gcc does and
1003  // gdb can't break at the start of the function without it.  Will remove if
1004  // this turns out to be a gdb bug.
1005  //
1006  if (MF.getTarget().getOptLevel() == CodeGenOpt::None)
1007    return true;
1008
1009  // By default we want to use SP (since it's always there). FP requires
1010  // some setup (i.e. ALLOCFRAME).
1011  // Both, alloca and stack alignment modify the stack pointer by an
1012  // undetermined value, so we need to save it at the entry to the function
1013  // (i.e. use allocframe).
1014  if (HasAlloca || HasExtraAlign)
1015    return true;
1016
1017  if (MFI.getStackSize() > 0) {
1018    // If FP-elimination is disabled, we have to use FP at this point.
1019    const TargetMachine &TM = MF.getTarget();
1020    if (TM.Options.DisableFramePointerElim(MF) || !EliminateFramePointer)
1021      return true;
1022    if (EnableStackOVFSanitizer)
1023      return true;
1024  }
1025
1026  const auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
1027  if ((MFI.hasCalls() && !enableAllocFrameElim(MF)) || HMFI.hasClobberLR())
1028    return true;
1029
1030  return false;
1031}
1032
// Selects which family of callee-saved spill/restore runtime routines
// getSpillFunctionFor should return (see the __save_*/__restore_* names).
enum SpillKind {
  SK_ToMem,           // __save_* : store callee-saved registers to memory
  SK_FromMem,         // __restore_*_and_deallocframe : restore + deallocframe
  SK_FromMemTailcall  // __restore_*_and_deallocframe_before_tailcall
};
1038
1039static const char *getSpillFunctionFor(unsigned MaxReg, SpillKind SpillType,
1040      bool Stkchk = false) {
1041  const char * V4SpillToMemoryFunctions[] = {
1042    "__save_r16_through_r17",
1043    "__save_r16_through_r19",
1044    "__save_r16_through_r21",
1045    "__save_r16_through_r23",
1046    "__save_r16_through_r25",
1047    "__save_r16_through_r27" };
1048
1049  const char * V4SpillToMemoryStkchkFunctions[] = {
1050    "__save_r16_through_r17_stkchk",
1051    "__save_r16_through_r19_stkchk",
1052    "__save_r16_through_r21_stkchk",
1053    "__save_r16_through_r23_stkchk",
1054    "__save_r16_through_r25_stkchk",
1055    "__save_r16_through_r27_stkchk" };
1056
1057  const char * V4SpillFromMemoryFunctions[] = {
1058    "__restore_r16_through_r17_and_deallocframe",
1059    "__restore_r16_through_r19_and_deallocframe",
1060    "__restore_r16_through_r21_and_deallocframe",
1061    "__restore_r16_through_r23_and_deallocframe",
1062    "__restore_r16_through_r25_and_deallocframe",
1063    "__restore_r16_through_r27_and_deallocframe" };
1064
1065  const char * V4SpillFromMemoryTailcallFunctions[] = {
1066    "__restore_r16_through_r17_and_deallocframe_before_tailcall",
1067    "__restore_r16_through_r19_and_deallocframe_before_tailcall",
1068    "__restore_r16_through_r21_and_deallocframe_before_tailcall",
1069    "__restore_r16_through_r23_and_deallocframe_before_tailcall",
1070    "__restore_r16_through_r25_and_deallocframe_before_tailcall",
1071    "__restore_r16_through_r27_and_deallocframe_before_tailcall"
1072  };
1073
1074  const char **SpillFunc = nullptr;
1075
1076  switch(SpillType) {
1077  case SK_ToMem:
1078    SpillFunc = Stkchk ? V4SpillToMemoryStkchkFunctions
1079                       : V4SpillToMemoryFunctions;
1080    break;
1081  case SK_FromMem:
1082    SpillFunc = V4SpillFromMemoryFunctions;
1083    break;
1084  case SK_FromMemTailcall:
1085    SpillFunc = V4SpillFromMemoryTailcallFunctions;
1086    break;
1087  }
1088  assert(SpillFunc && "Unknown spill kind");
1089
1090  // Spill all callee-saved registers up to the highest register used.
1091  switch (MaxReg) {
1092  case Hexagon::R17:
1093    return SpillFunc[0];
1094  case Hexagon::R19:
1095    return SpillFunc[1];
1096  case Hexagon::R21:
1097    return SpillFunc[2];
1098  case Hexagon::R23:
1099    return SpillFunc[3];
1100  case Hexagon::R25:
1101    return SpillFunc[4];
1102  case Hexagon::R27:
1103    return SpillFunc[5];
1104  default:
1105    llvm_unreachable("Unhandled maximum callee save register");
1106  }
1107  return nullptr;
1108}
1109
// Compute the offset of frame index FI from a base register, and report
// which base register (SP, FP, or the aligned-stack base AP) to use in
// FrameReg. The choice depends on whether the function has alloca,
// stack realignment, an FP (allocframe), and the optimization level.
int HexagonFrameLowering::getFrameIndexReference(const MachineFunction &MF,
      int FI, unsigned &FrameReg) const {
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  int Offset = MFI.getObjectOffset(FI);
  bool HasAlloca = MFI.hasVarSizedObjects();
  bool HasExtraAlign = HRI.needsStackRealignment(MF);
  bool NoOpt = MF.getTarget().getOptLevel() == CodeGenOpt::None;

  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  unsigned FrameSize = MFI.getStackSize();
  unsigned SP = HRI.getStackRegister();
  unsigned FP = HRI.getFrameRegister();
  unsigned AP = HMFI.getStackAlignBasePhysReg();
  // It may happen that AP will be absent even HasAlloca && HasExtraAlign
  // is true. HasExtraAlign may be set because of vector spills, without
  // aligned locals or aligned outgoing function arguments. Since vector
  // spills will ultimately be "unaligned", it is safe to use FP as the
  // base register.
  // In fact, in such a scenario the stack is actually not required to be
  // aligned, although it may end up being aligned anyway, since this
  // particular case is not easily detectable. The alignment will be
  // unnecessary, but not incorrect.
  // Unfortunately there is no quick way to verify that the above is
  // indeed the case (and that it's not a result of an error), so just
  // assume that missing AP will be replaced by FP.
  // (A better fix would be to rematerialize AP from FP and always align
  // vector spills.)
  if (AP == 0)
    AP = FP;

  bool UseFP = false, UseAP = false;  // Default: use SP (except at -O0).
  // Use FP at -O0, except when there are objects with extra alignment.
  // That additional alignment requirement may cause a pad to be inserted,
  // which will make it impossible to use FP to access objects located
  // past the pad.
  if (NoOpt && !HasExtraAlign)
    UseFP = true;
  if (MFI.isFixedObjectIndex(FI) || MFI.isObjectPreAllocated(FI)) {
    // Fixed and preallocated objects will be located before any padding
    // so FP must be used to access them.
    UseFP |= (HasAlloca || HasExtraAlign);
  } else {
    if (HasAlloca) {
      if (HasExtraAlign)
        UseAP = true;
      else
        UseFP = true;
    }
  }

  // If FP was picked, then there had better be FP.
  bool HasFP = hasFP(MF);
  assert((HasFP || !UseFP) && "This function must have frame pointer");

  // Having FP implies allocframe. Allocframe will store extra 8 bytes:
  // FP/LR. If the base register is used to access an object across these
  // 8 bytes, then the offset will need to be adjusted by 8.
  //
  // After allocframe:
  //                    HexagonISelLowering adds 8 to ---+
  //                    the offsets of all stack-based   |
  //                    arguments (*)                    |
  //                                                     |
  //   getObjectOffset < 0   0     8  getObjectOffset >= 8
  // ------------------------+-----+------------------------> increasing
  //     <local objects>     |FP/LR|    <input arguments>     addresses
  // -----------------+------+-----+------------------------>
  //                  |      |
  //    SP/AP point --+      +-- FP points here (**)
  //    somewhere on
  //    this side of FP/LR
  //
  // (*) See LowerFormalArguments. The FP/LR is assumed to be present.
  // (**) *FP == old-FP. FP+0..7 are the bytes of FP/LR.

  // The lowering assumes that FP/LR is present, and so the offsets of
  // the formal arguments start at 8. If FP/LR is not there we need to
  // reduce the offset by 8.
  if (Offset > 0 && !HasFP)
    Offset -= 8;

  if (UseFP)
    FrameReg = FP;
  else if (UseAP)
    FrameReg = AP;
  else
    FrameReg = SP;

  // Calculate the actual offset in the instruction. If there is no FP
  // (in other words, no allocframe), then SP will not be adjusted (i.e.
  // there will be no SP -= FrameSize), so the frame size should not be
  // added to the calculated offset.
  int RealOffset = Offset;
  if (!UseFP && !UseAP)
    RealOffset = FrameSize+Offset;
  return RealOffset;
}
1209
// Emit the callee-saved-register saves at the top of MBB. Either a single
// call to a __save_* library routine (when useSpillFunction says so), or
// individual stores to the assigned spill slots.
// \param PrologueStubs set to true when a spill library call was emitted,
//        so the prologue knows the stub performs the stack check itself.
// Returns true (the spills were handled here).
bool HexagonFrameLowering::insertCSRSpillsInBlock(MachineBasicBlock &MBB,
      const CSIVect &CSI, const HexagonRegisterInfo &HRI,
      bool &PrologueStubs) const {
  if (CSI.empty())
    return true;

  MachineBasicBlock::iterator MI = MBB.begin();
  PrologueStubs = false;
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();

  if (useSpillFunction(MF, CSI)) {
    PrologueStubs = true;
    unsigned MaxReg = getMaxCalleeSavedReg(CSI, HRI);
    bool StkOvrFlowEnabled = EnableStackOVFSanitizer;
    const char *SpillFun = getSpillFunctionFor(MaxReg, SK_ToMem,
                                               StkOvrFlowEnabled);
    auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
    bool IsPIC = HTM.isPositionIndependent();
    bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;

    // Call spill function.
    DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc() : DebugLoc();
    unsigned SpillOpc;
    // Pick the pseudo matching the stack-check / long-call / PIC variant.
    if (StkOvrFlowEnabled) {
      if (LongCalls)
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK_EXT;
      else
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4STK_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4STK;
    } else {
      if (LongCalls)
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4_EXT;
      else
        SpillOpc = IsPIC ? Hexagon::SAVE_REGISTERS_CALL_V4_PIC
                         : Hexagon::SAVE_REGISTERS_CALL_V4;
    }

    MachineInstr *SaveRegsCall =
        BuildMI(MBB, MI, DL, HII.get(SpillOpc))
          .addExternalSymbol(SpillFun);

    // Add callee-saved registers as use.
    addCalleeSaveRegistersAsImpOperand(SaveRegsCall, CSI, false, true);
    // Add live in registers.
    for (unsigned I = 0; I < CSI.size(); ++I)
      MBB.addLiveIn(CSI[I].getReg());
    return true;
  }

  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
    unsigned Reg = CSI[i].getReg();
    // Add live in registers. We treat eh_return callee saved register r0 - r3
    // specially. They are not really callee saved registers as they are not
    // supposed to be killed.
    bool IsKill = !HRI.isEHReturnCalleeSaveReg(Reg);
    int FI = CSI[i].getFrameIdx();
    const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
    HII.storeRegToStackSlot(MBB, MI, Reg, IsKill, FI, RC, &HRI);
    if (IsKill)
      MBB.addLiveIn(Reg);
  }
  return true;
}
1277
// Emit the callee-saved-register restores in MBB (the epilogue block).
// Either a single call to a __restore_* library routine (which also does
// the deallocframe, and may replace the return), or individual loads from
// the spill slots. Returns false only when CSI is empty.
bool HexagonFrameLowering::insertCSRRestoresInBlock(MachineBasicBlock &MBB,
      const CSIVect &CSI, const HexagonRegisterInfo &HRI) const {
  if (CSI.empty())
    return false;

  MachineBasicBlock::iterator MI = MBB.getFirstTerminator();
  MachineFunction &MF = *MBB.getParent();
  auto &HST = MF.getSubtarget<HexagonSubtarget>();
  auto &HII = *HST.getInstrInfo();

  if (useRestoreFunction(MF, CSI)) {
    // A block without a return is treated like a tail call: use the
    // "before_tailcall" restore variant.
    bool HasTC = hasTailCall(MBB) || !hasReturn(MBB);
    unsigned MaxR = getMaxCalleeSavedReg(CSI, HRI);
    SpillKind Kind = HasTC ? SK_FromMemTailcall : SK_FromMem;
    const char *RestoreFn = getSpillFunctionFor(MaxR, Kind);
    auto &HTM = static_cast<const HexagonTargetMachine&>(MF.getTarget());
    bool IsPIC = HTM.isPositionIndependent();
    bool LongCalls = HST.useLongCalls() || EnableSaveRestoreLong;

    // Call spill function.
    DebugLoc DL = MI != MBB.end() ? MI->getDebugLoc()
                                  : MBB.findDebugLoc(MBB.end());
    MachineInstr *DeallocCall = nullptr;

    if (HasTC) {
      unsigned RetOpc;
      if (LongCalls)
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT_PIC
                       : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_EXT;
      else
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4_PIC
                       : Hexagon::RESTORE_DEALLOC_BEFORE_TAILCALL_V4;
      DeallocCall = BuildMI(MBB, MI, DL, HII.get(RetOpc))
          .addExternalSymbol(RestoreFn);
    } else {
      // The block has a return.
      MachineBasicBlock::iterator It = MBB.getFirstTerminator();
      assert(It->isReturn() && std::next(It) == MBB.end());
      unsigned RetOpc;
      if (LongCalls)
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT_PIC
                       : Hexagon::RESTORE_DEALLOC_RET_JMP_V4_EXT;
      else
        RetOpc = IsPIC ? Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC
                       : Hexagon::RESTORE_DEALLOC_RET_JMP_V4;
      DeallocCall = BuildMI(MBB, It, DL, HII.get(RetOpc))
          .addExternalSymbol(RestoreFn);
      // Transfer the function live-out registers.
      DeallocCall->copyImplicitOps(MF, *It);
    }
    addCalleeSaveRegistersAsImpOperand(DeallocCall, CSI, true, false);
    return true;
  }

  // Inline restore: one load per callee-saved register.
  for (unsigned i = 0; i < CSI.size(); ++i) {
    unsigned Reg = CSI[i].getReg();
    const TargetRegisterClass *RC = HRI.getMinimalPhysRegClass(Reg);
    int FI = CSI[i].getFrameIdx();
    HII.loadRegFromStackSlot(MBB, MI, Reg, FI, RC, &HRI);
  }

  return true;
}
1341
1342MachineBasicBlock::iterator HexagonFrameLowering::eliminateCallFramePseudoInstr(
1343    MachineFunction &MF, MachineBasicBlock &MBB,
1344    MachineBasicBlock::iterator I) const {
1345  MachineInstr &MI = *I;
1346  unsigned Opc = MI.getOpcode();
1347  (void)Opc; // Silence compiler warning.
1348  assert((Opc == Hexagon::ADJCALLSTACKDOWN || Opc == Hexagon::ADJCALLSTACKUP) &&
1349         "Cannot handle this call frame pseudo instruction");
1350  return MBB.erase(I);
1351}
1352
/// Runs after frame objects are created but before their final layout.
/// When the function both realigns its stack and uses alloca, spill slots
/// are pinned into the local (FP-relative) allocation block so they remain
/// addressable without the aligned-stack base pointer (AP).
void HexagonFrameLowering::processFunctionBeforeFrameFinalized(
    MachineFunction &MF, RegScavenger *RS) const {
  // If this function has uses aligned stack and also has variable sized stack
  // objects, then we need to map all spill slots to fixed positions, so that
  // they can be accessed through FP. Otherwise they would have to be accessed
  // via AP, which may not be available at the particular place in the program.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  bool HasAlloca = MFI.hasVarSizedObjects();
  bool NeedsAlign = (MFI.getMaxAlignment() > getStackAlignment());

  if (!HasAlloca || !NeedsAlign)
    return;

  // Slots whose recorded alignment is clamped below; the memory operands
  // referring to them are patched in the loop further down.
  SmallSet<int, 4> DealignSlots;
  unsigned LFS = MFI.getLocalFrameSize();
  for (int i = 0, e = MFI.getObjectIndexEnd(); i != e; ++i) {
    if (!MFI.isSpillSlotObjectIndex(i) || MFI.isDeadObjectIndex(i))
      continue;
    unsigned S = MFI.getObjectSize(i);
    // Reduce the alignment to at most 8. This will require unaligned vector
    // stores if they happen here.
    // Note: A (used for laying the slot out) keeps the original alignment,
    // raised to at least 8; only the alignment recorded on the object (and
    // thus assumed by accesses) is clamped to 8.
    unsigned A = std::max(MFI.getObjectAlignment(i), 8U);
    MFI.setObjectAlignment(i, 8);
    LFS = alignTo(LFS+S, A);
    // Local-frame offsets are negative (they grow downward from FP).
    MFI.mapLocalFrameObject(i, -static_cast<int64_t>(LFS));
    DealignSlots.insert(i);
  }

  MFI.setLocalFrameSize(LFS);
  Align A = MFI.getLocalFrameMaxAlign();
  assert(A <= 8 && "Unexpected local frame alignment");
  if (A == 1)
    MFI.setLocalFrameMaxAlign(Align(8));
  MFI.setUseLocalStackAllocationBlock(true);

  // Go over all MachineMemOperands in the code, and change the ones that
  // refer to the dealigned stack slots to reflect the new alignment.
  if (!DealignSlots.empty()) {
    for (MachineBasicBlock &BB : MF) {
      for (MachineInstr &MI : BB) {
        bool KeepOld = true;
        ArrayRef<MachineMemOperand*> memops = MI.memoperands();
        SmallVector<MachineMemOperand*,1> new_memops;
        for (MachineMemOperand *MMO : memops) {
          auto *PV = MMO->getPseudoValue();
          if (auto *FS = dyn_cast_or_null<FixedStackPseudoSourceValue>(PV)) {
            int FI = FS->getFrameIndex();
            if (DealignSlots.count(FI)) {
              // Rebuild the operand with the (reduced) object alignment;
              // all other attributes are carried over unchanged.
              unsigned A = MFI.getObjectAlignment(FI);
              auto *NewMMO = MF.getMachineMemOperand(MMO->getPointerInfo(),
                                MMO->getFlags(), MMO->getSize(), A,
                                MMO->getAAInfo(), MMO->getRanges(),
                                MMO->getSyncScopeID(), MMO->getOrdering(),
                                MMO->getFailureOrdering());
              new_memops.push_back(NewMMO);
              KeepOld = false;
              continue;
            }
          }
          new_memops.push_back(MMO);
        }
        // Only rewrite the memref list if at least one operand changed.
        if (!KeepOld)
          MI.setMemRefs(MF, new_memops);
      }
    }
  }

  // Set the physical aligned-stack base address register.
  // AP stays 0 if no aligna instruction exists in this function.
  unsigned AP = 0;
  if (const MachineInstr *AI = getAlignaInstr(MF))
    AP = AI->getOperand(0).getReg();
  auto &HMFI = *MF.getInfo<HexagonMachineFunctionInfo>();
  HMFI.setStackAlignBasePhysReg(AP);
}
1427
1428/// Returns true if there are no caller-saved registers available in class RC.
1429static bool needToReserveScavengingSpillSlots(MachineFunction &MF,
1430      const HexagonRegisterInfo &HRI, const TargetRegisterClass *RC) {
1431  MachineRegisterInfo &MRI = MF.getRegInfo();
1432
1433  auto IsUsed = [&HRI,&MRI] (unsigned Reg) -> bool {
1434    for (MCRegAliasIterator AI(Reg, &HRI, true); AI.isValid(); ++AI)
1435      if (MRI.isPhysRegUsed(*AI))
1436        return true;
1437    return false;
1438  };
1439
1440  // Check for an unused caller-saved register. Callee-saved registers
1441  // have become pristine by now.
1442  for (const MCPhysReg *P = HRI.getCallerSavedRegs(&MF, RC); *P; ++P)
1443    if (!IsUsed(*P))
1444      return false;
1445
1446  // All caller-saved registers are used.
1447  return true;
1448}
1449
1450#ifndef NDEBUG
1451static void dump_registers(BitVector &Regs, const TargetRegisterInfo &TRI) {
1452  dbgs() << '{';
1453  for (int x = Regs.find_first(); x >= 0; x = Regs.find_next(x)) {
1454    unsigned R = x;
1455    dbgs() << ' ' << printReg(R, &TRI);
1456  }
1457  dbgs() << " }";
1458}
1459#endif
1460
/// Rebuild CSI with a canonical set of callee-saved registers and assign
/// each a fixed stack slot. Registers are first expanded to sub-registers,
/// then coalesced back into maximal super-registers (so pairs are saved
/// whole where legal). Returns true so PEI does not redo slot assignment.
bool HexagonFrameLowering::assignCalleeSavedSpillSlots(MachineFunction &MF,
      const TargetRegisterInfo *TRI, std::vector<CalleeSavedInfo> &CSI) const {
  LLVM_DEBUG(dbgs() << __func__ << " on " << MF.getName() << '\n');
  MachineFrameInfo &MFI = MF.getFrameInfo();
  BitVector SRegs(Hexagon::NUM_TARGET_REGS);

  // Generate a set of unique, callee-saved registers (SRegs), where each
  // register in the set is maximal in terms of sub-/super-register relation,
  // i.e. for each R in SRegs, no proper super-register of R is also in SRegs.

  // (1) For each callee-saved register, add that register and all of its
  // sub-registers to SRegs.
  LLVM_DEBUG(dbgs() << "Initial CS registers: {");
  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
    unsigned R = CSI[i].getReg();
    LLVM_DEBUG(dbgs() << ' ' << printReg(R, TRI));
    for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
      SRegs[*SR] = true;
  }
  LLVM_DEBUG(dbgs() << " }\n");
  LLVM_DEBUG(dbgs() << "SRegs.1: "; dump_registers(SRegs, *TRI);
             dbgs() << "\n");

  // (2) For each reserved register, remove that register and all of its
  // sub- and super-registers from SRegs.
  BitVector Reserved = TRI->getReservedRegs(MF);
  for (int x = Reserved.find_first(); x >= 0; x = Reserved.find_next(x)) {
    unsigned R = x;
    for (MCSuperRegIterator SR(R, TRI, true); SR.isValid(); ++SR)
      SRegs[*SR] = false;
  }
  LLVM_DEBUG(dbgs() << "Res:     "; dump_registers(Reserved, *TRI);
             dbgs() << "\n");
  LLVM_DEBUG(dbgs() << "SRegs.2: "; dump_registers(SRegs, *TRI);
             dbgs() << "\n");

  // (3) Collect all registers that have at least one sub-register in SRegs,
  // and also have no sub-registers that are reserved. These will be the can-
  // didates for saving as a whole instead of their individual sub-registers.
  // (Saving R17:16 instead of R16 is fine, but only if R17 was not reserved.)
  BitVector TmpSup(Hexagon::NUM_TARGET_REGS);
  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
    unsigned R = x;
    for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR)
      TmpSup[*SR] = true;
  }
  for (int x = TmpSup.find_first(); x >= 0; x = TmpSup.find_next(x)) {
    unsigned R = x;
    for (MCSubRegIterator SR(R, TRI, true); SR.isValid(); ++SR) {
      if (!Reserved[*SR])
        continue;
      TmpSup[R] = false;
      break;
    }
  }
  LLVM_DEBUG(dbgs() << "TmpSup:  "; dump_registers(TmpSup, *TRI);
             dbgs() << "\n");

  // (4) Include all super-registers found in (3) into SRegs.
  SRegs |= TmpSup;
  LLVM_DEBUG(dbgs() << "SRegs.4: "; dump_registers(SRegs, *TRI);
             dbgs() << "\n");

  // (5) For each register R in SRegs, if any super-register of R is in SRegs,
  // remove R from SRegs.
  // Clearing bits while scanning with find_next is safe here: only the
  // current bit R is cleared, never a later one.
  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
    unsigned R = x;
    for (MCSuperRegIterator SR(R, TRI); SR.isValid(); ++SR) {
      if (!SRegs[*SR])
        continue;
      SRegs[R] = false;
      break;
    }
  }
  LLVM_DEBUG(dbgs() << "SRegs.5: "; dump_registers(SRegs, *TRI);
             dbgs() << "\n");

  // Now, for each register that has a fixed stack slot, create the stack
  // object for it.
  CSI.clear();

  using SpillSlot = TargetFrameLowering::SpillSlot;

  unsigned NumFixed;
  int MinOffset = 0;  // CS offsets are negative.
  const SpillSlot *FixedSlots = getCalleeSavedSpillSlots(NumFixed);
  for (const SpillSlot *S = FixedSlots; S != FixedSlots+NumFixed; ++S) {
    if (!SRegs[S->Reg])
      continue;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
    int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
    MinOffset = std::min(MinOffset, S->Offset);
    CSI.push_back(CalleeSavedInfo(S->Reg, FI));
    SRegs[S->Reg] = false;
  }

  // There can be some registers that don't have fixed slots. For example,
  // we need to store R0-R3 in functions with exception handling. For each
  // such register, create a non-fixed stack object.
  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
    unsigned R = x;
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
    unsigned Size = TRI->getSpillSize(*RC);
    int Off = MinOffset - Size;
    // Place each slot below everything allocated so far, rounded down to
    // the spill alignment (capped at the stack alignment).
    unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment());
    assert(isPowerOf2_32(Align));
    Off &= -Align;
    int FI = MFI.CreateFixedSpillStackObject(Size, Off);
    MinOffset = std::min(MinOffset, Off);
    CSI.push_back(CalleeSavedInfo(R, FI));
    SRegs[R] = false;
  }

  LLVM_DEBUG({
    dbgs() << "CS information: {";
    for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
      int FI = CSI[i].getFrameIdx();
      int Off = MFI.getObjectOffset(FI);
      dbgs() << ' ' << printReg(CSI[i].getReg(), TRI) << ":fi#" << FI << ":sp";
      if (Off >= 0)
        dbgs() << '+';
      dbgs() << Off;
    }
    dbgs() << " }\n";
  });

#ifndef NDEBUG
  // Verify that all registers were handled.
  bool MissedReg = false;
  for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
    unsigned R = x;
    dbgs() << printReg(R, TRI) << ' ';
    MissedReg = true;
  }
  if (MissedReg)
    llvm_unreachable("...there are unhandled callee-saved registers!");
#endif

  return true;
}
1601
1602bool HexagonFrameLowering::expandCopy(MachineBasicBlock &B,
1603      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1604      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1605  MachineInstr *MI = &*It;
1606  DebugLoc DL = MI->getDebugLoc();
1607  Register DstR = MI->getOperand(0).getReg();
1608  Register SrcR = MI->getOperand(1).getReg();
1609  if (!Hexagon::ModRegsRegClass.contains(DstR) ||
1610      !Hexagon::ModRegsRegClass.contains(SrcR))
1611    return false;
1612
1613  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1614  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), TmpR).add(MI->getOperand(1));
1615  BuildMI(B, It, DL, HII.get(TargetOpcode::COPY), DstR)
1616    .addReg(TmpR, RegState::Kill);
1617
1618  NewRegs.push_back(TmpR);
1619  B.erase(It);
1620  return true;
1621}
1622
1623bool HexagonFrameLowering::expandStoreInt(MachineBasicBlock &B,
1624      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1625      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1626  MachineInstr *MI = &*It;
1627  if (!MI->getOperand(0).isFI())
1628    return false;
1629
1630  DebugLoc DL = MI->getDebugLoc();
1631  unsigned Opc = MI->getOpcode();
1632  Register SrcR = MI->getOperand(2).getReg();
1633  bool IsKill = MI->getOperand(2).isKill();
1634  int FI = MI->getOperand(0).getIndex();
1635
1636  // TmpR = C2_tfrpr SrcR   if SrcR is a predicate register
1637  // TmpR = A2_tfrcrr SrcR  if SrcR is a modifier register
1638  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1639  unsigned TfrOpc = (Opc == Hexagon::STriw_pred) ? Hexagon::C2_tfrpr
1640                                                 : Hexagon::A2_tfrcrr;
1641  BuildMI(B, It, DL, HII.get(TfrOpc), TmpR)
1642    .addReg(SrcR, getKillRegState(IsKill));
1643
1644  // S2_storeri_io FI, 0, TmpR
1645  BuildMI(B, It, DL, HII.get(Hexagon::S2_storeri_io))
1646      .addFrameIndex(FI)
1647      .addImm(0)
1648      .addReg(TmpR, RegState::Kill)
1649      .cloneMemRefs(*MI);
1650
1651  NewRegs.push_back(TmpR);
1652  B.erase(It);
1653  return true;
1654}
1655
1656bool HexagonFrameLowering::expandLoadInt(MachineBasicBlock &B,
1657      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1658      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1659  MachineInstr *MI = &*It;
1660  if (!MI->getOperand(1).isFI())
1661    return false;
1662
1663  DebugLoc DL = MI->getDebugLoc();
1664  unsigned Opc = MI->getOpcode();
1665  Register DstR = MI->getOperand(0).getReg();
1666  int FI = MI->getOperand(1).getIndex();
1667
1668  // TmpR = L2_loadri_io FI, 0
1669  Register TmpR = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
1670  BuildMI(B, It, DL, HII.get(Hexagon::L2_loadri_io), TmpR)
1671      .addFrameIndex(FI)
1672      .addImm(0)
1673      .cloneMemRefs(*MI);
1674
1675  // DstR = C2_tfrrp TmpR   if DstR is a predicate register
1676  // DstR = A2_tfrrcr TmpR  if DstR is a modifier register
1677  unsigned TfrOpc = (Opc == Hexagon::LDriw_pred) ? Hexagon::C2_tfrrp
1678                                                 : Hexagon::A2_tfrrcr;
1679  BuildMI(B, It, DL, HII.get(TfrOpc), DstR)
1680    .addReg(TmpR, RegState::Kill);
1681
1682  NewRegs.push_back(TmpR);
1683  B.erase(It);
1684  return true;
1685}
1686
/// Expand a spill of a vector-predicate register (PS_vstorerq_ai pseudo).
/// There is no direct memory form for Q registers, so the predicate is
/// first expanded into a full HVX vector, and that vector is stored.
bool HexagonFrameLowering::expandStoreVecPred(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineInstr *MI = &*It;
  // Only stores to a stack slot (frame-index operand) are handled.
  if (!MI->getOperand(0).isFI())
    return false;

  DebugLoc DL = MI->getDebugLoc();
  Register SrcR = MI->getOperand(2).getReg();
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  // Insert transfer to general vector register.
  //   TmpR0 = A2_tfrsi 0x01010101
  //   TmpR1 = V6_vandqrt Qx, TmpR0
  //   store FI, 0, TmpR1
  // 0x01010101 is the byte mask consumed by vandqrt; the matching reload
  // in expandLoadVecPred uses the same constant with vandvrt.
  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  Register TmpR1 = MRI.createVirtualRegister(RC);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);

  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandqrt), TmpR1)
    .addReg(SrcR, getKillRegState(IsKill))
    .addReg(TmpR0, RegState::Kill);

  auto *HRI = B.getParent()->getSubtarget<HexagonSubtarget>().getRegisterInfo();
  // The vector store just inserted before It is immediately expanded in
  // place; std::prev(It) addresses it (assumes storeRegToStackSlot emitted
  // a single instruction -- TODO confirm).
  HII.storeRegToStackSlot(B, It, TmpR1, true, FI, RC, HRI);
  expandStoreVec(B, std::prev(It), MRI, HII, NewRegs);

  NewRegs.push_back(TmpR0);
  NewRegs.push_back(TmpR1);
  B.erase(It);
  return true;
}
1723
/// Expand a reload of a vector-predicate register (PS_vloadrq_ai pseudo).
/// The slot holds the expanded vector form (see expandStoreVecPred); it is
/// loaded into an HVX vector and compressed back into a predicate.
bool HexagonFrameLowering::expandLoadVecPred(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineInstr *MI = &*It;
  // Only loads from a stack slot (frame-index operand) are handled.
  if (!MI->getOperand(1).isFI())
    return false;

  DebugLoc DL = MI->getDebugLoc();
  Register DstR = MI->getOperand(0).getReg();
  int FI = MI->getOperand(1).getIndex();
  auto *RC = &Hexagon::HvxVRRegClass;

  // TmpR0 = A2_tfrsi 0x01010101
  // TmpR1 = load FI, 0
  // DstR = V6_vandvrt TmpR1, TmpR0
  // 0x01010101 is the same byte mask used when the predicate was stored.
  Register TmpR0 = MRI.createVirtualRegister(&Hexagon::IntRegsRegClass);
  Register TmpR1 = MRI.createVirtualRegister(RC);

  BuildMI(B, It, DL, HII.get(Hexagon::A2_tfrsi), TmpR0)
    .addImm(0x01010101);
  MachineFunction &MF = *B.getParent();
  auto *HRI = MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  // The vector load just inserted before It is expanded in place;
  // std::prev(It) addresses it (assumes loadRegFromStackSlot emitted a
  // single instruction -- TODO confirm).
  HII.loadRegFromStackSlot(B, It, TmpR1, FI, RC, HRI);
  expandLoadVec(B, std::prev(It), MRI, HII, NewRegs);

  BuildMI(B, It, DL, HII.get(Hexagon::V6_vandvrt), DstR)
    .addReg(TmpR1, RegState::Kill)
    .addReg(TmpR0, RegState::Kill);

  NewRegs.push_back(TmpR0);
  NewRegs.push_back(TmpR1);
  B.erase(It);
  return true;
}
1758
/// Expand a spill of an HVX vector pair (PS_vstorerw_ai pseudo) into one
/// store per live half. Liveness of each half is computed so that an
/// undefined half is not stored at all.
bool HexagonFrameLowering::expandStoreVec2(MachineBasicBlock &B,
      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
  MachineFunction &MF = *B.getParent();
  auto &MFI = MF.getFrameInfo();
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
  MachineInstr *MI = &*It;
  // Only stores to a stack slot (frame-index operand) are handled.
  if (!MI->getOperand(0).isFI())
    return false;

  // It is possible that the double vector being stored is only partially
  // defined. From the point of view of the liveness tracking, it is ok to
  // store it as a whole, but if we break it up we may end up storing a
  // register that is entirely undefined.
  // Step liveness forward from the block entry up to (not including) MI,
  // so LPR reflects which halves are live right before the store.
  LivePhysRegs LPR(HRI);
  LPR.addLiveIns(B);
  SmallVector<std::pair<MCPhysReg, const MachineOperand*>,2> Clobbers;
  for (auto R = B.begin(); R != It; ++R) {
    Clobbers.clear();
    LPR.stepForward(*R, Clobbers);
  }

  DebugLoc DL = MI->getDebugLoc();
  Register SrcR = MI->getOperand(2).getReg();
  Register SrcLo = HRI.getSubReg(SrcR, Hexagon::vsub_lo);
  Register SrcHi = HRI.getSubReg(SrcR, Hexagon::vsub_hi);
  bool IsKill = MI->getOperand(2).isKill();
  int FI = MI->getOperand(0).getIndex();
  bool NeedsAligna = needsAligna(MF);

  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
  unsigned HasAlign = MFI.getObjectAlignment(FI);
  unsigned StoreOpc;

  // Aligned stores are usable only if the slot is sufficiently aligned and
  // the function does not realign its stack dynamically.
  auto UseAligned = [&] (unsigned NeedAlign, unsigned HasAlign) {
    return !NeedsAligna && (NeedAlign <= HasAlign);
  };

  // Store low part (offset 0), only if that half is live here.
  if (LPR.contains(SrcLo)) {
    StoreOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vS32b_ai
                                               : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
        .addFrameIndex(FI)
        .addImm(0)
        .addReg(SrcLo, getKillRegState(IsKill))
        .cloneMemRefs(*MI);
  }

  // Store high part (offset Size), only if that half is live here.
  if (LPR.contains(SrcHi)) {
    StoreOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vS32b_ai
                                               : Hexagon::V6_vS32Ub_ai;
    BuildMI(B, It, DL, HII.get(StoreOpc))
        .addFrameIndex(FI)
        .addImm(Size)
        .addReg(SrcHi, getKillRegState(IsKill))
        .cloneMemRefs(*MI);
  }

  B.erase(It);
  return true;
}
1823
1824bool HexagonFrameLowering::expandLoadVec2(MachineBasicBlock &B,
1825      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1826      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1827  MachineFunction &MF = *B.getParent();
1828  auto &MFI = MF.getFrameInfo();
1829  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1830  MachineInstr *MI = &*It;
1831  if (!MI->getOperand(1).isFI())
1832    return false;
1833
1834  DebugLoc DL = MI->getDebugLoc();
1835  Register DstR = MI->getOperand(0).getReg();
1836  Register DstHi = HRI.getSubReg(DstR, Hexagon::vsub_hi);
1837  Register DstLo = HRI.getSubReg(DstR, Hexagon::vsub_lo);
1838  int FI = MI->getOperand(1).getIndex();
1839  bool NeedsAligna = needsAligna(MF);
1840
1841  unsigned Size = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1842  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1843  unsigned HasAlign = MFI.getObjectAlignment(FI);
1844  unsigned LoadOpc;
1845
1846  auto UseAligned = [&] (unsigned NeedAlign, unsigned HasAlign) {
1847    return !NeedsAligna && (NeedAlign <= HasAlign);
1848  };
1849
1850  // Load low part.
1851  LoadOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vL32b_ai
1852                                            : Hexagon::V6_vL32Ub_ai;
1853  BuildMI(B, It, DL, HII.get(LoadOpc), DstLo)
1854      .addFrameIndex(FI)
1855      .addImm(0)
1856      .cloneMemRefs(*MI);
1857
1858  // Load high part.
1859  LoadOpc = UseAligned(NeedAlign, HasAlign) ? Hexagon::V6_vL32b_ai
1860                                            : Hexagon::V6_vL32Ub_ai;
1861  BuildMI(B, It, DL, HII.get(LoadOpc), DstHi)
1862      .addFrameIndex(FI)
1863      .addImm(Size)
1864      .cloneMemRefs(*MI);
1865
1866  B.erase(It);
1867  return true;
1868}
1869
1870bool HexagonFrameLowering::expandStoreVec(MachineBasicBlock &B,
1871      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1872      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1873  MachineFunction &MF = *B.getParent();
1874  auto &MFI = MF.getFrameInfo();
1875  MachineInstr *MI = &*It;
1876  if (!MI->getOperand(0).isFI())
1877    return false;
1878
1879  bool NeedsAligna = needsAligna(MF);
1880  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1881  DebugLoc DL = MI->getDebugLoc();
1882  Register SrcR = MI->getOperand(2).getReg();
1883  bool IsKill = MI->getOperand(2).isKill();
1884  int FI = MI->getOperand(0).getIndex();
1885
1886  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1887  unsigned HasAlign = MFI.getObjectAlignment(FI);
1888  bool UseAligned = !NeedsAligna && (NeedAlign <= HasAlign);
1889  unsigned StoreOpc = UseAligned ? Hexagon::V6_vS32b_ai
1890                                 : Hexagon::V6_vS32Ub_ai;
1891  BuildMI(B, It, DL, HII.get(StoreOpc))
1892      .addFrameIndex(FI)
1893      .addImm(0)
1894      .addReg(SrcR, getKillRegState(IsKill))
1895      .cloneMemRefs(*MI);
1896
1897  B.erase(It);
1898  return true;
1899}
1900
1901bool HexagonFrameLowering::expandLoadVec(MachineBasicBlock &B,
1902      MachineBasicBlock::iterator It, MachineRegisterInfo &MRI,
1903      const HexagonInstrInfo &HII, SmallVectorImpl<unsigned> &NewRegs) const {
1904  MachineFunction &MF = *B.getParent();
1905  auto &MFI = MF.getFrameInfo();
1906  MachineInstr *MI = &*It;
1907  if (!MI->getOperand(1).isFI())
1908    return false;
1909
1910  bool NeedsAligna = needsAligna(MF);
1911  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
1912  DebugLoc DL = MI->getDebugLoc();
1913  Register DstR = MI->getOperand(0).getReg();
1914  int FI = MI->getOperand(1).getIndex();
1915
1916  unsigned NeedAlign = HRI.getSpillAlignment(Hexagon::HvxVRRegClass);
1917  unsigned HasAlign = MFI.getObjectAlignment(FI);
1918  bool UseAligned = !NeedsAligna && (NeedAlign <= HasAlign);
1919  unsigned LoadOpc = UseAligned ? Hexagon::V6_vL32b_ai
1920                                : Hexagon::V6_vL32Ub_ai;
1921  BuildMI(B, It, DL, HII.get(LoadOpc), DstR)
1922      .addFrameIndex(FI)
1923      .addImm(0)
1924      .cloneMemRefs(*MI);
1925
1926  B.erase(It);
1927  return true;
1928}
1929
1930bool HexagonFrameLowering::expandSpillMacros(MachineFunction &MF,
1931      SmallVectorImpl<unsigned> &NewRegs) const {
1932  auto &HII = *MF.getSubtarget<HexagonSubtarget>().getInstrInfo();
1933  MachineRegisterInfo &MRI = MF.getRegInfo();
1934  bool Changed = false;
1935
1936  for (auto &B : MF) {
1937    // Traverse the basic block.
1938    MachineBasicBlock::iterator NextI;
1939    for (auto I = B.begin(), E = B.end(); I != E; I = NextI) {
1940      MachineInstr *MI = &*I;
1941      NextI = std::next(I);
1942      unsigned Opc = MI->getOpcode();
1943
1944      switch (Opc) {
1945        case TargetOpcode::COPY:
1946          Changed |= expandCopy(B, I, MRI, HII, NewRegs);
1947          break;
1948        case Hexagon::STriw_pred:
1949        case Hexagon::STriw_ctr:
1950          Changed |= expandStoreInt(B, I, MRI, HII, NewRegs);
1951          break;
1952        case Hexagon::LDriw_pred:
1953        case Hexagon::LDriw_ctr:
1954          Changed |= expandLoadInt(B, I, MRI, HII, NewRegs);
1955          break;
1956        case Hexagon::PS_vstorerq_ai:
1957          Changed |= expandStoreVecPred(B, I, MRI, HII, NewRegs);
1958          break;
1959        case Hexagon::PS_vloadrq_ai:
1960          Changed |= expandLoadVecPred(B, I, MRI, HII, NewRegs);
1961          break;
1962        case Hexagon::PS_vloadrw_ai:
1963          Changed |= expandLoadVec2(B, I, MRI, HII, NewRegs);
1964          break;
1965        case Hexagon::PS_vstorerw_ai:
1966          Changed |= expandStoreVec2(B, I, MRI, HII, NewRegs);
1967          break;
1968      }
1969    }
1970  }
1971
1972  return Changed;
1973}
1974
/// Determine which callee-saved registers must be saved, expand spill
/// pseudos, and reserve emergency spill slots for the register scavenger
/// where needed.
void HexagonFrameLowering::determineCalleeSaves(MachineFunction &MF,
                                                BitVector &SavedRegs,
                                                RegScavenger *RS) const {
  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();

  SavedRegs.resize(HRI.getNumRegs());

  // If we have a function containing __builtin_eh_return we want to spill and
  // restore all callee saved registers. Pretend that they are used.
  if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
    for (const MCPhysReg *R = HRI.getCalleeSavedRegs(&MF); *R; ++R)
      SavedRegs.set(*R);

  // Replace predicate register pseudo spill code.
  // NewRegs collects the virtual registers created by the expansions; each
  // register class appearing in it may need a scavenging slot below.
  SmallVector<unsigned,8> NewRegs;
  expandSpillMacros(MF, NewRegs);
  if (OptimizeSpillSlots && !isOptNone(MF))
    optimizeSpillSlots(MF, NewRegs);

  // We need to reserve a spill slot if scavenging could potentially require
  // spilling a scavenged register.
  if (!NewRegs.empty() || mayOverflowFrameOffset(MF)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    MachineRegisterInfo &MRI = MF.getRegInfo();
    SetVector<const TargetRegisterClass*> SpillRCs;
    // Reserve an int register in any case, because it could be used to hold
    // the stack offset in case it does not fit into a spill instruction.
    SpillRCs.insert(&Hexagon::IntRegsRegClass);

    for (unsigned VR : NewRegs)
      SpillRCs.insert(MRI.getRegClass(VR));

    for (auto *RC : SpillRCs) {
      if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
        continue;
      unsigned Num = 1;
      switch (RC->getID()) {
        case Hexagon::IntRegsRegClassID:
          Num = NumberScavengerSlots;
          break;
        case Hexagon::HvxQRRegClassID:
          Num = 2; // Vector predicate spills also need a vector register.
          break;
      }
      unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
      // NOTE(review): RS is dereferenced unconditionally here; assumes the
      // scavenger is always provided for Hexagon -- verify against caller.
      for (unsigned i = 0; i < Num; i++) {
        int NewFI = MFI.CreateSpillStackObject(S, A);
        RS->addScavengingFrameIndex(NewFI);
      }
    }
  }

  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
}
2029
2030unsigned HexagonFrameLowering::findPhysReg(MachineFunction &MF,
2031      HexagonBlockRanges::IndexRange &FIR,
2032      HexagonBlockRanges::InstrIndexMap &IndexMap,
2033      HexagonBlockRanges::RegToRangeMap &DeadMap,
2034      const TargetRegisterClass *RC) const {
2035  auto &HRI = *MF.getSubtarget<HexagonSubtarget>().getRegisterInfo();
2036  auto &MRI = MF.getRegInfo();
2037
2038  auto isDead = [&FIR,&DeadMap] (unsigned Reg) -> bool {
2039    auto F = DeadMap.find({Reg,0});
2040    if (F == DeadMap.end())
2041      return false;
2042    for (auto &DR : F->second)
2043      if (DR.contains(FIR))
2044        return true;
2045    return false;
2046  };
2047
2048  for (unsigned Reg : RC->getRawAllocationOrder(MF)) {
2049    bool Dead = true;
2050    for (auto R : HexagonBlockRanges::expandToSubRegs({Reg,0}, MRI, HRI)) {
2051      if (isDead(R.Reg))
2052        continue;
2053      Dead = false;
2054      break;
2055    }
2056    if (Dead)
2057      return Reg;
2058  }
2059  return 0;
2060}
2061
2062void HexagonFrameLowering::optimizeSpillSlots(MachineFunction &MF,
2063      SmallVectorImpl<unsigned> &VRegs) const {
2064  auto &HST = MF.getSubtarget<HexagonSubtarget>();
2065  auto &HII = *HST.getInstrInfo();
2066  auto &HRI = *HST.getRegisterInfo();
2067  auto &MRI = MF.getRegInfo();
2068  HexagonBlockRanges HBR(MF);
2069
2070  using BlockIndexMap =
2071      std::map<MachineBasicBlock *, HexagonBlockRanges::InstrIndexMap>;
2072  using BlockRangeMap =
2073      std::map<MachineBasicBlock *, HexagonBlockRanges::RangeList>;
2074  using IndexType = HexagonBlockRanges::IndexType;
2075
2076  struct SlotInfo {
2077    BlockRangeMap Map;
2078    unsigned Size = 0;
2079    const TargetRegisterClass *RC = nullptr;
2080
2081    SlotInfo() = default;
2082  };
2083
2084  BlockIndexMap BlockIndexes;
2085  SmallSet<int,4> BadFIs;
2086  std::map<int,SlotInfo> FIRangeMap;
2087
2088  // Accumulate register classes: get a common class for a pre-existing
2089  // class HaveRC and a new class NewRC. Return nullptr if a common class
2090  // cannot be found, otherwise return the resulting class. If HaveRC is
2091  // nullptr, assume that it is still unset.
2092  auto getCommonRC =
2093      [](const TargetRegisterClass *HaveRC,
2094         const TargetRegisterClass *NewRC) -> const TargetRegisterClass * {
2095    if (HaveRC == nullptr || HaveRC == NewRC)
2096      return NewRC;
2097    // Different classes, both non-null. Pick the more general one.
2098    if (HaveRC->hasSubClassEq(NewRC))
2099      return HaveRC;
2100    if (NewRC->hasSubClassEq(HaveRC))
2101      return NewRC;
2102    return nullptr;
2103  };
2104
2105  // Scan all blocks in the function. Check all occurrences of frame indexes,
2106  // and collect relevant information.
2107  for (auto &B : MF) {
2108    std::map<int,IndexType> LastStore, LastLoad;
2109    // Emplace appears not to be supported in gcc 4.7.2-4.
2110    //auto P = BlockIndexes.emplace(&B, HexagonBlockRanges::InstrIndexMap(B));
2111    auto P = BlockIndexes.insert(
2112                std::make_pair(&B, HexagonBlockRanges::InstrIndexMap(B)));
2113    auto &IndexMap = P.first->second;
2114    LLVM_DEBUG(dbgs() << "Index map for " << printMBBReference(B) << "\n"
2115                      << IndexMap << '\n');
2116
2117    for (auto &In : B) {
2118      int LFI, SFI;
2119      bool Load = HII.isLoadFromStackSlot(In, LFI) && !HII.isPredicated(In);
2120      bool Store = HII.isStoreToStackSlot(In, SFI) && !HII.isPredicated(In);
2121      if (Load && Store) {
2122        // If it's both a load and a store, then we won't handle it.
2123        BadFIs.insert(LFI);
2124        BadFIs.insert(SFI);
2125        continue;
2126      }
2127      // Check for register classes of the register used as the source for
2128      // the store, and the register used as the destination for the load.
2129      // Also, only accept base+imm_offset addressing modes. Other addressing
2130      // modes can have side-effects (post-increments, etc.). For stack
2131      // slots they are very unlikely, so there is not much loss due to
2132      // this restriction.
2133      if (Load || Store) {
2134        int TFI = Load ? LFI : SFI;
2135        unsigned AM = HII.getAddrMode(In);
2136        SlotInfo &SI = FIRangeMap[TFI];
2137        bool Bad = (AM != HexagonII::BaseImmOffset);
2138        if (!Bad) {
2139          // If the addressing mode is ok, check the register class.
2140          unsigned OpNum = Load ? 0 : 2;
2141          auto *RC = HII.getRegClass(In.getDesc(), OpNum, &HRI, MF);
2142          RC = getCommonRC(SI.RC, RC);
2143          if (RC == nullptr)
2144            Bad = true;
2145          else
2146            SI.RC = RC;
2147        }
2148        if (!Bad) {
2149          // Check sizes.
2150          unsigned S = HII.getMemAccessSize(In);
2151          if (SI.Size != 0 && SI.Size != S)
2152            Bad = true;
2153          else
2154            SI.Size = S;
2155        }
2156        if (!Bad) {
2157          for (auto *Mo : In.memoperands()) {
2158            if (!Mo->isVolatile() && !Mo->isAtomic())
2159              continue;
2160            Bad = true;
2161            break;
2162          }
2163        }
2164        if (Bad)
2165          BadFIs.insert(TFI);
2166      }
2167
2168      // Locate uses of frame indices.
2169      for (unsigned i = 0, n = In.getNumOperands(); i < n; ++i) {
2170        const MachineOperand &Op = In.getOperand(i);
2171        if (!Op.isFI())
2172          continue;
2173        int FI = Op.getIndex();
2174        // Make sure that the following operand is an immediate and that
2175        // it is 0. This is the offset in the stack object.
2176        if (i+1 >= n || !In.getOperand(i+1).isImm() ||
2177            In.getOperand(i+1).getImm() != 0)
2178          BadFIs.insert(FI);
2179        if (BadFIs.count(FI))
2180          continue;
2181
2182        IndexType Index = IndexMap.getIndex(&In);
2183        if (Load) {
2184          if (LastStore[FI] == IndexType::None)
2185            LastStore[FI] = IndexType::Entry;
2186          LastLoad[FI] = Index;
2187        } else if (Store) {
2188          HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2189          if (LastStore[FI] != IndexType::None)
2190            RL.add(LastStore[FI], LastLoad[FI], false, false);
2191          else if (LastLoad[FI] != IndexType::None)
2192            RL.add(IndexType::Entry, LastLoad[FI], false, false);
2193          LastLoad[FI] = IndexType::None;
2194          LastStore[FI] = Index;
2195        } else {
2196          BadFIs.insert(FI);
2197        }
2198      }
2199    }
2200
2201    for (auto &I : LastLoad) {
2202      IndexType LL = I.second;
2203      if (LL == IndexType::None)
2204        continue;
2205      auto &RL = FIRangeMap[I.first].Map[&B];
2206      IndexType &LS = LastStore[I.first];
2207      if (LS != IndexType::None)
2208        RL.add(LS, LL, false, false);
2209      else
2210        RL.add(IndexType::Entry, LL, false, false);
2211      LS = IndexType::None;
2212    }
2213    for (auto &I : LastStore) {
2214      IndexType LS = I.second;
2215      if (LS == IndexType::None)
2216        continue;
2217      auto &RL = FIRangeMap[I.first].Map[&B];
2218      RL.add(LS, IndexType::None, false, false);
2219    }
2220  }
2221
2222  LLVM_DEBUG({
2223    for (auto &P : FIRangeMap) {
2224      dbgs() << "fi#" << P.first;
2225      if (BadFIs.count(P.first))
2226        dbgs() << " (bad)";
2227      dbgs() << "  RC: ";
2228      if (P.second.RC != nullptr)
2229        dbgs() << HRI.getRegClassName(P.second.RC) << '\n';
2230      else
2231        dbgs() << "<null>\n";
2232      for (auto &R : P.second.Map)
2233        dbgs() << "  " << printMBBReference(*R.first) << " { " << R.second
2234               << "}\n";
2235    }
2236  });
2237
2238  // When a slot is loaded from in a block without being stored to in the
2239  // same block, it is live-on-entry to this block. To avoid CFG analysis,
2240  // consider this slot to be live-on-exit from all blocks.
2241  SmallSet<int,4> LoxFIs;
2242
2243  std::map<MachineBasicBlock*,std::vector<int>> BlockFIMap;
2244
2245  for (auto &P : FIRangeMap) {
2246    // P = pair(FI, map: BB->RangeList)
2247    if (BadFIs.count(P.first))
2248      continue;
2249    for (auto &B : MF) {
2250      auto F = P.second.Map.find(&B);
2251      // F = pair(BB, RangeList)
2252      if (F == P.second.Map.end() || F->second.empty())
2253        continue;
2254      HexagonBlockRanges::IndexRange &IR = F->second.front();
2255      if (IR.start() == IndexType::Entry)
2256        LoxFIs.insert(P.first);
2257      BlockFIMap[&B].push_back(P.first);
2258    }
2259  }
2260
2261  LLVM_DEBUG({
2262    dbgs() << "Block-to-FI map (* -- live-on-exit):\n";
2263    for (auto &P : BlockFIMap) {
2264      auto &FIs = P.second;
2265      if (FIs.empty())
2266        continue;
2267      dbgs() << "  " << printMBBReference(*P.first) << ": {";
2268      for (auto I : FIs) {
2269        dbgs() << " fi#" << I;
2270        if (LoxFIs.count(I))
2271          dbgs() << '*';
2272      }
2273      dbgs() << " }\n";
2274    }
2275  });
2276
2277#ifndef NDEBUG
2278  bool HasOptLimit = SpillOptMax.getPosition();
2279#endif
2280
2281  // eliminate loads, when all loads eliminated, eliminate all stores.
2282  for (auto &B : MF) {
2283    auto F = BlockIndexes.find(&B);
2284    assert(F != BlockIndexes.end());
2285    HexagonBlockRanges::InstrIndexMap &IM = F->second;
2286    HexagonBlockRanges::RegToRangeMap LM = HBR.computeLiveMap(IM);
2287    HexagonBlockRanges::RegToRangeMap DM = HBR.computeDeadMap(IM, LM);
2288    LLVM_DEBUG(dbgs() << printMBBReference(B) << " dead map\n"
2289                      << HexagonBlockRanges::PrintRangeMap(DM, HRI));
2290
2291    for (auto FI : BlockFIMap[&B]) {
2292      if (BadFIs.count(FI))
2293        continue;
2294      LLVM_DEBUG(dbgs() << "Working on fi#" << FI << '\n');
2295      HexagonBlockRanges::RangeList &RL = FIRangeMap[FI].Map[&B];
2296      for (auto &Range : RL) {
2297        LLVM_DEBUG(dbgs() << "--Examining range:" << RL << '\n');
2298        if (!IndexType::isInstr(Range.start()) ||
2299            !IndexType::isInstr(Range.end()))
2300          continue;
2301        MachineInstr &SI = *IM.getInstr(Range.start());
2302        MachineInstr &EI = *IM.getInstr(Range.end());
2303        assert(SI.mayStore() && "Unexpected start instruction");
2304        assert(EI.mayLoad() && "Unexpected end instruction");
2305        MachineOperand &SrcOp = SI.getOperand(2);
2306
2307        HexagonBlockRanges::RegisterRef SrcRR = { SrcOp.getReg(),
2308                                                  SrcOp.getSubReg() };
2309        auto *RC = HII.getRegClass(SI.getDesc(), 2, &HRI, MF);
2310        // The this-> is needed to unconfuse MSVC.
2311        unsigned FoundR = this->findPhysReg(MF, Range, IM, DM, RC);
2312        LLVM_DEBUG(dbgs() << "Replacement reg:" << printReg(FoundR, &HRI)
2313                          << '\n');
2314        if (FoundR == 0)
2315          continue;
2316#ifndef NDEBUG
2317        if (HasOptLimit) {
2318          if (SpillOptCount >= SpillOptMax)
2319            return;
2320          SpillOptCount++;
2321        }
2322#endif
2323
2324        // Generate the copy-in: "FoundR = COPY SrcR" at the store location.
2325        MachineBasicBlock::iterator StartIt = SI.getIterator(), NextIt;
2326        MachineInstr *CopyIn = nullptr;
2327        if (SrcRR.Reg != FoundR || SrcRR.Sub != 0) {
2328          const DebugLoc &DL = SI.getDebugLoc();
2329          CopyIn = BuildMI(B, StartIt, DL, HII.get(TargetOpcode::COPY), FoundR)
2330                       .add(SrcOp);
2331        }
2332
2333        ++StartIt;
2334        // Check if this is a last store and the FI is live-on-exit.
2335        if (LoxFIs.count(FI) && (&Range == &RL.back())) {
2336          // Update store's source register.
2337          if (unsigned SR = SrcOp.getSubReg())
2338            SrcOp.setReg(HRI.getSubReg(FoundR, SR));
2339          else
2340            SrcOp.setReg(FoundR);
2341          SrcOp.setSubReg(0);
2342          // We are keeping this register live.
2343          SrcOp.setIsKill(false);
2344        } else {
2345          B.erase(&SI);
2346          IM.replaceInstr(&SI, CopyIn);
2347        }
2348
2349        auto EndIt = std::next(EI.getIterator());
2350        for (auto It = StartIt; It != EndIt; It = NextIt) {
2351          MachineInstr &MI = *It;
2352          NextIt = std::next(It);
2353          int TFI;
2354          if (!HII.isLoadFromStackSlot(MI, TFI) || TFI != FI)
2355            continue;
2356          Register DstR = MI.getOperand(0).getReg();
2357          assert(MI.getOperand(0).getSubReg() == 0);
2358          MachineInstr *CopyOut = nullptr;
2359          if (DstR != FoundR) {
2360            DebugLoc DL = MI.getDebugLoc();
2361            unsigned MemSize = HII.getMemAccessSize(MI);
2362            assert(HII.getAddrMode(MI) == HexagonII::BaseImmOffset);
2363            unsigned CopyOpc = TargetOpcode::COPY;
2364            if (HII.isSignExtendingLoad(MI))
2365              CopyOpc = (MemSize == 1) ? Hexagon::A2_sxtb : Hexagon::A2_sxth;
2366            else if (HII.isZeroExtendingLoad(MI))
2367              CopyOpc = (MemSize == 1) ? Hexagon::A2_zxtb : Hexagon::A2_zxth;
2368            CopyOut = BuildMI(B, It, DL, HII.get(CopyOpc), DstR)
2369                        .addReg(FoundR, getKillRegState(&MI == &EI));
2370          }
2371          IM.replaceInstr(&MI, CopyOut);
2372          B.erase(It);
2373        }
2374
2375        // Update the dead map.
2376        HexagonBlockRanges::RegisterRef FoundRR = { FoundR, 0 };
2377        for (auto RR : HexagonBlockRanges::expandToSubRegs(FoundRR, MRI, HRI))
2378          DM[RR].subtract(Range);
2379      } // for Range in range list
2380    }
2381  }
2382}
2383
2384void HexagonFrameLowering::expandAlloca(MachineInstr *AI,
2385      const HexagonInstrInfo &HII, unsigned SP, unsigned CF) const {
2386  MachineBasicBlock &MB = *AI->getParent();
2387  DebugLoc DL = AI->getDebugLoc();
2388  unsigned A = AI->getOperand(2).getImm();
2389
2390  // Have
2391  //    Rd  = alloca Rs, #A
2392  //
2393  // If Rs and Rd are different registers, use this sequence:
2394  //    Rd  = sub(r29, Rs)
2395  //    r29 = sub(r29, Rs)
2396  //    Rd  = and(Rd, #-A)    ; if necessary
2397  //    r29 = and(r29, #-A)   ; if necessary
2398  //    Rd  = add(Rd, #CF)    ; CF size aligned to at most A
2399  // otherwise, do
2400  //    Rd  = sub(r29, Rs)
2401  //    Rd  = and(Rd, #-A)    ; if necessary
2402  //    r29 = Rd
2403  //    Rd  = add(Rd, #CF)    ; CF size aligned to at most A
2404
2405  MachineOperand &RdOp = AI->getOperand(0);
2406  MachineOperand &RsOp = AI->getOperand(1);
2407  unsigned Rd = RdOp.getReg(), Rs = RsOp.getReg();
2408
2409  // Rd = sub(r29, Rs)
2410  BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), Rd)
2411      .addReg(SP)
2412      .addReg(Rs);
2413  if (Rs != Rd) {
2414    // r29 = sub(r29, Rs)
2415    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_sub), SP)
2416        .addReg(SP)
2417        .addReg(Rs);
2418  }
2419  if (A > 8) {
2420    // Rd  = and(Rd, #-A)
2421    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), Rd)
2422        .addReg(Rd)
2423        .addImm(-int64_t(A));
2424    if (Rs != Rd)
2425      BuildMI(MB, AI, DL, HII.get(Hexagon::A2_andir), SP)
2426          .addReg(SP)
2427          .addImm(-int64_t(A));
2428  }
2429  if (Rs == Rd) {
2430    // r29 = Rd
2431    BuildMI(MB, AI, DL, HII.get(TargetOpcode::COPY), SP)
2432        .addReg(Rd);
2433  }
2434  if (CF > 0) {
2435    // Rd = add(Rd, #CF)
2436    BuildMI(MB, AI, DL, HII.get(Hexagon::A2_addi), Rd)
2437        .addReg(Rd)
2438        .addImm(CF);
2439  }
2440}
2441
2442bool HexagonFrameLowering::needsAligna(const MachineFunction &MF) const {
2443  const MachineFrameInfo &MFI = MF.getFrameInfo();
2444  if (!MFI.hasVarSizedObjects())
2445    return false;
2446  // Do not check for max stack object alignment here, because the stack
2447  // may not be complete yet. Assume that we will need PS_aligna if there
2448  // are variable-sized objects.
2449  return true;
2450}
2451
2452const MachineInstr *HexagonFrameLowering::getAlignaInstr(
2453      const MachineFunction &MF) const {
2454  for (auto &B : MF)
2455    for (auto &I : B)
2456      if (I.getOpcode() == Hexagon::PS_aligna)
2457        return &I;
2458  return nullptr;
2459}
2460
2461/// Adds all callee-saved registers as implicit uses or defs to the
2462/// instruction.
2463void HexagonFrameLowering::addCalleeSaveRegistersAsImpOperand(MachineInstr *MI,
2464      const CSIVect &CSI, bool IsDef, bool IsKill) const {
2465  // Add the callee-saved registers as implicit uses.
2466  for (auto &R : CSI)
2467    MI->addOperand(MachineOperand::CreateReg(R.getReg(), IsDef, true, IsKill));
2468}
2469
2470/// Determine whether the callee-saved register saves and restores should
2471/// be generated via inline code. If this function returns "true", inline
2472/// code will be generated. If this function returns "false", additional
2473/// checks are performed, which may still lead to the inline code.
2474bool HexagonFrameLowering::shouldInlineCSR(const MachineFunction &MF,
2475      const CSIVect &CSI) const {
2476  if (MF.getInfo<HexagonMachineFunctionInfo>()->hasEHReturn())
2477    return true;
2478  if (!hasFP(MF))
2479    return true;
2480  if (!isOptSize(MF) && !isMinSize(MF))
2481    if (MF.getTarget().getOptLevel() > CodeGenOpt::Default)
2482      return true;
2483
2484  // Check if CSI only has double registers, and if the registers form
2485  // a contiguous block starting from D8.
2486  BitVector Regs(Hexagon::NUM_TARGET_REGS);
2487  for (unsigned i = 0, n = CSI.size(); i < n; ++i) {
2488    unsigned R = CSI[i].getReg();
2489    if (!Hexagon::DoubleRegsRegClass.contains(R))
2490      return true;
2491    Regs[R] = true;
2492  }
2493  int F = Regs.find_first();
2494  if (F != Hexagon::D8)
2495    return true;
2496  while (F >= 0) {
2497    int N = Regs.find_next(F);
2498    if (N >= 0 && N != F+1)
2499      return true;
2500    F = N;
2501  }
2502
2503  return false;
2504}
2505
2506bool HexagonFrameLowering::useSpillFunction(const MachineFunction &MF,
2507      const CSIVect &CSI) const {
2508  if (shouldInlineCSR(MF, CSI))
2509    return false;
2510  unsigned NumCSI = CSI.size();
2511  if (NumCSI <= 1)
2512    return false;
2513
2514  unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs
2515                                     : SpillFuncThreshold;
2516  return Threshold < NumCSI;
2517}
2518
2519bool HexagonFrameLowering::useRestoreFunction(const MachineFunction &MF,
2520      const CSIVect &CSI) const {
2521  if (shouldInlineCSR(MF, CSI))
2522    return false;
2523  // The restore functions do a bit more than just restoring registers.
2524  // The non-returning versions will go back directly to the caller's
2525  // caller, others will clean up the stack frame in preparation for
2526  // a tail call. Using them can still save code size even if only one
2527  // register is getting restores. Make the decision based on -Oz:
2528  // using -Os will use inline restore for a single register.
2529  if (isMinSize(MF))
2530    return true;
2531  unsigned NumCSI = CSI.size();
2532  if (NumCSI <= 1)
2533    return false;
2534
2535  unsigned Threshold = isOptSize(MF) ? SpillFuncThresholdOs-1
2536                                     : SpillFuncThreshold;
2537  return Threshold < NumCSI;
2538}
2539
2540bool HexagonFrameLowering::mayOverflowFrameOffset(MachineFunction &MF) const {
2541  unsigned StackSize = MF.getFrameInfo().estimateStackSize(MF);
2542  auto &HST = MF.getSubtarget<HexagonSubtarget>();
2543  // A fairly simplistic guess as to whether a potential load/store to a
2544  // stack location could require an extra register.
2545  if (HST.useHVXOps() && StackSize > 256)
2546    return true;
2547
2548  // Check if the function has store-immediate instructions that access
2549  // the stack. Since the offset field is not extendable, if the stack
2550  // size exceeds the offset limit (6 bits, shifted), the stores will
2551  // require a new base register.
2552  bool HasImmStack = false;
2553  unsigned MinLS = ~0u;   // Log_2 of the memory access size.
2554
2555  for (const MachineBasicBlock &B : MF) {
2556    for (const MachineInstr &MI : B) {
2557      unsigned LS = 0;
2558      switch (MI.getOpcode()) {
2559        case Hexagon::S4_storeirit_io:
2560        case Hexagon::S4_storeirif_io:
2561        case Hexagon::S4_storeiri_io:
2562          ++LS;
2563          LLVM_FALLTHROUGH;
2564        case Hexagon::S4_storeirht_io:
2565        case Hexagon::S4_storeirhf_io:
2566        case Hexagon::S4_storeirh_io:
2567          ++LS;
2568          LLVM_FALLTHROUGH;
2569        case Hexagon::S4_storeirbt_io:
2570        case Hexagon::S4_storeirbf_io:
2571        case Hexagon::S4_storeirb_io:
2572          if (MI.getOperand(0).isFI())
2573            HasImmStack = true;
2574          MinLS = std::min(MinLS, LS);
2575          break;
2576      }
2577    }
2578  }
2579
2580  if (HasImmStack)
2581    return !isUInt<6>(StackSize >> MinLS);
2582
2583  return false;
2584}
2585