AArch64CallLowering.cpp revision 360784
//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
  : CallLowering(&TLI) {}

namespace {
struct IncomingArgHandler : public CallLowering::ValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : ValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    Register AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
    MIRBuilder.buildFrameIndex(AddrReg, FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
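    // If the value was promoted in its location register, copy at the
    // location type and truncate back down to the value type. A sketch of the
    // resulting MIR, assuming an s8 argument assigned to W0 with AExt
    // (virtual register numbers are illustrative):
    //   %1:_(s32) = COPY $w0
    //   %0:_(s8) = G_TRUNC %1(s32)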
    switch (VA.getLocInfo()) {
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    // FIXME: Get alignment
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
        1);
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  bool isIncomingArgumentHandler() const override { return true; }

  uint64_t StackUsed;
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
    : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
    : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

struct OutgoingArgHandler : public CallLowering::ValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
                     int FPDiff = 0)
      : ValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
        AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
        StackSize(0) {}

  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      Register FIReg = MRI.createGenericVirtualRegister(p0);
      MIRBuilder.buildFrameIndex(FIReg, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg;
    }

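    // For a normal call, the slot address is computed relative to SP at the
    // point of the call. A sketch of the MIR this emits, assuming an 8-byte
    // slot at offset 8 (virtual register numbers are illustrative):
    //   %1:_(p0) = COPY $sp
    //   %2:_(s64) = G_CONSTANT i64 8
    //   %3:_(p0) = G_PTR_ADD %1, %2(s64)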
    Register SPReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildCopy(SPReg, Register(AArch64::SP));

    Register OffsetReg = MRI.createGenericVirtualRegister(s64);
    MIRBuilder.buildConstant(OffsetReg, Offset);

    Register AddrReg = MRI.createGenericVirtualRegister(p0);
    MIRBuilder.buildPtrAdd(AddrReg, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg;
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    if (VA.getLocInfo() == CCValAssign::LocInfo::AExt) {
      Size = VA.getLocVT().getSizeInBits() / 8;
      ValVReg = MIRBuilder.buildAnyExt(LLT::scalar(Size * 8), ValVReg)
                    ->getOperand(0)
                    .getReg();
    }
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, Size, 1);
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool Res;
    if (Info.IsFixed)
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getNextStackOffset();
    return Res;
  }

  MachineInstrBuilder MIB;
  CCAssignFn *AssignFnVarArg;
  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;
  uint64_t StackSize;
};
} // namespace

static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return CallConv == CallingConv::Fast && TailCallOpt;
}

void AArch64CallLowering::splitToValueTypes(
    const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, MachineRegisterInfo &MRI,
    CallingConv::ID CallConv) const {
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);

  if (SplitVTs.size() == 1) {
    // No splitting to do, but we want to replace the original type (e.g. [1 x
    // double] -> double).
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           OrigArg.Flags[0], OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register in the original ArgInfo.
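  // For example, a {double, double} argument arrives here with two f64
  // SplitVTs and two virtual registers, and produces two double ArgInfos.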
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
      OrigArg.Ty, CallConv, false);
  for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
    Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
    SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
                           OrigArg.IsFixed);
    if (NeedsRegBlock)
      SplitArgs.back().Flags[0].setInConsecutiveRegs();
  }

  SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
}

bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                      const Value *Val,
                                      ArrayRef<Register> VRegs,
                                      Register SwiftErrorVReg) const {
  auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
  assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
         "Return value without a vreg");

  bool Success = true;
  if (!VRegs.empty()) {
    MachineFunction &MF = MIRBuilder.getMF();
    const Function &F = MF.getFunction();

    MachineRegisterInfo &MRI = MF.getRegInfo();
    const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
    CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
    auto &DL = F.getParent()->getDataLayout();
    LLVMContext &Ctx = Val->getType()->getContext();

    SmallVector<EVT, 4> SplitEVTs;
    ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
    assert(VRegs.size() == SplitEVTs.size() &&
           "For each split Type there should be exactly one VReg.");

    SmallVector<ArgInfo, 8> SplitArgs;
    CallingConv::ID CC = F.getCallingConv();

    for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
      if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
        LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
        return false;
      }

      Register CurVReg = VRegs[i];
      ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
      setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);

      // i1 is a special case because SDAG i1 true is naturally zero extended
      // when widened using ANYEXT. We need to do it explicitly here.
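      // A sketch of what this emits for `ret i1 %v` (virtual register numbers
      // are illustrative):
      //   %1:_(s8) = G_ZEXT %0(s1)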
      if (MRI.getType(CurVReg).getSizeInBits() == 1) {
        CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
      } else {
        // Some types will need extending as specified by the CC.
        MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
        if (EVT(NewVT) != SplitEVTs[i]) {
          unsigned ExtendOp = TargetOpcode::G_ANYEXT;
          if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                             Attribute::SExt))
            ExtendOp = TargetOpcode::G_SEXT;
          else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                                  Attribute::ZExt))
            ExtendOp = TargetOpcode::G_ZEXT;

          LLT NewLLT(NewVT);
          LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
          CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
          // Instead of an extend, we might have a vector type which needs
          // padding with more elements, e.g. <2 x half> -> <4 x half>.
          if (NewVT.isVector()) {
            if (OldLLT.isVector()) {
              if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                // We don't handle VA types which are not exactly twice the
                // size, but this can easily be done in the future.
                if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
                  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
                  return false;
                }
                auto Undef = MIRBuilder.buildUndef({OldLLT});
                CurVReg =
                    MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef.getReg(0)})
                        .getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() == 2) {
              // We need to pad a <1 x S> type to <2 x S>. Since we don't have
              // <1 x S> vector types in GISel we use a build_vector instead
              // of a vector merge/concat.
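              // A sketch for an s16 value padded to <2 x s16> (virtual
              // register numbers are illustrative):
              //   %1:_(s16) = G_IMPLICIT_DEF
              //   %2:_(<2 x s16>) = G_BUILD_VECTOR %0(s16), %1(s16)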
              auto Undef = MIRBuilder.buildUndef({OldLLT});
              CurVReg =
                  MIRBuilder
                      .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty");
              return false;
            }
          } else {
            // A scalar extend.
            CurVReg =
                MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
    }

    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
    Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}

/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
/// can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    unsigned X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}

bool AArch64CallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();

  SmallVector<ArgInfo, 8> SplitArgs;
  unsigned i = 0;
  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    ArgInfo OrigArg{VRegs[i], Arg.getType()};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);

    splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
    ++i;
  }

  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);

  FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  uint64_t StackOffset = Handler.StackUsed;
  if (F.isVarArg()) {
    auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
    if (!Subtarget.isTargetDarwin()) {
      // FIXME: we need to reimplement saveVarArgsRegisters from
      // AArch64ISelLowering.
      return false;
    }

    // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
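    // For example, with 12 bytes of named-argument stack use, the first
    // vararg slot lands at offset 16 under LP64 but stays at 12 under ILP32.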
    StackOffset = alignTo(Handler.StackUsed, Subtarget.isTargetILP32() ? 4 : 8);

    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
  }

  if (doesCalleeRestoreStack(F.getCallingConv(),
                             MF.getTarget().Options.GuaranteedTailCallOpt)) {
    // We have a non-standard ABI, so why not make full use of the stack that
    // we're going to pop? It must be aligned to 16 B in any case.
    StackOffset = alignTo(StackOffset, 16);

    // If we're expected to restore the stack (e.g. fastcc), then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackOffset);

    // Our own callers will guarantee that the space is free by giving an
    // aligned value to CALLSEQ_START.
  }

  // When we tail call, we need to check if the callee's arguments
  // will fit on the caller's stack. So, whenever we lower formal arguments,
  // we should keep track of this information, since we might lower a tail call
  // in this function later.
  FuncInfo->setBytesInStackArgArea(StackOffset);

  auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
  if (Subtarget.hasCustomCallingConv())
    Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);

  handleMustTailForwardedRegisters(MIRBuilder, AssignFn);

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);

  return true;
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

/// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
/// CC.
static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
  return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
}

bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &InArgs) const {
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();

  // If the calling conventions match, then everything must be the same.
  if (CalleeCC == CallerCC)
    return true;

  // Check if the caller and callee will handle arguments in the same way.
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  CCAssignFn *CalleeAssignFnFixed;
  CCAssignFn *CalleeAssignFnVarArg;
  std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
      getAssignFnsForCC(CalleeCC, TLI);

  CCAssignFn *CallerAssignFnFixed;
  CCAssignFn *CallerAssignFnVarArg;
  std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
      getAssignFnsForCC(CallerCC, TLI);

  if (!resultsCompatible(Info, MF, InArgs, *CalleeAssignFnFixed,
                         *CalleeAssignFnVarArg, *CallerAssignFnFixed,
                         *CallerAssignFnVarArg))
    return false;

  // Make sure that the caller and callee preserve all of the same registers.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
    TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
    TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
  }

  return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
}

bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
    CallLoweringInfo &Info, MachineFunction &MF,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  // If there are no outgoing arguments, then we are done.
  if (OutArgs.empty())
    return true;

  const Function &CallerF = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  // We have outgoing arguments. Make sure that we can tail call with them.
  SmallVector<CCValAssign, 16> OutLocs;
  CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());

  if (!analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg)) {
    LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
    return false;
  }

  // Make sure that they can fit on the caller's stack.
  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
    LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
    return false;
  }

  // Verify that the parameters in callee-saved registers match.
  // TODO: Port this over to CallLowering as general code once swiftself is
  // supported.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned i = 0; i < OutLocs.size(); ++i) {
    auto &ArgLoc = OutLocs[i];
    // If it's not a register, it's fine.
    if (!ArgLoc.isRegLoc()) {
      if (Info.IsVarArg) {
        // Be conservative and disallow variadic memory operands to match SDAG's
        // behaviour.
        // FIXME: If the caller's calling convention is C, then we can
        // potentially use its argument area. However, for cases like fastcc,
        // we can't do anything.
        LLVM_DEBUG(
            dbgs()
            << "... Cannot tail call vararg function with stack arguments\n");
        return false;
      }
      continue;
    }

    Register Reg = ArgLoc.getLocReg();

    // Only look at callee-saved registers.
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;

    LLVM_DEBUG(
        dbgs()
        << "... Call has an argument passed in a callee-saved register.\n");

    // Check if it was copied from.
    ArgInfo &OutInfo = OutArgs[i];

    if (OutInfo.Regs.size() > 1) {
      LLVM_DEBUG(
          dbgs() << "... Cannot handle arguments in multiple registers.\n");
      return false;
    }

    // Check if we copy the register, walking through copies from virtual
    // registers. Note that getDefIgnoringCopies does not ignore copies from
    // physical registers.
    MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
    if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
      LLVM_DEBUG(
          dbgs()
          << "... Parameter was not copied into a VReg, cannot tail call.\n");
      return false;
    }

    // Got a copy. Verify that it's the same as the register we want.
    Register CopyRHS = RegDef->getOperand(1).getReg();
    if (CopyRHS != Reg) {
      LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
                           "VReg, cannot tail call.\n");
      return false;
    }
  }

  return true;
}

bool AArch64CallLowering::isEligibleForTailCallOptimization(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &InArgs,
    SmallVectorImpl<ArgInfo> &OutArgs) const {

  // Must pass all target-independent checks in order to tail call optimize.
  if (!Info.IsTailCall)
    return false;

  CallingConv::ID CalleeCC = Info.CallConv;
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &CallerF = MF.getFunction();

  LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");

  if (Info.SwiftErrorVReg) {
    // TODO: We should handle this.
    // Note that this is also handled by the check for no outgoing arguments.
    // Proactively disabling this though, because the swifterror handling in
    // lowerCall inserts a COPY *after* the location of the call.
    LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
    return false;
  }

  if (!mayTailCallThisCC(CalleeCC)) {
    LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
    return false;
  }

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible (see
  // X86).
  //
  // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
  // it?
  //
  // On Windows, "inreg" attributes signify non-aggregate indirect returns.
  // In this case, it is necessary to save/restore X0 in the callee. Tail
  // call opt interferes with this. So we disable tail call opt when the
  // caller has an argument with "inreg" attribute.
  //
  // FIXME: Check whether the callee also has an "inreg" argument.
  //
  // When the caller has a swifterror argument, we don't want to tail call,
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

static unsigned getCallOpcode(const Function &CallerF, bool IsIndirect,
                              bool IsTailCall) {
  if (!IsTailCall)
    return IsIndirect ? AArch64::BLR : AArch64::BL;

  if (!IsIndirect)
    return AArch64::TCRETURNdi;

  // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
  // x16 or x17.
  if (CallerF.hasFnAttribute("branch-target-enforcement"))
    return AArch64::TCRETURNriBTI;

  return AArch64::TCRETURNri;
}

bool AArch64CallLowering::lowerTailCall(
    MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
    SmallVectorImpl<ArgInfo> &OutArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // True when we're tail calling, but without -tailcallopt.
  bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;

  // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
  // register class. Until we can do that, we should fall back here.
  if (F.hasFnAttribute("branch-target-enforcement")) {
    LLVM_DEBUG(
        dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
    return false;
  }

  // Find out which ABI gets to decide where things go.
  CallingConv::ID CalleeCC = Info.CallConv;
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);

  MachineInstrBuilder CallSeqStart;
  if (!IsSibCall)
    CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  unsigned Opc = getCallOpcode(F, Info.Callee.isReg(), true);
  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Byte offset for the tail call. When we are sibcalling, this will always
  // be 0.
  MIB.addImm(0);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0.
  int FPDiff = 0;

  // This will be 0 for sibcalls, potentially nonzero for tail calls produced
  // by -tailcallopt. For sibcalls, the memory operands for the call are
  // already available in the caller's incoming argument space.
  unsigned NumBytes = 0;
  if (!IsSibCall) {
    // We aren't sibcalling, so we need to compute FPDiff. We need to do this
    // before handling assignments, because FPDiff must be known for memory
    // arguments.
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
    SmallVector<CCValAssign, 16> OutLocs;
    CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
    analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg);

    // The callee will pop the argument stack as a tail call. Thus, we must
    // keep it 16-byte aligned.
    NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // actually shrink the stack.
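    // For example, if the caller reserved 32 bytes of incoming argument space
    // and this call needs 48 bytes, FPDiff is 32 - 48 = -16.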
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  // Do the actual argument marshalling.
  SmallVector<unsigned, 8> PhysRegs;
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, true, FPDiff);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call for
    // the forwarded registers that we *aren't* passing as parameters. This will
    // preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, MRI, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
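    // For example, for a call to void f(i1 %b), the caller passes %b
    // zero-extended rather than relying on the callee to mask it.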
    if (OrigArg.Ty->isIntegerTy(1))
      OutArgs.back().Flags[0].setZExt();
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, F.getCallingConv());

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(F, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Do the actual argument marshalling.
  SmallVector<unsigned, 8> PhysRegs;
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, false);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, InArgs, Handler))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Handler.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Handler.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Handler.StackSize)
      .addImm(CalleePopBytes);

  return true;
}