//===-- SIISelLowering.h - SI DAG Lowering Interface ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI DAG Lowering interface definition
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_SIISELLOWERING_H

#include "AMDGPUArgumentUsageInfo.h"
#include "AMDGPUISelLowering.h"
#include "SIInstrInfo.h"

namespace llvm {

class SITargetLowering final : public AMDGPUTargetLowering {
private:
  const GCNSubtarget *Subtarget;

public:
  MVT getRegisterTypeForCallingConv(LLVMContext &Context,
                                    CallingConv::ID CC,
                                    EVT VT) const override;
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  unsigned getVectorTypeBreakdownForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
    unsigned &NumIntermediates, MVT &RegisterVT) const override;

private:
  SDValue lowerKernArgParameterPtr(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Chain, uint64_t Offset) const;
  SDValue getImplicitArgPtr(SelectionDAG &DAG, const SDLoc &SL) const;
  SDValue lowerKernargMemParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                   const SDLoc &SL, SDValue Chain,
                                   uint64_t Offset, unsigned Align, bool Signed,
                                   const ISD::InputArg *Arg = nullptr) const;

  SDValue lowerStackParameter(SelectionDAG &DAG, CCValAssign &VA,
                              const SDLoc &SL, SDValue Chain,
                              const ISD::InputArg &Arg) const;
  SDValue getPreloadedValue(SelectionDAG &DAG,
                            const SIMachineFunctionInfo &MFI,
                            EVT VT,
                            AMDGPUFunctionArgInfo::PreloadedValue) const;

  SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                             SelectionDAG &DAG) const override;
  SDValue lowerImplicitZextParam(SelectionDAG &DAG, SDValue Op,
                                 MVT VT, unsigned Offset) const;
  SDValue lowerImage(SDValue Op, const AMDGPU::ImageDimIntrinsicInfo *Intr,
                     SelectionDAG &DAG) const;
  SDValue lowerSBuffer(EVT VT, SDLoc DL, SDValue Rsrc, SDValue Offset,
                       SDValue GLC, SDValue DLC, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;

  // The raw.tbuffer and struct.tbuffer intrinsics have two offset args: offset
  // (the offset that is included in bounds checking and swizzling, to be split
  // between the instruction's voffset and immoffset fields) and soffset (the
  // offset that is excluded from bounds checking and swizzling, to go in the
  // instruction's soffset field).  This function takes the first kind of
  // offset and figures out how to split it between voffset and immoffset.
  std::pair<SDValue, SDValue> splitBufferOffsets(SDValue Offset,
                                                 SelectionDAG &DAG) const;
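  // Illustrative example (the numbers are hypothetical; the real limit comes
  // from the width of the subtarget's immoffset field): a combined constant
  // offset of 4100 with a 12-bit immediate field could be split into
  // voffset = 4096 and immoffset = 4, since only the part that fits in the
  // immediate field may stay there and the remainder must be materialized
  // in voffset.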

  SDValue widenLoad(LoadSDNode *Ld, DAGCombinerInfo &DCI) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFastUnsafeFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFDIV_FAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV32(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV64(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFDIV(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTrig(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue adjustLoadValueType(unsigned Opcode, MemSDNode *M,
                              SelectionDAG &DAG, ArrayRef<SDValue> Ops,
                              bool IsIntrinsic = false) const;

  SDValue lowerIntrinsicLoad(MemSDNode *M, bool IsFormat, SelectionDAG &DAG,
                             ArrayRef<SDValue> Ops) const;

  // Call DAG.getMemIntrinsicNode for a load, but first widen a dwordx3 type to
  // dwordx4 if on SI.
  SDValue getMemIntrinsicNode(unsigned Opcode, const SDLoc &DL, SDVTList VTList,
                              ArrayRef<SDValue> Ops, EVT MemVT,
                              MachineMemOperand *MMO, SelectionDAG &DAG) const;
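  // Sketch of the intent (not the exact implementation): a dwordx3 request,
  // e.g. MemVT = v3i32, is performed as a v4i32 memory operation on SI and
  // the result is then narrowed back to v3i32, because SI lacks the dwordx3
  // instruction variants.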

  SDValue handleD16VData(SDValue VData, SelectionDAG &DAG) const;

  /// Converts \p Op, which must be of floating point type, to the
  /// floating point type \p VT, by either extending or truncating it.
  SDValue getFPExtOrFPTrunc(SelectionDAG &DAG,
                            SDValue Op,
                            const SDLoc &DL,
                            EVT VT) const;
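  // For example, an f16 Op with VT = MVT::f32 yields an ISD::FP_EXTEND node,
  // while an f32 Op with VT = MVT::f16 yields an ISD::FP_ROUND node.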

  SDValue convertArgType(
    SelectionDAG &DAG, EVT VT, EVT MemVT, const SDLoc &SL, SDValue Val,
    bool Signed, const ISD::InputArg *Arg = nullptr) const;

  /// Custom lowering for ISD::FP_ROUND for MVT::f16.
  SDValue lowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFMINNUM_FMAXNUM(SDValue Op, SelectionDAG &DAG) const;

  SDValue getSegmentAperture(unsigned AS, const SDLoc &DL,
                             SelectionDAG &DAG) const;

  SDValue lowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerTRAP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerDEBUGTRAP(SDValue Op, SelectionDAG &DAG) const;

  SDNode *adjustWritemask(MachineSDNode *&N, SelectionDAG &DAG) const;

  SDValue performUCharToFloatCombine(SDNode *N,
                                     DAGCombinerInfo &DCI) const;
  SDValue performSHLPtrCombine(SDNode *N,
                               unsigned AS,
                               EVT MemVT,
                               DAGCombinerInfo &DCI) const;

  SDValue performMemSDNodeCombine(MemSDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOp(DAGCombinerInfo &DCI, const SDLoc &SL,
                                   unsigned Opc, SDValue LHS,
                                   const ConstantSDNode *CRHS) const;

  SDValue performAndCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performOrCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performXorCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performZeroExtendCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSignExtendInRegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClassCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue getCanonicalConstantFP(SelectionDAG &DAG, const SDLoc &SL, EVT VT,
                                 const APFloat &C) const;
  SDValue performFCanonicalizeCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue performFPMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                  SDValue Op0, SDValue Op1) const;
  SDValue performIntMed3ImmCombine(SelectionDAG &DAG, const SDLoc &SL,
                                   SDValue Op0, SDValue Op1, bool Signed) const;
  SDValue performMinMaxCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMed3Combine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtPkRTZCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performExtractVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performInsertVectorEltCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue reassociateScalarOps(SDNode *N, SelectionDAG &DAG) const;
  unsigned getFusedOpcode(const SelectionDAG &DAG,
                          const SDNode *N0, const SDNode *N1) const;
  SDValue performAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAddCarrySubCarryCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAddCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFSubCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFMACombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSetCCCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCvtF32UByteNCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performClampCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isLegalFlatAddressingMode(const AddrMode &AM) const;
  bool isLegalMUBUFAddressingMode(const AddrMode &AM) const;

  unsigned isCFIntrinsic(const SDNode *Intr) const;

public:
  /// \returns True if a fixup needs to be emitted for the given global value
  /// \p GV, false otherwise.
  bool shouldEmitFixup(const GlobalValue *GV) const;

  /// \returns True if a GOT relocation needs to be emitted for the given
  /// global value \p GV, false otherwise.
  bool shouldEmitGOTReloc(const GlobalValue *GV) const;

  /// \returns True if a PC-relative relocation needs to be emitted for the
  /// given global value \p GV, false otherwise.
  bool shouldEmitPCReloc(const GlobalValue *GV) const;

private:
  /// Analyze a combined offset from an amdgcn_buffer_ intrinsic and store the
  /// three offsets (voffset, soffset and instoffset) into the SDValue[3] array
  /// pointed to by \p Offsets.
  /// \returns 0 if there is a non-constant offset or if the offset is 0.
  /// Otherwise returns the constant offset.
  unsigned setBufferOffsets(SDValue CombinedOffset, SelectionDAG &DAG,
                            SDValue *Offsets, unsigned Align = 4) const;
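  // Illustration of the intended contract (the exact rules live in
  // SIISelLowering.cpp): a constant CombinedOffset is split between soffset
  // and instoffset (voffset becomes zero) and the total constant is
  // returned; a non-constant CombinedOffset is placed in voffset and the
  // function returns 0.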

  // Handle 8-bit and 16-bit buffer loads.
  SDValue handleByteShortBufferLoads(SelectionDAG &DAG, EVT LoadVT, SDLoc DL,
                                     ArrayRef<SDValue> Ops, MemSDNode *M) const;

  // Handle 8-bit and 16-bit buffer stores.
  SDValue handleByteShortBufferStores(SelectionDAG &DAG, EVT VDataType,
                                      SDLoc DL, SDValue Ops[],
                                      MemSDNode *M) const;
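  // Sketch of the expected lowering (details are in SIISelLowering.cpp): a
  // load is emitted as a byte/short buffer-load node producing a 32-bit
  // result, which is then converted to the requested 8/16-bit type; a store
  // symmetrically emits a byte/short buffer-store node from 32-bit VData.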

public:
  SITargetLowering(const TargetMachine &TM, const GCNSubtarget &STI);

  const GCNSubtarget *getSubtarget() const;

  bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT,
                       EVT SrcVT) const override;

  bool isShuffleMaskLegal(ArrayRef<int> /*Mask*/, EVT /*VT*/) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                          MachineFunction &MF,
                          unsigned IntrinsicID) const override;

  bool getAddrModeArguments(IntrinsicInst * /*I*/,
                            SmallVectorImpl<Value *> &/*Ops*/,
                            Type *&/*AccessTy*/) const override;

  bool isLegalGlobalAddressingMode(const AddrMode &AM) const;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  bool canMergeStoresTo(unsigned AS, EVT MemVT,
                        const SelectionDAG &DAG) const override;

  bool allowsMisalignedMemoryAccessesImpl(
      unsigned Size, unsigned AS, unsigned Align,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *IsFast = nullptr) const;

  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AS, unsigned Align,
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *IsFast = nullptr) const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
                          unsigned SrcAlign, bool IsMemset,
                          bool ZeroMemset,
                          bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  bool isMemOpUniform(const SDNode *N) const;
  bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;

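  /// \returns True if \p AS is one of the address spaces that flat
  /// instructions can address (global, flat, constant), or an address space
  /// above MAX_AMDGPU_ADDRESS, which this target knows nothing about and
  /// conservatively treats the same way.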
  static bool isFlatGlobalAddrSpace(unsigned AS) {
    return AS == AMDGPUAS::GLOBAL_ADDRESS ||
           AS == AMDGPUAS::FLAT_ADDRESS ||
           AS == AMDGPUAS::CONSTANT_ADDRESS ||
           AS > AMDGPUAS::MAX_AMDGPU_ADDRESS;
  }

  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
  bool isFreeAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;

  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  bool isTypeDesirableForOp(unsigned Op, EVT VT) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool supportSplitCSR(MachineFunction *MF) const override;
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  void passSpecialInputs(
    CallLoweringInfo &CLI,
    CCState &CCInfo,
    const SIMachineFunctionInfo &Info,
    SmallVectorImpl<std::pair<unsigned, SDValue>> &RegsToPass,
    SmallVectorImpl<SDValue> &MemOpChains,
    SDValue Chain) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool mayBeEmittedAsTailCall(const CallInst *) const override;

  bool isEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  MachineBasicBlock *splitKillBlock(MachineInstr &MI,
                                    MachineBasicBlock *BB) const;

  void bundleInstWithWaitcnt(MachineInstr &MI) const;
  MachineBasicBlock *emitGWSMemViolTestLoop(MachineInstr &MI,
                                            MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool enableAggressiveFMAFusion(EVT VT) const override;
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;
  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override;
  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;
  bool isFMADLegalForFAddFSub(const SelectionDAG &DAG,
                              const SDNode *N) const override;

  SDValue splitUnaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitBinaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue splitTernaryVectorOp(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  SDNode *PostISelFolding(MachineSDNode *N, SelectionDAG &DAG) const override;
  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDNode *legalizeTargetIndependentNode(SDNode *Node, SelectionDAG &DAG) const;

  MachineSDNode *wrapAddr64Rsrc(SelectionDAG &DAG, const SDLoc &DL,
                                SDValue Ptr) const;
  MachineSDNode *buildRSRC(SelectionDAG &DAG, const SDLoc &DL, SDValue Ptr,
                           uint32_t RsrcDword1, uint64_t RsrcDword2And3) const;
  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;
  ConstraintType getConstraintType(StringRef Constraint) const override;
  SDValue copyToM0(SelectionDAG &DAG, SDValue Chain, const SDLoc &DL,
                   SDValue V) const;

  void finalizeLowering(MachineFunction &MF) const override;

  void computeKnownBitsForFrameIndex(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool isSDNodeSourceOfDivergence(const SDNode *N,
    FunctionLoweringInfo *FLI, LegacyDivergenceAnalysis *DA) const override;

  bool isCanonicalized(SelectionDAG &DAG, SDValue Op,
                       unsigned MaxDepth = 5) const;
  bool denormalsEnabledForType(const SelectionDAG &DAG, EVT VT) const;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;
  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent) const override;
  bool requiresUniformRegister(MachineFunction &MF,
                               const Value *V) const override;
  Align getPrefLoopAlignment(MachineLoop *ML) const override;

  void allocateHSAUserSGPRs(CCState &CCInfo,
                            MachineFunction &MF,
                            const SIRegisterInfo &TRI,
                            SIMachineFunctionInfo &Info) const;

  void allocateSystemSGPRs(CCState &CCInfo,
                           MachineFunction &MF,
                           SIMachineFunctionInfo &Info,
                           CallingConv::ID CallConv,
                           bool IsShader) const;

  void allocateSpecialEntryInputVGPRs(CCState &CCInfo,
                                      MachineFunction &MF,
                                      const SIRegisterInfo &TRI,
                                      SIMachineFunctionInfo &Info) const;
  void allocateSpecialInputSGPRs(
    CCState &CCInfo,
    MachineFunction &MF,
    const SIRegisterInfo &TRI,
    SIMachineFunctionInfo &Info) const;

  void allocateSpecialInputVGPRs(CCState &CCInfo,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) const;
};

} // end namespace llvm

#endif