//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#include "SelectionDAGBuilder.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/AssignmentTrackingAnalysis.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/SwiftErrorValueTracking.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/TargetParser/Triple.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cstddef>
#include <iterator>
#include <limits>
#include <optional>
#include <tuple>

using namespace llvm;
using namespace PatternMatch;
using namespace SwitchCG;

#define DEBUG_TYPE "isel"

/// LimitFloatPrecision - Generate low-precision inline sequences for
/// some float libcalls (6, 8 or 12 bits).
static unsigned LimitFloatPrecision;

static cl::opt<bool>
    InsertAssertAlign("insert-assert-align", cl::init(true),
                      cl::desc("Insert the experimental `assertalign` node."),
                      cl::ReallyHidden);

static cl::opt<unsigned, true>
    LimitFPPrecision("limit-float-precision",
                     cl::desc("Generate low-precision inline sequences "
                              "for some float libcalls"),
                     cl::location(LimitFloatPrecision), cl::Hidden,
                     cl::init(0));

static cl::opt<unsigned> SwitchPeelThreshold(
    "switch-peel-threshold", cl::Hidden, cl::init(66),
    cl::desc("Set the case probability threshold for peeling the case from a "
             "switch statement. A value greater than 100 will void this "
             "optimization"));

// Limit the width of DAG chains. This is important in general to prevent
// DAG-based analysis from blowing up. For example, alias analysis and
// load clustering may not complete in reasonable time. It is difficult to
// recognize and avoid this situation within each individual analysis, and
// future analyses are likely to have the same behavior. Limiting DAG width is
// the safe approach and will be especially important with global DAGs.
//
// MaxParallelChains default is arbitrarily high to avoid affecting
// optimization, but could be lowered to improve compile time. Any ld-ld-st-st
// sequence over this should have been converted to llvm.memcpy by the
// frontend. It is easy to induce this behavior with .ll code such as:
// %buffer = alloca [4096 x i8]
// %data = load [4096 x i8]* %argPtr
// store [4096 x i8] %data, [4096 x i8]* %buffer
static const unsigned MaxParallelChains = 64;

static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CC);

/// getCopyFromParts - Create a value that contains the specified legal parts
/// combined into the value they represent.  If the parts combine to a type
/// larger than ValueVT then AssertOp can be used to specify whether the extra
/// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT
/// (ISD::AssertSext).
static SDValue
getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts,
                 unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V,
                 SDValue InChain,
                 std::optional<CallingConv::ID> CC = std::nullopt,
                 std::optional<ISD::NodeType> AssertOp = std::nullopt) {
  // Let the target assemble the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (SDValue Val = TLI.joinRegisterPartsIntoValue(DAG, DL, Parts, NumParts,
                                                   PartVT, ValueVT, CC))
    return Val;

  if (ValueVT.isVector())
    return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V,
                                  InChain, CC);

  assert(NumParts > 0 && "No parts to assemble!");
  SDValue Val = Parts[0];

  if (NumParts > 1) {
    // Assemble the value from multiple parts.
    if (ValueVT.isInteger()) {
      unsigned PartBits = PartVT.getSizeInBits();
      unsigned ValueBits = ValueVT.getSizeInBits();

      // Assemble the power of 2 part.
      unsigned RoundParts = llvm::bit_floor(NumParts);
      unsigned RoundBits = PartBits * RoundParts;
      EVT RoundVT = RoundBits == ValueBits ?
        ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits);
      SDValue Lo, Hi;

      EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2);

      if (RoundParts > 2) {
        Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V,
                              InChain);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2,
                              PartVT, HalfVT, V, InChain);
      } else {
        Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]);
        Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]);
      }

      if (DAG.getDataLayout().isBigEndian())
        std::swap(Lo, Hi);

      Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi);

      if (RoundParts < NumParts) {
        // Assemble the trailing non-power-of-2 part.
        unsigned OddParts = NumParts - RoundParts;
        EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits);
        Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT,
                              OddVT, V, InChain, CC);

        // Combine the round and odd parts.
        Lo = Val;
        if (DAG.getDataLayout().isBigEndian())
          std::swap(Lo, Hi);
        EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
        Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi);
        Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi,
                         DAG.getConstant(Lo.getValueSizeInBits(), DL,
                                         TLI.getShiftAmountTy(
                                             TotalVT, DAG.getDataLayout())));
        Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo);
        Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi);
      }
    } else if (PartVT.isFloatingPoint()) {
      // FP split into multiple FP parts (for ppcf128)
      assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 &&
             "Unexpected split");
      SDValue Lo, Hi;
      Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]);
      Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]);
      if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout()))
        std::swap(Lo, Hi);
      Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi);
    } else {
      // FP split into integer parts (soft fp)
      assert(ValueVT.isFloatingPoint() && PartVT.isInteger() &&
             !PartVT.isVector() && "Unexpected split");
      EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
      Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V,
                             InChain, CC);
    }
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  // PartEVT is the type of the register class that holds the value.
  // ValueVT is the type of the inline asm operation.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isInteger() && ValueVT.isFloatingPoint() &&
      ValueVT.bitsLT(PartEVT)) {
    // For an FP value in an integer part, we need to truncate to the right
    // width first.
    PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
    Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val);
  }

  // Handle types that have the same size.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits())
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  // Handle types with different sizes.
  if (PartEVT.isInteger() && ValueVT.isInteger()) {
    if (ValueVT.bitsLT(PartEVT)) {
      // For a truncate, see if we have any information to
      // indicate whether the truncated bits will always be
      // zero or sign-extension.
      if (AssertOp)
        Val = DAG.getNode(*AssertOp, DL, PartEVT, Val,
                          DAG.getValueType(ValueVT));
      return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    }
    return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
  }

  if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
    // FP_ROUND's are always exact here.
    if (ValueVT.bitsLT(Val.getValueType())) {

      SDValue NoChange =
          DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));

      if (DAG.getMachineFunction().getFunction().getAttributes().hasFnAttr(
              llvm::Attribute::StrictFP)) {
        return DAG.getNode(ISD::STRICT_FP_ROUND, DL,
                           DAG.getVTList(ValueVT, MVT::Other), InChain, Val,
                           NoChange);
      }

      return DAG.getNode(ISD::FP_ROUND, DL, ValueVT, Val, NoChange);
    }

    return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val);
  }

  // Handle MMX to a narrower integer type by bitcasting MMX to integer and
  // then truncating.
  if (PartEVT == MVT::x86mmx && ValueVT.isInteger() &&
      ValueVT.bitsLT(PartEVT)) {
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val);
    return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  report_fatal_error("Unknown mismatch in getCopyFromParts!");
}

static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V,
                                              const Twine &ErrMsg) {
  const Instruction *I = dyn_cast_or_null<Instruction>(V);
  if (!V)
    return Ctx.emitError(ErrMsg);

  const char *AsmError = ", possible invalid constraint for vector type";
  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (CI->isInlineAsm())
      return Ctx.emitError(I, ErrMsg + AsmError);

  return Ctx.emitError(I, ErrMsg);
}

/// getCopyFromPartsVector - Create a value that contains the specified legal
/// parts combined into the value they represent.  If the parts combine to a
/// type larger than ValueVT then AssertOp can be used to specify whether the
/// extra bits are known to be zero (ISD::AssertZext) or sign extended from
/// ValueVT (ISD::AssertSext).
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      SDValue InChain,
                                      std::optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  const bool IsABIRegCopy = CallConv.has_value();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), *CallConv, ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1, PartVT, IntermediateVT,
                                  V, InChain, CallConv);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor, PartVT,
                                  IntermediateVT, V, InChain, CallConv);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        IntermediateVT.isVector()
            ? EVT::getVectorVT(
                  *DAG.getContext(), IntermediateVT.getScalarType(),
                  IntermediateVT.getVectorElementCount() * NumParts)
            : EVT::getVectorVT(*DAG.getContext(),
                               IntermediateVT.getScalarType(),
                               NumIntermediates);
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    // If the parts vector has more elements than the value vector, then we
    // have a vector widening case (e.g. <2 x float> -> <4 x float>).
    // Extract the elements we want.
    if (PartEVT.getVectorElementCount() != ValueVT.getVectorElementCount()) {
      assert((PartEVT.getVectorElementCount().getKnownMinValue() >
              ValueVT.getVectorElementCount().getKnownMinValue()) &&
             (PartEVT.getVectorElementCount().isScalable() ==
              ValueVT.getVectorElementCount().isScalable()) &&
             "Cannot narrow, it would be a lossy transformation");
      PartEVT =
          EVT::getVectorVT(*DAG.getContext(), PartEVT.getVectorElementType(),
                           ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, PartEVT, Val,
                        DAG.getVectorIdxConstant(0, DL));
      if (PartEVT == ValueVT)
        return Val;
      if (PartEVT.isInteger() && ValueVT.isFloatingPoint())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

      // Vector/Vector bitcast (e.g. <2 x bfloat> -> <2 x half>).
      if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
        return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    }

    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);
  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers. For vectors
    // with the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.bitsLT(PartEVT)) {
      const uint64_t ValueSize = ValueVT.getFixedSizeInBits();
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      // Drop the extra bits.
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      return DAG.getBitcast(ValueVT, Val);
    }

    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT) {
    unsigned ValueSize = ValueSVT.getSizeInBits();
    if (ValueSize == PartEVT.getSizeInBits()) {
      Val = DAG.getNode(ISD::BITCAST, DL, ValueSVT, Val);
    } else if (ValueSVT.isFloatingPoint() && PartEVT.isInteger()) {
      // It's possible a scalar floating point type gets softened to integer and
      // then promoted to a larger integer. If PartEVT is the larger integer
      // we need to truncate it and then bitcast to the FP type.
      assert(ValueSVT.bitsLT(PartEVT) && "Unexpected types");
      EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
      Val = DAG.getNode(ISD::TRUNCATE, DL, IntermediateType, Val);
      Val = DAG.getBitcast(ValueSVT, Val);
    } else {
      Val = ValueVT.isFloatingPoint()
                ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);
    }
  }

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void
getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
               unsigned NumParts, MVT PartVT, const Value *V,
               std::optional<CallingConv::ID> CallConv = std::nullopt,
               ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  // Let the target split the parts if it wants to
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.splitValueIntoRegisterParts(DAG, DL, Val, Parts, NumParts, PartVT,
                                      CallConv))
    return;
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  unsigned PartBits = PartVT.getSizeInBits();
  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover fewer bits than the value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = llvm::bit_floor(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

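  // For example, with NumParts == 4 the first pass splits the bitcast value
  // into a low half in Parts[0] and a high half in Parts[2]; the second pass
  // splits each half again, leaving one register-sized piece in every slot.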
  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

static SDValue widenVectorToPartType(SelectionDAG &DAG, SDValue Val,
                                     const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  EVT PartEVT = PartVT.getVectorElementType();
  EVT ValueEVT = ValueVT.getVectorElementType();
  ElementCount PartNumElts = PartVT.getVectorElementCount();
  ElementCount ValueNumElts = ValueVT.getVectorElementCount();

  // We only support widening vectors with equivalent element types and
  // fixed/scalable properties. If a target needs to widen a fixed-length type
  // to a scalable one, it should be possible to use INSERT_SUBVECTOR below.
  if (ElementCount::isKnownLE(PartNumElts, ValueNumElts) ||
      PartNumElts.isScalable() != ValueNumElts.isScalable())
    return SDValue();

  // Have a try for bf16 because some targets share its ABI with fp16.
  if (ValueEVT == MVT::bf16 && PartEVT == MVT::f16) {
    assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
           "Cannot widen to illegal type");
    Val = DAG.getNode(ISD::BITCAST, DL,
                      ValueVT.changeVectorElementType(MVT::f16), Val);
  } else if (PartEVT != ValueEVT) {
    return SDValue();
  }

  // Widening a scalable vector to another scalable vector is done by inserting
  // the vector into a larger undef one.
  if (PartNumElts.isScalable())
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                       Val, DAG.getVectorIdxConstant(0, DL));

  // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
  // undef elements.
  SmallVector<SDValue, 16> Ops;
  DAG.ExtractVectorElements(Val, Ops);
  SDValue EltUndef = DAG.getUNDEF(PartEVT);
  Ops.append((PartNumElts - ValueNumElts).getFixedValue(), EltUndef);

  // FIXME: Use CONCAT for 2x -> 4x.
  return DAG.getBuildVector(PartVT, DL, Ops);
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 std::optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const bool IsABIRegCopy = CallConv.has_value();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                   ValueVT.getVectorElementType()) &&
               PartEVT.getVectorElementCount() ==
                   ValueVT.getVectorElementCount()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else if (PartEVT.isVector() &&
               PartEVT.getVectorElementType() !=
                   ValueVT.getVectorElementType() &&
               TLI.getTypeAction(*DAG.getContext(), ValueVT) ==
                   TargetLowering::TypeWidenVector) {
      // Combination of widening and promotion.
      EVT WidenVT =
          EVT::getVectorVT(*DAG.getContext(), ValueVT.getVectorElementType(),
                           PartVT.getVectorElementCount());
      SDValue Widened = widenVectorToPartType(DAG, Val, DL, WidenVT);
      Val = DAG.getAnyExtOrTrunc(Widened, DL, PartVT);
    } else {
      // Don't extract an integer from a float vector. This can happen if the
      // FP type gets softened to integer and then promoted. The promotion
      // prevents it from being picked up by the earlier bitcast case.
      if (ValueVT.getVectorElementCount().isScalar() &&
          (!ValueVT.isFloatingPoint() || !PartVT.isInteger())) {
        Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
                          DAG.getVectorIdxConstant(0, DL));
      } else {
        uint64_t ValueSize = ValueVT.getFixedSizeInBits();
        assert(PartVT.getFixedSizeInBits() > ValueSize &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType = EVT::getIntegerVT(*DAG.getContext(), ValueSize);
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), *CallConv, ValueVT, IntermediateVT, NumIntermediates,
        RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  assert(IntermediateVT.isScalableVector() == ValueVT.isScalableVector() &&
         "Mixing scalable and fixed vectors when copying in parts");

  std::optional<ElementCount> DestEltCnt;

  if (IntermediateVT.isVector())
    DestEltCnt = IntermediateVT.getVectorElementCount() * NumIntermediates;
  else
    DestEltCnt = ElementCount::getFixed(NumIntermediates);

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), *DestEltCnt);

  if (ValueVT == BuiltVectorTy) {
    // Nothing to do.
  } else if (ValueVT.getSizeInBits() == BuiltVectorTy.getSizeInBits()) {
    // Bitconvert vector->vector case.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  } else {
    if (BuiltVectorTy.getVectorElementType().bitsGT(
            ValueVT.getVectorElementType())) {
      // Integer promotion.
      ValueVT = EVT::getVectorVT(*DAG.getContext(),
                                 BuiltVectorTy.getVectorElementType(),
                                 ValueVT.getVectorElementCount());
      Val = DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val);
    }

    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy)) {
      Val = Widened;
    }
  }

  assert(Val.getValueType() == BuiltVectorTy && "Unexpected vector value type");

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      // This does something sensible for scalable vectors - see the
      // definition of EXTRACT_SUBVECTOR for further details.
      unsigned IntermediateNumElts = IntermediateVT.getVectorMinNumElements();
      Ops[i] =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                      DAG.getVectorIdxConstant(i * IntermediateNumElts, DL));
    } else {
      Ops[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
                           DAG.getVectorIdxConstant(i, DL));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into legal
    // parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, std::optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

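// Compute the legal register breakdown for \p Ty: fill ValueVTs, RegVTs and
// RegCount, and record the run of registers starting at \p Reg that will hold
// each legal part of the value.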
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           std::optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, *CC, ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, *CC, ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Glue, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Glue) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Glue);
        *Glue = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ? ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, Chain, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Glue,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the value's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), *CallConv, RegVTs[Value])
                         : RegVTs[Value];

    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Glue) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Glue);
      *Glue = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Glue)
    // If NumRegs > 1 && Glue is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  InlineAsm::Flag Flag(Code, Regs.size());
  if (HasMatching)
    Flag.setMatchingOp(MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.
    // Don't do this for tied operands that can use the regclass information
    // from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag.setRegClass(RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind::Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    Register SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP;
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    MVT RegisterVT = RegVTs[Value];
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value],
                                           RegisterVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

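/// Return the registers in Regs, each paired with the size in bits of the
/// register type it was assigned.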
SmallVector<std::pair<unsigned, TypeSize>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, TypeSize>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    TypeSize RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               AssumptionCache *ac,
                               const TargetLibraryInfo *li) {
  AA = aa;
  AC = ac;
  GFI = gfi;
  LibInfo = li;
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
  AssignmentTrackingEnabled = isAssignmentTrackingEnabled(
      *DAG.getMachineFunction().getFunction().getParent());
}

void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

// Update DAG root to include dependencies on Pending chains.
SDValue SelectionDAGBuilder::updateRoot(SmallVectorImpl<SDValue> &Pending) {
  SDValue Root = DAG.getRoot();

  if (Pending.empty())
    return Root;

  // Add current root to PendingChains, unless we already indirectly
  // depend on it.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = Pending.size();
    for (; i != e; ++i) {
      assert(Pending[i].getNode()->getNumOperands() > 1);
      if (Pending[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      Pending.push_back(Root);
  }

  if (Pending.size() == 1)
    Root = Pending[0];
  else
    Root = DAG.getTokenFactor(getCurSDLoc(), Pending);

  DAG.setRoot(Root);
  Pending.clear();
  return Root;
}

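// Flush only the PendingLoads chains into the DAG root; getRoot() additionally
// folds in the pending constrained-FP chains, while getControlRoot() flushes
// the PendingExports list.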
SDValue SelectionDAGBuilder::getMemoryRoot() {
  return updateRoot(PendingLoads);
}

SDValue SelectionDAGBuilder::getRoot() {
  // Chain up all pending constrained intrinsics together with all
  // pending loads, by simply appending them to PendingLoads and
  // then calling getMemoryRoot().
  PendingLoads.reserve(PendingLoads.size() +
                       PendingConstrainedFP.size() +
                       PendingConstrainedFPStrict.size());
  PendingLoads.append(PendingConstrainedFP.begin(),
                      PendingConstrainedFP.end());
  PendingLoads.append(PendingConstrainedFPStrict.begin(),
                      PendingConstrainedFPStrict.end());
  PendingConstrainedFP.clear();
  PendingConstrainedFPStrict.clear();
  return getMemoryRoot();
}

SDValue SelectionDAGBuilder::getControlRoot() {
  // We need to emit pending fpexcept.strict constrained intrinsics,
  // so append them to the PendingExports list.
  PendingExports.append(PendingConstrainedFPStrict.begin(),
                        PendingConstrainedFPStrict.end());
  PendingConstrainedFPStrict.clear();
  return updateRoot(PendingExports);
}

void SelectionDAGBuilder::handleDebugDeclare(Value *Address,
                                             DILocalVariable *Variable,
                                             DIExpression *Expression,
                                             DebugLoc DL) {
  assert(Variable && "Missing variable");

  // Check if address has undef value.
  if (!Address || isa<UndefValue>(Address) ||
      (Address->use_empty() && !isa<Argument>(Address))) {
    LLVM_DEBUG(
        dbgs()
        << "dbg_declare: Dropping debug info (bad/undef/unused-arg address)\n");
    return;
  }

  bool IsParameter = Variable->isParameter() || isa<Argument>(Address);

  SDValue &N = NodeMap[Address];
  if (!N.getNode() && isa<Argument>(Address))
    // Check unused arguments map.
    N = UnusedArgNodeMap[Address];
  SDDbgValue *SDV;
  if (N.getNode()) {
    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address))
      Address = BCI->getOperand(0);
    // Parameters are handled specially.
    auto *FINode = dyn_cast<FrameIndexSDNode>(N.getNode());
    if (IsParameter && FINode) {
      // Byval parameter. We have a frame index at this point.
      SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(),
                                      /*IsIndirect*/ true, DL, SDNodeOrder);
    } else if (isa<Argument>(Address)) {
      // Address is an argument, so try to emit its dbg value using
      // virtual register info from the FuncInfo.ValueMap.
      EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                               FuncArgumentDbgValueKind::Declare, N);
      return;
    } else {
      SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(),
                            true, DL, SDNodeOrder);
    }
    DAG.AddDbgValue(SDV, IsParameter);
  } else {
    // If Address is an argument then try to emit its dbg value using
    // virtual register info from the FuncInfo.ValueMap.
    if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, DL,
                                  FuncArgumentDbgValueKind::Declare, N)) {
      LLVM_DEBUG(dbgs() << "dbg_declare: Dropping debug info"
                        << " (could not emit func-arg dbg_value)\n");
    }
  }
  return;
}

void SelectionDAGBuilder::visitDbgInfo(const Instruction &I) {
  // Add SDDbgValue nodes for any var locs here. Do so before updating
  // SDNodeOrder, as this mapping is {Inst -> Locs BEFORE Inst}.
  if (FunctionVarLocs const *FnVarLocs = DAG.getFunctionVarLocs()) {
    for (auto It = FnVarLocs->locs_begin(&I), End = FnVarLocs->locs_end(&I);
         It != End; ++It) {
      auto *Var = FnVarLocs->getDILocalVariable(It->VariableID);
      dropDanglingDebugInfo(Var, It->Expr);
      if (It->Values.isKillLocation(It->Expr)) {
        handleKillDebugValue(Var, It->Expr, It->DL, SDNodeOrder);
        continue;
      }
      SmallVector<Value *> Values(It->Values.location_ops());
      if (!handleDebugValue(Values, Var, It->Expr, It->DL, SDNodeOrder,
                            It->Values.hasArgList())) {
        SmallVector<Value *, 4> Vals;
        for (Value *V : It->Values.location_ops())
          Vals.push_back(V);
        addDanglingDebugInfo(Vals,
                             FnVarLocs->getDILocalVariable(It->VariableID),
                             It->Expr, Vals.size() > 1, It->DL, SDNodeOrder);
      }
    }
    // We must early-exit here to prevent any DPValues from being emitted below,
    // as we have just emitted the debug values resulting from assignment
    // tracking analysis, making any existing DPValues redundant (and probably
    // less correct).
    return;
  }

  // Handle any debug-info attached to this instruction, in the form of
  // DPValue non-instruction debug-info records.
1253  for (DPValue &DPV : I.getDbgValueRange()) {
1254    DILocalVariable *Variable = DPV.getVariable();
1255    DIExpression *Expression = DPV.getExpression();
1256    dropDanglingDebugInfo(Variable, Expression);
1257
1258    if (DPV.getType() == DPValue::LocationType::Declare) {
1259      if (FuncInfo.PreprocessedDPVDeclares.contains(&DPV))
1260        continue;
1261      LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DPV
1262                        << "\n");
1263      handleDebugDeclare(DPV.getVariableLocationOp(0), Variable, Expression,
1264                         DPV.getDebugLoc());
1265      continue;
1266    }
1267
1268    // A DPValue with no locations is a kill location.
1269    SmallVector<Value *, 4> Values(DPV.location_ops());
1270    if (Values.empty()) {
1271      handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1272                           SDNodeOrder);
1273      continue;
1274    }
1275
1276    // A DPValue with an undef or absent location is also a kill location.
1277    if (llvm::any_of(Values,
1278                     [](Value *V) { return !V || isa<UndefValue>(V); })) {
1279      handleKillDebugValue(Variable, Expression, DPV.getDebugLoc(),
1280                           SDNodeOrder);
1281      continue;
1282    }
1283
1284    bool IsVariadic = DPV.hasArgList();
1285    if (!handleDebugValue(Values, Variable, Expression, DPV.getDebugLoc(),
1286                          SDNodeOrder, IsVariadic)) {
1287      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
1288                           DPV.getDebugLoc(), SDNodeOrder);
1289    }
1290  }
1291}
1292
1293void SelectionDAGBuilder::visit(const Instruction &I) {
1294  visitDbgInfo(I);
1295
1296  // Set up outgoing PHI node register values before emitting the terminator.
1297  if (I.isTerminator()) {
1298    HandlePHINodesInSuccessorBlocks(I.getParent());
1299  }
1300
1301  // Increase the SDNodeOrder if dealing with a non-debug instruction.
1302  if (!isa<DbgInfoIntrinsic>(I))
1303    ++SDNodeOrder;
1304
1305  CurInst = &I;
1306
1307  // Set inserted listener only if required.
1308  bool NodeInserted = false;
1309  std::unique_ptr<SelectionDAG::DAGNodeInsertedListener> InsertedListener;
1310  MDNode *PCSectionsMD = I.getMetadata(LLVMContext::MD_pcsections);
1311  if (PCSectionsMD) {
1312    InsertedListener = std::make_unique<SelectionDAG::DAGNodeInsertedListener>(
1313        DAG, [&](SDNode *) { NodeInserted = true; });
1314  }
1315
1316  visit(I.getOpcode(), I);
1317
1318  if (!I.isTerminator() && !HasTailCall &&
1319      !isa<GCStatepointInst>(I)) // statepoints handle their exports internally
1320    CopyToExportRegsIfNeeded(&I);
1321
1322  // Handle metadata.
1323  if (PCSectionsMD) {
1324    auto It = NodeMap.find(&I);
1325    if (It != NodeMap.end()) {
1326      DAG.addPCSections(It->second.getNode(), PCSectionsMD);
1327    } else if (NodeInserted) {
1328      // This should not happen; if it does, don't let it go unnoticed so we can
1329      // fix it. The relevant visit*() function is probably missing a setValue().
1330      errs() << "warning: losing !pcsections metadata ["
1331             << I.getModule()->getName() << "]\n";
1332      LLVM_DEBUG(I.dump());
1333      assert(false);
1334    }
1335  }
1336
1337  CurInst = nullptr;
1338}
1339
1340void SelectionDAGBuilder::visitPHI(const PHINode &) {
1341  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
1342}
1343
1344void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
1345  // Note: this doesn't use InstVisitor, because it has to work with
1346  // ConstantExprs in addition to instructions.
1347  switch (Opcode) {
1348  default: llvm_unreachable("Unknown instruction type encountered!");
1349    // Build the switch statement using the Instruction.def file.
1350#define HANDLE_INST(NUM, OPCODE, CLASS) \
1351    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
1352#include "llvm/IR/Instruction.def"
1353  }
1354}
1355
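// Emit a variadic dbg_value in which every location operand has been replaced
// by undef. This is used when the original operands cannot be encoded as
// SDDbgOperands; returning true tells callers the dbg_value has been handled.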
1356static bool handleDanglingVariadicDebugInfo(SelectionDAG &DAG,
1357                                            DILocalVariable *Variable,
1358                                            DebugLoc DL, unsigned Order,
1359                                            SmallVectorImpl<Value *> &Values,
1360                                            DIExpression *Expression) {
1361  // For variadic dbg_values we will now insert an undef.
1362  // FIXME: We can potentially recover these!
1363  SmallVector<SDDbgOperand, 2> Locs;
1364  for (const Value *V : Values) {
1365    auto *Undef = UndefValue::get(V->getType());
1366    Locs.push_back(SDDbgOperand::fromConst(Undef));
1367  }
1368  SDDbgValue *SDV = DAG.getDbgValueList(Variable, Expression, Locs, {},
1369                                        /*IsIndirect=*/false, DL, Order,
1370                                        /*IsVariadic=*/true);
1371  DAG.AddDbgValue(SDV, /*isParameter=*/false);
1372  return true;
1373}
1374
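// Record debug info whose operand does not yet have an SDNode so that it can
// be resolved later, once the value is emitted. Variadic dbg_values are not
// tracked; they are immediately lowered to undef locations instead.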
1375void SelectionDAGBuilder::addDanglingDebugInfo(SmallVectorImpl<Value *> &Values,
1376                                               DILocalVariable *Var,
1377                                               DIExpression *Expr,
1378                                               bool IsVariadic, DebugLoc DL,
1379                                               unsigned Order) {
1380  if (IsVariadic) {
1381    handleDanglingVariadicDebugInfo(DAG, Var, DL, Order, Values, Expr);
1382    return;
1383  }
1384  // TODO: Dangling debug info will eventually either be resolved or produce
1385  // an Undef DBG_VALUE. However in the resolution case, a gap may appear
1386  // between the original dbg.value location and its resolved DBG_VALUE,
1387  // which we should ideally fill with an extra Undef DBG_VALUE.
1388  assert(Values.size() == 1);
1389  DanglingDebugInfoMap[Values[0]].emplace_back(Var, Expr, DL, Order);
1390}
1391
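// Drop any dangling debug info that overlaps (by variable and fragment) the
// given variable/expression pair, giving each dropped entry one final chance
// to be salvaged before it is erased.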
1392void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
1393                                                const DIExpression *Expr) {
1394  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
1395    DIVariable *DanglingVariable = DDI.getVariable();
1396    DIExpression *DanglingExpr = DDI.getExpression();
1397    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
1398      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for "
1399                        << printDDI(nullptr, DDI) << "\n");
1400      return true;
1401    }
1402    return false;
1403  };
1404
1405  for (auto &DDIMI : DanglingDebugInfoMap) {
1406    DanglingDebugInfoVector &DDIV = DDIMI.second;
1407
1408    // If debug info is to be dropped, run it through final checks to see
1409    // whether it can be salvaged.
1410    for (auto &DDI : DDIV)
1411      if (isMatchingDbgValue(DDI))
1412        salvageUnresolvedDbgValue(DDIMI.first, DDI);
1413
1414    erase_if(DDIV, isMatchingDbgValue);
1415  }
1416}
1417
1418// resolveDanglingDebugInfo - If we saw an earlier dbg_value referring to V,
1419// generate the debug data structures now that we've seen its definition.
1420void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
1421                                                   SDValue Val) {
1422  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
1423  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
1424    return;
1425
1426  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
1427  for (auto &DDI : DDIV) {
1428    DebugLoc DL = DDI.getDebugLoc();
1429    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
1430    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
1431    DILocalVariable *Variable = DDI.getVariable();
1432    DIExpression *Expr = DDI.getExpression();
1433    assert(Variable->isValidLocationForIntrinsic(DL) &&
1434           "Expected inlined-at fields to agree");
1435    SDDbgValue *SDV;
1436    if (Val.getNode()) {
1437      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
1438      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
1439      // we couldn't resolve it directly when examining the DbgValue intrinsic
1440      // in the first place we should not be more successful here). Unless we
1441      // have some test case that proves this to be correct we should avoid
1442      // calling EmitFuncArgumentDbgValue here.
1443      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, DL,
1444                                    FuncArgumentDbgValueKind::Value, Val)) {
1445        LLVM_DEBUG(dbgs() << "Resolve dangling debug info for "
1446                          << printDDI(V, DDI) << "\n");
1447        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
1448        // Increase the SDNodeOrder for the DbgValue here to make sure it is
1449        // inserted after the definition of Val when emitting the instructions
1450        // after ISel. An alternative could be to teach
1451        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
1452        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
1453                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
1454                   << ValSDNodeOrder << "\n");
1455        SDV = getDbgValue(Val, Variable, Expr, DL,
1456                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
1457        DAG.AddDbgValue(SDV, false);
1458      } else
1459        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for "
1460                          << printDDI(V, DDI)
1461                          << " in EmitFuncArgumentDbgValue\n");
1462    } else {
1463      LLVM_DEBUG(dbgs() << "Dropping debug info for " << printDDI(V, DDI)
1464                        << "\n");
1465      auto Undef = UndefValue::get(V->getType());
1466      auto SDV =
1467          DAG.getConstantDbgValue(Variable, Expr, Undef, DL, DbgSDNodeOrder);
1468      DAG.AddDbgValue(SDV, false);
1469    }
1470  }
1471  DDIV.clear();
1472}
1473
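// Make a final attempt to express a dangling dbg_value: encode the value
// directly if possible, otherwise rewrite the expression in terms of operands
// salvaged from intervening instructions, and fall back to an undef DBG_VALUE
// that terminates any earlier location for the variable.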
1474void SelectionDAGBuilder::salvageUnresolvedDbgValue(const Value *V,
1475                                                    DanglingDebugInfo &DDI) {
1476  // TODO: For the variadic implementation, instead of only checking the fail
1477  // state of `handleDebugValue`, we need to know specifically which values were
1478  // invalid, so that we attempt to salvage only those values when processing
1479  // a DIArgList.
1480  const Value *OrigV = V;
1481  DILocalVariable *Var = DDI.getVariable();
1482  DIExpression *Expr = DDI.getExpression();
1483  DebugLoc DL = DDI.getDebugLoc();
1484  unsigned SDOrder = DDI.getSDNodeOrder();
1485
1486  // Currently we consider only dbg.value intrinsics -- we tell the salvager
1487  // that DW_OP_stack_value is desired.
1488  bool StackValue = true;
1489
1490  // Can this Value be encoded without any further work?
1491  if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false))
1492    return;
1493
1494  // Attempt to salvage back through as many instructions as possible. Bail if
1495  // a non-instruction is seen, such as a constant expression or global
1496  // variable. FIXME: Further work could recover those too.
1497  while (isa<Instruction>(V)) {
1498    const Instruction &VAsInst = *cast<const Instruction>(V);
1499    // Temporary "0", awaiting real implementation.
1500    SmallVector<uint64_t, 16> Ops;
1501    SmallVector<Value *, 4> AdditionalValues;
1502    V = salvageDebugInfoImpl(const_cast<Instruction &>(VAsInst),
1503                             Expr->getNumLocationOperands(), Ops,
1504                             AdditionalValues);
1505    // If we cannot salvage any further, and haven't yet found a suitable debug
1506    // expression, bail out.
1507    if (!V)
1508      break;
1509
1510    // TODO: If AdditionalValues isn't empty, then the salvage can only be
1511    // represented with a DBG_VALUE_LIST, so we give up. When we have support
1512    // here for variadic dbg_values, remove that condition.
1513    if (!AdditionalValues.empty())
1514      break;
1515
1516    // New value and expr now represent this debuginfo.
1517    Expr = DIExpression::appendOpsToArg(Expr, Ops, 0, StackValue);
1518
1519    // Some kind of simplification occurred: check whether the operand of the
1520    // salvaged debug expression can be encoded in this DAG.
1521    if (handleDebugValue(V, Var, Expr, DL, SDOrder, /*IsVariadic=*/false)) {
1522      LLVM_DEBUG(
1523          dbgs() << "Salvaged debug location info for:\n  " << *Var << "\n"
1524                 << *OrigV << "\nBy stripping back to:\n  " << *V << "\n");
1525      return;
1526    }
1527  }
1528
1529  // This was the final opportunity to salvage this debug information, and it
1530  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
1531  // any earlier variable location.
1532  assert(OrigV && "V shouldn't be null");
1533  auto *Undef = UndefValue::get(OrigV->getType());
1534  auto *SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
1535  DAG.AddDbgValue(SDV, false);
1536  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n  "
1537                    << printDDI(OrigV, DDI) << "\n");
1538}
1539
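// Emit a kill location for the variable: an undef expression over a poison
// operand, marking the variable's value as unavailable from this point on.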
1540void SelectionDAGBuilder::handleKillDebugValue(DILocalVariable *Var,
1541                                               DIExpression *Expr,
1542                                               DebugLoc DbgLoc,
1543                                               unsigned Order) {
1544  Value *Poison = PoisonValue::get(Type::getInt1Ty(*Context));
1545  DIExpression *NewExpr =
1546      const_cast<DIExpression *>(DIExpression::convertToUndefExpression(Expr));
1547  handleDebugValue(Poison, Var, NewExpr, DbgLoc, Order,
1548                   /*IsVariadic*/ false);
1549}
1550
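// Try to encode a dbg_value for the given variable and location operands
// directly in the DAG. Returns false if any operand cannot be described yet,
// in which case the caller should record the debug info as dangling.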
1551bool SelectionDAGBuilder::handleDebugValue(ArrayRef<const Value *> Values,
1552                                           DILocalVariable *Var,
1553                                           DIExpression *Expr, DebugLoc DbgLoc,
1554                                           unsigned Order, bool IsVariadic) {
1555  if (Values.empty())
1556    return true;
1557
1558  // Filter EntryValue locations out early.
1559  if (visitEntryValueDbgValue(Values, Var, Expr, DbgLoc))
1560    return true;
1561
1562  SmallVector<SDDbgOperand> LocationOps;
1563  SmallVector<SDNode *> Dependencies;
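  // Build an SDDbgOperand for each location operand without forcing code
  // generation for values that have not been emitted in this block yet.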
1564  for (const Value *V : Values) {
1565    // Constant value.
1566    if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
1567        isa<ConstantPointerNull>(V)) {
1568      LocationOps.emplace_back(SDDbgOperand::fromConst(V));
1569      continue;
1570    }
1571
1572    // Look through IntToPtr constants.
1573    if (auto *CE = dyn_cast<ConstantExpr>(V))
1574      if (CE->getOpcode() == Instruction::IntToPtr) {
1575        LocationOps.emplace_back(SDDbgOperand::fromConst(CE->getOperand(0)));
1576        continue;
1577      }
1578
1579    // If the Value is a frame index, we can create a FrameIndex debug value
1580    // without relying on the DAG at all.
1581    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1582      auto SI = FuncInfo.StaticAllocaMap.find(AI);
1583      if (SI != FuncInfo.StaticAllocaMap.end()) {
1584        LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(SI->second));
1585        continue;
1586      }
1587    }
1588
1589    // Do not use getValue() in here; we don't want to generate code at
1590    // this point if it hasn't been done yet.
1591    SDValue N = NodeMap[V];
1592    if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
1593      N = UnusedArgNodeMap[V];
1594    if (N.getNode()) {
1595      // Only emit func arg dbg value for non-variadic dbg.values for now.
1596      if (!IsVariadic &&
1597          EmitFuncArgumentDbgValue(V, Var, Expr, DbgLoc,
1598                                   FuncArgumentDbgValueKind::Value, N))
1599        return true;
1600      if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
1601        // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can
1602        // describe stack slot locations.
1603        //
1604        // Consider "int x = 0; int *px = &x;". There are two kinds of
1605        // interesting debug values here after optimization:
1606        //
1607        //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
1608        //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
1609        //
1610        // Both describe the direct values of their associated variables.
1611        Dependencies.push_back(N.getNode());
1612        LocationOps.emplace_back(SDDbgOperand::fromFrameIdx(FISDN->getIndex()));
1613        continue;
1614      }
1615      LocationOps.emplace_back(
1616          SDDbgOperand::fromNode(N.getNode(), N.getResNo()));
1617      continue;
1618    }
1619
1620    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1621    // Special rules apply for the first dbg.values of parameter variables in a
1622    // function. Identify them by the fact that they reference Argument Values,
1623    // are parameters of the current function, and have no inlined-at location.
1624    // We need to let them dangle until they get an SDNode.
1625    bool IsParamOfFunc =
1626        isa<Argument>(V) && Var->isParameter() && !DbgLoc.getInlinedAt();
1627    if (IsParamOfFunc)
1628      return false;
1629
1630    // The value is not used in this block yet (or it would have an SDNode).
1631    // We still want the value to appear for the user if possible -- if it has
1632    // an associated VReg, we can refer to that instead.
1633    auto VMI = FuncInfo.ValueMap.find(V);
1634    if (VMI != FuncInfo.ValueMap.end()) {
1635      unsigned Reg = VMI->second;
1636      // If this is a PHI node, it may be split up into several MI PHI nodes
1637      // (in FunctionLoweringInfo::set).
1638      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
1639                       V->getType(), std::nullopt);
1640      if (RFV.occupiesMultipleRegs()) {
1641        // FIXME: We could potentially support variadic dbg_values here.
1642        if (IsVariadic)
1643          return false;
1644        unsigned Offset = 0;
1645        unsigned BitsToDescribe = 0;
1646        if (auto VarSize = Var->getSizeInBits())
1647          BitsToDescribe = *VarSize;
1648        if (auto Fragment = Expr->getFragmentInfo())
1649          BitsToDescribe = Fragment->SizeInBits;
1650        for (const auto &RegAndSize : RFV.getRegsAndSizes()) {
1651          // Bail out if all bits are described already.
1652          if (Offset >= BitsToDescribe)
1653            break;
1654          // TODO: handle scalable vectors.
1655          unsigned RegisterSize = RegAndSize.second;
1656          unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
1657                                      ? BitsToDescribe - Offset
1658                                      : RegisterSize;
1659          auto FragmentExpr = DIExpression::createFragmentExpression(
1660              Expr, Offset, FragmentSize);
1661          if (!FragmentExpr)
1662            continue;
1663          SDDbgValue *SDV = DAG.getVRegDbgValue(
1664              Var, *FragmentExpr, RegAndSize.first, false, DbgLoc, SDNodeOrder);
1665          DAG.AddDbgValue(SDV, false);
1666          Offset += RegisterSize;
1667        }
1668        return true;
1669      }
1670      // We can use simple vreg locations for variadic dbg_values as well.
1671      LocationOps.emplace_back(SDDbgOperand::fromVReg(Reg));
1672      continue;
1673    }
1674    // We failed to create a SDDbgOperand for V.
1675    return false;
1676  }
1677
1678  // We have created an SDDbgOperand for each Value in Values.
1679  // TODO: Should this use Order instead of SDNodeOrder?
1680  assert(!LocationOps.empty());
1681  SDDbgValue *SDV = DAG.getDbgValueList(Var, Expr, LocationOps, Dependencies,
1682                                        /*IsIndirect=*/false, DbgLoc,
1683                                        SDNodeOrder, IsVariadic);
1684  DAG.AddDbgValue(SDV, /*isParameter=*/false);
1685  return true;
1686}
1687
1688void SelectionDAGBuilder::resolveOrClearDbgInfo() {
1689  // Try to fixup any remaining dangling debug info -- and drop it if we can't.
1690  for (auto &Pair : DanglingDebugInfoMap)
1691    for (auto &DDI : Pair.second)
1692      salvageUnresolvedDbgValue(const_cast<Value *>(Pair.first), DDI);
1693  clearDanglingDebugInfo();
1694}
1695
1696/// getCopyFromRegs - If a virtual register has been allocated for the value V,
1697/// emit a CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
1698SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
1699  DenseMap<const Value *, Register>::iterator It = FuncInfo.ValueMap.find(V);
1700  SDValue Result;
1701
1702  if (It != FuncInfo.ValueMap.end()) {
1703    Register InReg = It->second;
1704
1705    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
1706                     DAG.getDataLayout(), InReg, Ty,
1707                     std::nullopt); // This is not an ABI copy.
1708    SDValue Chain = DAG.getEntryNode();
1709    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
1710                                 V);
1711    resolveDanglingDebugInfo(V, Result);
1712  }
1713
1714  return Result;
1715}
1716
1717/// getValue - Return an SDValue for the given Value.
1718SDValue SelectionDAGBuilder::getValue(const Value *V) {
1719  // If we already have an SDValue for this value, use it. It's important
1720  // to do this first, so that we don't create a CopyFromReg if we already
1721  // have a regular SDValue.
1722  SDValue &N = NodeMap[V];
1723  if (N.getNode()) return N;
1724
1725  // If there's a virtual register allocated and initialized for this
1726  // value, use it.
1727  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
1728    return copyFromReg;
1729
1730  // Otherwise create a new SDValue and remember it.
1731  SDValue Val = getValueImpl(V);
1732  NodeMap[V] = Val;
1733  resolveDanglingDebugInfo(V, Val);
1734  return Val;
1735}
1736
1737/// getNonRegisterValue - Return an SDValue for the given Value, but
1738/// don't look in FuncInfo.ValueMap for a virtual register.
1739SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
1740  // If we already have an SDValue for this value, use it.
1741  SDValue &N = NodeMap[V];
1742  if (N.getNode()) {
1743    if (isIntOrFPConstant(N)) {
1744      // Remove the debug location from the node as the node is about to be used
1745      // in a location which may differ from the original debug location.  This
1746      // is relevant to Constant and ConstantFP nodes because they can appear
1747      // as constant expressions inside PHI nodes.
1748      N->setDebugLoc(DebugLoc());
1749    }
1750    return N;
1751  }
1752
1753  // Otherwise create a new SDValue and remember it.
1754  SDValue Val = getValueImpl(V);
1755  NodeMap[V] = Val;
1756  resolveDanglingDebugInfo(V, Val);
1757  return Val;
1758}
1759
1760/// getValueImpl - Helper function for getValue and getNonRegisterValue.
1761/// Create an SDValue for the given value.
1762SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
1763  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
1764
1765  if (const Constant *C = dyn_cast<Constant>(V)) {
1766    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);
1767
1768    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
1769      return DAG.getConstant(*CI, getCurSDLoc(), VT);
1770
1771    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
1772      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);
1773
1774    if (isa<ConstantPointerNull>(C)) {
1775      unsigned AS = V->getType()->getPointerAddressSpace();
1776      return DAG.getConstant(0, getCurSDLoc(),
1777                             TLI.getPointerTy(DAG.getDataLayout(), AS));
1778    }
1779
1780    if (match(C, m_VScale()))
1781      return DAG.getVScale(getCurSDLoc(), VT, APInt(VT.getSizeInBits(), 1));
1782
1783    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
1784      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);
1785
1786    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
1787      return DAG.getUNDEF(VT);
1788
1789    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
1790      visit(CE->getOpcode(), *CE);
1791      SDValue N1 = NodeMap[V];
1792      assert(N1.getNode() && "visit didn't populate the NodeMap!");
1793      return N1;
1794    }
1795
1796    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
1797      SmallVector<SDValue, 4> Constants;
1798      for (const Use &U : C->operands()) {
1799        SDNode *Val = getValue(U).getNode();
1800        // If the operand is an empty aggregate, there are no values.
1801        if (!Val) continue;
1802        // Add each leaf value from the operand to the Constants list
1803        // to form a flattened list of all the values.
1804        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1805          Constants.push_back(SDValue(Val, i));
1806      }
1807
1808      return DAG.getMergeValues(Constants, getCurSDLoc());
1809    }
1810
1811    if (const ConstantDataSequential *CDS =
1812          dyn_cast<ConstantDataSequential>(C)) {
1813      SmallVector<SDValue, 4> Ops;
1814      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
1815        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
1816        // Add each leaf value from the element to the Ops list
1817        // to form a flattened list of all the values.
1818        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
1819          Ops.push_back(SDValue(Val, i));
1820      }
1821
1822      if (isa<ArrayType>(CDS->getType()))
1823        return DAG.getMergeValues(Ops, getCurSDLoc());
1824      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1825    }
1826
1827    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
1828      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
1829             "Unknown struct or array constant!");
1830
1831      SmallVector<EVT, 4> ValueVTs;
1832      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
1833      unsigned NumElts = ValueVTs.size();
1834      if (NumElts == 0)
1835        return SDValue(); // empty struct
1836      SmallVector<SDValue, 4> Constants(NumElts);
1837      for (unsigned i = 0; i != NumElts; ++i) {
1838        EVT EltVT = ValueVTs[i];
1839        if (isa<UndefValue>(C))
1840          Constants[i] = DAG.getUNDEF(EltVT);
1841        else if (EltVT.isFloatingPoint())
1842          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1843        else
1844          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
1845      }
1846
1847      return DAG.getMergeValues(Constants, getCurSDLoc());
1848    }
1849
1850    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
1851      return DAG.getBlockAddress(BA, VT);
1852
1853    if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C))
1854      return getValue(Equiv->getGlobalValue());
1855
1856    if (const auto *NC = dyn_cast<NoCFIValue>(C))
1857      return getValue(NC->getGlobalValue());
1858
1859    if (VT == MVT::aarch64svcount) {
1860      assert(C->isNullValue() && "Can only zero this target type!");
1861      return DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT,
1862                         DAG.getConstant(0, getCurSDLoc(), MVT::nxv16i1));
1863    }
1864
1865    VectorType *VecTy = cast<VectorType>(V->getType());
1866
1867    // Now that we know the number and type of the elements, get that number of
1868    // elements into the Ops array based on what kind of constant it is.
1869    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
1870      SmallVector<SDValue, 16> Ops;
1871      unsigned NumElements = cast<FixedVectorType>(VecTy)->getNumElements();
1872      for (unsigned i = 0; i != NumElements; ++i)
1873        Ops.push_back(getValue(CV->getOperand(i)));
1874
1875      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
1876    }
1877
1878    if (isa<ConstantAggregateZero>(C)) {
1879      EVT EltVT =
1880          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());
1881
1882      SDValue Op;
1883      if (EltVT.isFloatingPoint())
1884        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
1885      else
1886        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
1887
1888      return NodeMap[V] = DAG.getSplat(VT, getCurSDLoc(), Op);
1889    }
1890
1891    llvm_unreachable("Unknown vector constant");
1892  }
1893
1894  // If this is a static alloca, generate it as the frameindex instead of
1895  // recomputing it.
1896  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
1897    DenseMap<const AllocaInst*, int>::iterator SI =
1898      FuncInfo.StaticAllocaMap.find(AI);
1899    if (SI != FuncInfo.StaticAllocaMap.end())
1900      return DAG.getFrameIndex(
1901          SI->second, TLI.getValueType(DAG.getDataLayout(), AI->getType()));
1902  }
1903
1904  // If this is an instruction which fast-isel has deferred, select it now.
1905  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
1906    Register InReg = FuncInfo.InitializeRegForValue(Inst);
1907
1908    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
1909                     Inst->getType(), std::nullopt);
1910    SDValue Chain = DAG.getEntryNode();
1911    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
1912  }
1913
1914  if (const MetadataAsValue *MD = dyn_cast<MetadataAsValue>(V))
1915    return DAG.getMDNode(cast<MDNode>(MD->getMetadata()));
1916
1917  if (const auto *BB = dyn_cast<BasicBlock>(V))
1918    return DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
1919
1920  llvm_unreachable("Can't get register for value!");
1921}
1922
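// Lowering a catchpad emits no code; depending on the EH personality we only
// mark the machine basic block as an EH scope entry and/or a funclet entry.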
1923void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
1924  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1925  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
1926  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
1927  bool IsSEH = isAsynchronousEHPersonality(Pers);
1928  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
1929  if (!IsSEH)
1930    CatchPadMBB->setIsEHScopeEntry();
1931  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
1932  if (IsMSVCCXX || IsCoreCLR)
1933    CatchPadMBB->setIsEHFuncletEntry();
1934}
1935
1936void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
1937  // Update machine-CFG edge.
1938  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
1939  FuncInfo.MBB->addSuccessor(TargetMBB);
1940  TargetMBB->setIsEHCatchretTarget(true);
1941  DAG.getMachineFunction().setHasEHCatchret(true);
1942
1943  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1944  bool IsSEH = isAsynchronousEHPersonality(Pers);
1945  if (IsSEH) {
1946    // If this is not a fall-through branch or optimizations are switched off,
1947    // emit the branch.
1948    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
1949        TM.getOptLevel() == CodeGenOptLevel::None)
1950      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
1951                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
1952    return;
1953  }
1954
1955  // Figure out the funclet membership for the catchret's successor.
1956  // This will be used by the FuncletLayout pass to determine how to order the
1957  // BBs.
1958  // A 'catchret' returns to the outer scope's color.
1959  Value *ParentPad = I.getCatchSwitchParentPad();
1960  const BasicBlock *SuccessorColor;
1961  if (isa<ConstantTokenNone>(ParentPad))
1962    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
1963  else
1964    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
1965  assert(SuccessorColor && "No parent funclet for catchret!");
1966  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
1967  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");
1968
1969  // Create the terminator node.
1970  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
1971                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
1972                            DAG.getBasicBlock(SuccessorColorMBB));
1973  DAG.setRoot(Ret);
1974}
1975
1976void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
1977  // Don't emit any special code for the cleanuppad instruction. It just marks
1978  // the start of an EH scope/funclet.
1979  FuncInfo.MBB->setIsEHScopeEntry();
1980  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
1981  if (Pers != EHPersonality::Wasm_CXX) {
1982    FuncInfo.MBB->setIsEHFuncletEntry();
1983    FuncInfo.MBB->setIsCleanupFuncletEntry();
1984  }
1985}
1986
1987// In wasm EH, even though a catchpad may not catch an exception if a tag does
1988// not match, it is OK to add only the first unwind destination catchpad to the
1989// successors, because there will be at least one invoke instruction within the
1990// catch scope that points to the next unwind destination, if one exists, so
1991// CFGSort cannot mess up the BB sorting order.
1992// (All catchpads with 'catch (type)' clauses have a 'llvm.rethrow' intrinsic
1993// call within them, and catchpads only consisting of 'catch (...)' have a
1994// '__cxa_end_catch' call within them, both of which generate invokes in case
1995// the next unwind destination exists, i.e., the next unwind destination is not
1996// the caller.)
1997//
1998// Having at most one EH pad successor is also simpler and helps later
1999// transformations.
2000//
2001// For example,
2002// current:
2003//   invoke void @foo to ... unwind label %catch.dispatch
2004// catch.dispatch:
2005//   %0 = catchswitch within ... [label %catch.start] unwind label %next
2006// catch.start:
2007//   ...
2008//   ... in this BB or some other child BB dominated by this BB there will be an
2009//   invoke that points to 'next' BB as an unwind destination
2010//
2011// next: ; We don't need to add this to 'current' BB's successor
2012//   ...
2013static void findWasmUnwindDestinations(
2014    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2015    BranchProbability Prob,
2016    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2017        &UnwindDests) {
2018  while (EHPadBB) {
2019    const Instruction *Pad = EHPadBB->getFirstNonPHI();
2020    if (isa<CleanupPadInst>(Pad)) {
2021      // Stop on cleanup pads.
2022      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2023      UnwindDests.back().first->setIsEHScopeEntry();
2024      break;
2025    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2026      // Add the catchpad handlers to the possible destinations. We don't
2027      // continue to the unwind destination of the catchswitch for wasm.
2028      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2029        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2030        UnwindDests.back().first->setIsEHScopeEntry();
2031      }
2032      break;
2033    } else {
2034      continue;
2035    }
2036  }
2037}
2038
2039/// When an invoke or a cleanupret unwinds to the next EH pad, there are
2040/// many places it could ultimately go. In the IR, we have a single unwind
2041/// destination, but in the machine CFG, we enumerate all the possible blocks.
2042/// This function skips over imaginary basic blocks that hold catchswitch
2043/// instructions, and finds all the "real" machine
2044/// basic block destinations. As those destinations may not be successors of
2045/// EHPadBB, here we also calculate the edge probability to those destinations.
2046/// The passed-in Prob is the edge probability to EHPadBB.
2047static void findUnwindDestinations(
2048    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
2049    BranchProbability Prob,
2050    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2051        &UnwindDests) {
2052  EHPersonality Personality =
2053    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
2054  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2055  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2056  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2057  bool IsSEH = isAsynchronousEHPersonality(Personality);
2058
2059  if (IsWasmCXX) {
2060    findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
2061    assert(UnwindDests.size() <= 1 &&
2062           "There should be at most one unwind destination for wasm");
2063    return;
2064  }
2065
2066  while (EHPadBB) {
2067    const Instruction *Pad = EHPadBB->getFirstNonPHI();
2068    BasicBlock *NewEHPadBB = nullptr;
2069    if (isa<LandingPadInst>(Pad)) {
2070      // Stop on landingpads. They are not funclets.
2071      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2072      break;
2073    } else if (isa<CleanupPadInst>(Pad)) {
2074      // Stop on cleanup pads. Cleanups are always funclet entries for all known
2075      // personalities.
2076      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
2077      UnwindDests.back().first->setIsEHScopeEntry();
2078      UnwindDests.back().first->setIsEHFuncletEntry();
2079      break;
2080    } else if (const auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2081      // Add the catchpad handlers to the possible destinations.
2082      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2083        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
2084        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2085        if (IsMSVCCXX || IsCoreCLR)
2086          UnwindDests.back().first->setIsEHFuncletEntry();
2087        if (!IsSEH)
2088          UnwindDests.back().first->setIsEHScopeEntry();
2089      }
2090      NewEHPadBB = CatchSwitch->getUnwindDest();
2091    } else {
2092      continue;
2093    }
2094
2095    BranchProbabilityInfo *BPI = FuncInfo.BPI;
2096    if (BPI && NewEHPadBB)
2097      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2098    EHPadBB = NewEHPadBB;
2099  }
2100}
2101
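// Lower 'cleanupret': record the possible unwind destinations as machine-CFG
// successors with their edge probabilities and emit a CLEANUPRET terminator.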
2102void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
2103  // Update successor info.
2104  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2105  auto UnwindDest = I.getUnwindDest();
2106  BranchProbabilityInfo *BPI = FuncInfo.BPI;
2107  BranchProbability UnwindDestProb =
2108      (BPI && UnwindDest)
2109          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
2110          : BranchProbability::getZero();
2111  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
2112  for (auto &UnwindDest : UnwindDests) {
2113    UnwindDest.first->setIsEHPad();
2114    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
2115  }
2116  FuncInfo.MBB->normalizeSuccProbs();
2117
2118  // Create the terminator node.
2119  SDValue Ret =
2120      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
2121  DAG.setRoot(Ret);
2122}
2123
2124void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
2125  report_fatal_error("visitCatchSwitch not yet implemented!");
2126}
2127
2128void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
2129  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2130  auto &DL = DAG.getDataLayout();
2131  SDValue Chain = getControlRoot();
2132  SmallVector<ISD::OutputArg, 8> Outs;
2133  SmallVector<SDValue, 8> OutVals;
2134
2135  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
2136  // lower
2137  //
2138  //   %val = call <ty> @llvm.experimental.deoptimize()
2139  //   ret <ty> %val
2140  //
2141  // differently.
2142  if (I.getParent()->getTerminatingDeoptimizeCall()) {
2143    LowerDeoptimizingReturn();
2144    return;
2145  }
2146
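  // If the target cannot lower this return value in registers (sret demotion),
  // store it through the hidden pointer held in the demote register instead,
  // and leave Outs empty.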
2147  if (!FuncInfo.CanLowerReturn) {
2148    unsigned DemoteReg = FuncInfo.DemoteRegister;
2149    const Function *F = I.getParent()->getParent();
2150
2151    // Emit a store of the return value through the virtual register.
2152    // Leave Outs empty so that LowerReturn won't try to load return
2153    // registers the usual way.
2154    SmallVector<EVT, 1> PtrValueVTs;
2155    ComputeValueVTs(TLI, DL,
2156                    PointerType::get(F->getContext(),
2157                                     DAG.getDataLayout().getAllocaAddrSpace()),
2158                    PtrValueVTs);
2159
2160    SDValue RetPtr =
2161        DAG.getCopyFromReg(Chain, getCurSDLoc(), DemoteReg, PtrValueVTs[0]);
2162    SDValue RetOp = getValue(I.getOperand(0));
2163
2164    SmallVector<EVT, 4> ValueVTs, MemVTs;
2165    SmallVector<uint64_t, 4> Offsets;
2166    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
2167                    &Offsets, 0);
2168    unsigned NumValues = ValueVTs.size();
2169
2170    SmallVector<SDValue, 4> Chains(NumValues);
2171    Align BaseAlign = DL.getPrefTypeAlign(I.getOperand(0)->getType());
2172    for (unsigned i = 0; i != NumValues; ++i) {
2173      // An aggregate return value cannot wrap around the address space, so
2174      // offsets to its parts don't wrap either.
2175      SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr,
2176                                           TypeSize::getFixed(Offsets[i]));
2177
2178      SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
2179      if (MemVTs[i] != ValueVTs[i])
2180        Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
2181      Chains[i] = DAG.getStore(
2182          Chain, getCurSDLoc(), Val,
2183          // FIXME: better loc info would be nice.
2184          Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()),
2185          commonAlignment(BaseAlign, Offsets[i]));
2186    }
2187
2188    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
2189                        MVT::Other, Chains);
2190  } else if (I.getNumOperands() != 0) {
2191    SmallVector<EVT, 4> ValueVTs;
2192    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
2193    unsigned NumValues = ValueVTs.size();
2194    if (NumValues) {
2195      SDValue RetOp = getValue(I.getOperand(0));
2196
2197      const Function *F = I.getParent()->getParent();
2198
2199      bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
2200          I.getOperand(0)->getType(), F->getCallingConv(),
2201          /*IsVarArg*/ false, DL);
2202
2203      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
2204      if (F->getAttributes().hasRetAttr(Attribute::SExt))
2205        ExtendKind = ISD::SIGN_EXTEND;
2206      else if (F->getAttributes().hasRetAttr(Attribute::ZExt))
2207        ExtendKind = ISD::ZERO_EXTEND;
2208
2209      LLVMContext &Context = F->getContext();
2210      bool RetInReg = F->getAttributes().hasRetAttr(Attribute::InReg);
2211
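      // Split each component of the return value into the parts required by the
      // calling convention, applying any promised extension and argument flags.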
2212      for (unsigned j = 0; j != NumValues; ++j) {
2213        EVT VT = ValueVTs[j];
2214
2215        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
2216          VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);
2217
2218        CallingConv::ID CC = F->getCallingConv();
2219
2220        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
2221        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
2222        SmallVector<SDValue, 4> Parts(NumParts);
2223        getCopyToParts(DAG, getCurSDLoc(),
2224                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
2225                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);
2226
2227        // 'inreg' on the function refers to the return value.
2228        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2229        if (RetInReg)
2230          Flags.setInReg();
2231
2232        if (I.getOperand(0)->getType()->isPointerTy()) {
2233          Flags.setPointer();
2234          Flags.setPointerAddrSpace(
2235              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
2236        }
2237
2238        if (NeedsRegBlock) {
2239          Flags.setInConsecutiveRegs();
2240          if (j == NumValues - 1)
2241            Flags.setInConsecutiveRegsLast();
2242        }
2243
2244        // Propagate extension type if any
2245        if (ExtendKind == ISD::SIGN_EXTEND)
2246          Flags.setSExt();
2247        else if (ExtendKind == ISD::ZERO_EXTEND)
2248          Flags.setZExt();
2249
2250        for (unsigned i = 0; i < NumParts; ++i) {
2251          Outs.push_back(ISD::OutputArg(Flags,
2252                                        Parts[i].getValueType().getSimpleVT(),
2253                                        VT, /*isfixed=*/true, 0, 0));
2254          OutVals.push_back(Parts[i]);
2255        }
2256      }
2257    }
2258  }
2259
2260  // Push the swifterror virtual register in as the last element of Outs. This
2261  // makes sure the swifterror virtual register will be returned in the
2262  // swifterror physical register.
2263  const Function *F = I.getParent()->getParent();
2264  if (TLI.supportSwiftError() &&
2265      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
2266    assert(SwiftError.getFunctionArg() && "Need a swift error argument");
2267    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
2268    Flags.setSwiftError();
2269    Outs.push_back(ISD::OutputArg(
2270        Flags, /*vt=*/TLI.getPointerTy(DL), /*argvt=*/EVT(TLI.getPointerTy(DL)),
2271        /*isfixed=*/true, /*origidx=*/1, /*partOffs=*/0));
2272    // Create SDNode for the swifterror virtual register.
2273    OutVals.push_back(
2274        DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
2275                            &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
2276                        EVT(TLI.getPointerTy(DL))));
2277  }
2278
2279  bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
2280  CallingConv::ID CallConv =
2281    DAG.getMachineFunction().getFunction().getCallingConv();
2282  Chain = DAG.getTargetLoweringInfo().LowerReturn(
2283      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);
2284
2285  // Verify that the target's LowerReturn behaved as expected.
2286  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
2287         "LowerReturn didn't return a valid chain!");
2288
2289  // Update the DAG with the new chain value resulting from return lowering.
2290  DAG.setRoot(Chain);
2291}
2292
2293/// CopyToExportRegsIfNeeded - If the given value has virtual registers
2294/// created for it, emit nodes to copy the value into the virtual
2295/// registers.
2296void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) {
2297  // Skip empty types
2298  if (V->getType()->isEmptyTy())
2299    return;
2300
2301  DenseMap<const Value *, Register>::iterator VMI = FuncInfo.ValueMap.find(V);
2302  if (VMI != FuncInfo.ValueMap.end()) {
2303    assert((!V->use_empty() || isa<CallBrInst>(V)) &&
2304           "Unused value assigned virtual registers!");
2305    CopyValueToVirtualRegister(V, VMI->second);
2306  }
2307}
2308
2309/// ExportFromCurrentBlock - If this condition isn't known to be exported from
2310/// the current basic block, add it to ValueMap now so that we'll get a
2311/// CopyTo/FromReg.
2312void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) {
2313  // No need to export constants.
2314  if (!isa<Instruction>(V) && !isa<Argument>(V)) return;
2315
2316  // Already exported?
2317  if (FuncInfo.isExportedInst(V)) return;
2318
2319  Register Reg = FuncInfo.InitializeRegForValue(V);
2320  CopyValueToVirtualRegister(V, Reg);
2321}
2322
2323bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V,
2324                                                     const BasicBlock *FromBB) {
2325  // The operands of the setcc have to be in this block.  We don't know
2326  // how to export them from some other block.
2327  if (const Instruction *VI = dyn_cast<Instruction>(V)) {
2328    // Can export from current BB.
2329    if (VI->getParent() == FromBB)
2330      return true;
2331
2332    // It is already exported, noop.
2333    return FuncInfo.isExportedInst(V);
2334  }
2335
2336  // If this is an argument, we can export it if the BB is the entry block or
2337  // if it is already exported.
2338  if (isa<Argument>(V)) {
2339    if (FromBB->isEntryBlock())
2340      return true;
2341
2342    // Otherwise, can only export this if it is already exported.
2343    return FuncInfo.isExportedInst(V);
2344  }
2345
2346  // Otherwise, constants can always be exported.
2347  return true;
2348}
2349
2350/// Return branch probability calculated by BranchProbabilityInfo for IR blocks.
2351BranchProbability
2352SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
2353                                        const MachineBasicBlock *Dst) const {
2354  BranchProbabilityInfo *BPI = FuncInfo.BPI;
2355  const BasicBlock *SrcBB = Src->getBasicBlock();
2356  const BasicBlock *DstBB = Dst->getBasicBlock();
2357  if (!BPI) {
2358    // If BPI is not available, set the default probability as 1 / N, where N is
2359    // the number of successors.
2360    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
2361    return BranchProbability(1, SuccSize);
2362  }
2363  return BPI->getEdgeProbability(SrcBB, DstBB);
2364}
2365
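// Add Dst as a successor of Src in the machine CFG, using the BPI-derived edge
// probability when available and an unannotated edge otherwise.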
2366void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
2367                                               MachineBasicBlock *Dst,
2368                                               BranchProbability Prob) {
2369  if (!FuncInfo.BPI)
2370    Src->addSuccessorWithoutProb(Dst);
2371  else {
2372    if (Prob.isUnknown())
2373      Prob = getEdgeProbability(Src, Dst);
2374    Src->addSuccessor(Dst, Prob);
2375  }
2376}
2377
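// Return true if V is defined in BB; non-instruction values (constants,
// arguments) are considered usable from any block.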
2378static bool InBlock(const Value *V, const BasicBlock *BB) {
2379  if (const Instruction *I = dyn_cast<Instruction>(V))
2380    return I->getParent() == BB;
2381  return true;
2382}
2383
2384/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
2385/// This function emits a branch and is used at the leaves of an OR or an
2386/// AND operator tree.
2387void
2388SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
2389                                                  MachineBasicBlock *TBB,
2390                                                  MachineBasicBlock *FBB,
2391                                                  MachineBasicBlock *CurBB,
2392                                                  MachineBasicBlock *SwitchBB,
2393                                                  BranchProbability TProb,
2394                                                  BranchProbability FProb,
2395                                                  bool InvertCond) {
2396  const BasicBlock *BB = CurBB->getBasicBlock();
2397
2398  // If the leaf of the tree is a comparison, merge the condition into
2399  // the caseblock.
2400  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
2401    // The operands of the cmp have to be in this block.  We don't know
2402    // how to export them from some other block.  If this is the first block
2403    // of the sequence, no exporting is needed.
2404    if (CurBB == SwitchBB ||
2405        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
2406         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
2407      ISD::CondCode Condition;
2408      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
2409        ICmpInst::Predicate Pred =
2410            InvertCond ? IC->getInversePredicate() : IC->getPredicate();
2411        Condition = getICmpCondCode(Pred);
2412      } else {
2413        const FCmpInst *FC = cast<FCmpInst>(Cond);
2414        FCmpInst::Predicate Pred =
2415            InvertCond ? FC->getInversePredicate() : FC->getPredicate();
2416        Condition = getFCmpCondCode(Pred);
2417        if (TM.Options.NoNaNsFPMath)
2418          Condition = getFCmpCodeWithoutNaN(Condition);
2419      }
2420
2421      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
2422                   TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2423      SL->SwitchCases.push_back(CB);
2424      return;
2425    }
2426  }
2427
2428  // Create a CaseBlock record representing this branch.
2429  ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
2430  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
2431               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
2432  SL->SwitchCases.push_back(CB);
2433}
2434
2435void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
2436                                               MachineBasicBlock *TBB,
2437                                               MachineBasicBlock *FBB,
2438                                               MachineBasicBlock *CurBB,
2439                                               MachineBasicBlock *SwitchBB,
2440                                               Instruction::BinaryOps Opc,
2441                                               BranchProbability TProb,
2442                                               BranchProbability FProb,
2443                                               bool InvertCond) {
2444  // Skip over a 'not' node that is not itself part of the or/and tree, and
2445  // remember to invert the op and operands at the next level.
2446  Value *NotCond;
2447  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
2448      InBlock(NotCond, CurBB->getBasicBlock())) {
2449    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
2450                         !InvertCond);
2451    return;
2452  }
2453
2454  const Instruction *BOp = dyn_cast<Instruction>(Cond);
2455  const Value *BOpOp0, *BOpOp1;
2456  // Compute the effective opcode for Cond, taking into account whether it needs
2457  // to be inverted, e.g.
2458  //   and (not (or A, B)), C
2459  // gets lowered as
2460  //   and (and (not A, not B), C)
2461  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
2462  if (BOp) {
2463    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
2464               ? Instruction::And
2465               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
2466                      ? Instruction::Or
2467                      : (Instruction::BinaryOps)0);
2468    if (InvertCond) {
2469      if (BOpc == Instruction::And)
2470        BOpc = Instruction::Or;
2471      else if (BOpc == Instruction::Or)
2472        BOpc = Instruction::And;
2473    }
2474  }
2475
2476  // If this node is not part of the or/and tree, emit it as a branch.
2477  // Note that all nodes in the tree should have the same opcode.
2478  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
2479  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
2480      !InBlock(BOpOp0, CurBB->getBasicBlock()) ||
2481      !InBlock(BOpOp1, CurBB->getBasicBlock())) {
2482    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
2483                                 TProb, FProb, InvertCond);
2484    return;
2485  }
2486
2487  //  Create TmpBB after CurBB.
2488  MachineFunction::iterator BBI(CurBB);
2489  MachineFunction &MF = DAG.getMachineFunction();
2490  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
2491  CurBB->getParent()->insert(++BBI, TmpBB);
2492
2493  if (Opc == Instruction::Or) {
2494    // Codegen X | Y as:
2495    // BB1:
2496    //   jmp_if_X TBB
2497    //   jmp TmpBB
2498    // TmpBB:
2499    //   jmp_if_Y TBB
2500    //   jmp FBB
2501    //
2502
2503    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2504    // The requirement is that
2505    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
2506    //     = TrueProb for original BB.
2507    // Assuming the original probabilities are A and B, one choice is to set
2508    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
2509    // A/(1+B) and 2B/(1+B). This choice assumes that
2510    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
2511    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
2512    // TmpBB, but the math is more complicated.
2513
2514    auto NewTrueProb = TProb / 2;
2515    auto NewFalseProb = TProb / 2 + FProb;
2516    // Emit the LHS condition.
2517    FindMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
2518                         NewFalseProb, InvertCond);
2519
2520    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
2521    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
2522    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2523    // Emit the RHS condition into TmpBB.
2524    FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2525                         Probs[1], InvertCond);
2526  } else {
2527    assert(Opc == Instruction::And && "Unknown merge op!");
2528    // Codegen X & Y as:
2529    // BB1:
2530    //   jmp_if_X TmpBB
2531    //   jmp FBB
2532    // TmpBB:
2533    //   jmp_if_Y TBB
2534    //   jmp FBB
2535    //
2536    //  This requires creation of TmpBB after CurBB.
2537
2538    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
2539    // The requirement is that
2540    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
2541    //     = FalseProb for original BB.
2542    // Assuming the original probabilities are A and B, one choice is to set
2543    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
2544    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
2545    // TrueProb for BB1 * FalseProb for TmpBB.
2546
2547    auto NewTrueProb = TProb + FProb / 2;
2548    auto NewFalseProb = FProb / 2;
2549    // Emit the LHS condition.
2550    FindMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
2551                         NewFalseProb, InvertCond);
2552
2553    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
2554    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
2555    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
2556    // Emit the RHS condition into TmpBB.
2557    FindMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
2558                         Probs[1], InvertCond);
2559  }
2560}
2561
2562/// If the set of cases should be emitted as a series of branches, return true.
2563/// If we should emit this as a bunch of and/or'd together conditions, return
2564/// false.
2565bool
2566SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
2567  if (Cases.size() != 2) return true;
2568
2569  // If this is two comparisons of the same values or'd or and'd together, they
2570  // will get folded into a single comparison, so don't emit two blocks.
2571  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
2572       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
2573      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
2574       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
2575    return false;
2576  }
2577
2578  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
2579  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
2580  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
2581      Cases[0].CC == Cases[1].CC &&
2582      isa<Constant>(Cases[0].CmpRHS) &&
2583      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
2584    if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB)
2585      return false;
2586    if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB)
2587      return false;
2588  }
2589
2590  return true;
2591}
2592
2593void SelectionDAGBuilder::visitBr(const BranchInst &I) {
2594  MachineBasicBlock *BrMBB = FuncInfo.MBB;
2595
2596  // Update machine-CFG edges.
2597  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
2598
2599  if (I.isUnconditional()) {
2600    // Update machine-CFG edges.
2601    BrMBB->addSuccessor(Succ0MBB);
2602
2603    // If this is not a fall-through branch or optimizations are switched off,
2604    // emit the branch.
2605    if (Succ0MBB != NextBlock(BrMBB) ||
2606        TM.getOptLevel() == CodeGenOptLevel::None) {
2607      auto Br = DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
2608                            getControlRoot(), DAG.getBasicBlock(Succ0MBB));
2609      setValue(&I, Br);
2610      DAG.setRoot(Br);
2611    }
2612
2613    return;
2614  }
2615
2616  // If this condition is one of the special cases we handle, do special stuff
2617  // now.
2618  const Value *CondVal = I.getCondition();
2619  MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
2620
2621  // If this is a series of conditions that are or'd or and'd together, emit
2622  // this as a sequence of branches instead of setcc's with and/or operations.
2623  // As long as jumps are not expensive (exceptions for multi-use logic ops,
2624  // unpredictable branches, and vector extracts because those jumps are likely
2625  // expensive for any target), this should improve performance.
2626  // For example, instead of something like:
2627  //     cmp A, B
2628  //     C = seteq
2629  //     cmp D, E
2630  //     F = setle
2631  //     or C, F
2632  //     jnz foo
2633  // Emit:
2634  //     cmp A, B
2635  //     je foo
2636  //     cmp D, E
2637  //     jle foo
2638  const Instruction *BOp = dyn_cast<Instruction>(CondVal);
2639  if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp &&
2640      BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable)) {
2641    Value *Vec;
2642    const Value *BOp0, *BOp1;
2643    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
2644    if (match(BOp, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
2645      Opcode = Instruction::And;
2646    else if (match(BOp, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
2647      Opcode = Instruction::Or;
2648
2649    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
2650                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
2651      FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode,
2652                           getEdgeProbability(BrMBB, Succ0MBB),
2653                           getEdgeProbability(BrMBB, Succ1MBB),
2654                           /*InvertCond=*/false);
2655      // If the compares in later blocks need to use values not currently
2656      // exported from this block, export them now.  This block should always
2657      // be the first entry.
2658      assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!");
2659
2660      // Allow some cases to be rejected.
2661      if (ShouldEmitAsBranches(SL->SwitchCases)) {
2662        for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) {
2663          ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS);
2664          ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS);
2665        }
2666
2667        // Emit the branch for this block.
2668        visitSwitchCase(SL->SwitchCases[0], BrMBB);
2669        SL->SwitchCases.erase(SL->SwitchCases.begin());
2670        return;
2671      }
2672
2673      // Okay, we decided not to do this, remove any inserted MBB's and clear
2674      // SwitchCases.
2675      for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i)
2676        FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB);
2677
2678      SL->SwitchCases.clear();
2679    }
2680  }
2681
2682  // Create a CaseBlock record representing this branch.
2683  CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()),
2684               nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc());
2685
2686  // Use visitSwitchCase to actually insert the fast branch sequence for this
2687  // cond branch.
2688  visitSwitchCase(CB, BrMBB);
2689}
2690
2691/// visitSwitchCase - Emits the necessary code to represent a single node in
2692/// the binary search tree resulting from lowering a switch instruction.
2693void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
2694                                          MachineBasicBlock *SwitchBB) {
2695  SDValue Cond;
2696  SDValue CondLHS = getValue(CB.CmpLHS);
2697  SDLoc dl = CB.DL;
2698
2699  if (CB.CC == ISD::SETTRUE) {
2700    // Branch or fall through to TrueBB.
2701    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2702    SwitchBB->normalizeSuccProbs();
2703    if (CB.TrueBB != NextBlock(SwitchBB)) {
2704      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
2705                              DAG.getBasicBlock(CB.TrueBB)));
2706    }
2707    return;
2708  }
2709
2710  auto &TLI = DAG.getTargetLoweringInfo();
2711  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());
2712
2713  // Build the setcc now.
2714  if (!CB.CmpMHS) {
2715    // Fold "(X == true)" to X and "(X == false)" to !X to
2716    // handle common cases produced by branch lowering.
2717    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
2718        CB.CC == ISD::SETEQ)
2719      Cond = CondLHS;
2720    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
2721             CB.CC == ISD::SETEQ) {
2722      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
2723      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
2724    } else {
2725      SDValue CondRHS = getValue(CB.CmpRHS);
2726
2727      // If a pointer's DAG type is larger than its memory type then the DAG
2728      // values are zero-extended. This breaks signed comparisons so truncate
2729      // back to the underlying type before doing the compare.
2730      if (CondLHS.getValueType() != MemVT) {
2731        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
2732        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
2733      }
2734      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
2735    }
2736  } else {
2737    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");
2738
2739    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
2740    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
2741
2742    SDValue CmpOp = getValue(CB.CmpMHS);
2743    EVT VT = CmpOp.getValueType();
2744
2745    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
2746      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
2747                          ISD::SETLE);
2748    } else {
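      // The range check Low <= X <= High is lowered as an unsigned comparison of
      // X - Low against High - Low: values below Low wrap around to large
      // unsigned values and are rejected as well. For example (illustration
      // only), the range 5..9 becomes (X - 5) <=u 4.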
2749      SDValue SUB = DAG.getNode(ISD::SUB, dl,
2750                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
2751      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
2752                          DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
2753    }
2754  }
2755
2756  // Update successor info
2757  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
2758  // TrueBB and FalseBB are always different unless the incoming IR is
2759  // degenerate. This only happens when running llc on weird IR.
2760  if (CB.TrueBB != CB.FalseBB)
2761    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
2762  SwitchBB->normalizeSuccProbs();
2763
2764  // If the lhs block is the next block, invert the condition so that we can
2765  // fall through to the lhs instead of the rhs block.
2766  if (CB.TrueBB == NextBlock(SwitchBB)) {
2767    std::swap(CB.TrueBB, CB.FalseBB);
2768    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
2769    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
2770  }
2771
2772  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2773                               MVT::Other, getControlRoot(), Cond,
2774                               DAG.getBasicBlock(CB.TrueBB));
2775
2776  setValue(CurInst, BrCond);
2777
2778  // Insert the false branch. Do this even if it's a fall through branch,
2779  // this makes it easier to do DAG optimizations which require inverting
2780  // the branch condition.
2781  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2782                       DAG.getBasicBlock(CB.FalseBB));
2783
2784  DAG.setRoot(BrCond);
2785}
2786
2787/// visitJumpTable - Emit JumpTable node in the current MBB
2788void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
2789  // Emit the code for the jump table
2790  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2791  assert(JT.Reg != -1U && "Should lower JT Header first!");
2792  EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2793  SDValue Index = DAG.getCopyFromReg(getControlRoot(), *JT.SL, JT.Reg, PTy);
2794  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
2795  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, *JT.SL, MVT::Other,
2796                                    Index.getValue(1), Table, Index);
2797  DAG.setRoot(BrJumpTable);
2798}
2799
2800/// visitJumpTableHeader - This function emits the necessary code to produce the
2801/// index into the JumpTable from the switch case value.
2802void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
2803                                               JumpTableHeader &JTH,
2804                                               MachineBasicBlock *SwitchBB) {
2805  assert(JT.SL && "Should set SDLoc for SelectionDAG!");
2806  const SDLoc &dl = *JT.SL;
2807
2808  // Subtract the lowest switch case value from the value being switched on.
2809  SDValue SwitchOp = getValue(JTH.SValue);
2810  EVT VT = SwitchOp.getValueType();
2811  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
2812                            DAG.getConstant(JTH.First, dl, VT));
2813
2814  // The SDNode we just created, which holds the value being switched on minus
2815  // the smallest case value, needs to be copied to a virtual register so it
2816  // can be used as an index into the jump table in a subsequent basic block.
2817  // This value may be smaller or larger than the target's pointer type, and
2818  // may therefore require extension or truncation.
2819  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2820  SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));
2821
2822  unsigned JumpTableReg =
2823      FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
2824  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
2825                                    JumpTableReg, SwitchOp);
2826  JT.Reg = JumpTableReg;
2827
2828  if (!JTH.FallthroughUnreachable) {
2829    // Emit the range check for the jump table, and branch to the default block
2830    // for the switch statement if the value being switched on exceeds the
2831    // largest case in the switch.
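    // Note that because Sub wraps for values below JTH.First, this single
    // unsigned comparison against JTH.Last - JTH.First also sends values
    // smaller than the first case to the default block.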
2832    SDValue CMP = DAG.getSetCC(
2833        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
2834                                   Sub.getValueType()),
2835        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);
2836
2837    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2838                                 MVT::Other, CopyTo, CMP,
2839                                 DAG.getBasicBlock(JT.Default));
2840
2841    // Avoid emitting unnecessary branches to the next block.
2842    if (JT.MBB != NextBlock(SwitchBB))
2843      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
2844                           DAG.getBasicBlock(JT.MBB));
2845
2846    DAG.setRoot(BrCond);
2847  } else {
2848    // Avoid emitting unnecessary branches to the next block.
2849    if (JT.MBB != NextBlock(SwitchBB))
2850      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
2851                              DAG.getBasicBlock(JT.MBB)));
2852    else
2853      DAG.setRoot(CopyTo);
2854  }
2855}
2856
2857/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
2858/// variable if there exists one.
2859static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
2860                                 SDValue &Chain) {
2861  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2862  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2863  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2864  MachineFunction &MF = DAG.getMachineFunction();
2865  Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
2866  MachineSDNode *Node =
2867      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
2868  if (Global) {
2869    MachinePointerInfo MPInfo(Global);
2870    auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
2871                 MachineMemOperand::MODereferenceable;
2872    MachineMemOperand *MemRef = MF.getMachineMemOperand(
2873        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlign(PtrTy));
2874    DAG.setNodeMemRefs(Node, {MemRef});
2875  }
2876  if (PtrTy != PtrMemTy)
2877    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
2878  return SDValue(Node, 0);
2879}
2880
2881/// Codegen a new tail for a stack protector check ParentMBB which has had its
2882/// tail spliced into a stack protector check success bb.
2883///
2884/// For a high level explanation of how this fits into the stack protector
2885/// generation see the comment on the declaration of class
2886/// StackProtectorDescriptor.
2887void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
2888                                                  MachineBasicBlock *ParentBB) {
2889
2890  // First create the loads to the guard/stack slot for the comparison.
2891  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2892  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
2893  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
2894
2895  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
2896  int FI = MFI.getStackProtectorIndex();
2897
2898  SDValue Guard;
2899  SDLoc dl = getCurSDLoc();
2900  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
2901  const Module &M = *ParentBB->getParent()->getFunction().getParent();
2902  Align Align =
2903      DAG.getDataLayout().getPrefTypeAlign(PointerType::get(M.getContext(), 0));
2904
2905  // Generate code to load the content of the guard slot.
2906  SDValue GuardVal = DAG.getLoad(
2907      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
2908      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
2909      MachineMemOperand::MOVolatile);
2910
2911  if (TLI.useStackGuardXorFP())
2912    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
2913
2914  // Retrieve guard check function, nullptr if instrumentation is inlined.
2915  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
2916    // The target provides a guard check function to validate the guard value.
2917    // Generate a call to that function with the content of the guard slot as
2918    // argument.
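    // (For example, on MSVC environments the guard check function is typically
    // __security_check_cookie.)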
2919    FunctionType *FnTy = GuardCheckFn->getFunctionType();
2920    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
2921
2922    TargetLowering::ArgListTy Args;
2923    TargetLowering::ArgListEntry Entry;
2924    Entry.Node = GuardVal;
2925    Entry.Ty = FnTy->getParamType(0);
2926    if (GuardCheckFn->hasParamAttribute(0, Attribute::AttrKind::InReg))
2927      Entry.IsInReg = true;
2928    Args.push_back(Entry);
2929
2930    TargetLowering::CallLoweringInfo CLI(DAG);
2931    CLI.setDebugLoc(getCurSDLoc())
2932        .setChain(DAG.getEntryNode())
2933        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
2934                   getValue(GuardCheckFn), std::move(Args));
2935
2936    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
2937    DAG.setRoot(Result.second);
2938    return;
2939  }
2940
2941  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
2942  // Otherwise, emit a volatile load to retrieve the stack guard value.
2943  SDValue Chain = DAG.getEntryNode();
2944  if (TLI.useLoadStackGuardNode()) {
2945    Guard = getLoadStackGuard(DAG, dl, Chain);
2946  } else {
2947    const Value *IRGuard = TLI.getSDagStackGuard(M);
2948    SDValue GuardPtr = getValue(IRGuard);
2949
2950    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
2951                        MachinePointerInfo(IRGuard, 0), Align,
2952                        MachineMemOperand::MOVolatile);
2953  }
2954
2955  // Perform the comparison via a getsetcc.
2956  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
2957                                                        *DAG.getContext(),
2958                                                        Guard.getValueType()),
2959                             Guard, GuardVal, ISD::SETNE);
2960
2961  // If the guard and stack slot values are not equal, branch to the failure MBB.
2962  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
2963                               MVT::Other, GuardVal.getOperand(0),
2964                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
2965  // Otherwise branch to success MBB.
2966  SDValue Br = DAG.getNode(ISD::BR, dl,
2967                           MVT::Other, BrCond,
2968                           DAG.getBasicBlock(SPD.getSuccessMBB()));
2969
2970  DAG.setRoot(Br);
2971}
2972
2973/// Codegen the failure basic block for a stack protector check.
2974///
2975/// A failure stack protector machine basic block consists simply of a call to
2976/// __stack_chk_fail().
2977///
2978/// For a high level explanation of how this fits into the stack protector
2979/// generation see the comment on the declaration of class
2980/// StackProtectorDescriptor.
2981void
2982SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
2983  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
2984  TargetLowering::MakeLibCallOptions CallOptions;
2985  CallOptions.setDiscardResult(true);
2986  SDValue Chain =
2987      TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
2988                      std::nullopt, CallOptions, getCurSDLoc())
2989          .second;
2990  // On PS4/PS5, the "return address" must still be within the calling
2991  // function, even if it's at the very end, so emit an explicit TRAP here.
2992  // Passing 'true' for doesNotReturn above won't generate the trap for us.
2993  if (TM.getTargetTriple().isPS())
2994    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
2995  // WebAssembly needs an unreachable instruction after a non-returning call,
2996  // because the function return type can be different from __stack_chk_fail's
2997  // return type (void).
2998  if (TM.getTargetTriple().isWasm())
2999    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);
3000
3001  DAG.setRoot(Chain);
3002}
3003
3004/// visitBitTestHeader - This function emits the necessary code to produce a
3005/// value suitable for "bit tests".
3006void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
3007                                             MachineBasicBlock *SwitchBB) {
3008  SDLoc dl = getCurSDLoc();
3009
3010  // Subtract the minimum value.
3011  SDValue SwitchOp = getValue(B.SValue);
3012  EVT VT = SwitchOp.getValueType();
3013  SDValue RangeSub =
3014      DAG.getNode(ISD::SUB, dl, VT, SwitchOp, DAG.getConstant(B.First, dl, VT));
3015
3016  // Determine the type of the test operands.
3017  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3018  bool UsePtrType = false;
3019  if (!TLI.isTypeLegal(VT)) {
3020    UsePtrType = true;
3021  } else {
3022    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
3023      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
3024        // Switch table case ranges are encoded into a series of masks.
3025        // Just use the pointer type; it's guaranteed to fit.
3026        UsePtrType = true;
3027        break;
3028      }
3029  }
3030  SDValue Sub = RangeSub;
3031  if (UsePtrType) {
3032    VT = TLI.getPointerTy(DAG.getDataLayout());
3033    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
3034  }
3035
3036  B.RegVT = VT.getSimpleVT();
3037  B.Reg = FuncInfo.CreateReg(B.RegVT);
3038  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);
3039
3040  MachineBasicBlock* MBB = B.Cases[0].ThisBB;
3041
3042  if (!B.FallthroughUnreachable)
3043    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
3044  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
3045  SwitchBB->normalizeSuccProbs();
3046
3047  SDValue Root = CopyTo;
3048  if (!B.FallthroughUnreachable) {
3049    // Conditional branch to the default block.
3050    SDValue RangeCmp = DAG.getSetCC(dl,
3051        TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
3052                               RangeSub.getValueType()),
3053        RangeSub, DAG.getConstant(B.Range, dl, RangeSub.getValueType()),
3054        ISD::SETUGT);
3055
3056    Root = DAG.getNode(ISD::BRCOND, dl, MVT::Other, Root, RangeCmp,
3057                       DAG.getBasicBlock(B.Default));
3058  }
3059
3060  // Avoid emitting unnecessary branches to the next block.
3061  if (MBB != NextBlock(SwitchBB))
3062    Root = DAG.getNode(ISD::BR, dl, MVT::Other, Root, DAG.getBasicBlock(MBB));
3063
3064  DAG.setRoot(Root);
3065}
3066
3067/// visitBitTestCase - This function produces one "bit test".
3068void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
3069                                           MachineBasicBlock* NextMBB,
3070                                           BranchProbability BranchProbToNext,
3071                                           unsigned Reg,
3072                                           BitTestCase &B,
3073                                           MachineBasicBlock *SwitchBB) {
3074  SDLoc dl = getCurSDLoc();
3075  MVT VT = BB.RegVT;
3076  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
3077  SDValue Cmp;
3078  unsigned PopCount = llvm::popcount(B.Mask);
3079  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3080  if (PopCount == 1) {
3081    // Testing for a single bit; just compare the shift count with what it
3082    // would need to be to shift a 1 bit in that position.
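    // For example (illustration only), with Mask == 0b0100 the only matching
    // value has ShiftOp == 2, i.e. countr_zero(Mask).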
3083    Cmp = DAG.getSetCC(
3084        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3085        ShiftOp, DAG.getConstant(llvm::countr_zero(B.Mask), dl, VT),
3086        ISD::SETEQ);
3087  } else if (PopCount == BB.Range) {
3088    // There is only one zero bit in the range; test for it directly.
3089    Cmp = DAG.getSetCC(
3090        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3091        ShiftOp, DAG.getConstant(llvm::countr_one(B.Mask), dl, VT), ISD::SETNE);
3092  } else {
3093    // Make desired shift
3094    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
3095                                    DAG.getConstant(1, dl, VT), ShiftOp);
3096
3097    // Emit bit tests and jumps
3098    SDValue AndOp = DAG.getNode(ISD::AND, dl,
3099                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
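    // For example (illustration only), with Mask == 0b1011 and ShiftOp == 3,
    // (1 << 3) & 0b1011 is nonzero and the test succeeds; with ShiftOp == 2 the
    // AND is zero and the test fails.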
3100    Cmp = DAG.getSetCC(
3101        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
3102        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
3103  }
3104
3105  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
3106  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
3107  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
3108  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
3109  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
3110  // one, as they are relative probabilities (and thus work more like weights),
3111  // so we need to normalize them so that they sum to one.
3112  SwitchBB->normalizeSuccProbs();
3113
3114  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
3115                              MVT::Other, getControlRoot(),
3116                              Cmp, DAG.getBasicBlock(B.TargetBB));
3117
3118  // Avoid emitting unnecessary branches to the next block.
3119  if (NextMBB != NextBlock(SwitchBB))
3120    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
3121                        DAG.getBasicBlock(NextMBB));
3122
3123  DAG.setRoot(BrAnd);
3124}
3125
3126void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
3127  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;
3128
3129  // Retrieve successors. Look through artificial IR level blocks like
3130  // catchswitch for successors.
3131  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
3132  const BasicBlock *EHPadBB = I.getSuccessor(1);
3133  MachineBasicBlock *EHPadMBB = FuncInfo.MBBMap[EHPadBB];
3134
3135  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3136  // have to do anything here to lower funclet bundles.
3137  assert(!I.hasOperandBundlesOtherThan(
3138             {LLVMContext::OB_deopt, LLVMContext::OB_gc_transition,
3139              LLVMContext::OB_gc_live, LLVMContext::OB_funclet,
3140              LLVMContext::OB_cfguardtarget,
3141              LLVMContext::OB_clang_arc_attachedcall}) &&
3142         "Cannot lower invokes with arbitrary operand bundles yet!");
3143
3144  const Value *Callee(I.getCalledOperand());
3145  const Function *Fn = dyn_cast<Function>(Callee);
3146  if (isa<InlineAsm>(Callee))
3147    visitInlineAsm(I, EHPadBB);
3148  else if (Fn && Fn->isIntrinsic()) {
3149    switch (Fn->getIntrinsicID()) {
3150    default:
3151      llvm_unreachable("Cannot invoke this intrinsic");
3152    case Intrinsic::donothing:
3153      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
3154    case Intrinsic::seh_try_begin:
3155    case Intrinsic::seh_scope_begin:
3156    case Intrinsic::seh_try_end:
3157    case Intrinsic::seh_scope_end:
3158      if (EHPadMBB)
3159          // This block is referenced by the EH table,
3160          // so the dtor funclet is not removed by later optimizations.
3161          EHPadMBB->setMachineBlockAddressTaken();
3162      break;
3163    case Intrinsic::experimental_patchpoint_void:
3164    case Intrinsic::experimental_patchpoint_i64:
3165      visitPatchpoint(I, EHPadBB);
3166      break;
3167    case Intrinsic::experimental_gc_statepoint:
3168      LowerStatepoint(cast<GCStatepointInst>(I), EHPadBB);
3169      break;
3170    case Intrinsic::wasm_rethrow: {
3171      // This is usually done in visitTargetIntrinsic, but this intrinsic is
3172      // special because it can be invoked, so we manually lower it to a DAG
3173      // node here.
3174      SmallVector<SDValue, 8> Ops;
3175      Ops.push_back(getRoot()); // inchain
3176      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3177      Ops.push_back(
3178          DAG.getTargetConstant(Intrinsic::wasm_rethrow, getCurSDLoc(),
3179                                TLI.getPointerTy(DAG.getDataLayout())));
3180      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
3181      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
3182      break;
3183    }
3184    }
3185  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
3186    // Currently we do not lower any intrinsic calls with deopt operand bundles.
3187    // Eventually we will support lowering the @llvm.experimental.deoptimize
3188    // intrinsic, and right now there are no plans to support other intrinsics
3189    // with deopt state.
3190    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
3191  } else {
3192    LowerCallTo(I, getValue(Callee), false, false, EHPadBB);
3193  }
3194
3195  // If the value of the invoke is used outside of its defining block, make it
3196  // available as a virtual register.
3197  // We already took care of the exported value for the statepoint instruction
3198  // during the call to LowerStatepoint.
3199  if (!isa<GCStatepointInst>(I)) {
3200    CopyToExportRegsIfNeeded(&I);
3201  }
3202
3203  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
3204  BranchProbabilityInfo *BPI = FuncInfo.BPI;
3205  BranchProbability EHPadBBProb =
3206      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
3207          : BranchProbability::getZero();
3208  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);
3209
3210  // Update successor info.
3211  addSuccessorWithProb(InvokeMBB, Return);
3212  for (auto &UnwindDest : UnwindDests) {
3213    UnwindDest.first->setIsEHPad();
3214    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
3215  }
3216  InvokeMBB->normalizeSuccProbs();
3217
3218  // Drop into normal successor.
3219  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(),
3220                          DAG.getBasicBlock(Return)));
3221}
3222
3223void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
3224  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;
3225
3226  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
3227  // have to do anything here to lower funclet bundles.
3228  assert(!I.hasOperandBundlesOtherThan(
3229             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
3230         "Cannot lower callbrs with arbitrary operand bundles yet!");
3231
3232  assert(I.isInlineAsm() && "Only know how to handle inlineasm callbr");
3233  visitInlineAsm(I);
3234  CopyToExportRegsIfNeeded(&I);
3235
3236  // Retrieve successors.
3237  SmallPtrSet<BasicBlock *, 8> Dests;
3238  Dests.insert(I.getDefaultDest());
3239  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];
3240
3241  // Update successor info.
3242  addSuccessorWithProb(CallBrMBB, Return, BranchProbability::getOne());
3243  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
3244    BasicBlock *Dest = I.getIndirectDest(i);
3245    MachineBasicBlock *Target = FuncInfo.MBBMap[Dest];
3246    Target->setIsInlineAsmBrIndirectTarget();
3247    Target->setMachineBlockAddressTaken();
3248    Target->setLabelMustBeEmitted();
3249    // Don't add duplicate machine successors.
3250    if (Dests.insert(Dest).second)
3251      addSuccessorWithProb(CallBrMBB, Target, BranchProbability::getZero());
3252  }
3253  CallBrMBB->normalizeSuccProbs();
3254
3255  // Drop into default successor.
3256  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
3257                          MVT::Other, getControlRoot(),
3258                          DAG.getBasicBlock(Return)));
3259}
3260
3261void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
3262  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
3263}
3264
3265void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
3266  assert(FuncInfo.MBB->isEHPad() &&
3267         "Call to landingpad not in landing pad!");
3268
3269  // If there aren't registers to copy the values into (e.g., during SjLj
3270  // exceptions), then don't bother to create these DAG nodes.
3271  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3272  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
3273  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
3274      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
3275    return;
3276
3277  // If landingpad's return type is token type, we don't create DAG nodes
3278  // for its exception pointer and selector value. The extraction of exception
3279  // pointer or selector value from token type landingpads is not currently
3280  // supported.
3281  if (LP.getType()->isTokenTy())
3282    return;
3283
3284  SmallVector<EVT, 2> ValueVTs;
3285  SDLoc dl = getCurSDLoc();
3286  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
3287  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");
3288
3289  // Get the two live-in registers as SDValues. The physregs have already been
3290  // copied into virtual registers.
3291  SDValue Ops[2];
3292  if (FuncInfo.ExceptionPointerVirtReg) {
3293    Ops[0] = DAG.getZExtOrTrunc(
3294        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3295                           FuncInfo.ExceptionPointerVirtReg,
3296                           TLI.getPointerTy(DAG.getDataLayout())),
3297        dl, ValueVTs[0]);
3298  } else {
3299    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
3300  }
3301  Ops[1] = DAG.getZExtOrTrunc(
3302      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
3303                         FuncInfo.ExceptionSelectorVirtReg,
3304                         TLI.getPointerTy(DAG.getDataLayout())),
3305      dl, ValueVTs[1]);
3306
3307  // Merge into one.
3308  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
3309                            DAG.getVTList(ValueVTs), Ops);
3310  setValue(&LP, Res);
3311}
3312
3313void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
3314                                           MachineBasicBlock *Last) {
3315  // Update JTCases.
3316  for (JumpTableBlock &JTB : SL->JTCases)
3317    if (JTB.first.HeaderBB == First)
3318      JTB.first.HeaderBB = Last;
3319
3320  // Update BitTestCases.
3321  for (BitTestBlock &BTB : SL->BitTestCases)
3322    if (BTB.Parent == First)
3323      BTB.Parent = Last;
3324}
3325
3326void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
3327  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;
3328
3329  // Update machine-CFG edges with unique successors.
3330  SmallSet<BasicBlock*, 32> Done;
3331  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
3332    BasicBlock *BB = I.getSuccessor(i);
3333    bool Inserted = Done.insert(BB).second;
3334    if (!Inserted)
3335        continue;
3336
3337    MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
3338    addSuccessorWithProb(IndirectBrMBB, Succ);
3339  }
3340  IndirectBrMBB->normalizeSuccProbs();
3341
3342  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
3343                          MVT::Other, getControlRoot(),
3344                          getValue(I.getAddress())));
3345}
3346
3347void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
3348  if (!DAG.getTarget().Options.TrapUnreachable)
3349    return;
3350
3351  // We may be able to ignore unreachable behind a noreturn call.
3352  if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
3353    if (const CallInst *Call = dyn_cast_or_null<CallInst>(I.getPrevNode())) {
3354      if (Call->doesNotReturn())
3355        return;
3356    }
3357  }
3358
3359  DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
3360}
3361
3362void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) {
3363  SDNodeFlags Flags;
3364  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3365    Flags.copyFMF(*FPOp);
3366
3367  SDValue Op = getValue(I.getOperand(0));
3368  SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(),
3369                                    Op, Flags);
3370  setValue(&I, UnNodeValue);
3371}
3372
3373void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) {
3374  SDNodeFlags Flags;
3375  if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
3376    Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap());
3377    Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap());
3378  }
3379  if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I))
3380    Flags.setExact(ExactOp->isExact());
3381  if (auto *DisjointOp = dyn_cast<PossiblyDisjointInst>(&I))
3382    Flags.setDisjoint(DisjointOp->isDisjoint());
3383  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3384    Flags.copyFMF(*FPOp);
3385
3386  SDValue Op1 = getValue(I.getOperand(0));
3387  SDValue Op2 = getValue(I.getOperand(1));
3388  SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(),
3389                                     Op1, Op2, Flags);
3390  setValue(&I, BinNodeValue);
3391}
3392
3393void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) {
3394  SDValue Op1 = getValue(I.getOperand(0));
3395  SDValue Op2 = getValue(I.getOperand(1));
3396
3397  EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy(
3398      Op1.getValueType(), DAG.getDataLayout());
3399
3400  // Coerce the shift amount to the right type if we can. This exposes the
3401  // truncate or zext to optimization early.
3402  if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) {
3403    assert(ShiftTy.getSizeInBits() >= Log2_32_Ceil(Op1.getValueSizeInBits()) &&
3404           "Unexpected shift type");
3405    Op2 = DAG.getZExtOrTrunc(Op2, getCurSDLoc(), ShiftTy);
3406  }
3407
3408  bool nuw = false;
3409  bool nsw = false;
3410  bool exact = false;
3411
3412  if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) {
3413
3414    if (const OverflowingBinaryOperator *OFBinOp =
3415            dyn_cast<const OverflowingBinaryOperator>(&I)) {
3416      nuw = OFBinOp->hasNoUnsignedWrap();
3417      nsw = OFBinOp->hasNoSignedWrap();
3418    }
3419    if (const PossiblyExactOperator *ExactOp =
3420            dyn_cast<const PossiblyExactOperator>(&I))
3421      exact = ExactOp->isExact();
3422  }
3423  SDNodeFlags Flags;
3424  Flags.setExact(exact);
3425  Flags.setNoSignedWrap(nsw);
3426  Flags.setNoUnsignedWrap(nuw);
3427  SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2,
3428                            Flags);
3429  setValue(&I, Res);
3430}
3431
3432void SelectionDAGBuilder::visitSDiv(const User &I) {
3433  SDValue Op1 = getValue(I.getOperand(0));
3434  SDValue Op2 = getValue(I.getOperand(1));
3435
3436  SDNodeFlags Flags;
3437  Flags.setExact(isa<PossiblyExactOperator>(&I) &&
3438                 cast<PossiblyExactOperator>(&I)->isExact());
3439  setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1,
3440                           Op2, Flags));
3441}
3442
3443void SelectionDAGBuilder::visitICmp(const User &I) {
3444  ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE;
3445  if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I))
3446    predicate = IC->getPredicate();
3447  else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I))
3448    predicate = ICmpInst::Predicate(IC->getPredicate());
3449  SDValue Op1 = getValue(I.getOperand(0));
3450  SDValue Op2 = getValue(I.getOperand(1));
3451  ISD::CondCode Opcode = getICmpCondCode(predicate);
3452
3453  auto &TLI = DAG.getTargetLoweringInfo();
3454  EVT MemVT =
3455      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3456
3457  // If a pointer's DAG type is larger than its memory type then the DAG values
3458  // are zero-extended. This breaks signed comparisons so truncate back to the
3459  // underlying type before doing the compare.
3460  if (Op1.getValueType() != MemVT) {
3461    Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT);
3462    Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT);
3463  }
3464
3465  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3466                                                        I.getType());
3467  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode));
3468}
3469
3470void SelectionDAGBuilder::visitFCmp(const User &I) {
3471  FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE;
3472  if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I))
3473    predicate = FC->getPredicate();
3474  else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I))
3475    predicate = FCmpInst::Predicate(FC->getPredicate());
3476  SDValue Op1 = getValue(I.getOperand(0));
3477  SDValue Op2 = getValue(I.getOperand(1));
3478
3479  ISD::CondCode Condition = getFCmpCondCode(predicate);
3480  auto *FPMO = cast<FPMathOperator>(&I);
3481  if (FPMO->hasNoNaNs() || TM.Options.NoNaNsFPMath)
3482    Condition = getFCmpCodeWithoutNaN(Condition);
3483
3484  SDNodeFlags Flags;
3485  Flags.copyFMF(*FPMO);
3486  SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
3487
3488  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3489                                                        I.getType());
3490  setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition));
3491}
3492
3493// Check if all users of the select condition are select instructions, e.g. the
3494// select itself or several selects sharing the same condition.
3495static bool hasOnlySelectUsers(const Value *Cond) {
3496  return llvm::all_of(Cond->users(), [](const Value *V) {
3497    return isa<SelectInst>(V);
3498  });
3499}
3500
3501void SelectionDAGBuilder::visitSelect(const User &I) {
3502  SmallVector<EVT, 4> ValueVTs;
3503  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
3504                  ValueVTs);
3505  unsigned NumValues = ValueVTs.size();
3506  if (NumValues == 0) return;
3507
3508  SmallVector<SDValue, 4> Values(NumValues);
3509  SDValue Cond     = getValue(I.getOperand(0));
3510  SDValue LHSVal   = getValue(I.getOperand(1));
3511  SDValue RHSVal   = getValue(I.getOperand(2));
3512  SmallVector<SDValue, 1> BaseOps(1, Cond);
3513  ISD::NodeType OpCode =
3514      Cond.getValueType().isVector() ? ISD::VSELECT : ISD::SELECT;
3515
3516  bool IsUnaryAbs = false;
3517  bool Negate = false;
3518
3519  SDNodeFlags Flags;
3520  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
3521    Flags.copyFMF(*FPOp);
3522
3523  Flags.setUnpredictable(
3524      cast<SelectInst>(I).getMetadata(LLVMContext::MD_unpredictable));
3525
3526  // Min/max matching is only viable if all output VTs are the same.
3527  if (all_equal(ValueVTs)) {
3528    EVT VT = ValueVTs[0];
3529    LLVMContext &Ctx = *DAG.getContext();
3530    auto &TLI = DAG.getTargetLoweringInfo();
3531
3532    // We care about the legality of the operation after it has been type
3533    // legalized.
3534    while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal)
3535      VT = TLI.getTypeToTransformTo(Ctx, VT);
3536
3537    // If the vselect is legal, assume we want to leave this as a vector setcc +
3538    // vselect. Otherwise, if this is going to be scalarized, we want to see if
3539    // min/max is legal on the scalar type.
3540    bool UseScalarMinMax = VT.isVector() &&
3541      !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT);
3542
3543    // ValueTracking's select pattern matching does not account for -0.0,
3544    // so we can't lower to FMINIMUM/FMAXIMUM because those nodes specify that
3545    // -0.0 is less than +0.0.
3546    Value *LHS, *RHS;
3547    auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS);
3548    ISD::NodeType Opc = ISD::DELETED_NODE;
3549    switch (SPR.Flavor) {
3550    case SPF_UMAX:    Opc = ISD::UMAX; break;
3551    case SPF_UMIN:    Opc = ISD::UMIN; break;
3552    case SPF_SMAX:    Opc = ISD::SMAX; break;
3553    case SPF_SMIN:    Opc = ISD::SMIN; break;
3554    case SPF_FMINNUM:
3555      switch (SPR.NaNBehavior) {
3556      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3557      case SPNB_RETURNS_NAN: break;
3558      case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break;
3559      case SPNB_RETURNS_ANY:
3560        if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT) ||
3561            (UseScalarMinMax &&
3562             TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType())))
3563          Opc = ISD::FMINNUM;
3564        break;
3565      }
3566      break;
3567    case SPF_FMAXNUM:
3568      switch (SPR.NaNBehavior) {
3569      case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?");
3570      case SPNB_RETURNS_NAN: break;
3571      case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break;
3572      case SPNB_RETURNS_ANY:
3573        if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT) ||
3574            (UseScalarMinMax &&
3575             TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType())))
3576          Opc = ISD::FMAXNUM;
3577        break;
3578      }
3579      break;
3580    case SPF_NABS:
3581      Negate = true;
3582      [[fallthrough]];
3583    case SPF_ABS:
3584      IsUnaryAbs = true;
3585      Opc = ISD::ABS;
3586      break;
3587    default: break;
3588    }
3589
3590    if (!IsUnaryAbs && Opc != ISD::DELETED_NODE &&
3591        (TLI.isOperationLegalOrCustomOrPromote(Opc, VT) ||
3592         (UseScalarMinMax &&
3593          TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) &&
3594        // If the underlying comparison instruction is used by any other
3595        // instruction, the consumed instructions won't be destroyed, so it is
3596        // not profitable to convert to a min/max.
3597        hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) {
3598      OpCode = Opc;
3599      LHSVal = getValue(LHS);
3600      RHSVal = getValue(RHS);
3601      BaseOps.clear();
3602    }
3603
3604    if (IsUnaryAbs) {
3605      OpCode = Opc;
3606      LHSVal = getValue(LHS);
3607      BaseOps.clear();
3608    }
3609  }
3610
3611  if (IsUnaryAbs) {
3612    for (unsigned i = 0; i != NumValues; ++i) {
3613      SDLoc dl = getCurSDLoc();
3614      EVT VT = LHSVal.getNode()->getValueType(LHSVal.getResNo() + i);
3615      Values[i] =
3616          DAG.getNode(OpCode, dl, VT, LHSVal.getValue(LHSVal.getResNo() + i));
3617      if (Negate)
3618        Values[i] = DAG.getNegative(Values[i], dl, VT);
3619    }
3620  } else {
3621    for (unsigned i = 0; i != NumValues; ++i) {
3622      SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end());
3623      Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i));
3624      Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i));
3625      Values[i] = DAG.getNode(
3626          OpCode, getCurSDLoc(),
3627          LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops, Flags);
3628    }
3629  }
3630
3631  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
3632                           DAG.getVTList(ValueVTs), Values));
3633}
3634
3635void SelectionDAGBuilder::visitTrunc(const User &I) {
3636  // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest).
3637  SDValue N = getValue(I.getOperand(0));
3638  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3639                                                        I.getType());
3640  setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N));
3641}
3642
3643void SelectionDAGBuilder::visitZExt(const User &I) {
3644  // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3645  // ZExt also can't be a cast to bool for the same reason. So, nothing much to do.
3646  SDValue N = getValue(I.getOperand(0));
3647  auto &TLI = DAG.getTargetLoweringInfo();
3648  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3649
3650  SDNodeFlags Flags;
3651  if (auto *PNI = dyn_cast<PossiblyNonNegInst>(&I))
3652    Flags.setNonNeg(PNI->hasNonNeg());
3653
3654  // Eagerly use nonneg information to canonicalize towards sign_extend if
3655  // that is the target's preference.
3656  // TODO: Let the target do this later.
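  // (For instance, RISC-V reports sign extension as cheaper than zero extension
  // for i32 -> i64.)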
3657  if (Flags.hasNonNeg() &&
3658      TLI.isSExtCheaperThanZExt(N.getValueType(), DestVT)) {
3659    setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3660    return;
3661  }
3662
3663  setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N, Flags));
3664}
3665
3666void SelectionDAGBuilder::visitSExt(const User &I) {
3667  // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest).
3668  // SExt also can't be a cast to bool for the same reason. So, nothing much to do.
3669  SDValue N = getValue(I.getOperand(0));
3670  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3671                                                        I.getType());
3672  setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N));
3673}
3674
3675void SelectionDAGBuilder::visitFPTrunc(const User &I) {
3676  // FPTrunc is never a no-op cast, no need to check
3677  SDValue N = getValue(I.getOperand(0));
3678  SDLoc dl = getCurSDLoc();
3679  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3680  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3681  setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N,
3682                           DAG.getTargetConstant(
3683                               0, dl, TLI.getPointerTy(DAG.getDataLayout()))));
3684}
3685
3686void SelectionDAGBuilder::visitFPExt(const User &I) {
3687  // FPExt is never a no-op cast, no need to check
3688  SDValue N = getValue(I.getOperand(0));
3689  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3690                                                        I.getType());
3691  setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N));
3692}
3693
3694void SelectionDAGBuilder::visitFPToUI(const User &I) {
3695  // FPToUI is never a no-op cast, no need to check
3696  SDValue N = getValue(I.getOperand(0));
3697  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3698                                                        I.getType());
3699  setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N));
3700}
3701
3702void SelectionDAGBuilder::visitFPToSI(const User &I) {
3703  // FPToSI is never a no-op cast, no need to check
3704  SDValue N = getValue(I.getOperand(0));
3705  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3706                                                        I.getType());
3707  setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N));
3708}
3709
3710void SelectionDAGBuilder::visitUIToFP(const User &I) {
3711  // UIToFP is never a no-op cast, no need to check
3712  SDValue N = getValue(I.getOperand(0));
3713  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3714                                                        I.getType());
3715  setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N));
3716}
3717
3718void SelectionDAGBuilder::visitSIToFP(const User &I) {
3719  // SIToFP is never a no-op cast, no need to check
3720  SDValue N = getValue(I.getOperand(0));
3721  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3722                                                        I.getType());
3723  setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N));
3724}
3725
3726void SelectionDAGBuilder::visitPtrToInt(const User &I) {
3727  // What to do depends on the size of the integer and the size of the pointer.
3728  // We can either truncate, zero extend, or no-op, accordingly.
3729  SDValue N = getValue(I.getOperand(0));
3730  auto &TLI = DAG.getTargetLoweringInfo();
3731  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3732                                                        I.getType());
3733  EVT PtrMemVT =
3734      TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType());
3735  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3736  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT);
3737  setValue(&I, N);
3738}
3739
3740void SelectionDAGBuilder::visitIntToPtr(const User &I) {
3741  // What to do depends on the size of the integer and the size of the pointer.
3742  // We can either truncate, zero extend, or no-op, accordingly.
3743  SDValue N = getValue(I.getOperand(0));
3744  auto &TLI = DAG.getTargetLoweringInfo();
3745  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3746  EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
3747  N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT);
3748  N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT);
3749  setValue(&I, N);
3750}
3751
3752void SelectionDAGBuilder::visitBitCast(const User &I) {
3753  SDValue N = getValue(I.getOperand(0));
3754  SDLoc dl = getCurSDLoc();
3755  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
3756                                                        I.getType());
3757
3758  // BitCast assures us that source and destination are the same size so this is
3759  // either a BITCAST or a no-op.
3760  if (DestVT != N.getValueType())
3761    setValue(&I, DAG.getNode(ISD::BITCAST, dl,
3762                             DestVT, N)); // convert types.
3763  // Check if the original LLVM IR Operand was a ConstantInt, because getValue()
3764  // might fold any kind of constant expression to an integer constant and that
3765  // is not what we are looking for. Only recognize a bitcast of a genuine
3766  // constant integer as an opaque constant.
3767  else if (ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0)))
3768    setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false,
3769                                 /*isOpaque*/true));
3770  else
3771    setValue(&I, N);            // noop cast.
3772}
3773
3774void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) {
3775  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3776  const Value *SV = I.getOperand(0);
3777  SDValue N = getValue(SV);
3778  EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3779
3780  unsigned SrcAS = SV->getType()->getPointerAddressSpace();
3781  unsigned DestAS = I.getType()->getPointerAddressSpace();
3782
3783  if (!TM.isNoopAddrSpaceCast(SrcAS, DestAS))
3784    N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS);
3785
3786  setValue(&I, N);
3787}
3788
3789void SelectionDAGBuilder::visitInsertElement(const User &I) {
3790  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3791  SDValue InVec = getValue(I.getOperand(0));
3792  SDValue InVal = getValue(I.getOperand(1));
3793  SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(),
3794                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
3795  setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(),
3796                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
3797                           InVec, InVal, InIdx));
3798}
3799
3800void SelectionDAGBuilder::visitExtractElement(const User &I) {
3801  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3802  SDValue InVec = getValue(I.getOperand(0));
3803  SDValue InIdx = DAG.getZExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(),
3804                                     TLI.getVectorIdxTy(DAG.getDataLayout()));
3805  setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(),
3806                           TLI.getValueType(DAG.getDataLayout(), I.getType()),
3807                           InVec, InIdx));
3808}
3809
3810void SelectionDAGBuilder::visitShuffleVector(const User &I) {
3811  SDValue Src1 = getValue(I.getOperand(0));
3812  SDValue Src2 = getValue(I.getOperand(1));
3813  ArrayRef<int> Mask;
3814  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
3815    Mask = SVI->getShuffleMask();
3816  else
3817    Mask = cast<ConstantExpr>(I).getShuffleMask();
3818  SDLoc DL = getCurSDLoc();
3819  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3820  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
3821  EVT SrcVT = Src1.getValueType();
3822
3823  if (all_of(Mask, [](int Elem) { return Elem == 0; }) &&
3824      VT.isScalableVector()) {
3825    // Canonical splat form of first element of first input vector.
3826    SDValue FirstElt =
3827        DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT.getScalarType(), Src1,
3828                    DAG.getVectorIdxConstant(0, DL));
3829    setValue(&I, DAG.getNode(ISD::SPLAT_VECTOR, DL, VT, FirstElt));
3830    return;
3831  }
3832
3833  // For now, we only handle splats for scalable vectors.
3834  // The DAGCombiner will perform a BUILD_VECTOR -> SPLAT_VECTOR transformation
3835  // for targets that support a SPLAT_VECTOR for non-scalable vector types.
3836  assert(!VT.isScalableVector() && "Unsupported scalable vector shuffle");
3837
3838  unsigned SrcNumElts = SrcVT.getVectorNumElements();
3839  unsigned MaskNumElts = Mask.size();
3840
3841  if (SrcNumElts == MaskNumElts) {
3842    setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask));
3843    return;
3844  }
3845
3846  // Normalize the shuffle vector since mask and vector length don't match.
3847  if (SrcNumElts < MaskNumElts) {
3848    // The mask is longer than the source vectors. We can concatenate the source
3849    // vectors to make the mask and vector lengths match.
3850
3851    if (MaskNumElts % SrcNumElts == 0) {
3852      // Mask length is a multiple of the source vector length.
3853      // Check if the shuffle is some kind of concatenation of the input
3854      // vectors.
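      // For example, with 4-element sources, mask <0,1,2,3,4,5,6,7>
      // concatenates Src1 and Src2, and <4,5,6,7,4,5,6,7> concatenates Src2
      // with itself.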
3855      unsigned NumConcat = MaskNumElts / SrcNumElts;
3856      bool IsConcat = true;
3857      SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
3858      for (unsigned i = 0; i != MaskNumElts; ++i) {
3859        int Idx = Mask[i];
3860        if (Idx < 0)
3861          continue;
3862        // Ensure the indices in each SrcVT sized piece are sequential and that
3863        // the same source is used for the whole piece.
3864        if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
3865            (ConcatSrcs[i / SrcNumElts] >= 0 &&
3866             ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) {
3867          IsConcat = false;
3868          break;
3869        }
3870        // Remember which source this index came from.
3871        ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
3872      }
3873
3874      // The shuffle is concatenating multiple vectors together. Just emit
3875      // a CONCAT_VECTORS operation.
3876      if (IsConcat) {
3877        SmallVector<SDValue, 8> ConcatOps;
3878        for (auto Src : ConcatSrcs) {
3879          if (Src < 0)
3880            ConcatOps.push_back(DAG.getUNDEF(SrcVT));
3881          else if (Src == 0)
3882            ConcatOps.push_back(Src1);
3883          else
3884            ConcatOps.push_back(Src2);
3885        }
3886        setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps));
3887        return;
3888      }
3889    }
3890
3891    unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts);
3892    unsigned NumConcat = PaddedMaskNumElts / SrcNumElts;
3893    EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(),
3894                                    PaddedMaskNumElts);
3895
3896    // Pad both vectors with undefs to make them the same length as the mask.
3897    SDValue UndefVal = DAG.getUNDEF(SrcVT);
3898
3899    SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal);
3900    SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal);
3901    MOps1[0] = Src1;
3902    MOps2[0] = Src2;
3903
3904    Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1);
3905    Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2);
3906
3907    // Readjust mask for new input vector length.
3908    SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1);
3909    for (unsigned i = 0; i != MaskNumElts; ++i) {
3910      int Idx = Mask[i];
3911      if (Idx >= (int)SrcNumElts)
3912        Idx -= SrcNumElts - PaddedMaskNumElts;
3913      MappedOps[i] = Idx;
3914    }
3915
3916    SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps);
3917
3918    // If the concatenated vector was padded, extract a subvector with the
3919    // correct number of elements.
3920    if (MaskNumElts != PaddedMaskNumElts)
3921      Result = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Result,
3922                           DAG.getVectorIdxConstant(0, DL));
3923
3924    setValue(&I, Result);
3925    return;
3926  }
3927
3928  if (SrcNumElts > MaskNumElts) {
3929    // Analyze the access pattern of the vector to see if we can extract
3930    // two subvectors and do the shuffle.
3931    int StartIdx[2] = { -1, -1 };  // StartIdx to extract from
3932    bool CanExtract = true;
3933    for (int Idx : Mask) {
3934      unsigned Input = 0;
3935      if (Idx < 0)
3936        continue;
3937
3938      if (Idx >= (int)SrcNumElts) {
3939        Input = 1;
3940        Idx -= SrcNumElts;
3941      }
3942
3943      // If all the indices come from the same MaskNumElts sized portion of
3944      // the sources we can use extract. Also make sure the extract wouldn't
3945      // extract past the end of the source.
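      // For example, with 8-element sources and mask <4,5,6,7>, every index
      // falls in the second half of Src1, so we can extract elements [4,8)
      // and shuffle the narrower vectors instead.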
3946      int NewStartIdx = alignDown(Idx, MaskNumElts);
3947      if (NewStartIdx + MaskNumElts > SrcNumElts ||
3948          (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx))
3949        CanExtract = false;
3950      // Make sure we always update StartIdx as we use it to track if all
3951      // elements are undef.
3952      StartIdx[Input] = NewStartIdx;
3953    }
3954
3955    if (StartIdx[0] < 0 && StartIdx[1] < 0) {
3956      setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used.
3957      return;
3958    }
3959    if (CanExtract) {
3960      // Extract appropriate subvector and generate a vector shuffle
3961      for (unsigned Input = 0; Input < 2; ++Input) {
3962        SDValue &Src = Input == 0 ? Src1 : Src2;
3963        if (StartIdx[Input] < 0)
3964          Src = DAG.getUNDEF(VT);
3965        else {
3966          Src = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src,
3967                            DAG.getVectorIdxConstant(StartIdx[Input], DL));
3968        }
3969      }
3970
3971      // Calculate new mask.
3972      SmallVector<int, 8> MappedOps(Mask);
3973      for (int &Idx : MappedOps) {
3974        if (Idx >= (int)SrcNumElts)
3975          Idx -= SrcNumElts + StartIdx[1] - MaskNumElts;
3976        else if (Idx >= 0)
3977          Idx -= StartIdx[0];
3978      }
3979
3980      setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps));
3981      return;
3982    }
3983  }
3984
3985  // We can't use either concat vectors or extract subvectors so fall back to
3986  // replacing the shuffle with extracts and a build vector.
3988  EVT EltVT = VT.getVectorElementType();
3989  SmallVector<SDValue,8> Ops;
3990  for (int Idx : Mask) {
3991    SDValue Res;
3992
3993    if (Idx < 0) {
3994      Res = DAG.getUNDEF(EltVT);
3995    } else {
3996      SDValue &Src = Idx < (int)SrcNumElts ? Src1 : Src2;
3997      if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts;
3998
3999      Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src,
4000                        DAG.getVectorIdxConstant(Idx, DL));
4001    }
4002
4003    Ops.push_back(Res);
4004  }
4005
4006  setValue(&I, DAG.getBuildVector(VT, DL, Ops));
4007}
4008
4009void SelectionDAGBuilder::visitInsertValue(const InsertValueInst &I) {
4010  ArrayRef<unsigned> Indices = I.getIndices();
4011  const Value *Op0 = I.getOperand(0);
4012  const Value *Op1 = I.getOperand(1);
4013  Type *AggTy = I.getType();
4014  Type *ValTy = Op1->getType();
4015  bool IntoUndef = isa<UndefValue>(Op0);
4016  bool FromUndef = isa<UndefValue>(Op1);
4017
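  // ComputeLinearIndex flattens the aggregate into its leaf values and returns
  // the position of the first leaf addressed by Indices. For example, in
  // {i32, {float, i64}} the indices {1, 1} address the i64 leaf, giving a
  // linear index of 2.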
4018  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4019
4020  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4021  SmallVector<EVT, 4> AggValueVTs;
4022  ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs);
4023  SmallVector<EVT, 4> ValValueVTs;
4024  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4025
4026  unsigned NumAggValues = AggValueVTs.size();
4027  unsigned NumValValues = ValValueVTs.size();
4028  SmallVector<SDValue, 4> Values(NumAggValues);
4029
4030  // Ignore an insertvalue that produces an empty object
4031  if (!NumAggValues) {
4032    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4033    return;
4034  }
4035
4036  SDValue Agg = getValue(Op0);
4037  unsigned i = 0;
4038  // Copy the beginning value(s) from the original aggregate.
4039  for (; i != LinearIndex; ++i)
4040    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4041                SDValue(Agg.getNode(), Agg.getResNo() + i);
4042  // Copy values from the inserted value(s).
4043  if (NumValValues) {
4044    SDValue Val = getValue(Op1);
4045    for (; i != LinearIndex + NumValValues; ++i)
4046      Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4047                  SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex);
4048  }
4049  // Copy remaining value(s) from the original aggregate.
4050  for (; i != NumAggValues; ++i)
4051    Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) :
4052                SDValue(Agg.getNode(), Agg.getResNo() + i);
4053
4054  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4055                           DAG.getVTList(AggValueVTs), Values));
4056}
4057
4058void SelectionDAGBuilder::visitExtractValue(const ExtractValueInst &I) {
4059  ArrayRef<unsigned> Indices = I.getIndices();
4060  const Value *Op0 = I.getOperand(0);
4061  Type *AggTy = Op0->getType();
4062  Type *ValTy = I.getType();
4063  bool OutOfUndef = isa<UndefValue>(Op0);
4064
4065  unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices);
4066
4067  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4068  SmallVector<EVT, 4> ValValueVTs;
4069  ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs);
4070
4071  unsigned NumValValues = ValValueVTs.size();
4072
4073  // Ignore an extractvalue that produces an empty object
4074  if (!NumValValues) {
4075    setValue(&I, DAG.getUNDEF(MVT(MVT::Other)));
4076    return;
4077  }
4078
4079  SmallVector<SDValue, 4> Values(NumValValues);
4080
4081  SDValue Agg = getValue(Op0);
4082  // Copy out the selected value(s).
4083  for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i)
4084    Values[i - LinearIndex] =
4085      OutOfUndef ?
4086        DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) :
4087        SDValue(Agg.getNode(), Agg.getResNo() + i);
4088
4089  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
4090                           DAG.getVTList(ValValueVTs), Values));
4091}
4092
4093void SelectionDAGBuilder::visitGetElementPtr(const User &I) {
4094  Value *Op0 = I.getOperand(0);
4095  // Note that the pointer operand may be a vector of pointers. Take the scalar
4096  // element which holds a pointer.
4097  unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace();
4098  SDValue N = getValue(Op0);
4099  SDLoc dl = getCurSDLoc();
4100  auto &TLI = DAG.getTargetLoweringInfo();
4101
4102  // Normalize a vector GEP - all scalar operands should be converted to
4103  // splat vectors.
4104  bool IsVectorGEP = I.getType()->isVectorTy();
4105  ElementCount VectorElementCount =
4106      IsVectorGEP ? cast<VectorType>(I.getType())->getElementCount()
4107                  : ElementCount::getFixed(0);
4108
4109  if (IsVectorGEP && !N.getValueType().isVector()) {
4110    LLVMContext &Context = *DAG.getContext();
4111    EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorElementCount);
4112    N = DAG.getSplat(VT, dl, N);
4113  }
4114
4115  for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I);
4116       GTI != E; ++GTI) {
4117    const Value *Idx = GTI.getOperand();
4118    if (StructType *StTy = GTI.getStructTypeOrNull()) {
4119      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
4120      if (Field) {
4121        // N = N + Offset
4122        uint64_t Offset =
4123            DAG.getDataLayout().getStructLayout(StTy)->getElementOffset(Field);
4124
4125        // In an inbounds GEP with an offset that is nonnegative even when
4126        // interpreted as signed, assume there is no unsigned overflow.
4127        SDNodeFlags Flags;
4128        if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds())
4129          Flags.setNoUnsignedWrap(true);
4130
4131        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N,
4132                        DAG.getConstant(Offset, dl, N.getValueType()), Flags);
4133      }
4134    } else {
4135      // IdxSize is the width of the arithmetic according to IR semantics.
4136      // In SelectionDAG, we may prefer to do arithmetic in a wider bitwidth
4137      // (and fix up the result later).
4138      unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS);
4139      MVT IdxTy = MVT::getIntegerVT(IdxSize);
4140      TypeSize ElementSize =
4141          GTI.getSequentialElementStride(DAG.getDataLayout());
4142      // We intentionally mask away the high bits here; ElementSize may not
4143      // fit in IdxTy.
4144      APInt ElementMul(IdxSize, ElementSize.getKnownMinValue());
4145      bool ElementScalable = ElementSize.isScalable();
4146
4147      // If this is a scalar constant or a splat vector of constants,
4148      // handle it quickly.
4149      const auto *C = dyn_cast<Constant>(Idx);
4150      if (C && isa<VectorType>(C->getType()))
4151        C = C->getSplatValue();
4152
4153      const auto *CI = dyn_cast_or_null<ConstantInt>(C);
4154      if (CI && CI->isZero())
4155        continue;
4156      if (CI && !ElementScalable) {
4157        APInt Offs = ElementMul * CI->getValue().sextOrTrunc(IdxSize);
4158        LLVMContext &Context = *DAG.getContext();
4159        SDValue OffsVal;
4160        if (IsVectorGEP)
4161          OffsVal = DAG.getConstant(
4162              Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorElementCount));
4163        else
4164          OffsVal = DAG.getConstant(Offs, dl, IdxTy);
4165
4166        // In an inbounds GEP with an offset that is nonnegative even when
4167        // interpreted as signed, assume there is no unsigned overflow.
4168        SDNodeFlags Flags;
4169        if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds())
4170          Flags.setNoUnsignedWrap(true);
4171
4172        OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType());
4173
4174        N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags);
4175        continue;
4176      }
4177
4178      // N = N + Idx * ElementMul;
4179      SDValue IdxN = getValue(Idx);
4180
4181      if (!IdxN.getValueType().isVector() && IsVectorGEP) {
4182        EVT VT = EVT::getVectorVT(*DAG.getContext(), IdxN.getValueType(),
4183                                  VectorElementCount);
4184        IdxN = DAG.getSplat(VT, dl, IdxN);
4185      }
4186
4187      // If the index is smaller or larger than intptr_t, truncate or extend
4188      // it.
4189      IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType());
4190
4191      if (ElementScalable) {
4192        EVT VScaleTy = N.getValueType().getScalarType();
4193        SDValue VScale = DAG.getNode(
4194            ISD::VSCALE, dl, VScaleTy,
4195            DAG.getConstant(ElementMul.getZExtValue(), dl, VScaleTy));
4196        if (IsVectorGEP)
4197          VScale = DAG.getSplatVector(N.getValueType(), dl, VScale);
4198        IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, VScale);
4199      } else {
4200        // If this is a multiply by a power of two, turn it into a shl
4201        // immediately.  This is a very common case.
4202        if (ElementMul != 1) {
4203          if (ElementMul.isPowerOf2()) {
4204            unsigned Amt = ElementMul.logBase2();
4205            IdxN = DAG.getNode(ISD::SHL, dl,
4206                               N.getValueType(), IdxN,
4207                               DAG.getConstant(Amt, dl, IdxN.getValueType()));
4208          } else {
4209            SDValue Scale = DAG.getConstant(ElementMul.getZExtValue(), dl,
4210                                            IdxN.getValueType());
4211            IdxN = DAG.getNode(ISD::MUL, dl,
4212                               N.getValueType(), IdxN, Scale);
4213          }
4214        }
4215      }
4216
4217      N = DAG.getNode(ISD::ADD, dl,
4218                      N.getValueType(), N, IdxN);
4219    }
4220  }
4221
4222  MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS);
4223  MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS);
4224  if (IsVectorGEP) {
4225    PtrTy = MVT::getVectorVT(PtrTy, VectorElementCount);
4226    PtrMemTy = MVT::getVectorVT(PtrMemTy, VectorElementCount);
4227  }
4228
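  // When pointer arithmetic is done in a wider type than pointers occupy in
  // memory, a non-inbounds GEP may leave high bits that disagree with the
  // in-memory pointer representation, so re-extend the value in-register.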
4229  if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds())
4230    N = DAG.getPtrExtendInReg(N, dl, PtrMemTy);
4231
4232  setValue(&I, N);
4233}
4234
4235void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
4236  // If this is a fixed sized alloca in the entry block of the function,
4237  // allocate it statically on the stack.
4238  if (FuncInfo.StaticAllocaMap.count(&I))
4239    return;   // getValue will auto-populate this.
4240
4241  SDLoc dl = getCurSDLoc();
4242  Type *Ty = I.getAllocatedType();
4243  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4244  auto &DL = DAG.getDataLayout();
4245  TypeSize TySize = DL.getTypeAllocSize(Ty);
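  // Use the stricter of the alloca's explicit alignment and the preferred
  // alignment for the allocated type.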
4246  MaybeAlign Alignment = std::max(DL.getPrefTypeAlign(Ty), I.getAlign());
4247
4248  SDValue AllocSize = getValue(I.getArraySize());
4249
4250  EVT IntPtr = TLI.getPointerTy(DL, I.getAddressSpace());
4251  if (AllocSize.getValueType() != IntPtr)
4252    AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr);
4253
4254  if (TySize.isScalable())
4255    AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4256                            DAG.getVScale(dl, IntPtr,
4257                                          APInt(IntPtr.getScalarSizeInBits(),
4258                                                TySize.getKnownMinValue())));
4259  else {
4260    SDValue TySizeValue =
4261        DAG.getConstant(TySize.getFixedValue(), dl, MVT::getIntegerVT(64));
4262    AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize,
4263                            DAG.getZExtOrTrunc(TySizeValue, dl, IntPtr));
4264  }
4265
4266  // Handle alignment.  If the requested alignment is less than or equal to
4267  // the stack alignment, ignore it.  If the requested alignment is greater
4268  // than the stack alignment, we note it in the DYNAMIC_STACKALLOC node.
4269  Align StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlign();
4270  if (*Alignment <= StackAlign)
4271    Alignment = std::nullopt;
4272
4273  const uint64_t StackAlignMask = StackAlign.value() - 1U;
4274  // Round the size of the allocation up to the stack alignment size
4275  // by adding SA-1 to the size. This doesn't overflow because we're computing
4276  // an address inside an alloca.
4277  SDNodeFlags Flags;
4278  Flags.setNoUnsignedWrap(true);
4279  AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize,
4280                          DAG.getConstant(StackAlignMask, dl, IntPtr), Flags);
4281
4282  // Mask out the low bits for alignment purposes.
4283  AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
4284                          DAG.getConstant(~StackAlignMask, dl, IntPtr));
4285
4286  SDValue Ops[] = {
4287      getRoot(), AllocSize,
4288      DAG.getConstant(Alignment ? Alignment->value() : 0, dl, IntPtr)};
4289  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
4290  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
4291  setValue(&I, DSA);
4292  DAG.setRoot(DSA.getValue(1));
4293
4294  assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
4295}
4296
4297static const MDNode *getRangeMetadata(const Instruction &I) {
4298  // If !noundef is not present, then !range violation results in a poison
4299  // value rather than immediate undefined behavior. In theory, transferring
4300  // these annotations to SDAG is fine, but in practice there are key SDAG
4301  // transforms that are known not to be poison-safe, such as folding logical
4302  // and/or to bitwise and/or. For now, only transfer !range if !noundef is
4303  // also present.
4304  if (!I.hasMetadata(LLVMContext::MD_noundef))
4305    return nullptr;
4306  return I.getMetadata(LLVMContext::MD_range);
4307}
4308
4309void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
4310  if (I.isAtomic())
4311    return visitAtomicLoad(I);
4312
4313  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4314  const Value *SV = I.getOperand(0);
4315  if (TLI.supportSwiftError()) {
4316    // Swifterror values can come from either a function parameter with
4317    // swifterror attribute or an alloca with swifterror attribute.
4318    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
4319      if (Arg->hasSwiftErrorAttr())
4320        return visitLoadFromSwiftError(I);
4321    }
4322
4323    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
4324      if (Alloca->isSwiftError())
4325        return visitLoadFromSwiftError(I);
4326    }
4327  }
4328
4329  SDValue Ptr = getValue(SV);
4330
4331  Type *Ty = I.getType();
4332  SmallVector<EVT, 4> ValueVTs, MemVTs;
4333  SmallVector<TypeSize, 4> Offsets;
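  // An aggregate-typed load is decomposed into one load per leaf value;
  // Offsets records the byte offset of each leaf within the loaded object.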
4334  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets, 0);
4335  unsigned NumValues = ValueVTs.size();
4336  if (NumValues == 0)
4337    return;
4338
4339  Align Alignment = I.getAlign();
4340  AAMDNodes AAInfo = I.getAAMetadata();
4341  const MDNode *Ranges = getRangeMetadata(I);
4342  bool isVolatile = I.isVolatile();
4343  MachineMemOperand::Flags MMOFlags =
4344      TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4345
4346  SDValue Root;
4347  bool ConstantMemory = false;
4348  if (isVolatile)
4349    // Serialize volatile loads with other side effects.
4350    Root = getRoot();
4351  else if (NumValues > MaxParallelChains)
4352    Root = getMemoryRoot();
4353  else if (AA &&
4354           AA->pointsToConstantMemory(MemoryLocation(
4355               SV,
4356               LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4357               AAInfo))) {
4358    // Do not serialize (non-volatile) loads of constant memory with anything.
4359    Root = DAG.getEntryNode();
4360    ConstantMemory = true;
4361    MMOFlags |= MachineMemOperand::MOInvariant;
4362  } else {
4363    // Do not serialize non-volatile loads against each other.
4364    Root = DAG.getRoot();
4365  }
4366
4367  SDLoc dl = getCurSDLoc();
4368
4369  if (isVolatile)
4370    Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);
4371
4372  SmallVector<SDValue, 4> Values(NumValues);
4373  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4374
4375  unsigned ChainI = 0;
4376  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4377    // Serializing loads here may result in excessive register pressure, and
4378    // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
4379    // could recover a bit by hoisting nodes upward in the chain by recognizing
4380    // they are side-effect free or do not alias. The optimizer should really
4381    // avoid this case by converting large object/array copies to llvm.memcpy
4382    // (MaxParallelChains should always remain as a failsafe).
4383    if (ChainI == MaxParallelChains) {
4384      assert(PendingLoads.empty() && "PendingLoads must be serialized first");
4385      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4386                                  ArrayRef(Chains.data(), ChainI));
4387      Root = Chain;
4388      ChainI = 0;
4389    }
4390
4391    // TODO: MachinePointerInfo only supports a fixed length offset.
4392    MachinePointerInfo PtrInfo =
4393        !Offsets[i].isScalable() || Offsets[i].isZero()
4394            ? MachinePointerInfo(SV, Offsets[i].getKnownMinValue())
4395            : MachinePointerInfo();
4396
4397    SDValue A = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4398    SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A, PtrInfo, Alignment,
4399                            MMOFlags, AAInfo, Ranges);
4400    Chains[ChainI] = L.getValue(1);
4401
4402    if (MemVTs[i] != ValueVTs[i])
4403      L = DAG.getPtrExtOrTrunc(L, dl, ValueVTs[i]);
4404
4405    Values[i] = L;
4406  }
4407
4408  if (!ConstantMemory) {
4409    SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4410                                ArrayRef(Chains.data(), ChainI));
4411    if (isVolatile)
4412      DAG.setRoot(Chain);
4413    else
4414      PendingLoads.push_back(Chain);
4415  }
4416
4417  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
4418                           DAG.getVTList(ValueVTs), Values));
4419}
4420
4421void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
4422  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4423         "call visitStoreToSwiftError when backend supports swifterror");
4424
4425  SmallVector<EVT, 4> ValueVTs;
4426  SmallVector<uint64_t, 4> Offsets;
4427  const Value *SrcV = I.getOperand(0);
4428  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4429                  SrcV->getType(), ValueVTs, &Offsets, 0);
4430  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4431         "expect a single EVT for swifterror");
4432
4433  SDValue Src = getValue(SrcV);
4434  // Create a virtual register, then update the virtual register.
4435  Register VReg =
4436      SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
4437  // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
4438  // Chain can be getRoot or getControlRoot.
4439  SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
4440                                      SDValue(Src.getNode(), Src.getResNo()));
4441  DAG.setRoot(CopyNode);
4442}
4443
4444void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
4445  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
4446         "call visitLoadFromSwiftError when backend supports swifterror");
4447
4448  assert(!I.isVolatile() &&
4449         !I.hasMetadata(LLVMContext::MD_nontemporal) &&
4450         !I.hasMetadata(LLVMContext::MD_invariant_load) &&
4451         "Support volatile, non temporal, invariant for load_from_swift_error");
4452
4453  const Value *SV = I.getOperand(0);
4454  Type *Ty = I.getType();
4455  assert(
4456      (!AA ||
4457       !AA->pointsToConstantMemory(MemoryLocation(
4458           SV, LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
4459           I.getAAMetadata()))) &&
4460      "load_from_swift_error should not be constant memory");
4461
4462  SmallVector<EVT, 4> ValueVTs;
4463  SmallVector<uint64_t, 4> Offsets;
4464  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
4465                  ValueVTs, &Offsets, 0);
4466  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
4467         "expect a single EVT for swifterror");
4468
4469  // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
4470  SDValue L = DAG.getCopyFromReg(
4471      getRoot(), getCurSDLoc(),
4472      SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);
4473
4474  setValue(&I, L);
4475}
4476
4477void SelectionDAGBuilder::visitStore(const StoreInst &I) {
4478  if (I.isAtomic())
4479    return visitAtomicStore(I);
4480
4481  const Value *SrcV = I.getOperand(0);
4482  const Value *PtrV = I.getOperand(1);
4483
4484  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4485  if (TLI.supportSwiftError()) {
4486    // Swifterror values can come from either a function parameter with
4487    // swifterror attribute or an alloca with swifterror attribute.
4488    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
4489      if (Arg->hasSwiftErrorAttr())
4490        return visitStoreToSwiftError(I);
4491    }
4492
4493    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
4494      if (Alloca->isSwiftError())
4495        return visitStoreToSwiftError(I);
4496    }
4497  }
4498
4499  SmallVector<EVT, 4> ValueVTs, MemVTs;
4500  SmallVector<TypeSize, 4> Offsets;
4501  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
4502                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets, 0);
4503  unsigned NumValues = ValueVTs.size();
4504  if (NumValues == 0)
4505    return;
4506
4507  // Get the lowered operands. Note that we do this after
4508  // checking if NumValues is zero, because with zero values
4509  // the operands won't have values in the map.
4510  SDValue Src = getValue(SrcV);
4511  SDValue Ptr = getValue(PtrV);
4512
4513  SDValue Root = I.isVolatile() ? getRoot() : getMemoryRoot();
4514  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
4515  SDLoc dl = getCurSDLoc();
4516  Align Alignment = I.getAlign();
4517  AAMDNodes AAInfo = I.getAAMetadata();
4518
4519  auto MMOFlags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4520
4521  unsigned ChainI = 0;
4522  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
4523    // See visitLoad comments.
4524    if (ChainI == MaxParallelChains) {
4525      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4526                                  ArrayRef(Chains.data(), ChainI));
4527      Root = Chain;
4528      ChainI = 0;
4529    }
4530
4531    // TODO: MachinePointerInfo only supports a fixed length offset.
4532    MachinePointerInfo PtrInfo =
4533        !Offsets[i].isScalable() || Offsets[i].isZero()
4534            ? MachinePointerInfo(PtrV, Offsets[i].getKnownMinValue())
4535            : MachinePointerInfo();
4536
4537    SDValue Add = DAG.getObjectPtrOffset(dl, Ptr, Offsets[i]);
4538    SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
4539    if (MemVTs[i] != ValueVTs[i])
4540      Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
4541    SDValue St =
4542        DAG.getStore(Root, dl, Val, Add, PtrInfo, Alignment, MMOFlags, AAInfo);
4543    Chains[ChainI] = St;
4544  }
4545
4546  SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
4547                                  ArrayRef(Chains.data(), ChainI));
4548  setValue(&I, StoreNode);
4549  DAG.setRoot(StoreNode);
4550}
4551
4552void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
4553                                           bool IsCompressing) {
4554  SDLoc sdl = getCurSDLoc();
4555
4556  auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4557                               MaybeAlign &Alignment) {
4558    // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
4559    Src0 = I.getArgOperand(0);
4560    Ptr = I.getArgOperand(1);
4561    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
4562    Mask = I.getArgOperand(3);
4563  };
4564  auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4565                                    MaybeAlign &Alignment) {
4566    // llvm.masked.compressstore.*(Src0, Ptr, Mask)
4567    Src0 = I.getArgOperand(0);
4568    Ptr = I.getArgOperand(1);
4569    Mask = I.getArgOperand(2);
4570    Alignment = std::nullopt;
4571  };
4572
4573  Value  *PtrOperand, *MaskOperand, *Src0Operand;
4574  MaybeAlign Alignment;
4575  if (IsCompressing)
4576    getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4577  else
4578    getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4579
4580  SDValue Ptr = getValue(PtrOperand);
4581  SDValue Src0 = getValue(Src0Operand);
4582  SDValue Mask = getValue(MaskOperand);
4583  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4584
4585  EVT VT = Src0.getValueType();
4586  if (!Alignment)
4587    Alignment = DAG.getEVTAlign(VT);
4588
4589  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4590      MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
4591      MemoryLocation::UnknownSize, *Alignment, I.getAAMetadata());
4592  SDValue StoreNode =
4593      DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
4594                         ISD::UNINDEXED, false /* Truncating */, IsCompressing);
4595  DAG.setRoot(StoreNode);
4596  setValue(&I, StoreNode);
4597}
4598
4599// Get a uniform base for the Gather/Scatter intrinsic.
4600// The first argument of the Gather/Scatter intrinsic is a vector of pointers.
4601// We try to represent it as a base pointer + vector of indices.
4602// Usually, the vector of pointers comes from a 'getelementptr' instruction.
4603// The first operand of the GEP may be a single pointer or a vector of pointers
4604// Example:
4605//   %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind
4606//  or
4607//   %gep.ptr = getelementptr i32, i32* %ptr,        <8 x i32> %ind
4608// %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, ..
4609//
4610// When the first GEP operand is a single pointer, it is the uniform base we
4611// are looking for. If the first operand of the GEP is a splat vector, we
4612// extract the splat value and use it as the uniform base.
4613// In all other cases the function returns 'false'.
4614static bool getUniformBase(const Value *Ptr, SDValue &Base, SDValue &Index,
4615                           ISD::MemIndexType &IndexType, SDValue &Scale,
4616                           SelectionDAGBuilder *SDB, const BasicBlock *CurBB,
4617                           uint64_t ElemSize) {
4618  SelectionDAG& DAG = SDB->DAG;
4619  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4620  const DataLayout &DL = DAG.getDataLayout();
4621
4622  assert(Ptr->getType()->isVectorTy() && "Unexpected pointer type");
4623
4624  // Handle splat constant pointer.
4625  if (auto *C = dyn_cast<Constant>(Ptr)) {
4626    C = C->getSplatValue();
4627    if (!C)
4628      return false;
4629
4630    Base = SDB->getValue(C);
4631
4632    ElementCount NumElts = cast<VectorType>(Ptr->getType())->getElementCount();
4633    EVT VT = EVT::getVectorVT(*DAG.getContext(), TLI.getPointerTy(DL), NumElts);
4634    Index = DAG.getConstant(0, SDB->getCurSDLoc(), VT);
4635    IndexType = ISD::SIGNED_SCALED;
4636    Scale = DAG.getTargetConstant(1, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4637    return true;
4638  }
4639
4640  const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
4641  if (!GEP || GEP->getParent() != CurBB)
4642    return false;
4643
4644  if (GEP->getNumOperands() != 2)
4645    return false;
4646
4647  const Value *BasePtr = GEP->getPointerOperand();
4648  const Value *IndexVal = GEP->getOperand(GEP->getNumOperands() - 1);
4649
4650  // Make sure the base is scalar and the index is a vector.
4651  if (BasePtr->getType()->isVectorTy() || !IndexVal->getType()->isVectorTy())
4652    return false;
4653
4654  TypeSize ScaleVal = DL.getTypeAllocSize(GEP->getResultElementType());
4655  if (ScaleVal.isScalable())
4656    return false;
4657
4658  // Target may not support the required addressing mode.
4659  if (ScaleVal != 1 &&
4660      !TLI.isLegalScaleForGatherScatter(ScaleVal.getFixedValue(), ElemSize))
4661    return false;
4662
4663  Base = SDB->getValue(BasePtr);
4664  Index = SDB->getValue(IndexVal);
4665  IndexType = ISD::SIGNED_SCALED;
4666
4667  Scale =
4668      DAG.getTargetConstant(ScaleVal, SDB->getCurSDLoc(), TLI.getPointerTy(DL));
4669  return true;
4670}
4671
4672void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
4673  SDLoc sdl = getCurSDLoc();
4674
4675  // llvm.masked.scatter.*(Src0, Ptrs, alignment, Mask)
4676  const Value *Ptr = I.getArgOperand(1);
4677  SDValue Src0 = getValue(I.getArgOperand(0));
4678  SDValue Mask = getValue(I.getArgOperand(3));
4679  EVT VT = Src0.getValueType();
4680  Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
4681                        ->getMaybeAlignValue()
4682                        .value_or(DAG.getEVTAlign(VT.getScalarType()));
4683  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4684
4685  SDValue Base;
4686  SDValue Index;
4687  ISD::MemIndexType IndexType;
4688  SDValue Scale;
4689  bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4690                                    I.getParent(), VT.getScalarStoreSize());
4691
4692  unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4693  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4694      MachinePointerInfo(AS), MachineMemOperand::MOStore,
4695      // TODO: Make MachineMemOperands aware of scalable
4696      // vectors.
4697      MemoryLocation::UnknownSize, Alignment, I.getAAMetadata());
4698  if (!UniformBase) {
4699    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4700    Index = getValue(Ptr);
4701    IndexType = ISD::SIGNED_SCALED;
4702    Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4703  }
4704
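  // The target may require a wider element type for gather/scatter indices;
  // shouldExtendGSIndex updates EltTy to the required type, in which case the
  // index vector is sign-extended to match.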
4705  EVT IdxVT = Index.getValueType();
4706  EVT EltTy = IdxVT.getVectorElementType();
4707  if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4708    EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4709    Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4710  }
4711
4712  SDValue Ops[] = { getMemoryRoot(), Src0, Mask, Base, Index, Scale };
4713  SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
4714                                         Ops, MMO, IndexType, false);
4715  DAG.setRoot(Scatter);
4716  setValue(&I, Scatter);
4717}
4718
4719void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
4720  SDLoc sdl = getCurSDLoc();
4721
4722  auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4723                              MaybeAlign &Alignment) {
4724    // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
4725    Ptr = I.getArgOperand(0);
4726    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
4727    Mask = I.getArgOperand(2);
4728    Src0 = I.getArgOperand(3);
4729  };
4730  auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
4731                                 MaybeAlign &Alignment) {
4732    // @llvm.masked.expandload.*(Ptr, Mask, Src0)
4733    Ptr = I.getArgOperand(0);
4734    Alignment = std::nullopt;
4735    Mask = I.getArgOperand(1);
4736    Src0 = I.getArgOperand(2);
4737  };
4738
4739  Value  *PtrOperand, *MaskOperand, *Src0Operand;
4740  MaybeAlign Alignment;
4741  if (IsExpanding)
4742    getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4743  else
4744    getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
4745
4746  SDValue Ptr = getValue(PtrOperand);
4747  SDValue Src0 = getValue(Src0Operand);
4748  SDValue Mask = getValue(MaskOperand);
4749  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
4750
4751  EVT VT = Src0.getValueType();
4752  if (!Alignment)
4753    Alignment = DAG.getEVTAlign(VT);
4754
4755  AAMDNodes AAInfo = I.getAAMetadata();
4756  const MDNode *Ranges = getRangeMetadata(I);
4757
4758  // Do not serialize masked loads of constant memory with anything.
4759  MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
4760  bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
4761
4762  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
4763
4764  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4765      MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
4766      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
4767
4768  SDValue Load =
4769      DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
4770                        ISD::UNINDEXED, ISD::NON_EXTLOAD, IsExpanding);
4771  if (AddToChain)
4772    PendingLoads.push_back(Load.getValue(1));
4773  setValue(&I, Load);
4774}
4775
4776void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
4777  SDLoc sdl = getCurSDLoc();
4778
4779  // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
4780  const Value *Ptr = I.getArgOperand(0);
4781  SDValue Src0 = getValue(I.getArgOperand(3));
4782  SDValue Mask = getValue(I.getArgOperand(2));
4783
4784  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4785  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4786  Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
4787                        ->getMaybeAlignValue()
4788                        .value_or(DAG.getEVTAlign(VT.getScalarType()));
4789
4790  const MDNode *Ranges = getRangeMetadata(I);
4791
4792  SDValue Root = DAG.getRoot();
4793  SDValue Base;
4794  SDValue Index;
4795  ISD::MemIndexType IndexType;
4796  SDValue Scale;
4797  bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this,
4798                                    I.getParent(), VT.getScalarStoreSize());
4799  unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
4800  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4801      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
4802      // TODO: Make MachineMemOperands aware of scalable
4803      // vectors.
4804      MemoryLocation::UnknownSize, Alignment, I.getAAMetadata(), Ranges);
4805
4806  if (!UniformBase) {
4807    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4808    Index = getValue(Ptr);
4809    IndexType = ISD::SIGNED_SCALED;
4810    Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
4811  }
4812
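  // Widen the index element type if the target requires it; see the comment in
  // visitMaskedScatter.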
4813  EVT IdxVT = Index.getValueType();
4814  EVT EltTy = IdxVT.getVectorElementType();
4815  if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
4816    EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
4817    Index = DAG.getNode(ISD::SIGN_EXTEND, sdl, NewIdxVT, Index);
4818  }
4819
4820  SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
4821  SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
4822                                       Ops, MMO, IndexType, ISD::NON_EXTLOAD);
4823
4824  PendingLoads.push_back(Gather.getValue(1));
4825  setValue(&I, Gather);
4826}
4827
4828void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
4829  SDLoc dl = getCurSDLoc();
4830  AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
4831  AtomicOrdering FailureOrdering = I.getFailureOrdering();
4832  SyncScope::ID SSID = I.getSyncScopeID();
4833
4834  SDValue InChain = getRoot();
4835
4836  MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
4837  SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);
4838
4839  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4840  auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4841
4842  MachineFunction &MF = DAG.getMachineFunction();
4843  MachineMemOperand *MMO = MF.getMachineMemOperand(
4844      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4845      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
4846      FailureOrdering);
4847
4848  SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
4849                                   dl, MemVT, VTs, InChain,
4850                                   getValue(I.getPointerOperand()),
4851                                   getValue(I.getCompareOperand()),
4852                                   getValue(I.getNewValOperand()), MMO);
4853
4854  SDValue OutChain = L.getValue(2);
4855
4856  setValue(&I, L);
4857  DAG.setRoot(OutChain);
4858}
4859
4860void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
4861  SDLoc dl = getCurSDLoc();
4862  ISD::NodeType NT;
4863  switch (I.getOperation()) {
4864  default: llvm_unreachable("Unknown atomicrmw operation");
4865  case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
4866  case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
4867  case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
4868  case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
4869  case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
4870  case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
4871  case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
4872  case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
4873  case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
4874  case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
4875  case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
4876  case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
4877  case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
4878  case AtomicRMWInst::FMax: NT = ISD::ATOMIC_LOAD_FMAX; break;
4879  case AtomicRMWInst::FMin: NT = ISD::ATOMIC_LOAD_FMIN; break;
4880  case AtomicRMWInst::UIncWrap:
4881    NT = ISD::ATOMIC_LOAD_UINC_WRAP;
4882    break;
4883  case AtomicRMWInst::UDecWrap:
4884    NT = ISD::ATOMIC_LOAD_UDEC_WRAP;
4885    break;
4886  }
4887  AtomicOrdering Ordering = I.getOrdering();
4888  SyncScope::ID SSID = I.getSyncScopeID();
4889
4890  SDValue InChain = getRoot();
4891
4892  auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
4893  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4894  auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());
4895
4896  MachineFunction &MF = DAG.getMachineFunction();
4897  MachineMemOperand *MMO = MF.getMachineMemOperand(
4898      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4899      DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);
4900
4901  SDValue L =
4902    DAG.getAtomic(NT, dl, MemVT, InChain,
4903                  getValue(I.getPointerOperand()), getValue(I.getValOperand()),
4904                  MMO);
4905
4906  SDValue OutChain = L.getValue(1);
4907
4908  setValue(&I, L);
4909  DAG.setRoot(OutChain);
4910}
4911
4912void SelectionDAGBuilder::visitFence(const FenceInst &I) {
4913  SDLoc dl = getCurSDLoc();
4914  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
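  // ATOMIC_FENCE takes the chain, the atomic ordering, and the synchronization
  // scope as operands.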
4915  SDValue Ops[3];
4916  Ops[0] = getRoot();
4917  Ops[1] = DAG.getTargetConstant((unsigned)I.getOrdering(), dl,
4918                                 TLI.getFenceOperandTy(DAG.getDataLayout()));
4919  Ops[2] = DAG.getTargetConstant(I.getSyncScopeID(), dl,
4920                                 TLI.getFenceOperandTy(DAG.getDataLayout()));
4921  SDValue N = DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops);
4922  setValue(&I, N);
4923  DAG.setRoot(N);
4924}
4925
4926void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
4927  SDLoc dl = getCurSDLoc();
4928  AtomicOrdering Order = I.getOrdering();
4929  SyncScope::ID SSID = I.getSyncScopeID();
4930
4931  SDValue InChain = getRoot();
4932
4933  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4934  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
4935  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());
4936
4937  if (!TLI.supportsUnalignedAtomics() &&
4938      I.getAlign().value() < MemVT.getSizeInBits() / 8)
4939    report_fatal_error("Cannot generate unaligned atomic load");
4940
4941  auto Flags = TLI.getLoadMemOperandFlags(I, DAG.getDataLayout(), AC, LibInfo);
4942
4943  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
4944      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4945      I.getAlign(), AAMDNodes(), nullptr, SSID, Order);
4946
4947  InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);
4948
4949  SDValue Ptr = getValue(I.getPointerOperand());
4950  SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
4951                            Ptr, MMO);
4952
4953  SDValue OutChain = L.getValue(1);
4954  if (MemVT != VT)
4955    L = DAG.getPtrExtOrTrunc(L, dl, VT);
4956
4957  setValue(&I, L);
4958  DAG.setRoot(OutChain);
4959}
4960
4961void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
4962  SDLoc dl = getCurSDLoc();
4963
4964  AtomicOrdering Ordering = I.getOrdering();
4965  SyncScope::ID SSID = I.getSyncScopeID();
4966
4967  SDValue InChain = getRoot();
4968
4969  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4970  EVT MemVT =
4971      TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());
4972
4973  if (!TLI.supportsUnalignedAtomics() &&
4974      I.getAlign().value() < MemVT.getSizeInBits() / 8)
4975    report_fatal_error("Cannot generate unaligned atomic store");
4976
4977  auto Flags = TLI.getStoreMemOperandFlags(I, DAG.getDataLayout());
4978
4979  MachineFunction &MF = DAG.getMachineFunction();
4980  MachineMemOperand *MMO = MF.getMachineMemOperand(
4981      MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
4982      I.getAlign(), AAMDNodes(), nullptr, SSID, Ordering);
4983
4984  SDValue Val = getValue(I.getValueOperand());
4985  if (Val.getValueType() != MemVT)
4986    Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
4987  SDValue Ptr = getValue(I.getPointerOperand());
4988
4989  SDValue OutChain =
4990      DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain, Val, Ptr, MMO);
4991
4992  setValue(&I, OutChain);
4993  DAG.setRoot(OutChain);
4994}
4995
4996/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
4997/// node.
4998void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
4999                                               unsigned Intrinsic) {
5000  // Ignore the callsite's attributes. A specific call site may be marked with
5001  // readnone, but the lowering code will expect the chain based on the
5002  // definition.
5003  const Function *F = I.getCalledFunction();
5004  bool HasChain = !F->doesNotAccessMemory();
5005  bool OnlyLoad = HasChain && F->onlyReadsMemory();
5006
5007  // Build the operand list.
5008  SmallVector<SDValue, 8> Ops;
5009  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
5010    if (OnlyLoad) {
5011      // We don't need to serialize loads against other loads.
5012      Ops.push_back(DAG.getRoot());
5013    } else {
5014      Ops.push_back(getRoot());
5015    }
5016  }
5017
5018  // Info is set by getTgtMemIntrinsic
5019  TargetLowering::IntrinsicInfo Info;
5020  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5021  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
5022                                               DAG.getMachineFunction(),
5023                                               Intrinsic);
5024
5025  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
5026  if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
5027      Info.opc == ISD::INTRINSIC_W_CHAIN)
5028    Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
5029                                        TLI.getPointerTy(DAG.getDataLayout())));
5030
5031  // Add all operands of the call to the operand list.
5032  for (unsigned i = 0, e = I.arg_size(); i != e; ++i) {
5033    const Value *Arg = I.getArgOperand(i);
5034    if (!I.paramHasAttr(i, Attribute::ImmArg)) {
5035      Ops.push_back(getValue(Arg));
5036      continue;
5037    }
5038
5039    // Use TargetConstant instead of a regular constant for immarg.
5040    EVT VT = TLI.getValueType(DAG.getDataLayout(), Arg->getType(), true);
5041    if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
5042      assert(CI->getBitWidth() <= 64 &&
5043             "large intrinsic immediates not handled");
5044      Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
5045    } else {
5046      Ops.push_back(
5047          DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
5048    }
5049  }
5050
5051  SmallVector<EVT, 4> ValueVTs;
5052  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
5053
5054  if (HasChain)
5055    ValueVTs.push_back(MVT::Other);
5056
5057  SDVTList VTs = DAG.getVTList(ValueVTs);
5058
5059  // Propagate fast-math-flags from IR to node(s).
5060  SDNodeFlags Flags;
5061  if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
5062    Flags.copyFMF(*FPMO);
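  // While it is in scope, FlagInserter attaches these flags to every node the
  // DAG creates below.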
5063  SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
5064
5065  // Create the node.
5066  SDValue Result;
5067  // In some cases, custom collection of operands from CallInst I may be needed.
5068  TLI.CollectTargetIntrinsicOperands(I, Ops, DAG);
5069  if (IsTgtIntrinsic) {
5070    // This is a target intrinsic that touches memory.
5071    //
5072    // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
5073    //       didn't yield anything useful.
5074    MachinePointerInfo MPI;
5075    if (Info.ptrVal)
5076      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
5077    else if (Info.fallbackAddressSpace)
5078      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
5079    Result = DAG.getMemIntrinsicNode(Info.opc, getCurSDLoc(), VTs, Ops,
5080                                     Info.memVT, MPI, Info.align, Info.flags,
5081                                     Info.size, I.getAAMetadata());
5082  } else if (!HasChain) {
5083    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
5084  } else if (!I.getType()->isVoidTy()) {
5085    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
5086  } else {
5087    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
5088  }
5089
5090  if (HasChain) {
5091    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
5092    if (OnlyLoad)
5093      PendingLoads.push_back(Chain);
5094    else
5095      DAG.setRoot(Chain);
5096  }
5097
5098  if (!I.getType()->isVoidTy()) {
5099    if (!isa<VectorType>(I.getType()))
5100      Result = lowerRangeToAssertZExt(DAG, I, Result);
5101
5102    MaybeAlign Alignment = I.getRetAlign();
5103
5104    // Insert `assertalign` node if there's an alignment.
5105    if (InsertAssertAlign && Alignment) {
5106      Result =
5107          DAG.getAssertAlign(getCurSDLoc(), Result, Alignment.valueOrOne());
5108    }
5109
5110    setValue(&I, Result);
5111  }
5112}
5113
5114/// GetSignificand - Get the significand and build it into a floating-point
5115/// number with exponent of 1:
5116///
5117///   Op = (Op & 0x007fffff) | 0x3f800000;
5118///
5119/// where Op is the i32 bit pattern of the floating-point value.
5120static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
5121  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5122                           DAG.getConstant(0x007fffff, dl, MVT::i32));
5123  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
5124                           DAG.getConstant(0x3f800000, dl, MVT::i32));
5125  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
5126}
5127
5128/// GetExponent - Get the exponent:
5129///
5130///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
5131///
5132/// where Op is the i32 bit pattern of the floating-point value.
5133static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
5134                           const TargetLowering &TLI, const SDLoc &dl) {
5135  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
5136                           DAG.getConstant(0x7f800000, dl, MVT::i32));
5137  SDValue t1 = DAG.getNode(
5138      ISD::SRL, dl, MVT::i32, t0,
5139      DAG.getConstant(23, dl,
5140                      TLI.getShiftAmountTy(MVT::i32, DAG.getDataLayout())));
5141  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
5142                           DAG.getConstant(127, dl, MVT::i32));
5143  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
5144}
5145
5146/// getF32Constant - Get 32-bit floating point constant.
5147static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
5148                              const SDLoc &dl) {
5149  return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
5150                           MVT::f32);
5151}
5152
5153static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
5154                                       SelectionDAG &DAG) {
5155  // TODO: What fast-math-flags should be set on the floating-point nodes?
5156
5157  //   IntegerPartOfX = (int32_t)t0;
5158  SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);
5159
5160  //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
5161  SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
5162  SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);
5163
5164  //   IntegerPartOfX <<= 23;
5165  IntegerPartOfX =
5166      DAG.getNode(ISD::SHL, dl, MVT::i32, IntegerPartOfX,
5167                  DAG.getConstant(23, dl,
5168                                  DAG.getTargetLoweringInfo().getShiftAmountTy(
5169                                      MVT::i32, DAG.getDataLayout())));
5170
5171  SDValue TwoToFractionalPartOfX;
5172  if (LimitFloatPrecision <= 6) {
5173    // For floating-point precision of 6:
5174    //
5175    //   TwoToFractionalPartOfX =
5176    //     0.997535578f +
5177    //       (0.735607626f + 0.252464424f * x) * x;
5178    //
5179    // error 0.0144103317, which is 6 bits
5180    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5181                             getF32Constant(DAG, 0x3e814304, dl));
5182    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5183                             getF32Constant(DAG, 0x3f3c50c8, dl));
5184    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5185    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5186                                         getF32Constant(DAG, 0x3f7f5e7e, dl));
5187  } else if (LimitFloatPrecision <= 12) {
5188    // For floating-point precision of 12:
5189    //
5190    //   TwoToFractionalPartOfX =
5191    //     0.999892986f +
5192    //       (0.696457318f +
5193    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
5194    //
5195    // error 0.000107046256, which is 13 to 14 bits
5196    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5197                             getF32Constant(DAG, 0x3da235e3, dl));
5198    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5199                             getF32Constant(DAG, 0x3e65b8f3, dl));
5200    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5201    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5202                             getF32Constant(DAG, 0x3f324b07, dl));
5203    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5204    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5205                                         getF32Constant(DAG, 0x3f7ff8fd, dl));
5206  } else { // LimitFloatPrecision <= 18
5207    // For floating-point precision of 18:
5208    //
5209    //   TwoToFractionalPartOfX =
5210    //     0.999999982f +
5211    //       (0.693148872f +
5212    //         (0.240227044f +
5213    //           (0.554906021e-1f +
5214    //             (0.961591928e-2f +
5215    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
5216    // error 2.47208000*10^(-7), which is better than 18 bits
5217    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5218                             getF32Constant(DAG, 0x3924b03e, dl));
5219    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5220                             getF32Constant(DAG, 0x3ab24b87, dl));
5221    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5222    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5223                             getF32Constant(DAG, 0x3c1d8c17, dl));
5224    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5225    SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5226                             getF32Constant(DAG, 0x3d634a1d, dl));
5227    SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5228    SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5229                             getF32Constant(DAG, 0x3e75fe14, dl));
5230    SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5231    SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
5232                              getF32Constant(DAG, 0x3f317234, dl));
5233    SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
5234    TwoToFractionalPartOfX = DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
5235                                         getF32Constant(DAG, 0x3f800000, dl));
5236  }
5237
5238  // Add the exponent into the result in integer domain.
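  // Adding IntegerPartOfX << 23 to the bit pattern of the (positive, normal)
  // f32 bumps its IEEE-754 exponent field by IntegerPartOfX, i.e. multiplies
  // the value by 2^IntegerPartOfX, assuming the result stays in normal range.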
5239  SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, TwoToFractionalPartOfX);
5240  return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5241                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
5242}
5243
5244/// expandExp - Lower an exp intrinsic. Handles the special sequences for
5245/// limited-precision mode.
5246static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5247                         const TargetLowering &TLI, SDNodeFlags Flags) {
5248  if (Op.getValueType() == MVT::f32 &&
5249      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5250
5251    // Put the exponent in the right bit position for later addition to the
5252    // final result:
5253    //
5254    // t0 = Op * log2(e)
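    //
    // Since exp(Op) == 2^(Op * log2(e)), this reduces the problem to the
    // limited-precision exp2 expansion below.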
5255
5256    // TODO: What fast-math-flags should be set here?
5257    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
5258                             DAG.getConstantFP(numbers::log2ef, dl, MVT::f32));
5259    return getLimitedPrecisionExp2(t0, dl, DAG);
5260  }
5261
5262  // No special expansion.
5263  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op, Flags);
5264}
5265
5266/// expandLog - Lower a log intrinsic. Handles the special sequences for
5267/// limited-precision mode.
5268static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5269                         const TargetLowering &TLI, SDNodeFlags Flags) {
5270  // TODO: What fast-math-flags should be set on the floating-point nodes?
5271
5272  if (Op.getValueType() == MVT::f32 &&
5273      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5274    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5275
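    // Write Op = 2^Exp * X with X in [1,2); then
    // log(Op) = Exp * log(2) + log(X).
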
5276    // Scale the exponent by log(2).
5277    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5278    SDValue LogOfExponent =
5279        DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5280                    DAG.getConstantFP(numbers::ln2f, dl, MVT::f32));
5281
5282    // Get the significand and build it into a floating-point number with
5283    // exponent of 1.
5284    SDValue X = GetSignificand(DAG, Op1, dl);
5285
5286    SDValue LogOfMantissa;
5287    if (LimitFloatPrecision <= 6) {
5288      // For floating-point precision of 6:
5289      //
      //   LogOfMantissa =
5291      //     -1.1609546f +
5292      //       (1.4034025f - 0.23903021f * x) * x;
5293      //
5294      // error 0.0034276066, which is better than 8 bits
5295      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5296                               getF32Constant(DAG, 0xbe74c456, dl));
5297      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5298                               getF32Constant(DAG, 0x3fb3a2b1, dl));
5299      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5300      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5301                                  getF32Constant(DAG, 0x3f949a29, dl));
5302    } else if (LimitFloatPrecision <= 12) {
5303      // For floating-point precision of 12:
5304      //
5305      //   LogOfMantissa =
5306      //     -1.7417939f +
5307      //       (2.8212026f +
5308      //         (-1.4699568f +
5309      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
5310      //
5311      // error 0.000061011436, which is 14 bits
5312      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5313                               getF32Constant(DAG, 0xbd67b6d6, dl));
5314      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5315                               getF32Constant(DAG, 0x3ee4f4b8, dl));
5316      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5317      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5318                               getF32Constant(DAG, 0x3fbc278b, dl));
5319      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5320      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5321                               getF32Constant(DAG, 0x40348e95, dl));
5322      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5323      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5324                                  getF32Constant(DAG, 0x3fdef31a, dl));
5325    } else { // LimitFloatPrecision <= 18
5326      // For floating-point precision of 18:
5327      //
5328      //   LogOfMantissa =
5329      //     -2.1072184f +
5330      //       (4.2372794f +
5331      //         (-3.7029485f +
5332      //           (2.2781945f +
5333      //             (-0.87823314f +
5334      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
5335      //
5336      // error 0.0000023660568, which is better than 18 bits
5337      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5338                               getF32Constant(DAG, 0xbc91e5ac, dl));
5339      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5340                               getF32Constant(DAG, 0x3e4350aa, dl));
5341      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5342      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5343                               getF32Constant(DAG, 0x3f60d3e3, dl));
5344      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5345      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5346                               getF32Constant(DAG, 0x4011cdf0, dl));
5347      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5348      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5349                               getF32Constant(DAG, 0x406cfd1c, dl));
5350      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5351      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5352                               getF32Constant(DAG, 0x408797cb, dl));
5353      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5354      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5355                                  getF32Constant(DAG, 0x4006dcab, dl));
5356    }
5357
5358    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
5359  }
5360
5361  // No special expansion.
5362  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op, Flags);
5363}
5364
5365/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
5366/// limited-precision mode.
5367static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5368                          const TargetLowering &TLI, SDNodeFlags Flags) {
5369  // TODO: What fast-math-flags should be set on the floating-point nodes?
5370
5371  if (Op.getValueType() == MVT::f32 &&
5372      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5373    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5374
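    // Write Op = 2^Exp * X with X in [1,2); then log2(Op) = Exp + log2(X).
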
5375    // Get the exponent.
5376    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);
5377
5378    // Get the significand and build it into a floating-point number with
5379    // exponent of 1.
5380    SDValue X = GetSignificand(DAG, Op1, dl);
5381
    // Different possible minimax polynomial approximations of the significand,
    // for various degrees of accuracy over [1,2].
5384    SDValue Log2ofMantissa;
5385    if (LimitFloatPrecision <= 6) {
5386      // For floating-point precision of 6:
5387      //
5388      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
5389      //
5390      // error 0.0049451742, which is more than 7 bits
5391      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5392                               getF32Constant(DAG, 0xbeb08fe0, dl));
5393      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5394                               getF32Constant(DAG, 0x40019463, dl));
5395      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5396      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5397                                   getF32Constant(DAG, 0x3fd6633d, dl));
5398    } else if (LimitFloatPrecision <= 12) {
5399      // For floating-point precision of 12:
5400      //
5401      //   Log2ofMantissa =
5402      //     -2.51285454f +
5403      //       (4.07009056f +
5404      //         (-2.12067489f +
5405      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
5406      //
5407      // error 0.0000876136000, which is better than 13 bits
5408      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5409                               getF32Constant(DAG, 0xbda7262e, dl));
5410      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5411                               getF32Constant(DAG, 0x3f25280b, dl));
5412      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5413      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5414                               getF32Constant(DAG, 0x4007b923, dl));
5415      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5416      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5417                               getF32Constant(DAG, 0x40823e2f, dl));
5418      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5419      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5420                                   getF32Constant(DAG, 0x4020d29c, dl));
5421    } else { // LimitFloatPrecision <= 18
5422      // For floating-point precision of 18:
5423      //
5424      //   Log2ofMantissa =
5425      //     -3.0400495f +
5426      //       (6.1129976f +
5427      //         (-5.3420409f +
5428      //           (3.2865683f +
5429      //             (-1.2669343f +
5430      //               (0.27515199f -
5431      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
5432      //
5433      // error 0.0000018516, which is better than 18 bits
5434      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5435                               getF32Constant(DAG, 0xbcd2769e, dl));
5436      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5437                               getF32Constant(DAG, 0x3e8ce0b9, dl));
5438      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5439      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5440                               getF32Constant(DAG, 0x3fa22ae7, dl));
5441      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5442      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
5443                               getF32Constant(DAG, 0x40525723, dl));
5444      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5445      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
5446                               getF32Constant(DAG, 0x40aaf200, dl));
5447      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5448      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
5449                               getF32Constant(DAG, 0x40c39dad, dl));
5450      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
5451      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
5452                                   getF32Constant(DAG, 0x4042902c, dl));
5453    }
5454
5455    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
5456  }
5457
5458  // No special expansion.
5459  return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op, Flags);
5460}
5461
5462/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
5463/// limited-precision mode.
5464static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5465                           const TargetLowering &TLI, SDNodeFlags Flags) {
5466  // TODO: What fast-math-flags should be set on the floating-point nodes?
5467
5468  if (Op.getValueType() == MVT::f32 &&
5469      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5470    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);
5471
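    // Write Op = 2^Exp * X with X in [1,2); then
    // log10(Op) = Exp * log10(2) + log10(X).
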
5472    // Scale the exponent by log10(2) [0.30102999f].
5473    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
5474    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
5475                                        getF32Constant(DAG, 0x3e9a209a, dl));
5476
5477    // Get the significand and build it into a floating-point number with
5478    // exponent of 1.
5479    SDValue X = GetSignificand(DAG, Op1, dl);
5480
5481    SDValue Log10ofMantissa;
5482    if (LimitFloatPrecision <= 6) {
5483      // For floating-point precision of 6:
5484      //
5485      //   Log10ofMantissa =
5486      //     -0.50419619f +
5487      //       (0.60948995f - 0.10380950f * x) * x;
5488      //
5489      // error 0.0014886165, which is 6 bits
5490      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5491                               getF32Constant(DAG, 0xbdd49a13, dl));
5492      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
5493                               getF32Constant(DAG, 0x3f1c0789, dl));
5494      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5495      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
5496                                    getF32Constant(DAG, 0x3f011300, dl));
5497    } else if (LimitFloatPrecision <= 12) {
5498      // For floating-point precision of 12:
5499      //
5500      //   Log10ofMantissa =
5501      //     -0.64831180f +
5502      //       (0.91751397f +
5503      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
5504      //
5505      // error 0.00019228036, which is better than 12 bits
5506      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5507                               getF32Constant(DAG, 0x3d431f31, dl));
5508      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5509                               getF32Constant(DAG, 0x3ea21fb2, dl));
5510      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5511      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5512                               getF32Constant(DAG, 0x3f6ae232, dl));
5513      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5514      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5515                                    getF32Constant(DAG, 0x3f25f7c3, dl));
5516    } else { // LimitFloatPrecision <= 18
5517      // For floating-point precision of 18:
5518      //
5519      //   Log10ofMantissa =
5520      //     -0.84299375f +
5521      //       (1.5327582f +
5522      //         (-1.0688956f +
5523      //           (0.49102474f +
5524      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
5525      //
5526      // error 0.0000037995730, which is better than 18 bits
5527      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
5528                               getF32Constant(DAG, 0x3c5d51ce, dl));
5529      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
5530                               getF32Constant(DAG, 0x3e00685a, dl));
5531      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
5532      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
5533                               getF32Constant(DAG, 0x3efb6798, dl));
5534      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
5535      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
5536                               getF32Constant(DAG, 0x3f88d192, dl));
5537      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
5538      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
5539                               getF32Constant(DAG, 0x3fc4316c, dl));
5540      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
5541      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
5542                                    getF32Constant(DAG, 0x3f57ce70, dl));
5543    }
5544
5545    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log10ofMantissa);
5546  }
5547
5548  // No special expansion.
5549  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op, Flags);
5550}
5551
5552/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
5553/// limited-precision mode.
5554static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
5555                          const TargetLowering &TLI, SDNodeFlags Flags) {
5556  if (Op.getValueType() == MVT::f32 &&
5557      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
5558    return getLimitedPrecisionExp2(Op, dl, DAG);
5559
5560  // No special expansion.
5561  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op, Flags);
5562}
5563
/// expandPow - Lower a pow intrinsic. Handles the special sequences for
5565/// limited-precision mode with x == 10.0f.
5566static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
5567                         SelectionDAG &DAG, const TargetLowering &TLI,
5568                         SDNodeFlags Flags) {
5569  bool IsExp10 = false;
5570  if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
5571      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
5572    if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
5573      APFloat Ten(10.0f);
5574      IsExp10 = LHSC->isExactlyValue(Ten);
5575    }
5576  }
5577
5578  // TODO: What fast-math-flags should be set on the FMUL node?
5579  if (IsExp10) {
5580    // Put the exponent in the right bit position for later addition to the
5581    // final result:
5582    //
5583    //   #define LOG2OF10 3.3219281f
5584    //   t0 = Op * LOG2OF10;
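    //
    //   (0x40549a78 is the f32 bit pattern of LOG2OF10 == 3.3219281f.)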
5585    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
5586                             getF32Constant(DAG, 0x40549a78, dl));
5587    return getLimitedPrecisionExp2(t0, dl, DAG);
5588  }
5589
5590  // No special expansion.
5591  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS, Flags);
5592}
5593
5594/// ExpandPowI - Expand a llvm.powi intrinsic.
5595static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
5596                          SelectionDAG &DAG) {
5597  // If RHS is a constant, we can expand this out to a multiplication tree if
5598  // it's beneficial on the target, otherwise we end up lowering to a call to
5599  // __powidf2 (for example).
5600  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5601    unsigned Val = RHSC->getSExtValue();
5602
5603    // powi(x, 0) -> 1.0
5604    if (Val == 0)
5605      return DAG.getConstantFP(1.0, DL, LHS.getValueType());
5606
5607    if (DAG.getTargetLoweringInfo().isBeneficialToExpandPowI(
5608            Val, DAG.shouldOptForSize())) {
5609      // Get the exponent as a positive value.
5610      if ((int)Val < 0)
5611        Val = -Val;
5612      // We use the simple binary decomposition method to generate the multiply
5613      // sequence.  There are more optimal ways to do this (for example,
5614      // powi(x,15) generates one more multiply than it should), but this has
5615      // the benefit of being both really simple and much better than a libcall.
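      // For example, Val == 13 (0b1101) produces Res = x * x^4 * x^8 == x^13,
      // built from the running squares x, x^2, x^4, x^8 held in CurSquare.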
5616      SDValue Res; // Logically starts equal to 1.0
5617      SDValue CurSquare = LHS;
5618      // TODO: Intrinsics should have fast-math-flags that propagate to these
5619      // nodes.
5620      while (Val) {
5621        if (Val & 1) {
5622          if (Res.getNode())
5623            Res =
5624                DAG.getNode(ISD::FMUL, DL, Res.getValueType(), Res, CurSquare);
5625          else
5626            Res = CurSquare; // 1.0*CurSquare.
5627        }
5628
5629        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
5630                                CurSquare, CurSquare);
5631        Val >>= 1;
5632      }
5633
5634      // If the original was negative, invert the result, producing 1/(x*x*x).
5635      if (RHSC->getSExtValue() < 0)
5636        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
5637                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
5638      return Res;
5639    }
5640  }
5641
5642  // Otherwise, expand to a libcall.
5643  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
5644}
5645
5646static SDValue expandDivFix(unsigned Opcode, const SDLoc &DL,
5647                            SDValue LHS, SDValue RHS, SDValue Scale,
5648                            SelectionDAG &DAG, const TargetLowering &TLI) {
5649  EVT VT = LHS.getValueType();
5650  bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT;
5651  bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT;
5652  LLVMContext &Ctx = *DAG.getContext();
5653
5654  // If the type is legal but the operation isn't, this node might survive all
5655  // the way to operation legalization. If we end up there and we do not have
5656  // the ability to widen the type (if VT*2 is not legal), we cannot expand the
5657  // node.
5658
5659  // Coax the legalizer into expanding the node during type legalization instead
5660  // by bumping the size by one bit. This will force it to Promote, enabling the
5661  // early expansion and avoiding the need to expand later.
5662
5663  // We don't have to do this if Scale is 0; that can always be expanded, unless
5664  // it's a saturating signed operation. Those can experience true integer
5665  // division overflow, a case which we must avoid.
5666
5667  // FIXME: We wouldn't have to do this (or any of the early
5668  // expansion/promotion) if it was possible to expand a libcall of an
5669  // illegal type during operation legalization. But it's not, so things
5670  // get a bit hacky.
5671  unsigned ScaleInt = Scale->getAsZExtVal();
5672  if ((ScaleInt > 0 || (Saturating && Signed)) &&
5673      (TLI.isTypeLegal(VT) ||
5674       (VT.isVector() && TLI.isTypeLegal(VT.getVectorElementType())))) {
5675    TargetLowering::LegalizeAction Action = TLI.getFixedPointOperationAction(
5676        Opcode, VT, ScaleInt);
5677    if (Action != TargetLowering::Legal && Action != TargetLowering::Custom) {
5678      EVT PromVT;
5679      if (VT.isScalarInteger())
5680        PromVT = EVT::getIntegerVT(Ctx, VT.getSizeInBits() + 1);
5681      else if (VT.isVector()) {
5682        PromVT = VT.getVectorElementType();
5683        PromVT = EVT::getIntegerVT(Ctx, PromVT.getSizeInBits() + 1);
5684        PromVT = EVT::getVectorVT(Ctx, PromVT, VT.getVectorElementCount());
5685      } else
5686        llvm_unreachable("Wrong VT for DIVFIX?");
5687      LHS = DAG.getExtOrTrunc(Signed, LHS, DL, PromVT);
5688      RHS = DAG.getExtOrTrunc(Signed, RHS, DL, PromVT);
5689      EVT ShiftTy = TLI.getShiftAmountTy(PromVT, DAG.getDataLayout());
5690      // For saturating operations, we need to shift up the LHS to get the
5691      // proper saturation width, and then shift down again afterwards.
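      // Doubling the LHS doubles the quotient, so saturating at the wider
      // PromVT bounds and then shifting right by one matches saturating at
      // the original VT bounds.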
5692      if (Saturating)
5693        LHS = DAG.getNode(ISD::SHL, DL, PromVT, LHS,
5694                          DAG.getConstant(1, DL, ShiftTy));
5695      SDValue Res = DAG.getNode(Opcode, DL, PromVT, LHS, RHS, Scale);
5696      if (Saturating)
5697        Res = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, PromVT, Res,
5698                          DAG.getConstant(1, DL, ShiftTy));
5699      return DAG.getZExtOrTrunc(Res, DL, VT);
5700    }
5701  }
5702
5703  return DAG.getNode(Opcode, DL, VT, LHS, RHS, Scale);
5704}
5705
// getUnderlyingArgRegs - Find the underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
// pairs.
5708static void
5709getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, TypeSize>> &Regs,
5710                     const SDValue &N) {
5711  switch (N.getOpcode()) {
5712  case ISD::CopyFromReg: {
5713    SDValue Op = N.getOperand(1);
5714    Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
5715                      Op.getValueType().getSizeInBits());
5716    return;
5717  }
5718  case ISD::BITCAST:
5719  case ISD::AssertZext:
5720  case ISD::AssertSext:
5721  case ISD::TRUNCATE:
5722    getUnderlyingArgRegs(Regs, N.getOperand(0));
5723    return;
5724  case ISD::BUILD_PAIR:
5725  case ISD::BUILD_VECTOR:
5726  case ISD::CONCAT_VECTORS:
5727    for (SDValue Op : N->op_values())
5728      getUnderlyingArgRegs(Regs, Op);
5729    return;
5730  default:
5731    return;
5732  }
5733}
5734
5735/// If the DbgValueInst is a dbg_value of a function argument, create the
5736/// corresponding DBG_VALUE machine instruction for it now.  At the end of
/// instruction selection, they will be inserted into the entry BB.
5738/// We don't currently support this for variadic dbg_values, as they shouldn't
5739/// appear for function arguments or in the prologue.
5740bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
5741    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
5742    DILocation *DL, FuncArgumentDbgValueKind Kind, const SDValue &N) {
5743  const Argument *Arg = dyn_cast<Argument>(V);
5744  if (!Arg)
5745    return false;
5746
5747  MachineFunction &MF = DAG.getMachineFunction();
5748  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
5749
5750  // Helper to create DBG_INSTR_REFs or DBG_VALUEs, depending on what kind
5751  // we've been asked to pursue.
5752  auto MakeVRegDbgValue = [&](Register Reg, DIExpression *FragExpr,
5753                              bool Indirect) {
5754    if (Reg.isVirtual() && MF.useDebugInstrRef()) {
5755      // For VRegs, in instruction referencing mode, create a DBG_INSTR_REF
5756      // pointing at the VReg, which will be patched up later.
5757      auto &Inst = TII->get(TargetOpcode::DBG_INSTR_REF);
5758      SmallVector<MachineOperand, 1> MOs({MachineOperand::CreateReg(
5759          /* Reg */ Reg, /* isDef */ false, /* isImp */ false,
5760          /* isKill */ false, /* isDead */ false,
5761          /* isUndef */ false, /* isEarlyClobber */ false,
5762          /* SubReg */ 0, /* isDebug */ true)});
5763
5764      auto *NewDIExpr = FragExpr;
5765      // We don't have an "Indirect" field in DBG_INSTR_REF, fold that into
5766      // the DIExpression.
5767      if (Indirect)
5768        NewDIExpr = DIExpression::prepend(FragExpr, DIExpression::DerefBefore);
5769      SmallVector<uint64_t, 2> Ops({dwarf::DW_OP_LLVM_arg, 0});
5770      NewDIExpr = DIExpression::prependOpcodes(NewDIExpr, Ops);
5771      return BuildMI(MF, DL, Inst, false, MOs, Variable, NewDIExpr);
5772    } else {
5773      // Create a completely standard DBG_VALUE.
5774      auto &Inst = TII->get(TargetOpcode::DBG_VALUE);
5775      return BuildMI(MF, DL, Inst, Indirect, Reg, Variable, FragExpr);
5776    }
5777  };
5778
5779  if (Kind == FuncArgumentDbgValueKind::Value) {
5780    // ArgDbgValues are hoisted to the beginning of the entry block. So we
5781    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
5782    // the entry block.
5783    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
5784    if (!IsInEntryBlock)
5785      return false;
5786
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that is also a parameter.
    //
    // However, if we are already at the top of the entry block, we can still
    // emit using ArgDbgValue. This catches situations where the dbg.value
    // refers to an argument that isn't used in the entry block, so any
    // CopyToReg node would be optimized out and the only way to express this
    // DBG_VALUE is by using the physical reg (or FI) as done in this method.
5800    bool VariableIsFunctionInputArg = Variable->isParameter() &&
5801        !DL->getInlinedAt();
5802    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
5803    if (!IsInPrologue && !VariableIsFunctionInputArg)
5804      return false;
5805
    // Here we assume that an IR-level function argument can only be used to
    // describe one source-level input parameter. For example, given source
    // code like this
5809    //
5810    //    struct A { long x, y; };
5811    //    void foo(struct A a, long b) {
5812    //      ...
5813    //      b = a.x;
5814    //      ...
5815    //    }
5816    //
5817    // and IR like this
5818    //
5819    //  define void @foo(i32 %a1, i32 %a2, i32 %b)  {
5820    //  entry:
5821    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
5822    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
5823    //    call void @llvm.dbg.value(metadata i32 %b, "b",
5824    //    ...
5825    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
5826    //    ...
5827    //
    // then the last dbg.value describes the parameter "b" using a value that
    // is an argument. But since we have already used %a1 to describe a
    // parameter, we should not handle that last dbg.value here (doing so would
    // result in an incorrect hoisting of the DBG_VALUE to the function entry).
    // Note that we allow one dbg.value per IR-level argument to accommodate
    // the fragment situation described above.
5834    if (VariableIsFunctionInputArg) {
5835      unsigned ArgNo = Arg->getArgNo();
5836      if (ArgNo >= FuncInfo.DescribedArgs.size())
5837        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
5838      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
5839        return false;
5840      FuncInfo.DescribedArgs.set(ArgNo);
5841    }
5842  }
5843
5844  bool IsIndirect = false;
5845  std::optional<MachineOperand> Op;
5846  // Some arguments' frame index is recorded during argument lowering.
5847  int FI = FuncInfo.getArgumentFrameIndex(Arg);
5848  if (FI != std::numeric_limits<int>::max())
5849    Op = MachineOperand::CreateFI(FI);
5850
5851  SmallVector<std::pair<unsigned, TypeSize>, 8> ArgRegsAndSizes;
5852  if (!Op && N.getNode()) {
5853    getUnderlyingArgRegs(ArgRegsAndSizes, N);
5854    Register Reg;
5855    if (ArgRegsAndSizes.size() == 1)
5856      Reg = ArgRegsAndSizes.front().first;
5857
5858    if (Reg && Reg.isVirtual()) {
5859      MachineRegisterInfo &RegInfo = MF.getRegInfo();
5860      Register PR = RegInfo.getLiveInPhysReg(Reg);
5861      if (PR)
5862        Reg = PR;
5863    }
5864    if (Reg) {
5865      Op = MachineOperand::CreateReg(Reg, false);
5866      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5867    }
5868  }
5869
5870  if (!Op && N.getNode()) {
5871    // Check if frame index is available.
5872    SDValue LCandidate = peekThroughBitcasts(N);
5873    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
5874      if (FrameIndexSDNode *FINode =
5875          dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
5876        Op = MachineOperand::CreateFI(FINode->getIndex());
5877  }
5878
5879  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in SplitRegs so that,
    // together, they cover the whole variable.
5881    auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, TypeSize>>
5882                                         SplitRegs) {
5883      unsigned Offset = 0;
5884      for (const auto &RegAndSize : SplitRegs) {
5885        // If the expression is already a fragment, the current register
5886        // offset+size might extend beyond the fragment. In this case, only
5887        // the register bits that are inside the fragment are relevant.
5888        int RegFragmentSizeInBits = RegAndSize.second;
5889        if (auto ExprFragmentInfo = Expr->getFragmentInfo()) {
5890          uint64_t ExprFragmentSizeInBits = ExprFragmentInfo->SizeInBits;
5891          // The register is entirely outside the expression fragment,
5892          // so is irrelevant for debug info.
5893          if (Offset >= ExprFragmentSizeInBits)
5894            break;
5895          // The register is partially outside the expression fragment, only
5896          // the low bits within the fragment are relevant for debug info.
5897          if (Offset + RegFragmentSizeInBits > ExprFragmentSizeInBits) {
5898            RegFragmentSizeInBits = ExprFragmentSizeInBits - Offset;
5899          }
5900        }
5901
5902        auto FragmentExpr = DIExpression::createFragmentExpression(
5903            Expr, Offset, RegFragmentSizeInBits);
5904        Offset += RegAndSize.second;
5905        // If a valid fragment expression cannot be created, the variable's
5906        // correct value cannot be determined and so it is set as Undef.
5907        if (!FragmentExpr) {
5908          SDDbgValue *SDV = DAG.getConstantDbgValue(
5909              Variable, Expr, UndefValue::get(V->getType()), DL, SDNodeOrder);
5910          DAG.AddDbgValue(SDV, false);
5911          continue;
5912        }
5913        MachineInstr *NewMI =
5914            MakeVRegDbgValue(RegAndSize.first, *FragmentExpr,
5915                             Kind != FuncArgumentDbgValueKind::Value);
5916        FuncInfo.ArgDbgValues.push_back(NewMI);
5917      }
5918    };
5919
5920    // Check if ValueMap has reg number.
5921    DenseMap<const Value *, Register>::const_iterator
5922      VMI = FuncInfo.ValueMap.find(V);
5923    if (VMI != FuncInfo.ValueMap.end()) {
5924      const auto &TLI = DAG.getTargetLoweringInfo();
5925      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
5926                       V->getType(), std::nullopt);
5927      if (RFV.occupiesMultipleRegs()) {
5928        splitMultiRegDbgValue(RFV.getRegsAndSizes());
5929        return true;
5930      }
5931
5932      Op = MachineOperand::CreateReg(VMI->second, false);
5933      IsIndirect = Kind != FuncArgumentDbgValueKind::Value;
5934    } else if (ArgRegsAndSizes.size() > 1) {
5935      // This was split due to the calling convention, and no virtual register
5936      // mapping exists for the value.
5937      splitMultiRegDbgValue(ArgRegsAndSizes);
5938      return true;
5939    }
5940  }
5941
5942  if (!Op)
5943    return false;
5944
5945  assert(Variable->isValidLocationForIntrinsic(DL) &&
5946         "Expected inlined-at fields to agree");
5947  MachineInstr *NewMI = nullptr;
5948
5949  if (Op->isReg())
5950    NewMI = MakeVRegDbgValue(Op->getReg(), Expr, IsIndirect);
5951  else
5952    NewMI = BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), true, *Op,
5953                    Variable, Expr);
5954
5955  // Otherwise, use ArgDbgValues.
5956  FuncInfo.ArgDbgValues.push_back(NewMI);
5957  return true;
5958}
5959
5960/// Return the appropriate SDDbgValue based on N.
5961SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
5962                                             DILocalVariable *Variable,
5963                                             DIExpression *Expr,
5964                                             const DebugLoc &dl,
5965                                             unsigned DbgSDNodeOrder) {
5966  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
5967    // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
5968    // stack slot locations.
5969    //
5970    // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
5971    // debug values here after optimization:
5972    //
5973    //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
5974    //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
5975    //
5976    // Both describe the direct values of their associated variables.
5977    return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
5978                                     /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5979  }
5980  return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
5981                         /*IsIndirect*/ false, dl, DbgSDNodeOrder);
5982}
5983
5984static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
5985  switch (Intrinsic) {
5986  case Intrinsic::smul_fix:
5987    return ISD::SMULFIX;
5988  case Intrinsic::umul_fix:
5989    return ISD::UMULFIX;
5990  case Intrinsic::smul_fix_sat:
5991    return ISD::SMULFIXSAT;
5992  case Intrinsic::umul_fix_sat:
5993    return ISD::UMULFIXSAT;
5994  case Intrinsic::sdiv_fix:
5995    return ISD::SDIVFIX;
5996  case Intrinsic::udiv_fix:
5997    return ISD::UDIVFIX;
5998  case Intrinsic::sdiv_fix_sat:
5999    return ISD::SDIVFIXSAT;
6000  case Intrinsic::udiv_fix_sat:
6001    return ISD::UDIVFIXSAT;
6002  default:
6003    llvm_unreachable("Unhandled fixed point intrinsic");
6004  }
6005}
6006
6007void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
6008                                           const char *FunctionName) {
6009  assert(FunctionName && "FunctionName must not be nullptr");
6010  SDValue Callee = DAG.getExternalSymbol(
6011      FunctionName,
6012      DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
6013  LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
6014}
6015
6016/// Given a @llvm.call.preallocated.setup, return the corresponding
6017/// preallocated call.
6018static const CallBase *FindPreallocatedCall(const Value *PreallocatedSetup) {
6019  assert(cast<CallBase>(PreallocatedSetup)
6020                 ->getCalledFunction()
6021                 ->getIntrinsicID() == Intrinsic::call_preallocated_setup &&
6022         "expected call_preallocated_setup Value");
6023  for (const auto *U : PreallocatedSetup->users()) {
6024    auto *UseCall = cast<CallBase>(U);
6025    const Function *Fn = UseCall->getCalledFunction();
6026    if (!Fn || Fn->getIntrinsicID() != Intrinsic::call_preallocated_arg) {
6027      return UseCall;
6028    }
6029  }
6030  llvm_unreachable("expected corresponding call to preallocated setup/arg");
6031}
6032
6033/// If DI is a debug value with an EntryValue expression, lower it using the
6034/// corresponding physical register of the associated Argument value
6035/// (guaranteed to exist by the verifier).
6036bool SelectionDAGBuilder::visitEntryValueDbgValue(
6037    ArrayRef<const Value *> Values, DILocalVariable *Variable,
6038    DIExpression *Expr, DebugLoc DbgLoc) {
6039  if (!Expr->isEntryValue() || !hasSingleElement(Values))
6040    return false;
6041
6042  // These properties are guaranteed by the verifier.
6043  const Argument *Arg = cast<Argument>(Values[0]);
6044  assert(Arg->hasAttribute(Attribute::AttrKind::SwiftAsync));
6045
6046  auto ArgIt = FuncInfo.ValueMap.find(Arg);
6047  if (ArgIt == FuncInfo.ValueMap.end()) {
6048    LLVM_DEBUG(
6049        dbgs() << "Dropping dbg.value: expression is entry_value but "
6050                  "couldn't find an associated register for the Argument\n");
6051    return true;
6052  }
6053  Register ArgVReg = ArgIt->getSecond();
6054
6055  for (auto [PhysReg, VirtReg] : FuncInfo.RegInfo->liveins())
6056    if (ArgVReg == VirtReg || ArgVReg == PhysReg) {
6057      SDDbgValue *SDV = DAG.getVRegDbgValue(
          Variable, Expr, PhysReg, false /*IsIndirect*/, DbgLoc, SDNodeOrder);
6059      DAG.AddDbgValue(SDV, false /*treat as dbg.declare byval parameter*/);
6060      return true;
6061    }
6062  LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
6063                       "couldn't find a physical register\n");
6064  return true;
6065}
6066
6067/// Lower the call to the specified intrinsic function.
6068void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
6069                                             unsigned Intrinsic) {
6070  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6071  SDLoc sdl = getCurSDLoc();
6072  DebugLoc dl = getCurDebugLoc();
6073  SDValue Res;
6074
6075  SDNodeFlags Flags;
6076  if (auto *FPOp = dyn_cast<FPMathOperator>(&I))
6077    Flags.copyFMF(*FPOp);
6078
6079  switch (Intrinsic) {
6080  default:
6081    // By default, turn this into a target intrinsic node.
6082    visitTargetIntrinsic(I, Intrinsic);
6083    return;
6084  case Intrinsic::vscale: {
6085    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6086    setValue(&I, DAG.getVScale(sdl, VT, APInt(VT.getSizeInBits(), 1)));
6087    return;
6088  }
6089  case Intrinsic::vastart:  visitVAStart(I); return;
6090  case Intrinsic::vaend:    visitVAEnd(I); return;
6091  case Intrinsic::vacopy:   visitVACopy(I); return;
6092  case Intrinsic::returnaddress:
6093    setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl,
6094                             TLI.getValueType(DAG.getDataLayout(), I.getType()),
6095                             getValue(I.getArgOperand(0))));
6096    return;
6097  case Intrinsic::addressofreturnaddress:
6098    setValue(&I,
6099             DAG.getNode(ISD::ADDROFRETURNADDR, sdl,
6100                         TLI.getValueType(DAG.getDataLayout(), I.getType())));
6101    return;
6102  case Intrinsic::sponentry:
6103    setValue(&I,
6104             DAG.getNode(ISD::SPONENTRY, sdl,
6105                         TLI.getValueType(DAG.getDataLayout(), I.getType())));
6106    return;
6107  case Intrinsic::frameaddress:
6108    setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl,
6109                             TLI.getFrameIndexTy(DAG.getDataLayout()),
6110                             getValue(I.getArgOperand(0))));
6111    return;
6112  case Intrinsic::read_volatile_register:
6113  case Intrinsic::read_register: {
6114    Value *Reg = I.getArgOperand(0);
6115    SDValue Chain = getRoot();
6116    SDValue RegName =
6117        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6118    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6119    Res = DAG.getNode(ISD::READ_REGISTER, sdl,
6120      DAG.getVTList(VT, MVT::Other), Chain, RegName);
6121    setValue(&I, Res);
6122    DAG.setRoot(Res.getValue(1));
6123    return;
6124  }
6125  case Intrinsic::write_register: {
6126    Value *Reg = I.getArgOperand(0);
6127    Value *RegValue = I.getArgOperand(1);
6128    SDValue Chain = getRoot();
6129    SDValue RegName =
6130        DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata()));
6131    DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain,
6132                            RegName, getValue(RegValue)));
6133    return;
6134  }
6135  case Intrinsic::memcpy: {
6136    const auto &MCI = cast<MemCpyInst>(I);
6137    SDValue Op1 = getValue(I.getArgOperand(0));
6138    SDValue Op2 = getValue(I.getArgOperand(1));
6139    SDValue Op3 = getValue(I.getArgOperand(2));
6140    // @llvm.memcpy defines 0 and 1 to both mean no alignment.
6141    Align DstAlign = MCI.getDestAlign().valueOrOne();
6142    Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6143    Align Alignment = std::min(DstAlign, SrcAlign);
6144    bool isVol = MCI.isVolatile();
6145    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6146    // FIXME: Support passing different dest/src alignments to the memcpy DAG
6147    // node.
6148    SDValue Root = isVol ? getRoot() : getMemoryRoot();
6149    SDValue MC = DAG.getMemcpy(
6150        Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6151        /* AlwaysInline */ false, isTC, MachinePointerInfo(I.getArgOperand(0)),
6152        MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6153    updateDAGForMaybeTailCall(MC);
6154    return;
6155  }
6156  case Intrinsic::memcpy_inline: {
6157    const auto &MCI = cast<MemCpyInlineInst>(I);
6158    SDValue Dst = getValue(I.getArgOperand(0));
6159    SDValue Src = getValue(I.getArgOperand(1));
6160    SDValue Size = getValue(I.getArgOperand(2));
6161    assert(isa<ConstantSDNode>(Size) && "memcpy_inline needs constant size");
6162    // @llvm.memcpy.inline defines 0 and 1 to both mean no alignment.
6163    Align DstAlign = MCI.getDestAlign().valueOrOne();
6164    Align SrcAlign = MCI.getSourceAlign().valueOrOne();
6165    Align Alignment = std::min(DstAlign, SrcAlign);
6166    bool isVol = MCI.isVolatile();
6167    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6168    // FIXME: Support passing different dest/src alignments to the memcpy DAG
6169    // node.
6170    SDValue MC = DAG.getMemcpy(
6171        getRoot(), sdl, Dst, Src, Size, Alignment, isVol,
6172        /* AlwaysInline */ true, isTC, MachinePointerInfo(I.getArgOperand(0)),
6173        MachinePointerInfo(I.getArgOperand(1)), I.getAAMetadata(), AA);
6174    updateDAGForMaybeTailCall(MC);
6175    return;
6176  }
6177  case Intrinsic::memset: {
6178    const auto &MSI = cast<MemSetInst>(I);
6179    SDValue Op1 = getValue(I.getArgOperand(0));
6180    SDValue Op2 = getValue(I.getArgOperand(1));
6181    SDValue Op3 = getValue(I.getArgOperand(2));
6182    // @llvm.memset defines 0 and 1 to both mean no alignment.
6183    Align Alignment = MSI.getDestAlign().valueOrOne();
6184    bool isVol = MSI.isVolatile();
6185    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6186    SDValue Root = isVol ? getRoot() : getMemoryRoot();
6187    SDValue MS = DAG.getMemset(
6188        Root, sdl, Op1, Op2, Op3, Alignment, isVol, /* AlwaysInline */ false,
6189        isTC, MachinePointerInfo(I.getArgOperand(0)), I.getAAMetadata());
6190    updateDAGForMaybeTailCall(MS);
6191    return;
6192  }
6193  case Intrinsic::memset_inline: {
6194    const auto &MSII = cast<MemSetInlineInst>(I);
6195    SDValue Dst = getValue(I.getArgOperand(0));
6196    SDValue Value = getValue(I.getArgOperand(1));
6197    SDValue Size = getValue(I.getArgOperand(2));
6198    assert(isa<ConstantSDNode>(Size) && "memset_inline needs constant size");
6199    // @llvm.memset defines 0 and 1 to both mean no alignment.
6200    Align DstAlign = MSII.getDestAlign().valueOrOne();
6201    bool isVol = MSII.isVolatile();
6202    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6203    SDValue Root = isVol ? getRoot() : getMemoryRoot();
6204    SDValue MC = DAG.getMemset(Root, sdl, Dst, Value, Size, DstAlign, isVol,
6205                               /* AlwaysInline */ true, isTC,
6206                               MachinePointerInfo(I.getArgOperand(0)),
6207                               I.getAAMetadata());
6208    updateDAGForMaybeTailCall(MC);
6209    return;
6210  }
6211  case Intrinsic::memmove: {
6212    const auto &MMI = cast<MemMoveInst>(I);
6213    SDValue Op1 = getValue(I.getArgOperand(0));
6214    SDValue Op2 = getValue(I.getArgOperand(1));
6215    SDValue Op3 = getValue(I.getArgOperand(2));
6216    // @llvm.memmove defines 0 and 1 to both mean no alignment.
6217    Align DstAlign = MMI.getDestAlign().valueOrOne();
6218    Align SrcAlign = MMI.getSourceAlign().valueOrOne();
6219    Align Alignment = std::min(DstAlign, SrcAlign);
6220    bool isVol = MMI.isVolatile();
6221    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6222    // FIXME: Support passing different dest/src alignments to the memmove DAG
6223    // node.
6224    SDValue Root = isVol ? getRoot() : getMemoryRoot();
6225    SDValue MM = DAG.getMemmove(Root, sdl, Op1, Op2, Op3, Alignment, isVol,
6226                                isTC, MachinePointerInfo(I.getArgOperand(0)),
6227                                MachinePointerInfo(I.getArgOperand(1)),
6228                                I.getAAMetadata(), AA);
6229    updateDAGForMaybeTailCall(MM);
6230    return;
6231  }
6232  case Intrinsic::memcpy_element_unordered_atomic: {
6233    const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I);
6234    SDValue Dst = getValue(MI.getRawDest());
6235    SDValue Src = getValue(MI.getRawSource());
6236    SDValue Length = getValue(MI.getLength());
6237
6238    Type *LengthTy = MI.getLength()->getType();
6239    unsigned ElemSz = MI.getElementSizeInBytes();
6240    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6241    SDValue MC =
6242        DAG.getAtomicMemcpy(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6243                            isTC, MachinePointerInfo(MI.getRawDest()),
6244                            MachinePointerInfo(MI.getRawSource()));
6245    updateDAGForMaybeTailCall(MC);
6246    return;
6247  }
6248  case Intrinsic::memmove_element_unordered_atomic: {
6249    auto &MI = cast<AtomicMemMoveInst>(I);
6250    SDValue Dst = getValue(MI.getRawDest());
6251    SDValue Src = getValue(MI.getRawSource());
6252    SDValue Length = getValue(MI.getLength());
6253
6254    Type *LengthTy = MI.getLength()->getType();
6255    unsigned ElemSz = MI.getElementSizeInBytes();
6256    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6257    SDValue MC =
6258        DAG.getAtomicMemmove(getRoot(), sdl, Dst, Src, Length, LengthTy, ElemSz,
6259                             isTC, MachinePointerInfo(MI.getRawDest()),
6260                             MachinePointerInfo(MI.getRawSource()));
6261    updateDAGForMaybeTailCall(MC);
6262    return;
6263  }
6264  case Intrinsic::memset_element_unordered_atomic: {
6265    auto &MI = cast<AtomicMemSetInst>(I);
6266    SDValue Dst = getValue(MI.getRawDest());
6267    SDValue Val = getValue(MI.getValue());
6268    SDValue Length = getValue(MI.getLength());
6269
6270    Type *LengthTy = MI.getLength()->getType();
6271    unsigned ElemSz = MI.getElementSizeInBytes();
6272    bool isTC = I.isTailCall() && isInTailCallPosition(I, DAG.getTarget());
6273    SDValue MC =
6274        DAG.getAtomicMemset(getRoot(), sdl, Dst, Val, Length, LengthTy, ElemSz,
6275                            isTC, MachinePointerInfo(MI.getRawDest()));
6276    updateDAGForMaybeTailCall(MC);
6277    return;
6278  }
6279  case Intrinsic::call_preallocated_setup: {
6280    const CallBase *PreallocatedCall = FindPreallocatedCall(&I);
6281    SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6282    SDValue Res = DAG.getNode(ISD::PREALLOCATED_SETUP, sdl, MVT::Other,
6283                              getRoot(), SrcValue);
6284    setValue(&I, Res);
6285    DAG.setRoot(Res);
6286    return;
6287  }
6288  case Intrinsic::call_preallocated_arg: {
6289    const CallBase *PreallocatedCall = FindPreallocatedCall(I.getOperand(0));
6290    SDValue SrcValue = DAG.getSrcValue(PreallocatedCall);
6291    SDValue Ops[3];
6292    Ops[0] = getRoot();
6293    Ops[1] = SrcValue;
6294    Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
6295                                   MVT::i32); // arg index
6296    SDValue Res = DAG.getNode(
6297        ISD::PREALLOCATED_ARG, sdl,
6298        DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Ops);
6299    setValue(&I, Res);
6300    DAG.setRoot(Res.getValue(1));
6301    return;
6302  }
6303  case Intrinsic::dbg_declare: {
6304    const auto &DI = cast<DbgDeclareInst>(I);
6305    // Debug intrinsics are handled separately in assignment tracking mode.
6306    // Some intrinsics are handled right after Argument lowering.
6307    if (AssignmentTrackingEnabled ||
6308        FuncInfo.PreprocessedDbgDeclares.count(&DI))
6309      return;
6310    LLVM_DEBUG(dbgs() << "SelectionDAG visiting dbg_declare: " << DI << "\n");
6311    DILocalVariable *Variable = DI.getVariable();
6312    DIExpression *Expression = DI.getExpression();
6313    dropDanglingDebugInfo(Variable, Expression);
6314    // Assume dbg.declare can not currently use DIArgList, i.e.
6315    // it is non-variadic.
6316    assert(!DI.hasArgList() && "Only dbg.value should currently use DIArgList");
6317    handleDebugDeclare(DI.getVariableLocationOp(0), Variable, Expression,
6318                       DI.getDebugLoc());
6319    return;
6320  }
6321  case Intrinsic::dbg_label: {
6322    const DbgLabelInst &DI = cast<DbgLabelInst>(I);
6323    DILabel *Label = DI.getLabel();
6324    assert(Label && "Missing label");
6325
6326    SDDbgLabel *SDV;
6327    SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder);
6328    DAG.AddDbgLabel(SDV);
6329    return;
6330  }
6331  case Intrinsic::dbg_assign: {
    // Debug intrinsics are handled separately in assignment tracking mode.
6333    if (AssignmentTrackingEnabled)
6334      return;
6335    // If assignment tracking hasn't been enabled then fall through and treat
6336    // the dbg.assign as a dbg.value.
6337    [[fallthrough]];
6338  }
6339  case Intrinsic::dbg_value: {
    // Debug intrinsics are handled separately in assignment tracking mode.
6341    if (AssignmentTrackingEnabled)
6342      return;
6343    const DbgValueInst &DI = cast<DbgValueInst>(I);
6344    assert(DI.getVariable() && "Missing variable");
6345
6346    DILocalVariable *Variable = DI.getVariable();
6347    DIExpression *Expression = DI.getExpression();
6348    dropDanglingDebugInfo(Variable, Expression);
6349
6350    if (DI.isKillLocation()) {
6351      handleKillDebugValue(Variable, Expression, DI.getDebugLoc(), SDNodeOrder);
6352      return;
6353    }
6354
6355    SmallVector<Value *, 4> Values(DI.getValues());
6356    if (Values.empty())
6357      return;
6358
6359    bool IsVariadic = DI.hasArgList();
6360    if (!handleDebugValue(Values, Variable, Expression, DI.getDebugLoc(),
6361                          SDNodeOrder, IsVariadic))
6362      addDanglingDebugInfo(Values, Variable, Expression, IsVariadic,
6363                           DI.getDebugLoc(), SDNodeOrder);
6364    return;
6365  }
6366
6367  case Intrinsic::eh_typeid_for: {
6368    // Find the type id for the given typeinfo.
6369    GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0));
6370    unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV);
6371    Res = DAG.getConstant(TypeID, sdl, MVT::i32);
6372    setValue(&I, Res);
6373    return;
6374  }
6375
6376  case Intrinsic::eh_return_i32:
6377  case Intrinsic::eh_return_i64:
6378    DAG.getMachineFunction().setCallsEHReturn(true);
6379    DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl,
6380                            MVT::Other,
6381                            getControlRoot(),
6382                            getValue(I.getArgOperand(0)),
6383                            getValue(I.getArgOperand(1))));
6384    return;
6385  case Intrinsic::eh_unwind_init:
6386    DAG.getMachineFunction().setCallsUnwindInit(true);
6387    return;
6388  case Intrinsic::eh_dwarf_cfa:
6389    setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl,
6390                             TLI.getPointerTy(DAG.getDataLayout()),
6391                             getValue(I.getArgOperand(0))));
6392    return;
6393  case Intrinsic::eh_sjlj_callsite: {
6394    MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI();
6395    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(0));
6396    assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!");
6397
6398    MMI.setCurrentCallSite(CI->getZExtValue());
6399    return;
6400  }
6401  case Intrinsic::eh_sjlj_functioncontext: {
6402    // Get and store the index of the function context.
6403    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6404    AllocaInst *FnCtx =
6405      cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts());
6406    int FI = FuncInfo.StaticAllocaMap[FnCtx];
6407    MFI.setFunctionContextIndex(FI);
6408    return;
6409  }
6410  case Intrinsic::eh_sjlj_setjmp: {
6411    SDValue Ops[2];
6412    Ops[0] = getRoot();
6413    Ops[1] = getValue(I.getArgOperand(0));
6414    SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl,
6415                             DAG.getVTList(MVT::i32, MVT::Other), Ops);
6416    setValue(&I, Op.getValue(0));
6417    DAG.setRoot(Op.getValue(1));
6418    return;
6419  }
6420  case Intrinsic::eh_sjlj_longjmp:
6421    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other,
6422                            getRoot(), getValue(I.getArgOperand(0))));
6423    return;
6424  case Intrinsic::eh_sjlj_setup_dispatch:
6425    DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other,
6426                            getRoot()));
6427    return;
6428  case Intrinsic::masked_gather:
6429    visitMaskedGather(I);
6430    return;
6431  case Intrinsic::masked_load:
6432    visitMaskedLoad(I);
6433    return;
6434  case Intrinsic::masked_scatter:
6435    visitMaskedScatter(I);
6436    return;
6437  case Intrinsic::masked_store:
6438    visitMaskedStore(I);
6439    return;
6440  case Intrinsic::masked_expandload:
6441    visitMaskedLoad(I, true /* IsExpanding */);
6442    return;
6443  case Intrinsic::masked_compressstore:
6444    visitMaskedStore(I, true /* IsCompressing */);
6445    return;
6446  case Intrinsic::powi:
6447    setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)),
6448                            getValue(I.getArgOperand(1)), DAG));
6449    return;
6450  case Intrinsic::log:
6451    setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6452    return;
6453  case Intrinsic::log2:
6454    setValue(&I,
6455             expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6456    return;
6457  case Intrinsic::log10:
6458    setValue(&I,
6459             expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6460    return;
6461  case Intrinsic::exp:
6462    setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6463    return;
6464  case Intrinsic::exp2:
6465    setValue(&I,
6466             expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI, Flags));
6467    return;
6468  case Intrinsic::pow:
6469    setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)),
6470                           getValue(I.getArgOperand(1)), DAG, TLI, Flags));
6471    return;
6472  case Intrinsic::sqrt:
6473  case Intrinsic::fabs:
6474  case Intrinsic::sin:
6475  case Intrinsic::cos:
6476  case Intrinsic::exp10:
6477  case Intrinsic::floor:
6478  case Intrinsic::ceil:
6479  case Intrinsic::trunc:
6480  case Intrinsic::rint:
6481  case Intrinsic::nearbyint:
6482  case Intrinsic::round:
6483  case Intrinsic::roundeven:
6484  case Intrinsic::canonicalize: {
6485    unsigned Opcode;
6486    switch (Intrinsic) {
6487    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6488    case Intrinsic::sqrt:      Opcode = ISD::FSQRT;      break;
6489    case Intrinsic::fabs:      Opcode = ISD::FABS;       break;
6490    case Intrinsic::sin:       Opcode = ISD::FSIN;       break;
6491    case Intrinsic::cos:       Opcode = ISD::FCOS;       break;
6492    case Intrinsic::exp10:     Opcode = ISD::FEXP10;     break;
6493    case Intrinsic::floor:     Opcode = ISD::FFLOOR;     break;
6494    case Intrinsic::ceil:      Opcode = ISD::FCEIL;      break;
6495    case Intrinsic::trunc:     Opcode = ISD::FTRUNC;     break;
6496    case Intrinsic::rint:      Opcode = ISD::FRINT;      break;
6497    case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break;
6498    case Intrinsic::round:     Opcode = ISD::FROUND;     break;
6499    case Intrinsic::roundeven: Opcode = ISD::FROUNDEVEN; break;
6500    case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break;
6501    }
6502
6503    setValue(&I, DAG.getNode(Opcode, sdl,
6504                             getValue(I.getArgOperand(0)).getValueType(),
6505                             getValue(I.getArgOperand(0)), Flags));
6506    return;
6507  }
6508  case Intrinsic::lround:
6509  case Intrinsic::llround:
6510  case Intrinsic::lrint:
6511  case Intrinsic::llrint: {
6512    unsigned Opcode;
6513    switch (Intrinsic) {
6514    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
6515    case Intrinsic::lround:  Opcode = ISD::LROUND;  break;
6516    case Intrinsic::llround: Opcode = ISD::LLROUND; break;
6517    case Intrinsic::lrint:   Opcode = ISD::LRINT;   break;
6518    case Intrinsic::llrint:  Opcode = ISD::LLRINT;  break;
6519    }
6520
6521    EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6522    setValue(&I, DAG.getNode(Opcode, sdl, RetVT,
6523                             getValue(I.getArgOperand(0))));
6524    return;
6525  }
6526  case Intrinsic::minnum:
6527    setValue(&I, DAG.getNode(ISD::FMINNUM, sdl,
6528                             getValue(I.getArgOperand(0)).getValueType(),
6529                             getValue(I.getArgOperand(0)),
6530                             getValue(I.getArgOperand(1)), Flags));
6531    return;
6532  case Intrinsic::maxnum:
6533    setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl,
6534                             getValue(I.getArgOperand(0)).getValueType(),
6535                             getValue(I.getArgOperand(0)),
6536                             getValue(I.getArgOperand(1)), Flags));
6537    return;
6538  case Intrinsic::minimum:
6539    setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl,
6540                             getValue(I.getArgOperand(0)).getValueType(),
6541                             getValue(I.getArgOperand(0)),
6542                             getValue(I.getArgOperand(1)), Flags));
6543    return;
6544  case Intrinsic::maximum:
6545    setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl,
6546                             getValue(I.getArgOperand(0)).getValueType(),
6547                             getValue(I.getArgOperand(0)),
6548                             getValue(I.getArgOperand(1)), Flags));
6549    return;
6550  case Intrinsic::copysign:
6551    setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl,
6552                             getValue(I.getArgOperand(0)).getValueType(),
6553                             getValue(I.getArgOperand(0)),
6554                             getValue(I.getArgOperand(1)), Flags));
6555    return;
6556  case Intrinsic::ldexp:
6557    setValue(&I, DAG.getNode(ISD::FLDEXP, sdl,
6558                             getValue(I.getArgOperand(0)).getValueType(),
6559                             getValue(I.getArgOperand(0)),
6560                             getValue(I.getArgOperand(1)), Flags));
6561    return;
6562  case Intrinsic::frexp: {
6563    SmallVector<EVT, 2> ValueVTs;
6564    ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);
6565    SDVTList VTs = DAG.getVTList(ValueVTs);
6566    setValue(&I,
6567             DAG.getNode(ISD::FFREXP, sdl, VTs, getValue(I.getArgOperand(0))));
6568    return;
6569  }
6570  case Intrinsic::arithmetic_fence: {
6571    setValue(&I, DAG.getNode(ISD::ARITH_FENCE, sdl,
6572                             getValue(I.getArgOperand(0)).getValueType(),
6573                             getValue(I.getArgOperand(0)), Flags));
6574    return;
6575  }
6576  case Intrinsic::fma:
6577    setValue(&I, DAG.getNode(
6578                     ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(),
6579                     getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)),
6580                     getValue(I.getArgOperand(2)), Flags));
6581    return;
6582#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
6583  case Intrinsic::INTRINSIC:
6584#include "llvm/IR/ConstrainedOps.def"
6585    visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I));
6586    return;
6587#define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
6588#include "llvm/IR/VPIntrinsics.def"
6589    visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
6590    return;
6591  case Intrinsic::fptrunc_round: {
6592    // Get the last argument (the metadata) and convert it to an integer in
6593    // the call.
6594    Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
6595    std::optional<RoundingMode> RoundMode =
6596        convertStrToRoundingMode(cast<MDString>(MD)->getString());
6597
6598    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6599
6600    // Propagate fast-math-flags from IR to node(s).
6601    SDNodeFlags Flags;
6602    Flags.copyFMF(*cast<FPMathOperator>(&I));
6603    SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
6604
6605    SDValue Result;
6606    Result = DAG.getNode(
6607        ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
6608        DAG.getTargetConstant((int)*RoundMode, sdl,
6609                              TLI.getPointerTy(DAG.getDataLayout())));
6610    setValue(&I, Result);
6611
6612    return;
6613  }
6614  case Intrinsic::fmuladd: {
6615    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6616    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
6617        TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
6618      setValue(&I, DAG.getNode(ISD::FMA, sdl,
6619                               getValue(I.getArgOperand(0)).getValueType(),
6620                               getValue(I.getArgOperand(0)),
6621                               getValue(I.getArgOperand(1)),
6622                               getValue(I.getArgOperand(2)), Flags));
6623    } else {
6624      // TODO: Intrinsic calls should have fast-math-flags.
6625      SDValue Mul = DAG.getNode(
6626          ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(),
6627          getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), Flags);
6628      SDValue Add = DAG.getNode(ISD::FADD, sdl,
6629                                getValue(I.getArgOperand(0)).getValueType(),
6630                                Mul, getValue(I.getArgOperand(2)), Flags);
6631      setValue(&I, Add);
6632    }
6633    return;
6634  }
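  // llvm.convert.to.fp16 returns the raw half-precision bits as an i16, so
  // round to f16 and bitcast the result; convert.from.fp16 is the inverse.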
6635  case Intrinsic::convert_to_fp16:
6636    setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16,
6637                             DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16,
6638                                         getValue(I.getArgOperand(0)),
6639                                         DAG.getTargetConstant(0, sdl,
6640                                                               MVT::i32))));
6641    return;
6642  case Intrinsic::convert_from_fp16:
6643    setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl,
6644                             TLI.getValueType(DAG.getDataLayout(), I.getType()),
6645                             DAG.getNode(ISD::BITCAST, sdl, MVT::f16,
6646                                         getValue(I.getArgOperand(0)))));
6647    return;
6648  case Intrinsic::fptosi_sat: {
6649    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6650    setValue(&I, DAG.getNode(ISD::FP_TO_SINT_SAT, sdl, VT,
6651                             getValue(I.getArgOperand(0)),
6652                             DAG.getValueType(VT.getScalarType())));
6653    return;
6654  }
6655  case Intrinsic::fptoui_sat: {
6656    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6657    setValue(&I, DAG.getNode(ISD::FP_TO_UINT_SAT, sdl, VT,
6658                             getValue(I.getArgOperand(0)),
6659                             DAG.getValueType(VT.getScalarType())));
6660    return;
6661  }
6662  case Intrinsic::set_rounding:
6663    Res = DAG.getNode(ISD::SET_ROUNDING, sdl, MVT::Other,
6664                      {getRoot(), getValue(I.getArgOperand(0))});
6665    setValue(&I, Res);
6666    DAG.setRoot(Res.getValue(0));
6667    return;
6668  case Intrinsic::is_fpclass: {
6669    const DataLayout DLayout = DAG.getDataLayout();
6670    EVT DestVT = TLI.getValueType(DLayout, I.getType());
6671    EVT ArgVT = TLI.getValueType(DLayout, I.getArgOperand(0)->getType());
6672    FPClassTest Test = static_cast<FPClassTest>(
6673        cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
6674    MachineFunction &MF = DAG.getMachineFunction();
6675    const Function &F = MF.getFunction();
6676    SDValue Op = getValue(I.getArgOperand(0));
6677    SDNodeFlags Flags;
6678    Flags.setNoFPExcept(
6679        !F.getAttributes().hasFnAttr(llvm::Attribute::StrictFP));
6680    // If ISD::IS_FPCLASS should be expanded, do it right now, because the
6681    // expansion can use illegal types. Expanding early allows these types
6682    // to be legalized prior to selection.
6683    if (!TLI.isOperationLegalOrCustom(ISD::IS_FPCLASS, ArgVT)) {
6684      SDValue Result = TLI.expandIS_FPCLASS(DestVT, Op, Test, Flags, sdl, DAG);
6685      setValue(&I, Result);
6686      return;
6687    }
6688
6689    SDValue Check = DAG.getTargetConstant(Test, sdl, MVT::i32);
6690    SDValue V = DAG.getNode(ISD::IS_FPCLASS, sdl, DestVT, {Op, Check}, Flags);
6691    setValue(&I, V);
6692    return;
6693  }
6694  case Intrinsic::get_fpenv: {
6695    const DataLayout DLayout = DAG.getDataLayout();
6696    EVT EnvVT = TLI.getValueType(DLayout, I.getType());
6697    Align TempAlign = DAG.getEVTAlign(EnvVT);
6698    SDValue Chain = getRoot();
6699    // Use GET_FPENV if it is legal or custom. Otherwise use a memory-based
6700    // node and temporary storage on the stack.
6701    if (TLI.isOperationLegalOrCustom(ISD::GET_FPENV, EnvVT)) {
6702      Res = DAG.getNode(
6703          ISD::GET_FPENV, sdl,
6704          DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6705                        MVT::Other),
6706          Chain);
6707    } else {
6708      SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6709      int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6710      auto MPI =
6711          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6712      MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6713          MPI, MachineMemOperand::MOStore, MemoryLocation::UnknownSize,
6714          TempAlign);
6715      Chain = DAG.getGetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6716      Res = DAG.getLoad(EnvVT, sdl, Chain, Temp, MPI);
6717    }
6718    setValue(&I, Res);
6719    DAG.setRoot(Res.getValue(1));
6720    return;
6721  }
6722  case Intrinsic::set_fpenv: {
6723    const DataLayout DLayout = DAG.getDataLayout();
6724    SDValue Env = getValue(I.getArgOperand(0));
6725    EVT EnvVT = Env.getValueType();
6726    Align TempAlign = DAG.getEVTAlign(EnvVT);
6727    SDValue Chain = getRoot();
6728    // If SET_FPENV is custom or legal, use it. Otherwise load the
6729    // environment from memory.
6730    if (TLI.isOperationLegalOrCustom(ISD::SET_FPENV, EnvVT)) {
6731      Chain = DAG.getNode(ISD::SET_FPENV, sdl, MVT::Other, Chain, Env);
6732    } else {
6733      // Allocate space on the stack, copy the environment bits into it and
6734      // use this memory in SET_FPENV_MEM.
6735      SDValue Temp = DAG.CreateStackTemporary(EnvVT, TempAlign.value());
6736      int SPFI = cast<FrameIndexSDNode>(Temp.getNode())->getIndex();
6737      auto MPI =
6738          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
6739      Chain = DAG.getStore(Chain, sdl, Env, Temp, MPI, TempAlign,
6740                           MachineMemOperand::MOStore);
6741      MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
6742          MPI, MachineMemOperand::MOLoad, MemoryLocation::UnknownSize,
6743          TempAlign);
6744      Chain = DAG.getSetFPEnv(Chain, sdl, Temp, EnvVT, MMO);
6745    }
6746    DAG.setRoot(Chain);
6747    return;
6748  }
6749  case Intrinsic::reset_fpenv:
6750    DAG.setRoot(DAG.getNode(ISD::RESET_FPENV, sdl, MVT::Other, getRoot()));
6751    return;
6752  case Intrinsic::get_fpmode:
6753    Res = DAG.getNode(
6754        ISD::GET_FPMODE, sdl,
6755        DAG.getVTList(TLI.getValueType(DAG.getDataLayout(), I.getType()),
6756                      MVT::Other),
6757        DAG.getRoot());
6758    setValue(&I, Res);
6759    DAG.setRoot(Res.getValue(1));
6760    return;
6761  case Intrinsic::set_fpmode:
6762    Res = DAG.getNode(ISD::SET_FPMODE, sdl, MVT::Other, {DAG.getRoot()},
6763                      getValue(I.getArgOperand(0)));
6764    DAG.setRoot(Res);
6765    return;
6766  case Intrinsic::reset_fpmode: {
6767    Res = DAG.getNode(ISD::RESET_FPMODE, sdl, MVT::Other, getRoot());
6768    DAG.setRoot(Res);
6769    return;
6770  }
6771  case Intrinsic::pcmarker: {
6772    SDValue Tmp = getValue(I.getArgOperand(0));
6773    DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp));
6774    return;
6775  }
6776  case Intrinsic::readcyclecounter: {
6777    SDValue Op = getRoot();
6778    Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl,
6779                      DAG.getVTList(MVT::i64, MVT::Other), Op);
6780    setValue(&I, Res);
6781    DAG.setRoot(Res.getValue(1));
6782    return;
6783  }
6784  case Intrinsic::bitreverse:
6785    setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl,
6786                             getValue(I.getArgOperand(0)).getValueType(),
6787                             getValue(I.getArgOperand(0))));
6788    return;
6789  case Intrinsic::bswap:
6790    setValue(&I, DAG.getNode(ISD::BSWAP, sdl,
6791                             getValue(I.getArgOperand(0)).getValueType(),
6792                             getValue(I.getArgOperand(0))));
6793    return;
6794  case Intrinsic::cttz: {
6795    SDValue Arg = getValue(I.getArgOperand(0));
6796    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6797    EVT Ty = Arg.getValueType();
6798    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF,
6799                             sdl, Ty, Arg));
6800    return;
6801  }
6802  case Intrinsic::ctlz: {
6803    SDValue Arg = getValue(I.getArgOperand(0));
6804    ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1));
6805    EVT Ty = Arg.getValueType();
6806    setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF,
6807                             sdl, Ty, Arg));
6808    return;
6809  }
6810  case Intrinsic::ctpop: {
6811    SDValue Arg = getValue(I.getArgOperand(0));
6812    EVT Ty = Arg.getValueType();
6813    setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg));
6814    return;
6815  }
6816  case Intrinsic::fshl:
6817  case Intrinsic::fshr: {
6818    bool IsFSHL = Intrinsic == Intrinsic::fshl;
6819    SDValue X = getValue(I.getArgOperand(0));
6820    SDValue Y = getValue(I.getArgOperand(1));
6821    SDValue Z = getValue(I.getArgOperand(2));
6822    EVT VT = X.getValueType();
6823
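    // A funnel shift of a value with itself is a rotate: fshl(x, x, z) is
    // rotl(x, z) and fshr(x, x, z) is rotr(x, z), so emit the simpler rotate
    // nodes in that case.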
6824    if (X == Y) {
6825      auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR;
6826      setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z));
6827    } else {
6828      auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR;
6829      setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z));
6830    }
6831    return;
6832  }
6833  case Intrinsic::sadd_sat: {
6834    SDValue Op1 = getValue(I.getArgOperand(0));
6835    SDValue Op2 = getValue(I.getArgOperand(1));
6836    setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6837    return;
6838  }
6839  case Intrinsic::uadd_sat: {
6840    SDValue Op1 = getValue(I.getArgOperand(0));
6841    SDValue Op2 = getValue(I.getArgOperand(1));
6842    setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2));
6843    return;
6844  }
6845  case Intrinsic::ssub_sat: {
6846    SDValue Op1 = getValue(I.getArgOperand(0));
6847    SDValue Op2 = getValue(I.getArgOperand(1));
6848    setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6849    return;
6850  }
6851  case Intrinsic::usub_sat: {
6852    SDValue Op1 = getValue(I.getArgOperand(0));
6853    SDValue Op2 = getValue(I.getArgOperand(1));
6854    setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2));
6855    return;
6856  }
6857  case Intrinsic::sshl_sat: {
6858    SDValue Op1 = getValue(I.getArgOperand(0));
6859    SDValue Op2 = getValue(I.getArgOperand(1));
6860    setValue(&I, DAG.getNode(ISD::SSHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6861    return;
6862  }
6863  case Intrinsic::ushl_sat: {
6864    SDValue Op1 = getValue(I.getArgOperand(0));
6865    SDValue Op2 = getValue(I.getArgOperand(1));
6866    setValue(&I, DAG.getNode(ISD::USHLSAT, sdl, Op1.getValueType(), Op1, Op2));
6867    return;
6868  }
6869  case Intrinsic::smul_fix:
6870  case Intrinsic::umul_fix:
6871  case Intrinsic::smul_fix_sat:
6872  case Intrinsic::umul_fix_sat: {
6873    SDValue Op1 = getValue(I.getArgOperand(0));
6874    SDValue Op2 = getValue(I.getArgOperand(1));
6875    SDValue Op3 = getValue(I.getArgOperand(2));
6876    setValue(&I, DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6877                             Op1.getValueType(), Op1, Op2, Op3));
6878    return;
6879  }
6880  case Intrinsic::sdiv_fix:
6881  case Intrinsic::udiv_fix:
6882  case Intrinsic::sdiv_fix_sat:
6883  case Intrinsic::udiv_fix_sat: {
6884    SDValue Op1 = getValue(I.getArgOperand(0));
6885    SDValue Op2 = getValue(I.getArgOperand(1));
6886    SDValue Op3 = getValue(I.getArgOperand(2));
6887    setValue(&I, expandDivFix(FixedPointIntrinsicToOpcode(Intrinsic), sdl,
6888                              Op1, Op2, Op3, DAG, TLI));
6889    return;
6890  }
6891  case Intrinsic::smax: {
6892    SDValue Op1 = getValue(I.getArgOperand(0));
6893    SDValue Op2 = getValue(I.getArgOperand(1));
6894    setValue(&I, DAG.getNode(ISD::SMAX, sdl, Op1.getValueType(), Op1, Op2));
6895    return;
6896  }
6897  case Intrinsic::smin: {
6898    SDValue Op1 = getValue(I.getArgOperand(0));
6899    SDValue Op2 = getValue(I.getArgOperand(1));
6900    setValue(&I, DAG.getNode(ISD::SMIN, sdl, Op1.getValueType(), Op1, Op2));
6901    return;
6902  }
6903  case Intrinsic::umax: {
6904    SDValue Op1 = getValue(I.getArgOperand(0));
6905    SDValue Op2 = getValue(I.getArgOperand(1));
6906    setValue(&I, DAG.getNode(ISD::UMAX, sdl, Op1.getValueType(), Op1, Op2));
6907    return;
6908  }
6909  case Intrinsic::umin: {
6910    SDValue Op1 = getValue(I.getArgOperand(0));
6911    SDValue Op2 = getValue(I.getArgOperand(1));
6912    setValue(&I, DAG.getNode(ISD::UMIN, sdl, Op1.getValueType(), Op1, Op2));
6913    return;
6914  }
6915  case Intrinsic::abs: {
6916    // TODO: Preserve "int min is poison" arg in SDAG?
6917    SDValue Op1 = getValue(I.getArgOperand(0));
6918    setValue(&I, DAG.getNode(ISD::ABS, sdl, Op1.getValueType(), Op1));
6919    return;
6920  }
6921  case Intrinsic::stacksave: {
6922    SDValue Op = getRoot();
6923    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
6924    Res = DAG.getNode(ISD::STACKSAVE, sdl, DAG.getVTList(VT, MVT::Other), Op);
6925    setValue(&I, Res);
6926    DAG.setRoot(Res.getValue(1));
6927    return;
6928  }
6929  case Intrinsic::stackrestore:
6930    Res = getValue(I.getArgOperand(0));
6931    DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res));
6932    return;
6933  case Intrinsic::get_dynamic_area_offset: {
6934    SDValue Op = getRoot();
6935    EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6936    EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6937    // The result type for @llvm.get.dynamic.area.offset should match the
6938    // target's PtrTy.
6939    if (PtrTy.getFixedSizeInBits() < ResTy.getFixedSizeInBits())
6940      report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset"
6941                         " intrinsic!");
6942    Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy),
6943                      Op);
6944    DAG.setRoot(Op);
6945    setValue(&I, Res);
6946    return;
6947  }
6948  case Intrinsic::stackguard: {
6949    MachineFunction &MF = DAG.getMachineFunction();
6950    const Module &M = *MF.getFunction().getParent();
6951    SDValue Chain = getRoot();
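    // Either emit the target's LOAD_STACK_GUARD pseudo or load the guard value
    // directly from the stack-guard global chosen for this module.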
6952    if (TLI.useLoadStackGuardNode()) {
6953      Res = getLoadStackGuard(DAG, sdl, Chain);
6954    } else {
6955      EVT PtrTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
6956      const Value *Global = TLI.getSDagStackGuard(M);
6957      Align Align = DAG.getDataLayout().getPrefTypeAlign(Global->getType());
6958      Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global),
6959                        MachinePointerInfo(Global, 0), Align,
6960                        MachineMemOperand::MOVolatile);
6961    }
6962    if (TLI.useStackGuardXorFP())
6963      Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
6964    DAG.setRoot(Chain);
6965    setValue(&I, Res);
6966    return;
6967  }
6968  case Intrinsic::stackprotector: {
6969    // Emit code into the DAG to store the stack guard onto the stack.
6970    MachineFunction &MF = DAG.getMachineFunction();
6971    MachineFrameInfo &MFI = MF.getFrameInfo();
6972    SDValue Src, Chain = getRoot();
6973
6974    if (TLI.useLoadStackGuardNode())
6975      Src = getLoadStackGuard(DAG, sdl, Chain);
6976    else
6977      Src = getValue(I.getArgOperand(0));   // The guard's value.
6978
6979    AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1));
6980
6981    int FI = FuncInfo.StaticAllocaMap[Slot];
6982    MFI.setStackProtectorIndex(FI);
6983    EVT PtrTy = TLI.getFrameIndexTy(DAG.getDataLayout());
6984
6985    SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
6986
6987    // Store the stack protector onto the stack.
6988    Res = DAG.getStore(
6989        Chain, sdl, Src, FIN,
6990        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
6991        MaybeAlign(), MachineMemOperand::MOVolatile);
6992    setValue(&I, Res);
6993    DAG.setRoot(Res);
6994    return;
6995  }
6996  case Intrinsic::objectsize:
6997    llvm_unreachable("llvm.objectsize.* should have been lowered already");
6998
6999  case Intrinsic::is_constant:
7000    llvm_unreachable("llvm.is.constant.* should have been lowered already");
7001
7002  case Intrinsic::annotation:
7003  case Intrinsic::ptr_annotation:
7004  case Intrinsic::launder_invariant_group:
7005  case Intrinsic::strip_invariant_group:
7006    // Drop the intrinsic, but forward the value
7007    setValue(&I, getValue(I.getOperand(0)));
7008    return;
7009
7010  case Intrinsic::assume:
7011  case Intrinsic::experimental_noalias_scope_decl:
7012  case Intrinsic::var_annotation:
7013  case Intrinsic::sideeffect:
7014    // Discard annotate attributes, noalias scope declarations, assumptions, and
7015    // artificial side-effects.
7016    return;
7017
7018  case Intrinsic::codeview_annotation: {
7019    // Emit a label associated with this metadata.
7020    MachineFunction &MF = DAG.getMachineFunction();
7021    MCSymbol *Label =
7022        MF.getMMI().getContext().createTempSymbol("annotation", true);
7023    Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata();
7024    MF.addCodeViewAnnotation(Label, cast<MDNode>(MD));
7025    Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label);
7026    DAG.setRoot(Res);
7027    return;
7028  }
7029
7030  case Intrinsic::init_trampoline: {
7031    const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts());
7032
7033    SDValue Ops[6];
7034    Ops[0] = getRoot();
7035    Ops[1] = getValue(I.getArgOperand(0));
7036    Ops[2] = getValue(I.getArgOperand(1));
7037    Ops[3] = getValue(I.getArgOperand(2));
7038    Ops[4] = DAG.getSrcValue(I.getArgOperand(0));
7039    Ops[5] = DAG.getSrcValue(F);
7040
7041    Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops);
7042
7043    DAG.setRoot(Res);
7044    return;
7045  }
7046  case Intrinsic::adjust_trampoline:
7047    setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl,
7048                             TLI.getPointerTy(DAG.getDataLayout()),
7049                             getValue(I.getArgOperand(0))));
7050    return;
7051  case Intrinsic::gcroot: {
7052    assert(DAG.getMachineFunction().getFunction().hasGC() &&
7053           "only valid in functions with gc specified, enforced by Verifier");
7054    assert(GFI && "implied by previous");
7055    const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
7056    const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
7057
7058    FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());
7059    GFI->addStackRoot(FI->getIndex(), TypeMap);
7060    return;
7061  }
7062  case Intrinsic::gcread:
7063  case Intrinsic::gcwrite:
7064    llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!");
7065  case Intrinsic::get_rounding:
7066    Res = DAG.getNode(ISD::GET_ROUNDING, sdl, {MVT::i32, MVT::Other}, getRoot());
7067    setValue(&I, Res);
7068    DAG.setRoot(Res.getValue(1));
7069    return;
7070
7071  case Intrinsic::expect:
7072    // Just replace __builtin_expect(exp, c) with EXP.
7073    setValue(&I, getValue(I.getArgOperand(0)));
7074    return;
7075
7076  case Intrinsic::ubsantrap:
7077  case Intrinsic::debugtrap:
7078  case Intrinsic::trap: {
7079    StringRef TrapFuncName =
7080        I.getAttributes().getFnAttr("trap-func-name").getValueAsString();
7081    if (TrapFuncName.empty()) {
7082      switch (Intrinsic) {
7083      case Intrinsic::trap:
7084        DAG.setRoot(DAG.getNode(ISD::TRAP, sdl, MVT::Other, getRoot()));
7085        break;
7086      case Intrinsic::debugtrap:
7087        DAG.setRoot(DAG.getNode(ISD::DEBUGTRAP, sdl, MVT::Other, getRoot()));
7088        break;
7089      case Intrinsic::ubsantrap:
7090        DAG.setRoot(DAG.getNode(
7091            ISD::UBSANTRAP, sdl, MVT::Other, getRoot(),
7092            DAG.getTargetConstant(
7093                cast<ConstantInt>(I.getArgOperand(0))->getZExtValue(), sdl,
7094                MVT::i32)));
7095        break;
7096      default: llvm_unreachable("unknown trap intrinsic");
7097      }
7098      return;
7099    }
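    // A non-empty "trap-func-name" attribute requests lowering the trap as a
    // call to that symbol instead of a trap node; for ubsantrap the check-kind
    // immediate is passed as the call's only argument.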
7100    TargetLowering::ArgListTy Args;
7101    if (Intrinsic == Intrinsic::ubsantrap) {
7102      Args.push_back(TargetLoweringBase::ArgListEntry());
7103      Args[0].Val = I.getArgOperand(0);
7104      Args[0].Node = getValue(Args[0].Val);
7105      Args[0].Ty = Args[0].Val->getType();
7106    }
7107
7108    TargetLowering::CallLoweringInfo CLI(DAG);
7109    CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee(
7110        CallingConv::C, I.getType(),
7111        DAG.getExternalSymbol(TrapFuncName.data(),
7112                              TLI.getPointerTy(DAG.getDataLayout())),
7113        std::move(Args));
7114
7115    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
7116    DAG.setRoot(Result.second);
7117    return;
7118  }
7119
7120  case Intrinsic::uadd_with_overflow:
7121  case Intrinsic::sadd_with_overflow:
7122  case Intrinsic::usub_with_overflow:
7123  case Intrinsic::ssub_with_overflow:
7124  case Intrinsic::umul_with_overflow:
7125  case Intrinsic::smul_with_overflow: {
7126    ISD::NodeType Op;
7127    switch (Intrinsic) {
7128    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7129    case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break;
7130    case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break;
7131    case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break;
7132    case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break;
7133    case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break;
7134    case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break;
7135    }
7136    SDValue Op1 = getValue(I.getArgOperand(0));
7137    SDValue Op2 = getValue(I.getArgOperand(1));
7138
7139    EVT ResultVT = Op1.getValueType();
7140    EVT OverflowVT = MVT::i1;
7141    if (ResultVT.isVector())
7142      OverflowVT = EVT::getVectorVT(
7143          *Context, OverflowVT, ResultVT.getVectorElementCount());
7144
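    // The operation returns a pair: the arithmetic result and the overflow
    // flag. For vector operands the overflow part is a vector of i1 with the
    // same element count as the result.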
7145    SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT);
7146    setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2));
7147    return;
7148  }
7149  case Intrinsic::prefetch: {
7150    SDValue Ops[5];
7151    unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7152    auto Flags = rw == 0 ? MachineMemOperand::MOLoad : MachineMemOperand::MOStore;
7153    Ops[0] = DAG.getRoot();
7154    Ops[1] = getValue(I.getArgOperand(0));
7155    Ops[2] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(1)), sdl,
7156                                   MVT::i32);
7157    Ops[3] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(2)), sdl,
7158                                   MVT::i32);
7159    Ops[4] = DAG.getTargetConstant(*cast<ConstantInt>(I.getArgOperand(3)), sdl,
7160                                   MVT::i32);
7161    SDValue Result = DAG.getMemIntrinsicNode(
7162        ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops,
7163        EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)),
7164        /* align */ std::nullopt, Flags);
7165
7166    // Chain the prefetch in parallel with any pending loads, to stay out of
7167    // the way of later optimizations.
7168    PendingLoads.push_back(Result);
7169    Result = getRoot();
7170    DAG.setRoot(Result);
7171    return;
7172  }
7173  case Intrinsic::lifetime_start:
7174  case Intrinsic::lifetime_end: {
7175    bool IsStart = (Intrinsic == Intrinsic::lifetime_start);
7176    // Stack coloring is not enabled at -O0; discard region information.
7177    if (TM.getOptLevel() == CodeGenOptLevel::None)
7178      return;
7179
7180    const int64_t ObjectSize =
7181        cast<ConstantInt>(I.getArgOperand(0))->getSExtValue();
7182    Value *const ObjectPtr = I.getArgOperand(1);
7183    SmallVector<const Value *, 4> Allocas;
7184    getUnderlyingObjects(ObjectPtr, Allocas);
7185
7186    for (const Value *Alloca : Allocas) {
7187      const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(Alloca);
7188
7189      // Could not find an Alloca.
7190      if (!LifetimeObject)
7191        continue;
7192
7193      // First check that the Alloca is static, otherwise it won't have a
7194      // valid frame index.
7195      auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject);
7196      if (SI == FuncInfo.StaticAllocaMap.end())
7197        return;
7198
7199      const int FrameIndex = SI->second;
7200      int64_t Offset;
7201      if (GetPointerBaseWithConstantOffset(
7202              ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject)
7203        Offset = -1; // Cannot determine offset from alloca to lifetime object.
7204      Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize,
7205                                Offset);
7206      DAG.setRoot(Res);
7207    }
7208    return;
7209  }
7210  case Intrinsic::pseudoprobe: {
7211    auto Guid = cast<ConstantInt>(I.getArgOperand(0))->getZExtValue();
7212    auto Index = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
7213    auto Attr = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
7214    Res = DAG.getPseudoProbeNode(sdl, getRoot(), Guid, Index, Attr);
7215    DAG.setRoot(Res);
7216    return;
7217  }
7218  case Intrinsic::invariant_start:
7219    // Discard region information.
7220    setValue(&I,
7221             DAG.getUNDEF(TLI.getValueType(DAG.getDataLayout(), I.getType())));
7222    return;
7223  case Intrinsic::invariant_end:
7224    // Discard region information.
7225    return;
7226  case Intrinsic::clear_cache:
7227    // FunctionName may be null.
7228    if (const char *FunctionName = TLI.getClearCacheBuiltinName())
7229      lowerCallToExternalSymbol(I, FunctionName);
7230    return;
7231  case Intrinsic::donothing:
7232  case Intrinsic::seh_try_begin:
7233  case Intrinsic::seh_scope_begin:
7234  case Intrinsic::seh_try_end:
7235  case Intrinsic::seh_scope_end:
7236    // ignore
7237    return;
7238  case Intrinsic::experimental_stackmap:
7239    visitStackmap(I);
7240    return;
7241  case Intrinsic::experimental_patchpoint_void:
7242  case Intrinsic::experimental_patchpoint_i64:
7243    visitPatchpoint(I);
7244    return;
7245  case Intrinsic::experimental_gc_statepoint:
7246    LowerStatepoint(cast<GCStatepointInst>(I));
7247    return;
7248  case Intrinsic::experimental_gc_result:
7249    visitGCResult(cast<GCResultInst>(I));
7250    return;
7251  case Intrinsic::experimental_gc_relocate:
7252    visitGCRelocate(cast<GCRelocateInst>(I));
7253    return;
7254  case Intrinsic::instrprof_cover:
7255    llvm_unreachable("instrprof failed to lower a cover");
7256  case Intrinsic::instrprof_increment:
7257    llvm_unreachable("instrprof failed to lower an increment");
7258  case Intrinsic::instrprof_timestamp:
7259    llvm_unreachable("instrprof failed to lower a timestamp");
7260  case Intrinsic::instrprof_value_profile:
7261    llvm_unreachable("instrprof failed to lower a value profiling call");
7262  case Intrinsic::instrprof_mcdc_parameters:
7263    llvm_unreachable("instrprof failed to lower mcdc parameters");
7264  case Intrinsic::instrprof_mcdc_tvbitmap_update:
7265    llvm_unreachable("instrprof failed to lower an mcdc tvbitmap update");
7266  case Intrinsic::instrprof_mcdc_condbitmap_update:
7267    llvm_unreachable("instrprof failed to lower an mcdc condbitmap update");
7268  case Intrinsic::localescape: {
7269    MachineFunction &MF = DAG.getMachineFunction();
7270    const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();
7271
7272    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
7273    // is the same on all targets.
7274    for (unsigned Idx = 0, E = I.arg_size(); Idx < E; ++Idx) {
7275      Value *Arg = I.getArgOperand(Idx)->stripPointerCasts();
7276      if (isa<ConstantPointerNull>(Arg))
7277        continue; // Skip null pointers. They represent a hole in index space.
7278      AllocaInst *Slot = cast<AllocaInst>(Arg);
7279      assert(FuncInfo.StaticAllocaMap.count(Slot) &&
7280             "can only escape static allocas");
7281      int FI = FuncInfo.StaticAllocaMap[Slot];
7282      MCSymbol *FrameAllocSym =
7283          MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7284              GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx);
7285      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl,
7286              TII->get(TargetOpcode::LOCAL_ESCAPE))
7287          .addSym(FrameAllocSym)
7288          .addFrameIndex(FI);
7289    }
7290
7291    return;
7292  }
7293
7294  case Intrinsic::localrecover: {
7295    // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx)
7296    MachineFunction &MF = DAG.getMachineFunction();
7297
7298    // Get the symbol that defines the frame offset.
7299    auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts());
7300    auto *Idx = cast<ConstantInt>(I.getArgOperand(2));
7301    unsigned IdxVal =
7302        unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max()));
7303    MCSymbol *FrameAllocSym =
7304        MF.getMMI().getContext().getOrCreateFrameAllocSymbol(
7305            GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal);
7306
7307    Value *FP = I.getArgOperand(1);
7308    SDValue FPVal = getValue(FP);
7309    EVT PtrVT = FPVal.getValueType();
7310
7311    // Create an MCSymbol for the label to avoid any target lowering
7312    // that would make this PC relative.
7313    SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT);
7314    SDValue OffsetVal =
7315        DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym);
7316
7317    // Add the offset to the FP.
7318    SDValue Add = DAG.getMemBasePlusOffset(FPVal, OffsetVal, sdl);
7319    setValue(&I, Add);
7320
7321    return;
7322  }
7323
7324  case Intrinsic::eh_exceptionpointer:
7325  case Intrinsic::eh_exceptioncode: {
7326    // Get the exception pointer vreg, copy from it, and resize it to fit.
7327    const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0));
7328    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
7329    const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT);
7330    unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC);
7331    SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), sdl, VReg, PtrVT);
7332    if (Intrinsic == Intrinsic::eh_exceptioncode)
7333      N = DAG.getZExtOrTrunc(N, sdl, MVT::i32);
7334    setValue(&I, N);
7335    return;
7336  }
7337  case Intrinsic::xray_customevent: {
7338    // Here we want to make sure that the intrinsic behaves as if it has a
7339    // specific calling convention.
7340    const auto &Triple = DAG.getTarget().getTargetTriple();
7341    if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7342      return;
7343
7344    SmallVector<SDValue, 8> Ops;
7345
7346    // We want to say that we always want the arguments in registers.
7347    SDValue LogEntryVal = getValue(I.getArgOperand(0));
7348    SDValue StrSizeVal = getValue(I.getArgOperand(1));
7349    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7350    SDValue Chain = getRoot();
7351    Ops.push_back(LogEntryVal);
7352    Ops.push_back(StrSizeVal);
7353    Ops.push_back(Chain);
7354
7355    // We need to enforce the calling convention for the callsite so that
7356    // argument ordering is enforced correctly, and so that register allocation
7357    // can see that some registers may be clobbered and have to be preserved
7358    // across calls to the intrinsic.
7359    MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
7360                                           sdl, NodeTys, Ops);
7361    SDValue patchableNode = SDValue(MN, 0);
7362    DAG.setRoot(patchableNode);
7363    setValue(&I, patchableNode);
7364    return;
7365  }
7366  case Intrinsic::xray_typedevent: {
7367    // Here we want to make sure that the intrinsic behaves as if it has a
7368    // specific calling convention.
7369    const auto &Triple = DAG.getTarget().getTargetTriple();
7370    if (!Triple.isAArch64(64) && Triple.getArch() != Triple::x86_64)
7371      return;
7372
7373    SmallVector<SDValue, 8> Ops;
7374
7375    // We want to say that we always want the arguments in registers.
7376    // It's unclear to me how manipulating the selection DAG here forces callers
7377    // to provide arguments in registers instead of on the stack.
7378    SDValue LogTypeId = getValue(I.getArgOperand(0));
7379    SDValue LogEntryVal = getValue(I.getArgOperand(1));
7380    SDValue StrSizeVal = getValue(I.getArgOperand(2));
7381    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
7382    SDValue Chain = getRoot();
7383    Ops.push_back(LogTypeId);
7384    Ops.push_back(LogEntryVal);
7385    Ops.push_back(StrSizeVal);
7386    Ops.push_back(Chain);
7387
7388    // We need to enforce the calling convention for the callsite so that
7389    // argument ordering is enforced correctly, and so that register allocation
7390    // can see that some registers may be clobbered and have to be preserved
7391    // across calls to the intrinsic.
7392    MachineSDNode *MN = DAG.getMachineNode(
7393        TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, sdl, NodeTys, Ops);
7394    SDValue patchableNode = SDValue(MN, 0);
7395    DAG.setRoot(patchableNode);
7396    setValue(&I, patchableNode);
7397    return;
7398  }
7399  case Intrinsic::experimental_deoptimize:
7400    LowerDeoptimizeCall(&I);
7401    return;
7402  case Intrinsic::experimental_stepvector:
7403    visitStepVector(I);
7404    return;
7405  case Intrinsic::vector_reduce_fadd:
7406  case Intrinsic::vector_reduce_fmul:
7407  case Intrinsic::vector_reduce_add:
7408  case Intrinsic::vector_reduce_mul:
7409  case Intrinsic::vector_reduce_and:
7410  case Intrinsic::vector_reduce_or:
7411  case Intrinsic::vector_reduce_xor:
7412  case Intrinsic::vector_reduce_smax:
7413  case Intrinsic::vector_reduce_smin:
7414  case Intrinsic::vector_reduce_umax:
7415  case Intrinsic::vector_reduce_umin:
7416  case Intrinsic::vector_reduce_fmax:
7417  case Intrinsic::vector_reduce_fmin:
7418  case Intrinsic::vector_reduce_fmaximum:
7419  case Intrinsic::vector_reduce_fminimum:
7420    visitVectorReduce(I, Intrinsic);
7421    return;
7422
7423  case Intrinsic::icall_branch_funnel: {
7424    SmallVector<SDValue, 16> Ops;
7425    Ops.push_back(getValue(I.getArgOperand(0)));
7426
7427    int64_t Offset;
7428    auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7429        I.getArgOperand(1), Offset, DAG.getDataLayout()));
7430    if (!Base)
7431      report_fatal_error(
7432          "llvm.icall.branch.funnel operand must be a GlobalValue");
7433    Ops.push_back(DAG.getTargetGlobalAddress(Base, sdl, MVT::i64, 0));
7434
7435    struct BranchFunnelTarget {
7436      int64_t Offset;
7437      SDValue Target;
7438    };
7439    SmallVector<BranchFunnelTarget, 8> Targets;
7440
7441    for (unsigned Op = 1, N = I.arg_size(); Op != N; Op += 2) {
7442      auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset(
7443          I.getArgOperand(Op), Offset, DAG.getDataLayout()));
7444      if (ElemBase != Base)
7445        report_fatal_error("all llvm.icall.branch.funnel operands must refer "
7446                           "to the same GlobalValue");
7447
7448      SDValue Val = getValue(I.getArgOperand(Op + 1));
7449      auto *GA = dyn_cast<GlobalAddressSDNode>(Val);
7450      if (!GA)
7451        report_fatal_error(
7452            "llvm.icall.branch.funnel operand must be a GlobalValue");
7453      Targets.push_back({Offset, DAG.getTargetGlobalAddress(
7454                                     GA->getGlobal(), sdl, Val.getValueType(),
7455                                     GA->getOffset())});
7456    }
7457    llvm::sort(Targets,
7458               [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) {
7459                 return T1.Offset < T2.Offset;
7460               });
7461
7462    for (auto &T : Targets) {
7463      Ops.push_back(DAG.getTargetConstant(T.Offset, sdl, MVT::i32));
7464      Ops.push_back(T.Target);
7465    }
7466
7467    Ops.push_back(DAG.getRoot()); // Chain
7468    SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, sdl,
7469                                 MVT::Other, Ops),
7470              0);
7471    DAG.setRoot(N);
7472    setValue(&I, N);
7473    HasTailCall = true;
7474    return;
7475  }
7476
7477  case Intrinsic::wasm_landingpad_index:
7478    // Information this intrinsic contained has been transferred to
7479    // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely
7480    // delete it now.
7481    return;
7482
7483  case Intrinsic::aarch64_settag:
7484  case Intrinsic::aarch64_settag_zero: {
7485    const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
7486    bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero;
7487    SDValue Val = TSI.EmitTargetCodeForSetTag(
7488        DAG, sdl, getRoot(), getValue(I.getArgOperand(0)),
7489        getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)),
7490        ZeroMemory);
7491    DAG.setRoot(Val);
7492    setValue(&I, Val);
7493    return;
7494  }
7495  case Intrinsic::amdgcn_cs_chain: {
7496    assert(I.arg_size() == 5 && "Additional args not supported yet");
7497    assert(cast<ConstantInt>(I.getOperand(4))->isZero() &&
7498           "Non-zero flags not supported yet");
7499
7500    // At this point we don't care if it's amdgpu_cs_chain or
7501    // amdgpu_cs_chain_preserve.
7502    CallingConv::ID CC = CallingConv::AMDGPU_CS_Chain;
7503
7504    Type *RetTy = I.getType();
7505    assert(RetTy->isVoidTy() && "Should not return");
7506
7507    SDValue Callee = getValue(I.getOperand(0));
7508
7509    // We only have 2 actual args: one for the SGPRs and one for the VGPRs.
7510    // We'll also tack the value of the EXEC mask at the end.
7511    TargetLowering::ArgListTy Args;
7512    Args.reserve(3);
7513
7514    for (unsigned Idx : {2, 3, 1}) {
7515      TargetLowering::ArgListEntry Arg;
7516      Arg.Node = getValue(I.getOperand(Idx));
7517      Arg.Ty = I.getOperand(Idx)->getType();
7518      Arg.setAttributes(&I, Idx);
7519      Args.push_back(Arg);
7520    }
7521
7522    assert(Args[0].IsInReg && "SGPR args should be marked inreg");
7523    assert(!Args[1].IsInReg && "VGPR args should not be marked inreg");
7524    Args[2].IsInReg = true; // EXEC should be inreg
7525
7526    TargetLowering::CallLoweringInfo CLI(DAG);
7527    CLI.setDebugLoc(getCurSDLoc())
7528        .setChain(getRoot())
7529        .setCallee(CC, RetTy, Callee, std::move(Args))
7530        .setNoReturn(true)
7531        .setTailCall(true)
7532        .setConvergent(I.isConvergent());
7533    CLI.CB = &I;
7534    std::pair<SDValue, SDValue> Result =
7535        lowerInvokable(CLI, /*EHPadBB*/ nullptr);
7536    (void)Result;
7537    assert(!Result.first.getNode() && !Result.second.getNode() &&
7538           "Should've lowered as tail call");
7539
7540    HasTailCall = true;
7541    return;
7542  }
7543  case Intrinsic::ptrmask: {
7544    SDValue Ptr = getValue(I.getOperand(0));
7545    SDValue Mask = getValue(I.getOperand(1));
7546
7547    EVT PtrVT = Ptr.getValueType();
7548    assert(PtrVT == Mask.getValueType() &&
7549           "Pointers with different index type are not supported by SDAG");
7550    setValue(&I, DAG.getNode(ISD::AND, sdl, PtrVT, Ptr, Mask));
7551    return;
7552  }
7553  case Intrinsic::threadlocal_address: {
7554    setValue(&I, getValue(I.getOperand(0)));
7555    return;
7556  }
7557  case Intrinsic::get_active_lane_mask: {
7558    EVT CCVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7559    SDValue Index = getValue(I.getOperand(0));
7560    EVT ElementVT = Index.getValueType();
7561
7562    if (!TLI.shouldExpandGetActiveLaneMask(CCVT, ElementVT)) {
7563      visitTargetIntrinsic(I, Intrinsic);
7564      return;
7565    }
7566
7567    SDValue TripCount = getValue(I.getOperand(1));
7568    EVT VecTy = EVT::getVectorVT(*DAG.getContext(), ElementVT,
7569                                 CCVT.getVectorElementCount());
7570
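    // Expand the mask so that lane i is active iff (Index + i) < TripCount,
    // using a saturating add so the induction values cannot wrap back below
    // the trip count.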
7571    SDValue VectorIndex = DAG.getSplat(VecTy, sdl, Index);
7572    SDValue VectorTripCount = DAG.getSplat(VecTy, sdl, TripCount);
7573    SDValue VectorStep = DAG.getStepVector(sdl, VecTy);
7574    SDValue VectorInduction = DAG.getNode(
7575        ISD::UADDSAT, sdl, VecTy, VectorIndex, VectorStep);
7576    SDValue SetCC = DAG.getSetCC(sdl, CCVT, VectorInduction,
7577                                 VectorTripCount, ISD::CondCode::SETULT);
7578    setValue(&I, SetCC);
7579    return;
7580  }
7581  case Intrinsic::experimental_get_vector_length: {
7582    assert(cast<ConstantInt>(I.getOperand(1))->getSExtValue() > 0 &&
7583           "Expected positive VF");
7584    unsigned VF = cast<ConstantInt>(I.getOperand(1))->getZExtValue();
7585    bool IsScalable = cast<ConstantInt>(I.getOperand(2))->isOne();
7586
7587    SDValue Count = getValue(I.getOperand(0));
7588    EVT CountVT = Count.getValueType();
7589
7590    if (!TLI.shouldExpandGetVectorLength(CountVT, VF, IsScalable)) {
7591      visitTargetIntrinsic(I, Intrinsic);
7592      return;
7593    }
7594
7595    // Expand to a umin between the trip count and the maximum elements the type
7596    // can hold.
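    // Roughly: EVL = umin(zext(Count), VF * (IsScalable ? vscale : 1)).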
7597    EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7598
7599    // Extend the trip count to at least the result VT.
7600    if (CountVT.bitsLT(VT)) {
7601      Count = DAG.getNode(ISD::ZERO_EXTEND, sdl, VT, Count);
7602      CountVT = VT;
7603    }
7604
7605    SDValue MaxEVL = DAG.getElementCount(sdl, CountVT,
7606                                         ElementCount::get(VF, IsScalable));
7607
7608    SDValue UMin = DAG.getNode(ISD::UMIN, sdl, CountVT, Count, MaxEVL);
7609    // Clip to the result type if needed.
7610    SDValue Trunc = DAG.getNode(ISD::TRUNCATE, sdl, VT, UMin);
7611
7612    setValue(&I, Trunc);
7613    return;
7614  }
7615  case Intrinsic::experimental_cttz_elts: {
7616    auto DL = getCurSDLoc();
7617    SDValue Op = getValue(I.getOperand(0));
7618    EVT OpVT = Op.getValueType();
7619
7620    if (!TLI.shouldExpandCttzElements(OpVT)) {
7621      visitTargetIntrinsic(I, Intrinsic);
7622      return;
7623    }
7624
7625    if (OpVT.getScalarType() != MVT::i1) {
7626      // Compare the input elements to zero; we count trailing zeros of that mask.
7627      SDValue AllZero = DAG.getConstant(0, DL, OpVT);
7628      OpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
7629                              OpVT.getVectorElementCount());
7630      Op = DAG.getSetCC(DL, OpVT, Op, AllZero, ISD::SETNE);
7631    }
7632
7633    // Find the smallest "sensible" element type to use for the expansion.
7634    ConstantRange CR(
7635        APInt(64, OpVT.getVectorElementCount().getKnownMinValue()));
7636    if (OpVT.isScalableVT())
7637      CR = CR.umul_sat(getVScaleRange(I.getCaller(), 64));
7638
7639    // If the zero-is-poison flag is set, we can assume the upper limit
7640    // of the result is VF-1.
7641    if (!cast<ConstantSDNode>(getValue(I.getOperand(1)))->isZero())
7642      CR = CR.subtract(APInt(64, 1));
7643
7644    unsigned EltWidth = I.getType()->getScalarSizeInBits();
7645    EltWidth = std::min(EltWidth, (unsigned)CR.getActiveBits());
7646    EltWidth = std::max(llvm::bit_ceil(EltWidth), (unsigned)8);
7647
7648    MVT NewEltTy = MVT::getIntegerVT(EltWidth);
7649
7650    // Create the new vector type & get the vector length
7651    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), NewEltTy,
7652                                 OpVT.getVectorElementCount());
7653
7654    SDValue VL =
7655        DAG.getElementCount(DL, NewEltTy, OpVT.getVectorElementCount());
7656
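    // Compute VL - max over lanes of ((VL - lane) * Mask[lane]): the reversed
    // step vector maps earlier lanes to larger values, so after masking out
    // inactive lanes the umax picks out the first active lane. Subtracting the
    // max from VL then yields its index, or VL if no lane is active.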
7657    SDValue StepVec = DAG.getStepVector(DL, NewVT);
7658    SDValue SplatVL = DAG.getSplat(NewVT, DL, VL);
7659    SDValue StepVL = DAG.getNode(ISD::SUB, DL, NewVT, SplatVL, StepVec);
7660    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, NewVT, Op);
7661    SDValue And = DAG.getNode(ISD::AND, DL, NewVT, StepVL, Ext);
7662    SDValue Max = DAG.getNode(ISD::VECREDUCE_UMAX, DL, NewEltTy, And);
7663    SDValue Sub = DAG.getNode(ISD::SUB, DL, NewEltTy, VL, Max);
7664
7665    EVT RetTy = TLI.getValueType(DAG.getDataLayout(), I.getType());
7666    SDValue Ret = DAG.getZExtOrTrunc(Sub, DL, RetTy);
7667
7668    setValue(&I, Ret);
7669    return;
7670  }
7671  case Intrinsic::vector_insert: {
7672    SDValue Vec = getValue(I.getOperand(0));
7673    SDValue SubVec = getValue(I.getOperand(1));
7674    SDValue Index = getValue(I.getOperand(2));
7675
7676    // The intrinsic's index type is i64, but the SDNode requires an index type
7677    // suitable for the target. Convert the index as required.
7678    MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7679    if (Index.getValueType() != VectorIdxTy)
7680      Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
7681
7682    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7683    setValue(&I, DAG.getNode(ISD::INSERT_SUBVECTOR, sdl, ResultVT, Vec, SubVec,
7684                             Index));
7685    return;
7686  }
7687  case Intrinsic::vector_extract: {
7688    SDValue Vec = getValue(I.getOperand(0));
7689    SDValue Index = getValue(I.getOperand(1));
7690    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
7691
7692    // The intrinsic's index type is i64, but the SDNode requires an index type
7693    // suitable for the target. Convert the index as required.
7694    MVT VectorIdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
7695    if (Index.getValueType() != VectorIdxTy)
7696      Index = DAG.getVectorIdxConstant(Index->getAsZExtVal(), sdl);
7697
7698    setValue(&I,
7699             DAG.getNode(ISD::EXTRACT_SUBVECTOR, sdl, ResultVT, Vec, Index));
7700    return;
7701  }
7702  case Intrinsic::experimental_vector_reverse:
7703    visitVectorReverse(I);
7704    return;
7705  case Intrinsic::experimental_vector_splice:
7706    visitVectorSplice(I);
7707    return;
7708  case Intrinsic::callbr_landingpad:
7709    visitCallBrLandingPad(I);
7710    return;
7711  case Intrinsic::experimental_vector_interleave2:
7712    visitVectorInterleave(I);
7713    return;
7714  case Intrinsic::experimental_vector_deinterleave2:
7715    visitVectorDeinterleave(I);
7716    return;
7717  }
7718}
7719
7720void SelectionDAGBuilder::visitConstrainedFPIntrinsic(
7721    const ConstrainedFPIntrinsic &FPI) {
7722  SDLoc sdl = getCurSDLoc();
7723
7724  // We do not need to serialize constrained FP intrinsics against
7725  // each other or against (nonvolatile) loads, so they can be
7726  // chained like loads.
7727  SDValue Chain = DAG.getRoot();
7728  SmallVector<SDValue, 4> Opers;
7729  Opers.push_back(Chain);
7730  if (FPI.isUnaryOp()) {
7731    Opers.push_back(getValue(FPI.getArgOperand(0)));
7732  } else if (FPI.isTernaryOp()) {
7733    Opers.push_back(getValue(FPI.getArgOperand(0)));
7734    Opers.push_back(getValue(FPI.getArgOperand(1)));
7735    Opers.push_back(getValue(FPI.getArgOperand(2)));
7736  } else {
7737    Opers.push_back(getValue(FPI.getArgOperand(0)));
7738    Opers.push_back(getValue(FPI.getArgOperand(1)));
7739  }
7740
7741  auto pushOutChain = [this](SDValue Result, fp::ExceptionBehavior EB) {
7742    assert(Result.getNode()->getNumValues() == 2);
7743
7744    // Push node to the appropriate list so that future instructions can be
7745    // chained up correctly.
7746    SDValue OutChain = Result.getValue(1);
7747    switch (EB) {
7748    case fp::ExceptionBehavior::ebIgnore:
7749      // The only reason why ebIgnore nodes still need to be chained is that
7750      // they might depend on the current rounding mode, and therefore must
7751      // not be moved across instructions that may change that mode.
7752      [[fallthrough]];
7753    case fp::ExceptionBehavior::ebMayTrap:
7754      // These must not be moved across calls or instructions that may change
7755      // floating-point exception masks.
7756      PendingConstrainedFP.push_back(OutChain);
7757      break;
7758    case fp::ExceptionBehavior::ebStrict:
7759      // These must not be moved across calls or instructions that may change
7760      // floating-point exception masks or read floating-point exception flags.
7761      // In addition, they cannot be optimized out even if unused.
7762      PendingConstrainedFPStrict.push_back(OutChain);
7763      break;
7764    }
7765  };
7766
7767  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7768  EVT VT = TLI.getValueType(DAG.getDataLayout(), FPI.getType());
7769  SDVTList VTs = DAG.getVTList(VT, MVT::Other);
7770  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
7771
7772  SDNodeFlags Flags;
7773  if (EB == fp::ExceptionBehavior::ebIgnore)
7774    Flags.setNoFPExcept(true);
7775
7776  if (auto *FPOp = dyn_cast<FPMathOperator>(&FPI))
7777    Flags.copyFMF(*FPOp);
7778
7779  unsigned Opcode;
7780  switch (FPI.getIntrinsicID()) {
7781  default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
7782#define DAG_INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC, DAGN)               \
7783  case Intrinsic::INTRINSIC:                                                   \
7784    Opcode = ISD::STRICT_##DAGN;                                               \
7785    break;
7786#include "llvm/IR/ConstrainedOps.def"
7787  case Intrinsic::experimental_constrained_fmuladd: {
7788    Opcode = ISD::STRICT_FMA;
7789    // Break fmuladd into fmul and fadd.
7790    if (TM.Options.AllowFPOpFusion == FPOpFusion::Strict ||
7791        !TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), VT)) {
7792      Opers.pop_back();
7793      SDValue Mul = DAG.getNode(ISD::STRICT_FMUL, sdl, VTs, Opers, Flags);
7794      pushOutChain(Mul, EB);
7795      Opcode = ISD::STRICT_FADD;
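      // Rebuild the operand list for the add: the multiply's output chain,
      // the product, and the original addend.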
7796      Opers.clear();
7797      Opers.push_back(Mul.getValue(1));
7798      Opers.push_back(Mul.getValue(0));
7799      Opers.push_back(getValue(FPI.getArgOperand(2)));
7800    }
7801    break;
7802  }
7803  }
7804
7805  // A few strict DAG nodes carry additional operands that are not
7806  // set up by the default code above.
7807  switch (Opcode) {
7808  default: break;
7809  case ISD::STRICT_FP_ROUND:
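    // STRICT_FP_ROUND carries a trailing flag operand; 0 means the result is
    // not known to be an exact representation of the input.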
7810    Opers.push_back(
7811        DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())));
7812    break;
7813  case ISD::STRICT_FSETCC:
7814  case ISD::STRICT_FSETCCS: {
7815    auto *FPCmp = dyn_cast<ConstrainedFPCmpIntrinsic>(&FPI);
7816    ISD::CondCode Condition = getFCmpCondCode(FPCmp->getPredicate());
7817    if (TM.Options.NoNaNsFPMath)
7818      Condition = getFCmpCodeWithoutNaN(Condition);
7819    Opers.push_back(DAG.getCondCode(Condition));
7820    break;
7821  }
7822  }
7823
7824  SDValue Result = DAG.getNode(Opcode, sdl, VTs, Opers, Flags);
7825  pushOutChain(Result, EB);
7826
7827  SDValue FPResult = Result.getValue(0);
7828  setValue(&FPI, FPResult);
7829}
7830
7831static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
7832  std::optional<unsigned> ResOPC;
7833  switch (VPIntrin.getIntrinsicID()) {
7834  case Intrinsic::vp_ctlz: {
7835    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7836    ResOPC = IsZeroUndef ? ISD::VP_CTLZ_ZERO_UNDEF : ISD::VP_CTLZ;
7837    break;
7838  }
7839  case Intrinsic::vp_cttz: {
7840    bool IsZeroUndef = cast<ConstantInt>(VPIntrin.getArgOperand(1))->isOne();
7841    ResOPC = IsZeroUndef ? ISD::VP_CTTZ_ZERO_UNDEF : ISD::VP_CTTZ;
7842    break;
7843  }
7844#define HELPER_MAP_VPID_TO_VPSD(VPID, VPSD)                                    \
7845  case Intrinsic::VPID:                                                        \
7846    ResOPC = ISD::VPSD;                                                        \
7847    break;
7848#include "llvm/IR/VPIntrinsics.def"
7849  }
7850
7851  if (!ResOPC)
7852    llvm_unreachable(
7853        "Inconsistency: no SDNode available for this VPIntrinsic!");
7854
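  // When reassociation is allowed, relax sequential FP reductions to their
  // unordered counterparts.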
7855  if (*ResOPC == ISD::VP_REDUCE_SEQ_FADD ||
7856      *ResOPC == ISD::VP_REDUCE_SEQ_FMUL) {
7857    if (VPIntrin.getFastMathFlags().allowReassoc())
7858      return *ResOPC == ISD::VP_REDUCE_SEQ_FADD ? ISD::VP_REDUCE_FADD
7859                                                : ISD::VP_REDUCE_FMUL;
7860  }
7861
7862  return *ResOPC;
7863}
7864
7865void SelectionDAGBuilder::visitVPLoad(
7866    const VPIntrinsic &VPIntrin, EVT VT,
7867    const SmallVectorImpl<SDValue> &OpValues) {
7868  SDLoc DL = getCurSDLoc();
7869  Value *PtrOperand = VPIntrin.getArgOperand(0);
7870  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7871  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7872  const MDNode *Ranges = getRangeMetadata(VPIntrin);
7873  SDValue LD;
7874  // Do not serialize variable-length loads of constant memory with
7875  // anything.
7876  if (!Alignment)
7877    Alignment = DAG.getEVTAlign(VT);
7878  MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
7879  bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
7880  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
7881  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7882      MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
7883      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7884  LD = DAG.getLoadVP(VT, DL, InChain, OpValues[0], OpValues[1], OpValues[2],
7885                     MMO, false /*IsExpanding */);
7886  if (AddToChain)
7887    PendingLoads.push_back(LD.getValue(1));
7888  setValue(&VPIntrin, LD);
7889}
7890
7891void SelectionDAGBuilder::visitVPGather(
7892    const VPIntrinsic &VPIntrin, EVT VT,
7893    const SmallVectorImpl<SDValue> &OpValues) {
7894  SDLoc DL = getCurSDLoc();
7895  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7896  Value *PtrOperand = VPIntrin.getArgOperand(0);
7897  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7898  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7899  const MDNode *Ranges = getRangeMetadata(VPIntrin);
7900  SDValue LD;
7901  if (!Alignment)
7902    Alignment = DAG.getEVTAlign(VT.getScalarType());
7903  unsigned AS =
7904      PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7905  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7906      MachinePointerInfo(AS), MachineMemOperand::MOLoad,
7907      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
7908  SDValue Base, Index, Scale;
7909  ISD::MemIndexType IndexType;
7910  bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7911                                    this, VPIntrin.getParent(),
7912                                    VT.getScalarStoreSize());
7913  if (!UniformBase) {
7914    Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7915    Index = getValue(PtrOperand);
7916    IndexType = ISD::SIGNED_SCALED;
7917    Scale = DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7918  }
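  // If the target prefers a wider index element type, shouldExtendGSIndex
  // updates EltTy and the index vector is sign-extended to match.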
7919  EVT IdxVT = Index.getValueType();
7920  EVT EltTy = IdxVT.getVectorElementType();
7921  if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7922    EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7923    Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7924  }
7925  LD = DAG.getGatherVP(
7926      DAG.getVTList(VT, MVT::Other), VT, DL,
7927      {DAG.getRoot(), Base, Index, Scale, OpValues[1], OpValues[2]}, MMO,
7928      IndexType);
7929  PendingLoads.push_back(LD.getValue(1));
7930  setValue(&VPIntrin, LD);
7931}
7932
7933void SelectionDAGBuilder::visitVPStore(
7934    const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7935  SDLoc DL = getCurSDLoc();
7936  Value *PtrOperand = VPIntrin.getArgOperand(1);
7937  EVT VT = OpValues[0].getValueType();
7938  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7939  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7940  SDValue ST;
7941  if (!Alignment)
7942    Alignment = DAG.getEVTAlign(VT);
7943  SDValue Ptr = OpValues[1];
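  // The store is unindexed (ISD::UNINDEXED), so the offset operand is undef.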
7944  SDValue Offset = DAG.getUNDEF(Ptr.getValueType());
7945  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7946      MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
7947      MemoryLocation::UnknownSize, *Alignment, AAInfo);
7948  ST = DAG.getStoreVP(getMemoryRoot(), DL, OpValues[0], Ptr, Offset,
7949                      OpValues[2], OpValues[3], VT, MMO, ISD::UNINDEXED,
7950                      /* IsTruncating */ false, /*IsCompressing*/ false);
7951  DAG.setRoot(ST);
7952  setValue(&VPIntrin, ST);
7953}
7954
7955void SelectionDAGBuilder::visitVPScatter(
7956    const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
7957  SDLoc DL = getCurSDLoc();
7958  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7959  Value *PtrOperand = VPIntrin.getArgOperand(1);
7960  EVT VT = OpValues[0].getValueType();
7961  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
7962  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
7963  SDValue ST;
7964  if (!Alignment)
7965    Alignment = DAG.getEVTAlign(VT.getScalarType());
7966  unsigned AS =
7967      PtrOperand->getType()->getScalarType()->getPointerAddressSpace();
7968  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
7969      MachinePointerInfo(AS), MachineMemOperand::MOStore,
7970      MemoryLocation::UnknownSize, *Alignment, AAInfo);
7971  SDValue Base, Index, Scale;
7972  ISD::MemIndexType IndexType;
7973  bool UniformBase = getUniformBase(PtrOperand, Base, Index, IndexType, Scale,
7974                                    this, VPIntrin.getParent(),
7975                                    VT.getScalarStoreSize());
7976  if (!UniformBase) {
7977    Base = DAG.getConstant(0, DL, TLI.getPointerTy(DAG.getDataLayout()));
7978    Index = getValue(PtrOperand);
7979    IndexType = ISD::SIGNED_SCALED;
7980    Scale =
7981      DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()));
7982  }
7983  EVT IdxVT = Index.getValueType();
7984  EVT EltTy = IdxVT.getVectorElementType();
7985  if (TLI.shouldExtendGSIndex(IdxVT, EltTy)) {
7986    EVT NewIdxVT = IdxVT.changeVectorElementType(EltTy);
7987    Index = DAG.getNode(ISD::SIGN_EXTEND, DL, NewIdxVT, Index);
7988  }
7989  ST = DAG.getScatterVP(DAG.getVTList(MVT::Other), VT, DL,
7990                        {getMemoryRoot(), OpValues[0], Base, Index, Scale,
7991                         OpValues[2], OpValues[3]},
7992                        MMO, IndexType);
7993  DAG.setRoot(ST);
7994  setValue(&VPIntrin, ST);
7995}
7996
7997void SelectionDAGBuilder::visitVPStridedLoad(
7998    const VPIntrinsic &VPIntrin, EVT VT,
7999    const SmallVectorImpl<SDValue> &OpValues) {
8000  SDLoc DL = getCurSDLoc();
8001  Value *PtrOperand = VPIntrin.getArgOperand(0);
8002  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8003  if (!Alignment)
8004    Alignment = DAG.getEVTAlign(VT.getScalarType());
8005  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8006  const MDNode *Ranges = getRangeMetadata(VPIntrin);
8007  MemoryLocation ML = MemoryLocation::getAfter(PtrOperand, AAInfo);
8008  bool AddToChain = !AA || !AA->pointsToConstantMemory(ML);
8009  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();
8010  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8011      MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
8012      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
8013
8014  SDValue LD = DAG.getStridedLoadVP(VT, DL, InChain, OpValues[0], OpValues[1],
8015                                    OpValues[2], OpValues[3], MMO,
8016                                    false /*IsExpanding*/);
8017
8018  if (AddToChain)
8019    PendingLoads.push_back(LD.getValue(1));
8020  setValue(&VPIntrin, LD);
8021}
8022
8023void SelectionDAGBuilder::visitVPStridedStore(
8024    const VPIntrinsic &VPIntrin, const SmallVectorImpl<SDValue> &OpValues) {
8025  SDLoc DL = getCurSDLoc();
8026  Value *PtrOperand = VPIntrin.getArgOperand(1);
8027  EVT VT = OpValues[0].getValueType();
8028  MaybeAlign Alignment = VPIntrin.getPointerAlignment();
8029  if (!Alignment)
8030    Alignment = DAG.getEVTAlign(VT.getScalarType());
8031  AAMDNodes AAInfo = VPIntrin.getAAMetadata();
8032  MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
8033      MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
8034      MemoryLocation::UnknownSize, *Alignment, AAInfo);
8035
8036  SDValue ST = DAG.getStridedStoreVP(
8037      getMemoryRoot(), DL, OpValues[0], OpValues[1],
8038      DAG.getUNDEF(OpValues[1].getValueType()), OpValues[2], OpValues[3],
8039      OpValues[4], VT, MMO, ISD::UNINDEXED, /*IsTruncating*/ false,
8040      /*IsCompressing*/ false);
8041
8042  DAG.setRoot(ST);
8043  setValue(&VPIntrin, ST);
8044}
8045
8046void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
8047  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8048  SDLoc DL = getCurSDLoc();
8049
8050  ISD::CondCode Condition;
8051  CmpInst::Predicate CondCode = VPIntrin.getPredicate();
8052  bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
8053  if (IsFP) {
8054    // FIXME: Regular fcmps are FPMathOperators and may carry fast-math (nnan)
8055    // flags, but calls that don't return a floating-point type, such as
8056    // vp.fcmp, can't be FPMathOperators. This affects constrained fcmp too.
8057    Condition = getFCmpCondCode(CondCode);
8058    if (TM.Options.NoNaNsFPMath)
8059      Condition = getFCmpCodeWithoutNaN(Condition);
8060  } else {
8061    Condition = getICmpCondCode(CondCode);
8062  }
8063
8064  SDValue Op1 = getValue(VPIntrin.getOperand(0));
8065  SDValue Op2 = getValue(VPIntrin.getOperand(1));
8066  // #2 is the condition code
8067  SDValue MaskOp = getValue(VPIntrin.getOperand(3));
8068  SDValue EVL = getValue(VPIntrin.getOperand(4));
8069  MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8070  assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8071         "Unexpected target EVL type");
8072  EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
8073
8074  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8075                                                        VPIntrin.getType());
8076  setValue(&VPIntrin,
8077           DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
8078}
8079
8080void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
8081    const VPIntrinsic &VPIntrin) {
8082  SDLoc DL = getCurSDLoc();
8083  unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
8084
8085  auto IID = VPIntrin.getIntrinsicID();
8086
8087  if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
8088    return visitVPCmp(*CmpI);
8089
8090  SmallVector<EVT, 4> ValueVTs;
8091  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8092  ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
8093  SDVTList VTs = DAG.getVTList(ValueVTs);
8094
8095  auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
8096
8097  MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
8098  assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
8099         "Unexpected target EVL type");
8100
8101  // Request operands.
8102  SmallVector<SDValue, 7> OpValues;
8103  for (unsigned I = 0; I < VPIntrin.arg_size(); ++I) {
8104    auto Op = getValue(VPIntrin.getArgOperand(I));
8105    if (I == EVLParamPos)
8106      Op = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, Op);
8107    OpValues.push_back(Op);
8108  }
8109
8110  switch (Opcode) {
8111  default: {
8112    SDNodeFlags SDFlags;
8113    if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8114      SDFlags.copyFMF(*FPMO);
8115    SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues, SDFlags);
8116    setValue(&VPIntrin, Result);
8117    break;
8118  }
8119  case ISD::VP_LOAD:
8120    visitVPLoad(VPIntrin, ValueVTs[0], OpValues);
8121    break;
8122  case ISD::VP_GATHER:
8123    visitVPGather(VPIntrin, ValueVTs[0], OpValues);
8124    break;
8125  case ISD::EXPERIMENTAL_VP_STRIDED_LOAD:
8126    visitVPStridedLoad(VPIntrin, ValueVTs[0], OpValues);
8127    break;
8128  case ISD::VP_STORE:
8129    visitVPStore(VPIntrin, OpValues);
8130    break;
8131  case ISD::VP_SCATTER:
8132    visitVPScatter(VPIntrin, OpValues);
8133    break;
8134  case ISD::EXPERIMENTAL_VP_STRIDED_STORE:
8135    visitVPStridedStore(VPIntrin, OpValues);
8136    break;
8137  case ISD::VP_FMULADD: {
8138    assert(OpValues.size() == 5 && "Unexpected number of operands");
8139    SDNodeFlags SDFlags;
8140    if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin))
8141      SDFlags.copyFMF(*FPMO);
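    // Emit a fused VP_FMA when FP-op fusion is permitted and profitable;
    // otherwise split into VP_FMUL followed by VP_FADD, reusing the same mask
    // and EVL operands.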
8142    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
8143        TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) {
8144      setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags));
8145    } else {
8146      SDValue Mul = DAG.getNode(
8147          ISD::VP_FMUL, DL, VTs,
8148          {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags);
8149      SDValue Add =
8150          DAG.getNode(ISD::VP_FADD, DL, VTs,
8151                      {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags);
8152      setValue(&VPIntrin, Add);
8153    }
8154    break;
8155  }
8156  case ISD::VP_IS_FPCLASS: {
8157    const DataLayout DLayout = DAG.getDataLayout();
8158    EVT DestVT = TLI.getValueType(DLayout, VPIntrin.getType());
8159    auto Constant = OpValues[1]->getAsZExtVal();
8160    SDValue Check = DAG.getTargetConstant(Constant, DL, MVT::i32);
8161    SDValue V = DAG.getNode(ISD::VP_IS_FPCLASS, DL, DestVT,
8162                            {OpValues[0], Check, OpValues[2], OpValues[3]});
8163    setValue(&VPIntrin, V);
8164    return;
8165  }
8166  case ISD::VP_INTTOPTR: {
8167    SDValue N = OpValues[0];
8168    EVT DestVT = TLI.getValueType(DAG.getDataLayout(), VPIntrin.getType());
8169    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), VPIntrin.getType());
8170    N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8171                               OpValues[2]);
8172    N = DAG.getVPZExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8173                             OpValues[2]);
8174    setValue(&VPIntrin, N);
8175    break;
8176  }
8177  case ISD::VP_PTRTOINT: {
8178    SDValue N = OpValues[0];
8179    EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8180                                                          VPIntrin.getType());
8181    EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(),
8182                                       VPIntrin.getOperand(0)->getType());
8183    N = DAG.getVPPtrExtOrTrunc(getCurSDLoc(), PtrMemVT, N, OpValues[1],
8184                               OpValues[2]);
8185    N = DAG.getVPZExtOrTrunc(getCurSDLoc(), DestVT, N, OpValues[1],
8186                             OpValues[2]);
8187    setValue(&VPIntrin, N);
8188    break;
8189  }
8190  case ISD::VP_ABS:
8191  case ISD::VP_CTLZ:
8192  case ISD::VP_CTLZ_ZERO_UNDEF:
8193  case ISD::VP_CTTZ:
8194  case ISD::VP_CTTZ_ZERO_UNDEF: {
8195    SDValue Result =
8196        DAG.getNode(Opcode, DL, VTs, {OpValues[0], OpValues[2], OpValues[3]});
8197    setValue(&VPIntrin, Result);
8198    break;
8199  }
8200  }
8201}
8202
8203SDValue SelectionDAGBuilder::lowerStartEH(SDValue Chain,
8204                                          const BasicBlock *EHPadBB,
8205                                          MCSymbol *&BeginLabel) {
8206  MachineFunction &MF = DAG.getMachineFunction();
8207  MachineModuleInfo &MMI = MF.getMMI();
8208
8209  // Insert a label before the invoke call to mark the try range.  This can be
8210  // used to detect deletion of the invoke via the MachineModuleInfo.
8211  BeginLabel = MMI.getContext().createTempSymbol();
8212
8213  // For SjLj, keep track of which landing pads go with which invokes
8214  // so as to maintain the ordering of pads in the LSDA.
8215  unsigned CallSiteIndex = MMI.getCurrentCallSite();
8216  if (CallSiteIndex) {
8217    MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex);
8218    LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex);
8219
8220    // Now that the call site is handled, stop tracking it.
8221    MMI.setCurrentCallSite(0);
8222  }
8223
8224  return DAG.getEHLabel(getCurSDLoc(), Chain, BeginLabel);
8225}
8226
8227SDValue SelectionDAGBuilder::lowerEndEH(SDValue Chain, const InvokeInst *II,
8228                                        const BasicBlock *EHPadBB,
8229                                        MCSymbol *BeginLabel) {
8230  assert(BeginLabel && "BeginLabel should've been set");
8231
8232  MachineFunction &MF = DAG.getMachineFunction();
8233  MachineModuleInfo &MMI = MF.getMMI();
8234
8235  // Insert a label at the end of the invoke call to mark the try range.  This
8236  // can be used to detect deletion of the invoke via the MachineModuleInfo.
8237  MCSymbol *EndLabel = MMI.getContext().createTempSymbol();
8238  Chain = DAG.getEHLabel(getCurSDLoc(), Chain, EndLabel);
8239
8240  // Inform MachineModuleInfo of range.
8241  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
8242  // Some platforms (e.g. wasm) use funclet-style IR but do not actually use
8243  // outlined funclets or their LSDA info style.
8244  if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) {
8245    assert(II && "II should've been set");
8246    WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
8247    EHInfo->addIPToStateRange(II, BeginLabel, EndLabel);
8248  } else if (!isScopedEHPersonality(Pers)) {
8249    assert(EHPadBB);
8250    MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel);
8251  }
8252
8253  return Chain;
8254}
8255
8256std::pair<SDValue, SDValue>
8257SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
8258                                    const BasicBlock *EHPadBB) {
8259  MCSymbol *BeginLabel = nullptr;
8260
8261  if (EHPadBB) {
8262    // Both PendingLoads and PendingExports must be flushed here;
8263    // this call might not return.
8264    (void)getRoot();
8265    DAG.setRoot(lowerStartEH(getControlRoot(), EHPadBB, BeginLabel));
8266    CLI.setChain(getRoot());
8267  }
8268
8269  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8270  std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
8271
8272  assert((CLI.IsTailCall || Result.second.getNode()) &&
8273         "Non-null chain expected with non-tail call!");
8274  assert((Result.second.getNode() || !Result.first.getNode()) &&
8275         "Null value expected with tail call!");
8276
8277  if (!Result.second.getNode()) {
8278    // As a special case, a null chain means that a tail call has been emitted
8279    // and the DAG root is already updated.
8280    HasTailCall = true;
8281
8282    // Since there's no actual continuation from this block, nothing can rely
8283    // on us setting vregs for the call's return values.
8284    PendingExports.clear();
8285  } else {
8286    DAG.setRoot(Result.second);
8287  }
8288
8289  if (EHPadBB) {
8290    DAG.setRoot(lowerEndEH(getRoot(), cast_or_null<InvokeInst>(CLI.CB), EHPadBB,
8291                           BeginLabel));
8292  }
8293
8294  return Result;
8295}
8296
8297void SelectionDAGBuilder::LowerCallTo(const CallBase &CB, SDValue Callee,
8298                                      bool isTailCall,
8299                                      bool isMustTailCall,
8300                                      const BasicBlock *EHPadBB) {
8301  auto &DL = DAG.getDataLayout();
8302  FunctionType *FTy = CB.getFunctionType();
8303  Type *RetTy = CB.getType();
8304
8305  TargetLowering::ArgListTy Args;
8306  Args.reserve(CB.arg_size());
8307
8308  const Value *SwiftErrorVal = nullptr;
8309  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8310
8311  if (isTailCall) {
8312    // Avoid emitting tail calls in functions with the disable-tail-calls
8313    // attribute.
8314    auto *Caller = CB.getParent()->getParent();
8315    if (Caller->getFnAttribute("disable-tail-calls").getValueAsString() ==
8316        "true" && !isMustTailCall)
8317      isTailCall = false;
8318
8319    // We can't tail call inside a function with a swifterror argument. Lowering
8320    // does not support this yet. It would have to move into the swifterror
8321    // register before the call.
8322    if (TLI.supportSwiftError() &&
8323        Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
8324      isTailCall = false;
8325  }
8326
8327  for (auto I = CB.arg_begin(), E = CB.arg_end(); I != E; ++I) {
8328    TargetLowering::ArgListEntry Entry;
8329    const Value *V = *I;
8330
8331    // Skip empty types
8332    if (V->getType()->isEmptyTy())
8333      continue;
8334
8335    SDValue ArgNode = getValue(V);
8336    Entry.Node = ArgNode; Entry.Ty = V->getType();
8337
8338    Entry.setAttributes(&CB, I - CB.arg_begin());
8339
8340    // Use swifterror virtual register as input to the call.
8341    if (Entry.IsSwiftError && TLI.supportSwiftError()) {
8342      SwiftErrorVal = V;
8343      // Find the virtual register for the actual swifterror argument and use
8344      // it instead of the Value.
8345      Entry.Node =
8346          DAG.getRegister(SwiftError.getOrCreateVRegUseAt(&CB, FuncInfo.MBB, V),
8347                          EVT(TLI.getPointerTy(DL)));
8348    }
8349
8350    Args.push_back(Entry);
8351
8352    // If we have an explicit sret argument that is an Instruction (i.e., it
8353    // might point to function-local memory), we can't meaningfully tail-call.
8354    if (Entry.IsSRet && isa<Instruction>(V))
8355      isTailCall = false;
8356  }
8357
8358  // If the call site has a cfguardtarget operand bundle, create and add an
8359  // additional ArgListEntry.
8360  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_cfguardtarget)) {
8361    TargetLowering::ArgListEntry Entry;
8362    Value *V = Bundle->Inputs[0];
8363    SDValue ArgNode = getValue(V);
8364    Entry.Node = ArgNode;
8365    Entry.Ty = V->getType();
8366    Entry.IsCFGuardTarget = true;
8367    Args.push_back(Entry);
8368  }
8369
8370  // Check if target-independent constraints permit a tail call here.
8371  // Target-dependent constraints are checked within TLI->LowerCallTo.
8372  if (isTailCall && !isInTailCallPosition(CB, DAG.getTarget()))
8373    isTailCall = false;
8374
8375  // Disable tail calls if there is a swifterror argument. Targets have not
8376  // been updated to support tail calls.
8377  if (TLI.supportSwiftError() && SwiftErrorVal)
8378    isTailCall = false;
8379
8380  ConstantInt *CFIType = nullptr;
8381  if (CB.isIndirectCall()) {
8382    if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_kcfi)) {
8383      if (!TLI.supportKCFIBundles())
8384        report_fatal_error(
8385            "Target doesn't support calls with kcfi operand bundles.");
8386      CFIType = cast<ConstantInt>(Bundle->Inputs[0]);
8387      assert(CFIType->getType()->isIntegerTy(32) && "Invalid CFI type");
8388    }
8389  }
8390
8391  TargetLowering::CallLoweringInfo CLI(DAG);
8392  CLI.setDebugLoc(getCurSDLoc())
8393      .setChain(getRoot())
8394      .setCallee(RetTy, FTy, Callee, std::move(Args), CB)
8395      .setTailCall(isTailCall)
8396      .setConvergent(CB.isConvergent())
8397      .setIsPreallocated(
8398          CB.countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0)
8399      .setCFIType(CFIType);
8400  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
8401
8402  if (Result.first.getNode()) {
8403    Result.first = lowerRangeToAssertZExt(DAG, CB, Result.first);
8404    setValue(&CB, Result.first);
8405  }
8406
8407  // The last element of CLI.InVals has the SDValue for swifterror return.
8408  // Here we copy it to a virtual register and update SwiftErrorMap for
8409  // book-keeping.
8410  if (SwiftErrorVal && TLI.supportSwiftError()) {
8411    // Get the last element of InVals.
8412    SDValue Src = CLI.InVals.back();
8413    Register VReg =
8414        SwiftError.getOrCreateVRegDefAt(&CB, FuncInfo.MBB, SwiftErrorVal);
8415    SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
8416    DAG.setRoot(CopyNode);
8417  }
8418}
8419
8420static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT,
8421                             SelectionDAGBuilder &Builder) {
8422  // Check to see if this load can be trivially constant folded, e.g. if the
8423  // input is from a string literal.
8424  if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) {
8425    // Cast pointer to the type we really want to load.
8426    Type *LoadTy =
8427        Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits());
8428    if (LoadVT.isVector())
8429      LoadTy = FixedVectorType::get(LoadTy, LoadVT.getVectorNumElements());
8430
8431    LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput),
8432                                         PointerType::getUnqual(LoadTy));
8433
8434    if (const Constant *LoadCst =
8435            ConstantFoldLoadFromConstPtr(const_cast<Constant *>(LoadInput),
8436                                         LoadTy, Builder.DAG.getDataLayout()))
8437      return Builder.getValue(LoadCst);
8438  }
8439
8440  // Otherwise, we have to emit the load.  If the pointer is to unfoldable but
8441  // still constant memory, the input chain can be the entry node.
8442  SDValue Root;
8443  bool ConstantMemory = false;
8444
8445  // Do not serialize (non-volatile) loads of constant memory with anything.
8446  if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) {
8447    Root = Builder.DAG.getEntryNode();
8448    ConstantMemory = true;
8449  } else {
8450    // Do not serialize non-volatile loads against each other.
8451    Root = Builder.DAG.getRoot();
8452  }
8453
8454  SDValue Ptr = Builder.getValue(PtrVal);
8455  SDValue LoadVal =
8456      Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr,
8457                          MachinePointerInfo(PtrVal), Align(1));
8458
8459  if (!ConstantMemory)
8460    Builder.PendingLoads.push_back(LoadVal.getValue(1));
8461  return LoadVal;
8462}
8463
8464/// Record the value for an instruction that produces an integer result,
8465/// converting the type where necessary.
8466void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I,
8467                                                  SDValue Value,
8468                                                  bool IsSigned) {
8469  EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8470                                                    I.getType(), true);
8471  Value = DAG.getExtOrTrunc(IsSigned, Value, getCurSDLoc(), VT);
8472  setValue(&I, Value);
8473}
8474
8475/// See if we can lower a memcmp/bcmp call into an optimized form. If so, return
8476/// true and lower it. Otherwise return false, and it will be lowered like a
8477/// normal call.
8478/// The caller already checked that \p I calls the appropriate LibFunc with a
8479/// correct prototype.
8480bool SelectionDAGBuilder::visitMemCmpBCmpCall(const CallInst &I) {
8481  const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
8482  const Value *Size = I.getArgOperand(2);
8483  const ConstantSDNode *CSize = dyn_cast<ConstantSDNode>(getValue(Size));
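  // A zero-length memcmp/bcmp trivially evaluates to 0.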
8484  if (CSize && CSize->getZExtValue() == 0) {
8485    EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
8486                                                          I.getType(), true);
8487    setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
8488    return true;
8489  }
8490
8491  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8492  std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
8493      DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
8494      getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
8495  if (Res.first.getNode()) {
8496    processIntegerCallValue(I, Res.first, true);
8497    PendingLoads.push_back(Res.second);
8498    return true;
8499  }
8500
8501  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
8502  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
8503  if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
8504    return false;
8505
8506  // If the target has a fast compare for the given size, it will return a
8507  // preferred load type for that size. Require that the load VT is legal and
8508  // that the target supports unaligned loads of that type. Otherwise, return
8509  // INVALID.
8510  auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
8511    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8512    MVT LVT = TLI.hasFastEqualityCompare(NumBits);
8513    if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
8514      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
8515      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
8516      // TODO: Check alignment of src and dest ptrs.
8517      unsigned DstAS = LHS->getType()->getPointerAddressSpace();
8518      unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
8519      if (!TLI.isTypeLegal(LVT) ||
8520          !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
8521          !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
8522        LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
8523    }
8524
8525    return LVT;
8526  };
8527
8528  // This turns into unaligned loads. We only do this if the target natively
8529  // supports the MVT we'll be loading or if it is small enough (<= 4) that
8530  // we'll only produce a small number of byte loads.
8531  MVT LoadVT;
8532  unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
8533  switch (NumBitsToCompare) {
8534  default:
8535    return false;
8536  case 16:
8537    LoadVT = MVT::i16;
8538    break;
8539  case 32:
8540    LoadVT = MVT::i32;
8541    break;
8542  case 64:
8543  case 128:
8544  case 256:
8545    LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
8546    break;
8547  }
8548
8549  if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
8550    return false;
8551
8552  SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
8553  SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);
8554
8555  // Bitcast to a wide integer type if the loads are vectors.
8556  if (LoadVT.isVector()) {
8557    EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
8558    LoadL = DAG.getBitcast(CmpVT, LoadL);
8559    LoadR = DAG.getBitcast(CmpVT, LoadR);
8560  }
8561
8562  SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
8563  processIntegerCallValue(I, Cmp, false);
8564  return true;
8565}
8566
8567/// See if we can lower a memchr call into an optimized form. If so, return
8568/// true and lower it. Otherwise return false, and it will be lowered like a
8569/// normal call.
8570/// The caller already checked that \p I calls the appropriate LibFunc with a
8571/// correct prototype.
8572bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
8573  const Value *Src = I.getArgOperand(0);
8574  const Value *Char = I.getArgOperand(1);
8575  const Value *Length = I.getArgOperand(2);
8576
8577  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8578  std::pair<SDValue, SDValue> Res =
8579    TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
8580                                getValue(Src), getValue(Char), getValue(Length),
8581                                MachinePointerInfo(Src));
8582  if (Res.first.getNode()) {
8583    setValue(&I, Res.first);
8584    PendingLoads.push_back(Res.second);
8585    return true;
8586  }
8587
8588  return false;
8589}
8590
8591/// See if we can lower a mempcpy call into an optimized form. If so, return
8592/// true and lower it. Otherwise return false, and it will be lowered like a
8593/// normal call.
8594/// The caller already checked that \p I calls the appropriate LibFunc with a
8595/// correct prototype.
8596bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) {
8597  SDValue Dst = getValue(I.getArgOperand(0));
8598  SDValue Src = getValue(I.getArgOperand(1));
8599  SDValue Size = getValue(I.getArgOperand(2));
8600
8601  Align DstAlign = DAG.InferPtrAlign(Dst).valueOrOne();
8602  Align SrcAlign = DAG.InferPtrAlign(Src).valueOrOne();
8603  // DAG::getMemcpy needs Alignment to be defined.
8604  Align Alignment = std::min(DstAlign, SrcAlign);
8605
8606  SDLoc sdl = getCurSDLoc();
8607
8608  // In the mempcpy context we need to pass in a false value for isTailCall
8609  // because the return pointer needs to be adjusted by the size of
8610  // the copied memory.
8611  SDValue Root = getMemoryRoot();
8612  SDValue MC = DAG.getMemcpy(Root, sdl, Dst, Src, Size, Alignment, false, false,
8613                             /*isTailCall=*/false,
8614                             MachinePointerInfo(I.getArgOperand(0)),
8615                             MachinePointerInfo(I.getArgOperand(1)),
8616                             I.getAAMetadata());
8617  assert(MC.getNode() != nullptr &&
8618         "** memcpy should not be lowered as TailCall in mempcpy context **");
8619  DAG.setRoot(MC);
8620
8621  // Check if Size needs to be truncated or extended.
8622  Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType());
8623
8624  // Adjust return pointer to point just past the last dst byte.
8625  SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(),
8626                                    Dst, Size);
8627  setValue(&I, DstPlusSize);
8628  return true;
8629}
8630
8631/// See if we can lower a strcpy call into an optimized form.  If so, return
8632/// true and lower it, otherwise return false and it will be lowered like a
8633/// normal call.
8634/// The caller already checked that \p I calls the appropriate LibFunc with a
8635/// correct prototype.
8636bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) {
8637  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8638
8639  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8640  std::pair<SDValue, SDValue> Res =
8641    TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(),
8642                                getValue(Arg0), getValue(Arg1),
8643                                MachinePointerInfo(Arg0),
8644                                MachinePointerInfo(Arg1), isStpcpy);
8645  if (Res.first.getNode()) {
8646    setValue(&I, Res.first);
8647    DAG.setRoot(Res.second);
8648    return true;
8649  }
8650
8651  return false;
8652}
8653
8654/// See if we can lower a strcmp call into an optimized form.  If so, return
8655/// true and lower it, otherwise return false and it will be lowered like a
8656/// normal call.
8657/// The caller already checked that \p I calls the appropriate LibFunc with a
8658/// correct prototype.
8659bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) {
8660  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8661
8662  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8663  std::pair<SDValue, SDValue> Res =
8664    TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(),
8665                                getValue(Arg0), getValue(Arg1),
8666                                MachinePointerInfo(Arg0),
8667                                MachinePointerInfo(Arg1));
8668  if (Res.first.getNode()) {
8669    processIntegerCallValue(I, Res.first, true);
8670    PendingLoads.push_back(Res.second);
8671    return true;
8672  }
8673
8674  return false;
8675}
8676
8677/// See if we can lower a strlen call into an optimized form.  If so, return
8678/// true and lower it, otherwise return false and it will be lowered like a
8679/// normal call.
8680/// The caller already checked that \p I calls the appropriate LibFunc with a
8681/// correct prototype.
8682bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
8683  const Value *Arg0 = I.getArgOperand(0);
8684
8685  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8686  std::pair<SDValue, SDValue> Res =
8687    TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
8688                                getValue(Arg0), MachinePointerInfo(Arg0));
8689  if (Res.first.getNode()) {
8690    processIntegerCallValue(I, Res.first, false);
8691    PendingLoads.push_back(Res.second);
8692    return true;
8693  }
8694
8695  return false;
8696}
8697
8698/// See if we can lower a strnlen call into an optimized form.  If so, return
8699/// true and lower it, otherwise return false and it will be lowered like a
8700/// normal call.
8701/// The caller already checked that \p I calls the appropriate LibFunc with a
8702/// correct prototype.
8703bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
8704  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);
8705
8706  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
8707  std::pair<SDValue, SDValue> Res =
8708    TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
8709                                 getValue(Arg0), getValue(Arg1),
8710                                 MachinePointerInfo(Arg0));
8711  if (Res.first.getNode()) {
8712    processIntegerCallValue(I, Res.first, false);
8713    PendingLoads.push_back(Res.second);
8714    return true;
8715  }
8716
8717  return false;
8718}
8719
8720/// See if we can lower a unary floating-point operation into an SDNode with
8721/// the specified Opcode.  If so, return true and lower it, otherwise return
8722/// false and it will be lowered like a normal call.
8723/// The caller already checked that \p I calls the appropriate LibFunc with a
8724/// correct prototype.
8725bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
8726                                              unsigned Opcode) {
8727  // We already checked this call's prototype; verify it doesn't modify errno.
8728  if (!I.onlyReadsMemory())
8729    return false;
8730
8731  SDNodeFlags Flags;
8732  Flags.copyFMF(cast<FPMathOperator>(I));
8733
8734  SDValue Tmp = getValue(I.getArgOperand(0));
8735  setValue(&I,
8736           DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp, Flags));
8737  return true;
8738}
8739
8740/// See if we can lower a binary floating-point operation into an SDNode with
8741/// the specified Opcode. If so, return true and lower it. Otherwise return
8742/// false, and it will be lowered like a normal call.
8743/// The caller already checked that \p I calls the appropriate LibFunc with a
8744/// correct prototype.
8745bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
8746                                               unsigned Opcode) {
8747  // We already checked this call's prototype; verify it doesn't modify errno.
8748  if (!I.onlyReadsMemory())
8749    return false;
8750
8751  SDNodeFlags Flags;
8752  Flags.copyFMF(cast<FPMathOperator>(I));
8753
8754  SDValue Tmp0 = getValue(I.getArgOperand(0));
8755  SDValue Tmp1 = getValue(I.getArgOperand(1));
8756  EVT VT = Tmp0.getValueType();
8757  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1, Flags));
8758  return true;
8759}
8760
8761void SelectionDAGBuilder::visitCall(const CallInst &I) {
8762  // Handle inline assembly differently.
8763  if (I.isInlineAsm()) {
8764    visitInlineAsm(I);
8765    return;
8766  }
8767
8768  diagnoseDontCall(I);
8769
8770  if (Function *F = I.getCalledFunction()) {
8771    if (F->isDeclaration()) {
8772      // Is this an LLVM intrinsic or a target-specific intrinsic?
8773      unsigned IID = F->getIntrinsicID();
8774      if (!IID)
8775        if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
8776          IID = II->getIntrinsicID(F);
8777
8778      if (IID) {
8779        visitIntrinsicCall(I, IID);
8780        return;
8781      }
8782    }
8783
8784    // Check for well-known libc/libm calls.  If the function is internal, it
8785    // can't be a library call.  Don't do the check if marked as nobuiltin for
8786    // some reason or the call site requires strict floating point semantics.
8787    LibFunc Func;
8788    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
8789        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
8790        LibInfo->hasOptimizedCodeGen(Func)) {
8791      switch (Func) {
8792      default: break;
8793      case LibFunc_bcmp:
8794        if (visitMemCmpBCmpCall(I))
8795          return;
8796        break;
8797      case LibFunc_copysign:
8798      case LibFunc_copysignf:
8799      case LibFunc_copysignl:
8800        // We already checked this call's prototype; verify it doesn't modify
8801        // errno.
8802        if (I.onlyReadsMemory()) {
8803          SDValue LHS = getValue(I.getArgOperand(0));
8804          SDValue RHS = getValue(I.getArgOperand(1));
8805          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
8806                                   LHS.getValueType(), LHS, RHS));
8807          return;
8808        }
8809        break;
8810      case LibFunc_fabs:
8811      case LibFunc_fabsf:
8812      case LibFunc_fabsl:
8813        if (visitUnaryFloatCall(I, ISD::FABS))
8814          return;
8815        break;
8816      case LibFunc_fmin:
8817      case LibFunc_fminf:
8818      case LibFunc_fminl:
8819        if (visitBinaryFloatCall(I, ISD::FMINNUM))
8820          return;
8821        break;
8822      case LibFunc_fmax:
8823      case LibFunc_fmaxf:
8824      case LibFunc_fmaxl:
8825        if (visitBinaryFloatCall(I, ISD::FMAXNUM))
8826          return;
8827        break;
8828      case LibFunc_sin:
8829      case LibFunc_sinf:
8830      case LibFunc_sinl:
8831        if (visitUnaryFloatCall(I, ISD::FSIN))
8832          return;
8833        break;
8834      case LibFunc_cos:
8835      case LibFunc_cosf:
8836      case LibFunc_cosl:
8837        if (visitUnaryFloatCall(I, ISD::FCOS))
8838          return;
8839        break;
8840      case LibFunc_sqrt:
8841      case LibFunc_sqrtf:
8842      case LibFunc_sqrtl:
8843      case LibFunc_sqrt_finite:
8844      case LibFunc_sqrtf_finite:
8845      case LibFunc_sqrtl_finite:
8846        if (visitUnaryFloatCall(I, ISD::FSQRT))
8847          return;
8848        break;
8849      case LibFunc_floor:
8850      case LibFunc_floorf:
8851      case LibFunc_floorl:
8852        if (visitUnaryFloatCall(I, ISD::FFLOOR))
8853          return;
8854        break;
8855      case LibFunc_nearbyint:
8856      case LibFunc_nearbyintf:
8857      case LibFunc_nearbyintl:
8858        if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
8859          return;
8860        break;
8861      case LibFunc_ceil:
8862      case LibFunc_ceilf:
8863      case LibFunc_ceill:
8864        if (visitUnaryFloatCall(I, ISD::FCEIL))
8865          return;
8866        break;
8867      case LibFunc_rint:
8868      case LibFunc_rintf:
8869      case LibFunc_rintl:
8870        if (visitUnaryFloatCall(I, ISD::FRINT))
8871          return;
8872        break;
8873      case LibFunc_round:
8874      case LibFunc_roundf:
8875      case LibFunc_roundl:
8876        if (visitUnaryFloatCall(I, ISD::FROUND))
8877          return;
8878        break;
8879      case LibFunc_trunc:
8880      case LibFunc_truncf:
8881      case LibFunc_truncl:
8882        if (visitUnaryFloatCall(I, ISD::FTRUNC))
8883          return;
8884        break;
8885      case LibFunc_log2:
8886      case LibFunc_log2f:
8887      case LibFunc_log2l:
8888        if (visitUnaryFloatCall(I, ISD::FLOG2))
8889          return;
8890        break;
8891      case LibFunc_exp2:
8892      case LibFunc_exp2f:
8893      case LibFunc_exp2l:
8894        if (visitUnaryFloatCall(I, ISD::FEXP2))
8895          return;
8896        break;
8897      case LibFunc_exp10:
8898      case LibFunc_exp10f:
8899      case LibFunc_exp10l:
8900        if (visitUnaryFloatCall(I, ISD::FEXP10))
8901          return;
8902        break;
8903      case LibFunc_ldexp:
8904      case LibFunc_ldexpf:
8905      case LibFunc_ldexpl:
8906        if (visitBinaryFloatCall(I, ISD::FLDEXP))
8907          return;
8908        break;
8909      case LibFunc_memcmp:
8910        if (visitMemCmpBCmpCall(I))
8911          return;
8912        break;
8913      case LibFunc_mempcpy:
8914        if (visitMemPCpyCall(I))
8915          return;
8916        break;
8917      case LibFunc_memchr:
8918        if (visitMemChrCall(I))
8919          return;
8920        break;
8921      case LibFunc_strcpy:
8922        if (visitStrCpyCall(I, false))
8923          return;
8924        break;
8925      case LibFunc_stpcpy:
8926        if (visitStrCpyCall(I, true))
8927          return;
8928        break;
8929      case LibFunc_strcmp:
8930        if (visitStrCmpCall(I))
8931          return;
8932        break;
8933      case LibFunc_strlen:
8934        if (visitStrLenCall(I))
8935          return;
8936        break;
8937      case LibFunc_strnlen:
8938        if (visitStrNLenCall(I))
8939          return;
8940        break;
8941      }
8942    }
8943  }
8944
8945  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
8946  // have to do anything here to lower funclet bundles.
8947  // CFGuardTarget bundles are lowered in LowerCallTo.
8948  assert(!I.hasOperandBundlesOtherThan(
8949             {LLVMContext::OB_deopt, LLVMContext::OB_funclet,
8950              LLVMContext::OB_cfguardtarget, LLVMContext::OB_preallocated,
8951              LLVMContext::OB_clang_arc_attachedcall, LLVMContext::OB_kcfi}) &&
8952         "Cannot lower calls with arbitrary operand bundles!");
8953
8954  SDValue Callee = getValue(I.getCalledOperand());
8955
8956  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
8957    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
8958  else
8959    // Check if we can potentially perform a tail call. More detailed checking
8960    // is done within LowerCallTo, after more information about the call is
8961    // known.
8962    LowerCallTo(I, Callee, I.isTailCall(), I.isMustTailCall());
8963}
8964
8965namespace {
8966
8967/// AsmOperandInfo - This contains information for each constraint that we are
8968/// lowering.
8969class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
8970public:
8971  /// CallOperand - If this is the result output operand or a clobber,
8972  /// this is null; otherwise it is the incoming operand to the CallInst.
8973  /// This gets modified as the asm is processed.
8974  SDValue CallOperand;
8975
8976  /// AssignedRegs - If this is a register or register class operand, this
8977  /// contains the set of registers corresponding to the operand.
8978  RegsForValue AssignedRegs;
8979
8980  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
8981    : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
8982  }
8983
8984  /// Whether or not this operand accesses memory
8985  bool hasMemory(const TargetLowering &TLI) const {
8986    // Indirect operands access memory.
8987    if (isIndirect)
8988      return true;
8989
8990    for (const auto &Code : Codes)
8991      if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
8992        return true;
8993
8994    return false;
8995  }
8996};
8997
8999} // end anonymous namespace
9000
9001/// Make sure that the output operand \p OpInfo and its corresponding input
9002/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
9003/// out).
9004static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
9005                               SDISelAsmOperandInfo &MatchingOpInfo,
9006                               SelectionDAG &DAG) {
9007  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
9008    return;
9009
9010  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
9011  const auto &TLI = DAG.getTargetLoweringInfo();
9012
9013  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
9014      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
9015                                       OpInfo.ConstraintVT);
9016  std::pair<unsigned, const TargetRegisterClass *> InputRC =
9017      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
9018                                       MatchingOpInfo.ConstraintVT);
9019  if ((OpInfo.ConstraintVT.isInteger() !=
9020       MatchingOpInfo.ConstraintVT.isInteger()) ||
9021      (MatchRC.second != InputRC.second)) {
9022    // FIXME: error out in a more elegant fashion
9023    report_fatal_error("Unsupported asm: input constraint"
9024                       " with a matching output constraint of"
9025                       " incompatible type!");
9026  }
9027  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
9028}
9029
9030/// Get a direct memory input to behave well as an indirect operand.
9031/// This may introduce stores, hence the need for a \p Chain.
9032/// \return The (possibly updated) chain.
9033static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
9034                                        SDISelAsmOperandInfo &OpInfo,
9035                                        SelectionDAG &DAG) {
9036  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9037
9038  // If we don't have an indirect input, put it in the constant pool if we can,
9039  // otherwise spill it to a stack slot.
9040  // TODO: This isn't quite right. We need to handle these according to
9041  // the addressing mode that the constraint wants. Also, this may take
9042  // an additional register for the computation and we don't want that
9043  // either.
9044
9045  // If the operand is a float, integer, or vector constant, spill to a
9046  // constant pool entry to get its address.
9047  const Value *OpVal = OpInfo.CallOperandVal;
9048  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
9049      isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
9050    OpInfo.CallOperand = DAG.getConstantPool(
9051        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
9052    return Chain;
9053  }
9054
9055  // Otherwise, create a stack slot and emit a store to it before the asm.
9056  Type *Ty = OpVal->getType();
9057  auto &DL = DAG.getDataLayout();
9058  uint64_t TySize = DL.getTypeAllocSize(Ty);
9059  MachineFunction &MF = DAG.getMachineFunction();
9060  int SSFI = MF.getFrameInfo().CreateStackObject(
9061      TySize, DL.getPrefTypeAlign(Ty), false);
9062  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
9063  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
9064                            MachinePointerInfo::getFixedStack(MF, SSFI),
9065                            TLI.getMemValueType(DL, Ty));
9066  OpInfo.CallOperand = StackSlot;
9067
9068  return Chain;
9069}
9070
9071/// GetRegistersForValue - Assign registers (virtual or physical) for the
9072/// specified operand.  We prefer to assign virtual registers, to allow the
9073/// register allocator to handle the assignment process.  However, if the asm
9074/// uses features that we can't model on machineinstrs, we have SDISel do the
9075/// allocation.  This produces generally horrible, but correct, code.
9076///
9077///   OpInfo describes the operand
9078///   RefOpInfo describes the matching operand if any, the operand otherwise
9079static std::optional<unsigned>
9080getRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
9081                     SDISelAsmOperandInfo &OpInfo,
9082                     SDISelAsmOperandInfo &RefOpInfo) {
9083  LLVMContext &Context = *DAG.getContext();
9084  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9085
9086  MachineFunction &MF = DAG.getMachineFunction();
9087  SmallVector<unsigned, 4> Regs;
9088  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9089
9090  // No work to do for memory/address operands.
9091  if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9092      OpInfo.ConstraintType == TargetLowering::C_Address)
9093    return std::nullopt;
9094
9095  // If this is a constraint for a single physreg, or a constraint for a
9096  // register class, find it.
9097  unsigned AssignedReg;
9098  const TargetRegisterClass *RC;
9099  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
9100      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
9101  // RC is unset only on failure. Return immediately.
9102  if (!RC)
9103    return std::nullopt;
9104
9105  // Get the actual register value type.  This is important, because the user
9106  // may have asked for (e.g.) the AX register in i32 type.  We need to
9107  // remember that AX is actually i16 to get the right extension.
9108  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);
9109
9110  if (OpInfo.ConstraintVT != MVT::Other && RegVT != MVT::Untyped) {
    // If this is an FP operand in an integer register (or vice versa), or more
9112    // generally if the operand value disagrees with the register class we plan
9113    // to stick it in, fix the operand type.
9114    //
9115    // If this is an input value, the bitcast to the new type is done now.
9116    // Bitcast for output value is done at the end of visitInlineAsm().
9117    if ((OpInfo.Type == InlineAsm::isOutput ||
9118         OpInfo.Type == InlineAsm::isInput) &&
9119        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
9120      // Try to convert to the first EVT that the reg class contains.  If the
9121      // types are identical size, use a bitcast to convert (e.g. two differing
9122      // vector types).  Note: output bitcast is done at the end of
9123      // visitInlineAsm().
9124      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
9125        // Exclude indirect inputs while they are unsupported because the code
9126        // to perform the load is missing and thus OpInfo.CallOperand still
9127        // refers to the input address rather than the pointed-to value.
9128        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
9129          OpInfo.CallOperand =
9130              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
9131        OpInfo.ConstraintVT = RegVT;
9132        // If the operand is an FP value and we want it in integer registers,
9133        // use the corresponding integer type. This turns an f64 value into
9134        // i64, which can be passed with two i32 values on a 32-bit machine.
9135      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
9136        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
9137        if (OpInfo.Type == InlineAsm::isInput)
9138          OpInfo.CallOperand =
9139              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
9140        OpInfo.ConstraintVT = VT;
9141      }
9142    }
9143  }
9144
  // No need to allocate registers for a matching input constraint, since the
  // operand it matches has already had its registers allocated.
9147  if (OpInfo.isMatchingInputConstraint())
9148    return std::nullopt;
9149
9150  EVT ValueVT = OpInfo.ConstraintVT;
9151  if (OpInfo.ConstraintVT == MVT::Other)
9152    ValueVT = RegVT;
9153
9154  // Initialize NumRegs.
9155  unsigned NumRegs = 1;
9156  if (OpInfo.ConstraintVT != MVT::Other)
9157    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT, RegVT);
9158
9159  // If this is a constraint for a specific physical register, like {r17},
9160  // assign it now.
9161
  // If this is associated with a specific register, initialize the iterator to
  // the correct place. If virtual, make sure we have enough registers.
9164
9165  // Initialize iterator if necessary
9166  TargetRegisterClass::iterator I = RC->begin();
9167  MachineRegisterInfo &RegInfo = MF.getRegInfo();
9168
9169  // Do not check for single registers.
9170  if (AssignedReg) {
9171    I = std::find(I, RC->end(), AssignedReg);
9172    if (I == RC->end()) {
9173      // RC does not contain the selected register, which indicates a
9174      // mismatch between the register and the required type/bitwidth.
9175      return {AssignedReg};
9176    }
9177  }
9178
9179  for (; NumRegs; --NumRegs, ++I) {
9180    assert(I != RC->end() && "Ran out of registers to allocate!");
9181    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
9182    Regs.push_back(R);
9183  }
9184
9185  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
9186  return std::nullopt;
9187}
9188
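/// Return the index into \p AsmNodeOperands of the flag word describing inline
/// asm operand number \p OperandNo, skipping over the register-definition and
/// memory operands emitted before it.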
9189static unsigned
9190findMatchingInlineAsmOperand(unsigned OperandNo,
9191                             const std::vector<SDValue> &AsmNodeOperands) {
9192  // Scan until we find the definition we already emitted of this operand.
9193  unsigned CurOp = InlineAsm::Op_FirstOperand;
9194  for (; OperandNo; --OperandNo) {
9195    // Advance to the next operand.
9196    unsigned OpFlag = AsmNodeOperands[CurOp]->getAsZExtVal();
9197    const InlineAsm::Flag F(OpFlag);
9198    assert(
9199        (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) &&
9200        "Skipped past definitions?");
9201    CurOp += F.getNumOperandRegisters() + 1;
9202  }
9203  return CurOp;
9204}
9205
9206namespace {
9207
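/// Accumulates the extra flag bits (HasSideEffects, IsAlignStack, AsmDialect,
/// MayLoad/MayStore) that are later attached to the INLINEASM node as its
/// extra-info operand.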
9208class ExtraFlags {
9209  unsigned Flags = 0;
9210
9211public:
9212  explicit ExtraFlags(const CallBase &Call) {
9213    const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9214    if (IA->hasSideEffects())
9215      Flags |= InlineAsm::Extra_HasSideEffects;
9216    if (IA->isAlignStack())
9217      Flags |= InlineAsm::Extra_IsAlignStack;
9218    if (Call.isConvergent())
9219      Flags |= InlineAsm::Extra_IsConvergent;
9220    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
9221  }
9222
9223  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
9224    // Ideally, we would only check against memory constraints.  However, the
9225    // meaning of an Other constraint can be target-specific and we can't easily
9226    // reason about it.  Therefore, be conservative and set MayLoad/MayStore
9227    // for Other constraints as well.
9228    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
9229        OpInfo.ConstraintType == TargetLowering::C_Other) {
9230      if (OpInfo.Type == InlineAsm::isInput)
9231        Flags |= InlineAsm::Extra_MayLoad;
9232      else if (OpInfo.Type == InlineAsm::isOutput)
9233        Flags |= InlineAsm::Extra_MayStore;
9234      else if (OpInfo.Type == InlineAsm::isClobber)
9235        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
9236    }
9237  }
9238
9239  unsigned get() const { return Flags; }
9240};
9241
9242} // end anonymous namespace
9243
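/// Return true if \p Op is a GlobalAddress node referring to a Function,
/// excluding dllimport functions (see the comment below for why).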
9244static bool isFunction(SDValue Op) {
9245  if (Op && Op.getOpcode() == ISD::GlobalAddress) {
9246    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9247      auto Fn = dyn_cast_or_null<Function>(GA->getGlobal());
9248
      // A normal "call dllimport func" instruction (non-inline-asm) forces
      // indirect access via its call opcode, and the asm printer then emits an
      // indirect-symbol marker (e.g. "*") based on that opcode. Inline asm
      // cannot be handled that way today (in fact, this is similar to the
      // "Data Access" case), so dllimport functions are ignored here.
9254      if (Fn && !Fn->hasDLLImportStorageClass())
9255        return true;
9256    }
9257  }
9258  return false;
9259}
9260
9261/// visitInlineAsm - Handle a call to an InlineAsm object.
9262void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call,
9263                                         const BasicBlock *EHPadBB) {
9264  const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand());
9265
9266  /// ConstraintOperands - Information about all of the constraints.
9267  SmallVector<SDISelAsmOperandInfo, 16> ConstraintOperands;
9268
9269  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9270  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
9271      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), Call);
9272
9273  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
9274  // AsmDialect, MayLoad, MayStore).
9275  bool HasSideEffect = IA->hasSideEffects();
9276  ExtraFlags ExtraInfo(Call);
9277
9278  for (auto &T : TargetConstraints) {
9279    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
9280    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();
9281
9282    if (OpInfo.CallOperandVal)
9283      OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
9284
9285    if (!HasSideEffect)
9286      HasSideEffect = OpInfo.hasMemory(TLI);
9287
9288    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
9289    // FIXME: Could we compute this on OpInfo rather than T?
9290
9291    // Compute the constraint code and ConstraintType to use.
9292    TLI.ComputeConstraintToUse(T, SDValue());
9293
9294    if (T.ConstraintType == TargetLowering::C_Immediate &&
9295        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting the diagnostic, as we do for the "n"
      // constraint, because inlining could still turn this operand into an
      // integer constant.
9298      return emitInlineAsmError(Call, "constraint '" + Twine(T.ConstraintCode) +
9299                                          "' expects an integer constant "
9300                                          "expression");
9301
9302    ExtraInfo.update(T);
9303  }
9304
9305  // We won't need to flush pending loads if this asm doesn't touch
9306  // memory and is nonvolatile.
9307  SDValue Glue, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();
9308
9309  bool EmitEHLabels = isa<InvokeInst>(Call);
9310  if (EmitEHLabels) {
9311    assert(EHPadBB && "InvokeInst must have an EHPadBB");
9312  }
9313  bool IsCallBr = isa<CallBrInst>(Call);
9314
9315  if (IsCallBr || EmitEHLabels) {
9316    // If this is a callbr or invoke we need to flush pending exports since
9317    // inlineasm_br and invoke are terminators.
9318    // We need to do this before nodes are glued to the inlineasm_br node.
9319    Chain = getControlRoot();
9320  }
9321
9322  MCSymbol *BeginLabel = nullptr;
9323  if (EmitEHLabels) {
9324    Chain = lowerStartEH(Chain, EHPadBB, BeginLabel);
9325  }
9326
9327  int OpNo = -1;
9328  SmallVector<StringRef> AsmStrs;
9329  IA->collectAsmStrs(AsmStrs);
9330
9331  // Second pass over the constraints: compute which constraint option to use.
9332  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9333    if (OpInfo.hasArg() || OpInfo.Type == InlineAsm::isOutput)
9334      OpNo++;
9335
9336    // If this is an output operand with a matching input operand, look up the
9337    // matching input. If their types mismatch, e.g. one is an integer, the
9338    // other is floating point, or their sizes are different, flag it as an
9339    // error.
9340    if (OpInfo.hasMatchingInput()) {
9341      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
9342      patchMatchingInput(OpInfo, Input, DAG);
9343    }
9344
9345    // Compute the constraint code and ConstraintType to use.
9346    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
9347
9348    if ((OpInfo.ConstraintType == TargetLowering::C_Memory &&
9349         OpInfo.Type == InlineAsm::isClobber) ||
9350        OpInfo.ConstraintType == TargetLowering::C_Address)
9351      continue;
9352
    // In the Linux PIC model, there are 4 cases of value/label addressing:
    //
    // 1: Function call or label jmp inside the module.
    // 2: Data access (such as a global or static variable) inside the module.
    // 3: Function call or label jmp outside the module.
    // 4: Data access (such as a global variable) outside the module.
    //
    // Because the LLVM inline asm machinery is designed not to "recognize"
    // the asm text, it is hard to treat memory addressing differently for the
    // same value/address used in different instructions. For example, in the
    // PIC model a function call may go through the PLT or be directly
    // PC-relative, while a lea/mov of a function address may go through the
    // GOT.
    //
    // Here we try to "recognize" function calls for cases 1 and 3 in inline
    // asm and adjust their constraints accordingly.
    //
    // TODO: Current inline asm discourages jumping to labels outside the
    // module, so jumps to function labels are not handled here; enhance this
    // (especially for the PIC model) if a meaningful need arises.
9372    if (OpInfo.isIndirect && isFunction(OpInfo.CallOperand) &&
9373        TLI.isInlineAsmTargetBranch(AsmStrs, OpNo) &&
9374        TM.getCodeModel() != CodeModel::Large) {
9375      OpInfo.isIndirect = false;
9376      OpInfo.ConstraintType = TargetLowering::C_Address;
9377    }
9378
    // If this is a memory input and the operand is not already indirect, do
    // what is needed to provide an address for the memory input.
9381    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
9382        !OpInfo.isIndirect) {
9383      assert((OpInfo.isMultipleAlternative ||
9384              (OpInfo.Type == InlineAsm::isInput)) &&
9385             "Can only indirectify direct input operands!");
9386
9387      // Memory operands really want the address of the value.
9388      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);
9389
9390      // There is no longer a Value* corresponding to this operand.
9391      OpInfo.CallOperandVal = nullptr;
9392
9393      // It is now an indirect operand.
9394      OpInfo.isIndirect = true;
9395    }
9396
9397  }
9398
9399  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
9400  std::vector<SDValue> AsmNodeOperands;
9401  AsmNodeOperands.push_back(SDValue());  // reserve space for input chain
9402  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
9403      IA->getAsmString().c_str(), TLI.getProgramPointerTy(DAG.getDataLayout())));
9404
9405  // If we have a !srcloc metadata node associated with it, we want to attach
9406  // this to the ultimately generated inline asm machineinstr.  To do this, we
9407  // pass in the third operand as this (potentially null) inline asm MDNode.
9408  const MDNode *SrcLoc = Call.getMetadata("srcloc");
9409  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));
9410
9411  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
9412  // bits as operand 3.
9413  AsmNodeOperands.push_back(DAG.getTargetConstant(
9414      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9415
  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and outputs.
9418  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9419    // Assign Registers.
9420    SDISelAsmOperandInfo &RefOpInfo =
9421        OpInfo.isMatchingInputConstraint()
9422            ? ConstraintOperands[OpInfo.getMatchedOperand()]
9423            : OpInfo;
9424    const auto RegError =
9425        getRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);
9426    if (RegError) {
9427      const MachineFunction &MF = DAG.getMachineFunction();
9428      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9429      const char *RegName = TRI.getName(*RegError);
9430      emitInlineAsmError(Call, "register '" + Twine(RegName) +
9431                                   "' allocated for constraint '" +
9432                                   Twine(OpInfo.ConstraintCode) +
9433                                   "' does not match required type");
9434      return;
9435    }
9436
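    // Emit an error if any register assigned to this operand is one the
    // target reports as read-only for inline asm (TRI.isInlineAsmReadOnlyReg).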
9437    auto DetectWriteToReservedRegister = [&]() {
9438      const MachineFunction &MF = DAG.getMachineFunction();
9439      const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9440      for (unsigned Reg : OpInfo.AssignedRegs.Regs) {
9441        if (Register::isPhysicalRegister(Reg) &&
9442            TRI.isInlineAsmReadOnlyReg(MF, Reg)) {
9443          const char *RegName = TRI.getName(Reg);
9444          emitInlineAsmError(Call, "write to reserved register '" +
9445                                       Twine(RegName) + "'");
9446          return true;
9447        }
9448      }
9449      return false;
9450    };
9451    assert((OpInfo.ConstraintType != TargetLowering::C_Address ||
9452            (OpInfo.Type == InlineAsm::isInput &&
9453             !OpInfo.isMatchingInputConstraint())) &&
9454           "Only address as input operand is allowed.");
9455
9456    switch (OpInfo.Type) {
9457    case InlineAsm::isOutput:
9458      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9459        const InlineAsm::ConstraintCode ConstraintID =
9460            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9461        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9462               "Failed to convert memory constraint code to constraint id.");
9463
9464        // Add information to the INLINEASM node to know about this output.
9465        InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1);
9466        OpFlags.setMemConstraint(ConstraintID);
9467        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
9468                                                        MVT::i32));
9469        AsmNodeOperands.push_back(OpInfo.CallOperand);
9470      } else {
9471        // Otherwise, this outputs to a register (directly for C_Register /
9472        // C_RegisterClass, and a target-defined fashion for
9473        // C_Immediate/C_Other). Find a register that we can use.
9474        if (OpInfo.AssignedRegs.Regs.empty()) {
9475          emitInlineAsmError(
9476              Call, "couldn't allocate output register for constraint '" +
9477                        Twine(OpInfo.ConstraintCode) + "'");
9478          return;
9479        }
9480
9481        if (DetectWriteToReservedRegister())
9482          return;
9483
9484        // Add information to the INLINEASM node to know that this register is
9485        // set.
9486        OpInfo.AssignedRegs.AddInlineAsmOperands(
9487            OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber
9488                                  : InlineAsm::Kind::RegDef,
9489            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
9490      }
9491      break;
9492
9493    case InlineAsm::isInput:
9494    case InlineAsm::isLabel: {
9495      SDValue InOperandVal = OpInfo.CallOperand;
9496
9497      if (OpInfo.isMatchingInputConstraint()) {
9498        // If this is required to match an output register we have already set,
9499        // just use its register.
9500        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
9501                                                  AsmNodeOperands);
9502        InlineAsm::Flag Flag(AsmNodeOperands[CurOp]->getAsZExtVal());
9503        if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) {
9504          if (OpInfo.isIndirect) {
9505            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
9506            emitInlineAsmError(Call, "inline asm not supported yet: "
9507                                     "don't know how to handle tied "
9508                                     "indirect register inputs");
9509            return;
9510          }
9511
9512          SmallVector<unsigned, 4> Regs;
9513          MachineFunction &MF = DAG.getMachineFunction();
9514          MachineRegisterInfo &MRI = MF.getRegInfo();
9515          const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
9516          auto *R = cast<RegisterSDNode>(AsmNodeOperands[CurOp+1]);
9517          Register TiedReg = R->getReg();
9518          MVT RegVT = R->getSimpleValueType(0);
9519          const TargetRegisterClass *RC =
9520              TiedReg.isVirtual()     ? MRI.getRegClass(TiedReg)
9521              : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT)
9522                                      : TRI.getMinimalPhysRegClass(TiedReg);
9523          for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i)
9524            Regs.push_back(MRI.createVirtualRegister(RC));
9525
9526          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());
9527
9528          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the newly created virtual registers.
9530          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue, &Call);
9531          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, true,
9532                                           OpInfo.getMatchedOperand(), dl, DAG,
9533                                           AsmNodeOperands);
9534          break;
9535        }
9536
9537        assert(Flag.isMemKind() && "Unknown matching constraint!");
9538        assert(Flag.getNumOperandRegisters() == 1 &&
9539               "Unexpected number of operands");
9540        // Add information to the INLINEASM node to know about this input.
9541        // See InlineAsm.h isUseOperandTiedToDef.
9542        Flag.clearMemConstraint();
9543        Flag.setMatchingOp(OpInfo.getMatchedOperand());
9544        AsmNodeOperands.push_back(DAG.getTargetConstant(
9545            Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9546        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
9547        break;
9548      }
9549
9550      // Treat indirect 'X' constraint as memory.
9551      if (OpInfo.ConstraintType == TargetLowering::C_Other &&
9552          OpInfo.isIndirect)
9553        OpInfo.ConstraintType = TargetLowering::C_Memory;
9554
9555      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
9556          OpInfo.ConstraintType == TargetLowering::C_Other) {
9557        std::vector<SDValue> Ops;
9558        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
9559                                          Ops, DAG);
9560        if (Ops.empty()) {
9561          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
9562            if (isa<ConstantSDNode>(InOperandVal)) {
9563              emitInlineAsmError(Call, "value out of range for constraint '" +
9564                                           Twine(OpInfo.ConstraintCode) + "'");
9565              return;
9566            }
9567
9568          emitInlineAsmError(Call,
9569                             "invalid operand for inline asm constraint '" +
9570                                 Twine(OpInfo.ConstraintCode) + "'");
9571          return;
9572        }
9573
9574        // Add information to the INLINEASM node to know about this input.
9575        InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size());
9576        AsmNodeOperands.push_back(DAG.getTargetConstant(
9577            ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
9578        llvm::append_range(AsmNodeOperands, Ops);
9579        break;
9580      }
9581
9582      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
9583        assert((OpInfo.isIndirect ||
9584                OpInfo.ConstraintType != TargetLowering::C_Memory) &&
9585               "Operand must be indirect to be a mem!");
9586        assert(InOperandVal.getValueType() ==
9587                   TLI.getPointerTy(DAG.getDataLayout()) &&
9588               "Memory operands expect pointer values");
9589
9590        const InlineAsm::ConstraintCode ConstraintID =
9591            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9592        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9593               "Failed to convert memory constraint code to constraint id.");
9594
9595        // Add information to the INLINEASM node to know about this input.
9596        InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9597        ResOpType.setMemConstraint(ConstraintID);
9598        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
9599                                                        getCurSDLoc(),
9600                                                        MVT::i32));
9601        AsmNodeOperands.push_back(InOperandVal);
9602        break;
9603      }
9604
9605      if (OpInfo.ConstraintType == TargetLowering::C_Address) {
9606        const InlineAsm::ConstraintCode ConstraintID =
9607            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
9608        assert(ConstraintID != InlineAsm::ConstraintCode::Unknown &&
9609               "Failed to convert memory constraint code to constraint id.");
9610
9611        InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1);
9612
9613        SDValue AsmOp = InOperandVal;
9614        if (isFunction(InOperandVal)) {
9615          auto *GA = cast<GlobalAddressSDNode>(InOperandVal);
9616          ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1);
9617          AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(),
9618                                             InOperandVal.getValueType(),
9619                                             GA->getOffset());
9620        }
9621
9622        // Add information to the INLINEASM node to know about this input.
9623        ResOpType.setMemConstraint(ConstraintID);
9624
9625        AsmNodeOperands.push_back(
9626            DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32));
9627
9628        AsmNodeOperands.push_back(AsmOp);
9629        break;
9630      }
9631
9632      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
9633              OpInfo.ConstraintType == TargetLowering::C_Register) &&
9634             "Unknown constraint type!");
9635
9636      // TODO: Support this.
9637      if (OpInfo.isIndirect) {
9638        emitInlineAsmError(
9639            Call, "Don't know how to handle indirect register inputs yet "
9640                  "for constraint '" +
9641                      Twine(OpInfo.ConstraintCode) + "'");
9642        return;
9643      }
9644
9645      // Copy the input into the appropriate registers.
9646      if (OpInfo.AssignedRegs.Regs.empty()) {
9647        emitInlineAsmError(Call,
9648                           "couldn't allocate input reg for constraint '" +
9649                               Twine(OpInfo.ConstraintCode) + "'");
9650        return;
9651      }
9652
9653      if (DetectWriteToReservedRegister())
9654        return;
9655
9656      SDLoc dl = getCurSDLoc();
9657
9658      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Glue,
9659                                        &Call);
9660
9661      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::RegUse, false,
9662                                               0, dl, DAG, AsmNodeOperands);
9663      break;
9664    }
9665    case InlineAsm::isClobber:
9666      // Add the clobbered value to the operand list, so that the register
9667      // allocator is aware that the physreg got clobbered.
9668      if (!OpInfo.AssignedRegs.Regs.empty())
9669        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind::Clobber,
9670                                                 false, 0, getCurSDLoc(), DAG,
9671                                                 AsmNodeOperands);
9672      break;
9673    }
9674  }
9675
9676  // Finish up input operands.  Set the input chain and add the flag last.
9677  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
9678  if (Glue.getNode()) AsmNodeOperands.push_back(Glue);
9679
9680  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
9681  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
9682                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
9683  Glue = Chain.getValue(1);
9684
9685  // Do additional work to generate outputs.
9686
9687  SmallVector<EVT, 1> ResultVTs;
9688  SmallVector<SDValue, 1> ResultValues;
9689  SmallVector<SDValue, 8> OutChains;
9690
9691  llvm::Type *CallResultType = Call.getType();
9692  ArrayRef<Type *> ResultTypes;
9693  if (StructType *StructResult = dyn_cast<StructType>(CallResultType))
9694    ResultTypes = StructResult->elements();
9695  else if (!CallResultType->isVoidTy())
9696    ResultTypes = ArrayRef(CallResultType);
9697
9698  auto CurResultType = ResultTypes.begin();
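  // Record one asm result value, bitcasting or truncating it to the next
  // expected IR result type when the register VT differs from it.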
9699  auto handleRegAssign = [&](SDValue V) {
9700    assert(CurResultType != ResultTypes.end() && "Unexpected value");
9701    assert((*CurResultType)->isSized() && "Unexpected unsized type");
9702    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
9703    ++CurResultType;
    // If the type of the inline asm call site return value differs from the
    // type of the asm output but has the same size, bitcast it.  One example
    // of this is vectors with a different width / number of elements.  This
    // can happen for register classes that can contain multiple different
    // value types; the physical or virtual register allocated may not have
    // the same VT as was expected.
9710    //
9711    // This can also happen for a return value that disagrees with the register
9712    // class it is put in, eg. a double in a general-purpose register on a
9713    // 32-bit machine.
9714    if (ResultVT != V.getValueType() &&
9715        ResultVT.getSizeInBits() == V.getValueSizeInBits())
9716      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
9717    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
9718             V.getValueType().isInteger()) {
9719      // If a result value was tied to an input value, the computed result
9720      // may have a wider width than the expected result.  Extract the
9721      // relevant portion.
9722      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
9723    }
9724    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
9725    ResultVTs.push_back(ResultVT);
9726    ResultValues.push_back(V);
9727  };
9728
9729  // Deal with output operands.
9730  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
9731    if (OpInfo.Type == InlineAsm::isOutput) {
9732      SDValue Val;
9733      // Skip trivial output operands.
9734      if (OpInfo.AssignedRegs.Regs.empty())
9735        continue;
9736
9737      switch (OpInfo.ConstraintType) {
9738      case TargetLowering::C_Register:
9739      case TargetLowering::C_RegisterClass:
9740        Val = OpInfo.AssignedRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
9741                                                  Chain, &Glue, &Call);
9742        break;
9743      case TargetLowering::C_Immediate:
9744      case TargetLowering::C_Other:
9745        Val = TLI.LowerAsmOutputForConstraint(Chain, Glue, getCurSDLoc(),
9746                                              OpInfo, DAG);
9747        break;
9748      case TargetLowering::C_Memory:
9749        break; // Already handled.
9750      case TargetLowering::C_Address:
9751        break; // Silence warning.
9752      case TargetLowering::C_Unknown:
9753        assert(false && "Unexpected unknown constraint");
9754      }
9755
      // Indirect outputs manifest as stores. Record the output chains.
9757      if (OpInfo.isIndirect) {
9758        const Value *Ptr = OpInfo.CallOperandVal;
9759        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
9760        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
9761                                     MachinePointerInfo(Ptr));
9762        OutChains.push_back(Store);
9763      } else {
        // Otherwise the values copied from the assigned registers become the
        // call's results.
9765        assert(!Call.getType()->isVoidTy() && "Bad inline asm!");
9766        if (Val.getOpcode() == ISD::MERGE_VALUES) {
9767          for (const SDValue &V : Val->op_values())
9768            handleRegAssign(V);
9769        } else
9770          handleRegAssign(Val);
9771      }
9772    }
9773  }
9774
9775  // Set results.
9776  if (!ResultValues.empty()) {
9777    assert(CurResultType == ResultTypes.end() &&
9778           "Mismatch in number of ResultTypes");
9779    assert(ResultValues.size() == ResultTypes.size() &&
9780           "Mismatch in number of output operands in asm result");
9781
9782    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
9783                            DAG.getVTList(ResultVTs), ResultValues);
9784    setValue(&Call, V);
9785  }
9786
9787  // Collect store chains.
9788  if (!OutChains.empty())
9789    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);
9790
9791  if (EmitEHLabels) {
9792    Chain = lowerEndEH(Chain, cast<InvokeInst>(&Call), EHPadBB, BeginLabel);
9793  }
9794
  // Only update the root if the inline assembly has a memory effect.
9796  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr ||
9797      EmitEHLabels)
9798    DAG.setRoot(Chain);
9799}
9800
9801void SelectionDAGBuilder::emitInlineAsmError(const CallBase &Call,
9802                                             const Twine &Message) {
9803  LLVMContext &Ctx = *DAG.getContext();
9804  Ctx.emitError(&Call, Message);
9805
9806  // Make sure we leave the DAG in a valid state
9807  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9808  SmallVector<EVT, 1> ValueVTs;
9809  ComputeValueVTs(TLI, DAG.getDataLayout(), Call.getType(), ValueVTs);
9810
9811  if (ValueVTs.empty())
9812    return;
9813
9814  SmallVector<SDValue, 1> Ops;
9815  for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
9816    Ops.push_back(DAG.getUNDEF(ValueVTs[i]));
9817
9818  setValue(&Call, DAG.getMergeValues(Ops, getCurSDLoc()));
9819}
9820
9821void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
9822  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
9823                          MVT::Other, getRoot(),
9824                          getValue(I.getArgOperand(0)),
9825                          DAG.getSrcValue(I.getArgOperand(0))));
9826}
9827
9828void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
9829  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9830  const DataLayout &DL = DAG.getDataLayout();
9831  SDValue V = DAG.getVAArg(
9832      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
9833      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
9834      DL.getABITypeAlign(I.getType()).value());
9835  DAG.setRoot(V.getValue(1));
9836
9837  if (I.getType()->isPointerTy())
9838    V = DAG.getPtrExtOrTrunc(
9839        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
9840  setValue(&I, V);
9841}
9842
9843void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
9844  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
9845                          MVT::Other, getRoot(),
9846                          getValue(I.getArgOperand(0)),
9847                          DAG.getSrcValue(I.getArgOperand(0))));
9848}
9849
9850void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
9851  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
9852                          MVT::Other, getRoot(),
9853                          getValue(I.getArgOperand(0)),
9854                          getValue(I.getArgOperand(1)),
9855                          DAG.getSrcValue(I.getArgOperand(0)),
9856                          DAG.getSrcValue(I.getArgOperand(1))));
9857}
9858
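/// If \p I carries !range metadata describing a non-wrapping range that starts
/// at zero, wrap \p Op in an AssertZext of the narrowest integer type that
/// covers the range's unsigned maximum (e.g. a range of [0, 256) gives an
/// AssertZext to i8); otherwise return \p Op unchanged.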
9859SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
9860                                                    const Instruction &I,
9861                                                    SDValue Op) {
9862  const MDNode *Range = getRangeMetadata(I);
9863  if (!Range)
9864    return Op;
9865
9866  ConstantRange CR = getConstantRangeFromMetadata(*Range);
9867  if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
9868    return Op;
9869
9870  APInt Lo = CR.getUnsignedMin();
9871  if (!Lo.isMinValue())
9872    return Op;
9873
9874  APInt Hi = CR.getUnsignedMax();
9875  unsigned Bits = std::max(Hi.getActiveBits(),
9876                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));
9877
9878  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);
9879
9880  SDLoc SL = getCurSDLoc();
9881
9882  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
9883                             DAG.getValueType(SmallVT));
9884  unsigned NumVals = Op.getNode()->getNumValues();
9885  if (NumVals == 1)
9886    return ZExt;
9887
9888  SmallVector<SDValue, 4> Ops;
9889
9890  Ops.push_back(ZExt);
9891  for (unsigned I = 1; I != NumVals; ++I)
9892    Ops.push_back(Op.getValue(I));
9893
9894  return DAG.getMergeValues(Ops, SL);
9895}
9896
/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
9898/// the call being lowered.
9899///
9900/// This is a helper for lowering intrinsics that follow a target calling
9901/// convention or require stack pointer adjustment. Only a subset of the
9902/// intrinsic's operands need to participate in the calling convention.
9903void SelectionDAGBuilder::populateCallLoweringInfo(
9904    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
9905    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
9906    AttributeSet RetAttrs, bool IsPatchPoint) {
9907  TargetLowering::ArgListTy Args;
9908  Args.reserve(NumArgs);
9909
9910  // Populate the argument list.
9911  // Attributes for args start at offset 1, after the return attribute.
9912  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs;
9913       ArgI != ArgE; ++ArgI) {
9914    const Value *V = Call->getOperand(ArgI);
9915
9916    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");
9917
9918    TargetLowering::ArgListEntry Entry;
9919    Entry.Node = getValue(V);
9920    Entry.Ty = V->getType();
9921    Entry.setAttributes(Call, ArgI);
9922    Args.push_back(Entry);
9923  }
9924
9925  CLI.setDebugLoc(getCurSDLoc())
9926      .setChain(getRoot())
9927      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args),
9928                 RetAttrs)
9929      .setDiscardResult(Call->use_empty())
9930      .setIsPatchPoint(IsPatchPoint)
9931      .setIsPreallocated(
9932          Call->countOperandBundlesOfType(LLVMContext::OB_preallocated) != 0);
9933}
9934
9935/// Add a stack map intrinsic call's live variable operands to a stackmap
9936/// or patchpoint target node's operand list.
9937///
9938/// Constants are converted to TargetConstants purely as an optimization to
9939/// avoid constant materialization and register allocation.
9940///
9941/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
9943/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
9944/// address materialization and register allocation, but may also be required
9945/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
9946/// alloca in the entry block, then the runtime may assume that the alloca's
9947/// StackMap location can be read immediately after compilation and that the
9948/// location is valid at any point during execution (this is similar to the
9949/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
9950/// only available in a register, then the runtime would need to trap when
9951/// execution reaches the StackMap in order to read the alloca's location.
9952static void addStackMapLiveVars(const CallBase &Call, unsigned StartIdx,
9953                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
9954                                SelectionDAGBuilder &Builder) {
9955  SelectionDAG &DAG = Builder.DAG;
9956  for (unsigned I = StartIdx; I < Call.arg_size(); I++) {
9957    SDValue Op = Builder.getValue(Call.getArgOperand(I));
9958
9959    // Things on the stack are pointer-typed, meaning that they are already
9960    // legal and can be emitted directly to target nodes.
9961    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op)) {
9962      Ops.push_back(DAG.getTargetFrameIndex(FI->getIndex(), Op.getValueType()));
9963    } else {
9964      // Otherwise emit a target independent node to be legalised.
9965      Ops.push_back(Builder.getValue(Call.getArgOperand(I)));
9966    }
9967  }
9968}
9969
9970/// Lower llvm.experimental.stackmap.
9971void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
9972  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
9973  //                                  [live variables...])
9974
9975  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");
9976
9977  SDValue Chain, InGlue, Callee;
9978  SmallVector<SDValue, 32> Ops;
9979
9980  SDLoc DL = getCurSDLoc();
9981  Callee = getValue(CI.getCalledOperand());
9982
9983  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
9985  // intrinsic, this won't be lowered to a function call. This means we don't
9986  // have to worry about calling conventions and target specific lowering code.
9987  // Instead we perform the call lowering right here.
9988  //
9989  // chain, flag = CALLSEQ_START(chain, 0, 0)
9990  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
9991  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
9992  //
9993  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
9994  InGlue = Chain.getValue(1);
9995
9996  // Add the STACKMAP operands, starting with DAG house-keeping.
9997  Ops.push_back(Chain);
9998  Ops.push_back(InGlue);
9999
10000  // Add the <id>, <numShadowBytes> operands.
10001  //
10002  // These do not require legalisation, and can be emitted directly to target
10003  // constant nodes.
10004  SDValue ID = getValue(CI.getArgOperand(0));
10005  assert(ID.getValueType() == MVT::i64);
10006  SDValue IDConst =
10007      DAG.getTargetConstant(ID->getAsZExtVal(), DL, ID.getValueType());
10008  Ops.push_back(IDConst);
10009
10010  SDValue Shad = getValue(CI.getArgOperand(1));
10011  assert(Shad.getValueType() == MVT::i32);
10012  SDValue ShadConst =
10013      DAG.getTargetConstant(Shad->getAsZExtVal(), DL, Shad.getValueType());
10014  Ops.push_back(ShadConst);
10015
10016  // Add the live variables.
10017  addStackMapLiveVars(CI, 2, DL, Ops, *this);
10018
10019  // Create the STACKMAP node.
10020  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10021  Chain = DAG.getNode(ISD::STACKMAP, DL, NodeTys, Ops);
10022  InGlue = Chain.getValue(1);
10023
10024  Chain = DAG.getCALLSEQ_END(Chain, 0, 0, InGlue, DL);
10025
10026  // Stackmaps don't generate values, so nothing goes into the NodeMap.
10027
10028  // Set the root to the target-lowered call chain.
10029  DAG.setRoot(Chain);
10030
10031  // Inform the Frame Information that we have a stackmap in this function.
10032  FuncInfo.MF->getFrameInfo().setHasStackMap();
10033}
10034
10035/// Lower llvm.experimental.patchpoint directly to its target opcode.
10036void SelectionDAGBuilder::visitPatchpoint(const CallBase &CB,
10037                                          const BasicBlock *EHPadBB) {
10038  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
10039  //                                                 i32 <numBytes>,
10040  //                                                 i8* <target>,
10041  //                                                 i32 <numArgs>,
10042  //                                                 [Args...],
10043  //                                                 [live variables...])
10044
10045  CallingConv::ID CC = CB.getCallingConv();
10046  bool IsAnyRegCC = CC == CallingConv::AnyReg;
10047  bool HasDef = !CB.getType()->isVoidTy();
10048  SDLoc dl = getCurSDLoc();
10049  SDValue Callee = getValue(CB.getArgOperand(PatchPointOpers::TargetPos));
10050
10051  // Handle immediate and symbolic callees.
10052  if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
10053    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
10054                                   /*isTarget=*/true);
10055  else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
10056    Callee =  DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
10057                                         SDLoc(SymbolicCallee),
10058                                         SymbolicCallee->getValueType(0));
10059
10060  // Get the real number of arguments participating in the call <numArgs>
10061  SDValue NArgVal = getValue(CB.getArgOperand(PatchPointOpers::NArgPos));
10062  unsigned NumArgs = NArgVal->getAsZExtVal();
10063
10064  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
10065  // Intrinsics include all meta-operands up to but not including CC.
10066  unsigned NumMetaOpers = PatchPointOpers::CCPos;
10067  assert(CB.arg_size() >= NumMetaOpers + NumArgs &&
10068         "Not enough arguments provided to the patchpoint intrinsic");
10069
10070  // For AnyRegCC the arguments are lowered later on manually.
10071  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
10072  Type *ReturnTy =
10073      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CB.getType();
10074
10075  TargetLowering::CallLoweringInfo CLI(DAG);
10076  populateCallLoweringInfo(CLI, &CB, NumMetaOpers, NumCallArgs, Callee,
10077                           ReturnTy, CB.getAttributes().getRetAttrs(), true);
10078  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);
10079
10080  SDNode *CallEnd = Result.second.getNode();
10081  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
10082    CallEnd = CallEnd->getOperand(0).getNode();
10083
10084  /// Get a call instruction from the call sequence chain.
10085  /// Tail calls are not allowed.
10086  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
10087         "Expected a callseq node.");
10088  SDNode *Call = CallEnd->getOperand(0).getNode();
10089  bool HasGlue = Call->getGluedNode();
10090
10091  // Replace the target specific call node with the patchable intrinsic.
10092  SmallVector<SDValue, 8> Ops;
10093
10094  // Push the chain.
10095  Ops.push_back(*(Call->op_begin()));
10096
10097  // Optionally, push the glue (if any).
10098  if (HasGlue)
10099    Ops.push_back(*(Call->op_end() - 1));
10100
10101  // Push the register mask info.
10102  if (HasGlue)
10103    Ops.push_back(*(Call->op_end() - 2));
10104  else
10105    Ops.push_back(*(Call->op_end() - 1));
10106
10107  // Add the <id> and <numBytes> constants.
10108  SDValue IDVal = getValue(CB.getArgOperand(PatchPointOpers::IDPos));
10109  Ops.push_back(DAG.getTargetConstant(IDVal->getAsZExtVal(), dl, MVT::i64));
10110  SDValue NBytesVal = getValue(CB.getArgOperand(PatchPointOpers::NBytesPos));
10111  Ops.push_back(DAG.getTargetConstant(NBytesVal->getAsZExtVal(), dl, MVT::i32));
10112
10113  // Add the callee.
10114  Ops.push_back(Callee);
10115
10116  // Adjust <numArgs> to account for any arguments that have been passed on the
10117  // stack instead.
10118  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
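  // Subtract the Chain, Target and RegMask operands (plus Glue, if present) to
  // get the number of register arguments carried by the call node.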
10119  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
10120  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
10121  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));
10122
10123  // Add the calling convention
10124  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));
10125
10126  // Add the arguments we omitted previously. The register allocator should
10127  // place these in any free register.
10128  if (IsAnyRegCC)
10129    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
10130      Ops.push_back(getValue(CB.getArgOperand(i)));
10131
10132  // Push the arguments from the call instruction.
10133  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
10134  Ops.append(Call->op_begin() + 2, e);
10135
10136  // Push live variables for the stack map.
10137  addStackMapLiveVars(CB, NumMetaOpers + NumArgs, dl, Ops, *this);
10138
10139  SDVTList NodeTys;
10140  if (IsAnyRegCC && HasDef) {
10141    // Create the return types based on the intrinsic definition
10142    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10143    SmallVector<EVT, 3> ValueVTs;
10144    ComputeValueVTs(TLI, DAG.getDataLayout(), CB.getType(), ValueVTs);
10145    assert(ValueVTs.size() == 1 && "Expected only one return value type.");
10146
10147    // There is always a chain and a glue type at the end
10148    ValueVTs.push_back(MVT::Other);
10149    ValueVTs.push_back(MVT::Glue);
10150    NodeTys = DAG.getVTList(ValueVTs);
10151  } else
10152    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
10153
10154  // Replace the target specific call node with a PATCHPOINT node.
10155  SDValue PPV = DAG.getNode(ISD::PATCHPOINT, dl, NodeTys, Ops);
10156
10157  // Update the NodeMap.
10158  if (HasDef) {
10159    if (IsAnyRegCC)
10160      setValue(&CB, SDValue(PPV.getNode(), 0));
10161    else
10162      setValue(&CB, Result.first);
10163  }
10164
10165  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
10166  // call sequence. Furthermore the location of the chain and glue can change
10167  // when the AnyReg calling convention is used and the intrinsic returns a
10168  // value.
10169  if (IsAnyRegCC && HasDef) {
10170    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
10171    SDValue To[] = {PPV.getValue(1), PPV.getValue(2)};
10172    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
10173  } else
10174    DAG.ReplaceAllUsesWith(Call, PPV.getNode());
10175  DAG.DeleteNode(Call);
10176
10177  // Inform the Frame Information that we have a patchpoint in this function.
10178  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
10179}
10180
10181void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
10182                                            unsigned Intrinsic) {
10183  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10184  SDValue Op1 = getValue(I.getArgOperand(0));
10185  SDValue Op2;
10186  if (I.arg_size() > 1)
10187    Op2 = getValue(I.getArgOperand(1));
10188  SDLoc dl = getCurSDLoc();
10189  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
10190  SDValue Res;
10191  SDNodeFlags SDFlags;
10192  if (auto *FPMO = dyn_cast<FPMathOperator>(&I))
10193    SDFlags.copyFMF(*FPMO);
10194
10195  switch (Intrinsic) {
10196  case Intrinsic::vector_reduce_fadd:
10197    if (SDFlags.hasAllowReassociation())
10198      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
10199                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2, SDFlags),
10200                        SDFlags);
10201    else
10202      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FADD, dl, VT, Op1, Op2, SDFlags);
10203    break;
10204  case Intrinsic::vector_reduce_fmul:
10205    if (SDFlags.hasAllowReassociation())
10206      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
10207                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2, SDFlags),
10208                        SDFlags);
10209    else
10210      Res = DAG.getNode(ISD::VECREDUCE_SEQ_FMUL, dl, VT, Op1, Op2, SDFlags);
10211    break;
10212  case Intrinsic::vector_reduce_add:
10213    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
10214    break;
10215  case Intrinsic::vector_reduce_mul:
10216    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
10217    break;
10218  case Intrinsic::vector_reduce_and:
10219    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
10220    break;
10221  case Intrinsic::vector_reduce_or:
10222    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
10223    break;
10224  case Intrinsic::vector_reduce_xor:
10225    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
10226    break;
10227  case Intrinsic::vector_reduce_smax:
10228    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
10229    break;
10230  case Intrinsic::vector_reduce_smin:
10231    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
10232    break;
10233  case Intrinsic::vector_reduce_umax:
10234    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
10235    break;
10236  case Intrinsic::vector_reduce_umin:
10237    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
10238    break;
10239  case Intrinsic::vector_reduce_fmax:
10240    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1, SDFlags);
10241    break;
10242  case Intrinsic::vector_reduce_fmin:
10243    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1, SDFlags);
10244    break;
10245  case Intrinsic::vector_reduce_fmaximum:
10246    Res = DAG.getNode(ISD::VECREDUCE_FMAXIMUM, dl, VT, Op1, SDFlags);
10247    break;
10248  case Intrinsic::vector_reduce_fminimum:
10249    Res = DAG.getNode(ISD::VECREDUCE_FMINIMUM, dl, VT, Op1, SDFlags);
10250    break;
10251  default:
10252    llvm_unreachable("Unhandled vector reduce intrinsic");
10253  }
10254  setValue(&I, Res);
10255}
10256
10257/// Returns an AttributeList representing the attributes applied to the return
10258/// value of the given call.
10259static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
10260  SmallVector<Attribute::AttrKind, 2> Attrs;
10261  if (CLI.RetSExt)
10262    Attrs.push_back(Attribute::SExt);
10263  if (CLI.RetZExt)
10264    Attrs.push_back(Attribute::ZExt);
10265  if (CLI.IsInReg)
10266    Attrs.push_back(Attribute::InReg);
10267
10268  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
10269                            Attrs);
10270}
10271
10272/// TargetLowering::LowerCallTo - This is the default LowerCallTo
10273/// implementation, which just calls LowerCall.
10274/// FIXME: When all targets are
10275/// migrated to using LowerCall, this hook should be integrated into SDISel.
10276std::pair<SDValue, SDValue>
10277TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
10278  // Handle the incoming return values from the call.
10279  CLI.Ins.clear();
10280  Type *OrigRetTy = CLI.RetTy;
10281  SmallVector<EVT, 4> RetTys;
10282  SmallVector<uint64_t, 4> Offsets;
10283  auto &DL = CLI.DAG.getDataLayout();
10284  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets, 0);
10285
10286  if (CLI.IsPostTypeLegalization) {
10287    // If we are lowering a libcall after legalization, split the return type.
10288    SmallVector<EVT, 4> OldRetTys;
10289    SmallVector<uint64_t, 4> OldOffsets;
10290    RetTys.swap(OldRetTys);
10291    Offsets.swap(OldOffsets);
10292
10293    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
10294      EVT RetVT = OldRetTys[i];
10295      uint64_t Offset = OldOffsets[i];
10296      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
10297      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
10298      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
10299      RetTys.append(NumRegs, RegisterVT);
10300      for (unsigned j = 0; j != NumRegs; ++j)
10301        Offsets.push_back(Offset + j * RegisterVTByteSZ);
10302    }
10303  }
10304
10305  SmallVector<ISD::OutputArg, 4> Outs;
10306  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);
10307
10308  bool CanLowerReturn =
10309      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
10310                           CLI.IsVarArg, Outs, CLI.RetTy->getContext());
10311
10312  SDValue DemoteStackSlot;
10313  int DemoteStackIdx = -100;
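  // If the target cannot return this value in registers, demote the return to
  // an sret-style out parameter: allocate a stack slot in the caller and pass
  // its address as a hidden first argument.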
10314  if (!CanLowerReturn) {
10315    // FIXME: equivalent assert?
10316    // assert(!CS.hasInAllocaArgument() &&
10317    //        "sret demotion is incompatible with inalloca");
10318    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
10319    Align Alignment = DL.getPrefTypeAlign(CLI.RetTy);
10320    MachineFunction &MF = CLI.DAG.getMachineFunction();
10321    DemoteStackIdx =
10322        MF.getFrameInfo().CreateStackObject(TySize, Alignment, false);
10323    Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
10324                                              DL.getAllocaAddrSpace());
10325
10326    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
10327    ArgListEntry Entry;
10328    Entry.Node = DemoteStackSlot;
10329    Entry.Ty = StackSlotPtrType;
10330    Entry.IsSExt = false;
10331    Entry.IsZExt = false;
10332    Entry.IsInReg = false;
10333    Entry.IsSRet = true;
10334    Entry.IsNest = false;
10335    Entry.IsByVal = false;
10336    Entry.IsByRef = false;
10337    Entry.IsReturned = false;
10338    Entry.IsSwiftSelf = false;
10339    Entry.IsSwiftAsync = false;
10340    Entry.IsSwiftError = false;
10341    Entry.IsCFGuardTarget = false;
10342    Entry.Alignment = Alignment;
10343    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
10344    CLI.NumFixedArgs += 1;
10345    CLI.getArgs()[0].IndirectType = CLI.RetTy;
10346    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());
10347
10348    // sret demotion isn't compatible with tail-calls, since the sret argument
    // points into the caller's stack frame.
10350    CLI.IsTailCall = false;
10351  } else {
10352    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10353        CLI.RetTy, CLI.CallConv, CLI.IsVarArg, DL);
10354    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
10355      ISD::ArgFlagsTy Flags;
10356      if (NeedsRegBlock) {
10357        Flags.setInConsecutiveRegs();
10358        if (I == RetTys.size() - 1)
10359          Flags.setInConsecutiveRegsLast();
10360      }
10361      EVT VT = RetTys[I];
10362      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10363                                                     CLI.CallConv, VT);
10364      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10365                                                       CLI.CallConv, VT);
10366      for (unsigned i = 0; i != NumRegs; ++i) {
10367        ISD::InputArg MyFlags;
10368        MyFlags.Flags = Flags;
10369        MyFlags.VT = RegisterVT;
10370        MyFlags.ArgVT = VT;
10371        MyFlags.Used = CLI.IsReturnValueUsed;
10372        if (CLI.RetTy->isPointerTy()) {
10373          MyFlags.Flags.setPointer();
10374          MyFlags.Flags.setPointerAddrSpace(
10375              cast<PointerType>(CLI.RetTy)->getAddressSpace());
10376        }
10377        if (CLI.RetSExt)
10378          MyFlags.Flags.setSExt();
10379        if (CLI.RetZExt)
10380          MyFlags.Flags.setZExt();
10381        if (CLI.IsInReg)
10382          MyFlags.Flags.setInReg();
10383        CLI.Ins.push_back(MyFlags);
10384      }
10385    }
10386  }
10387
  // We push the swifterror return as the last element of CLI.Ins.
10389  ArgListTy &Args = CLI.getArgs();
10390  if (supportSwiftError()) {
10391    for (const ArgListEntry &Arg : Args) {
10392      if (Arg.IsSwiftError) {
10393        ISD::InputArg MyFlags;
10394        MyFlags.VT = getPointerTy(DL);
10395        MyFlags.ArgVT = EVT(getPointerTy(DL));
10396        MyFlags.Flags.setSwiftError();
10397        CLI.Ins.push_back(MyFlags);
10398      }
10399    }
10400  }
10401
10402  // Handle all of the outgoing arguments.
10403  CLI.Outs.clear();
10404  CLI.OutVals.clear();
10405  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
10406    SmallVector<EVT, 4> ValueVTs;
10407    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
10408    // FIXME: Split arguments if CLI.IsPostTypeLegalization
10409    Type *FinalType = Args[i].Ty;
10410    if (Args[i].IsByVal)
10411      FinalType = Args[i].IndirectType;
10412    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
10413        FinalType, CLI.CallConv, CLI.IsVarArg, DL);
10414    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
10415         ++Value) {
10416      EVT VT = ValueVTs[Value];
10417      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
10418      SDValue Op = SDValue(Args[i].Node.getNode(),
10419                           Args[i].Node.getResNo() + Value);
10420      ISD::ArgFlagsTy Flags;
10421
      // Certain targets (such as MIPS) may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
10425      const Align OriginalAlignment(getABIAlignmentForCallingConv(ArgTy, DL));
10426      Flags.setOrigAlign(OriginalAlignment);
10427
10428      if (Args[i].Ty->isPointerTy()) {
10429        Flags.setPointer();
10430        Flags.setPointerAddrSpace(
10431            cast<PointerType>(Args[i].Ty)->getAddressSpace());
10432      }
10433      if (Args[i].IsZExt)
10434        Flags.setZExt();
10435      if (Args[i].IsSExt)
10436        Flags.setSExt();
10437      if (Args[i].IsInReg) {
        // If we are using the vectorcall calling convention, a structure that
        // is passed InReg is surely an HVA (homogeneous vector aggregate).
10440        if (CLI.CallConv == CallingConv::X86_VectorCall &&
10441            isa<StructType>(FinalType)) {
          // The first value of the structure is marked as the start of the HVA.
10443          if (0 == Value)
10444            Flags.setHvaStart();
10445          Flags.setHva();
10446        }
10447        // Set InReg Flag
10448        Flags.setInReg();
10449      }
10450      if (Args[i].IsSRet)
10451        Flags.setSRet();
10452      if (Args[i].IsSwiftSelf)
10453        Flags.setSwiftSelf();
10454      if (Args[i].IsSwiftAsync)
10455        Flags.setSwiftAsync();
10456      if (Args[i].IsSwiftError)
10457        Flags.setSwiftError();
10458      if (Args[i].IsCFGuardTarget)
10459        Flags.setCFGuardTarget();
10460      if (Args[i].IsByVal)
10461        Flags.setByVal();
10462      if (Args[i].IsByRef)
10463        Flags.setByRef();
10464      if (Args[i].IsPreallocated) {
10465        Flags.setPreallocated();
10466        // Set the byval flag for CCAssignFn callbacks that don't know about
10467        // preallocated.  This way we can know how many bytes we should've
10468        // allocated and how many bytes a callee cleanup function will pop.  If
10469        // we port preallocated to more targets, we'll have to add custom
10470        // preallocated handling in the various CC lowering callbacks.
10471        Flags.setByVal();
10472      }
10473      if (Args[i].IsInAlloca) {
10474        Flags.setInAlloca();
10475        // Set the byval flag for CCAssignFn callbacks that don't know about
10476        // inalloca.  This way we can know how many bytes we should've allocated
10477        // and how many bytes a callee cleanup function will pop.  If we port
10478        // inalloca to more targets, we'll have to add custom inalloca handling
10479        // in the various CC lowering callbacks.
10480        Flags.setByVal();
10481      }
10482      Align MemAlign;
10483      if (Args[i].IsByVal || Args[i].IsInAlloca || Args[i].IsPreallocated) {
10484        unsigned FrameSize = DL.getTypeAllocSize(Args[i].IndirectType);
10485        Flags.setByValSize(FrameSize);

        // For ByVal, the alignment should come from the frontend; the backend
        // will guess if this info is not there, but there are cases it cannot
        // get right.
10488        if (auto MA = Args[i].Alignment)
10489          MemAlign = *MA;
10490        else
10491          MemAlign = Align(getByValTypeAlignment(Args[i].IndirectType, DL));
10492      } else if (auto MA = Args[i].Alignment) {
10493        MemAlign = *MA;
10494      } else {
10495        MemAlign = OriginalAlignment;
10496      }
10497      Flags.setMemAlign(MemAlign);
10498      if (Args[i].IsNest)
10499        Flags.setNest();
10500      if (NeedsRegBlock)
10501        Flags.setInConsecutiveRegs();
10502
10503      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10504                                                 CLI.CallConv, VT);
10505      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10506                                                        CLI.CallConv, VT);
10507      SmallVector<SDValue, 4> Parts(NumParts);
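      // Extend the value to the register-sized parts according to its
      // signext/zeroext attributes, if any.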
10508      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
10509
10510      if (Args[i].IsSExt)
10511        ExtendKind = ISD::SIGN_EXTEND;
10512      else if (Args[i].IsZExt)
10513        ExtendKind = ISD::ZERO_EXTEND;
10514
      // Conservatively only handle 'returned' on non-vectors that can be
      // lowered, for now.
10517      if (Args[i].IsReturned && !Op.getValueType().isVector() &&
10518          CanLowerReturn) {
10519        assert((CLI.RetTy == Args[i].Ty ||
10520                (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
10521                 CLI.RetTy->getPointerAddressSpace() ==
10522                     Args[i].Ty->getPointerAddressSpace())) &&
10523               RetTys.size() == NumValues && "unexpected use of 'returned'");
10524        // Before passing 'returned' to the target lowering code, ensure that
10525        // either the register MVT and the actual EVT are the same size or that
10526        // the return value and argument are extended in the same way; in these
10527        // cases it's safe to pass the argument register value unchanged as the
10528        // return register value (although it's at the target's option whether
10529        // to do so)
10530        // TODO: allow code generation to take advantage of partially preserved
10531        // registers rather than clobbering the entire register when the
10532        // parameter extension method is not compatible with the return
10533        // extension method
10534        if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
10535            (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
10536             CLI.RetZExt == Args[i].IsZExt))
10537          Flags.setReturned();
10538      }
10539
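      // Split the argument value into the legal register-sized parts expected
      // by the calling convention.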
10540      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT, CLI.CB,
10541                     CLI.CallConv, ExtendKind);
10542
10543      for (unsigned j = 0; j != NumParts; ++j) {
        // If it isn't the first piece, the alignment must be 1.
10545        // For scalable vectors the scalable part is currently handled
10546        // by individual targets, so we just use the known minimum size here.
10547        ISD::OutputArg MyFlags(
10548            Flags, Parts[j].getValueType().getSimpleVT(), VT,
10549            i < CLI.NumFixedArgs, i,
10550            j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
10551        if (NumParts > 1 && j == 0)
10552          MyFlags.Flags.setSplit();
10553        else if (j != 0) {
10554          MyFlags.Flags.setOrigAlign(Align(1));
10555          if (j == NumParts - 1)
10556            MyFlags.Flags.setSplitEnd();
10557        }
10558
10559        CLI.Outs.push_back(MyFlags);
10560        CLI.OutVals.push_back(Parts[j]);
10561      }
10562
10563      if (NeedsRegBlock && Value == NumValues - 1)
10564        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
10565    }
10566  }
10567
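  // Let the target lower the actual call. It returns the new chain and fills
  // InVals with the values produced for each entry in CLI.Ins.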
10568  SmallVector<SDValue, 4> InVals;
10569  CLI.Chain = LowerCall(CLI, InVals);
10570
  // Update CLI.InVals for use outside of this function.
10572  CLI.InVals = InVals;
10573
10574  // Verify that the target's LowerCall behaved as expected.
10575  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
10576         "LowerCall didn't return a valid chain!");
10577  assert((!CLI.IsTailCall || InVals.empty()) &&
10578         "LowerCall emitted a return value for a tail call!");
10579  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
10580         "LowerCall didn't emit the correct number of values!");
10581
10582  // For a tail call, the return value is merely live-out and there aren't
10583  // any nodes in the DAG representing it. Return a special value to
10584  // indicate that a tail call has been emitted and no more Instructions
10585  // should be processed in the current block.
10586  if (CLI.IsTailCall) {
10587    CLI.DAG.setRoot(CLI.Chain);
10588    return std::make_pair(SDValue(), SDValue());
10589  }
10590
10591#ifndef NDEBUG
10592  for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
10593    assert(InVals[i].getNode() && "LowerCall emitted a null value!");
10594    assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
10595           "LowerCall emitted a value with the wrong type!");
10596  }
10597#endif
10598
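  // Reassemble the IR-level return value, either by loading it back from the
  // demoted sret stack slot or by merging the register parts in InVals.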
10599  SmallVector<SDValue, 4> ReturnValues;
10600  if (!CanLowerReturn) {
10601    // The instruction result is the result of loading from the
10602    // hidden sret parameter.
10603    SmallVector<EVT, 1> PVTs;
10604    Type *PtrRetTy =
10605        PointerType::get(OrigRetTy->getContext(), DL.getAllocaAddrSpace());
10606
10607    ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
10608    assert(PVTs.size() == 1 && "Pointers should fit in one register");
10609    EVT PtrVT = PVTs[0];
10610
10611    unsigned NumValues = RetTys.size();
10612    ReturnValues.resize(NumValues);
10613    SmallVector<SDValue, 4> Chains(NumValues);
10614
10615    // An aggregate return value cannot wrap around the address space, so
10616    // offsets to its parts don't wrap either.
10617    SDNodeFlags Flags;
10618    Flags.setNoUnsignedWrap(true);
10619
10620    MachineFunction &MF = CLI.DAG.getMachineFunction();
10621    Align HiddenSRetAlign = MF.getFrameInfo().getObjectAlign(DemoteStackIdx);
10622    for (unsigned i = 0; i < NumValues; ++i) {
10623      SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
10624                                    CLI.DAG.getConstant(Offsets[i], CLI.DL,
10625                                                        PtrVT), Flags);
10626      SDValue L = CLI.DAG.getLoad(
10627          RetTys[i], CLI.DL, CLI.Chain, Add,
10628          MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
10629                                            DemoteStackIdx, Offsets[i]),
10630          HiddenSRetAlign);
10631      ReturnValues[i] = L;
10632      Chains[i] = L.getValue(1);
10633    }
10634
10635    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
10636  } else {
10637    // Collect the legal value parts into potentially illegal values
10638    // that correspond to the original function's return values.
10639    std::optional<ISD::NodeType> AssertOp;
10640    if (CLI.RetSExt)
10641      AssertOp = ISD::AssertSext;
10642    else if (CLI.RetZExt)
10643      AssertOp = ISD::AssertZext;
10644    unsigned CurReg = 0;
10645    for (EVT VT : RetTys) {
10646      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
10647                                                     CLI.CallConv, VT);
10648      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
10649                                                       CLI.CallConv, VT);
10650
10651      ReturnValues.push_back(getCopyFromParts(
10652          CLI.DAG, CLI.DL, &InVals[CurReg], NumRegs, RegisterVT, VT, nullptr,
10653          CLI.Chain, CLI.CallConv, AssertOp));
10654      CurReg += NumRegs;
10655    }
10656
10657    // For a function returning void, there is no return value. We can't create
10658    // such a node, so we just return a null return value in that case. In
10659    // that case, nothing will actually look at the value.
10660    if (ReturnValues.empty())
10661      return std::make_pair(SDValue(), CLI.Chain);
10662  }
10663
10664  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
10665                                CLI.DAG.getVTList(RetTys), ReturnValues);
10666  return std::make_pair(Res, CLI.Chain);
10667}
10668
10669/// Places new result values for the node in Results (their number
10670/// and types must exactly match those of the original return values of
10671/// the node), or leaves Results empty, which indicates that the node is not
10672/// to be custom lowered after all.
10673void TargetLowering::LowerOperationWrapper(SDNode *N,
10674                                           SmallVectorImpl<SDValue> &Results,
10675                                           SelectionDAG &DAG) const {
10676  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
10677
10678  if (!Res.getNode())
10679    return;
10680
10681  // If the original node has one result, take the return value from
10682  // LowerOperation as is. It might not be result number 0.
10683  if (N->getNumValues() == 1) {
10684    Results.push_back(Res);
10685    return;
10686  }
10687
10688  // If the original node has multiple results, then the return node should
10689  // have the same number of results.
  assert((N->getNumValues() == Res->getNumValues()) &&
         "Lowering returned the wrong number of results!");
10692
  // Place the new result values based on N's result numbers.
10694  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
10695    Results.push_back(Res.getValue(I));
10696}
10697
10698SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10699  llvm_unreachable("LowerOperation not implemented for this target!");
10700}
10701
10702void SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V,
10703                                                     unsigned Reg,
10704                                                     ISD::NodeType ExtendType) {
10705  SDValue Op = getNonRegisterValue(V);
10706  assert((Op.getOpcode() != ISD::CopyFromReg ||
10707          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
10708         "Copy from a reg to the same reg!");
10709  assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");
10710
10711  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10712  // If this is an InlineAsm we have to match the registers required, not the
10713  // notional registers required by the type.
10714
10715  RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
10716                   std::nullopt); // This is not an ABI copy.
10717  SDValue Chain = DAG.getEntryNode();
10718
10719  if (ExtendType == ISD::ANY_EXTEND) {
10720    auto PreferredExtendIt = FuncInfo.PreferredExtendType.find(V);
10721    if (PreferredExtendIt != FuncInfo.PreferredExtendType.end())
10722      ExtendType = PreferredExtendIt->second;
10723  }
10724  RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
10725  PendingExports.push_back(Chain);
10726}
10727
10728#include "llvm/CodeGen/SelectionDAGISel.h"
10729
10730/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
10731/// entry block, return true.  This includes arguments used by switches, since
10732/// the switch may expand into multiple basic blocks.
10733static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
10734  // With FastISel active, we may be splitting blocks, so force creation
10735  // of virtual registers for all non-dead arguments.
10736  if (FastISel)
10737    return A->use_empty();
10738
10739  const BasicBlock &Entry = A->getParent()->front();
10740  for (const User *U : A->users())
10741    if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
10742      return false;  // Use not in entry block.
10743
10744  return true;
10745}
10746
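/// Maps an argument to the (alloca, store) pair that copies it into a local
/// stack slot in the entry block; such copies are candidates for elision.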
10747using ArgCopyElisionMapTy =
10748    DenseMap<const Argument *,
10749             std::pair<const AllocaInst *, const StoreInst *>>;
10750
10751/// Scan the entry block of the function in FuncInfo for arguments that look
10752/// like copies into a local alloca. Record any copied arguments in
10753/// ArgCopyElisionCandidates.
10754static void
10755findArgumentCopyElisionCandidates(const DataLayout &DL,
10756                                  FunctionLoweringInfo *FuncInfo,
10757                                  ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
10758  // Record the state of every static alloca used in the entry block. Argument
10759  // allocas are all used in the entry block, so we need approximately as many
10760  // entries as we have arguments.
10761  enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
10762  SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
10763  unsigned NumArgs = FuncInfo->Fn->arg_size();
10764  StaticAllocas.reserve(NumArgs * 2);
10765
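  // Return the tracked state for V if it is (after stripping pointer casts) a
  // static alloca known to FuncInfo, creating an Unknown entry on first use;
  // return nullptr otherwise.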
10766  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
10767    if (!V)
10768      return nullptr;
10769    V = V->stripPointerCasts();
10770    const auto *AI = dyn_cast<AllocaInst>(V);
10771    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
10772      return nullptr;
10773    auto Iter = StaticAllocas.insert({AI, Unknown});
10774    return &Iter.first->second;
10775  };
10776
10777  // Look for stores of arguments to static allocas. Look through bitcasts and
10778  // GEPs to handle type coercions, as long as the alloca is fully initialized
10779  // by the store. Any non-store use of an alloca escapes it and any subsequent
10780  // unanalyzed store might write it.
10781  // FIXME: Handle structs initialized with multiple stores.
10782  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
10783    // Look for stores, and handle non-store uses conservatively.
10784    const auto *SI = dyn_cast<StoreInst>(&I);
10785    if (!SI) {
10786      // We will look through cast uses, so ignore them completely.
10787      if (I.isCast())
10788        continue;
10789      // Ignore debug info and pseudo op intrinsics, they don't escape or store
10790      // to allocas.
10791      if (I.isDebugOrPseudoInst())
10792        continue;
10793      // This is an unknown instruction. Assume it escapes or writes to all
10794      // static alloca operands.
10795      for (const Use &U : I.operands()) {
10796        if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U))
10797          *Info = StaticAllocaInfo::Clobbered;
10798      }
10799      continue;
10800    }
10801
10802    // If the stored value is a static alloca, mark it as escaped.
10803    if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand()))
10804      *Info = StaticAllocaInfo::Clobbered;
10805
10806    // Check if the destination is a static alloca.
10807    const Value *Dst = SI->getPointerOperand()->stripPointerCasts();
10808    StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst);
10809    if (!Info)
10810      continue;
10811    const AllocaInst *AI = cast<AllocaInst>(Dst);
10812
10813    // Skip allocas that have been initialized or clobbered.
10814    if (*Info != StaticAllocaInfo::Unknown)
10815      continue;
10816
10817    // Check if the stored value is an argument, and that this store fully
10818    // initializes the alloca.
10819    // If the argument type has padding bits we can't directly forward a pointer
10820    // as the upper bits may contain garbage.
10821    // Don't elide copies from the same argument twice.
10822    const Value *Val = SI->getValueOperand()->stripPointerCasts();
10823    const auto *Arg = dyn_cast<Argument>(Val);
10824    if (!Arg || Arg->hasPassPointeeByValueCopyAttr() ||
10825        Arg->getType()->isEmptyTy() ||
10826        DL.getTypeStoreSize(Arg->getType()) !=
10827            DL.getTypeAllocSize(AI->getAllocatedType()) ||
10828        !DL.typeSizeEqualsStoreSize(Arg->getType()) ||
10829        ArgCopyElisionCandidates.count(Arg)) {
10830      *Info = StaticAllocaInfo::Clobbered;
10831      continue;
10832    }
10833
10834    LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI
10835                      << '\n');
10836
10837    // Mark this alloca and store for argument copy elision.
10838    *Info = StaticAllocaInfo::Elidable;
10839    ArgCopyElisionCandidates.insert({Arg, {AI, SI}});
10840
10841    // Stop scanning if we've seen all arguments. This will happen early in -O0
10842    // builds, which is useful, because -O0 builds have large entry blocks and
10843    // many allocas.
10844    if (ArgCopyElisionCandidates.size() == NumArgs)
10845      break;
10846  }
10847}
10848
10849/// Try to elide argument copies from memory into a local alloca. Succeeds if
10850/// ArgVal is a load from a suitable fixed stack object.
10851static void tryToElideArgumentCopy(
10852    FunctionLoweringInfo &FuncInfo, SmallVectorImpl<SDValue> &Chains,
10853    DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
10854    SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
10855    ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
10856    ArrayRef<SDValue> ArgVals, bool &ArgHasUses) {
10857  // Check if this is a load from a fixed stack object.
10858  auto *LNode = dyn_cast<LoadSDNode>(ArgVals[0]);
10859  if (!LNode)
10860    return;
10861  auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
10862  if (!FINode)
10863    return;
10864
10865  // Check that the fixed stack object is the right size and alignment.
10866  // Look at the alignment that the user wrote on the alloca instead of looking
10867  // at the stack object.
10868  auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
10869  assert(ArgCopyIter != ArgCopyElisionCandidates.end());
10870  const AllocaInst *AI = ArgCopyIter->second.first;
10871  int FixedIndex = FINode->getIndex();
10872  int &AllocaIndex = FuncInfo.StaticAllocaMap[AI];
10873  int OldIndex = AllocaIndex;
10874  MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
10875  if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
10876    LLVM_DEBUG(
10877        dbgs() << "  argument copy elision failed due to bad fixed stack "
10878                  "object size\n");
10879    return;
10880  }
10881  Align RequiredAlignment = AI->getAlign();
10882  if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
10883    LLVM_DEBUG(dbgs() << "  argument copy elision failed: alignment of alloca "
10884                         "greater than stack argument alignment ("
10885                      << DebugStr(RequiredAlignment) << " vs "
10886                      << DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
10887    return;
10888  }
10889
10890  // Perform the elision. Delete the old stack object and replace its only use
10891  // in the variable info map. Mark the stack object as mutable and aliased.
10892  LLVM_DEBUG({
10893    dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
10894           << "  Replacing frame index " << OldIndex << " with " << FixedIndex
10895           << '\n';
10896  });
10897  MFI.RemoveStackObject(OldIndex);
10898  MFI.setIsImmutableObjectIndex(FixedIndex, false);
10899  MFI.setIsAliasedObjectIndex(FixedIndex, true);
10900  AllocaIndex = FixedIndex;
10901  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
10902  for (SDValue ArgVal : ArgVals)
10903    Chains.push_back(ArgVal.getValue(1));
10904
10905  // Avoid emitting code for the store implementing the copy.
10906  const StoreInst *SI = ArgCopyIter->second.second;
10907  ElidedArgCopyInstrs.insert(SI);
10908
  // Check for uses of the argument again so that we can avoid exporting ArgVal
  // if it isn't used by anything other than the store.
10911  for (const Value *U : Arg.users()) {
10912    if (U != SI) {
10913      ArgHasUses = true;
10914      break;
10915    }
10916  }
10917}
10918
10919void SelectionDAGISel::LowerArguments(const Function &F) {
10920  SelectionDAG &DAG = SDB->DAG;
10921  SDLoc dl = SDB->getCurSDLoc();
10922  const DataLayout &DL = DAG.getDataLayout();
10923  SmallVector<ISD::InputArg, 16> Ins;
10924
10925  // In Naked functions we aren't going to save any registers.
10926  if (F.hasFnAttribute(Attribute::Naked))
10927    return;
10928
10929  if (!FuncInfo->CanLowerReturn) {
10930    // Put in an sret pointer parameter before all the other parameters.
10931    SmallVector<EVT, 1> ValueVTs;
10932    ComputeValueVTs(*TLI, DAG.getDataLayout(),
10933                    PointerType::get(F.getContext(),
10934                                     DAG.getDataLayout().getAllocaAddrSpace()),
10935                    ValueVTs);
10936
10937    // NOTE: Assuming that a pointer will never break down to more than one VT
10938    // or one register.
10939    ISD::ArgFlagsTy Flags;
10940    Flags.setSRet();
10941    MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
10942    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
10943                         ISD::InputArg::NoArgIndex, 0);
10944    Ins.push_back(RetArg);
10945  }
10946
10947  // Look for stores of arguments to static allocas. Mark such arguments with a
10948  // flag to ask the target to give us the memory location of that argument if
10949  // available.
10950  ArgCopyElisionMapTy ArgCopyElisionCandidates;
10951  findArgumentCopyElisionCandidates(DL, FuncInfo.get(),
10952                                    ArgCopyElisionCandidates);
10953
10954  // Set up the incoming argument description vector.
10955  for (const Argument &Arg : F.args()) {
10956    unsigned ArgNo = Arg.getArgNo();
10957    SmallVector<EVT, 4> ValueVTs;
10958    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
10959    bool isArgValueUsed = !Arg.use_empty();
10960    unsigned PartBase = 0;
10961    Type *FinalType = Arg.getType();
10962    if (Arg.hasAttribute(Attribute::ByVal))
10963      FinalType = Arg.getParamByValType();
10964    bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
10965        FinalType, F.getCallingConv(), F.isVarArg(), DL);
10966    for (unsigned Value = 0, NumValues = ValueVTs.size();
10967         Value != NumValues; ++Value) {
10968      EVT VT = ValueVTs[Value];
10969      Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
10970      ISD::ArgFlagsTy Flags;
10971
10973      if (Arg.getType()->isPointerTy()) {
10974        Flags.setPointer();
10975        Flags.setPointerAddrSpace(
10976            cast<PointerType>(Arg.getType())->getAddressSpace());
10977      }
10978      if (Arg.hasAttribute(Attribute::ZExt))
10979        Flags.setZExt();
10980      if (Arg.hasAttribute(Attribute::SExt))
10981        Flags.setSExt();
10982      if (Arg.hasAttribute(Attribute::InReg)) {
        // If we are using the vectorcall calling convention, a structure that
        // is passed InReg is surely an HVA (homogeneous vector aggregate).
10985        if (F.getCallingConv() == CallingConv::X86_VectorCall &&
10986            isa<StructType>(Arg.getType())) {
          // The first value of the structure is marked as the start of the HVA.
10988          if (0 == Value)
10989            Flags.setHvaStart();
10990          Flags.setHva();
10991        }
10992        // Set InReg Flag
10993        Flags.setInReg();
10994      }
10995      if (Arg.hasAttribute(Attribute::StructRet))
10996        Flags.setSRet();
10997      if (Arg.hasAttribute(Attribute::SwiftSelf))
10998        Flags.setSwiftSelf();
10999      if (Arg.hasAttribute(Attribute::SwiftAsync))
11000        Flags.setSwiftAsync();
11001      if (Arg.hasAttribute(Attribute::SwiftError))
11002        Flags.setSwiftError();
11003      if (Arg.hasAttribute(Attribute::ByVal))
11004        Flags.setByVal();
11005      if (Arg.hasAttribute(Attribute::ByRef))
11006        Flags.setByRef();
11007      if (Arg.hasAttribute(Attribute::InAlloca)) {
11008        Flags.setInAlloca();
11009        // Set the byval flag for CCAssignFn callbacks that don't know about
11010        // inalloca.  This way we can know how many bytes we should've allocated
11011        // and how many bytes a callee cleanup function will pop.  If we port
11012        // inalloca to more targets, we'll have to add custom inalloca handling
11013        // in the various CC lowering callbacks.
11014        Flags.setByVal();
11015      }
11016      if (Arg.hasAttribute(Attribute::Preallocated)) {
11017        Flags.setPreallocated();
11018        // Set the byval flag for CCAssignFn callbacks that don't know about
11019        // preallocated.  This way we can know how many bytes we should've
11020        // allocated and how many bytes a callee cleanup function will pop.  If
11021        // we port preallocated to more targets, we'll have to add custom
11022        // preallocated handling in the various CC lowering callbacks.
11023        Flags.setByVal();
11024      }
11025
      // Certain targets (such as MIPS) may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
11029      const Align OriginalAlignment(
11030          TLI->getABIAlignmentForCallingConv(ArgTy, DL));
11031      Flags.setOrigAlign(OriginalAlignment);
11032
11033      Align MemAlign;
11034      Type *ArgMemTy = nullptr;
11035      if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated() ||
11036          Flags.isByRef()) {
11037        if (!ArgMemTy)
11038          ArgMemTy = Arg.getPointeeInMemoryValueType();
11039
11040        uint64_t MemSize = DL.getTypeAllocSize(ArgMemTy);
11041
11042        // For in-memory arguments, size and alignment should be passed from FE.
11043        // BE will guess if this info is not there but there are cases it cannot
11044        // get right.
11045        if (auto ParamAlign = Arg.getParamStackAlign())
11046          MemAlign = *ParamAlign;
11047        else if ((ParamAlign = Arg.getParamAlign()))
11048          MemAlign = *ParamAlign;
11049        else
11050          MemAlign = Align(TLI->getByValTypeAlignment(ArgMemTy, DL));
11051        if (Flags.isByRef())
11052          Flags.setByRefSize(MemSize);
11053        else
11054          Flags.setByValSize(MemSize);
11055      } else if (auto ParamAlign = Arg.getParamStackAlign()) {
11056        MemAlign = *ParamAlign;
11057      } else {
11058        MemAlign = OriginalAlignment;
11059      }
11060      Flags.setMemAlign(MemAlign);
11061
11062      if (Arg.hasAttribute(Attribute::Nest))
11063        Flags.setNest();
11064      if (NeedsRegBlock)
11065        Flags.setInConsecutiveRegs();
11066      if (ArgCopyElisionCandidates.count(&Arg))
11067        Flags.setCopyElisionCandidate();
11068      if (Arg.hasAttribute(Attribute::Returned))
11069        Flags.setReturned();
11070
11071      MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
11072          *CurDAG->getContext(), F.getCallingConv(), VT);
11073      unsigned NumRegs = TLI->getNumRegistersForCallingConv(
11074          *CurDAG->getContext(), F.getCallingConv(), VT);
11075      for (unsigned i = 0; i != NumRegs; ++i) {
11076        // For scalable vectors, use the minimum size; individual targets
11077        // are responsible for handling scalable vector arguments and
11078        // return values.
11079        ISD::InputArg MyFlags(
11080            Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
11081            PartBase + i * RegisterVT.getStoreSize().getKnownMinValue());
11082        if (NumRegs > 1 && i == 0)
11083          MyFlags.Flags.setSplit();
        // If it isn't the first piece, the alignment must be 1.
11085        else if (i > 0) {
11086          MyFlags.Flags.setOrigAlign(Align(1));
11087          if (i == NumRegs - 1)
11088            MyFlags.Flags.setSplitEnd();
11089        }
11090        Ins.push_back(MyFlags);
11091      }
11092      if (NeedsRegBlock && Value == NumValues - 1)
11093        Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
11094      PartBase += VT.getStoreSize().getKnownMinValue();
11095    }
11096  }
11097
11098  // Call the target to set up the argument values.
11099  SmallVector<SDValue, 8> InVals;
11100  SDValue NewRoot = TLI->LowerFormalArguments(
11101      DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);
11102
11103  // Verify that the target's LowerFormalArguments behaved as expected.
11104  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
11105         "LowerFormalArguments didn't return a valid chain!");
11106  assert(InVals.size() == Ins.size() &&
11107         "LowerFormalArguments didn't emit the correct number of values!");
11108  LLVM_DEBUG({
11109    for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
11110      assert(InVals[i].getNode() &&
11111             "LowerFormalArguments emitted a null value!");
11112      assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
11113             "LowerFormalArguments emitted a value with the wrong type!");
11114    }
11115  });
11116
11117  // Update the DAG with the new chain value resulting from argument lowering.
11118  DAG.setRoot(NewRoot);
11119
11120  // Set up the argument values.
11121  unsigned i = 0;
11122  if (!FuncInfo->CanLowerReturn) {
11123    // Create a virtual register for the sret pointer, and put in a copy
11124    // from the sret argument into it.
11125    SmallVector<EVT, 1> ValueVTs;
11126    ComputeValueVTs(*TLI, DAG.getDataLayout(),
11127                    PointerType::get(F.getContext(),
11128                                     DAG.getDataLayout().getAllocaAddrSpace()),
11129                    ValueVTs);
11130    MVT VT = ValueVTs[0].getSimpleVT();
11131    MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
11132    std::optional<ISD::NodeType> AssertOp;
11133    SDValue ArgValue =
11134        getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT, nullptr, NewRoot,
11135                         F.getCallingConv(), AssertOp);
11136
11137    MachineFunction& MF = SDB->DAG.getMachineFunction();
11138    MachineRegisterInfo& RegInfo = MF.getRegInfo();
11139    Register SRetReg =
11140        RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
11141    FuncInfo->DemoteRegister = SRetReg;
11142    NewRoot =
11143        SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
11144    DAG.setRoot(NewRoot);
11145
11146    // i indexes lowered arguments.  Bump it past the hidden sret argument.
11147    ++i;
11148  }
11149
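  // Walk the IR arguments and wire each one up to the values the target
  // produced, eliding copies into entry-block allocas where possible.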
11150  SmallVector<SDValue, 4> Chains;
11151  DenseMap<int, int> ArgCopyElisionFrameIndexMap;
11152  for (const Argument &Arg : F.args()) {
11153    SmallVector<SDValue, 4> ArgValues;
11154    SmallVector<EVT, 4> ValueVTs;
11155    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
11156    unsigned NumValues = ValueVTs.size();
11157    if (NumValues == 0)
11158      continue;
11159
11160    bool ArgHasUses = !Arg.use_empty();
11161
11162    // Elide the copying store if the target loaded this argument from a
11163    // suitable fixed stack object.
11164    if (Ins[i].Flags.isCopyElisionCandidate()) {
11165      unsigned NumParts = 0;
11166      for (EVT VT : ValueVTs)
11167        NumParts += TLI->getNumRegistersForCallingConv(*CurDAG->getContext(),
11168                                                       F.getCallingConv(), VT);
11169
11170      tryToElideArgumentCopy(*FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
11171                             ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
11172                             ArrayRef(&InVals[i], NumParts), ArgHasUses);
11173    }
11174
11175    // If this argument is unused then remember its value. It is used to generate
11176    // debugging information.
11177    bool isSwiftErrorArg =
11178        TLI->supportSwiftError() &&
11179        Arg.hasAttribute(Attribute::SwiftError);
11180    if (!ArgHasUses && !isSwiftErrorArg) {
11181      SDB->setUnusedArgValue(&Arg, InVals[i]);
11182
11183      // Also remember any frame index for use in FastISel.
11184      if (FrameIndexSDNode *FI =
11185          dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
11186        FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11187    }
11188
11189    for (unsigned Val = 0; Val != NumValues; ++Val) {
11190      EVT VT = ValueVTs[Val];
11191      MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
11192                                                      F.getCallingConv(), VT);
11193      unsigned NumParts = TLI->getNumRegistersForCallingConv(
11194          *CurDAG->getContext(), F.getCallingConv(), VT);
11195
11196      // Even an apparent 'unused' swifterror argument needs to be returned. So
11197      // we do generate a copy for it that can be used on return from the
11198      // function.
11199      if (ArgHasUses || isSwiftErrorArg) {
11200        std::optional<ISD::NodeType> AssertOp;
11201        if (Arg.hasAttribute(Attribute::SExt))
11202          AssertOp = ISD::AssertSext;
11203        else if (Arg.hasAttribute(Attribute::ZExt))
11204          AssertOp = ISD::AssertZext;
11205
11206        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
11207                                             PartVT, VT, nullptr, NewRoot,
11208                                             F.getCallingConv(), AssertOp));
11209      }
11210
11211      i += NumParts;
11212    }
11213
11214    // We don't need to do anything else for unused arguments.
11215    if (ArgValues.empty())
11216      continue;
11217
11218    // Note down frame index.
11219    if (FrameIndexSDNode *FI =
11220        dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
11221      FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11222
11223    SDValue Res = DAG.getMergeValues(ArrayRef(ArgValues.data(), NumValues),
11224                                     SDB->getCurSDLoc());
11225
11226    SDB->setValue(&Arg, Res);
11227    if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
11228      // We want to associate the argument with the frame index, among
11229      // involved operands, that correspond to the lowest address. The
11230      // getCopyFromParts function, called earlier, is swapping the order of
11231      // the operands to BUILD_PAIR depending on endianness. The result of
11232      // that swapping is that the least significant bits of the argument will
11233      // be in the first operand of the BUILD_PAIR node, and the most
11234      // significant bits will be in the second operand.
11235      unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
11236      if (LoadSDNode *LNode =
11237          dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
11238        if (FrameIndexSDNode *FI =
11239            dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
11240          FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
11241    }
11242
11243    // Analyses past this point are naive and don't expect an assertion.
11244    if (Res.getOpcode() == ISD::AssertZext)
11245      Res = Res.getOperand(0);
11246
11247    // Update the SwiftErrorVRegDefMap.
11248    if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
11249      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11250      if (Register::isVirtualRegister(Reg))
11251        SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
11252                                   Reg);
11253    }
11254
11255    // If this argument is live outside of the entry block, insert a copy from
11256    // wherever we got it to the vreg that other BB's will reference it as.
11257    if (Res.getOpcode() == ISD::CopyFromReg) {
11258      // If we can, though, try to skip creating an unnecessary vreg.
11259      // FIXME: This isn't very clean... it would be nice to make this more
11260      // general.
11261      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
11262      if (Register::isVirtualRegister(Reg)) {
11263        FuncInfo->ValueMap[&Arg] = Reg;
11264        continue;
11265      }
11266    }
11267    if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
11268      FuncInfo->InitializeRegForValue(&Arg);
11269      SDB->CopyToExportRegsIfNeeded(&Arg);
11270    }
11271  }
11272
11273  if (!Chains.empty()) {
11274    Chains.push_back(NewRoot);
11275    NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
11276  }
11277
11278  DAG.setRoot(NewRoot);
11279
11280  assert(i == InVals.size() && "Argument register count mismatch!");
11281
11282  // If any argument copy elisions occurred and we have debug info, update the
11283  // stale frame indices used in the dbg.declare variable info table.
11284  if (!ArgCopyElisionFrameIndexMap.empty()) {
11285    for (MachineFunction::VariableDbgInfo &VI :
11286         MF->getInStackSlotVariableDbgInfo()) {
11287      auto I = ArgCopyElisionFrameIndexMap.find(VI.getStackSlot());
11288      if (I != ArgCopyElisionFrameIndexMap.end())
11289        VI.updateStackSlot(I->second);
11290    }
11291  }
11292
11293  // Finally, if the target has anything special to do, allow it to do so.
11294  emitFunctionEntryCode();
11295}
11296
11297/// Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
11298/// ensure constants are generated when needed.  Remember the virtual registers
11299/// that need to be added to the Machine PHI nodes as input.  We cannot just
11300/// directly add them, because expansion might result in multiple MBB's for one
11301/// BB.  As such, the start of the BB might correspond to a different MBB than
11302/// the end.
11303void
11304SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
11305  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11306
11307  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
11308
11309  // Check PHI nodes in successors that expect a value to be available from this
11310  // block.
11311  for (const BasicBlock *SuccBB : successors(LLVMBB->getTerminator())) {
11312    if (!isa<PHINode>(SuccBB->begin())) continue;
11313    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
11314
11315    // If this terminator has multiple identical successors (common for
11316    // switches), only handle each succ once.
11317    if (!SuccsHandled.insert(SuccMBB).second)
11318      continue;
11319
11320    MachineBasicBlock::iterator MBBI = SuccMBB->begin();
11321
11322    // At this point we know that there is a 1-1 correspondence between LLVM PHI
11323    // nodes and Machine PHI nodes, but the incoming operands have not been
11324    // emitted yet.
11325    for (const PHINode &PN : SuccBB->phis()) {
11326      // Ignore dead phi's.
11327      if (PN.use_empty())
11328        continue;
11329
11330      // Skip empty types
11331      if (PN.getType()->isEmptyTy())
11332        continue;
11333
11334      unsigned Reg;
11335      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
11336
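      // Constants are materialized at most once per block and shared between
      // PHIs via ConstantsOut; other values reuse the virtual registers they
      // already live in, or get fresh ones created here.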
11337      if (const auto *C = dyn_cast<Constant>(PHIOp)) {
11338        unsigned &RegOut = ConstantsOut[C];
11339        if (RegOut == 0) {
11340          RegOut = FuncInfo.CreateRegs(C);
11341          // We need to zero/sign extend ConstantInt phi operands to match
11342          // assumptions in FunctionLoweringInfo::ComputePHILiveOutRegInfo.
11343          ISD::NodeType ExtendType = ISD::ANY_EXTEND;
11344          if (auto *CI = dyn_cast<ConstantInt>(C))
11345            ExtendType = TLI.signExtendConstant(CI) ? ISD::SIGN_EXTEND
11346                                                    : ISD::ZERO_EXTEND;
11347          CopyValueToVirtualRegister(C, RegOut, ExtendType);
11348        }
11349        Reg = RegOut;
11350      } else {
11351        DenseMap<const Value *, Register>::iterator I =
11352          FuncInfo.ValueMap.find(PHIOp);
11353        if (I != FuncInfo.ValueMap.end())
11354          Reg = I->second;
11355        else {
11356          assert(isa<AllocaInst>(PHIOp) &&
11357                 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
11358                 "Didn't codegen value into a register!??");
11359          Reg = FuncInfo.CreateRegs(PHIOp);
11360          CopyValueToVirtualRegister(PHIOp, Reg);
11361        }
11362      }
11363
      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
11366      SmallVector<EVT, 4> ValueVTs;
11367      ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
11368      for (EVT VT : ValueVTs) {
11369        const unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
11370        for (unsigned i = 0; i != NumRegisters; ++i)
11371          FuncInfo.PHINodesToUpdate.push_back(
11372              std::make_pair(&*MBBI++, Reg + i));
11373        Reg += NumRegisters;
11374      }
11375    }
11376  }
11377
11378  ConstantsOut.clear();
11379}
11380
11381MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
11382  MachineFunction::iterator I(MBB);
11383  if (++I == FuncInfo.MF->end())
11384    return nullptr;
11385  return &*I;
11386}
11387
11388/// During lowering new call nodes can be created (such as memset, etc.).
11389/// Those will become new roots of the current DAG, but complications arise
11390/// when they are tail calls. In such cases, the call lowering will update
11391/// the root, but the builder still needs to know that a tail call has been
11392/// lowered in order to avoid generating an additional return.
11393void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
11394  // If the node is null, we do have a tail call.
11395  if (MaybeTC.getNode() != nullptr)
11396    DAG.setRoot(MaybeTC);
11397  else
11398    HasTailCall = true;
11399}
11400
11401void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
11402                                        MachineBasicBlock *SwitchMBB,
11403                                        MachineBasicBlock *DefaultMBB) {
11404  MachineFunction *CurMF = FuncInfo.MF;
11405  MachineBasicBlock *NextMBB = nullptr;
11406  MachineFunction::iterator BBI(W.MBB);
11407  if (++BBI != FuncInfo.MF->end())
11408    NextMBB = &*BBI;
11409
11410  unsigned Size = W.LastCluster - W.FirstCluster + 1;
11411
11412  BranchProbabilityInfo *BPI = FuncInfo.BPI;
11413
11414  if (Size == 2 && W.MBB == SwitchMBB) {
    // If any two of the cases have the same destination, and if one value
11416    // is the same as the other, but has one bit unset that the other has set,
11417    // use bit manipulation to do two compares at once.  For example:
11418    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
11419    // TODO: This could be extended to merge any 2 cases in switches with 3
11420    // cases.
11421    // TODO: Handle cases where W.CaseBB != SwitchBB.
11422    CaseCluster &Small = *W.FirstCluster;
11423    CaseCluster &Big = *W.LastCluster;
11424
11425    if (Small.Low == Small.High && Big.Low == Big.High &&
11426        Small.MBB == Big.MBB) {
11427      const APInt &SmallValue = Small.Low->getValue();
11428      const APInt &BigValue = Big.Low->getValue();
11429
11430      // Check that there is only one bit different.
11431      APInt CommonBit = BigValue ^ SmallValue;
11432      if (CommonBit.isPowerOf2()) {
11433        SDValue CondLHS = getValue(Cond);
11434        EVT VT = CondLHS.getValueType();
11435        SDLoc DL = getCurSDLoc();
11436
11437        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
11438                                 DAG.getConstant(CommonBit, DL, VT));
11439        SDValue Cond = DAG.getSetCC(
11440            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
11441            ISD::SETEQ);
11442
11443        // Update successor info.
11444        // Both Small and Big will jump to Small.BB, so we sum up the
11445        // probabilities.
11446        addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
11447        if (BPI)
11448          addSuccessorWithProb(
11449              SwitchMBB, DefaultMBB,
11450              // The default destination is the first successor in IR.
11451              BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
11452        else
11453          addSuccessorWithProb(SwitchMBB, DefaultMBB);
11454
11455        // Insert the true branch.
11456        SDValue BrCond =
11457            DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
11458                        DAG.getBasicBlock(Small.MBB));
11459        // Insert the false branch.
11460        BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
11461                             DAG.getBasicBlock(DefaultMBB));
11462
11463        DAG.setRoot(BrCond);
11464        return;
11465      }
11466    }
11467  }
11468
11469  if (TM.getOptLevel() != CodeGenOptLevel::None) {
11470    // Here, we order cases by probability so the most likely case will be
11471    // checked first. However, two clusters can have the same probability in
11472    // which case their relative ordering is non-deterministic. So we use Low
11473    // as a tie-breaker as clusters are guaranteed to never overlap.
11474    llvm::sort(W.FirstCluster, W.LastCluster + 1,
11475               [](const CaseCluster &a, const CaseCluster &b) {
11476      return a.Prob != b.Prob ?
11477             a.Prob > b.Prob :
11478             a.Low->getValue().slt(b.Low->getValue());
11479    });
11480
11481    // Rearrange the case blocks so that the last one falls through if possible
11482    // without changing the order of probabilities.
11483    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster; ) {
11484      --I;
11485      if (I->Prob > W.LastCluster->Prob)
11486        break;
11487      if (I->Kind == CC_Range && I->MBB == NextMBB) {
11488        std::swap(*I, *W.LastCluster);
11489        break;
11490      }
11491    }
11492  }
11493
11494  // Compute total probability.
11495  BranchProbability DefaultProb = W.DefaultProb;
11496  BranchProbability UnhandledProbs = DefaultProb;
11497  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
11498    UnhandledProbs += I->Prob;
11499
11500  MachineBasicBlock *CurMBB = W.MBB;
11501  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
11502    bool FallthroughUnreachable = false;
11503    MachineBasicBlock *Fallthrough;
11504    if (I == W.LastCluster) {
11505      // For the last cluster, fall through to the default destination.
11506      Fallthrough = DefaultMBB;
11507      FallthroughUnreachable = isa<UnreachableInst>(
11508          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
11509    } else {
11510      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
11511      CurMF->insert(BBI, Fallthrough);
11512      // Put Cond in a virtual register to make it available from the new blocks.
11513      ExportFromCurrentBlock(Cond);
11514    }
11515    UnhandledProbs -= I->Prob;
11516
11517    switch (I->Kind) {
11518      case CC_JumpTable: {
11519        // FIXME: Optimize away range check based on pivot comparisons.
11520        JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
11521        SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
11522
11523        // The jump block hasn't been inserted yet; insert it here.
11524        MachineBasicBlock *JumpMBB = JT->MBB;
11525        CurMF->insert(BBI, JumpMBB);
11526
11527        auto JumpProb = I->Prob;
11528        auto FallthroughProb = UnhandledProbs;
11529
11530        // If the default statement is a target of the jump table, we evenly
11531        // distribute the default probability to successors of CurMBB. Also
11532        // update the probability on the edge from JumpMBB to Fallthrough.
11533        for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
11534                                              SE = JumpMBB->succ_end();
11535             SI != SE; ++SI) {
11536          if (*SI == DefaultMBB) {
11537            JumpProb += DefaultProb / 2;
11538            FallthroughProb -= DefaultProb / 2;
11539            JumpMBB->setSuccProbability(SI, DefaultProb / 2);
11540            JumpMBB->normalizeSuccProbs();
11541            break;
11542          }
11543        }
11544
11545        // If the default clause is unreachable, propagate that knowledge into
11546        // JTH->FallthroughUnreachable which will use it to suppress the range
11547        // check.
11548        //
11549        // However, don't do this if we're doing branch target enforcement,
11550        // because a table branch _without_ a range check can be a tempting JOP
11551        // gadget - out-of-bounds inputs that are impossible in correct
11552        // execution become possible again if an attacker can influence the
11553        // control flow. So if an attacker doesn't already have a BTI bypass
11554        // available, we don't want them to be able to get one out of this
11555        // table branch.
11556        if (FallthroughUnreachable) {
11557          Function &CurFunc = CurMF->getFunction();
11558          bool HasBranchTargetEnforcement = false;
11559          if (CurFunc.hasFnAttribute("branch-target-enforcement")) {
11560            HasBranchTargetEnforcement =
11561                CurFunc.getFnAttribute("branch-target-enforcement")
11562                    .getValueAsBool();
11563          } else {
11564            HasBranchTargetEnforcement =
11565                CurMF->getMMI().getModule()->getModuleFlag(
11566                    "branch-target-enforcement");
11567          }
11568          if (!HasBranchTargetEnforcement)
11569            JTH->FallthroughUnreachable = true;
11570        }
11571
11572        if (!JTH->FallthroughUnreachable)
11573          addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
11574        addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
11575        CurMBB->normalizeSuccProbs();
11576
        // The jump table header will be inserted into our current block; it
        // will do the range check and fall through to our fallthrough block.
11579        JTH->HeaderBB = CurMBB;
11580        JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
11581
11582        // If we're in the right place, emit the jump table header right now.
11583        if (CurMBB == SwitchMBB) {
11584          visitJumpTableHeader(*JT, *JTH, SwitchMBB);
11585          JTH->Emitted = true;
11586        }
11587        break;
11588      }
11589      case CC_BitTests: {
11590        // FIXME: Optimize away range check based on pivot comparisons.
11591        BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
11592
11593        // The bit test blocks haven't been inserted yet; insert them here.
11594        for (BitTestCase &BTC : BTB->Cases)
11595          CurMF->insert(BBI, BTC.ThisBB);
11596
11597        // Fill in fields of the BitTestBlock.
11598        BTB->Parent = CurMBB;
11599        BTB->Default = Fallthrough;
11600
11601        BTB->DefaultProb = UnhandledProbs;
        // If the cases in the bit test don't form a contiguous range, we
        // evenly distribute the probability on the edge to Fallthrough
        // between the two successors of CurMBB.
11605        if (!BTB->ContiguousRange) {
11606          BTB->Prob += DefaultProb / 2;
11607          BTB->DefaultProb -= DefaultProb / 2;
11608        }
11609
11610        if (FallthroughUnreachable)
11611          BTB->FallthroughUnreachable = true;
11612
11613        // If we're in the right place, emit the bit test header right now.
11614        if (CurMBB == SwitchMBB) {
11615          visitBitTestHeader(*BTB, SwitchMBB);
11616          BTB->Emitted = true;
11617        }
11618        break;
11619      }
11620      case CC_Range: {
11621        const Value *RHS, *LHS, *MHS;
11622        ISD::CondCode CC;
11623        if (I->Low == I->High) {
11624          // Check Cond == I->Low.
11625          CC = ISD::SETEQ;
11626          LHS = Cond;
          RHS = I->Low;
11628          MHS = nullptr;
11629        } else {
11630          // Check I->Low <= Cond <= I->High.
11631          CC = ISD::SETLE;
11632          LHS = I->Low;
11633          MHS = Cond;
11634          RHS = I->High;
11635        }
11636
11637        // If Fallthrough is unreachable, fold away the comparison.
11638        if (FallthroughUnreachable)
11639          CC = ISD::SETTRUE;
11640
11641        // The false probability is the sum of all unhandled cases.
11642        CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
11643                     getCurSDLoc(), I->Prob, UnhandledProbs);
11644
11645        if (CurMBB == SwitchMBB)
11646          visitSwitchCase(CB, SwitchMBB);
11647        else
11648          SL->SwitchCases.push_back(CB);
11649
11650        break;
11651      }
11652    }
11653    CurMBB = Fallthrough;
11654  }
11655}
11656
11657void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
11658                                        const SwitchWorkListItem &W,
11659                                        Value *Cond,
11660                                        MachineBasicBlock *SwitchMBB) {
11661  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
11662         "Clusters not sorted?");
11663  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");
11664
11665  auto [LastLeft, FirstRight, LeftProb, RightProb] =
11666      SL->computeSplitWorkItemInfo(W);
11667
11668  // Use the first element on the right as pivot since we will make less-than
11669  // comparisons against it.
11670  CaseClusterIt PivotCluster = FirstRight;
11671  assert(PivotCluster > W.FirstCluster);
11672  assert(PivotCluster <= W.LastCluster);
11673
11674  CaseClusterIt FirstLeft = W.FirstCluster;
11675  CaseClusterIt LastRight = W.LastCluster;
11676
11677  const ConstantInt *Pivot = PivotCluster->Low;
11678
11679  // New blocks will be inserted immediately after the current one.
11680  MachineFunction::iterator BBI(W.MBB);
11681  ++BBI;
11682
11683  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
11684  // we can branch to its destination directly if it's squeezed exactly in
11685  // between the known lower bound and Pivot - 1.
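  // For example (illustrative only): if the left side is the single range
  // cluster [W.GE, Pivot - 1], every value below the pivot must land in that
  // cluster, so we can branch straight to its MBB without another compare.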
11686  MachineBasicBlock *LeftMBB;
11687  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
11688      FirstLeft->Low == W.GE &&
11689      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
11690    LeftMBB = FirstLeft->MBB;
11691  } else {
11692    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11693    FuncInfo.MF->insert(BBI, LeftMBB);
11694    WorkList.push_back(
11695        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
11696    // Put Cond in a virtual register to make it available from the new blocks.
11697    ExportFromCurrentBlock(Cond);
11698  }
11699
11700  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
11701  // single cluster, RHS.Low == Pivot, and we can branch to its destination
11702  // directly if RHS.High equals the current upper bound.
11703  MachineBasicBlock *RightMBB;
11704  if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
11705      W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
11706    RightMBB = FirstRight->MBB;
11707  } else {
11708    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
11709    FuncInfo.MF->insert(BBI, RightMBB);
11710    WorkList.push_back(
11711        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
11712    // Put Cond in a virtual register to make it available from the new blocks.
11713    ExportFromCurrentBlock(Cond);
11714  }
11715
11716  // Create the CaseBlock record that will be used to lower the branch.
11717  CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
11718               getCurSDLoc(), LeftProb, RightProb);
11719
11720  if (W.MBB == SwitchMBB)
11721    visitSwitchCase(CB, SwitchMBB);
11722  else
11723    SL->SwitchCases.push_back(CB);
11724}
11725
// Scale CaseProb after peeling a case with probability PeeledCaseProb from
// the switch statement.
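// For example (illustrative numbers only): peeling a case with probability
// 3/4 leaves the remaining switch with probability 1/4, so a surviving case
// that originally had probability 1/8 is rescaled to (1/8) / (1/4) = 1/2.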
11728static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
11729                                            BranchProbability PeeledCaseProb) {
11730  if (PeeledCaseProb == BranchProbability::getOne())
11731    return BranchProbability::getZero();
11732  BranchProbability SwitchProb = PeeledCaseProb.getCompl();
11733
11734  uint32_t Numerator = CaseProb.getNumerator();
11735  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
11736  return BranchProbability(Numerator, std::max(Numerator, Denominator));
11737}
11738
// Try to peel the top probability case if it exceeds the threshold.
// Return the current MachineBasicBlock for the switch statement if peeling
// does not occur.
// If peeling is performed, return the newly created MachineBasicBlock for
// the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
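// For example (illustrative only): with a peeling threshold of 66%, a switch
// whose hottest case carries 90% of the profile weight gets that case peeled
// into its own compare-and-branch, and the remaining clusters are lowered in
// PeeledSwitchMBB with probabilities rescaled by scaleCaseProbality.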
11745MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
11746    const SwitchInst &SI, CaseClusterVector &Clusters,
11747    BranchProbability &PeeledCaseProb) {
11748  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't peel if there is only one cluster or when optimizing for size.
11750  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
11751      TM.getOptLevel() == CodeGenOptLevel::None ||
11752      SwitchMBB->getParent()->getFunction().hasMinSize())
11753    return SwitchMBB;
11754
11755  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
11756  unsigned PeeledCaseIndex = 0;
11757  bool SwitchPeeled = false;
11758  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
11759    CaseCluster &CC = Clusters[Index];
11760    if (CC.Prob < TopCaseProb)
11761      continue;
11762    TopCaseProb = CC.Prob;
11763    PeeledCaseIndex = Index;
11764    SwitchPeeled = true;
11765  }
11766  if (!SwitchPeeled)
11767    return SwitchMBB;
11768
11769  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
11770                    << TopCaseProb << "\n");
11771
11772  // Record the MBB for the peeled switch statement.
11773  MachineFunction::iterator BBI(SwitchMBB);
11774  ++BBI;
11775  MachineBasicBlock *PeeledSwitchMBB =
11776      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
11777  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);
11778
11779  ExportFromCurrentBlock(SI.getCondition());
11780  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
11781  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
11782                          nullptr,   nullptr,      TopCaseProb.getCompl()};
11783  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);
11784
11785  Clusters.erase(PeeledCaseIt);
11786  for (CaseCluster &CC : Clusters) {
11787    LLVM_DEBUG(
11788        dbgs() << "Scale the probablity for one cluster, before scaling: "
11789               << CC.Prob << "\n");
11790    CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
11791    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
11792  }
11793  PeeledCaseProb = TopCaseProb;
11794  return PeeledSwitchMBB;
11795}
11796
11797void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
11798  // Extract cases from the switch.
11799  BranchProbabilityInfo *BPI = FuncInfo.BPI;
11800  CaseClusterVector Clusters;
11801  Clusters.reserve(SI.getNumCases());
11802  for (auto I : SI.cases()) {
11803    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
11804    const ConstantInt *CaseVal = I.getCaseValue();
11805    BranchProbability Prob =
11806        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
11807            : BranchProbability(1, SI.getNumCases() + 1);
11808    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
11809  }
11810
11811  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];
11812
11813  // Cluster adjacent cases with the same destination. We do this at all
11814  // optimization levels because it's cheap to do and will make codegen faster
11815  // if there are many clusters.
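  // For example (illustrative only): 'case 0', 'case 1' and 'case 2' that
  // all branch to the same successor are merged into a single [0, 2] range
  // cluster by sortAndRangeify below.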
11816  sortAndRangeify(Clusters);
11817
  // The branch probability of the peeled case.
11819  BranchProbability PeeledCaseProb = BranchProbability::getZero();
11820  MachineBasicBlock *PeeledSwitchMBB =
11821      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);
11822
11823  // If there is only the default destination, jump there directly.
11824  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
11825  if (Clusters.empty()) {
11826    assert(PeeledSwitchMBB == SwitchMBB);
11827    SwitchMBB->addSuccessor(DefaultMBB);
11828    if (DefaultMBB != NextBlock(SwitchMBB)) {
11829      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
11830                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
11831    }
11832    return;
11833  }
11834
11835  SL->findJumpTables(Clusters, &SI, getCurSDLoc(), DefaultMBB, DAG.getPSI(),
11836                     DAG.getBFI());
11837  SL->findBitTestClusters(Clusters, &SI);
11838
11839  LLVM_DEBUG({
11840    dbgs() << "Case clusters: ";
11841    for (const CaseCluster &C : Clusters) {
11842      if (C.Kind == CC_JumpTable)
11843        dbgs() << "JT:";
11844      if (C.Kind == CC_BitTests)
11845        dbgs() << "BT:";
11846
11847      C.Low->getValue().print(dbgs(), true);
11848      if (C.Low != C.High) {
11849        dbgs() << '-';
11850        C.High->getValue().print(dbgs(), true);
11851      }
11852      dbgs() << ' ';
11853    }
11854    dbgs() << '\n';
11855  });
11856
11857  assert(!Clusters.empty());
11858  SwitchWorkList WorkList;
11859  CaseClusterIt First = Clusters.begin();
11860  CaseClusterIt Last = Clusters.end() - 1;
11861  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branch probability for DefaultMBB if peeling occurred and
  // DefaultMBB was not replaced.
11864  if (PeeledCaseProb != BranchProbability::getZero() &&
11865      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
11866    DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
11867  WorkList.push_back(
11868      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
11869
11870  while (!WorkList.empty()) {
11871    SwitchWorkListItem W = WorkList.pop_back_val();
11872    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
11873
11874    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOptLevel::None &&
11875        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      // For optimized builds, lower a large range as a balanced binary tree.
11877      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
11878      continue;
11879    }
11880
11881    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
11882  }
11883}
11884
11885void SelectionDAGBuilder::visitStepVector(const CallInst &I) {
11886  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11887  auto DL = getCurSDLoc();
11888  EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11889  setValue(&I, DAG.getStepVector(DL, ResultVT));
11890}
11891
11892void SelectionDAGBuilder::visitVectorReverse(const CallInst &I) {
11893  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11894  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11895
11896  SDLoc DL = getCurSDLoc();
11897  SDValue V = getValue(I.getOperand(0));
11898  assert(VT == V.getValueType() && "Malformed vector.reverse!");
11899
11900  if (VT.isScalableVector()) {
11901    setValue(&I, DAG.getNode(ISD::VECTOR_REVERSE, DL, VT, V));
11902    return;
11903  }
11904
11905  // Use VECTOR_SHUFFLE for the fixed-length vector
11906  // to maintain existing behavior.
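  // For example (illustrative only): reversing a 4-element vector builds the
  // shuffle mask <3, 2, 1, 0>.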
11907  SmallVector<int, 8> Mask;
11908  unsigned NumElts = VT.getVectorMinNumElements();
11909  for (unsigned i = 0; i != NumElts; ++i)
11910    Mask.push_back(NumElts - 1 - i);
11911
11912  setValue(&I, DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), Mask));
11913}
11914
11915void SelectionDAGBuilder::visitVectorDeinterleave(const CallInst &I) {
11916  auto DL = getCurSDLoc();
11917  SDValue InVec = getValue(I.getOperand(0));
11918  EVT OutVT =
11919      InVec.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
11920
11921  unsigned OutNumElts = OutVT.getVectorMinNumElements();
11922
  // The ISD node needs the input vector split into two equal halves.
11924  SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11925                           DAG.getVectorIdxConstant(0, DL));
11926  SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT, InVec,
11927                           DAG.getVectorIdxConstant(OutNumElts, DL));
11928
11929  // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11930  // legalisation and combines.
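  // For example (illustrative only): for an <8 x i32> input, Lo holds
  // elements 0..3 and Hi holds elements 4..7, so the stride-2 masks select
  // elements {0, 2, 4, 6} and {1, 3, 5, 7} of the original vector.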
11931  if (OutVT.isFixedLengthVector()) {
11932    SDValue Even = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11933                                        createStrideMask(0, 2, OutNumElts));
11934    SDValue Odd = DAG.getVectorShuffle(OutVT, DL, Lo, Hi,
11935                                       createStrideMask(1, 2, OutNumElts));
11936    SDValue Res = DAG.getMergeValues({Even, Odd}, getCurSDLoc());
11937    setValue(&I, Res);
11938    return;
11939  }
11940
11941  SDValue Res = DAG.getNode(ISD::VECTOR_DEINTERLEAVE, DL,
11942                            DAG.getVTList(OutVT, OutVT), Lo, Hi);
11943  setValue(&I, Res);
11944}
11945
11946void SelectionDAGBuilder::visitVectorInterleave(const CallInst &I) {
11947  auto DL = getCurSDLoc();
11948  EVT InVT = getValue(I.getOperand(0)).getValueType();
11949  SDValue InVec0 = getValue(I.getOperand(0));
11950  SDValue InVec1 = getValue(I.getOperand(1));
11951  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11952  EVT OutVT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11953
11954  // Use VECTOR_SHUFFLE for fixed-length vectors to benefit from existing
11955  // legalisation and combines.
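  // For example (illustrative only): interleaving two <4 x i32> operands
  // concatenates them and applies the shuffle mask <0, 4, 1, 5, 2, 6, 3, 7>.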
11956  if (OutVT.isFixedLengthVector()) {
11957    unsigned NumElts = InVT.getVectorMinNumElements();
11958    SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, InVec0, InVec1);
11959    setValue(&I, DAG.getVectorShuffle(OutVT, DL, V, DAG.getUNDEF(OutVT),
11960                                      createInterleaveMask(NumElts, 2)));
11961    return;
11962  }
11963
11964  SDValue Res = DAG.getNode(ISD::VECTOR_INTERLEAVE, DL,
11965                            DAG.getVTList(InVT, InVT), InVec0, InVec1);
11966  Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, OutVT, Res.getValue(0),
11967                    Res.getValue(1));
11968  setValue(&I, Res);
11969}
11970
11971void SelectionDAGBuilder::visitFreeze(const FreezeInst &I) {
11972  SmallVector<EVT, 4> ValueVTs;
11973  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(),
11974                  ValueVTs);
11975  unsigned NumValues = ValueVTs.size();
11976  if (NumValues == 0) return;
11977
11978  SmallVector<SDValue, 4> Values(NumValues);
11979  SDValue Op = getValue(I.getOperand(0));
11980
11981  for (unsigned i = 0; i != NumValues; ++i)
11982    Values[i] = DAG.getNode(ISD::FREEZE, getCurSDLoc(), ValueVTs[i],
11983                            SDValue(Op.getNode(), Op.getResNo() + i));
11984
11985  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
11986                           DAG.getVTList(ValueVTs), Values));
11987}
11988
11989void SelectionDAGBuilder::visitVectorSplice(const CallInst &I) {
11990  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11991  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
11992
11993  SDLoc DL = getCurSDLoc();
11994  SDValue V1 = getValue(I.getOperand(0));
11995  SDValue V2 = getValue(I.getOperand(1));
11996  int64_t Imm = cast<ConstantInt>(I.getOperand(2))->getSExtValue();
11997
11998  // VECTOR_SHUFFLE doesn't support a scalable mask so use a dedicated node.
11999  if (VT.isScalableVector()) {
12000    MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
12001    setValue(&I, DAG.getNode(ISD::VECTOR_SPLICE, DL, VT, V1, V2,
12002                             DAG.getConstant(Imm, DL, IdxVT)));
12003    return;
12004  }
12005
12006  unsigned NumElts = VT.getVectorNumElements();
12007
12008  uint64_t Idx = (NumElts + Imm) % NumElts;
12009
12010  // Use VECTOR_SHUFFLE to maintain original behaviour for fixed-length vectors.
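  // For example (illustrative only): splicing two <4 x i32> vectors with
  // Imm = -1 gives Idx = 3 and the mask <3, 4, 5, 6>, i.e. the last element
  // of V1 followed by the first three elements of V2.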
12011  SmallVector<int, 8> Mask;
12012  for (unsigned i = 0; i < NumElts; ++i)
12013    Mask.push_back(Idx + i);
12014  setValue(&I, DAG.getVectorShuffle(VT, DL, V1, V2, Mask));
12015}
12016
12017// Consider the following MIR after SelectionDAG, which produces output in
// physregs in the first case or virtregs in the second case.
12019//
12020// INLINEASM_BR ..., implicit-def $ebx, ..., implicit-def $edx
12021// %5:gr32 = COPY $ebx
12022// %6:gr32 = COPY $edx
12023// %1:gr32 = COPY %6:gr32
12024// %0:gr32 = COPY %5:gr32
12025//
12026// INLINEASM_BR ..., def %5:gr32, ..., def %6:gr32
12027// %1:gr32 = COPY %6:gr32
12028// %0:gr32 = COPY %5:gr32
12029//
12030// Given %0, we'd like to return $ebx in the first case and %5 in the second.
12031// Given %1, we'd like to return $edx in the first case and %6 in the second.
12032//
12033// If a callbr has outputs, it will have a single mapping in FuncInfo.ValueMap
12034// to a single virtreg (such as %0). The remaining outputs monotonically
12035// increase in virtreg number from there. If a callbr has no outputs, then it
12036// should not have a corresponding callbr landingpad; in fact, the callbr
12037// landingpad would not even be able to refer to such a callbr.
12038static Register FollowCopyChain(MachineRegisterInfo &MRI, Register Reg) {
12039  MachineInstr *MI = MRI.def_begin(Reg)->getParent();
12040  // There is definitely at least one copy.
12041  assert(MI->getOpcode() == TargetOpcode::COPY &&
12042         "start of copy chain MUST be COPY");
12043  Reg = MI->getOperand(1).getReg();
12044  MI = MRI.def_begin(Reg)->getParent();
  // There may be a second copy.
12046  if (MI->getOpcode() == TargetOpcode::COPY) {
12047    assert(Reg.isVirtual() && "expected COPY of virtual register");
12048    Reg = MI->getOperand(1).getReg();
12049    assert(Reg.isPhysical() && "expected COPY of physical register");
12050    MI = MRI.def_begin(Reg)->getParent();
12051  }
12052  // The start of the chain must be an INLINEASM_BR.
12053  assert(MI->getOpcode() == TargetOpcode::INLINEASM_BR &&
12054         "end of copy chain MUST be INLINEASM_BR");
12055  return Reg;
12056}
12057
12058// We must do this walk rather than the simpler
12059//   setValue(&I, getCopyFromRegs(CBR, CBR->getType()));
12060// otherwise we will end up with copies of virtregs only valid along direct
12061// edges.
12062void SelectionDAGBuilder::visitCallBrLandingPad(const CallInst &I) {
12063  SmallVector<EVT, 8> ResultVTs;
12064  SmallVector<SDValue, 8> ResultValues;
12065  const auto *CBR =
12066      cast<CallBrInst>(I.getParent()->getUniquePredecessor()->getTerminator());
12067
12068  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12069  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
12070  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
12071
12072  unsigned InitialDef = FuncInfo.ValueMap[CBR];
12073  SDValue Chain = DAG.getRoot();
12074
12075  // Re-parse the asm constraints string.
12076  TargetLowering::AsmOperandInfoVector TargetConstraints =
12077      TLI.ParseConstraints(DAG.getDataLayout(), TRI, *CBR);
12078  for (auto &T : TargetConstraints) {
12079    SDISelAsmOperandInfo OpInfo(T);
12080    if (OpInfo.Type != InlineAsm::isOutput)
12081      continue;
12082
12083    // Pencil in OpInfo.ConstraintType and OpInfo.ConstraintVT based on the
12084    // individual constraint.
12085    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);
12086
12087    switch (OpInfo.ConstraintType) {
12088    case TargetLowering::C_Register:
12089    case TargetLowering::C_RegisterClass: {
12090      // Fill in OpInfo.AssignedRegs.Regs.
12091      getRegistersForValue(DAG, getCurSDLoc(), OpInfo, OpInfo);
12092
12093      // getRegistersForValue may produce 1 to many registers based on whether
12094      // the OpInfo.ConstraintVT is legal on the target or not.
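      // For example (illustrative only): an i64 output constraint on a
      // 32-bit target may be assigned a pair of 32-bit registers, in which
      // case the loop below walks one original def per assigned register.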
12095      for (size_t i = 0, e = OpInfo.AssignedRegs.Regs.size(); i != e; ++i) {
12096        Register OriginalDef = FollowCopyChain(MRI, InitialDef++);
12097        if (Register::isPhysicalRegister(OriginalDef))
12098          FuncInfo.MBB->addLiveIn(OriginalDef);
12099        // Update the assigned registers to use the original defs.
12100        OpInfo.AssignedRegs.Regs[i] = OriginalDef;
12101      }
12102
12103      SDValue V = OpInfo.AssignedRegs.getCopyFromRegs(
12104          DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, CBR);
12105      ResultValues.push_back(V);
12106      ResultVTs.push_back(OpInfo.ConstraintVT);
12107      break;
12108    }
12109    case TargetLowering::C_Other: {
12110      SDValue Flag;
12111      SDValue V = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
12112                                                  OpInfo, DAG);
12113      ++InitialDef;
12114      ResultValues.push_back(V);
12115      ResultVTs.push_back(OpInfo.ConstraintVT);
12116      break;
12117    }
12118    default:
12119      break;
12120    }
12121  }
12122  SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
12123                          DAG.getVTList(ResultVTs), ResultValues);
12124  setValue(&I, V);
12125}
12126