//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "x86-isel"
#define PASS_NAME "X86 DAG->DAG Instruction Selection"

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

static cl::opt<bool> AndImmShrink("x86-and-imm-shrink", cl::init(true),
    cl::desc("Enable setting constant bits to reduce size of mask immediates"),
    cl::Hidden);

static cl::opt<bool> EnablePromoteAnyextLoad(
    "x86-promote-anyext-load", cl::init(true),
    cl::desc("Enable promoting aligned anyext load to wider load"), cl::Hidden);
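
// Note: both options above are hidden cl::opt flags; assuming an llc-based
// workflow, they can be toggled on the command line, e.g.:
//   llc -x86-and-imm-shrink=false -x86-promote-anyext-load=false test.ll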

extern cl::opt<bool> IndirectBranchTracking;

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// This corresponds to X86AddressMode, but uses SDValues instead of register
  /// numbers for the leaves of the matched tree.
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType = RegBase;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex = 0;

    unsigned Scale = 1;
    SDValue IndexReg;
    int32_t Disp = 0;
    SDValue Segment;
    const GlobalValue *GV = nullptr;
    const Constant *CP = nullptr;
    const BlockAddress *BlockAddr = nullptr;
    const char *ES = nullptr;
    MCSymbol *MCSym = nullptr;
    int JT = -1;
    Align Alignment;            // CP alignment.
    unsigned char SymbolFlags = X86II::MO_NO_FLAG;  // X86II::MO_*
    bool NegateIndex = false;

    X86ISelAddressMode() = default;

    bool hasSymbolicDisplacement() const {
      return GV != nullptr || CP != nullptr || ES != nullptr ||
             MCSym != nullptr || JT != -1 || BlockAddr != nullptr;
    }

    bool hasBaseOrIndexReg() const {
      return BaseType == FrameIndexBase ||
             IndexReg.getNode() != nullptr || Base_Reg.getNode() != nullptr;
    }

    /// Return true if this addressing mode is already RIP-relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump(SelectionDAG *DAG = nullptr) {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode())
        Base_Reg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      if (BaseType == FrameIndexBase)
        dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n';
      dbgs() << " Scale " << Scale << '\n'
             << "IndexReg ";
      if (NegateIndex)
        dbgs() << "negate ";
      if (IndexReg.getNode())
        IndexReg.getNode()->dump(DAG);
      else
        dbgs() << "nul\n";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " MCSym ";
      if (MCSym)
        dbgs() << MCSym;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Alignment.value() << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86-specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel final : public SelectionDAGISel {
    /// Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// If true, selector should try to optimize for minimum code size.
    bool OptForMinSize;

    /// Disable direct TLS access through segment registers.
    bool IndirectTlsSegRefs;

  public:
    static char ID;

    X86DAGToDAGISel() = delete;

    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOptLevel OptLevel)
        : SelectionDAGISel(ID, tm, OptLevel), Subtarget(nullptr),
          OptForMinSize(false), IndirectTlsSegRefs(false) {}

    bool runOnMachineFunction(MachineFunction &MF) override {
      // Reset the subtarget each time through.
      Subtarget = &MF.getSubtarget<X86Subtarget>();
      IndirectTlsSegRefs = MF.getFunction().hasFnAttribute(
                             "indirect-tls-seg-refs");

      // OptFor[Min]Size are used in pattern predicates that isel is matching.
      OptForMinSize = MF.getFunction().hasMinSize();
      assert((!OptForMinSize || MF.getFunction().hasOptSize()) &&
             "OptForMinSize implies OptForSize");

      SelectionDAGISel::runOnMachineFunction(MF);
      return true;
    }

    void emitFunctionEntryCode() override;

    bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const override;

    void PreprocessISelDAG() override;
    void PostprocessISelDAG() override;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    void Select(SDNode *N) override;

    bool foldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
                            bool AllowSegmentRegForX32 = false);
    bool matchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool matchAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchVectorAddress(SDValue N, X86ISelAddressMode &AM);
    bool matchAdd(SDValue &N, X86ISelAddressMode &AM, unsigned Depth);
    SDValue matchIndexRecursively(SDValue N, X86ISelAddressMode &AM,
                                  unsigned Depth);
    bool matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool matchVectorAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                       unsigned Depth);
    bool matchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool selectVectorAddr(MemSDNode *Parent, SDValue BasePtr, SDValue IndexOp,
                          SDValue ScaleOp, SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp, SDValue &Segment);
    bool selectMOV64Imm32(SDValue N, SDValue &Imm);
    bool selectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool selectLEA64_32Addr(SDValue N, SDValue &Base,
                            SDValue &Scale, SDValue &Index, SDValue &Disp,
                            SDValue &Segment);
    bool selectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool selectRelocImm(SDValue N, SDValue &Op);

    bool tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    // Convenience method where P is also root.
    bool tryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment) {
      return tryFoldLoad(P, P, N, Base, Scale, Index, Disp, Segment);
    }

    bool tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                          SDValue &Base, SDValue &Scale,
                          SDValue &Index, SDValue &Disp,
                          SDValue &Segment);

    bool isProfitableToFormMaskedOp(SDNode *N) const;

    /// Implement addressing mode selection for inline asm expressions.
    bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                      InlineAsm::ConstraintCode ConstraintID,
                                      std::vector<SDValue> &OutOps) override;

    void emitSpecialCodeForMain();

    inline void getAddressOperands(X86ISelAddressMode &AM, const SDLoc &DL,
                                   MVT VT, SDValue &Base, SDValue &Scale,
                                   SDValue &Index, SDValue &Disp,
                                   SDValue &Segment) {
      if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
        Base = CurDAG->getTargetFrameIndex(
            AM.Base_FrameIndex, TLI->getPointerTy(CurDAG->getDataLayout()));
      else if (AM.Base_Reg.getNode())
        Base = AM.Base_Reg;
      else
        Base = CurDAG->getRegister(0, VT);

      Scale = getI8Imm(AM.Scale, DL);

      // Negate the index if needed.
      if (AM.NegateIndex) {
        unsigned NegOpc = VT == MVT::i64 ? X86::NEG64r : X86::NEG32r;
        SDValue Neg = SDValue(CurDAG->getMachineNode(NegOpc, DL, VT, MVT::i32,
                                                     AM.IndexReg), 0);
        AM.IndexReg = Neg;
      }

      if (AM.IndexReg.getNode())
        Index = AM.IndexReg;
      else
        Index = CurDAG->getRegister(0, VT);

      // These are 32-bit even in 64-bit mode since RIP-relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, SDLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Alignment,
                                             AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.MCSym) {
        assert(!AM.Disp && "Non-zero displacement is ignored with MCSym.");
        assert(AM.SymbolFlags == 0 && "Unexpected symbol flags with MCSym");
        Disp = CurDAG->getMCSymbol(AM.MCSym, MVT::i32);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, DL, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i16);
    }

    // Utility function to determine whether we should avoid selecting
    // immediate forms of instructions for better code size or not.
    // At a high level, we'd like to avoid such instructions when
    // we have similar constants used within the same basic block
    // that can be kept in a register.
    //
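    // For example (a sketch at -Oz; register choices illustrative): with two
    // uses of the same 4-byte constant,
    //   movl $0x12345678, %ecx
    //   addl %ecx, %eax
    //   movl %ecx, (%edx)
    // is smaller than re-encoding the immediate in both instructions.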
    bool shouldAvoidImmediateInstFormsForSize(SDNode *N) const {
      uint32_t UseCount = 0;

      // Do not want to hoist if we're not optimizing for size.
      // TODO: We'd like to remove this restriction.
      // See the comment in X86InstrInfo.td for more info.
      if (!CurDAG->shouldOptForSize())
        return false;

      // Walk all the users of the immediate.
      for (const SDNode *User : N->uses()) {
        if (UseCount >= 2)
          break;

        // This user is already selected. Count it as a legitimate use and
        // move on.
        if (User->isMachineOpcode()) {
          UseCount++;
          continue;
        }

        // We want to count stores of immediates as real uses.
        if (User->getOpcode() == ISD::STORE &&
            User->getOperand(1).getNode() == N) {
          UseCount++;
          continue;
        }

        // We don't currently match users that have more than two operands
        // (except for stores, which are handled above). Those instructions
        // won't match in isel, for now, and would be counted incorrectly.
        // This may change in the future as we add additional instruction
        // types.
        if (User->getNumOperands() != 2)
          continue;

        // If this is a sign-extended 8-bit integer immediate used in an ALU
        // instruction, there is probably an opcode encoding to save space.
        auto *C = dyn_cast<ConstantSDNode>(N);
        if (C && isInt<8>(C->getSExtValue()))
          continue;

        // Immediates that are used for offsets as part of stack
        // manipulation should be left alone. These are typically
        // used to indicate SP offsets for argument passing and
        // will get pulled into stores/pushes (implicitly).
        if (User->getOpcode() == X86ISD::ADD ||
            User->getOpcode() == ISD::ADD    ||
            User->getOpcode() == X86ISD::SUB ||
            User->getOpcode() == ISD::SUB) {

          // Find the other operand of the add/sub.
          SDValue OtherOp = User->getOperand(0);
          if (OtherOp.getNode() == N)
            OtherOp = User->getOperand(1);

          // Don't count if the other operand is SP.
          RegisterSDNode *RegNode;
          if (OtherOp->getOpcode() == ISD::CopyFromReg &&
              (RegNode = dyn_cast_or_null<RegisterSDNode>(
                 OtherOp->getOperand(1).getNode())))
            if ((RegNode->getReg() == X86::ESP) ||
                (RegNode->getReg() == X86::RSP))
              continue;
        }

        // ... otherwise, count this and move on.
        UseCount++;
      }

      // If we have more than 1 use, then recommend for hoisting.
      return (UseCount > 1);
    }

    /// Return a target constant with the specified value of type i8.
    inline SDValue getI8Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i8);
    }

    /// Return a target constant with the specified value, of type i32.
    inline SDValue getI32Imm(unsigned Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i32);
    }

    /// Return a target constant with the specified value, of type i64.
    inline SDValue getI64Imm(uint64_t Imm, const SDLoc &DL) {
      return CurDAG->getTargetConstant(Imm, DL, MVT::i64);
    }

    SDValue getExtractVEXTRACTImmediate(SDNode *N, unsigned VecWidth,
                                        const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(1);
      MVT VecVT = N->getOperand(0).getSimpleValueType();
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }
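    // Example for getExtractVEXTRACTImmediate: extracting the subvector that
    // starts at element 4 of a v8i32 source with VecWidth == 128 yields
    // (4 * 32) / 128 = 1, i.e. the upper 128-bit lane.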

    SDValue getInsertVINSERTImmediate(SDNode *N, unsigned VecWidth,
                                      const SDLoc &DL) {
      assert((VecWidth == 128 || VecWidth == 256) && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      return getI8Imm((Index * VecVT.getScalarSizeInBits()) / VecWidth, DL);
    }

    SDValue getPermuteVINSERTCommutedImmediate(SDNode *N, unsigned VecWidth,
                                               const SDLoc &DL) {
      assert(VecWidth == 128 && "Unexpected vector width");
      uint64_t Index = N->getConstantOperandVal(2);
      MVT VecVT = N->getSimpleValueType(0);
      uint64_t InsertIdx = (Index * VecVT.getScalarSizeInBits()) / VecWidth;
      assert((InsertIdx == 0 || InsertIdx == 1) && "Bad insertf128 index");
      // vinsert(0,sub,vec) -> [sub0][vec1] -> vperm2x128(0x30,vec,sub)
      // vinsert(1,sub,vec) -> [vec0][sub0] -> vperm2x128(0x02,vec,sub)
      return getI8Imm(InsertIdx ? 0x02 : 0x30, DL);
    }

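    // Materialize the incoming carry flag into a full register: the SBB below
    // computes 0 - 0 - CF, producing all-zeros or all-ones depending on
    // EFLAGS.CF.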
    SDValue getSBBZero(SDNode *N) {
      SDLoc dl(N);
      MVT VT = N->getSimpleValueType(0);

      // Create zero.
      SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
      SDValue Zero = SDValue(
          CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0);
      if (VT == MVT::i64) {
        Zero = SDValue(
            CurDAG->getMachineNode(
                TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                CurDAG->getTargetConstant(0, dl, MVT::i64), Zero,
                CurDAG->getTargetConstant(X86::sub_32bit, dl, MVT::i32)),
            0);
      }

      // Copy flags to the EFLAGS register and glue it to next node.
      unsigned Opcode = N->getOpcode();
      assert((Opcode == X86ISD::SBB || Opcode == X86ISD::SETCC_CARRY) &&
             "Unexpected opcode for SBB materialization");
      unsigned FlagOpIndex = Opcode == X86ISD::SBB ? 2 : 1;
      SDValue EFLAGS =
          CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
                               N->getOperand(FlagOpIndex), SDValue());

      // Create a 64-bit instruction if the result is 64-bits otherwise use the
      // 32-bit version.
      unsigned Opc = VT == MVT::i64 ? X86::SBB64rr : X86::SBB32rr;
      MVT SBBVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
      VTs = CurDAG->getVTList(SBBVT, MVT::i32);
      return SDValue(
          CurDAG->getMachineNode(Opc, dl, VTs,
                                 {Zero, Zero, EFLAGS, EFLAGS.getValue(1)}),
          0);
    }

    // Helper to detect unneeded and instructions on shift amounts. Called
    // from PatFrags in tablegen.
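    // For example, (shl X, (and Y, 31)) with a 32-bit shift (Width == 5): the
    // mask 31 has five trailing ones, so hardware shift-amount masking already
    // makes the AND redundant.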
    bool isUnneededShiftMask(SDNode *N, unsigned Width) const {
      assert(N->getOpcode() == ISD::AND && "Unexpected opcode");
      const APInt &Val = N->getConstantOperandAPInt(1);

      if (Val.countr_one() >= Width)
        return true;

      APInt Mask = Val | CurDAG->computeKnownBits(N->getOperand(0)).Zero;
      return Mask.countr_one() >= Width;
    }

    /// Return an SDNode that returns the value of the global base register.
    /// Output instructions required to initialize the global base register,
    /// if necessary.
    SDNode *getGlobalBaseReg();

    /// Return a reference to the TargetMachine, casted to the target-specific
    /// type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// Return a reference to the TargetInstrInfo, casted to the target-specific
    /// type.
    const X86InstrInfo *getInstrInfo() const {
      return Subtarget->getInstrInfo();
    }

    /// Return a condition code of the given SDNode
    X86::CondCode getCondFromNode(SDNode *N) const;

    /// Address-mode matching performs shift-of-and to and-of-shift
    /// reassociation in order to expose more scaled addressing
    /// opportunities.
    bool ComplexPatternFuncMutatesDAG() const override {
      return true;
    }

    bool isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const;

    // Indicates we should prefer to use a non-temporal load for this load.
    bool useNonTemporalLoad(LoadSDNode *N) const {
      if (!N->isNonTemporal())
        return false;

      unsigned StoreSize = N->getMemoryVT().getStoreSize();

      if (N->getAlign().value() < StoreSize)
        return false;

      switch (StoreSize) {
      default: llvm_unreachable("Unsupported store size");
      case 4:
      case 8:
        return false;
      case 16:
        return Subtarget->hasSSE41();
      case 32:
        return Subtarget->hasAVX2();
      case 64:
        return Subtarget->hasAVX512();
      }
    }

    bool foldLoadStoreIntoMemOperand(SDNode *Node);
    MachineSDNode *matchBEXTRFromAndImm(SDNode *Node);
    bool matchBitExtract(SDNode *Node);
    bool shrinkAndImmediate(SDNode *N);
    bool isMaskZeroExtended(SDNode *N) const;
    bool tryShiftAmountMod(SDNode *N);
    bool tryShrinkShlLogicImm(SDNode *N);
    bool tryVPTERNLOG(SDNode *N);
    bool matchVPTERNLOG(SDNode *Root, SDNode *ParentA, SDNode *ParentB,
                        SDNode *ParentC, SDValue A, SDValue B, SDValue C,
                        uint8_t Imm);
    bool tryVPTESTM(SDNode *Root, SDValue Setcc, SDValue Mask);
    bool tryMatchBitSelect(SDNode *N);

    MachineSDNode *emitPCMPISTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node);
    MachineSDNode *emitPCMPESTR(unsigned ROpc, unsigned MOpc, bool MayFoldLoad,
                                const SDLoc &dl, MVT VT, SDNode *Node,
                                SDValue &InGlue);

    bool tryOptimizeRem8Extend(SDNode *N);

    bool onlyUsesZeroFlag(SDValue Flags) const;
    bool hasNoSignFlagUses(SDValue Flags) const;
    bool hasNoCarryFlagUses(SDValue Flags) const;
  };
}

char X86DAGToDAGISel::ID = 0;

INITIALIZE_PASS(X86DAGToDAGISel, DEBUG_TYPE, PASS_NAME, false, false)

// Returns true if this masked compare can be implemented legally with this
// type.
static bool isLegalMaskCompare(SDNode *N, const X86Subtarget *Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode == X86ISD::CMPM || Opcode == X86ISD::CMPMM ||
      Opcode == X86ISD::STRICT_CMPM || Opcode == ISD::SETCC ||
      Opcode == X86ISD::CMPMM_SAE || Opcode == X86ISD::VFPCLASS) {
    // We can get 256-bit 8 element types here without VLX being enabled. When
    // this happens we will use 512-bit operations and the mask will not be
    // zero extended.
    EVT OpVT = N->getOperand(0).getValueType();
    // The first operand of X86ISD::STRICT_CMPM is chain, so we need to get the
    // second operand.
    if (Opcode == X86ISD::STRICT_CMPM)
      OpVT = N->getOperand(1).getValueType();
    if (OpVT.is256BitVector() || OpVT.is128BitVector())
      return Subtarget->hasVLX();

    return true;
  }
  // Scalar opcodes use 128 bit registers, but aren't subject to the VLX check.
  if (Opcode == X86ISD::VFPCLASSS || Opcode == X86ISD::FSETCCM ||
      Opcode == X86ISD::FSETCCM_SAE)
    return true;

  return false;
}

// Returns true if we can assume the writer of the mask has zero extended it
// for us.
bool X86DAGToDAGISel::isMaskZeroExtended(SDNode *N) const {
  // If this is an AND, check if we have a compare on either side. As long as
  // one side guarantees the mask is zero extended, the AND will preserve those
  // zeros.
  if (N->getOpcode() == ISD::AND)
    return isLegalMaskCompare(N->getOperand(0).getNode(), Subtarget) ||
           isLegalMaskCompare(N->getOperand(1).getNode(), Subtarget);

  return isLegalMaskCompare(N, Subtarget);
}

bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOptLevel::None)
    return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // Don't fold non-temporal loads if we have an instruction for them.
  if (useNonTemporalLoad(cast<LoadSDNode>(N)))
    return false;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::ADC:
    case X86ISD::SUB:
    case X86ISD::SBB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::UADDO_CARRY:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. When the increment is 1, the saving
      // can be 4 bytes (by using incl %eax).
      if (auto *Imm = dyn_cast<ConstantSDNode>(Op1)) {
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

        // If this is a 64-bit AND with an immediate that fits in 32 bits,
        // prefer using the smaller AND over folding the load. This is needed
        // to make sure immediates created by shrinkAndImmediate are always
        // folded. Ideally we would narrow the load during DAG combine and get
        // the best of both worlds.
        if (U->getOpcode() == ISD::AND &&
            Imm->getAPIntValue().getBitWidth() == 64 &&
            Imm->getAPIntValue().isIntN(32))
          return false;

        // If this is really a zext_inreg that can be represented with a movzx
        // instruction, prefer that.
        // TODO: We could shrink the load and fold if it is non-volatile.
        if (U->getOpcode() == ISD::AND &&
            (Imm->getAPIntValue() == UINT8_MAX ||
             Imm->getAPIntValue() == UINT16_MAX ||
             Imm->getAPIntValue() == UINT32_MAX))
          return false;

        // ADD/SUB can negate the immediate and use the opposite operation
        // to fit 128 into a sign-extended 8-bit immediate.
        if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8))
          return false;

        if ((U->getOpcode() == X86ISD::ADD || U->getOpcode() == X86ISD::SUB) &&
            (-Imm->getAPIntValue()).isSignedIntN(8) &&
            hasNoCarryFlagUses(SDValue(U, 1)))
          return false;
      }

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl    %gs:0, %eax
      // leal    i@NTPOFF(%eax), %eax
      // instead of
      // movl    $i@NTPOFF, %eax
      // addl    %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non-TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }

      // Don't fold load if this matches the BTS/BTR/BTC patterns.
      // BTS: (or X, (shl 1, n))
      // BTR: (and X, (rotl -2, n))
      // BTC: (xor X, (shl 1, n))
      if (U->getOpcode() == ISD::OR || U->getOpcode() == ISD::XOR) {
        if (U->getOperand(0).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(0).getOperand(0)))
          return false;

        if (U->getOperand(1).getOpcode() == ISD::SHL &&
            isOneConstant(U->getOperand(1).getOperand(0)))
          return false;
      }
      if (U->getOpcode() == ISD::AND) {
        SDValue U0 = U->getOperand(0);
        SDValue U1 = U->getOperand(1);
        if (U0.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U0.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }

        if (U1.getOpcode() == ISD::ROTL) {
          auto *C = dyn_cast<ConstantSDNode>(U1.getOperand(0));
          if (C && C->getSExtValue() == -2)
            return false;
        }
      }

      break;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
      // Don't fold a load into a shift by immediate. The BMI2 instructions
      // support folding a load, but not an immediate. The legacy instructions
      // support folding an immediate, but can't fold a load. Folding an
      // immediate is preferable to folding a load.
      if (isa<ConstantSDNode>(U->getOperand(1)))
        return false;

      break;
    }
  }

  // Prevent folding a load if this can be implemented with an insert_subreg
  // or a move that implicitly zeroes.
  if (Root->getOpcode() == ISD::INSERT_SUBVECTOR &&
      isNullConstant(Root->getOperand(2)) &&
      (Root->getOperand(0).isUndef() ||
       ISD::isBuildVectorAllZeros(Root->getOperand(0).getNode())))
    return false;

  return true;
}

// Indicates it is profitable to form an AVX512 masked operation. Returning
// false will favor a masked register-register move or vblendm and the
// operation will be selected separately.
bool X86DAGToDAGISel::isProfitableToFormMaskedOp(SDNode *N) const {
  assert(
      (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::SELECTS) &&
      "Unexpected opcode!");

  // If the operation has additional users, the operation will be duplicated.
  // Check the use count to prevent that.
  // FIXME: Are there cheap opcodes we might want to duplicate?
  return N->getOperand(1).hasOneUse();
}

/// Replace the original chain operand of the call with
/// load's chain operand and move load below the call's chain operand.
static void moveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Load), MVT::Other, Ops);
    Ops.clear();
    Ops.push_back(NewChain);
  }
  Ops.append(OrigChain->op_begin() + 1, OrigChain->op_end());
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), Ops);
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  Ops.append(Call->op_begin() + 1, Call->op_end());
  CurDAG->UpdateNodeOperands(Call.getNode(), Ops);
}

/// Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain, this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  auto *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      !LD->isSimple() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

static bool isEndbrImm64(uint64_t Imm) {
// There may be some other prefix bytes between 0xF3 and 0x0F1EFA.
// e.g.: 0xF3660F1EFA, 0xF3670F1EFA
  if ((Imm & 0x00FFFFFF) != 0x0F1EFA)
    return false;

  uint8_t OptionalPrefixBytes [] = {0x26, 0x2e, 0x36, 0x3e, 0x64,
                                    0x65, 0x66, 0x67, 0xf0, 0xf2};
  int i = 24; // The low 24 bits (0x0F1EFA) have already been matched.
  while (i < 64) {
    uint8_t Byte = (Imm >> i) & 0xFF;
    if (Byte == 0xF3)
      return true;
    if (!llvm::is_contained(OptionalPrefixBytes, Byte))
      return false;
    i += 8;
  }

  return false;
}

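// Returns true for 512-bit vector types whose byte/word elements require the
// AVX512BW feature to be operated on natively.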
static bool needBWI(MVT VT) {
  return (VT == MVT::v32i16 || VT == MVT::v32f16 || VT == MVT::v64i8);
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  bool MadeChange = false;
  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = &*I++; // Preincrement iterator to avoid invalidation issues.

    // This is for CET enhancement.
    //
    // ENDBR32 and ENDBR64 have specific opcodes:
    // ENDBR32: F3 0F 1E FB
    // ENDBR64: F3 0F 1E FA
    // We do not want attackers to find unintended ENDBR32/64 opcode matches
    // in the binary. Here's an example:
    // If the compiler had to generate asm for the following code:
    // a = 0xF30F1EFA
    // it could, for example, generate:
    // mov 0xF30F1EFA, dword ptr[a]
    // In such a case, the binary would include a gadget that starts with a
    // fake ENDBR64 opcode. Therefore, we split such constant generation into
    // multiple operations so the pattern does not appear in the binary.
    if (N->getOpcode() == ISD::Constant) {
      MVT VT = N->getSimpleValueType(0);
      int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
      int32_t EndbrImm = Subtarget->is64Bit() ? 0xF30F1EFA : 0xF30F1EFB;
      if (Imm == EndbrImm || isEndbrImm64(Imm)) {
        // Check that the cf-protection-branch is enabled.
        Metadata *CFProtectionBranch =
          MF->getMMI().getModule()->getModuleFlag("cf-protection-branch");
        if (CFProtectionBranch || IndirectBranchTracking) {
          SDLoc dl(N);
          SDValue Complement = CurDAG->getConstant(~Imm, dl, VT, false, true);
          Complement = CurDAG->getNOT(dl, Complement, VT);
          --I;
          CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Complement);
          ++I;
          MadeChange = true;
          continue;
        }
      }
    }

    // If this is a target specific AND node with no flag usages, turn it back
    // into ISD::AND to enable test instruction matching.
    if (N->getOpcode() == X86ISD::AND && !N->hasAnyUseOfValue(1)) {
      SDValue Res = CurDAG->getNode(ISD::AND, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }

    // Convert vector increment or decrement to sub/add with an all-ones
    // constant:
    // add X, <1, 1...> --> sub X, <-1, -1...>
    // sub X, <1, 1...> --> add X, <-1, -1...>
    // The all-ones vector constant can be materialized using a pcmpeq
    // instruction that is commonly recognized as an idiom (has no register
    // dependency), so that's better/smaller than loading a splat 1 constant.
    //
    // But don't do this if it would inhibit a potentially profitable load
    // folding opportunity for the other operand. That only occurs with the
    // intersection of:
    // (1) The other operand (op0) is load foldable.
    // (2) The op is an add (otherwise, we are *creating* an add and can still
    //     load fold the other op).
    // (3) The target has AVX (otherwise, we have a destructive add and can't
    //     load fold the other op without killing the constant op).
    // (4) The constant 1 vector has multiple uses (so it is profitable to load
    //     into a register anyway).
    auto mayPreventLoadFold = [&]() {
      return X86::mayFoldLoad(N->getOperand(0), *Subtarget) &&
             N->getOpcode() == ISD::ADD && Subtarget->hasAVX() &&
             !N->getOperand(1).hasOneUse();
    };
    if ((N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
        N->getSimpleValueType(0).isVector() && !mayPreventLoadFold()) {
      APInt SplatVal;
      if (X86::isConstantSplat(N->getOperand(1), SplatVal) &&
          SplatVal.isOne()) {
        SDLoc DL(N);

        MVT VT = N->getSimpleValueType(0);
        unsigned NumElts = VT.getSizeInBits() / 32;
        SDValue AllOnes =
            CurDAG->getAllOnesConstant(DL, MVT::getVectorVT(MVT::i32, NumElts));
        AllOnes = CurDAG->getBitcast(VT, AllOnes);

        unsigned NewOpcode = N->getOpcode() == ISD::ADD ? ISD::SUB : ISD::ADD;
        SDValue Res =
            CurDAG->getNode(NewOpcode, DL, VT, N->getOperand(0), AllOnes);
        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }
    }

    switch (N->getOpcode()) {
    case X86ISD::VBROADCAST: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && needBWI(VT)) {
        MVT NarrowVT = VT.getHalfNumVectorElementsVT();
        SDLoc dl(N);
        SDValue NarrowBCast =
            CurDAG->getNode(X86ISD::VBROADCAST, dl, NarrowVT, N->getOperand(0));
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = NarrowVT.getVectorMinNumElements();
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        CurDAG->ReplaceAllUsesWith(N, Res.getNode());
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case X86ISD::VBROADCAST_LOAD: {
      MVT VT = N->getSimpleValueType(0);
      // Emulate v32i16/v64i8 broadcast without BWI.
      if (!Subtarget->hasBWI() && needBWI(VT)) {
        MVT NarrowVT = VT.getHalfNumVectorElementsVT();
        auto *MemNode = cast<MemSDNode>(N);
        SDLoc dl(N);
        SDVTList VTs = CurDAG->getVTList(NarrowVT, MVT::Other);
        SDValue Ops[] = {MemNode->getChain(), MemNode->getBasePtr()};
        SDValue NarrowBCast = CurDAG->getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, dl, VTs, Ops, MemNode->getMemoryVT(),
            MemNode->getMemOperand());
        SDValue Res =
            CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, CurDAG->getUNDEF(VT),
                            NarrowBCast, CurDAG->getIntPtrConstant(0, dl));
        unsigned Index = NarrowVT.getVectorMinNumElements();
        Res = CurDAG->getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, NarrowBCast,
                              CurDAG->getIntPtrConstant(Index, dl));

        --I;
        SDValue To[] = {Res, NarrowBCast.getValue(1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }

      break;
    }
    case ISD::LOAD: {
      // If this is a XMM/YMM load of the same lower bits as another YMM/ZMM
      // load, then just extract the lower subvector and avoid the second load.
      auto *Ld = cast<LoadSDNode>(N);
      MVT VT = N->getSimpleValueType(0);
      if (!ISD::isNormalLoad(Ld) || !Ld->isSimple() ||
          !(VT.is128BitVector() || VT.is256BitVector()))
        break;

      MVT MaxVT = VT;
      SDNode *MaxLd = nullptr;
      SDValue Ptr = Ld->getBasePtr();
      SDValue Chain = Ld->getChain();
      for (SDNode *User : Ptr->uses()) {
        auto *UserLd = dyn_cast<LoadSDNode>(User);
        MVT UserVT = User->getSimpleValueType(0);
        if (User != N && UserLd && ISD::isNormalLoad(User) &&
            UserLd->getBasePtr() == Ptr && UserLd->getChain() == Chain &&
            !User->hasAnyUseOfValue(1) &&
            (UserVT.is256BitVector() || UserVT.is512BitVector()) &&
            UserVT.getSizeInBits() > VT.getSizeInBits() &&
            (!MaxLd || UserVT.getSizeInBits() > MaxVT.getSizeInBits())) {
          MaxLd = User;
          MaxVT = UserVT;
        }
      }
      if (MaxLd) {
        SDLoc dl(N);
        unsigned NumSubElts = VT.getSizeInBits() / MaxVT.getScalarSizeInBits();
        MVT SubVT = MVT::getVectorVT(MaxVT.getScalarType(), NumSubElts);
        SDValue Extract = CurDAG->getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT,
                                          SDValue(MaxLd, 0),
                                          CurDAG->getIntPtrConstant(0, dl));
        SDValue Res = CurDAG->getBitcast(VT, Extract);

        --I;
        SDValue To[] = {Res, SDValue(MaxLd, 1)};
        CurDAG->ReplaceAllUsesWith(N, To);
        ++I;
        MadeChange = true;
        continue;
      }
      break;
    }
    case ISD::VSELECT: {
      // Replace VSELECT with non-mask conditions with BLENDV/VPTERNLOG.
      EVT EleVT = N->getOperand(0).getValueType().getVectorElementType();
      if (EleVT == MVT::i1)
        break;

      assert(Subtarget->hasSSE41() && "Expected SSE4.1 support!");
      assert(N->getValueType(0).getVectorElementType() != MVT::i16 &&
             "We can't replace VSELECT with BLENDV in vXi16!");
      SDValue R;
      if (Subtarget->hasVLX() && CurDAG->ComputeNumSignBits(N->getOperand(0)) ==
                                     EleVT.getSizeInBits()) {
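        // Ternary-logic immediate 0xCA encodes the bitwise function
        // (A & B) | (~A & C), i.e. a per-bit select of B/C by the condition A,
        // which is exact here because the condition lanes are known to be
        // all-ones or all-zeros.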
        R = CurDAG->getNode(X86ISD::VPTERNLOG, SDLoc(N), N->getValueType(0),
                            N->getOperand(0), N->getOperand(1), N->getOperand(2),
                            CurDAG->getTargetConstant(0xCA, SDLoc(N), MVT::i8));
      } else {
        R = CurDAG->getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0),
                            N->getOperand(0), N->getOperand(1),
                            N->getOperand(2));
      }
      --I;
      CurDAG->ReplaceAllUsesWith(N, R.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FP_ROUND:
    case ISD::STRICT_FP_ROUND:
    case ISD::FP_TO_SINT:
    case ISD::FP_TO_UINT:
    case ISD::STRICT_FP_TO_SINT:
    case ISD::STRICT_FP_TO_UINT: {
      // Replace vector fp_to_s/uint with their X86 specific equivalent so we
      // don't need 2 sets of patterns.
      if (!N->getSimpleValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::FP_ROUND:          NewOpc = X86ISD::VFPROUND;        break;
      case ISD::STRICT_FP_ROUND:   NewOpc = X86ISD::STRICT_VFPROUND; break;
      case ISD::STRICT_FP_TO_SINT: NewOpc = X86ISD::STRICT_CVTTP2SI; break;
      case ISD::FP_TO_SINT:        NewOpc = X86ISD::CVTTP2SI;        break;
      case ISD::STRICT_FP_TO_UINT: NewOpc = X86ISD::STRICT_CVTTP2UI; break;
      case ISD::FP_TO_UINT:        NewOpc = X86ISD::CVTTP2UI;        break;
      }
      SDValue Res;
      if (N->isStrictFPOpcode())
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), {N->getValueType(0), MVT::Other},
                            {N->getOperand(0), N->getOperand(1)});
      else
        Res =
            CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                            N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL: {
      // Replace vector shifts with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::SHL: NewOpc = X86ISD::VSHLV; break;
      case ISD::SRA: NewOpc = X86ISD::VSRAV; break;
      case ISD::SRL: NewOpc = X86ISD::VSRLV; break;
      }
      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0), N->getOperand(1));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::ANY_EXTEND:
    case ISD::ANY_EXTEND_VECTOR_INREG: {
      // Replace vector any extend with the zero extend equivalents so we don't
      // need 2 sets of patterns. Ignore vXi1 extensions.
      if (!N->getValueType(0).isVector())
        break;

      unsigned NewOpc;
      if (N->getOperand(0).getScalarValueSizeInBits() == 1) {
        assert(N->getOpcode() == ISD::ANY_EXTEND &&
               "Unexpected opcode for mask vector!");
        NewOpc = ISD::SIGN_EXTEND;
      } else {
        NewOpc = N->getOpcode() == ISD::ANY_EXTEND
                              ? ISD::ZERO_EXTEND
                              : ISD::ZERO_EXTEND_VECTOR_INREG;
      }

      SDValue Res = CurDAG->getNode(NewOpc, SDLoc(N), N->getValueType(0),
                                    N->getOperand(0));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    case ISD::FCEIL:
    case ISD::STRICT_FCEIL:
    case ISD::FFLOOR:
    case ISD::STRICT_FFLOOR:
    case ISD::FTRUNC:
    case ISD::STRICT_FTRUNC:
    case ISD::FROUNDEVEN:
    case ISD::STRICT_FROUNDEVEN:
    case ISD::FNEARBYINT:
    case ISD::STRICT_FNEARBYINT:
    case ISD::FRINT:
    case ISD::STRICT_FRINT: {
      // Replace fp rounding with their X86 specific equivalent so we don't
      // need 2 sets of patterns.
      unsigned Imm;
      switch (N->getOpcode()) {
      default: llvm_unreachable("Unexpected opcode!");
      case ISD::STRICT_FCEIL:
      case ISD::FCEIL:      Imm = 0xA; break;
      case ISD::STRICT_FFLOOR:
      case ISD::FFLOOR:     Imm = 0x9; break;
      case ISD::STRICT_FTRUNC:
      case ISD::FTRUNC:     Imm = 0xB; break;
      case ISD::STRICT_FROUNDEVEN:
      case ISD::FROUNDEVEN: Imm = 0x8; break;
      case ISD::STRICT_FNEARBYINT:
      case ISD::FNEARBYINT: Imm = 0xC; break;
      case ISD::STRICT_FRINT:
      case ISD::FRINT:      Imm = 0x4; break;
      }
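      // Per the ROUND*/VRNDSCALE immediate encoding: bits 1:0 select the
      // rounding mode (00 nearest, 01 down, 10 up, 11 truncate), bit 2 uses
      // the MXCSR rounding mode instead, and bit 3 suppresses precision
      // exceptions. E.g. 0xA = suppress-exceptions | round-up = ceil.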
      SDLoc dl(N);
      bool IsStrict = N->isStrictFPOpcode();
      SDValue Res;
      if (IsStrict)
        Res = CurDAG->getNode(X86ISD::STRICT_VRNDSCALE, dl,
                              {N->getValueType(0), MVT::Other},
                              {N->getOperand(0), N->getOperand(1),
                               CurDAG->getTargetConstant(Imm, dl, MVT::i32)});
      else
        Res = CurDAG->getNode(X86ISD::VRNDSCALE, dl, N->getValueType(0),
                              N->getOperand(0),
                              CurDAG->getTargetConstant(Imm, dl, MVT::i32));
      --I;
      CurDAG->ReplaceAllUsesWith(N, Res.getNode());
      ++I;
      MadeChange = true;
      continue;
    }
    case X86ISD::FANDN:
    case X86ISD::FAND:
    case X86ISD::FOR:
    case X86ISD::FXOR: {
      // Widen scalar fp logic ops to vector to reduce isel patterns.
      // FIXME: Can we do this during lowering/combine?
      MVT VT = N->getSimpleValueType(0);
      if (VT.isVector() || VT == MVT::f128)
        break;

      MVT VecVT = VT == MVT::f64   ? MVT::v2f64
                  : VT == MVT::f32 ? MVT::v4f32
                                   : MVT::v8f16;

      SDLoc dl(N);
      SDValue Op0 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(0));
      SDValue Op1 = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT,
                                    N->getOperand(1));

      SDValue Res;
      if (Subtarget->hasSSE2()) {
        EVT IntVT = EVT(VecVT).changeVectorElementTypeToInteger();
        Op0 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op0);
        Op1 = CurDAG->getNode(ISD::BITCAST, dl, IntVT, Op1);
        unsigned Opc;
        switch (N->getOpcode()) {
        default: llvm_unreachable("Unexpected opcode!");
        case X86ISD::FANDN: Opc = X86ISD::ANDNP; break;
        case X86ISD::FAND:  Opc = ISD::AND;      break;
        case X86ISD::FOR:   Opc = ISD::OR;       break;
        case X86ISD::FXOR:  Opc = ISD::XOR;      break;
        }
        Res = CurDAG->getNode(Opc, dl, IntVT, Op0, Op1);
        Res = CurDAG->getNode(ISD::BITCAST, dl, VecVT, Res);
      } else {
        Res = CurDAG->getNode(N->getOpcode(), dl, VecVT, Op0, Op1);
      }
      Res = CurDAG->getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res,
                            CurDAG->getIntPtrConstant(0, dl));
      --I;
      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res);
      ++I;
      MadeChange = true;
      continue;
    }
    }

    if (OptLevel != CodeGenOptLevel::None &&
        // Only do this when the target can fold the load into the call or
        // jmp.
        !Subtarget->useIndirectThunkCalls() &&
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->slowTwoMemOps()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          (Subtarget->is64Bit() ||
           !getTargetMachine().isPositionIndependent())))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      moveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      MadeChange = true;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to be a store
    // and load to the stack.  This is a gross hack.  We would like to simply
    // mark these as being illegal, but when we do that, legalize produces
    // these when it expands calls, then expands these in the same legalize
    // pass.  We would like dag combine to be able to hack on these between
    // the call expansion and the node legalization.  As such this pass
    // basically does "really late" legalization of these inline with the
    // X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
1340    switch (N->getOpcode()) {
1341    default: continue;
1342    case ISD::FP_ROUND:
1343    case ISD::FP_EXTEND:
1344    {
1345      MVT SrcVT = N->getOperand(0).getSimpleValueType();
1346      MVT DstVT = N->getSimpleValueType(0);
1347
1348      // If any of the sources are vectors, no fp stack involved.
1349      if (SrcVT.isVector() || DstVT.isVector())
1350        continue;
1351
1352      // If the source and destination are SSE registers, then this is a legal
1353      // conversion that should not be lowered.
1354      const X86TargetLowering *X86Lowering =
1355          static_cast<const X86TargetLowering *>(TLI);
1356      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1357      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1358      if (SrcIsSSE && DstIsSSE)
1359        continue;
1360
1361      if (!SrcIsSSE && !DstIsSSE) {
1362        // If this is an FPStack extension, it is a noop.
1363        if (N->getOpcode() == ISD::FP_EXTEND)
1364          continue;
1365        // If this is a value-preserving FPStack truncation, it is a noop.
1366        if (N->getConstantOperandVal(1))
1367          continue;
1368      }
1369
1370      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1371      // FPStack has extload and truncstore.  SSE can fold direct loads into other
1372      // operations.  Based on this, decide what we want to do.
1373      MVT MemVT = (N->getOpcode() == ISD::FP_ROUND) ? DstVT : SrcVT;
1374      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1375      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1376      MachinePointerInfo MPI =
1377          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1378      SDLoc dl(N);
1379
1380      // FIXME: optimize the case where the src/dest is a load or store?
1381
1382      SDValue Store = CurDAG->getTruncStore(
1383          CurDAG->getEntryNode(), dl, N->getOperand(0), MemTmp, MPI, MemVT);
1384      SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store,
1385                                          MemTmp, MPI, MemVT);
1386
      // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
      // extload we created.  This will cause general havoc on the dag because
      // anything below the conversion could be folded into other existing nodes.
      // To avoid invalidating 'I', back it up to the convert node.
1391      --I;
1392      CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);
1393      break;
1394    }
1395
    // The sequence of events for lowering STRICT_FP versions of these nodes
    // requires dealing with the chain differently, as there is already a
    // preexisting chain.
1398    case ISD::STRICT_FP_ROUND:
1399    case ISD::STRICT_FP_EXTEND:
1400    {
1401      MVT SrcVT = N->getOperand(1).getSimpleValueType();
1402      MVT DstVT = N->getSimpleValueType(0);
1403
1404      // If any of the sources are vectors, no fp stack involved.
1405      if (SrcVT.isVector() || DstVT.isVector())
1406        continue;
1407
1408      // If the source and destination are SSE registers, then this is a legal
1409      // conversion that should not be lowered.
1410      const X86TargetLowering *X86Lowering =
1411          static_cast<const X86TargetLowering *>(TLI);
1412      bool SrcIsSSE = X86Lowering->isScalarFPTypeInSSEReg(SrcVT);
1413      bool DstIsSSE = X86Lowering->isScalarFPTypeInSSEReg(DstVT);
1414      if (SrcIsSSE && DstIsSSE)
1415        continue;
1416
1417      if (!SrcIsSSE && !DstIsSSE) {
1418        // If this is an FPStack extension, it is a noop.
1419        if (N->getOpcode() == ISD::STRICT_FP_EXTEND)
1420          continue;
1421        // If this is a value-preserving FPStack truncation, it is a noop.
1422        if (N->getConstantOperandVal(2))
1423          continue;
1424      }
1425
1426      // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
1427      // FPStack has extload and truncstore.  SSE can fold direct loads into other
1428      // operations.  Based on this, decide what we want to do.
1429      MVT MemVT = (N->getOpcode() == ISD::STRICT_FP_ROUND) ? DstVT : SrcVT;
1430      SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
1431      int SPFI = cast<FrameIndexSDNode>(MemTmp)->getIndex();
1432      MachinePointerInfo MPI =
1433          MachinePointerInfo::getFixedStack(CurDAG->getMachineFunction(), SPFI);
1434      SDLoc dl(N);
1435
1436      // FIXME: optimize the case where the src/dest is a load or store?
1437
      // Since the operation is StrictFP, use the preexisting chain.
1439      SDValue Store, Result;
1440      if (!SrcIsSSE) {
1441        SDVTList VTs = CurDAG->getVTList(MVT::Other);
1442        SDValue Ops[] = {N->getOperand(0), N->getOperand(1), MemTmp};
1443        Store = CurDAG->getMemIntrinsicNode(X86ISD::FST, dl, VTs, Ops, MemVT,
1444                                            MPI, /*Align*/ std::nullopt,
1445                                            MachineMemOperand::MOStore);
1446        if (N->getFlags().hasNoFPExcept()) {
1447          SDNodeFlags Flags = Store->getFlags();
1448          Flags.setNoFPExcept(true);
1449          Store->setFlags(Flags);
1450        }
1451      } else {
1452        assert(SrcVT == MemVT && "Unexpected VT!");
1453        Store = CurDAG->getStore(N->getOperand(0), dl, N->getOperand(1), MemTmp,
1454                                 MPI);
1455      }
1456
1457      if (!DstIsSSE) {
1458        SDVTList VTs = CurDAG->getVTList(DstVT, MVT::Other);
1459        SDValue Ops[] = {Store, MemTmp};
1460        Result = CurDAG->getMemIntrinsicNode(
1461            X86ISD::FLD, dl, VTs, Ops, MemVT, MPI,
1462            /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
1463        if (N->getFlags().hasNoFPExcept()) {
1464          SDNodeFlags Flags = Result->getFlags();
1465          Flags.setNoFPExcept(true);
1466          Result->setFlags(Flags);
1467        }
1468      } else {
1469        assert(DstVT == MemVT && "Unexpected VT!");
1470        Result = CurDAG->getLoad(DstVT, dl, Store, MemTmp, MPI);
1471      }
1472
      // We're about to replace all uses of the STRICT_FP_ROUND/STRICT_FP_EXTEND
      // with the load we created.  This will cause general havoc on the dag
      // because anything below the conversion could be folded into other
      // existing nodes.  To avoid invalidating 'I', back it up to the convert
      // node.
1477      --I;
1478      CurDAG->ReplaceAllUsesWith(N, Result.getNode());
1479      break;
1480    }
1481    }
1482
1483
    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process; the dead node is cleaned up by RemoveDeadNodes
    // below.
1486    ++I;
1487    MadeChange = true;
1488  }
1489
1490  // Remove any dead nodes that may have been left behind.
1491  if (MadeChange)
1492    CurDAG->RemoveDeadNodes();
1493}
1494
1495// Look for a redundant movzx/movsx that can occur after an 8-bit divrem.
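// For example (a hypothetical machine-node sketch):
//   t1 = MOVZX32rr8_NOREX %reg          ; extend of the 8-bit div/rem result
//   t2 = EXTRACT_SUBREG t1, sub_8bit    ; re-extract the low byte
//   t3 = MOVZX32rr8 t2                  ; redundant second extend
// Here t3 can simply reuse t1.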
1496bool X86DAGToDAGISel::tryOptimizeRem8Extend(SDNode *N) {
1497  unsigned Opc = N->getMachineOpcode();
1498  if (Opc != X86::MOVZX32rr8 && Opc != X86::MOVSX32rr8 &&
1499      Opc != X86::MOVSX64rr8)
1500    return false;
1501
1502  SDValue N0 = N->getOperand(0);
1503
  // We need to be extracting the low byte of an extend.
1505  if (!N0.isMachineOpcode() ||
1506      N0.getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG ||
1507      N0.getConstantOperandVal(1) != X86::sub_8bit)
1508    return false;
1509
1510  // We're looking for either a movsx or movzx to match the original opcode.
1511  unsigned ExpectedOpc = Opc == X86::MOVZX32rr8 ? X86::MOVZX32rr8_NOREX
1512                                                : X86::MOVSX32rr8_NOREX;
1513  SDValue N00 = N0.getOperand(0);
1514  if (!N00.isMachineOpcode() || N00.getMachineOpcode() != ExpectedOpc)
1515    return false;
1516
1517  if (Opc == X86::MOVSX64rr8) {
    // We had a sign extend from 8 to 64 bits, but the existing extend only
    // goes from 8 to 32.  We still need to extend from 32 to 64.
1520    MachineSDNode *Extend = CurDAG->getMachineNode(X86::MOVSX64rr32, SDLoc(N),
1521                                                   MVT::i64, N00);
1522    ReplaceUses(N, Extend);
1523  } else {
    // OK, we can drop this extend and just use the original extend.
1525    ReplaceUses(N, N00.getNode());
1526  }
1527
1528  return true;
1529}
1530
1531void X86DAGToDAGISel::PostprocessISelDAG() {
1532  // Skip peepholes at -O0.
1533  if (TM.getOptLevel() == CodeGenOptLevel::None)
1534    return;
1535
1536  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();
1537
1538  bool MadeChange = false;
1539  while (Position != CurDAG->allnodes_begin()) {
1540    SDNode *N = &*--Position;
1541    // Skip dead nodes and any non-machine opcodes.
1542    if (N->use_empty() || !N->isMachineOpcode())
1543      continue;
1544
1545    if (tryOptimizeRem8Extend(N)) {
1546      MadeChange = true;
1547      continue;
1548    }
1549
    // Look for a TESTrr+ANDrr pattern where both operands of the test are
    // the same AND.  Rewrite to test the AND's inputs directly and remove
    // the now-dead AND.
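    // For example (assembly sketch):
    //   andl %esi, %edi
    //   testl %edi, %edi
    // becomes:
    //   testl %esi, %edi
    // when the AND result has no uses other than the test.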
1552    unsigned Opc = N->getMachineOpcode();
1553    if ((Opc == X86::TEST8rr || Opc == X86::TEST16rr ||
1554         Opc == X86::TEST32rr || Opc == X86::TEST64rr) &&
1555        N->getOperand(0) == N->getOperand(1) &&
1556        N->getOperand(0)->hasNUsesOfValue(2, N->getOperand(0).getResNo()) &&
1557        N->getOperand(0).isMachineOpcode()) {
1558      SDValue And = N->getOperand(0);
1559      unsigned N0Opc = And.getMachineOpcode();
1560      if ((N0Opc == X86::AND8rr || N0Opc == X86::AND16rr ||
1561           N0Opc == X86::AND32rr || N0Opc == X86::AND64rr) &&
1562          !And->hasAnyUseOfValue(1)) {
1563        MachineSDNode *Test = CurDAG->getMachineNode(Opc, SDLoc(N),
1564                                                     MVT::i32,
1565                                                     And.getOperand(0),
1566                                                     And.getOperand(1));
1567        ReplaceUses(N, Test);
1568        MadeChange = true;
1569        continue;
1570      }
1571      if ((N0Opc == X86::AND8rm || N0Opc == X86::AND16rm ||
1572           N0Opc == X86::AND32rm || N0Opc == X86::AND64rm) &&
1573          !And->hasAnyUseOfValue(1)) {
1574        unsigned NewOpc;
1575        switch (N0Opc) {
1576        case X86::AND8rm:  NewOpc = X86::TEST8mr; break;
1577        case X86::AND16rm: NewOpc = X86::TEST16mr; break;
1578        case X86::AND32rm: NewOpc = X86::TEST32mr; break;
1579        case X86::AND64rm: NewOpc = X86::TEST64mr; break;
1580        }
1581
        // Need to swap the memory and register operands.
1583        SDValue Ops[] = { And.getOperand(1),
1584                          And.getOperand(2),
1585                          And.getOperand(3),
1586                          And.getOperand(4),
1587                          And.getOperand(5),
1588                          And.getOperand(0),
1589                          And.getOperand(6)  /* Chain */ };
1590        MachineSDNode *Test = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1591                                                     MVT::i32, MVT::Other, Ops);
1592        CurDAG->setNodeMemRefs(
1593            Test, cast<MachineSDNode>(And.getNode())->memoperands());
1594        ReplaceUses(And.getValue(2), SDValue(Test, 1));
1595        ReplaceUses(SDValue(N, 0), SDValue(Test, 0));
1596        MadeChange = true;
1597        continue;
1598      }
1599    }
1600
1601    // Look for a KAND+KORTEST and turn it into KTEST if only the zero flag is
1602    // used. We're doing this late so we can prefer to fold the AND into masked
1603    // comparisons. Doing that can be better for the live range of the mask
1604    // register.
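    // For example (assembly sketch):
    //   kandw %k2, %k1, %k0
    //   kortestw %k0, %k0
    // becomes:
    //   ktestw %k2, %k1
    // when only the zero flag of the result is consumed.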
1605    if ((Opc == X86::KORTESTBrr || Opc == X86::KORTESTWrr ||
1606         Opc == X86::KORTESTDrr || Opc == X86::KORTESTQrr) &&
1607        N->getOperand(0) == N->getOperand(1) &&
1608        N->isOnlyUserOf(N->getOperand(0).getNode()) &&
1609        N->getOperand(0).isMachineOpcode() &&
1610        onlyUsesZeroFlag(SDValue(N, 0))) {
1611      SDValue And = N->getOperand(0);
1612      unsigned N0Opc = And.getMachineOpcode();
1613      // KANDW is legal with AVX512F, but KTESTW requires AVX512DQ. The other
1614      // KAND instructions and KTEST use the same ISA feature.
1615      if (N0Opc == X86::KANDBrr ||
1616          (N0Opc == X86::KANDWrr && Subtarget->hasDQI()) ||
1617          N0Opc == X86::KANDDrr || N0Opc == X86::KANDQrr) {
1618        unsigned NewOpc;
1619        switch (Opc) {
1620        default: llvm_unreachable("Unexpected opcode!");
1621        case X86::KORTESTBrr: NewOpc = X86::KTESTBrr; break;
1622        case X86::KORTESTWrr: NewOpc = X86::KTESTWrr; break;
1623        case X86::KORTESTDrr: NewOpc = X86::KTESTDrr; break;
1624        case X86::KORTESTQrr: NewOpc = X86::KTESTQrr; break;
1625        }
1626        MachineSDNode *KTest = CurDAG->getMachineNode(NewOpc, SDLoc(N),
1627                                                      MVT::i32,
1628                                                      And.getOperand(0),
1629                                                      And.getOperand(1));
1630        ReplaceUses(N, KTest);
1631        MadeChange = true;
1632        continue;
1633      }
1634    }
1635
    // Attempt to remove vector moves that were inserted to zero upper bits.
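    // For example (a sketch):
    //   t1 = VPADDDrr ...                    ; VEX-encoded, zeroes upper bits
    //   t2 = VMOVDQArr t1                    ; move present only to zero uppers
    //   t3 = SUBREG_TO_REG 0, t2, sub_xmm
    // can be rewritten to use t1 directly, dropping the move.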
1637    if (Opc != TargetOpcode::SUBREG_TO_REG)
1638      continue;
1639
1640    unsigned SubRegIdx = N->getConstantOperandVal(2);
1641    if (SubRegIdx != X86::sub_xmm && SubRegIdx != X86::sub_ymm)
1642      continue;
1643
1644    SDValue Move = N->getOperand(1);
1645    if (!Move.isMachineOpcode())
1646      continue;
1647
    // Make sure it's one of the move opcodes we recognize.
1649    switch (Move.getMachineOpcode()) {
1650    default:
1651      continue;
1652    case X86::VMOVAPDrr:       case X86::VMOVUPDrr:
1653    case X86::VMOVAPSrr:       case X86::VMOVUPSrr:
1654    case X86::VMOVDQArr:       case X86::VMOVDQUrr:
1655    case X86::VMOVAPDYrr:      case X86::VMOVUPDYrr:
1656    case X86::VMOVAPSYrr:      case X86::VMOVUPSYrr:
1657    case X86::VMOVDQAYrr:      case X86::VMOVDQUYrr:
1658    case X86::VMOVAPDZ128rr:   case X86::VMOVUPDZ128rr:
1659    case X86::VMOVAPSZ128rr:   case X86::VMOVUPSZ128rr:
1660    case X86::VMOVDQA32Z128rr: case X86::VMOVDQU32Z128rr:
1661    case X86::VMOVDQA64Z128rr: case X86::VMOVDQU64Z128rr:
1662    case X86::VMOVAPDZ256rr:   case X86::VMOVUPDZ256rr:
1663    case X86::VMOVAPSZ256rr:   case X86::VMOVUPSZ256rr:
1664    case X86::VMOVDQA32Z256rr: case X86::VMOVDQU32Z256rr:
1665    case X86::VMOVDQA64Z256rr: case X86::VMOVDQU64Z256rr:
1666      break;
1667    }
1668
1669    SDValue In = Move.getOperand(0);
1670    if (!In.isMachineOpcode() ||
1671        In.getMachineOpcode() <= TargetOpcode::GENERIC_OP_END)
1672      continue;
1673
    // Make sure the instruction has a VEX, XOP, or EVEX prefix.  This filters
    // out instructions like SHA that use a legacy encoding and therefore do
    // not zero the upper bits.
1676    uint64_t TSFlags = getInstrInfo()->get(In.getMachineOpcode()).TSFlags;
1677    if ((TSFlags & X86II::EncodingMask) != X86II::VEX &&
1678        (TSFlags & X86II::EncodingMask) != X86II::EVEX &&
1679        (TSFlags & X86II::EncodingMask) != X86II::XOP)
1680      continue;
1681
    // The producing instruction is another vector instruction, so it already
    // zeroes the upper bits and we can drop the move.
1684    CurDAG->UpdateNodeOperands(N, N->getOperand(0), In, N->getOperand(2));
1685    MadeChange = true;
1686  }
1687
1688  if (MadeChange)
1689    CurDAG->RemoveDeadNodes();
1690}
1691
1692
1693/// Emit any code that needs to be executed only in the main function.
1694void X86DAGToDAGISel::emitSpecialCodeForMain() {
1695  if (Subtarget->isTargetCygMing()) {
1696    TargetLowering::ArgListTy Args;
1697    auto &DL = CurDAG->getDataLayout();
1698
1699    TargetLowering::CallLoweringInfo CLI(*CurDAG);
1700    CLI.setChain(CurDAG->getRoot())
1701        .setCallee(CallingConv::C, Type::getVoidTy(*CurDAG->getContext()),
1702                   CurDAG->getExternalSymbol("__main", TLI->getPointerTy(DL)),
1703                   std::move(Args));
1704    const TargetLowering &TLI = CurDAG->getTargetLoweringInfo();
1705    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
1706    CurDAG->setRoot(Result.second);
1707  }
1708}
1709
1710void X86DAGToDAGISel::emitFunctionEntryCode() {
1711  // If this is main, emit special code for main.
1712  const Function &F = MF->getFunction();
1713  if (F.hasExternalLinkage() && F.getName() == "main")
1714    emitSpecialCodeForMain();
1715}
1716
1717static bool isDispSafeForFrameIndex(int64_t Val) {
1718  // On 64-bit platforms, we can run into an issue where a frame index
1719  // includes a displacement that, when added to the explicit displacement,
1720  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
1722  // aggressive than the current fundamental assumption that it fits into
1723  // a 32-bit integer), a 31-bit disp should always be safe.
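  // For example, a combined displacement just under 2^30 added to a frame
  // object offset just under 2^30 still sums below 2^31, so the final value
  // fits the signed 32-bit displacement field.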
1724  return isInt<31>(Val);
1725}
1726
1727bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
1728                                            X86ISelAddressMode &AM) {
1729  // We may have already matched a displacement and the caller just added the
1730  // symbolic displacement. So we still need to do the checks even if Offset
1731  // is zero.
1732
1733  int64_t Val = AM.Disp + Offset;
1734
1735  // Cannot combine ExternalSymbol displacements with integer offsets.
1736  if (Val != 0 && (AM.ES || AM.MCSym))
1737    return true;
1738
1739  CodeModel::Model M = TM.getCodeModel();
1740  if (Subtarget->is64Bit()) {
1741    if (Val != 0 &&
1742        !X86::isOffsetSuitableForCodeModel(Val, M,
1743                                           AM.hasSymbolicDisplacement()))
1744      return true;
1745    // In addition to the checks required for a register base, check that
1746    // we do not try to use an unsafe Disp with a frame index.
1747    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
1748        !isDispSafeForFrameIndex(Val))
1749      return true;
1750    // In ILP32 (x32) mode, pointers are 32 bits and need to be zero-extended to
1751    // 64 bits. Instructions with 32-bit register addresses perform this zero
1752    // extension for us and we can safely ignore the high bits of Offset.
1753    // Instructions with only a 32-bit immediate address do not, though: they
    // sign extend instead.  This means that only the low 2GB of the address
    // space is directly addressable; we need indirect addressing for the high
    // 2GB of the address space.
1757    // TODO: Some of the earlier checks may be relaxed for ILP32 mode as the
1758    // implicit zero extension of instructions would cover up any problem.
1759    // However, we have asserts elsewhere that get triggered if we do, so keep
1760    // the checks for now.
1761    // TODO: We would actually be able to accept these, as well as the same
1762    // addresses in LP64 mode, by adding the EIZ pseudo-register as an operand
1763    // to get an address size override to be emitted. However, this
1764    // pseudo-register is not part of any register class and therefore causes
1765    // MIR verification to fail.
1766    if (Subtarget->isTarget64BitILP32() && !isUInt<31>(Val) &&
1767        !AM.hasBaseOrIndexReg())
1768      return true;
1769  }
1770  AM.Disp = Val;
1771  return false;
1772}
1773
1774bool X86DAGToDAGISel::matchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM,
1775                                         bool AllowSegmentRegForX32) {
1776  SDValue Address = N->getOperand(1);
1777
1778  // load gs:0 -> GS segment register.
1779  // load fs:0 -> FS segment register.
1780  //
1781  // This optimization is generally valid because the GNU TLS model defines that
1782  // gs:0 (or fs:0 on X86-64) contains its own address. However, for X86-64 mode
1783  // with 32-bit registers, as we get in ILP32 mode, those registers are first
  // zero-extended to 64 bits and then added to the base address, which gives
1785  // unwanted results when the register holds a negative value.
1786  // For more information see http://people.redhat.com/drepper/tls.pdf
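  // For example (an IR-level sketch), on a glibc target:
  //   %self = load ptr, ptr addrspace(256) null    ; i.e. gs:0
  // matches with the GS segment register and a zero displacement.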
1787  if (isNullConstant(Address) && AM.Segment.getNode() == nullptr &&
1788      !IndirectTlsSegRefs &&
1789      (Subtarget->isTargetGlibc() || Subtarget->isTargetAndroid() ||
1790       Subtarget->isTargetFuchsia())) {
1791    if (Subtarget->isTarget64BitILP32() && !AllowSegmentRegForX32)
1792      return true;
1793    switch (N->getPointerInfo().getAddrSpace()) {
1794    case X86AS::GS:
1795      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1796      return false;
1797    case X86AS::FS:
1798      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1799      return false;
1800      // Address space X86AS::SS is not handled here, because it is not used to
1801      // address TLS areas.
1802    }
1803  }
1804
1805  return true;
1806}
1807
1808/// Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes into an addressing
1809/// mode. These wrap things that will resolve down into a symbol reference.
1810/// If no match is possible, this returns true, otherwise it returns false.
1811bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
1812  // If the addressing mode already has a symbol as the displacement, we can
1813  // never match another symbol.
1814  if (AM.hasSymbolicDisplacement())
1815    return true;
1816
1817  bool IsRIPRelTLS = false;
1818  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
1819  if (IsRIPRel) {
1820    SDValue Val = N.getOperand(0);
1821    if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
1822      IsRIPRelTLS = true;
1823  }
1824
1825  // We can't use an addressing mode in the 64-bit large code model.
1826  // Global TLS addressing is an exception. In the medium code model,
  // we can use such a mode when RIP wrappers are present.
1828  // That signifies access to globals that are known to be "near",
1829  // such as the GOT itself.
1830  CodeModel::Model M = TM.getCodeModel();
1831  if (Subtarget->is64Bit() && M == CodeModel::Large && !IsRIPRelTLS)
1832    return true;
1833
1834  // Base and index reg must be 0 in order to use %rip as base.
1835  if (IsRIPRel && AM.hasBaseOrIndexReg())
1836    return true;
1837
1838  // Make a local copy in case we can't do this fold.
1839  X86ISelAddressMode Backup = AM;
1840
1841  int64_t Offset = 0;
1842  SDValue N0 = N.getOperand(0);
1843  if (auto *G = dyn_cast<GlobalAddressSDNode>(N0)) {
1844    AM.GV = G->getGlobal();
1845    AM.SymbolFlags = G->getTargetFlags();
1846    Offset = G->getOffset();
1847  } else if (auto *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
1848    AM.CP = CP->getConstVal();
1849    AM.Alignment = CP->getAlign();
1850    AM.SymbolFlags = CP->getTargetFlags();
1851    Offset = CP->getOffset();
1852  } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
1853    AM.ES = S->getSymbol();
1854    AM.SymbolFlags = S->getTargetFlags();
1855  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
1856    AM.MCSym = S->getMCSymbol();
1857  } else if (auto *J = dyn_cast<JumpTableSDNode>(N0)) {
1858    AM.JT = J->getIndex();
1859    AM.SymbolFlags = J->getTargetFlags();
1860  } else if (auto *BA = dyn_cast<BlockAddressSDNode>(N0)) {
1861    AM.BlockAddr = BA->getBlockAddress();
1862    AM.SymbolFlags = BA->getTargetFlags();
1863    Offset = BA->getOffset();
1864  } else
1865    llvm_unreachable("Unhandled symbol reference node.");
1866
1867  // Can't use an addressing mode with large globals.
1868  if (Subtarget->is64Bit() && !IsRIPRel && AM.GV &&
1869      TM.isLargeGlobalValue(AM.GV)) {
1870    AM = Backup;
1871    return true;
1872  }
1873
1874  if (foldOffsetIntoAddress(Offset, AM)) {
1875    AM = Backup;
1876    return true;
1877  }
1878
1879  if (IsRIPRel)
1880    AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
1881
1882  // Commit the changes now that we know this fold is safe.
1883  return false;
1884}
1885
1886/// Add the specified node to the specified addressing mode, returning true if
1887/// it cannot be done. This just pattern matches for the addressing mode.
1888bool X86DAGToDAGISel::matchAddress(SDValue N, X86ISelAddressMode &AM) {
1889  if (matchAddressRecursively(N, AM, 0))
1890    return true;
1891
1892  // Post-processing: Make a second attempt to fold a load, if we now know
1893  // that there will not be any other register. This is only performed for
1894  // 64-bit ILP32 mode since 32-bit mode and 64-bit LP64 mode will have folded
1895  // any foldable load the first time.
1896  if (Subtarget->isTarget64BitILP32() &&
1897      AM.BaseType == X86ISelAddressMode::RegBase &&
1898      AM.Base_Reg.getNode() != nullptr && AM.IndexReg.getNode() == nullptr) {
1899    SDValue Save_Base_Reg = AM.Base_Reg;
1900    if (auto *LoadN = dyn_cast<LoadSDNode>(Save_Base_Reg)) {
1901      AM.Base_Reg = SDValue();
1902      if (matchLoadInAddress(LoadN, AM, /*AllowSegmentRegForX32=*/true))
1903        AM.Base_Reg = Save_Base_Reg;
1904    }
1905  }
1906
1907  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
1908  // a smaller encoding and avoids a scaled-index.
1909  if (AM.Scale == 2 &&
1910      AM.BaseType == X86ISelAddressMode::RegBase &&
1911      AM.Base_Reg.getNode() == nullptr) {
1912    AM.Base_Reg = AM.IndexReg;
1913    AM.Scale = 1;
1914  }
1915
1916  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
1917  // because it has a smaller encoding.
1918  if (TM.getCodeModel() != CodeModel::Large &&
1919      (!AM.GV || !TM.isLargeGlobalValue(AM.GV)) && Subtarget->is64Bit() &&
1920      AM.Scale == 1 && AM.BaseType == X86ISelAddressMode::RegBase &&
1921      AM.Base_Reg.getNode() == nullptr && AM.IndexReg.getNode() == nullptr &&
1922      AM.SymbolFlags == X86II::MO_NO_FLAG && AM.hasSymbolicDisplacement()) {
1923    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);
1924  }
1925
1926  return false;
1927}
1928
1929bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
1930                               unsigned Depth) {
1931  // Add an artificial use to this node so that we can keep track of
1932  // it if it gets CSE'd with a different node.
1933  HandleSDNode Handle(N);
1934
1935  X86ISelAddressMode Backup = AM;
1936  if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1937      !matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
1938    return false;
1939  AM = Backup;
1940
  // Try again after commuting the operands.
1942  if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
1943                               Depth + 1) &&
1944      !matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
1945    return false;
1946  AM = Backup;
1947
1948  // If we couldn't fold both operands into the address at the same time,
1949  // see if we can just put each operand into a register and fold at least
1950  // the add.
1951  if (AM.BaseType == X86ISelAddressMode::RegBase &&
1952      !AM.Base_Reg.getNode() &&
1953      !AM.IndexReg.getNode()) {
1954    N = Handle.getValue();
1955    AM.Base_Reg = N.getOperand(0);
1956    AM.IndexReg = N.getOperand(1);
1957    AM.Scale = 1;
1958    return false;
1959  }
1960  N = Handle.getValue();
1961  return true;
1962}
1963
1964// Insert a node into the DAG at least before the Pos node's position. This
1965// will reposition the node as needed, and will assign it a node ID that is <=
1966// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
1967// IDs! The selection DAG must no longer depend on their uniqueness when this
1968// is used.
1969static void insertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
1970  if (N->getNodeId() == -1 ||
1971      (SelectionDAGISel::getUninvalidatedNodeId(N.getNode()) >
1972       SelectionDAGISel::getUninvalidatedNodeId(Pos.getNode()))) {
1973    DAG.RepositionNode(Pos->getIterator(), N.getNode());
    // Mark the node as invalid for pruning, since after this it may be a
    // successor of a selected node while otherwise sitting in the same
    // position as Pos.  Conservatively mark it with the same -abs(Id) to
    // ensure the node id invariant is preserved.
1978    N->setNodeId(Pos->getNodeId());
1979    SelectionDAGISel::InvalidateNodeId(N.getNode());
1980  }
1981}
1982
1983// Transform "(X >> (8-C1)) & (0xff << C1)" to "((X >> 8) & 0xff) << C1" if
1984// safe. This allows us to convert the shift and and into an h-register
1985// extract and a scaled index. Returns false if the simplification is
1986// performed.
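// For example, with C1 == 2:
//   (X >> 6) & 0x3fc  -->  ((X >> 8) & 0xff) << 2
// where ((X >> 8) & 0xff) is an h-register extract and the << 2 becomes an
// addressing-mode scale of 4.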
1987static bool foldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
1988                                      uint64_t Mask,
1989                                      SDValue Shift, SDValue X,
1990                                      X86ISelAddressMode &AM) {
1991  if (Shift.getOpcode() != ISD::SRL ||
1992      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
1993      !Shift.hasOneUse())
1994    return true;
1995
1996  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
1997  if (ScaleLog <= 0 || ScaleLog >= 4 ||
1998      Mask != (0xffu << ScaleLog))
1999    return true;
2000
2001  MVT XVT = X.getSimpleValueType();
2002  MVT VT = N.getSimpleValueType();
2003  SDLoc DL(N);
2004  SDValue Eight = DAG.getConstant(8, DL, MVT::i8);
2005  SDValue NewMask = DAG.getConstant(0xff, DL, XVT);
2006  SDValue Srl = DAG.getNode(ISD::SRL, DL, XVT, X, Eight);
2007  SDValue And = DAG.getNode(ISD::AND, DL, XVT, Srl, NewMask);
2008  SDValue Ext = DAG.getZExtOrTrunc(And, DL, VT);
2009  SDValue ShlCount = DAG.getConstant(ScaleLog, DL, MVT::i8);
2010  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, Ext, ShlCount);
2011
2012  // Insert the new nodes into the topological ordering. We must do this in
2013  // a valid topological ordering as nothing is going to go back and re-sort
2014  // these nodes. We continually insert before 'N' in sequence as this is
2015  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2016  // hierarchy left to express.
2017  insertDAGNode(DAG, N, Eight);
2018  insertDAGNode(DAG, N, NewMask);
2019  insertDAGNode(DAG, N, Srl);
2020  insertDAGNode(DAG, N, And);
2021  insertDAGNode(DAG, N, Ext);
2022  insertDAGNode(DAG, N, ShlCount);
2023  insertDAGNode(DAG, N, Shl);
2024  DAG.ReplaceAllUsesWith(N, Shl);
2025  DAG.RemoveDeadNode(N.getNode());
2026  AM.IndexReg = Ext;
2027  AM.Scale = (1 << ScaleLog);
2028  return false;
2029}
2030
2031// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
2032// allows us to fold the shift into this addressing mode. Returns false if the
2033// transform succeeded.
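// For example, with C1 == 2 and C2 == 0x3fc:
//   (X << 2) & 0x3fc  -->  (X & 0xff) << 2
// where the << 2 can then be folded into the addressing-mode scale.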
2034static bool foldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
2035                                        X86ISelAddressMode &AM) {
2036  SDValue Shift = N.getOperand(0);
2037
2038  // Use a signed mask so that shifting right will insert sign bits. These
2039  // bits will be removed when we shift the result left so it doesn't matter
2040  // what we use. This might allow a smaller immediate encoding.
2041  int64_t Mask = cast<ConstantSDNode>(N->getOperand(1))->getSExtValue();
2042
2043  // If we have an any_extend feeding the AND, look through it to see if there
2044  // is a shift behind it. But only if the AND doesn't use the extended bits.
2045  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
2046  bool FoundAnyExtend = false;
2047  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
2048      Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
2049      isUInt<32>(Mask)) {
2050    FoundAnyExtend = true;
2051    Shift = Shift.getOperand(0);
2052  }
2053
2054  if (Shift.getOpcode() != ISD::SHL ||
2055      !isa<ConstantSDNode>(Shift.getOperand(1)))
2056    return true;
2057
2058  SDValue X = Shift.getOperand(0);
2059
  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // the isel mechanism requires their node ids to be reused.
2063  if (!N.hasOneUse() || !Shift.hasOneUse())
2064    return true;
2065
2066  // Verify that the shift amount is something we can fold.
2067  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2068  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
2069    return true;
2070
2071  MVT VT = N.getSimpleValueType();
2072  SDLoc DL(N);
2073  if (FoundAnyExtend) {
2074    SDValue NewX = DAG.getNode(ISD::ANY_EXTEND, DL, VT, X);
2075    insertDAGNode(DAG, N, NewX);
2076    X = NewX;
2077  }
2078
2079  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, DL, VT);
2080  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
2081  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));
2082
2083  // Insert the new nodes into the topological ordering. We must do this in
2084  // a valid topological ordering as nothing is going to go back and re-sort
2085  // these nodes. We continually insert before 'N' in sequence as this is
2086  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2087  // hierarchy left to express.
2088  insertDAGNode(DAG, N, NewMask);
2089  insertDAGNode(DAG, N, NewAnd);
2090  insertDAGNode(DAG, N, NewShift);
2091  DAG.ReplaceAllUsesWith(N, NewShift);
2092  DAG.RemoveDeadNode(N.getNode());
2093
2094  AM.Scale = 1 << ShiftAmt;
2095  AM.IndexReg = NewAnd;
2096  return false;
2097}
2098
2099// Implement some heroics to detect shifts of masked values where the mask can
2100// be replaced by extending the shift and undoing that in the addressing mode
2101// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
2102// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
2103// the addressing mode. This results in code such as:
2104//
2105//   int f(short *y, int *lookup_table) {
2106//     ...
2107//     return *y + lookup_table[*y >> 11];
2108//   }
2109//
2110// Turning into:
2111//   movzwl (%rdi), %eax
2112//   movl %eax, %ecx
2113//   shrl $11, %ecx
2114//   addl (%rsi,%rcx,4), %eax
2115//
2116// Instead of:
2117//   movzwl (%rdi), %eax
2118//   movl %eax, %ecx
2119//   shrl $9, %ecx
//   andl $124, %ecx
2121//   addl (%rsi,%rcx), %eax
2122//
2123// Note that this function assumes the mask is provided as a mask *after* the
2124// value is shifted. The input chain may or may not match that, but computing
2125// such a mask is trivial.
2126static bool foldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
2127                                    uint64_t Mask,
2128                                    SDValue Shift, SDValue X,
2129                                    X86ISelAddressMode &AM) {
2130  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
2131      !isa<ConstantSDNode>(Shift.getOperand(1)))
2132    return true;
2133
  // We need to ensure that the mask is a contiguous run of bits.
2135  unsigned MaskIdx, MaskLen;
2136  if (!isShiftedMask_64(Mask, MaskIdx, MaskLen))
2137    return true;
2138  unsigned MaskLZ = 64 - (MaskIdx + MaskLen);
2139
2140  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2141
2142  // The amount of shift we're trying to fit into the addressing mode is taken
2143  // from the shifted mask index (number of trailing zeros of the mask).
2144  unsigned AMShiftAmt = MaskIdx;
2145
2146  // There is nothing we can do here unless the mask is removing some bits.
2147  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2148  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2149
2150  // Scale the leading zero count down based on the actual size of the value.
2151  // Also scale it down based on the size of the shift.
2152  unsigned ScaleDown = (64 - X.getSimpleValueType().getSizeInBits()) + ShiftAmt;
2153  if (MaskLZ < ScaleDown)
2154    return true;
2155  MaskLZ -= ScaleDown;
2156
2157  // The final check is to ensure that any masked out high bits of X are
2158  // already known to be zero. Otherwise, the mask has a semantic impact
2159  // other than masking out a couple of low bits. Unfortunately, because of
2160  // the mask, zero extensions will be removed from operands in some cases.
2161  // This code works extra hard to look through extensions because we can
2162  // replace them with zero extensions cheaply if necessary.
2163  bool ReplacingAnyExtend = false;
2164  if (X.getOpcode() == ISD::ANY_EXTEND) {
2165    unsigned ExtendBits = X.getSimpleValueType().getSizeInBits() -
2166                          X.getOperand(0).getSimpleValueType().getSizeInBits();
2167    // Assume that we'll replace the any-extend with a zero-extend, and
2168    // narrow the search to the extended value.
2169    X = X.getOperand(0);
2170    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
2171    ReplacingAnyExtend = true;
2172  }
2173  APInt MaskedHighBits =
2174    APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
2175  if (!DAG.MaskedValueIsZero(X, MaskedHighBits))
2176    return true;
2177
2178  // We've identified a pattern that can be transformed into a single shift
2179  // and an addressing mode. Make it so.
2180  MVT VT = N.getSimpleValueType();
2181  if (ReplacingAnyExtend) {
2182    assert(X.getValueType() != VT);
2183    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
2184    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(X), VT, X);
2185    insertDAGNode(DAG, N, NewX);
2186    X = NewX;
2187  }
2188
2189  MVT XVT = X.getSimpleValueType();
2190  SDLoc DL(N);
2191  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2192  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, XVT, X, NewSRLAmt);
2193  SDValue NewExt = DAG.getZExtOrTrunc(NewSRL, DL, VT);
2194  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2195  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewExt, NewSHLAmt);
2196
2197  // Insert the new nodes into the topological ordering. We must do this in
2198  // a valid topological ordering as nothing is going to go back and re-sort
2199  // these nodes. We continually insert before 'N' in sequence as this is
2200  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2201  // hierarchy left to express.
2202  insertDAGNode(DAG, N, NewSRLAmt);
2203  insertDAGNode(DAG, N, NewSRL);
2204  insertDAGNode(DAG, N, NewExt);
2205  insertDAGNode(DAG, N, NewSHLAmt);
2206  insertDAGNode(DAG, N, NewSHL);
2207  DAG.ReplaceAllUsesWith(N, NewSHL);
2208  DAG.RemoveDeadNode(N.getNode());
2209
2210  AM.Scale = 1 << AMShiftAmt;
2211  AM.IndexReg = NewExt;
2212  return false;
2213}
2214
2215// Transform "(X >> SHIFT) & (MASK << C1)" to
2216// "((X >> (SHIFT + C1)) & (MASK)) << C1". Everything before the SHL will be
2217// matched to a BEXTR later. Returns false if the simplification is performed.
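// For example, with SHIFT == 4 and C1 == 2:
//   (X >> 4) & (0x3f << 2)  -->  ((X >> 6) & 0x3f) << 2
// where ((X >> 6) & 0x3f) is a six-bit extract suitable for BEXTR and the
// << 2 becomes an addressing-mode scale of 4.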
2218static bool foldMaskedShiftToBEXTR(SelectionDAG &DAG, SDValue N,
2219                                   uint64_t Mask,
2220                                   SDValue Shift, SDValue X,
2221                                   X86ISelAddressMode &AM,
2222                                   const X86Subtarget &Subtarget) {
2223  if (Shift.getOpcode() != ISD::SRL ||
2224      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
2225      !Shift.hasOneUse() || !N.hasOneUse())
2226    return true;
2227
2228  // Only do this if BEXTR will be matched by matchBEXTRFromAndImm.
2229  if (!Subtarget.hasTBM() &&
2230      !(Subtarget.hasBMI() && Subtarget.hasFastBEXTR()))
2231    return true;
2232
  // We need to ensure that the mask is a contiguous run of bits.
2234  unsigned MaskIdx, MaskLen;
2235  if (!isShiftedMask_64(Mask, MaskIdx, MaskLen))
2236    return true;
2237
2238  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
2239
2240  // The amount of shift we're trying to fit into the addressing mode is taken
2241  // from the shifted mask index (number of trailing zeros of the mask).
2242  unsigned AMShiftAmt = MaskIdx;
2243
2244  // There is nothing we can do here unless the mask is removing some bits.
2245  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
2246  if (AMShiftAmt == 0 || AMShiftAmt > 3) return true;
2247
2248  MVT XVT = X.getSimpleValueType();
2249  MVT VT = N.getSimpleValueType();
2250  SDLoc DL(N);
2251  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, DL, MVT::i8);
2252  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, XVT, X, NewSRLAmt);
2253  SDValue NewMask = DAG.getConstant(Mask >> AMShiftAmt, DL, XVT);
2254  SDValue NewAnd = DAG.getNode(ISD::AND, DL, XVT, NewSRL, NewMask);
2255  SDValue NewExt = DAG.getZExtOrTrunc(NewAnd, DL, VT);
2256  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, DL, MVT::i8);
2257  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewExt, NewSHLAmt);
2258
2259  // Insert the new nodes into the topological ordering. We must do this in
2260  // a valid topological ordering as nothing is going to go back and re-sort
2261  // these nodes. We continually insert before 'N' in sequence as this is
2262  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
2263  // hierarchy left to express.
2264  insertDAGNode(DAG, N, NewSRLAmt);
2265  insertDAGNode(DAG, N, NewSRL);
2266  insertDAGNode(DAG, N, NewMask);
2267  insertDAGNode(DAG, N, NewAnd);
2268  insertDAGNode(DAG, N, NewExt);
2269  insertDAGNode(DAG, N, NewSHLAmt);
2270  insertDAGNode(DAG, N, NewSHL);
2271  DAG.ReplaceAllUsesWith(N, NewSHL);
2272  DAG.RemoveDeadNode(N.getNode());
2273
2274  AM.Scale = 1 << AMShiftAmt;
2275  AM.IndexReg = NewExt;
2276  return false;
2277}
2278
// Attempt to peek further into a scaled index register, collecting additional
// extensions / offsets / etc. Returns \p N if we can't peek any further.
2281SDValue X86DAGToDAGISel::matchIndexRecursively(SDValue N,
2282                                               X86ISelAddressMode &AM,
2283                                               unsigned Depth) {
2284  assert(AM.IndexReg.getNode() == nullptr && "IndexReg already matched");
2285  assert((AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8) &&
2286         "Illegal index scale");
2287
2288  // Limit recursion.
2289  if (Depth >= SelectionDAG::MaxRecursionDepth)
2290    return N;
2291
2292  EVT VT = N.getValueType();
2293  unsigned Opc = N.getOpcode();
2294
2295  // index: add(x,c) -> index: x, disp + c
2296  if (CurDAG->isBaseWithConstantOffset(N)) {
2297    auto *AddVal = cast<ConstantSDNode>(N.getOperand(1));
2298    uint64_t Offset = (uint64_t)AddVal->getSExtValue() * AM.Scale;
2299    if (!foldOffsetIntoAddress(Offset, AM))
2300      return matchIndexRecursively(N.getOperand(0), AM, Depth + 1);
2301  }
2302
2303  // index: add(x,x) -> index: x, scale * 2
2304  if (Opc == ISD::ADD && N.getOperand(0) == N.getOperand(1)) {
2305    if (AM.Scale <= 4) {
2306      AM.Scale *= 2;
2307      return matchIndexRecursively(N.getOperand(0), AM, Depth + 1);
2308    }
2309  }
2310
2311  // index: shl(x,i) -> index: x, scale * (1 << i)
2312  if (Opc == X86ISD::VSHLI) {
2313    uint64_t ShiftAmt = N.getConstantOperandVal(1);
2314    uint64_t ScaleAmt = 1ULL << ShiftAmt;
2315    if ((AM.Scale * ScaleAmt) <= 8) {
2316      AM.Scale *= ScaleAmt;
2317      return matchIndexRecursively(N.getOperand(0), AM, Depth + 1);
2318    }
2319  }
2320
2321  // index: sext(add_nsw(x,c)) -> index: sext(x), disp + sext(c)
2322  // TODO: call matchIndexRecursively(AddSrc) if we won't corrupt sext?
2323  if (Opc == ISD::SIGN_EXTEND && !VT.isVector() && N.hasOneUse()) {
2324    SDValue Src = N.getOperand(0);
2325    if (Src.getOpcode() == ISD::ADD && Src->getFlags().hasNoSignedWrap() &&
2326        Src.hasOneUse()) {
2327      if (CurDAG->isBaseWithConstantOffset(Src)) {
2328        SDValue AddSrc = Src.getOperand(0);
2329        auto *AddVal = cast<ConstantSDNode>(Src.getOperand(1));
2330        uint64_t Offset = (uint64_t)AddVal->getSExtValue();
2331        if (!foldOffsetIntoAddress(Offset * AM.Scale, AM)) {
2332          SDLoc DL(N);
2333          SDValue ExtSrc = CurDAG->getNode(Opc, DL, VT, AddSrc);
2334          SDValue ExtVal = CurDAG->getConstant(Offset, DL, VT);
2335          SDValue ExtAdd = CurDAG->getNode(ISD::ADD, DL, VT, ExtSrc, ExtVal);
2336          insertDAGNode(*CurDAG, N, ExtSrc);
2337          insertDAGNode(*CurDAG, N, ExtVal);
2338          insertDAGNode(*CurDAG, N, ExtAdd);
2339          CurDAG->ReplaceAllUsesWith(N, ExtAdd);
2340          CurDAG->RemoveDeadNode(N.getNode());
2341          return ExtSrc;
2342        }
2343      }
2344    }
2345  }
2346
2347  // index: zext(add_nuw(x,c)) -> index: zext(x), disp + zext(c)
2348  // index: zext(addlike(x,c)) -> index: zext(x), disp + zext(c)
  // TODO: call matchIndexRecursively(AddSrc) if we won't corrupt zext?
2350  if (Opc == ISD::ZERO_EXTEND && !VT.isVector() && N.hasOneUse()) {
2351    SDValue Src = N.getOperand(0);
2352    unsigned SrcOpc = Src.getOpcode();
2353    if (((SrcOpc == ISD::ADD && Src->getFlags().hasNoUnsignedWrap()) ||
2354         CurDAG->isADDLike(Src)) &&
2355        Src.hasOneUse()) {
2356      if (CurDAG->isBaseWithConstantOffset(Src)) {
2357        SDValue AddSrc = Src.getOperand(0);
2358        uint64_t Offset = Src.getConstantOperandVal(1);
2359        if (!foldOffsetIntoAddress(Offset * AM.Scale, AM)) {
2360          SDLoc DL(N);
2361          SDValue Res;
2362          // If we're also scaling, see if we can use that as well.
2363          if (AddSrc.getOpcode() == ISD::SHL &&
2364              isa<ConstantSDNode>(AddSrc.getOperand(1))) {
2365            SDValue ShVal = AddSrc.getOperand(0);
2366            uint64_t ShAmt = AddSrc.getConstantOperandVal(1);
2367            APInt HiBits =
2368                APInt::getHighBitsSet(AddSrc.getScalarValueSizeInBits(), ShAmt);
2369            uint64_t ScaleAmt = 1ULL << ShAmt;
2370            if ((AM.Scale * ScaleAmt) <= 8 &&
2371                (AddSrc->getFlags().hasNoUnsignedWrap() ||
2372                 CurDAG->MaskedValueIsZero(ShVal, HiBits))) {
2373              AM.Scale *= ScaleAmt;
2374              SDValue ExtShVal = CurDAG->getNode(Opc, DL, VT, ShVal);
2375              SDValue ExtShift = CurDAG->getNode(ISD::SHL, DL, VT, ExtShVal,
2376                                                 AddSrc.getOperand(1));
2377              insertDAGNode(*CurDAG, N, ExtShVal);
2378              insertDAGNode(*CurDAG, N, ExtShift);
2379              AddSrc = ExtShift;
2380              Res = ExtShVal;
2381            }
2382          }
2383          SDValue ExtSrc = CurDAG->getNode(Opc, DL, VT, AddSrc);
2384          SDValue ExtVal = CurDAG->getConstant(Offset, DL, VT);
2385          SDValue ExtAdd = CurDAG->getNode(SrcOpc, DL, VT, ExtSrc, ExtVal);
2386          insertDAGNode(*CurDAG, N, ExtSrc);
2387          insertDAGNode(*CurDAG, N, ExtVal);
2388          insertDAGNode(*CurDAG, N, ExtAdd);
2389          CurDAG->ReplaceAllUsesWith(N, ExtAdd);
2390          CurDAG->RemoveDeadNode(N.getNode());
2391          return Res ? Res : ExtSrc;
2392        }
2393      }
2394    }
2395  }
2396
2397  // TODO: Handle extensions, shifted masks etc.
2398  return N;
2399}
2400
2401bool X86DAGToDAGISel::matchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
2402                                              unsigned Depth) {
2403  SDLoc dl(N);
2404  LLVM_DEBUG({
2405    dbgs() << "MatchAddress: ";
2406    AM.dump(CurDAG);
2407  });
2408  // Limit recursion.
2409  if (Depth >= SelectionDAG::MaxRecursionDepth)
2410    return matchAddressBase(N, AM);
2411
2412  // If this is already a %rip relative address, we can only merge immediates
2413  // into it.  Instead of handling this in every case, we handle it here.
2414  // RIP relative addressing: %rip + 32-bit displacement!
2415  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol addresses currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
2419    if (!(AM.ES || AM.MCSym) && AM.JT != -1)
2420      return true;
2421
2422    if (auto *Cst = dyn_cast<ConstantSDNode>(N))
2423      if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))
2424        return false;
2425    return true;
2426  }
2427
2428  switch (N.getOpcode()) {
2429  default: break;
2430  case ISD::LOCAL_RECOVER: {
2431    if (!AM.hasSymbolicDisplacement() && AM.Disp == 0)
2432      if (const auto *ESNode = dyn_cast<MCSymbolSDNode>(N.getOperand(0))) {
2433        // Use the symbol and don't prefix it.
2434        AM.MCSym = ESNode->getMCSymbol();
2435        return false;
2436      }
2437    break;
2438  }
2439  case ISD::Constant: {
2440    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
2441    if (!foldOffsetIntoAddress(Val, AM))
2442      return false;
2443    break;
2444  }
2445
2446  case X86ISD::Wrapper:
2447  case X86ISD::WrapperRIP:
2448    if (!matchWrapper(N, AM))
2449      return false;
2450    break;
2451
2452  case ISD::LOAD:
2453    if (!matchLoadInAddress(cast<LoadSDNode>(N), AM))
2454      return false;
2455    break;
2456
2457  case ISD::FrameIndex:
2458    if (AM.BaseType == X86ISelAddressMode::RegBase &&
2459        AM.Base_Reg.getNode() == nullptr &&
2460        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
2461      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
2462      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
2463      return false;
2464    }
2465    break;
2466
2467  case ISD::SHL:
2468    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
2469      break;
2470
2471    if (auto *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
2472      unsigned Val = CN->getZExtValue();
2473      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
2474      // that the base operand remains free for further matching. If
2475      // the base doesn't end up getting used, a post-processing step
2476      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
2477      if (Val == 1 || Val == 2 || Val == 3) {
2478        SDValue ShVal = N.getOperand(0);
2479        AM.Scale = 1 << Val;
2480        AM.IndexReg = matchIndexRecursively(ShVal, AM, Depth + 1);
2481        return false;
2482      }
2483    }
2484    break;
2485
2486  case ISD::SRL: {
2487    // Scale must not be used already.
2488    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2489
2490    // We only handle up to 64-bit values here as those are what matter for
2491    // addressing mode optimizations.
2492    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2493           "Unexpected value size!");
2494
2495    SDValue And = N.getOperand(0);
2496    if (And.getOpcode() != ISD::AND) break;
2497    SDValue X = And.getOperand(0);
2498
2499    // The mask used for the transform is expected to be post-shift, but we
2500    // found the shift first so just apply the shift to the mask before passing
2501    // it down.
2502    if (!isa<ConstantSDNode>(N.getOperand(1)) ||
2503        !isa<ConstantSDNode>(And.getOperand(1)))
2504      break;
2505    uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
2506
2507    // Try to fold the mask and shift into the scale, and return false if we
2508    // succeed.
2509    if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
2510      return false;
2511    break;
2512  }
2513
2514  case ISD::SMUL_LOHI:
2515  case ISD::UMUL_LOHI:
2516    // A mul_lohi where we need the low part can be folded as a plain multiply.
2517    if (N.getResNo() != 0) break;
2518    [[fallthrough]];
2519  case ISD::MUL:
2520  case X86ISD::MUL_IMM:
2521    // X*[3,5,9] -> X+X*[2,4,8]
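    // For example, X*5 can be emitted as leaq (%rdi,%rdi,4), %rax.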
2522    if (AM.BaseType == X86ISelAddressMode::RegBase &&
2523        AM.Base_Reg.getNode() == nullptr &&
2524        AM.IndexReg.getNode() == nullptr) {
2525      if (auto *CN = dyn_cast<ConstantSDNode>(N.getOperand(1)))
2526        if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
2527            CN->getZExtValue() == 9) {
2528          AM.Scale = unsigned(CN->getZExtValue())-1;
2529
2530          SDValue MulVal = N.getOperand(0);
2531          SDValue Reg;
2532
2533          // Okay, we know that we have a scale by now.  However, if the scaled
2534          // value is an add of something and a constant, we can fold the
2535          // constant into the disp field here.
2536          if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
2537              isa<ConstantSDNode>(MulVal.getOperand(1))) {
2538            Reg = MulVal.getOperand(0);
2539            auto *AddVal = cast<ConstantSDNode>(MulVal.getOperand(1));
2540            uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
2541            if (foldOffsetIntoAddress(Disp, AM))
2542              Reg = N.getOperand(0);
2543          } else {
2544            Reg = N.getOperand(0);
2545          }
2546
2547          AM.IndexReg = AM.Base_Reg = Reg;
2548          return false;
2549        }
2550    }
2551    break;
2552
2553  case ISD::SUB: {
    // Given A-B, if A can be completely folded into the address while
    // leaving the index field unused, use -B as the index.
    // This is a win if A has multiple parts that can be folded into
    // the address. Also, this saves a mov if the base register has
    // other uses, since it avoids a two-address sub instruction; however,
    // it costs an additional mov if the index register has other uses.
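    // For example (a sketch), computing "sym + A - B" where sym and A fold
    // into the address can become:
    //   negq %rbx                      ; %rbx holds -B
    //   leaq sym(%rax,%rbx), %rcx      ; %rax holds A
    // avoiding a two-address sub at the cost of the neg.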
2560
2561    // Add an artificial use to this node so that we can keep track of
2562    // it if it gets CSE'd with a different node.
2563    HandleSDNode Handle(N);
2564
2565    // Test if the LHS of the sub can be folded.
2566    X86ISelAddressMode Backup = AM;
2567    if (matchAddressRecursively(N.getOperand(0), AM, Depth+1)) {
2568      N = Handle.getValue();
2569      AM = Backup;
2570      break;
2571    }
2572    N = Handle.getValue();
2573    // Test if the index field is free for use.
2574    if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
2575      AM = Backup;
2576      break;
2577    }
2578
2579    int Cost = 0;
2580    SDValue RHS = N.getOperand(1);
2581    // If the RHS involves a register with multiple uses, this
2582    // transformation incurs an extra mov, due to the neg instruction
2583    // clobbering its operand.
2584    if (!RHS.getNode()->hasOneUse() ||
2585        RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
2586        RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
2587        RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
2588        (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
2589         RHS.getOperand(0).getValueType() == MVT::i32))
2590      ++Cost;
2591    // If the base is a register with multiple uses, this
2592    // transformation may save a mov.
2593    if ((AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode() &&
2594         !AM.Base_Reg.getNode()->hasOneUse()) ||
2595        AM.BaseType == X86ISelAddressMode::FrameIndexBase)
2596      --Cost;
2597    // If the folded LHS was interesting, this transformation saves
2598    // address arithmetic.
2599    if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
2600        ((AM.Disp != 0) && (Backup.Disp == 0)) +
2601        (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
2602      --Cost;
2603    // If it doesn't look like it may be an overall win, don't do it.
2604    if (Cost >= 0) {
2605      AM = Backup;
2606      break;
2607    }
2608
2609    // Ok, the transformation is legal and appears profitable. Go for it.
2610    // Negation will be emitted later to avoid creating dangling nodes if this
2611    // was an unprofitable LEA.
2612    AM.IndexReg = RHS;
2613    AM.NegateIndex = true;
2614    AM.Scale = 1;
2615    return false;
2616  }
2617
2618  case ISD::OR:
2619  case ISD::XOR:
2620    // See if we can treat the OR/XOR node as an ADD node.
2621    if (!CurDAG->isADDLike(N))
2622      break;
2623    [[fallthrough]];
2624  case ISD::ADD:
2625    if (!matchAdd(N, AM, Depth))
2626      return false;
2627    break;
2628
2629  case ISD::AND: {
2630    // Perform some heroic transforms on an and of a constant-count shift
2631    // with a constant to enable use of the scaled offset field.
2632
2633    // Scale must not be used already.
2634    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1) break;
2635
2636    // We only handle up to 64-bit values here as those are what matter for
2637    // addressing mode optimizations.
2638    assert(N.getSimpleValueType().getSizeInBits() <= 64 &&
2639           "Unexpected value size!");
2640
2641    if (!isa<ConstantSDNode>(N.getOperand(1)))
2642      break;
2643
2644    if (N.getOperand(0).getOpcode() == ISD::SRL) {
2645      SDValue Shift = N.getOperand(0);
2646      SDValue X = Shift.getOperand(0);
2647
2648      uint64_t Mask = N.getConstantOperandVal(1);
2649
2650      // Try to fold the mask and shift into an extract and scale.
2651      if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
2652        return false;
2653
2654      // Try to fold the mask and shift directly into the scale.
2655      if (!foldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
2656        return false;
2657
2658      // Try to fold the mask and shift into BEXTR and scale.
2659      if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask, Shift, X, AM, *Subtarget))
2660        return false;
2661    }
2662
2663    // Try to swap the mask and shift to place shifts which can be done as
2664    // a scale on the outside of the mask.
    if (!foldMaskedShiftToScaledMask(*CurDAG, N, AM))
      return false;

    break;
  }
  case ISD::ZERO_EXTEND: {
    // Try to widen a zexted shift left to the same size as its use, so we can
    // match the shift as a scale factor.
    if (AM.IndexReg.getNode() != nullptr || AM.Scale != 1)
      break;

    SDValue Src = N.getOperand(0);

    // See if we can match a zext(addlike(x,c)).
    // TODO: Move more ZERO_EXTEND patterns into matchIndexRecursively.
    if (Src.getOpcode() == ISD::ADD || Src.getOpcode() == ISD::OR)
      if (SDValue Index = matchIndexRecursively(N, AM, Depth + 1))
        if (Index != N) {
          AM.IndexReg = Index;
          return false;
        }

    // Peek through mask: zext(and(shl(x,c1),c2))
    APInt Mask = APInt::getAllOnes(Src.getScalarValueSizeInBits());
    if (Src.getOpcode() == ISD::AND && Src.hasOneUse())
      if (auto *MaskC = dyn_cast<ConstantSDNode>(Src.getOperand(1))) {
        Mask = MaskC->getAPIntValue();
        Src = Src.getOperand(0);
      }

    if (Src.getOpcode() == ISD::SHL && Src.hasOneUse()) {
      // Give up if the shift is not a valid scale factor [1,2,3].
      SDValue ShlSrc = Src.getOperand(0);
      SDValue ShlAmt = Src.getOperand(1);
      auto *ShAmtC = dyn_cast<ConstantSDNode>(ShlAmt);
      if (!ShAmtC)
        break;
      unsigned ShAmtV = ShAmtC->getZExtValue();
      if (ShAmtV > 3)
        break;

      // The narrow shift must only shift out zero bits (it must be 'nuw').
      // That makes it safe to widen to the destination type.
      APInt HighZeros =
          APInt::getHighBitsSet(ShlSrc.getValueSizeInBits(), ShAmtV);
      if (!Src->getFlags().hasNoUnsignedWrap() &&
          !CurDAG->MaskedValueIsZero(ShlSrc, HighZeros & Mask))
        break;

      // zext (shl nuw i8 %x, C1) to i32
      // --> shl (zext i8 %x to i32), (zext C1)
      // zext (and (shl nuw i8 %x, C1), C2) to i32
      // --> shl (zext i8 (and %x, C2 >> C1) to i32), (zext C1)
      MVT SrcVT = ShlSrc.getSimpleValueType();
      MVT VT = N.getSimpleValueType();
      SDLoc DL(N);

      SDValue Res = ShlSrc;
      if (!Mask.isAllOnes()) {
        Res = CurDAG->getConstant(Mask.lshr(ShAmtV), DL, SrcVT);
        insertDAGNode(*CurDAG, N, Res);
        Res = CurDAG->getNode(ISD::AND, DL, SrcVT, ShlSrc, Res);
        insertDAGNode(*CurDAG, N, Res);
      }
      SDValue Zext = CurDAG->getNode(ISD::ZERO_EXTEND, DL, VT, Res);
      insertDAGNode(*CurDAG, N, Zext);
      SDValue NewShl = CurDAG->getNode(ISD::SHL, DL, VT, Zext, ShlAmt);
      insertDAGNode(*CurDAG, N, NewShl);

      // Convert the shift to scale factor.
      AM.Scale = 1 << ShAmtV;
      AM.IndexReg = Zext;

      CurDAG->ReplaceAllUsesWith(N, NewShl);
      CurDAG->RemoveDeadNode(N.getNode());
      return false;
    }

    if (Src.getOpcode() == ISD::SRL && !Mask.isAllOnes()) {
      // Try to fold the mask and shift into an extract and scale.
      if (!foldMaskAndShiftToExtract(*CurDAG, N, Mask.getZExtValue(), Src,
                                     Src.getOperand(0), AM))
        return false;

      // Try to fold the mask and shift directly into the scale.
      if (!foldMaskAndShiftToScale(*CurDAG, N, Mask.getZExtValue(), Src,
                                   Src.getOperand(0), AM))
        return false;

      // Try to fold the mask and shift into BEXTR and scale.
      if (!foldMaskedShiftToBEXTR(*CurDAG, N, Mask.getZExtValue(), Src,
                                  Src.getOperand(0), AM, *Subtarget))
        return false;
    }

    break;
  }
  }

  return matchAddressBase(N, AM);
}

/// Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::matchAddressBase(SDValue N, X86ISelAddressMode &AM) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
    // If so, check to see if the scale index register is set.
    if (!AM.IndexReg.getNode()) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base_Reg = N;
  return false;
}
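
// Illustrative example (editor's sketch): matching 'a + b' fills Base_Reg
// with 'a' via the default path above; when 'b' arrives and the base is
// already occupied, it lands in IndexReg with Scale = 1. A third naked
// value fails both checks, so matchAddressBase returns true (no match).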

bool X86DAGToDAGISel::matchVectorAddressRecursively(SDValue N,
                                                    X86ISelAddressMode &AM,
                                                    unsigned Depth) {
  SDLoc dl(N);
  LLVM_DEBUG({
    dbgs() << "MatchVectorAddress: ";
    AM.dump(CurDAG);
  });
  // Limit recursion.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return matchAddressBase(N, AM);

  // TODO: Support other operations.
  switch (N.getOpcode()) {
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!foldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }
  case X86ISD::Wrapper:
    if (!matchWrapper(N, AM))
      return false;
    break;
  case ISD::ADD: {
    // Add an artificial use to this node so that we can keep track of
    // it if it gets CSE'd with a different node.
    HandleSDNode Handle(N);

    X86ISelAddressMode Backup = AM;
    if (!matchVectorAddressRecursively(N.getOperand(0), AM, Depth + 1) &&
        !matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
                                       Depth + 1))
      return false;
    AM = Backup;

    // Try again after commuting the operands.
    if (!matchVectorAddressRecursively(Handle.getValue().getOperand(1), AM,
                                       Depth + 1) &&
        !matchVectorAddressRecursively(Handle.getValue().getOperand(0), AM,
                                       Depth + 1))
      return false;
    AM = Backup;

    N = Handle.getValue();
    break;
  }
  }

  return matchAddressBase(N, AM);
}

/// Helper for selectVectorAddr. Handles things that can be folded into a
/// gather/scatter address. The index register and scale should have already
/// been handled.
bool X86DAGToDAGISel::matchVectorAddress(SDValue N, X86ISelAddressMode &AM) {
  return matchVectorAddressRecursively(N, AM, 0);
}

bool X86DAGToDAGISel::selectVectorAddr(MemSDNode *Parent, SDValue BasePtr,
                                       SDValue IndexOp, SDValue ScaleOp,
                                       SDValue &Base, SDValue &Scale,
                                       SDValue &Index, SDValue &Disp,
                                       SDValue &Segment) {
  X86ISelAddressMode AM;
  AM.Scale = ScaleOp->getAsZExtVal();

  // Attempt to match index patterns, as long as we're not relying on implicit
  // sign-extension, which is performed BEFORE scale.
  if (IndexOp.getScalarValueSizeInBits() == BasePtr.getScalarValueSizeInBits())
    AM.IndexReg = matchIndexRecursively(IndexOp, AM, 0);
  else
    AM.IndexReg = IndexOp;

  unsigned AddrSpace = Parent->getPointerInfo().getAddrSpace();
  if (AddrSpace == X86AS::GS)
    AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
  if (AddrSpace == X86AS::FS)
    AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
  if (AddrSpace == X86AS::SS)
    AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);

  SDLoc DL(BasePtr);
  MVT VT = BasePtr.getSimpleValueType();

  // Try to match into the base and displacement fields.
  if (matchVectorAddress(BasePtr, AM))
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
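
// Illustrative example (editor's sketch): for a gather whose address is
// 'base + 16 + 4*index', ScaleOp supplies AM.Scale = 4 and IndexOp becomes
// the index register, while matchVectorAddress() folds the constant 16 (and
// any wrapped global) from BasePtr into AM.Disp / AM.GV.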

/// Returns true if it is able to pattern match an addressing mode.
/// It returns the operands which make up the maximal addressing mode it can
/// match by reference.
///
/// Parent is the parent node of the addr operand that is being matched.  It
/// is always a load, store, atomic node, or null.  It is only null when
/// checking memory operands for inline asm nodes.
bool X86DAGToDAGISel::selectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
  X86ISelAddressMode AM;

  if (Parent &&
      // This list of opcodes covers all the nodes that have an "addr:$ptr"
      // operand but are not a MemSDNode, and thus don't have proper addrspace
      // info.
      Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
      Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMD && // Fixme
      Parent->getOpcode() != X86ISD::ENQCMDS && // Fixme
      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
    unsigned AddrSpace =
      cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
    if (AddrSpace == X86AS::GS)
      AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
    if (AddrSpace == X86AS::FS)
      AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
    if (AddrSpace == X86AS::SS)
      AM.Segment = CurDAG->getRegister(X86::SS, MVT::i16);
  }

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  if (matchAddress(N, AM))
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
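
// Illustrative example (editor's sketch): for a load of '*(p + 4*i + 8)', a
// successful match yields Base = p, Scale = 4, Index = i, Disp = 8 -- the
// x86 form 'movl 8(%p,%i,4), %dst' -- with Segment left empty.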

bool X86DAGToDAGISel::selectMOV64Imm32(SDValue N, SDValue &Imm) {
  // Cannot use 32 bit constants to reference objects in kernel/large code
  // model.
  if (TM.getCodeModel() == CodeModel::Kernel ||
      TM.getCodeModel() == CodeModel::Large)
    return false;

  // In static codegen with small code model, we can get the address of a
  // label into a register with 'movl'.
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  N = N.getOperand(0);

  // At least GNU as does not accept 'movl' for TPOFF relocations.
  // FIXME: We could use 'movl' when we know we are targeting MC.
  if (N->getOpcode() == ISD::TargetGlobalTLSAddress)
    return false;

  Imm = N;
  // Small/medium code model can reference non-TargetGlobalAddress objects with
  // 32 bit constants.
  if (N->getOpcode() != ISD::TargetGlobalAddress) {
    return TM.getCodeModel() == CodeModel::Small ||
           TM.getCodeModel() == CodeModel::Medium;
  }

  const GlobalValue *GV = cast<GlobalAddressSDNode>(N)->getGlobal();
  if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange())
    return CR->getUnsignedMax().ult(1ull << 32);

  return !TM.isLargeGlobalValue(GV);
}
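
// Illustrative example (editor's sketch): in the small code model,
// 'movq $sym, %rax' can shrink to 'movl $sym, %eax', since the symbol is
// known to fit in 32 bits and the 32-bit write zero-extends into %rax.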

bool X86DAGToDAGISel::selectLEA64_32Addr(SDValue N, SDValue &Base,
                                         SDValue &Scale, SDValue &Index,
                                         SDValue &Disp, SDValue &Segment) {
  // Save the debug loc before calling selectLEAAddr, in case it invalidates N.
  SDLoc DL(N);

  if (!selectLEAAddr(N, Base, Scale, Index, Disp, Segment))
    return false;

  auto *RN = dyn_cast<RegisterSDNode>(Base);
  if (RN && RN->getReg() == 0)
    Base = CurDAG->getRegister(0, MVT::i64);
  else if (Base.getValueType() == MVT::i32 && !isa<FrameIndexSDNode>(Base)) {
    // Base could already be %rip, particularly in the x32 ABI.
    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
                                                     MVT::i64), 0);
    Base = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
                                         Base);
  }

  RN = dyn_cast<RegisterSDNode>(Index);
  if (RN && RN->getReg() == 0)
    Index = CurDAG->getRegister(0, MVT::i64);
  else {
    assert(Index.getValueType() == MVT::i32 &&
           "Expect to be extending 32-bit registers for use in LEA");
    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, DL,
                                                     MVT::i64), 0);
    Index = CurDAG->getTargetInsertSubreg(X86::sub_32bit, DL, MVT::i64, ImplDef,
                                          Index);
  }

  return true;
}
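
// Illustrative example (editor's sketch): to select 'leal (%edi,%esi), %eax'
// on x86-64, the 32-bit operands are first widened to 64-bit registers via
// IMPLICIT_DEF + INSERT_SUBREG above, since the address operands of the
// LEA64_32r form must be 64 bits wide.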

/// Calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::selectLEAAddr(SDValue N,
                                    SDValue &Base, SDValue &Scale,
                                    SDValue &Index, SDValue &Disp,
                                    SDValue &Segment) {
  X86ISelAddressMode AM;

  // Save the DL and VT before calling matchAddress, it can invalidate N.
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();

  // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
  // segments.
  SDValue Copy = AM.Segment;
  SDValue T = CurDAG->getRegister(0, MVT::i32);
  AM.Segment = T;
  if (matchAddress(N, AM))
    return false;
  assert(T == AM.Segment);
  AM.Segment = Copy;

  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base_Reg.getNode())
    Complexity = 1;
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.getNode())
    Complexity++;

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.hasSymbolicDisplacement()) {
    // For X86-64, always use LEA to materialize RIP-relative addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  // Heuristic: try harder to form an LEA from ADD if the operands set flags.
  // Unlike ADD, LEA does not affect flags, so we will be less likely to require
  // duplicating flag-producing instructions later in the pipeline.
  if (N.getOpcode() == ISD::ADD) {
    auto isMathWithFlags = [](SDValue V) {
      switch (V.getOpcode()) {
      case X86ISD::ADD:
      case X86ISD::SUB:
      case X86ISD::ADC:
      case X86ISD::SBB:
      case X86ISD::SMUL:
      case X86ISD::UMUL:
      /* TODO: These opcodes can be added safely, but we may want to justify
               their inclusion for different reasons (better for reg-alloc).
      case X86ISD::OR:
      case X86ISD::XOR:
      case X86ISD::AND:
      */
        // Value 1 is the flag output of the node - verify it's not dead.
        return !SDValue(V.getNode(), 1).use_empty();
      default:
        return false;
      }
    };
    // TODO: We might want to factor in whether there's a load folding
    // opportunity for the math op that disappears with LEA.
    if (isMathWithFlags(N.getOperand(0)) || isMathWithFlags(N.getOperand(1)))
      Complexity++;
  }

  if (AM.Disp)
    Complexity++;

  // If it isn't worth using an LEA, reject it.
  if (Complexity <= 2)
    return false;

  getAddressOperands(AM, DL, VT, Base, Scale, Index, Disp, Segment);
  return true;
}
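
// Illustrative complexity accounting (editor's sketch): for
// 'lea 4(%rax,%rcx,2), %rdx' the base (+1), index (+1), scale > 1 (+1) and
// displacement (+1) sum to 4, comfortably above the 'Complexity <= 2'
// rejection threshold, so the LEA form is kept.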

/// This is only run on TargetGlobalTLSAddress nodes.
bool X86DAGToDAGISel::selectTLSADDRAddr(SDValue N, SDValue &Base,
                                        SDValue &Scale, SDValue &Index,
                                        SDValue &Disp, SDValue &Segment) {
  assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
  auto *GA = cast<GlobalAddressSDNode>(N);

  X86ISelAddressMode AM;
  AM.GV = GA->getGlobal();
  AM.Disp += GA->getOffset();
  AM.SymbolFlags = GA->getTargetFlags();

  if (Subtarget->is32Bit()) {
    AM.Scale = 1;
    AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
  }

  MVT VT = N.getSimpleValueType();
  getAddressOperands(AM, SDLoc(N), VT, Base, Scale, Index, Disp, Segment);
  return true;
}

bool X86DAGToDAGISel::selectRelocImm(SDValue N, SDValue &Op) {
  // Keep track of the original value type and whether this value was
  // truncated. If we see a truncation from pointer type to VT that truncates
  // bits that are known to be zero, we can use a narrow reference.
  EVT VT = N.getValueType();
  bool WasTruncated = false;
  if (N.getOpcode() == ISD::TRUNCATE) {
    WasTruncated = true;
    N = N.getOperand(0);
  }

  if (N.getOpcode() != X86ISD::Wrapper)
    return false;

  // We can only use non-GlobalValues as immediates if they were not truncated,
  // as we do not have any range information. If we have a GlobalValue and the
  // address was not truncated, we can select it as an operand directly.
  unsigned Opc = N.getOperand(0)->getOpcode();
  if (Opc != ISD::TargetGlobalAddress || !WasTruncated) {
    Op = N.getOperand(0);
    // We can only select the operand directly if we didn't have to look past a
    // truncate.
    return !WasTruncated;
  }

  // Check that the global's range fits into VT.
  auto *GA = cast<GlobalAddressSDNode>(N.getOperand(0));
  std::optional<ConstantRange> CR = GA->getGlobal()->getAbsoluteSymbolRange();
  if (!CR || CR->getUnsignedMax().uge(1ull << VT.getSizeInBits()))
    return false;

  // Okay, we can use a narrow reference.
  Op = CurDAG->getTargetGlobalAddress(GA->getGlobal(), SDLoc(N), VT,
                                      GA->getOffset(), GA->getTargetFlags());
  return true;
}
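
// Illustrative example (editor's sketch): a global annotated with an
// !absolute_symbol range of [0, 0x10000) has an unsigned max of 0xFFFF, so
// a trunc-to-i16 of its address passes the range check above and can be
// selected as a 16-bit immediate reference.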

bool X86DAGToDAGISel::tryFoldLoad(SDNode *Root, SDNode *P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
  assert(Root && P && "Unknown root/parent nodes");
  if (!ISD::isNON_EXTLoad(N.getNode()) ||
      !IsProfitableToFold(N, P, Root) ||
      !IsLegalToFold(N, P, Root, OptLevel))
    return false;

  return selectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

bool X86DAGToDAGISel::tryFoldBroadcast(SDNode *Root, SDNode *P, SDValue N,
                                       SDValue &Base, SDValue &Scale,
                                       SDValue &Index, SDValue &Disp,
                                       SDValue &Segment) {
  assert(Root && P && "Unknown root/parent nodes");
  if (N->getOpcode() != X86ISD::VBROADCAST_LOAD ||
      !IsProfitableToFold(N, P, Root) ||
      !IsLegalToFold(N, P, Root, OptLevel))
    return false;

  return selectAddr(N.getNode(),
                    N.getOperand(1), Base, Scale, Index, Disp, Segment);
}

/// Return an SDNode that returns the value of the global base register.
/// Output instructions required to initialize the global base register,
/// if necessary.
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
  auto &DL = MF->getDataLayout();
  return CurDAG->getRegister(GlobalBaseReg, TLI->getPointerTy(DL)).getNode();
}

bool X86DAGToDAGISel::isSExtAbsoluteSymbolRef(unsigned Width, SDNode *N) const {
  if (N->getOpcode() == ISD::TRUNCATE)
    N = N->getOperand(0).getNode();
  if (N->getOpcode() != X86ISD::Wrapper)
    return false;

  auto *GA = dyn_cast<GlobalAddressSDNode>(N->getOperand(0));
  if (!GA)
    return false;

  auto *GV = GA->getGlobal();
  std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange();
  if (CR)
    return CR->getSignedMin().sge(-1ull << Width) &&
           CR->getSignedMax().slt(1ull << Width);
  // In the kernel code model, globals are in the negative 2GB of the address
  // space, so globals can be a sign extended 32-bit immediate.
  // In other code models, small globals are in the low 2GB of the address
  // space, so sign extending them is equivalent to zero extending them.
  return Width == 32 && !TM.isLargeGlobalValue(GV);
}

X86::CondCode X86DAGToDAGISel::getCondFromNode(SDNode *N) const {
  assert(N->isMachineOpcode() && "Unexpected node");
  unsigned Opc = N->getMachineOpcode();
  const MCInstrDesc &MCID = getInstrInfo()->get(Opc);
  int CondNo = X86::getCondSrcNoFromDesc(MCID);
  if (CondNo < 0)
    return X86::COND_INVALID;

  return static_cast<X86::CondCode>(N->getConstantOperandVal(CondNo));
}

/// Return true if no user of the given X86ISD::CMP node's flag result uses a
/// flag other than ZF.
bool X86DAGToDAGISel::onlyUsesZeroFlag(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which only use the zero flag.
      case X86::COND_E: case X86::COND_NE:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  return true;
}

/// Return true if no user of the given X86ISD::CMP node's flag result
/// requires the SF flag to be accurate.
bool X86DAGToDAGISel::hasNoSignFlagUses(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;
    // Only examine CopyToReg uses that copy to EFLAGS.
    if (UI->getOpcode() != ISD::CopyToReg ||
        cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
      return false;
    // Examine each user of the CopyToReg use.
    for (SDNode::use_iterator FlagUI = UI->use_begin(),
           FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
      // Only examine the Flag result.
      if (FlagUI.getUse().getResNo() != 1) continue;
      // Anything unusual: assume conservatively.
      if (!FlagUI->isMachineOpcode()) return false;
      // Examine the condition code of the user.
      X86::CondCode CC = getCondFromNode(*FlagUI);

      switch (CC) {
      // Comparisons which don't examine the SF flag.
      case X86::COND_A: case X86::COND_AE:
      case X86::COND_B: case X86::COND_BE:
      case X86::COND_E: case X86::COND_NE:
      case X86::COND_O: case X86::COND_NO:
      case X86::COND_P: case X86::COND_NP:
        continue;
      // Anything else: assume conservatively.
      default:
        return false;
      }
    }
  }
  return true;
}

static bool mayUseCarryFlag(X86::CondCode CC) {
  switch (CC) {
  // Comparisons which don't examine the CF flag.
  case X86::COND_O: case X86::COND_NO:
  case X86::COND_E: case X86::COND_NE:
  case X86::COND_S: case X86::COND_NS:
  case X86::COND_P: case X86::COND_NP:
  case X86::COND_L: case X86::COND_GE:
  case X86::COND_G: case X86::COND_LE:
    return false;
  // Anything else: assume conservatively.
  default:
    return true;
  }
}

/// Test whether the given node which sets flags has any uses which require the
/// CF flag to be accurate.
bool X86DAGToDAGISel::hasNoCarryFlagUses(SDValue Flags) const {
  // Examine each user of the node.
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
         UI != UE; ++UI) {
    // Only check things that use the flags.
    if (UI.getUse().getResNo() != Flags.getResNo())
      continue;

    unsigned UIOpc = UI->getOpcode();

    if (UIOpc == ISD::CopyToReg) {
      // Only examine CopyToReg uses that copy to EFLAGS.
      if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() != X86::EFLAGS)
        return false;
      // Examine each user of the CopyToReg use.
      for (SDNode::use_iterator FlagUI = UI->use_begin(), FlagUE = UI->use_end();
           FlagUI != FlagUE; ++FlagUI) {
        // Only examine the Flag result.
        if (FlagUI.getUse().getResNo() != 1)
          continue;
        // Anything unusual: assume conservatively.
        if (!FlagUI->isMachineOpcode())
          return false;
        // Examine the condition code of the user.
        X86::CondCode CC = getCondFromNode(*FlagUI);

        if (mayUseCarryFlag(CC))
          return false;
      }

      // This CopyToReg is ok. Move on to the next user.
      continue;
    }

    // This might be an unselected node. So look for the pre-isel opcodes that
    // use flags.
    unsigned CCOpNo;
    switch (UIOpc) {
    default:
      // Something unusual. Be conservative.
      return false;
    case X86ISD::SETCC:       CCOpNo = 0; break;
    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
    case X86ISD::CMOV:        CCOpNo = 2; break;
    case X86ISD::BRCOND:      CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)UI->getConstantOperandVal(CCOpNo);
    if (mayUseCarryFlag(CC))
      return false;
  }
  return true;
}

/// Check whether or not the chain ending in StoreNode is suitable for doing
/// the {load; op; store} to modify transformation.
static bool isFusableLoadOpStorePattern(StoreSDNode *StoreNode,
                                        SDValue StoredVal, SelectionDAG *CurDAG,
                                        unsigned LoadOpNo,
                                        LoadSDNode *&LoadNode,
                                        SDValue &InputChain) {
  // Is the stored value result 0 of the operation?
  if (StoredVal.getResNo() != 0) return false;

  // Are there other uses of the operation other than the store?
  if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;

  // Is the store non-extending and non-indexed?
  if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
    return false;

  SDValue Load = StoredVal->getOperand(LoadOpNo);
  // Is the stored value a non-extending and non-indexed load?
  if (!ISD::isNormalLoad(Load.getNode())) return false;

  // Return LoadNode by reference.
  LoadNode = cast<LoadSDNode>(Load);

  // Is store the only read of the loaded value?
  if (!Load.hasOneUse())
    return false;

  // Is the address of the store the same as the load?
  if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
      LoadNode->getOffset() != StoreNode->getOffset())
    return false;

  bool FoundLoad = false;
  SmallVector<SDValue, 4> ChainOps;
  SmallVector<const SDNode *, 4> LoopWorklist;
  SmallPtrSet<const SDNode *, 16> Visited;
  const unsigned int Max = 1024;

  //  Visualization of Load-Op-Store fusion:
  // -------------------------
  // Legend:
  //    *-lines = Chain operand dependencies.
  //    |-lines = Normal operand dependencies.
  //    Dependencies flow down and right. n-suffix references multiple nodes.
  //
  //        C                        Xn  C
  //        *                         *  *
  //        *                          * *
  //  Xn  A-LD    Yn                    TF         Yn
  //   *    * \   |                       *        |
  //    *   *  \  |                        *       |
  //     *  *   \ |             =>       A--LD_OP_ST
  //      * *    \|                                 \
  //       TF    OP                                  \
  //         *   | \                                  Zn
  //          *  |  \
  //         A-ST    Zn
  //

  // This merge induces dependences from: #1: Xn -> LD, OP, Zn
  //                                      #2: Yn -> LD
  //                                      #3: ST -> Zn

  // Ensure the transform is safe by checking for the dual
  // dependencies to make sure we do not induce a loop.

  // As LD is a predecessor to both OP and ST we can do this by checking:
  //  a). if LD is a predecessor to a member of Xn or Yn.
  //  b). if a Zn is a predecessor to ST.

  // However, (b) can only occur through being a chain predecessor to
  // ST, which is the same as Zn being a member or predecessor of Xn,
  // which is a subset of LD being a predecessor of Xn. So it's
  // subsumed by check (a).

  SDValue Chain = StoreNode->getChain();

  // Gather X elements in ChainOps.
  if (Chain == Load.getValue(1)) {
    FoundLoad = true;
    ChainOps.push_back(Load.getOperand(0));
  } else if (Chain.getOpcode() == ISD::TokenFactor) {
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
      SDValue Op = Chain.getOperand(i);
      if (Op == Load.getValue(1)) {
        FoundLoad = true;
        // Drop Load, but keep its chain. No cycle check necessary.
        ChainOps.push_back(Load.getOperand(0));
        continue;
      }
      LoopWorklist.push_back(Op.getNode());
      ChainOps.push_back(Op);
    }
  }

  if (!FoundLoad)
    return false;

  // Worklist is currently Xn. Add Yn to worklist.
  for (SDValue Op : StoredVal->ops())
    if (Op.getNode() != LoadNode)
      LoopWorklist.push_back(Op.getNode());

  // Check (a) if Load is a predecessor to Xn + Yn
  if (SDNode::hasPredecessorHelper(Load.getNode(), Visited, LoopWorklist, Max,
                                   true))
    return false;

  InputChain =
      CurDAG->getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ChainOps);
  return true;
}
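
// Concrete shape of the fused pattern (editor's illustration): the sequence
//   movl (%rdi), %eax
//   addl %esi, %eax
//   movl %eax, (%rdi)
// can collapse into 'addl %esi, (%rdi)' once the chain checks above prove
// that nothing else reads the loaded value or depends on the intermediate
// chain in a way that would create a cycle.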

// Change a chain of {load; op; store} of the same value into a simple op
// through memory of that value, if the uses of the modified value and its
// address are suitable.
//
// The tablegen memory operand pattern is currently not able to match the
// case where the EFLAGS on the original operation are used.
//
// To move this to tablegen, we'll need to improve tablegen to allow flags to
// be transferred from a node in the pattern to the result node, probably with
// a new keyword. For example, we have this:
// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//   (implicit EFLAGS)]>;
// but maybe need something like this:
// def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
//  [(store (add (loadi64 addr:$dst), -1), addr:$dst),
//   (transferrable EFLAGS)]>;
//
// Until then, we manually fold these and instruction select the operation
// here.
bool X86DAGToDAGISel::foldLoadStoreIntoMemOperand(SDNode *Node) {
  auto *StoreNode = cast<StoreSDNode>(Node);
  SDValue StoredVal = StoreNode->getOperand(1);
  unsigned Opc = StoredVal->getOpcode();

  // Before we try to select anything, make sure this is a memory operand size
  // and opcode we can handle. Note that this must match the code below that
  // actually lowers the opcodes.
  EVT MemVT = StoreNode->getMemoryVT();
  if (MemVT != MVT::i64 && MemVT != MVT::i32 && MemVT != MVT::i16 &&
      MemVT != MVT::i8)
    return false;

  bool IsCommutable = false;
  bool IsNegate = false;
  switch (Opc) {
  default:
    return false;
  case X86ISD::SUB:
    IsNegate = isNullConstant(StoredVal.getOperand(0));
    break;
  case X86ISD::SBB:
    break;
  case X86ISD::ADD:
  case X86ISD::ADC:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR:
    IsCommutable = true;
    break;
  }

  unsigned LoadOpNo = IsNegate ? 1 : 0;
  LoadSDNode *LoadNode = nullptr;
  SDValue InputChain;
  if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                   LoadNode, InputChain)) {
    if (!IsCommutable)
      return false;

    // This operation is commutable, try the other operand.
    LoadOpNo = 1;
    if (!isFusableLoadOpStorePattern(StoreNode, StoredVal, CurDAG, LoadOpNo,
                                     LoadNode, InputChain))
      return false;
  }

  SDValue Base, Scale, Index, Disp, Segment;
  if (!selectAddr(LoadNode, LoadNode->getBasePtr(), Base, Scale, Index, Disp,
                  Segment))
    return false;

  auto SelectOpcode = [&](unsigned Opc64, unsigned Opc32, unsigned Opc16,
                          unsigned Opc8) {
    switch (MemVT.getSimpleVT().SimpleTy) {
    case MVT::i64:
      return Opc64;
    case MVT::i32:
      return Opc32;
    case MVT::i16:
      return Opc16;
    case MVT::i8:
      return Opc8;
    default:
      llvm_unreachable("Invalid size!");
    }
  };

  MachineSDNode *Result;
  switch (Opc) {
  case X86ISD::SUB:
    // Handle negate.
    if (IsNegate) {
      unsigned NewOpc = SelectOpcode(X86::NEG64m, X86::NEG32m, X86::NEG16m,
                                     X86::NEG8m);
      const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                      MVT::Other, Ops);
      break;
    }
    [[fallthrough]];
  case X86ISD::ADD:
    // Try to match inc/dec.
    if (!Subtarget->slowIncDec() || CurDAG->shouldOptForSize()) {
      bool IsOne = isOneConstant(StoredVal.getOperand(1));
      bool IsNegOne = isAllOnesConstant(StoredVal.getOperand(1));
      // ADD/SUB with 1/-1 can use INC/DEC when the carry flag isn't used.
      if ((IsOne || IsNegOne) && hasNoCarryFlagUses(StoredVal.getValue(1))) {
        unsigned NewOpc =
          ((Opc == X86ISD::ADD) == IsOne)
              ? SelectOpcode(X86::INC64m, X86::INC32m, X86::INC16m, X86::INC8m)
              : SelectOpcode(X86::DEC64m, X86::DEC32m, X86::DEC16m, X86::DEC8m);
        const SDValue Ops[] = {Base, Scale, Index, Disp, Segment, InputChain};
        Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32,
                                        MVT::Other, Ops);
        break;
      }
    }
    [[fallthrough]];
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::AND:
  case X86ISD::OR:
  case X86ISD::XOR: {
    auto SelectRegOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mr, X86::ADD32mr, X86::ADD16mr,
                            X86::ADD8mr);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mr, X86::ADC32mr, X86::ADC16mr,
                            X86::ADC8mr);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mr, X86::SUB32mr, X86::SUB16mr,
                            X86::SUB8mr);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mr, X86::SBB32mr, X86::SBB16mr,
                            X86::SBB8mr);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mr, X86::AND32mr, X86::AND16mr,
                            X86::AND8mr);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mr, X86::OR32mr, X86::OR16mr, X86::OR8mr);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mr, X86::XOR32mr, X86::XOR16mr,
                            X86::XOR8mr);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };
    auto SelectImmOpcode = [SelectOpcode](unsigned Opc) {
      switch (Opc) {
      case X86ISD::ADD:
        return SelectOpcode(X86::ADD64mi32, X86::ADD32mi, X86::ADD16mi,
                            X86::ADD8mi);
      case X86ISD::ADC:
        return SelectOpcode(X86::ADC64mi32, X86::ADC32mi, X86::ADC16mi,
                            X86::ADC8mi);
      case X86ISD::SUB:
        return SelectOpcode(X86::SUB64mi32, X86::SUB32mi, X86::SUB16mi,
                            X86::SUB8mi);
      case X86ISD::SBB:
        return SelectOpcode(X86::SBB64mi32, X86::SBB32mi, X86::SBB16mi,
                            X86::SBB8mi);
      case X86ISD::AND:
        return SelectOpcode(X86::AND64mi32, X86::AND32mi, X86::AND16mi,
                            X86::AND8mi);
      case X86ISD::OR:
        return SelectOpcode(X86::OR64mi32, X86::OR32mi, X86::OR16mi,
                            X86::OR8mi);
      case X86ISD::XOR:
        return SelectOpcode(X86::XOR64mi32, X86::XOR32mi, X86::XOR16mi,
                            X86::XOR8mi);
      default:
        llvm_unreachable("Invalid opcode!");
      }
    };

    unsigned NewOpc = SelectRegOpcode(Opc);
    SDValue Operand = StoredVal->getOperand(1 - LoadOpNo);

    // See if the operand is a constant that we can fold into an immediate
    // operand.
    if (auto *OperandC = dyn_cast<ConstantSDNode>(Operand)) {
      int64_t OperandV = OperandC->getSExtValue();

      // Check if we can shrink the operand enough to fit in an immediate (or
      // fit into a smaller immediate) by negating it and switching the
      // operation.
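      // For example (editor's illustration): 'addl $128, (%rdi)' needs an
      // imm32 encoding since 128 does not fit in a signed imm8, but the
      // equivalent 'subl $-128, (%rdi)' does fit, shrinking the encoding.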
      if ((Opc == X86ISD::ADD || Opc == X86ISD::SUB) &&
          ((MemVT != MVT::i8 && !isInt<8>(OperandV) && isInt<8>(-OperandV)) ||
           (MemVT == MVT::i64 && !isInt<32>(OperandV) &&
            isInt<32>(-OperandV))) &&
          hasNoCarryFlagUses(StoredVal.getValue(1))) {
        OperandV = -OperandV;
        Opc = Opc == X86ISD::ADD ? X86ISD::SUB : X86ISD::ADD;
      }

      if (MemVT != MVT::i64 || isInt<32>(OperandV)) {
        Operand = CurDAG->getTargetConstant(OperandV, SDLoc(Node), MemVT);
        NewOpc = SelectImmOpcode(Opc);
      }
    }

    if (Opc == X86ISD::ADC || Opc == X86ISD::SBB) {
      SDValue CopyTo =
          CurDAG->getCopyToReg(InputChain, SDLoc(Node), X86::EFLAGS,
                               StoredVal.getOperand(2), SDValue());

      const SDValue Ops[] = {Base,    Scale,   Index,  Disp,
                             Segment, Operand, CopyTo, CopyTo.getValue(1)};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    } else {
      const SDValue Ops[] = {Base,    Scale,   Index,     Disp,
                             Segment, Operand, InputChain};
      Result = CurDAG->getMachineNode(NewOpc, SDLoc(Node), MVT::i32, MVT::Other,
                                      Ops);
    }
    break;
  }
  default:
    llvm_unreachable("Invalid opcode!");
  }

  MachineMemOperand *MemOps[] = {StoreNode->getMemOperand(),
                                 LoadNode->getMemOperand()};
  CurDAG->setNodeMemRefs(Result, MemOps);

  // Update Load Chain uses as well.
  ReplaceUses(SDValue(LoadNode, 1), SDValue(Result, 1));
  ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
  ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
  CurDAG->RemoveDeadNode(Node);
  return true;
}

// See if this is an  X & Mask  that we can match to BEXTR/BZHI.
// Where Mask is one of the following patterns:
//   a) x &  (1 << nbits) - 1
//   b) x & ~(-1 << nbits)
//   c) x &  (-1 >> (32 - y))
//   d) x << (32 - y) >> (32 - y)
//   e) (1 << nbits) - 1
bool X86DAGToDAGISel::matchBitExtract(SDNode *Node) {
  assert(
      (Node->getOpcode() == ISD::ADD || Node->getOpcode() == ISD::AND ||
       Node->getOpcode() == ISD::SRL) &&
      "Should be either an and-mask, or right-shift after clearing high bits.");

  // BEXTR is a BMI instruction, BZHI is a BMI2 instruction. We need at least
  // one of them.
  if (!Subtarget->hasBMI() && !Subtarget->hasBMI2())
    return false;

  MVT NVT = Node->getSimpleValueType(0);

  // Only supported for 32 and 64 bits.
  if (NVT != MVT::i32 && NVT != MVT::i64)
    return false;

  SDValue NBits;
  bool NegateNBits;

  // If we have BMI2's BZHI, we are ok with multi-use patterns.
  // Else, if we only have BMI1's BEXTR, we require one-use.
  const bool AllowExtraUsesByDefault = Subtarget->hasBMI2();
  auto checkUses = [AllowExtraUsesByDefault](
                       SDValue Op, unsigned NUses,
                       std::optional<bool> AllowExtraUses) {
    return AllowExtraUses.value_or(AllowExtraUsesByDefault) ||
           Op.getNode()->hasNUsesOfValue(NUses, Op.getResNo());
  };
  auto checkOneUse = [checkUses](SDValue Op,
                                 std::optional<bool> AllowExtraUses =
                                     std::nullopt) {
    return checkUses(Op, 1, AllowExtraUses);
  };
  auto checkTwoUse = [checkUses](SDValue Op,
                                 std::optional<bool> AllowExtraUses =
                                     std::nullopt) {
    return checkUses(Op, 2, AllowExtraUses);
  };

  auto peekThroughOneUseTruncation = [checkOneUse](SDValue V) {
    if (V->getOpcode() == ISD::TRUNCATE && checkOneUse(V)) {
      assert(V.getSimpleValueType() == MVT::i32 &&
             V.getOperand(0).getSimpleValueType() == MVT::i64 &&
             "Expected i64 -> i32 truncation");
      V = V.getOperand(0);
    }
    return V;
  };

  // a) x & ((1 << nbits) + (-1))
  auto matchPatternA = [checkOneUse, peekThroughOneUseTruncation, &NBits,
                        &NegateNBits](SDValue Mask) -> bool {
    // Match `add`. Must only have one use!
    if (Mask->getOpcode() != ISD::ADD || !checkOneUse(Mask))
      return false;
    // We should be adding all-ones constant (i.e. subtracting one.)
    if (!isAllOnesConstant(Mask->getOperand(1)))
      return false;
    // Match `1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    if (!isOneConstant(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    NegateNBits = false;
    return true;
  };
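  // For example (editor's illustration): with nbits = 5, pattern a) is
  // 'x & ((1 << 5) - 1)' = 'x & 0x1F', i.e. keep the low 5 bits of x --
  // exactly what BZHI (or BEXTR with a zero start field) computes directly.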

  auto isAllOnes = [this, peekThroughOneUseTruncation, NVT](SDValue V) {
    V = peekThroughOneUseTruncation(V);
    return CurDAG->MaskedValueIsAllOnes(
        V, APInt::getLowBitsSet(V.getSimpleValueType().getSizeInBits(),
                                NVT.getSizeInBits()));
  };

  // b) x & ~(-1 << nbits)
  auto matchPatternB = [checkOneUse, isAllOnes, peekThroughOneUseTruncation,
                        &NBits, &NegateNBits](SDValue Mask) -> bool {
    // Match `~()`. Must only have one use!
    if (Mask.getOpcode() != ISD::XOR || !checkOneUse(Mask))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(Mask->getOperand(1)))
      return false;
    // Match `-1 << nbits`. Might be truncated. Must only have one use!
    SDValue M0 = peekThroughOneUseTruncation(Mask->getOperand(0));
    if (M0->getOpcode() != ISD::SHL || !checkOneUse(M0))
      return false;
    // The -1 only has to be all-ones for the final Node's NVT.
    if (!isAllOnes(M0->getOperand(0)))
      return false;
    NBits = M0->getOperand(1);
    NegateNBits = false;
    return true;
  };

  // Try to match potentially-truncated shift amount as `(bitwidth - y)`,
  // or leave the shift amount as-is, but then we'll have to negate it.
  auto canonicalizeShiftAmt = [&NBits, &NegateNBits](SDValue ShiftAmt,
                                                     unsigned Bitwidth) {
    NBits = ShiftAmt;
    NegateNBits = true;
    // Skip over a truncate of the shift amount, if any.
    if (NBits.getOpcode() == ISD::TRUNCATE)
      NBits = NBits.getOperand(0);
    // Try to match the shift amount as (bitwidth - y). It should go away, too.
    // If it doesn't match, that's fine, we'll just negate it ourselves.
    if (NBits.getOpcode() != ISD::SUB)
      return;
    auto *V0 = dyn_cast<ConstantSDNode>(NBits.getOperand(0));
    if (!V0 || V0->getZExtValue() != Bitwidth)
      return;
    NBits = NBits.getOperand(1);
    NegateNBits = false;
  };

  // c) x &  (-1 >> z)  but then we'll have to subtract z from bitwidth
  //   or
  // c) x &  (-1 >> (32 - y))
  auto matchPatternC = [checkOneUse, peekThroughOneUseTruncation, &NegateNBits,
                        canonicalizeShiftAmt](SDValue Mask) -> bool {
    // The mask itself may be truncated.
    Mask = peekThroughOneUseTruncation(Mask);
    unsigned Bitwidth = Mask.getSimpleValueType().getSizeInBits();
    // Match `l>>`. Must only have one use!
    if (Mask.getOpcode() != ISD::SRL || !checkOneUse(Mask))
      return false;
    // We should be shifting a truly all-ones constant.
    if (!isAllOnesConstant(Mask.getOperand(0)))
      return false;
    SDValue M1 = Mask.getOperand(1);
    // The shift amount should not be used externally.
    if (!checkOneUse(M1))
      return false;
    canonicalizeShiftAmt(M1, Bitwidth);
    // Pattern c. is non-canonical, and is expanded into pattern d. iff there
    // is no extra use of the mask. Clearly, there was one, since we are here.
    // But at the same time, if we need to negate the shift amount,
    // then we don't want the mask to stick around, else it's unprofitable.
    return !NegateNBits;
  };

  SDValue X;

  // d) x << z >> z  but then we'll have to subtract z from bitwidth
  //   or
  // d) x << (32 - y) >> (32 - y)
  auto matchPatternD = [checkOneUse, checkTwoUse, canonicalizeShiftAmt,
                        AllowExtraUsesByDefault, &NegateNBits,
                        &X](SDNode *Node) -> bool {
    if (Node->getOpcode() != ISD::SRL)
      return false;
    SDValue N0 = Node->getOperand(0);
    if (N0->getOpcode() != ISD::SHL)
      return false;
    unsigned Bitwidth = N0.getSimpleValueType().getSizeInBits();
    SDValue N1 = Node->getOperand(1);
    SDValue N01 = N0->getOperand(1);
    // Both of the shifts must be by the exact same value.
    if (N1 != N01)
      return false;
    canonicalizeShiftAmt(N1, Bitwidth);
    // There should not be any external uses of the inner shift / shift amount.
    // Note that while we are generally okay with external uses given BMI2,
    // iff we need to negate the shift amount, we are not okay with extra uses.
    const bool AllowExtraUses = AllowExtraUsesByDefault && !NegateNBits;
    if (!checkOneUse(N0, AllowExtraUses) || !checkTwoUse(N1, AllowExtraUses))
      return false;
    X = N0->getOperand(0);
    return true;
  };
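  // For example (editor's illustration): on i64, 'x << (64 - y) >> (64 - y)'
  // clears the high (64 - y) bits, i.e. keeps the low y bits;
  // canonicalizeShiftAmt recovers y directly, so no extra negation is needed.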

  auto matchLowBitMask = [matchPatternA, matchPatternB,
                          matchPatternC](SDValue Mask) -> bool {
    return matchPatternA(Mask) || matchPatternB(Mask) || matchPatternC(Mask);
  };

  if (Node->getOpcode() == ISD::AND) {
    X = Node->getOperand(0);
    SDValue Mask = Node->getOperand(1);

    if (matchLowBitMask(Mask)) {
      // Great.
    } else {
      std::swap(X, Mask);
      if (!matchLowBitMask(Mask))
        return false;
    }
  } else if (matchLowBitMask(SDValue(Node, 0))) {
    X = CurDAG->getAllOnesConstant(SDLoc(Node), NVT);
  } else if (!matchPatternD(Node))
    return false;

  // If we need to negate the shift amount, require BMI2 BZHI support.
  // It's just too unprofitable for BMI1 BEXTR.
  if (NegateNBits && !Subtarget->hasBMI2())
    return false;

  SDLoc DL(Node);

  // Truncate the shift amount.
  NBits = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NBits);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  // Insert 8-bit NBits into lowest 8 bits of 32-bit register.
  // All the other bits are undefined, we do not care about them.
  SDValue ImplDef = SDValue(
      CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, DL, MVT::i32), 0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), ImplDef);

  SDValue SRIdxVal = CurDAG->getTargetConstant(X86::sub_8bit, DL, MVT::i32);
  insertDAGNode(*CurDAG, SDValue(Node, 0), SRIdxVal);
  NBits = SDValue(CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
                                         MVT::i32, ImplDef, NBits, SRIdxVal),
                  0);
  insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);

  // We might have matched the amount of high bits to be cleared,
  // but we want the amount of low bits to be kept, so negate it then.
  if (NegateNBits) {
    SDValue BitWidthC = CurDAG->getConstant(NVT.getSizeInBits(), DL, MVT::i32);
    insertDAGNode(*CurDAG, SDValue(Node, 0), BitWidthC);

    NBits = CurDAG->getNode(ISD::SUB, DL, MVT::i32, BitWidthC, NBits);
    insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
  }

  if (Subtarget->hasBMI2()) {
    // Great, just emit the BZHI.
    if (NVT != MVT::i32) {
      // But have to place the bit count into the wide-enough register first.
      NBits = CurDAG->getNode(ISD::ANY_EXTEND, DL, NVT, NBits);
      insertDAGNode(*CurDAG, SDValue(Node, 0), NBits);
    }

    SDValue Extract = CurDAG->getNode(X86ISD::BZHI, DL, NVT, X, NBits);
    ReplaceNode(Node, Extract.getNode());
    SelectCode(Extract.getNode());
    return true;
  }

  // Else, if we do *NOT* have BMI2, let's find out if the 'X' is
  // *logically* shifted (potentially with a one-use trunc in between),
  // and the truncation was the only use of the shift,
  // and if so look past the one-use truncation.
  {
    SDValue RealX = peekThroughOneUseTruncation(X);
    // FIXME: only if the shift is one-use?
    if (RealX != X && RealX.getOpcode() == ISD::SRL)
      X = RealX;
  }

  MVT XVT = X.getSimpleValueType();

  // Else, emitting BEXTR requires one more step.
  // The 'control' of BEXTR has the pattern of:
  // [15...8 bit][ 7...0 bit] location
  // [ bit count][     shift] name
  // I.e. 0b00000010'00000001 means  (x >> 0b1) & 0b11

  // Shift NBits left by 8 bits, thus producing 'control'.
  // This makes the low 8 bits to be zero.
  SDValue C8 = CurDAG->getConstant(8, DL, MVT::i8);
  insertDAGNode(*CurDAG, SDValue(Node, 0), C8);
  SDValue Control = CurDAG->getNode(ISD::SHL, DL, MVT::i32, NBits, C8);
  insertDAGNode(*CurDAG, SDValue(Node, 0), Control);

  // If the 'X' is *logically* shifted, we can fold that shift into 'control'.
  // FIXME: only if the shift is one-use?
  if (X.getOpcode() == ISD::SRL) {
    SDValue ShiftAmt = X.getOperand(1);
    X = X.getOperand(0);

    assert(ShiftAmt.getValueType() == MVT::i8 &&
           "Expected shift amount to be i8");

    // Now, *zero*-extend the shift amount. The bits 8...15 *must* be zero!
    // We could zext to i16 in some form, but we intentionally don't do that.
    SDValue OrigShiftAmt = ShiftAmt;
    ShiftAmt = CurDAG->getNode(ISD::ZERO_EXTEND, DL, MVT::i32, ShiftAmt);
    insertDAGNode(*CurDAG, OrigShiftAmt, ShiftAmt);

    // And now 'or' these low 8 bits of shift amount into the 'control'.
    Control = CurDAG->getNode(ISD::OR, DL, MVT::i32, Control, ShiftAmt);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // But have to place the 'control' into the wide-enough register first.
  if (XVT != MVT::i32) {
    Control = CurDAG->getNode(ISD::ANY_EXTEND, DL, XVT, Control);
    insertDAGNode(*CurDAG, SDValue(Node, 0), Control);
  }

  // And finally, form the BEXTR itself.
  SDValue Extract = CurDAG->getNode(X86ISD::BEXTR, DL, XVT, X, Control);

  // The 'X' was originally truncated. Do that now.
  if (XVT != NVT) {
    insertDAGNode(*CurDAG, SDValue(Node, 0), Extract);
    Extract = CurDAG->getNode(ISD::TRUNCATE, DL, NVT, Extract);
  }

  ReplaceNode(Node, Extract.getNode());
  SelectCode(Extract.getNode());

  return true;
}
4027
4028// See if this is an (X >> C1) & C2 that we can match to BEXTR/BEXTRI.
4029MachineSDNode *X86DAGToDAGISel::matchBEXTRFromAndImm(SDNode *Node) {
4030  MVT NVT = Node->getSimpleValueType(0);
4031  SDLoc dl(Node);
4032
4033  SDValue N0 = Node->getOperand(0);
4034  SDValue N1 = Node->getOperand(1);
4035
4036  // If we have TBM we can use an immediate for the control. If we have BMI
4037  // we should only do this if the BEXTR instruction is implemented well.
4038  // Otherwise moving the control into a register makes this more costly.
4039  // TODO: Maybe load folding, greater than 32-bit masks, or a guarantee of LICM
4040  // hoisting the move immediate would make it worthwhile with a less optimal
4041  // BEXTR?
4042  bool PreferBEXTR =
4043      Subtarget->hasTBM() || (Subtarget->hasBMI() && Subtarget->hasFastBEXTR());
4044  if (!PreferBEXTR && !Subtarget->hasBMI2())
4045    return nullptr;
4046
4047  // Must have a shift right.
4048  if (N0->getOpcode() != ISD::SRL && N0->getOpcode() != ISD::SRA)
4049    return nullptr;
4050
4051  // Shift can't have additional users.
4052  if (!N0->hasOneUse())
4053    return nullptr;
4054
4055  // Only supported for 32 and 64 bits.
4056  if (NVT != MVT::i32 && NVT != MVT::i64)
4057    return nullptr;
4058
4059  // Shift amount and RHS of and must be constant.
4060  auto *MaskCst = dyn_cast<ConstantSDNode>(N1);
4061  auto *ShiftCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
4062  if (!MaskCst || !ShiftCst)
4063    return nullptr;
4064
4065  // And RHS must be a mask.
4066  uint64_t Mask = MaskCst->getZExtValue();
4067  if (!isMask_64(Mask))
4068    return nullptr;
4069
4070  uint64_t Shift = ShiftCst->getZExtValue();
4071  uint64_t MaskSize = llvm::popcount(Mask);
4072
4073  // Don't interfere with something that can be handled by extracting AH.
4074  // TODO: If we are able to fold a load, BEXTR might still be better than AH.
4075  if (Shift == 8 && MaskSize == 8)
4076    return nullptr;
4077
4078  // Make sure we are only using bits that were in the original value, not
4079  // shifted in.
4080  if (Shift + MaskSize > NVT.getSizeInBits())
4081    return nullptr;
4082
4083  // BZHI, if available, is always fast, unlike BEXTR. But even if we decide
4084  // that we can't use BEXTR, it is only worthwhile using BZHI if the mask
4085  // does not fit into 32 bits. Load folding is not a sufficient reason.
4086  if (!PreferBEXTR && MaskSize <= 32)
4087    return nullptr;
4088
4089  SDValue Control;
4090  unsigned ROpc, MOpc;
4091
4092#define GET_EGPR_IF_ENABLED(OPC) (Subtarget->hasEGPR() ? OPC##_EVEX : OPC)
4093  if (!PreferBEXTR) {
4094    assert(Subtarget->hasBMI2() && "We must have BMI2's BZHI then.");
4095    // If we can't make use of BEXTR then we can't fuse shift+mask stages.
4096    // Let's perform the mask first, and apply shift later. Note that we need to
4097    // widen the mask to account for the fact that we'll apply shift afterwards!
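    // E.g. for (x >> 8) & 0x1fffffffff (a 37-bit mask), BZHI keeps the low
    // 8 + 37 = 45 bits, and the trailing SHR by 8 then extracts the field.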
4098    Control = CurDAG->getTargetConstant(Shift + MaskSize, dl, NVT);
4099    ROpc = NVT == MVT::i64 ? GET_EGPR_IF_ENABLED(X86::BZHI64rr)
4100                           : GET_EGPR_IF_ENABLED(X86::BZHI32rr);
4101    MOpc = NVT == MVT::i64 ? GET_EGPR_IF_ENABLED(X86::BZHI64rm)
4102                           : GET_EGPR_IF_ENABLED(X86::BZHI32rm);
4103    unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
4104    Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
4105  } else {
4106    // The 'control' of BEXTR has the pattern of:
4107    // [15...8 bit][ 7...0 bit] location
4108    // [ bit count][     shift] name
    // I.e. 0b00000010'00000001 means (x >> 0b1) & 0b11
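    // E.g. Shift == 4 and MaskSize == 8 gives a control of 0x0804, matching
    // (x >> 4) & 0xff.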
4110    Control = CurDAG->getTargetConstant(Shift | (MaskSize << 8), dl, NVT);
4111    if (Subtarget->hasTBM()) {
4112      ROpc = NVT == MVT::i64 ? X86::BEXTRI64ri : X86::BEXTRI32ri;
4113      MOpc = NVT == MVT::i64 ? X86::BEXTRI64mi : X86::BEXTRI32mi;
4114    } else {
4115      assert(Subtarget->hasBMI() && "We must have BMI1's BEXTR then.");
      // BMI requires the immediate to be placed in a register.
4117      ROpc = NVT == MVT::i64 ? GET_EGPR_IF_ENABLED(X86::BEXTR64rr)
4118                             : GET_EGPR_IF_ENABLED(X86::BEXTR32rr);
4119      MOpc = NVT == MVT::i64 ? GET_EGPR_IF_ENABLED(X86::BEXTR64rm)
4120                             : GET_EGPR_IF_ENABLED(X86::BEXTR32rm);
4121      unsigned NewOpc = NVT == MVT::i64 ? X86::MOV32ri64 : X86::MOV32ri;
4122      Control = SDValue(CurDAG->getMachineNode(NewOpc, dl, NVT, Control), 0);
4123    }
4124  }
4125
4126  MachineSDNode *NewNode;
4127  SDValue Input = N0->getOperand(0);
4128  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4129  if (tryFoldLoad(Node, N0.getNode(), Input, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4130    SDValue Ops[] = {
4131        Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Control, Input.getOperand(0)};
4132    SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
4133    NewNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4134    // Update the chain.
4135    ReplaceUses(Input.getValue(1), SDValue(NewNode, 2));
4136    // Record the mem-refs
4137    CurDAG->setNodeMemRefs(NewNode, {cast<LoadSDNode>(Input)->getMemOperand()});
4138  } else {
4139    NewNode = CurDAG->getMachineNode(ROpc, dl, NVT, MVT::i32, Input, Control);
4140  }
4141
4142  if (!PreferBEXTR) {
4143    // We still need to apply the shift.
4144    SDValue ShAmt = CurDAG->getTargetConstant(Shift, dl, NVT);
4145    unsigned NewOpc = NVT == MVT::i64 ? X86::SHR64ri : X86::SHR32ri;
4146    NewNode =
4147        CurDAG->getMachineNode(NewOpc, dl, NVT, SDValue(NewNode, 0), ShAmt);
4148  }
4149
4150  return NewNode;
4151}
4152
// Emit a PCMPISTR(I/M) instruction.
4154MachineSDNode *X86DAGToDAGISel::emitPCMPISTR(unsigned ROpc, unsigned MOpc,
4155                                             bool MayFoldLoad, const SDLoc &dl,
4156                                             MVT VT, SDNode *Node) {
4157  SDValue N0 = Node->getOperand(0);
4158  SDValue N1 = Node->getOperand(1);
4159  SDValue Imm = Node->getOperand(2);
4160  auto *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
4161  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
4162
4163  // Try to fold a load. No need to check alignment.
4164  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4165  if (MayFoldLoad && tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4166    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
4167                      N1.getOperand(0) };
4168    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other);
4169    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4170    // Update the chain.
4171    ReplaceUses(N1.getValue(1), SDValue(CNode, 2));
4172    // Record the mem-refs
4173    CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
4174    return CNode;
4175  }
4176
4177  SDValue Ops[] = { N0, N1, Imm };
4178  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32);
4179  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
4180  return CNode;
4181}
4182
// Emit a PCMPESTR(I/M) instruction. Also return the Glue result in case we
// need to emit a second instruction after this one. This is needed since we
// have two CopyToReg nodes glued before this and we need to continue that
// glue through.
4186MachineSDNode *X86DAGToDAGISel::emitPCMPESTR(unsigned ROpc, unsigned MOpc,
4187                                             bool MayFoldLoad, const SDLoc &dl,
4188                                             MVT VT, SDNode *Node,
4189                                             SDValue &InGlue) {
4190  SDValue N0 = Node->getOperand(0);
4191  SDValue N2 = Node->getOperand(2);
4192  SDValue Imm = Node->getOperand(4);
4193  auto *Val = cast<ConstantSDNode>(Imm)->getConstantIntValue();
4194  Imm = CurDAG->getTargetConstant(*Val, SDLoc(Node), Imm.getValueType());
4195
4196  // Try to fold a load. No need to check alignment.
4197  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4198  if (MayFoldLoad && tryFoldLoad(Node, N2, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4199    SDValue Ops[] = { N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
4200                      N2.getOperand(0), InGlue };
4201    SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Other, MVT::Glue);
4202    MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
4203    InGlue = SDValue(CNode, 3);
4204    // Update the chain.
4205    ReplaceUses(N2.getValue(1), SDValue(CNode, 2));
4206    // Record the mem-refs
4207    CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N2)->getMemOperand()});
4208    return CNode;
4209  }
4210
4211  SDValue Ops[] = { N0, N2, Imm, InGlue };
4212  SDVTList VTs = CurDAG->getVTList(VT, MVT::i32, MVT::Glue);
4213  MachineSDNode *CNode = CurDAG->getMachineNode(ROpc, dl, VTs, Ops);
4214  InGlue = SDValue(CNode, 2);
4215  return CNode;
4216}
4217
4218bool X86DAGToDAGISel::tryShiftAmountMod(SDNode *N) {
4219  EVT VT = N->getValueType(0);
4220
4221  // Only handle scalar shifts.
4222  if (VT.isVector())
4223    return false;
4224
4225  // Narrower shifts only mask to 5 bits in hardware.
4226  unsigned Size = VT == MVT::i64 ? 64 : 32;
4227
4228  SDValue OrigShiftAmt = N->getOperand(1);
4229  SDValue ShiftAmt = OrigShiftAmt;
4230  SDLoc DL(N);
4231
4232  // Skip over a truncate of the shift amount.
4233  if (ShiftAmt->getOpcode() == ISD::TRUNCATE)
4234    ShiftAmt = ShiftAmt->getOperand(0);
4235
  // This function is called after X86DAGToDAGISel::matchBitExtract(),
  // so we don't risk breaking an already-matched BZHI/BEXTR pattern here.
4238
4239  SDValue NewShiftAmt;
4240  if (ShiftAmt->getOpcode() == ISD::ADD || ShiftAmt->getOpcode() == ISD::SUB ||
4241      ShiftAmt->getOpcode() == ISD::XOR) {
4242    SDValue Add0 = ShiftAmt->getOperand(0);
4243    SDValue Add1 = ShiftAmt->getOperand(1);
4244    auto *Add0C = dyn_cast<ConstantSDNode>(Add0);
4245    auto *Add1C = dyn_cast<ConstantSDNode>(Add1);
4246    // If we are shifting by X+/-/^N where N == 0 mod Size, then just shift by X
4247    // to avoid the ADD/SUB/XOR.
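    // E.g. a 32-bit shift by (X + 32) is the same as a shift by X, since the
    // hardware only uses the low 5 bits of the amount.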
4248    if (Add1C && Add1C->getAPIntValue().urem(Size) == 0) {
4249      NewShiftAmt = Add0;
4250
4251    } else if (ShiftAmt->getOpcode() != ISD::ADD && ShiftAmt.hasOneUse() &&
4252               ((Add0C && Add0C->getAPIntValue().urem(Size) == Size - 1) ||
4253                (Add1C && Add1C->getAPIntValue().urem(Size) == Size - 1))) {
      // If we are doing a NOT on just the lower bits with (Size*N-1) -/^ X,
      // we can replace it with a NOT. In the XOR case it may save some code
      // size; in the SUB case it may also save a move.
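      // E.g. for 32-bit shifts, (31 - X) and (31 ^ X) both equal (~X & 31),
      // so a NOT of X yields the same masked shift amount.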
4257      assert(Add0C == nullptr || Add1C == nullptr);
4258
4259      // We can only do N-X, not X-N
4260      if (ShiftAmt->getOpcode() == ISD::SUB && Add0C == nullptr)
4261        return false;
4262
4263      EVT OpVT = ShiftAmt.getValueType();
4264
4265      SDValue AllOnes = CurDAG->getAllOnesConstant(DL, OpVT);
4266      NewShiftAmt = CurDAG->getNode(ISD::XOR, DL, OpVT,
4267                                    Add0C == nullptr ? Add0 : Add1, AllOnes);
4268      insertDAGNode(*CurDAG, OrigShiftAmt, AllOnes);
4269      insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4270      // If we are shifting by N-X where N == 0 mod Size, then just shift by
4271      // -X to generate a NEG instead of a SUB of a constant.
4272    } else if (ShiftAmt->getOpcode() == ISD::SUB && Add0C &&
4273               Add0C->getZExtValue() != 0) {
4274      EVT SubVT = ShiftAmt.getValueType();
4275      SDValue X;
4276      if (Add0C->getZExtValue() % Size == 0)
4277        X = Add1;
4278      else if (ShiftAmt.hasOneUse() && Size == 64 &&
4279               Add0C->getZExtValue() % 32 == 0) {
4280        // We have a 64-bit shift by (n*32-x), turn it into -(x+n*32).
4281        // This is mainly beneficial if we already compute (x+n*32).
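        // E.g. a shift by (32 - x) becomes a shift by -(x + 32); the two
        // amounts agree modulo 64, which is all the shift uses.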
4282        if (Add1.getOpcode() == ISD::TRUNCATE) {
4283          Add1 = Add1.getOperand(0);
4284          SubVT = Add1.getValueType();
4285        }
4286        if (Add0.getValueType() != SubVT) {
4287          Add0 = CurDAG->getZExtOrTrunc(Add0, DL, SubVT);
4288          insertDAGNode(*CurDAG, OrigShiftAmt, Add0);
4289        }
4290
4291        X = CurDAG->getNode(ISD::ADD, DL, SubVT, Add1, Add0);
4292        insertDAGNode(*CurDAG, OrigShiftAmt, X);
4293      } else
4294        return false;
4295      // Insert a negate op.
4296      // TODO: This isn't guaranteed to replace the sub if there is a logic cone
4297      // that uses it that's not a shift.
4298      SDValue Zero = CurDAG->getConstant(0, DL, SubVT);
4299      SDValue Neg = CurDAG->getNode(ISD::SUB, DL, SubVT, Zero, X);
4300      NewShiftAmt = Neg;
4301
4302      // Insert these operands into a valid topological order so they can
4303      // get selected independently.
4304      insertDAGNode(*CurDAG, OrigShiftAmt, Zero);
4305      insertDAGNode(*CurDAG, OrigShiftAmt, Neg);
4306    } else
4307      return false;
4308  } else
4309    return false;
4310
4311  if (NewShiftAmt.getValueType() != MVT::i8) {
4312    // Need to truncate the shift amount.
4313    NewShiftAmt = CurDAG->getNode(ISD::TRUNCATE, DL, MVT::i8, NewShiftAmt);
4314    // Add to a correct topological ordering.
4315    insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4316  }
4317
4318  // Insert a new mask to keep the shift amount legal. This should be removed
4319  // by isel patterns.
4320  NewShiftAmt = CurDAG->getNode(ISD::AND, DL, MVT::i8, NewShiftAmt,
4321                                CurDAG->getConstant(Size - 1, DL, MVT::i8));
4322  // Place in a correct topological ordering.
4323  insertDAGNode(*CurDAG, OrigShiftAmt, NewShiftAmt);
4324
4325  SDNode *UpdatedNode = CurDAG->UpdateNodeOperands(N, N->getOperand(0),
4326                                                   NewShiftAmt);
4327  if (UpdatedNode != N) {
4328    // If we found an existing node, we should replace ourselves with that node
4329    // and wait for it to be selected after its other users.
4330    ReplaceNode(N, UpdatedNode);
4331    return true;
4332  }
4333
4334  // If the original shift amount is now dead, delete it so that we don't run
4335  // it through isel.
4336  if (OrigShiftAmt.getNode()->use_empty())
4337    CurDAG->RemoveDeadNode(OrigShiftAmt.getNode());
4338
4339  // Now that we've optimized the shift amount, defer to normal isel to get
4340  // load folding and legacy vs BMI2 selection without repeating it here.
4341  SelectCode(N);
4342  return true;
4343}
4344
4345bool X86DAGToDAGISel::tryShrinkShlLogicImm(SDNode *N) {
4346  MVT NVT = N->getSimpleValueType(0);
4347  unsigned Opcode = N->getOpcode();
4348  SDLoc dl(N);
4349
4350  // For operations of the form (x << C1) op C2, check if we can use a smaller
4351  // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
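  // E.g. (x << 8) | 0x100 becomes (x | 1) << 8, where the OR immediate fits
  // in a sign-extended imm8 instead of needing an imm32.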
4352  SDValue Shift = N->getOperand(0);
4353  SDValue N1 = N->getOperand(1);
4354
4355  auto *Cst = dyn_cast<ConstantSDNode>(N1);
4356  if (!Cst)
4357    return false;
4358
4359  int64_t Val = Cst->getSExtValue();
4360
4361  // If we have an any_extend feeding the AND, look through it to see if there
4362  // is a shift behind it. But only if the AND doesn't use the extended bits.
4363  // FIXME: Generalize this to other ANY_EXTEND than i32 to i64?
4364  bool FoundAnyExtend = false;
4365  if (Shift.getOpcode() == ISD::ANY_EXTEND && Shift.hasOneUse() &&
4366      Shift.getOperand(0).getSimpleValueType() == MVT::i32 &&
4367      isUInt<32>(Val)) {
4368    FoundAnyExtend = true;
4369    Shift = Shift.getOperand(0);
4370  }
4371
4372  if (Shift.getOpcode() != ISD::SHL || !Shift.hasOneUse())
4373    return false;
4374
4375  // i8 is unshrinkable, i16 should be promoted to i32.
4376  if (NVT != MVT::i32 && NVT != MVT::i64)
4377    return false;
4378
4379  auto *ShlCst = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
4380  if (!ShlCst)
4381    return false;
4382
4383  uint64_t ShAmt = ShlCst->getZExtValue();
4384
4385  // Make sure that we don't change the operation by removing bits.
  // This only matters for OR and XOR; AND is unaffected.
4387  uint64_t RemovedBitsMask = (1ULL << ShAmt) - 1;
4388  if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
4389    return false;
4390
4391  // Check the minimum bitwidth for the new constant.
4392  // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
4393  auto CanShrinkImmediate = [&](int64_t &ShiftedVal) {
4394    if (Opcode == ISD::AND) {
4395      // AND32ri is the same as AND64ri32 with zext imm.
4396      // Try this before sign extended immediates below.
4397      ShiftedVal = (uint64_t)Val >> ShAmt;
4398      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4399        return true;
4400      // Also swap order when the AND can become MOVZX.
4401      if (ShiftedVal == UINT8_MAX || ShiftedVal == UINT16_MAX)
4402        return true;
4403    }
4404    ShiftedVal = Val >> ShAmt;
4405    if ((!isInt<8>(Val) && isInt<8>(ShiftedVal)) ||
4406        (!isInt<32>(Val) && isInt<32>(ShiftedVal)))
4407      return true;
4408    if (Opcode != ISD::AND) {
4409      // MOV32ri+OR64r/XOR64r is cheaper than MOV64ri64+OR64rr/XOR64rr
4410      ShiftedVal = (uint64_t)Val >> ShAmt;
4411      if (NVT == MVT::i64 && !isUInt<32>(Val) && isUInt<32>(ShiftedVal))
4412        return true;
4413    }
4414    return false;
4415  };
4416
4417  int64_t ShiftedVal;
4418  if (!CanShrinkImmediate(ShiftedVal))
4419    return false;
4420
4421  // Ok, we can reorder to get a smaller immediate.
4422
  // But it's possible the original immediate allowed an AND to become MOVZX.
  // Do this check late so that the MaskedValueIsZero call is delayed as long
  // as possible.
4426  if (Opcode == ISD::AND) {
4427    // Find the smallest zext this could possibly be.
4428    unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();
4429    ZExtWidth = llvm::bit_ceil(std::max(ZExtWidth, 8U));
4430
4431    // Figure out which bits need to be zero to achieve that mask.
4432    APInt NeededMask = APInt::getLowBitsSet(NVT.getSizeInBits(),
4433                                            ZExtWidth);
4434    NeededMask &= ~Cst->getAPIntValue();
4435
4436    if (CurDAG->MaskedValueIsZero(N->getOperand(0), NeededMask))
4437      return false;
4438  }
4439
4440  SDValue X = Shift.getOperand(0);
4441  if (FoundAnyExtend) {
4442    SDValue NewX = CurDAG->getNode(ISD::ANY_EXTEND, dl, NVT, X);
4443    insertDAGNode(*CurDAG, SDValue(N, 0), NewX);
4444    X = NewX;
4445  }
4446
4447  SDValue NewCst = CurDAG->getConstant(ShiftedVal, dl, NVT);
4448  insertDAGNode(*CurDAG, SDValue(N, 0), NewCst);
4449  SDValue NewBinOp = CurDAG->getNode(Opcode, dl, NVT, X, NewCst);
4450  insertDAGNode(*CurDAG, SDValue(N, 0), NewBinOp);
4451  SDValue NewSHL = CurDAG->getNode(ISD::SHL, dl, NVT, NewBinOp,
4452                                   Shift.getOperand(1));
4453  ReplaceNode(N, NewSHL.getNode());
4454  SelectCode(NewSHL.getNode());
4455  return true;
4456}
4457
4458bool X86DAGToDAGISel::matchVPTERNLOG(SDNode *Root, SDNode *ParentA,
4459                                     SDNode *ParentB, SDNode *ParentC,
4460                                     SDValue A, SDValue B, SDValue C,
4461                                     uint8_t Imm) {
4462  assert(A.isOperandOf(ParentA) && B.isOperandOf(ParentB) &&
4463         C.isOperandOf(ParentC) && "Incorrect parent node");
4464
4465  auto tryFoldLoadOrBCast =
4466      [this](SDNode *Root, SDNode *P, SDValue &L, SDValue &Base, SDValue &Scale,
4467             SDValue &Index, SDValue &Disp, SDValue &Segment) {
4468        if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4469          return true;
4470
4471        // Not a load, check for broadcast which may be behind a bitcast.
4472        if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4473          P = L.getNode();
4474          L = L.getOperand(0);
4475        }
4476
4477        if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4478          return false;
4479
4480        // Only 32 and 64 bit broadcasts are supported.
4481        auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4482        unsigned Size = MemIntr->getMemoryVT().getSizeInBits();
4483        if (Size != 32 && Size != 64)
4484          return false;
4485
4486        return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4487      };
4488
4489  bool FoldedLoad = false;
4490  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4491  if (tryFoldLoadOrBCast(Root, ParentC, C, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
4492    FoldedLoad = true;
4493  } else if (tryFoldLoadOrBCast(Root, ParentA, A, Tmp0, Tmp1, Tmp2, Tmp3,
4494                                Tmp4)) {
4495    FoldedLoad = true;
4496    std::swap(A, C);
4497    // Swap bits 1/4 and 3/6.
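    // (Imm bit (a<<2)|(b<<1)|c holds the result for inputs A=a, B=b, C=c;
    // exchanging A and C swaps index bits 2 and 0, i.e. imm bits 1<->4 and
    // 3<->6.)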
4498    uint8_t OldImm = Imm;
4499    Imm = OldImm & 0xa5;
4500    if (OldImm & 0x02) Imm |= 0x10;
4501    if (OldImm & 0x10) Imm |= 0x02;
4502    if (OldImm & 0x08) Imm |= 0x40;
4503    if (OldImm & 0x40) Imm |= 0x08;
4504  } else if (tryFoldLoadOrBCast(Root, ParentB, B, Tmp0, Tmp1, Tmp2, Tmp3,
4505                                Tmp4)) {
4506    FoldedLoad = true;
4507    std::swap(B, C);
4508    // Swap bits 1/2 and 5/6.
4509    uint8_t OldImm = Imm;
4510    Imm = OldImm & 0x99;
4511    if (OldImm & 0x02) Imm |= 0x04;
4512    if (OldImm & 0x04) Imm |= 0x02;
4513    if (OldImm & 0x20) Imm |= 0x40;
4514    if (OldImm & 0x40) Imm |= 0x20;
4515  }
4516
4517  SDLoc DL(Root);
4518
4519  SDValue TImm = CurDAG->getTargetConstant(Imm, DL, MVT::i8);
4520
4521  MVT NVT = Root->getSimpleValueType(0);
4522
4523  MachineSDNode *MNode;
4524  if (FoldedLoad) {
4525    SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
4526
4527    unsigned Opc;
4528    if (C.getOpcode() == X86ISD::VBROADCAST_LOAD) {
4529      auto *MemIntr = cast<MemIntrinsicSDNode>(C);
4530      unsigned EltSize = MemIntr->getMemoryVT().getSizeInBits();
4531      assert((EltSize == 32 || EltSize == 64) && "Unexpected broadcast size!");
4532
4533      bool UseD = EltSize == 32;
4534      if (NVT.is128BitVector())
4535        Opc = UseD ? X86::VPTERNLOGDZ128rmbi : X86::VPTERNLOGQZ128rmbi;
4536      else if (NVT.is256BitVector())
4537        Opc = UseD ? X86::VPTERNLOGDZ256rmbi : X86::VPTERNLOGQZ256rmbi;
4538      else if (NVT.is512BitVector())
4539        Opc = UseD ? X86::VPTERNLOGDZrmbi : X86::VPTERNLOGQZrmbi;
4540      else
4541        llvm_unreachable("Unexpected vector size!");
4542    } else {
4543      bool UseD = NVT.getVectorElementType() == MVT::i32;
4544      if (NVT.is128BitVector())
4545        Opc = UseD ? X86::VPTERNLOGDZ128rmi : X86::VPTERNLOGQZ128rmi;
4546      else if (NVT.is256BitVector())
4547        Opc = UseD ? X86::VPTERNLOGDZ256rmi : X86::VPTERNLOGQZ256rmi;
4548      else if (NVT.is512BitVector())
4549        Opc = UseD ? X86::VPTERNLOGDZrmi : X86::VPTERNLOGQZrmi;
4550      else
4551        llvm_unreachable("Unexpected vector size!");
4552    }
4553
4554    SDValue Ops[] = {A, B, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, TImm, C.getOperand(0)};
4555    MNode = CurDAG->getMachineNode(Opc, DL, VTs, Ops);
4556
4557    // Update the chain.
4558    ReplaceUses(C.getValue(1), SDValue(MNode, 1));
4559    // Record the mem-refs
4560    CurDAG->setNodeMemRefs(MNode, {cast<MemSDNode>(C)->getMemOperand()});
4561  } else {
4562    bool UseD = NVT.getVectorElementType() == MVT::i32;
4563    unsigned Opc;
4564    if (NVT.is128BitVector())
4565      Opc = UseD ? X86::VPTERNLOGDZ128rri : X86::VPTERNLOGQZ128rri;
4566    else if (NVT.is256BitVector())
4567      Opc = UseD ? X86::VPTERNLOGDZ256rri : X86::VPTERNLOGQZ256rri;
4568    else if (NVT.is512BitVector())
4569      Opc = UseD ? X86::VPTERNLOGDZrri : X86::VPTERNLOGQZrri;
4570    else
4571      llvm_unreachable("Unexpected vector size!");
4572
4573    MNode = CurDAG->getMachineNode(Opc, DL, NVT, {A, B, C, TImm});
4574  }
4575
4576  ReplaceUses(SDValue(Root, 0), SDValue(MNode, 0));
4577  CurDAG->RemoveDeadNode(Root);
4578  return true;
4579}
4580
4581// Try to match two logic ops to a VPTERNLOG.
4582// FIXME: Handle more complex patterns that use an operand more than once?
4583bool X86DAGToDAGISel::tryVPTERNLOG(SDNode *N) {
4584  MVT NVT = N->getSimpleValueType(0);
4585
4586  // Make sure we support VPTERNLOG.
4587  if (!NVT.isVector() || !Subtarget->hasAVX512() ||
4588      NVT.getVectorElementType() == MVT::i1)
4589    return false;
4590
4591  // We need VLX for 128/256-bit.
4592  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4593    return false;
4594
4595  SDValue N0 = N->getOperand(0);
4596  SDValue N1 = N->getOperand(1);
4597
4598  auto getFoldableLogicOp = [](SDValue Op) {
4599    // Peek through single use bitcast.
4600    if (Op.getOpcode() == ISD::BITCAST && Op.hasOneUse())
4601      Op = Op.getOperand(0);
4602
4603    if (!Op.hasOneUse())
4604      return SDValue();
4605
4606    unsigned Opc = Op.getOpcode();
4607    if (Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR ||
4608        Opc == X86ISD::ANDNP)
4609      return Op;
4610
4611    return SDValue();
4612  };
4613
4614  SDValue A, FoldableOp;
4615  if ((FoldableOp = getFoldableLogicOp(N1))) {
4616    A = N0;
4617  } else if ((FoldableOp = getFoldableLogicOp(N0))) {
4618    A = N1;
4619  } else
4620    return false;
4621
4622  SDValue B = FoldableOp.getOperand(0);
4623  SDValue C = FoldableOp.getOperand(1);
4624  SDNode *ParentA = N;
4625  SDNode *ParentB = FoldableOp.getNode();
4626  SDNode *ParentC = FoldableOp.getNode();
4627
4628  // We can build the appropriate control immediate by performing the logic
4629  // operation we're matching using these constants for A, B, and C.
4630  uint8_t TernlogMagicA = 0xf0;
4631  uint8_t TernlogMagicB = 0xcc;
4632  uint8_t TernlogMagicC = 0xaa;
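  // E.g. matching (or A, (and B, C)) computes Imm = (0xcc & 0xaa) | 0xf0,
  // i.e. 0xf8, the truth table of the whole expression.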
4633
4634  // Some of the inputs may be inverted, peek through them and invert the
4635  // magic values accordingly.
4636  // TODO: There may be a bitcast before the xor that we should peek through.
4637  auto PeekThroughNot = [](SDValue &Op, SDNode *&Parent, uint8_t &Magic) {
4638    if (Op.getOpcode() == ISD::XOR && Op.hasOneUse() &&
4639        ISD::isBuildVectorAllOnes(Op.getOperand(1).getNode())) {
4640      Magic = ~Magic;
4641      Parent = Op.getNode();
4642      Op = Op.getOperand(0);
4643    }
4644  };
4645
4646  PeekThroughNot(A, ParentA, TernlogMagicA);
4647  PeekThroughNot(B, ParentB, TernlogMagicB);
4648  PeekThroughNot(C, ParentC, TernlogMagicC);
4649
4650  uint8_t Imm;
4651  switch (FoldableOp.getOpcode()) {
4652  default: llvm_unreachable("Unexpected opcode!");
4653  case ISD::AND:      Imm = TernlogMagicB & TernlogMagicC; break;
4654  case ISD::OR:       Imm = TernlogMagicB | TernlogMagicC; break;
4655  case ISD::XOR:      Imm = TernlogMagicB ^ TernlogMagicC; break;
4656  case X86ISD::ANDNP: Imm = ~(TernlogMagicB) & TernlogMagicC; break;
4657  }
4658
4659  switch (N->getOpcode()) {
4660  default: llvm_unreachable("Unexpected opcode!");
4661  case X86ISD::ANDNP:
4662    if (A == N0)
4663      Imm &= ~TernlogMagicA;
4664    else
4665      Imm = ~(Imm) & TernlogMagicA;
4666    break;
4667  case ISD::AND: Imm &= TernlogMagicA; break;
4668  case ISD::OR:  Imm |= TernlogMagicA; break;
4669  case ISD::XOR: Imm ^= TernlogMagicA; break;
4670  }
4671
4672  return matchVPTERNLOG(N, ParentA, ParentB, ParentC, A, B, C, Imm);
4673}
4674
4675/// If the high bits of an 'and' operand are known zero, try setting the
4676/// high bits of an 'and' constant operand to produce a smaller encoding by
4677/// creating a small, sign-extended negative immediate rather than a large
4678/// positive one. This reverses a transform in SimplifyDemandedBits that
4679/// shrinks mask constants by clearing bits. There is also a possibility that
4680/// the 'and' mask can be made -1, so the 'and' itself is unnecessary. In that
4681/// case, just replace the 'and'. Return 'true' if the node is replaced.
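/// For example, a 32-bit 'and' with 0x0FFFFFF0 where the top 4 bits of the
/// other operand are known zero can instead use the mask 0xFFFFFFF0 (-16),
/// which encodes as a sign-extended imm8 rather than an imm32.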
4682bool X86DAGToDAGISel::shrinkAndImmediate(SDNode *And) {
4683  // i8 is unshrinkable, i16 should be promoted to i32, and vector ops don't
4684  // have immediate operands.
4685  MVT VT = And->getSimpleValueType(0);
4686  if (VT != MVT::i32 && VT != MVT::i64)
4687    return false;
4688
4689  auto *And1C = dyn_cast<ConstantSDNode>(And->getOperand(1));
4690  if (!And1C)
4691    return false;
4692
  // Bail out if the mask constant is already negative; it can't shrink further.
4694  // If the upper 32 bits of a 64 bit mask are all zeros, we have special isel
4695  // patterns to use a 32-bit and instead of a 64-bit and by relying on the
4696  // implicit zeroing of 32 bit ops. So we should check if the lower 32 bits
4697  // are negative too.
4698  APInt MaskVal = And1C->getAPIntValue();
4699  unsigned MaskLZ = MaskVal.countl_zero();
4700  if (!MaskLZ || (VT == MVT::i64 && MaskLZ == 32))
4701    return false;
4702
4703  // Don't extend into the upper 32 bits of a 64 bit mask.
4704  if (VT == MVT::i64 && MaskLZ >= 32) {
4705    MaskLZ -= 32;
4706    MaskVal = MaskVal.trunc(32);
4707  }
4708
4709  SDValue And0 = And->getOperand(0);
4710  APInt HighZeros = APInt::getHighBitsSet(MaskVal.getBitWidth(), MaskLZ);
4711  APInt NegMaskVal = MaskVal | HighZeros;
4712
4713  // If a negative constant would not allow a smaller encoding, there's no need
4714  // to continue. Only change the constant when we know it's a win.
4715  unsigned MinWidth = NegMaskVal.getSignificantBits();
4716  if (MinWidth > 32 || (MinWidth > 8 && MaskVal.getSignificantBits() <= 32))
4717    return false;
4718
4719  // Extend masks if we truncated above.
4720  if (VT == MVT::i64 && MaskVal.getBitWidth() < 64) {
4721    NegMaskVal = NegMaskVal.zext(64);
4722    HighZeros = HighZeros.zext(64);
4723  }
4724
4725  // The variable operand must be all zeros in the top bits to allow using the
4726  // new, negative constant as the mask.
4727  if (!CurDAG->MaskedValueIsZero(And0, HighZeros))
4728    return false;
4729
4730  // Check if the mask is -1. In that case, this is an unnecessary instruction
4731  // that escaped earlier analysis.
4732  if (NegMaskVal.isAllOnes()) {
4733    ReplaceNode(And, And0.getNode());
4734    return true;
4735  }
4736
4737  // A negative mask allows a smaller encoding. Create a new 'and' node.
4738  SDValue NewMask = CurDAG->getConstant(NegMaskVal, SDLoc(And), VT);
4739  insertDAGNode(*CurDAG, SDValue(And, 0), NewMask);
4740  SDValue NewAnd = CurDAG->getNode(ISD::AND, SDLoc(And), VT, And0, NewMask);
4741  ReplaceNode(And, NewAnd.getNode());
4742  SelectCode(NewAnd.getNode());
4743  return true;
4744}
4745
4746static unsigned getVPTESTMOpc(MVT TestVT, bool IsTestN, bool FoldedLoad,
4747                              bool FoldedBCast, bool Masked) {
4748#define VPTESTM_CASE(VT, SUFFIX) \
4749case MVT::VT: \
4750  if (Masked) \
4751    return IsTestN ? X86::VPTESTNM##SUFFIX##k: X86::VPTESTM##SUFFIX##k; \
4752  return IsTestN ? X86::VPTESTNM##SUFFIX : X86::VPTESTM##SUFFIX;
4753
4754
4755#define VPTESTM_BROADCAST_CASES(SUFFIX) \
4756default: llvm_unreachable("Unexpected VT!"); \
4757VPTESTM_CASE(v4i32, DZ128##SUFFIX) \
4758VPTESTM_CASE(v2i64, QZ128##SUFFIX) \
4759VPTESTM_CASE(v8i32, DZ256##SUFFIX) \
4760VPTESTM_CASE(v4i64, QZ256##SUFFIX) \
4761VPTESTM_CASE(v16i32, DZ##SUFFIX) \
4762VPTESTM_CASE(v8i64, QZ##SUFFIX)
4763
4764#define VPTESTM_FULL_CASES(SUFFIX) \
4765VPTESTM_BROADCAST_CASES(SUFFIX) \
4766VPTESTM_CASE(v16i8, BZ128##SUFFIX) \
4767VPTESTM_CASE(v8i16, WZ128##SUFFIX) \
4768VPTESTM_CASE(v32i8, BZ256##SUFFIX) \
4769VPTESTM_CASE(v16i16, WZ256##SUFFIX) \
4770VPTESTM_CASE(v64i8, BZ##SUFFIX) \
4771VPTESTM_CASE(v32i16, WZ##SUFFIX)
4772
4773  if (FoldedBCast) {
4774    switch (TestVT.SimpleTy) {
4775    VPTESTM_BROADCAST_CASES(rmb)
4776    }
4777  }
4778
4779  if (FoldedLoad) {
4780    switch (TestVT.SimpleTy) {
4781    VPTESTM_FULL_CASES(rm)
4782    }
4783  }
4784
4785  switch (TestVT.SimpleTy) {
4786  VPTESTM_FULL_CASES(rr)
4787  }
4788
4789#undef VPTESTM_FULL_CASES
4790#undef VPTESTM_BROADCAST_CASES
4791#undef VPTESTM_CASE
4792}
4793
// Try to create a VPTESTM instruction. If InMask is not null, it will be used
// to form a masked operation.
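// VPTESTM sets each result mask bit to whether the AND of the corresponding
// source elements is nonzero; VPTESTNM tests for zero, which covers the SETEQ
// case below.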
4796bool X86DAGToDAGISel::tryVPTESTM(SDNode *Root, SDValue Setcc,
4797                                 SDValue InMask) {
4798  assert(Subtarget->hasAVX512() && "Expected AVX512!");
4799  assert(Setcc.getSimpleValueType().getVectorElementType() == MVT::i1 &&
4800         "Unexpected VT!");
4801
4802  // Look for equal and not equal compares.
4803  ISD::CondCode CC = cast<CondCodeSDNode>(Setcc.getOperand(2))->get();
4804  if (CC != ISD::SETEQ && CC != ISD::SETNE)
4805    return false;
4806
4807  SDValue SetccOp0 = Setcc.getOperand(0);
4808  SDValue SetccOp1 = Setcc.getOperand(1);
4809
4810  // Canonicalize the all zero vector to the RHS.
4811  if (ISD::isBuildVectorAllZeros(SetccOp0.getNode()))
4812    std::swap(SetccOp0, SetccOp1);
4813
4814  // See if we're comparing against zero.
4815  if (!ISD::isBuildVectorAllZeros(SetccOp1.getNode()))
4816    return false;
4817
4818  SDValue N0 = SetccOp0;
4819
4820  MVT CmpVT = N0.getSimpleValueType();
4821  MVT CmpSVT = CmpVT.getVectorElementType();
4822
4823  // Start with both operands the same. We'll try to refine this.
4824  SDValue Src0 = N0;
4825  SDValue Src1 = N0;
4826
4827  {
4828    // Look through single use bitcasts.
4829    SDValue N0Temp = N0;
4830    if (N0Temp.getOpcode() == ISD::BITCAST && N0Temp.hasOneUse())
4831      N0Temp = N0.getOperand(0);
4832
    // Look for a single-use AND.
4834    if (N0Temp.getOpcode() == ISD::AND && N0Temp.hasOneUse()) {
4835      Src0 = N0Temp.getOperand(0);
4836      Src1 = N0Temp.getOperand(1);
4837    }
4838  }
4839
4840  // Without VLX we need to widen the operation.
4841  bool Widen = !Subtarget->hasVLX() && !CmpVT.is512BitVector();
4842
4843  auto tryFoldLoadOrBCast = [&](SDNode *Root, SDNode *P, SDValue &L,
4844                                SDValue &Base, SDValue &Scale, SDValue &Index,
4845                                SDValue &Disp, SDValue &Segment) {
4846    // If we need to widen, we can't fold the load.
4847    if (!Widen)
4848      if (tryFoldLoad(Root, P, L, Base, Scale, Index, Disp, Segment))
4849        return true;
4850
    // If we didn't fold a load, try to match a broadcast. There is no widening
    // limitation for this, but only 32- and 64-bit element types are supported.
4853    if (CmpSVT != MVT::i32 && CmpSVT != MVT::i64)
4854      return false;
4855
4856    // Look through single use bitcasts.
4857    if (L.getOpcode() == ISD::BITCAST && L.hasOneUse()) {
4858      P = L.getNode();
4859      L = L.getOperand(0);
4860    }
4861
4862    if (L.getOpcode() != X86ISD::VBROADCAST_LOAD)
4863      return false;
4864
4865    auto *MemIntr = cast<MemIntrinsicSDNode>(L);
4866    if (MemIntr->getMemoryVT().getSizeInBits() != CmpSVT.getSizeInBits())
4867      return false;
4868
4869    return tryFoldBroadcast(Root, P, L, Base, Scale, Index, Disp, Segment);
4870  };
4871
4872  // We can only fold loads if the sources are unique.
4873  bool CanFoldLoads = Src0 != Src1;
4874
4875  bool FoldedLoad = false;
4876  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
4877  if (CanFoldLoads) {
4878    FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src1, Tmp0, Tmp1, Tmp2,
4879                                    Tmp3, Tmp4);
4880    if (!FoldedLoad) {
      // AND is commutative.
4882      FoldedLoad = tryFoldLoadOrBCast(Root, N0.getNode(), Src0, Tmp0, Tmp1,
4883                                      Tmp2, Tmp3, Tmp4);
4884      if (FoldedLoad)
4885        std::swap(Src0, Src1);
4886    }
4887  }
4888
4889  bool FoldedBCast = FoldedLoad && Src1.getOpcode() == X86ISD::VBROADCAST_LOAD;
4890
4891  bool IsMasked = InMask.getNode() != nullptr;
4892
4893  SDLoc dl(Root);
4894
4895  MVT ResVT = Setcc.getSimpleValueType();
4896  MVT MaskVT = ResVT;
4897  if (Widen) {
4898    // Widen the inputs using insert_subreg or copy_to_regclass.
4899    unsigned Scale = CmpVT.is128BitVector() ? 4 : 2;
4900    unsigned SubReg = CmpVT.is128BitVector() ? X86::sub_xmm : X86::sub_ymm;
4901    unsigned NumElts = CmpVT.getVectorNumElements() * Scale;
4902    CmpVT = MVT::getVectorVT(CmpSVT, NumElts);
4903    MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
4904    SDValue ImplDef = SDValue(CurDAG->getMachineNode(X86::IMPLICIT_DEF, dl,
4905                                                     CmpVT), 0);
4906    Src0 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src0);
4907
4908    if (!FoldedBCast)
4909      Src1 = CurDAG->getTargetInsertSubreg(SubReg, dl, CmpVT, ImplDef, Src1);
4910
4911    if (IsMasked) {
4912      // Widen the mask.
4913      unsigned RegClass = TLI->getRegClassFor(MaskVT)->getID();
4914      SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4915      InMask = SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4916                                              dl, MaskVT, InMask, RC), 0);
4917    }
4918  }
4919
4920  bool IsTestN = CC == ISD::SETEQ;
4921  unsigned Opc = getVPTESTMOpc(CmpVT, IsTestN, FoldedLoad, FoldedBCast,
4922                               IsMasked);
4923
4924  MachineSDNode *CNode;
4925  if (FoldedLoad) {
4926    SDVTList VTs = CurDAG->getVTList(MaskVT, MVT::Other);
4927
4928    if (IsMasked) {
4929      SDValue Ops[] = { InMask, Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4930                        Src1.getOperand(0) };
4931      CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4932    } else {
4933      SDValue Ops[] = { Src0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4,
4934                        Src1.getOperand(0) };
4935      CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
4936    }
4937
4938    // Update the chain.
4939    ReplaceUses(Src1.getValue(1), SDValue(CNode, 1));
4940    // Record the mem-refs
4941    CurDAG->setNodeMemRefs(CNode, {cast<MemSDNode>(Src1)->getMemOperand()});
4942  } else {
4943    if (IsMasked)
4944      CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, InMask, Src0, Src1);
4945    else
4946      CNode = CurDAG->getMachineNode(Opc, dl, MaskVT, Src0, Src1);
4947  }
4948
4949  // If we widened, we need to shrink the mask VT.
4950  if (Widen) {
4951    unsigned RegClass = TLI->getRegClassFor(ResVT)->getID();
4952    SDValue RC = CurDAG->getTargetConstant(RegClass, dl, MVT::i32);
4953    CNode = CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
4954                                   dl, ResVT, SDValue(CNode, 0), RC);
4955  }
4956
4957  ReplaceUses(SDValue(Root, 0), SDValue(CNode, 0));
4958  CurDAG->RemoveDeadNode(Root);
4959  return true;
4960}
4961
4962// Try to match the bitselect pattern (or (and A, B), (andn A, C)). Turn it
4963// into vpternlog.
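// The bitselect truth table is 0xCA: evaluating (A & B) | (~A & C) on the
// magic constants gives (0xf0 & 0xcc) | (0x0f & 0xaa) = 0xca.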
4964bool X86DAGToDAGISel::tryMatchBitSelect(SDNode *N) {
4965  assert(N->getOpcode() == ISD::OR && "Unexpected opcode!");
4966
4967  MVT NVT = N->getSimpleValueType(0);
4968
4969  // Make sure we support VPTERNLOG.
4970  if (!NVT.isVector() || !Subtarget->hasAVX512())
4971    return false;
4972
4973  // We need VLX for 128/256-bit.
4974  if (!(Subtarget->hasVLX() || NVT.is512BitVector()))
4975    return false;
4976
4977  SDValue N0 = N->getOperand(0);
4978  SDValue N1 = N->getOperand(1);
4979
4980  // Canonicalize AND to LHS.
4981  if (N1.getOpcode() == ISD::AND)
4982    std::swap(N0, N1);
4983
4984  if (N0.getOpcode() != ISD::AND ||
4985      N1.getOpcode() != X86ISD::ANDNP ||
4986      !N0.hasOneUse() || !N1.hasOneUse())
4987    return false;
4988
  // ANDN is not commutable; use it to pick out A and C.
4990  SDValue A = N1.getOperand(0);
4991  SDValue C = N1.getOperand(1);
4992
  // AND is commutable: if one operand matches A, the other operand is B.
  // Otherwise this isn't a match.
4995  SDValue B;
4996  if (N0.getOperand(0) == A)
4997    B = N0.getOperand(1);
4998  else if (N0.getOperand(1) == A)
4999    B = N0.getOperand(0);
5000  else
5001    return false;
5002
5003  SDLoc dl(N);
5004  SDValue Imm = CurDAG->getTargetConstant(0xCA, dl, MVT::i8);
5005  SDValue Ternlog = CurDAG->getNode(X86ISD::VPTERNLOG, dl, NVT, A, B, C, Imm);
5006  ReplaceNode(N, Ternlog.getNode());
5007
5008  return matchVPTERNLOG(Ternlog.getNode(), Ternlog.getNode(), Ternlog.getNode(),
5009                        Ternlog.getNode(), A, B, C, 0xCA);
5010}
5011
5012void X86DAGToDAGISel::Select(SDNode *Node) {
5013  MVT NVT = Node->getSimpleValueType(0);
5014  unsigned Opcode = Node->getOpcode();
5015  SDLoc dl(Node);
5016
5017  if (Node->isMachineOpcode()) {
5018    LLVM_DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
5019    Node->setNodeId(-1);
5020    return;   // Already selected.
5021  }
5022
5023  switch (Opcode) {
5024  default: break;
5025  case ISD::INTRINSIC_W_CHAIN: {
5026    unsigned IntNo = Node->getConstantOperandVal(1);
5027    switch (IntNo) {
5028    default: break;
5029    case Intrinsic::x86_encodekey128:
5030    case Intrinsic::x86_encodekey256: {
5031      if (!Subtarget->hasKL())
5032        break;
5033
5034      unsigned Opcode;
5035      switch (IntNo) {
5036      default: llvm_unreachable("Impossible intrinsic");
5037      case Intrinsic::x86_encodekey128: Opcode = X86::ENCODEKEY128; break;
5038      case Intrinsic::x86_encodekey256: Opcode = X86::ENCODEKEY256; break;
5039      }
5040
5041      SDValue Chain = Node->getOperand(0);
5042      Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(3),
5043                                   SDValue());
5044      if (Opcode == X86::ENCODEKEY256)
5045        Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(4),
5046                                     Chain.getValue(1));
5047
5048      MachineSDNode *Res = CurDAG->getMachineNode(
5049          Opcode, dl, Node->getVTList(),
5050          {Node->getOperand(2), Chain, Chain.getValue(1)});
5051      ReplaceNode(Node, Res);
5052      return;
5053    }
5054    case Intrinsic::x86_tileloadd64_internal:
5055    case Intrinsic::x86_tileloaddt164_internal: {
5056      if (!Subtarget->hasAMXTILE())
5057        break;
5058      unsigned Opc = IntNo == Intrinsic::x86_tileloadd64_internal
5059                         ? X86::PTILELOADDV
5060                         : X86::PTILELOADDT1V;
5061      // _tile_loadd_internal(row, col, buf, STRIDE)
5062      SDValue Base = Node->getOperand(4);
5063      SDValue Scale = getI8Imm(1, dl);
5064      SDValue Index = Node->getOperand(5);
5065      SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
5066      SDValue Segment = CurDAG->getRegister(0, MVT::i16);
5067      SDValue Chain = Node->getOperand(0);
5068      MachineSDNode *CNode;
5069      SDValue Ops[] = {Node->getOperand(2),
5070                       Node->getOperand(3),
5071                       Base,
5072                       Scale,
5073                       Index,
5074                       Disp,
5075                       Segment,
5076                       Chain};
5077      CNode = CurDAG->getMachineNode(Opc, dl, {MVT::x86amx, MVT::Other}, Ops);
5078      ReplaceNode(Node, CNode);
5079      return;
5080    }
5081    }
5082    break;
5083  }
5084  case ISD::INTRINSIC_VOID: {
5085    unsigned IntNo = Node->getConstantOperandVal(1);
5086    switch (IntNo) {
5087    default: break;
5088    case Intrinsic::x86_sse3_monitor:
5089    case Intrinsic::x86_monitorx:
5090    case Intrinsic::x86_clzero: {
5091      bool Use64BitPtr = Node->getOperand(2).getValueType() == MVT::i64;
5092
5093      unsigned Opc = 0;
5094      switch (IntNo) {
5095      default: llvm_unreachable("Unexpected intrinsic!");
5096      case Intrinsic::x86_sse3_monitor:
5097        if (!Subtarget->hasSSE3())
5098          break;
5099        Opc = Use64BitPtr ? X86::MONITOR64rrr : X86::MONITOR32rrr;
5100        break;
5101      case Intrinsic::x86_monitorx:
5102        if (!Subtarget->hasMWAITX())
5103          break;
5104        Opc = Use64BitPtr ? X86::MONITORX64rrr : X86::MONITORX32rrr;
5105        break;
5106      case Intrinsic::x86_clzero:
5107        if (!Subtarget->hasCLZERO())
5108          break;
5109        Opc = Use64BitPtr ? X86::CLZERO64r : X86::CLZERO32r;
5110        break;
5111      }
5112
5113      if (Opc) {
5114        unsigned PtrReg = Use64BitPtr ? X86::RAX : X86::EAX;
5115        SDValue Chain = CurDAG->getCopyToReg(Node->getOperand(0), dl, PtrReg,
5116                                             Node->getOperand(2), SDValue());
5117        SDValue InGlue = Chain.getValue(1);
5118
5119        if (IntNo == Intrinsic::x86_sse3_monitor ||
5120            IntNo == Intrinsic::x86_monitorx) {
5121          // Copy the other two operands to ECX and EDX.
5122          Chain = CurDAG->getCopyToReg(Chain, dl, X86::ECX, Node->getOperand(3),
5123                                       InGlue);
5124          InGlue = Chain.getValue(1);
5125          Chain = CurDAG->getCopyToReg(Chain, dl, X86::EDX, Node->getOperand(4),
5126                                       InGlue);
5127          InGlue = Chain.getValue(1);
5128        }
5129
5130        MachineSDNode *CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other,
5131                                                      { Chain, InGlue});
5132        ReplaceNode(Node, CNode);
5133        return;
5134      }
5135
5136      break;
5137    }
5138    case Intrinsic::x86_tilestored64_internal: {
5139      unsigned Opc = X86::PTILESTOREDV;
5140      // _tile_stored_internal(row, col, buf, STRIDE, c)
5141      SDValue Base = Node->getOperand(4);
5142      SDValue Scale = getI8Imm(1, dl);
5143      SDValue Index = Node->getOperand(5);
5144      SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
5145      SDValue Segment = CurDAG->getRegister(0, MVT::i16);
5146      SDValue Chain = Node->getOperand(0);
5147      MachineSDNode *CNode;
5148      SDValue Ops[] = {Node->getOperand(2),
5149                       Node->getOperand(3),
5150                       Base,
5151                       Scale,
5152                       Index,
5153                       Disp,
5154                       Segment,
5155                       Node->getOperand(6),
5156                       Chain};
5157      CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
5158      ReplaceNode(Node, CNode);
5159      return;
5160    }
5161    case Intrinsic::x86_tileloadd64:
5162    case Intrinsic::x86_tileloaddt164:
5163    case Intrinsic::x86_tilestored64: {
5164      if (!Subtarget->hasAMXTILE())
5165        break;
5166      unsigned Opc;
5167      switch (IntNo) {
5168      default: llvm_unreachable("Unexpected intrinsic!");
5169      case Intrinsic::x86_tileloadd64:   Opc = X86::PTILELOADD; break;
5170      case Intrinsic::x86_tileloaddt164: Opc = X86::PTILELOADDT1; break;
5171      case Intrinsic::x86_tilestored64:  Opc = X86::PTILESTORED; break;
5172      }
5173      // FIXME: Match displacement and scale.
5174      unsigned TIndex = Node->getConstantOperandVal(2);
5175      SDValue TReg = getI8Imm(TIndex, dl);
5176      SDValue Base = Node->getOperand(3);
5177      SDValue Scale = getI8Imm(1, dl);
5178      SDValue Index = Node->getOperand(4);
5179      SDValue Disp = CurDAG->getTargetConstant(0, dl, MVT::i32);
5180      SDValue Segment = CurDAG->getRegister(0, MVT::i16);
5181      SDValue Chain = Node->getOperand(0);
5182      MachineSDNode *CNode;
5183      if (Opc == X86::PTILESTORED) {
5184        SDValue Ops[] = { Base, Scale, Index, Disp, Segment, TReg, Chain };
5185        CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
5186      } else {
5187        SDValue Ops[] = { TReg, Base, Scale, Index, Disp, Segment, Chain };
5188        CNode = CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops);
5189      }
5190      ReplaceNode(Node, CNode);
5191      return;
5192    }
5193    }
5194    break;
5195  }
5196  case ISD::BRIND:
5197  case X86ISD::NT_BRIND: {
5198    if (Subtarget->isTargetNaCl())
      // NaCl has its own pass where jmp %r32 is converted to jmp %r64. We
      // leave the instruction alone.
5201      break;
5202    if (Subtarget->isTarget64BitILP32()) {
5203      // Converts a 32-bit register to a 64-bit, zero-extended version of
5204      // it. This is needed because x86-64 can do many things, but jmp %r32
5205      // ain't one of them.
5206      SDValue Target = Node->getOperand(1);
5207      assert(Target.getValueType() == MVT::i32 && "Unexpected VT!");
5208      SDValue ZextTarget = CurDAG->getZExtOrTrunc(Target, dl, MVT::i64);
5209      SDValue Brind = CurDAG->getNode(Opcode, dl, MVT::Other,
5210                                      Node->getOperand(0), ZextTarget);
5211      ReplaceNode(Node, Brind.getNode());
5212      SelectCode(ZextTarget.getNode());
5213      SelectCode(Brind.getNode());
5214      return;
5215    }
5216    break;
5217  }
5218  case X86ISD::GlobalBaseReg:
5219    ReplaceNode(Node, getGlobalBaseReg());
5220    return;
5221
5222  case ISD::BITCAST:
5223    // Just drop all 128/256/512-bit bitcasts.
5224    if (NVT.is512BitVector() || NVT.is256BitVector() || NVT.is128BitVector() ||
5225        NVT == MVT::f128) {
5226      ReplaceUses(SDValue(Node, 0), Node->getOperand(0));
5227      CurDAG->RemoveDeadNode(Node);
5228      return;
5229    }
5230    break;
5231
5232  case ISD::SRL:
5233    if (matchBitExtract(Node))
5234      return;
5235    [[fallthrough]];
5236  case ISD::SRA:
5237  case ISD::SHL:
5238    if (tryShiftAmountMod(Node))
5239      return;
5240    break;
5241
5242  case X86ISD::VPTERNLOG: {
5243    uint8_t Imm = Node->getConstantOperandVal(3);
5244    if (matchVPTERNLOG(Node, Node, Node, Node, Node->getOperand(0),
5245                       Node->getOperand(1), Node->getOperand(2), Imm))
5246      return;
5247    break;
5248  }
5249
5250  case X86ISD::ANDNP:
5251    if (tryVPTERNLOG(Node))
5252      return;
5253    break;
5254
5255  case ISD::AND:
5256    if (NVT.isVector() && NVT.getVectorElementType() == MVT::i1) {
5257      // Try to form a masked VPTESTM. Operands can be in either order.
5258      SDValue N0 = Node->getOperand(0);
5259      SDValue N1 = Node->getOperand(1);
5260      if (N0.getOpcode() == ISD::SETCC && N0.hasOneUse() &&
5261          tryVPTESTM(Node, N0, N1))
5262        return;
5263      if (N1.getOpcode() == ISD::SETCC && N1.hasOneUse() &&
5264          tryVPTESTM(Node, N1, N0))
5265        return;
5266    }
5267
5268    if (MachineSDNode *NewNode = matchBEXTRFromAndImm(Node)) {
5269      ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
5270      CurDAG->RemoveDeadNode(Node);
5271      return;
5272    }
5273    if (matchBitExtract(Node))
5274      return;
5275    if (AndImmShrink && shrinkAndImmediate(Node))
5276      return;
5277
5278    [[fallthrough]];
5279  case ISD::OR:
5280  case ISD::XOR:
5281    if (tryShrinkShlLogicImm(Node))
5282      return;
5283    if (Opcode == ISD::OR && tryMatchBitSelect(Node))
5284      return;
5285    if (tryVPTERNLOG(Node))
5286      return;
5287
5288    [[fallthrough]];
5289  case ISD::ADD:
5290    if (Opcode == ISD::ADD && matchBitExtract(Node))
5291      return;
5292    [[fallthrough]];
5293  case ISD::SUB: {
    // Try to avoid folding immediates with multiple uses for optsize.
    // This code tries to select the register form directly to avoid going
    // through the isel table, which might fold the immediate. We can't change
    // the add/sub/and/or/xor-with-immediate patterns in the tablegen files to
    // check the immediate use count without making the patterns unavailable to
    // the fast-isel table.
5300    if (!CurDAG->shouldOptForSize())
5301      break;
5302
5303    // Only handle i8/i16/i32/i64.
5304    if (NVT != MVT::i8 && NVT != MVT::i16 && NVT != MVT::i32 && NVT != MVT::i64)
5305      break;
5306
5307    SDValue N0 = Node->getOperand(0);
5308    SDValue N1 = Node->getOperand(1);
5309
5310    auto *Cst = dyn_cast<ConstantSDNode>(N1);
5311    if (!Cst)
5312      break;
5313
5314    int64_t Val = Cst->getSExtValue();
5315
    // Make sure it's an immediate that is considered foldable.
    // FIXME: Handle unsigned 32-bit immediates for 64-bit AND.
5318    if (!isInt<8>(Val) && !isInt<32>(Val))
5319      break;
5320
5321    // If this can match to INC/DEC, let it go.
5322    if (Opcode == ISD::ADD && (Val == 1 || Val == -1))
5323      break;
5324
5325    // Check if we should avoid folding this immediate.
5326    if (!shouldAvoidImmediateInstFormsForSize(N1.getNode()))
5327      break;
5328
5329    // We should not fold the immediate. So we need a register form instead.
5330    unsigned ROpc, MOpc;
5331    switch (NVT.SimpleTy) {
5332    default: llvm_unreachable("Unexpected VT!");
5333    case MVT::i8:
5334      switch (Opcode) {
5335      default: llvm_unreachable("Unexpected opcode!");
5336      case ISD::ADD: ROpc = X86::ADD8rr; MOpc = X86::ADD8rm; break;
5337      case ISD::SUB: ROpc = X86::SUB8rr; MOpc = X86::SUB8rm; break;
5338      case ISD::AND: ROpc = X86::AND8rr; MOpc = X86::AND8rm; break;
5339      case ISD::OR:  ROpc = X86::OR8rr;  MOpc = X86::OR8rm;  break;
5340      case ISD::XOR: ROpc = X86::XOR8rr; MOpc = X86::XOR8rm; break;
5341      }
5342      break;
5343    case MVT::i16:
5344      switch (Opcode) {
5345      default: llvm_unreachable("Unexpected opcode!");
5346      case ISD::ADD: ROpc = X86::ADD16rr; MOpc = X86::ADD16rm; break;
5347      case ISD::SUB: ROpc = X86::SUB16rr; MOpc = X86::SUB16rm; break;
5348      case ISD::AND: ROpc = X86::AND16rr; MOpc = X86::AND16rm; break;
5349      case ISD::OR:  ROpc = X86::OR16rr;  MOpc = X86::OR16rm;  break;
5350      case ISD::XOR: ROpc = X86::XOR16rr; MOpc = X86::XOR16rm; break;
5351      }
5352      break;
5353    case MVT::i32:
5354      switch (Opcode) {
5355      default: llvm_unreachable("Unexpected opcode!");
5356      case ISD::ADD: ROpc = X86::ADD32rr; MOpc = X86::ADD32rm; break;
5357      case ISD::SUB: ROpc = X86::SUB32rr; MOpc = X86::SUB32rm; break;
5358      case ISD::AND: ROpc = X86::AND32rr; MOpc = X86::AND32rm; break;
5359      case ISD::OR:  ROpc = X86::OR32rr;  MOpc = X86::OR32rm;  break;
5360      case ISD::XOR: ROpc = X86::XOR32rr; MOpc = X86::XOR32rm; break;
5361      }
5362      break;
5363    case MVT::i64:
5364      switch (Opcode) {
5365      default: llvm_unreachable("Unexpected opcode!");
5366      case ISD::ADD: ROpc = X86::ADD64rr; MOpc = X86::ADD64rm; break;
5367      case ISD::SUB: ROpc = X86::SUB64rr; MOpc = X86::SUB64rm; break;
5368      case ISD::AND: ROpc = X86::AND64rr; MOpc = X86::AND64rm; break;
5369      case ISD::OR:  ROpc = X86::OR64rr;  MOpc = X86::OR64rm;  break;
5370      case ISD::XOR: ROpc = X86::XOR64rr; MOpc = X86::XOR64rm; break;
5371      }
5372      break;
5373    }
5374
    // OK, this is an AND/OR/XOR/ADD/SUB with a constant.
5376
    // If this is not a subtract, we can still try to fold a load on the LHS
    // (the operation must be commutative for that to be valid).
5378    if (Opcode != ISD::SUB) {
5379      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
5380      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
5381        SDValue Ops[] = { N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
5382        SDVTList VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
5383        MachineSDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
5384        // Update the chain.
5385        ReplaceUses(N0.getValue(1), SDValue(CNode, 2));
5386        // Record the mem-refs
5387        CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N0)->getMemOperand()});
5388        ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
5389        CurDAG->RemoveDeadNode(Node);
5390        return;
5391      }
5392    }
5393
5394    CurDAG->SelectNodeTo(Node, ROpc, NVT, MVT::i32, N0, N1);
5395    return;
5396  }
5397
5398  case X86ISD::SMUL:
5399    // i16/i32/i64 are handled with isel patterns.
5400    if (NVT != MVT::i8)
5401      break;
5402    [[fallthrough]];
5403  case X86ISD::UMUL: {
5404    SDValue N0 = Node->getOperand(0);
5405    SDValue N1 = Node->getOperand(1);
5406
5407    unsigned LoReg, ROpc, MOpc;
5408    switch (NVT.SimpleTy) {
5409    default: llvm_unreachable("Unsupported VT!");
5410    case MVT::i8:
5411      LoReg = X86::AL;
5412      ROpc = Opcode == X86ISD::SMUL ? X86::IMUL8r : X86::MUL8r;
5413      MOpc = Opcode == X86ISD::SMUL ? X86::IMUL8m : X86::MUL8m;
5414      break;
5415    case MVT::i16:
5416      LoReg = X86::AX;
5417      ROpc = X86::MUL16r;
5418      MOpc = X86::MUL16m;
5419      break;
5420    case MVT::i32:
5421      LoReg = X86::EAX;
5422      ROpc = X86::MUL32r;
5423      MOpc = X86::MUL32m;
5424      break;
5425    case MVT::i64:
5426      LoReg = X86::RAX;
5427      ROpc = X86::MUL64r;
5428      MOpc = X86::MUL64m;
5429      break;
5430    }
5431
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool FoldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!FoldedLoad) {
      FoldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (FoldedLoad)
        std::swap(N0, N1);
    }

    SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);

    MachineSDNode *CNode;
    if (FoldedLoad) {
      // i16/i32/i64 use an instruction that produces a low and high result even
      // though only the low result is used.
      SDVTList VTs;
      if (NVT == MVT::i8)
        VTs = CurDAG->getVTList(NVT, MVT::i32, MVT::Other);
      else
        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32, MVT::Other);

      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InGlue };
      CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);

      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, NVT == MVT::i8 ? 2 : 3));
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      // i16/i32/i64 use an instruction that produces a low and high result even
      // though only the low result is used.
      SDVTList VTs;
      if (NVT == MVT::i8)
        VTs = CurDAG->getVTList(NVT, MVT::i32);
      else
        VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);

      CNode = CurDAG->getMachineNode(ROpc, dl, VTs, {N1, InGlue});
    }

    ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(CNode, NVT == MVT::i8 ? 1 : 2));
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned Opc, MOpc;
    unsigned LoReg, HiReg;
    bool IsSigned = Opcode == ISD::SMUL_LOHI;
    bool UseMULX = !IsSigned && Subtarget->hasBMI2();
    bool UseMULXHi = UseMULX && SDValue(Node, 0).use_empty();
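    // MULX (BMI2) reads its implicit multiplicand from EDX/RDX, writes two
    // explicit destinations, and leaves EFLAGS untouched. The Hi-only form is
    // used when the low half of the product is dead.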
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i32:
      Opc = UseMULXHi  ? X86::MULX32Hrr
            : UseMULX  ? GET_EGPR_IF_ENABLED(X86::MULX32rr)
            : IsSigned ? X86::IMUL32r
                       : X86::MUL32r;
      MOpc = UseMULXHi  ? X86::MULX32Hrm
             : UseMULX  ? GET_EGPR_IF_ENABLED(X86::MULX32rm)
             : IsSigned ? X86::IMUL32m
                        : X86::MUL32m;
      LoReg = UseMULX ? X86::EDX : X86::EAX;
      HiReg = X86::EDX;
      break;
    case MVT::i64:
      Opc = UseMULXHi  ? X86::MULX64Hrr
            : UseMULX  ? GET_EGPR_IF_ENABLED(X86::MULX64rr)
            : IsSigned ? X86::IMUL64r
                       : X86::MUL64r;
      MOpc = UseMULXHi  ? X86::MULX64Hrm
             : UseMULX  ? GET_EGPR_IF_ENABLED(X86::MULX64rm)
             : IsSigned ? X86::IMUL64m
                        : X86::MUL64m;
      LoReg = UseMULX ? X86::RDX : X86::RAX;
      HiReg = X86::RDX;
      break;
#undef GET_EGPR_IF_ENABLED
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    // Multiply is commutative.
    if (!foldedLoad) {
      foldedLoad = tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      if (foldedLoad)
        std::swap(N0, N1);
    }

    SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
                                          N0, SDValue()).getValue(1);
    SDValue ResHi, ResLo;
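    // With MULX both halves are explicit results of the machine node; the
    // classic MUL/IMUL forms leave them in LoReg/HiReg, from which they are
    // copied out below only if the corresponding half is actually used.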
    if (foldedLoad) {
      SDValue Chain;
      MachineSDNode *CNode = nullptr;
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InGlue };
      if (UseMULXHi) {
        SDVTList VTs = CurDAG->getVTList(NVT, MVT::Other);
        CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        Chain = SDValue(CNode, 1);
      } else if (UseMULX) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other);
        CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
        Chain = SDValue(CNode, 2);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
        CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops);
        Chain = SDValue(CNode, 0);
        InGlue = SDValue(CNode, 1);
      }

      // Update the chain.
      ReplaceUses(N1.getValue(1), Chain);
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      SDValue Ops[] = { N1, InGlue };
      if (UseMULXHi) {
        SDVTList VTs = CurDAG->getVTList(NVT);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
      } else if (UseMULX) {
        SDVTList VTs = CurDAG->getVTList(NVT, NVT);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        ResHi = SDValue(CNode, 0);
        ResLo = SDValue(CNode, 1);
      } else {
        SDVTList VTs = CurDAG->getVTList(MVT::Glue);
        SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops);
        InGlue = SDValue(CNode, 0);
      }
    }

    // Copy the low half of the result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      if (!ResLo) {
        assert(LoReg && "Register for low half is not defined!");
        ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg,
                                       NVT, InGlue);
        InGlue = ResLo.getValue(2);
      }
      ReplaceUses(SDValue(Node, 0), ResLo);
      LLVM_DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the high half of the result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      if (!ResHi) {
        assert(HiReg && "Register for high half is not defined!");
        ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg,
                                       NVT, InGlue);
        InGlue = ResHi.getValue(2);
      }
      ReplaceUses(SDValue(Node, 1), ResHi);
      LLVM_DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }

    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    unsigned ROpc, MOpc;
    bool isSigned = Opcode == ISD::SDIVREM;
    if (!isSigned) {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  ROpc = X86::DIV8r;  MOpc = X86::DIV8m;  break;
      case MVT::i16: ROpc = X86::DIV16r; MOpc = X86::DIV16m; break;
      case MVT::i32: ROpc = X86::DIV32r; MOpc = X86::DIV32m; break;
      case MVT::i64: ROpc = X86::DIV64r; MOpc = X86::DIV64m; break;
      }
    } else {
      switch (NVT.SimpleTy) {
      default: llvm_unreachable("Unsupported VT!");
      case MVT::i8:  ROpc = X86::IDIV8r;  MOpc = X86::IDIV8m;  break;
      case MVT::i16: ROpc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
      case MVT::i32: ROpc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
      case MVT::i64: ROpc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
      }
    }

    unsigned LoReg, HiReg, ClrReg;
    unsigned SExtOpcode;
    switch (NVT.SimpleTy) {
    default: llvm_unreachable("Unsupported VT!");
    case MVT::i8:
      LoReg = X86::AL;  ClrReg = HiReg = X86::AH;
      SExtOpcode = 0; // Not used.
      break;
    case MVT::i16:
      LoReg = X86::AX;  HiReg = X86::DX;
      ClrReg = X86::DX;
      SExtOpcode = X86::CWD;
      break;
    case MVT::i32:
      LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
      SExtOpcode = X86::CDQ;
      break;
    case MVT::i64:
      LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
      SExtOpcode = X86::CQO;
      break;
    }

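    // x86 division takes a double-width dividend in HiReg:LoReg (e.g. EDX:EAX
    // for 32 bits) and leaves the quotient in LoReg and the remainder in
    // HiReg, so the high half of the dividend must be set up first.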
    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = tryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InGlue;
    if (NVT == MVT::i8) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain;
      MachineSDNode *Move;
      if (tryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
        unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rm8
                                                    : X86::MOVZX16rm8;
        Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, MVT::Other, Ops);
        Chain = SDValue(Move, 1);
        ReplaceUses(N0.getValue(1), Chain);
        // Record the mem-refs
        CurDAG->setNodeMemRefs(Move, {cast<LoadSDNode>(N0)->getMemOperand()});
      } else {
        unsigned Opc = (isSigned && !signBitIsZero) ? X86::MOVSX16rr8
                                                    : X86::MOVZX16rr8;
        Move = CurDAG->getMachineNode(Opc, dl, MVT::i16, N0);
        Chain = CurDAG->getEntryNode();
      }
      Chain  = CurDAG->getCopyToReg(Chain, dl, X86::AX, SDValue(Move, 0),
                                    SDValue());
      InGlue = Chain.getValue(1);
    } else {
      InGlue =
        CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
                             LoReg, N0, SDValue()).getValue(1);
      if (isSigned && !signBitIsZero) {
        // Sign extend the low part into the high part.
        InGlue =
          SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InGlue),0);
      } else {
        // Zero out the high part, effectively zero extending the input.
        SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i32);
        SDValue ClrNode = SDValue(
            CurDAG->getMachineNode(X86::MOV32r0, dl, VTs, std::nullopt), 0);
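        // MOV32r0 is a pseudo that is later expanded to "xor reg, reg";
        // adjust its 32-bit result to the width of ClrReg below.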
        switch (NVT.SimpleTy) {
        case MVT::i16:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::EXTRACT_SUBREG, dl, MVT::i16, ClrNode,
                          CurDAG->getTargetConstant(X86::sub_16bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        case MVT::i32:
          break;
        case MVT::i64:
          ClrNode =
              SDValue(CurDAG->getMachineNode(
                          TargetOpcode::SUBREG_TO_REG, dl, MVT::i64,
                          CurDAG->getTargetConstant(0, dl, MVT::i64), ClrNode,
                          CurDAG->getTargetConstant(X86::sub_32bit, dl,
                                                    MVT::i32)),
                      0);
          break;
        default:
          llvm_unreachable("Unexpected division source");
        }

        InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
                                      ClrNode, InGlue).getValue(1);
      }
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InGlue };
      MachineSDNode *CNode =
        CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops);
      InGlue = SDValue(CNode, 1);
      // Update the chain.
      ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
      // Record the mem-refs
      CurDAG->setNodeMemRefs(CNode, {cast<LoadSDNode>(N1)->getMemOperand()});
    } else {
      InGlue =
        SDValue(CurDAG->getMachineNode(ROpc, dl, MVT::Glue, N1, InGlue), 0);
    }

    // Prevent use of AH in a REX instruction by explicitly copying it to
    // an ABCD_L register.
    //
    // The current assumption of the register allocator is that isel
    // won't generate explicit references to the GR8_ABCD_H registers. If
    // the allocator and/or the backend get enhanced to be more robust in
    // that regard, this can be, and should be, removed.
    if (HiReg == X86::AH && !SDValue(Node, 1).use_empty()) {
      SDValue AHCopy = CurDAG->getRegister(X86::AH, MVT::i8);
      unsigned AHExtOpcode =
          isSigned ? X86::MOVSX32rr8_NOREX : X86::MOVZX32rr8_NOREX;

      SDNode *RNode = CurDAG->getMachineNode(AHExtOpcode, dl, MVT::i32,
                                             MVT::Glue, AHCopy, InGlue);
      SDValue Result(RNode, 0);
      InGlue = SDValue(RNode, 1);

      Result =
          CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result);

      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the division (low) result, if it is needed.
    if (!SDValue(Node, 0).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                                LoReg, NVT, InGlue);
      InGlue = Result.getValue(2);
      ReplaceUses(SDValue(Node, 0), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    // Copy the remainder (high) result, if it is needed.
    if (!SDValue(Node, 1).use_empty()) {
      SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
                                              HiReg, NVT, InGlue);
      InGlue = Result.getValue(2);
      ReplaceUses(SDValue(Node, 1), Result);
      LLVM_DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG);
                 dbgs() << '\n');
    }
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case X86ISD::FCMP:
  case X86ISD::STRICT_FCMP:
  case X86ISD::STRICT_FCMPS: {
    bool IsStrictCmp = Node->getOpcode() == X86ISD::STRICT_FCMP ||
                       Node->getOpcode() == X86ISD::STRICT_FCMPS;
    SDValue N0 = Node->getOperand(IsStrictCmp ? 1 : 0);
    SDValue N1 = Node->getOperand(IsStrictCmp ? 2 : 1);

    // Save the original VT of the compare.
    MVT CmpVT = N0.getSimpleValueType();

    // Floating point needs special handling if we don't have FCOMI; targets
    // with CMOV also have FCOMI, so the isel patterns cover them.
    if (Subtarget->canUseCMOV())
      break;

    bool IsSignaling = Node->getOpcode() == X86ISD::STRICT_FCMPS;

    unsigned Opc;
    switch (CmpVT.SimpleTy) {
    default: llvm_unreachable("Unexpected type!");
    case MVT::f32:
      Opc = IsSignaling ? X86::COM_Fpr32 : X86::UCOM_Fpr32;
      break;
    case MVT::f64:
      Opc = IsSignaling ? X86::COM_Fpr64 : X86::UCOM_Fpr64;
      break;
    case MVT::f80:
      Opc = IsSignaling ? X86::COM_Fpr80 : X86::UCOM_Fpr80;
      break;
    }

    SDValue Chain =
        IsStrictCmp ? Node->getOperand(0) : CurDAG->getEntryNode();
    SDValue Glue;
    if (IsStrictCmp) {
      SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
      Chain = SDValue(CurDAG->getMachineNode(Opc, dl, VTs, {N0, N1, Chain}), 0);
      Glue = Chain.getValue(1);
    } else {
      Glue = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N0, N1), 0);
    }

    // Move FPSW to AX.
    SDValue FNSTSW =
        SDValue(CurDAG->getMachineNode(X86::FNSTSW16r, dl, MVT::i16, Glue), 0);

    // Extract upper 8-bits of AX.
    SDValue Extract =
        CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl, MVT::i8, FNSTSW);

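    // The FPU condition codes C0/C2/C3 sit in bits 8, 10 and 14 of the status
    // word, i.e. in the byte extracted above; SAHF maps them onto CF, PF and
    // ZF respectively.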
    // Move AH into flags.
    // Some 64-bit targets lack SAHF support, but they do support FCOMI.
    assert(Subtarget->canUseLAHFSAHF() &&
           "Target doesn't support SAHF or FCOMI?");
    SDValue AH = CurDAG->getCopyToReg(Chain, dl, X86::AH, Extract, SDValue());
    Chain = AH;
    SDValue SAHF = SDValue(
        CurDAG->getMachineNode(X86::SAHF, dl, MVT::i32, AH.getValue(1)), 0);

    if (IsStrictCmp)
      ReplaceUses(SDValue(Node, 1), Chain);

    ReplaceUses(SDValue(Node, 0), SAHF);
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case X86ISD::CMP: {
    SDValue N0 = Node->getOperand(0);
    SDValue N1 = Node->getOperand(1);

    // Optimizations for TEST compares.
    if (!isNullConstant(N1))
      break;

    // Save the original VT of the compare.
    MVT CmpVT = N0.getSimpleValueType();

    // If we are comparing (and (shr X, C), Mask) with 0, emit a BEXTR followed
    // by a test instruction. The test should be removed later by
    // analyzeCompare if we are using only the zero flag.
    // TODO: Should we check the users and use the BEXTR flags directly?
    if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
      if (MachineSDNode *NewNode = matchBEXTRFromAndImm(N0.getNode())) {
        unsigned TestOpc = CmpVT == MVT::i64 ? X86::TEST64rr
                                             : X86::TEST32rr;
        SDValue BEXTR = SDValue(NewNode, 0);
        NewNode = CurDAG->getMachineNode(TestOpc, dl, MVT::i32, BEXTR, BEXTR);
        ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
        CurDAG->RemoveDeadNode(Node);
        return;
      }
    }

    // We can peek through truncates, but we need to be careful below.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
      N0 = N0.getOperand(0);

    // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
    // use a smaller encoding.
    // Look past the truncate if CMP is the only use of it.
    if (N0.getOpcode() == ISD::AND && N0.getNode()->hasOneUse() &&
        N0.getValueType() != MVT::i8) {
      auto *MaskC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!MaskC)
        break;

      // We may have looked through a truncate so mask off any bits that
      // shouldn't be part of the compare.
      uint64_t Mask = MaskC->getZExtValue();
      Mask &= maskTrailingOnes<uint64_t>(CmpVT.getScalarSizeInBits());

      // Check if we can replace AND+IMM{32,64} with a shift. This is possible
      // for masks like 0xFF000000 or 0x00FFFFFF and if we care only about the
      // zero flag.
      if (CmpVT == MVT::i64 && !isInt<8>(Mask) && isShiftedMask_64(Mask) &&
          onlyUsesZeroFlag(SDValue(Node, 0))) {
        unsigned ShiftOpcode = ISD::DELETED_NODE;
        unsigned ShiftAmt;
        unsigned SubRegIdx;
        MVT SubRegVT;
        unsigned TestOpcode;
        unsigned LeadingZeros = llvm::countl_zero(Mask);
        unsigned TrailingZeros = llvm::countr_zero(Mask);

        // With leading/trailing zeros, the transform is profitable if we can
        // eliminate a movabsq or shrink a 32-bit immediate to 8-bit without
        // incurring any extra register moves.
        bool SavesBytes = !isInt<32>(Mask) || N0.getOperand(0).hasOneUse();
        if (LeadingZeros == 0 && SavesBytes) {
          // If the mask covers the most significant bit, then we can replace
          // TEST+AND with a SHR and check eflags.
          // This emits a redundant TEST which is subsequently eliminated.
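          // E.g. testing (and X, 0xFFFFFFFF00000000) for zero becomes:
          //   shrq $32, %reg
          //   testq %reg, %reg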
          ShiftOpcode = X86::SHR64ri;
          ShiftAmt = TrailingZeros;
          SubRegIdx = 0;
          TestOpcode = X86::TEST64rr;
        } else if (TrailingZeros == 0 && SavesBytes) {
          // If the mask covers the least significant bit, then we can replace
          // TEST+AND with a SHL and check eflags.
          // This emits a redundant TEST which is subsequently eliminated.
          ShiftOpcode = X86::SHL64ri;
          ShiftAmt = LeadingZeros;
          SubRegIdx = 0;
          TestOpcode = X86::TEST64rr;
        } else if (MaskC->hasOneUse() && !isInt<32>(Mask)) {
          // If the shifted mask extends into the high half and is 8/16/32 bits
          // wide, then replace it with a SHR and a TEST8rr/TEST16rr/TEST32rr.
          unsigned PopCount = 64 - LeadingZeros - TrailingZeros;
          if (PopCount == 8) {
            ShiftOpcode = X86::SHR64ri;
            ShiftAmt = TrailingZeros;
            SubRegIdx = X86::sub_8bit;
            SubRegVT = MVT::i8;
            TestOpcode = X86::TEST8rr;
          } else if (PopCount == 16) {
            ShiftOpcode = X86::SHR64ri;
            ShiftAmt = TrailingZeros;
            SubRegIdx = X86::sub_16bit;
            SubRegVT = MVT::i16;
            TestOpcode = X86::TEST16rr;
          } else if (PopCount == 32) {
            ShiftOpcode = X86::SHR64ri;
            ShiftAmt = TrailingZeros;
            SubRegIdx = X86::sub_32bit;
            SubRegVT = MVT::i32;
            TestOpcode = X86::TEST32rr;
          }
        }
        if (ShiftOpcode != ISD::DELETED_NODE) {
          SDValue ShiftC = CurDAG->getTargetConstant(ShiftAmt, dl, MVT::i64);
          SDValue Shift = SDValue(
              CurDAG->getMachineNode(ShiftOpcode, dl, MVT::i64, MVT::i32,
                                     N0.getOperand(0), ShiftC),
              0);
          if (SubRegIdx != 0) {
            Shift =
                CurDAG->getTargetExtractSubreg(SubRegIdx, dl, SubRegVT, Shift);
          }
          MachineSDNode *Test =
              CurDAG->getMachineNode(TestOpcode, dl, MVT::i32, Shift, Shift);
          ReplaceNode(Node, Test);
          return;
        }
      }

      MVT VT;
      int SubRegOp;
      unsigned ROpc, MOpc;

      // For each of these checks we need to be careful if the sign flag is
      // being used. It is only safe to use the sign flag in two conditions,
      // either the sign bit in the shrunken mask is zero or the final test
      // size is equal to the original compare size.

      if (isUInt<8>(Mask) &&
          (!(Mask & 0x80) || CmpVT == MVT::i8 ||
           hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, convert "testl %eax, $8" to "testb %al, $8"
        VT = MVT::i8;
        SubRegOp = X86::sub_8bit;
        ROpc = X86::TEST8ri;
        MOpc = X86::TEST8mi;
      } else if (OptForMinSize && isUInt<16>(Mask) &&
                 (!(Mask & 0x8000) || CmpVT == MVT::i16 ||
                  hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, "testl %eax, $32776" to "testw %ax, $32776".
        // NOTE: We only want to form TESTW instructions if optimizing for
        // min size. Otherwise we only save one byte and possibly get a length
        // changing prefix penalty in the decoders.
        VT = MVT::i16;
        SubRegOp = X86::sub_16bit;
        ROpc = X86::TEST16ri;
        MOpc = X86::TEST16mi;
      } else if (isUInt<32>(Mask) && N0.getValueType() != MVT::i16 &&
                 ((!(Mask & 0x80000000) &&
                   // Without minsize 16-bit Cmps can get here so we need to
                   // be sure we calculate the correct sign flag if needed.
                   (CmpVT != MVT::i16 || !(Mask & 0x8000))) ||
                  CmpVT == MVT::i32 ||
                  hasNoSignFlagUses(SDValue(Node, 0)))) {
        // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
        // NOTE: We only want to run this transform if N0 is 32 or 64 bits.
        // Otherwise, we find ourselves in a position where we have to do
        // promotion. If previous passes did not promote the and, we assume
        // they had a good reason not to and do not promote here.
        VT = MVT::i32;
        SubRegOp = X86::sub_32bit;
        ROpc = X86::TEST32ri;
        MOpc = X86::TEST32mi;
      } else {
        // No eligible transformation was found.
        break;
      }

      SDValue Imm = CurDAG->getTargetConstant(Mask, dl, VT);
      SDValue Reg = N0.getOperand(0);

      // Emit a testl or testw.
      MachineSDNode *NewNode;
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
      if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        if (auto *LoadN = dyn_cast<LoadSDNode>(N0.getOperand(0).getNode())) {
          if (!LoadN->isSimple()) {
            unsigned NumVolBits = LoadN->getValueType(0).getSizeInBits();
            if ((MOpc == X86::TEST8mi && NumVolBits != 8) ||
                (MOpc == X86::TEST16mi && NumVolBits != 16) ||
                (MOpc == X86::TEST32mi && NumVolBits != 32))
              break;
          }
        }
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                          Reg.getOperand(0) };
        NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
        // Update the chain.
        ReplaceUses(Reg.getValue(1), SDValue(NewNode, 1));
        // Record the mem-refs
        CurDAG->setNodeMemRefs(NewNode,
                               {cast<LoadSDNode>(Reg)->getMemOperand()});
      } else {
        // Extract the subregister if necessary.
        if (N0.getValueType() != VT)
          Reg = CurDAG->getTargetExtractSubreg(SubRegOp, dl, VT, Reg);

        NewNode = CurDAG->getMachineNode(ROpc, dl, MVT::i32, Reg, Imm);
      }
      // Replace CMP with TEST.
      ReplaceNode(Node, NewNode);
      return;
    }
    break;
  }
  case X86ISD::PCMPISTR: {
    if (!Subtarget->hasSSE42())
      break;

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

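    // PCMPISTRI leaves its index result in ECX and PCMPISTRM leaves its mask
    // in XMM0, so producing both results takes two instructions over the same
    // inputs.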
    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrr : X86::PCMPISTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRMrm : X86::PCMPISTRMrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrr : X86::PCMPISTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPISTRIrm : X86::PCMPISTRIrm;
      CNode = emitPCMPISTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }

    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::PCMPESTR: {
    if (!Subtarget->hasSSE42())
      break;

    // Copy the two implicit register inputs.
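    // PCMPESTR* read the explicit lengths of the two string operands from EAX
    // and EDX.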
    SDValue InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EAX,
                                          Node->getOperand(1),
                                          SDValue()).getValue(1);
    InGlue = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EDX,
                                  Node->getOperand(3), InGlue).getValue(1);

    bool NeedIndex = !SDValue(Node, 0).use_empty();
    bool NeedMask = !SDValue(Node, 1).use_empty();
    // We can't fold a load if we are going to make two instructions.
    bool MayFoldLoad = !NeedIndex || !NeedMask;

    MachineSDNode *CNode;
    if (NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrr : X86::PCMPESTRMrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRMrm : X86::PCMPESTRMrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::v16i8, Node,
                           InGlue);
      ReplaceUses(SDValue(Node, 1), SDValue(CNode, 0));
    }
    if (NeedIndex || !NeedMask) {
      unsigned ROpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrr : X86::PCMPESTRIrr;
      unsigned MOpc = Subtarget->hasAVX() ? X86::VPCMPESTRIrm : X86::PCMPESTRIrm;
      CNode = emitPCMPESTR(ROpc, MOpc, MayFoldLoad, dl, MVT::i32, Node, InGlue);
      ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
    }
    // Connect the flag usage to the last instruction created.
    ReplaceUses(SDValue(Node, 2), SDValue(CNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }

  case ISD::SETCC: {
    if (NVT.isVector() && tryVPTESTM(Node, SDValue(Node, 0), SDValue()))
      return;

    break;
  }

  case ISD::STORE:
    if (foldLoadStoreIntoMemOperand(Node))
      return;
    break;

  case X86ISD::SETCC_CARRY: {
    MVT VT = Node->getSimpleValueType(0);
    SDValue Result;
    if (Subtarget->hasSBBDepBreaking()) {
      // We have to do this manually because tblgen will put the eflags copy in
      // the wrong place if we use an extract_subreg in the pattern.
      // Copy flags to the EFLAGS register and glue it to the next node.
      SDValue EFLAGS =
          CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, X86::EFLAGS,
                               Node->getOperand(1), SDValue());

      // Create a 64-bit instruction if the result is 64 bits; otherwise use
      // the 32-bit version.
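      // SETB_C32r/SETB_C64r are pseudos that expand to "sbb reg, reg",
      // materializing all-zeros or all-ones from the carry flag.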
      unsigned Opc = VT == MVT::i64 ? X86::SETB_C64r : X86::SETB_C32r;
      MVT SetVT = VT == MVT::i64 ? MVT::i64 : MVT::i32;
      Result = SDValue(
          CurDAG->getMachineNode(Opc, dl, SetVT, EFLAGS, EFLAGS.getValue(1)),
          0);
    } else {
      // The target does not recognize sbb with the same reg operand as a
      // no-source idiom, so we explicitly zero the input values.
      Result = getSBBZero(Node);
    }

    // For less than 32-bits we need to extract from the 32-bit node.
    if (VT == MVT::i8 || VT == MVT::i16) {
      int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
      Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
    }

    ReplaceUses(SDValue(Node, 0), Result);
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::SBB: {
    if (isNullConstant(Node->getOperand(0)) &&
        isNullConstant(Node->getOperand(1))) {
      SDValue Result = getSBBZero(Node);

      // Replace the flag use.
      ReplaceUses(SDValue(Node, 1), Result.getValue(1));

      // Replace the result use.
      if (!SDValue(Node, 0).use_empty()) {
        // For less than 32-bits we need to extract from the 32-bit node.
        MVT VT = Node->getSimpleValueType(0);
        if (VT == MVT::i8 || VT == MVT::i16) {
          int SubIndex = VT == MVT::i16 ? X86::sub_16bit : X86::sub_8bit;
          Result = CurDAG->getTargetExtractSubreg(SubIndex, dl, VT, Result);
        }
        ReplaceUses(SDValue(Node, 0), Result);
      }

      CurDAG->RemoveDeadNode(Node);
      return;
    }
    break;
  }
  case X86ISD::MGATHER: {
    auto *Mgt = cast<X86MaskedGatherSDNode>(Node);
    SDValue IndexOp = Mgt->getIndex();
    SDValue Mask = Mgt->getMask();
    MVT IndexVT = IndexOp.getSimpleValueType();
    MVT ValueVT = Node->getSimpleValueType(0);
    MVT MaskVT = Mask.getSimpleValueType();

    // This is just to prevent crashes if the nodes are malformed somehow.
    // Otherwise we only do loose type checking here, based on what a type
    // constraint would say, just like table-based isel.
    if (!ValueVT.isVector() || !MaskVT.isVector())
      break;

    unsigned NumElts = ValueVT.getVectorNumElements();
    MVT ValueSVT = ValueVT.getVectorElementType();

    bool IsFP = ValueSVT.isFloatingPoint();
    unsigned EltSize = ValueSVT.getSizeInBits();

    unsigned Opc = 0;
    bool AVX512Gather = MaskVT.getVectorElementType() == MVT::i1;
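    // AVX-512 gathers take a vXi1 mask in a k-register; the AVX2 forms take a
    // vector mask in an XMM/YMM register, which is passed after the address
    // operands below.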
    if (AVX512Gather) {
      if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERDPSZ128rm : X86::VPGATHERDDZ128rm;
      else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERDPSZ256rm : X86::VPGATHERDDZ256rm;
      else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERDPSZrm : X86::VPGATHERDDZrm;
      else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERDPDZ128rm : X86::VPGATHERDQZ128rm;
      else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERDPDZ256rm : X86::VPGATHERDQZ256rm;
      else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERDPDZrm : X86::VPGATHERDQZrm;
      else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERQPSZ128rm : X86::VPGATHERQDZ128rm;
      else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERQPSZ256rm : X86::VPGATHERQDZ256rm;
      else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERQPSZrm : X86::VPGATHERQDZrm;
      else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERQPDZ128rm : X86::VPGATHERQQZ128rm;
      else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERQPDZ256rm : X86::VPGATHERQQZ256rm;
      else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERQPDZrm : X86::VPGATHERQQZrm;
    } else {
      assert(EVT(MaskVT) == EVT(ValueVT).changeVectorElementTypeToInteger() &&
             "Unexpected mask VT!");
      if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERDPSrm : X86::VPGATHERDDrm;
      else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERDPSYrm : X86::VPGATHERDDYrm;
      else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERDPDrm : X86::VPGATHERDQrm;
      else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERDPDYrm : X86::VPGATHERDQYrm;
      else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERQPSrm : X86::VPGATHERQDrm;
      else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
        Opc = IsFP ? X86::VGATHERQPSYrm : X86::VPGATHERQDYrm;
      else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERQPDrm : X86::VPGATHERQQrm;
      else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
        Opc = IsFP ? X86::VGATHERQPDYrm : X86::VPGATHERQQYrm;
    }

    if (!Opc)
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!selectVectorAddr(Mgt, Mgt->getBasePtr(), IndexOp, Mgt->getScale(),
                          Base, Scale, Index, Disp, Segment))
      break;

    SDValue PassThru = Mgt->getPassThru();
    SDValue Chain = Mgt->getChain();
    // Gather instructions have a mask output not in the ISD node.
    SDVTList VTs = CurDAG->getVTList(ValueVT, MaskVT, MVT::Other);

    MachineSDNode *NewNode;
    if (AVX512Gather) {
      SDValue Ops[] = {PassThru, Mask, Base,    Scale,
                       Index,    Disp, Segment, Chain};
      NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
    } else {
      SDValue Ops[] = {PassThru, Base,    Scale, Index,
                       Disp,     Segment, Mask,  Chain};
      NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
    }
    CurDAG->setNodeMemRefs(NewNode, {Mgt->getMemOperand()});
    ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 0));
    ReplaceUses(SDValue(Node, 1), SDValue(NewNode, 2));
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::MSCATTER: {
    auto *Sc = cast<X86MaskedScatterSDNode>(Node);
    SDValue Value = Sc->getValue();
    SDValue IndexOp = Sc->getIndex();
    MVT IndexVT = IndexOp.getSimpleValueType();
    MVT ValueVT = Value.getSimpleValueType();

    // This is just to prevent crashes if the nodes are malformed somehow.
    // Otherwise we only do loose type checking here, based on what a type
    // constraint would say, just like table-based isel.
    if (!ValueVT.isVector())
      break;

    unsigned NumElts = ValueVT.getVectorNumElements();
    MVT ValueSVT = ValueVT.getVectorElementType();

    bool IsFP = ValueSVT.isFloatingPoint();
    unsigned EltSize = ValueSVT.getSizeInBits();

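    // Scatter instructions only exist in AVX-512, so the mask is always a
    // vXi1 value held in a k-register.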
    unsigned Opc;
    if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERDPSZ128mr : X86::VPSCATTERDDZ128mr;
    else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERDPSZ256mr : X86::VPSCATTERDDZ256mr;
    else if (IndexVT == MVT::v16i32 && NumElts == 16 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERDPSZmr : X86::VPSCATTERDDZmr;
    else if (IndexVT == MVT::v4i32 && NumElts == 2 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERDPDZ128mr : X86::VPSCATTERDQZ128mr;
    else if (IndexVT == MVT::v4i32 && NumElts == 4 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERDPDZ256mr : X86::VPSCATTERDQZ256mr;
    else if (IndexVT == MVT::v8i32 && NumElts == 8 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERDPDZmr : X86::VPSCATTERDQZmr;
    else if (IndexVT == MVT::v2i64 && NumElts == 4 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERQPSZ128mr : X86::VPSCATTERQDZ128mr;
    else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERQPSZ256mr : X86::VPSCATTERQDZ256mr;
    else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 32)
      Opc = IsFP ? X86::VSCATTERQPSZmr : X86::VPSCATTERQDZmr;
    else if (IndexVT == MVT::v2i64 && NumElts == 2 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERQPDZ128mr : X86::VPSCATTERQQZ128mr;
    else if (IndexVT == MVT::v4i64 && NumElts == 4 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERQPDZ256mr : X86::VPSCATTERQQZ256mr;
    else if (IndexVT == MVT::v8i64 && NumElts == 8 && EltSize == 64)
      Opc = IsFP ? X86::VSCATTERQPDZmr : X86::VPSCATTERQQZmr;
    else
      break;

    SDValue Base, Scale, Index, Disp, Segment;
    if (!selectVectorAddr(Sc, Sc->getBasePtr(), IndexOp, Sc->getScale(),
                          Base, Scale, Index, Disp, Segment))
      break;

    SDValue Mask = Sc->getMask();
    SDValue Chain = Sc->getChain();
    // Scatter instructions have a mask output not in the ISD node.
    SDVTList VTs = CurDAG->getVTList(Mask.getValueType(), MVT::Other);
    SDValue Ops[] = {Base, Scale, Index, Disp, Segment, Mask, Value, Chain};

    MachineSDNode *NewNode = CurDAG->getMachineNode(Opc, SDLoc(dl), VTs, Ops);
    CurDAG->setNodeMemRefs(NewNode, {Sc->getMemOperand()});
    ReplaceUses(SDValue(Node, 0), SDValue(NewNode, 1));
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case ISD::PREALLOCATED_SETUP: {
    auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
    auto CallId = MFI->getPreallocatedIdForCallSite(
        cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
    SDValue Chain = Node->getOperand(0);
    SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
    MachineSDNode *New = CurDAG->getMachineNode(
        TargetOpcode::PREALLOCATED_SETUP, dl, MVT::Other, CallIdValue, Chain);
    ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Chain
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case ISD::PREALLOCATED_ARG: {
    auto *MFI = CurDAG->getMachineFunction().getInfo<X86MachineFunctionInfo>();
    auto CallId = MFI->getPreallocatedIdForCallSite(
        cast<SrcValueSDNode>(Node->getOperand(1))->getValue());
    SDValue Chain = Node->getOperand(0);
    SDValue CallIdValue = CurDAG->getTargetConstant(CallId, dl, MVT::i32);
    SDValue ArgIndex = Node->getOperand(2);
    SDValue Ops[3];
    Ops[0] = CallIdValue;
    Ops[1] = ArgIndex;
    Ops[2] = Chain;
    MachineSDNode *New = CurDAG->getMachineNode(
        TargetOpcode::PREALLOCATED_ARG, dl,
        CurDAG->getVTList(TLI->getPointerTy(CurDAG->getDataLayout()),
                          MVT::Other),
        Ops);
    ReplaceUses(SDValue(Node, 0), SDValue(New, 0)); // Arg pointer
    ReplaceUses(SDValue(Node, 1), SDValue(New, 1)); // Chain
    CurDAG->RemoveDeadNode(Node);
    return;
  }
  case X86ISD::AESENCWIDE128KL:
  case X86ISD::AESDECWIDE128KL:
  case X86ISD::AESENCWIDE256KL:
  case X86ISD::AESDECWIDE256KL: {
    if (!Subtarget->hasWIDEKL())
      break;

    unsigned Opcode;
    switch (Node->getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode!");
    case X86ISD::AESENCWIDE128KL:
      Opcode = X86::AESENCWIDE128KL;
      break;
    case X86ISD::AESDECWIDE128KL:
      Opcode = X86::AESDECWIDE128KL;
      break;
    case X86ISD::AESENCWIDE256KL:
      Opcode = X86::AESENCWIDE256KL;
      break;
    case X86ISD::AESDECWIDE256KL:
      Opcode = X86::AESDECWIDE256KL;
      break;
    }

    SDValue Chain = Node->getOperand(0);
    SDValue Addr = Node->getOperand(1);

    SDValue Base, Scale, Index, Disp, Segment;
    if (!selectAddr(Node, Addr, Base, Scale, Index, Disp, Segment))
      break;

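    // The WIDE key locker instructions implicitly operate on XMM0-XMM7, so
    // wire the eight data operands through those registers, threading the
    // chain and glue through each copy.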
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM0, Node->getOperand(2),
                                 SDValue());
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM1, Node->getOperand(3),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM2, Node->getOperand(4),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM3, Node->getOperand(5),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM4, Node->getOperand(6),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM5, Node->getOperand(7),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM6, Node->getOperand(8),
                                 Chain.getValue(1));
    Chain = CurDAG->getCopyToReg(Chain, dl, X86::XMM7, Node->getOperand(9),
                                 Chain.getValue(1));

    MachineSDNode *Res = CurDAG->getMachineNode(
        Opcode, dl, Node->getVTList(),
        {Base, Scale, Index, Disp, Segment, Chain, Chain.getValue(1)});
    CurDAG->setNodeMemRefs(Res, cast<MemSDNode>(Node)->getMemOperand());
    ReplaceNode(Node, Res);
    return;
  }
  }

  SelectCode(Node);
}

bool X86DAGToDAGISel::SelectInlineAsmMemoryOperand(
    const SDValue &Op, InlineAsm::ConstraintCode ConstraintID,
    std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  switch (ConstraintID) {
  default:
    llvm_unreachable("Unexpected asm memory constraint");
  case InlineAsm::ConstraintCode::o: // offsetable        ??
  case InlineAsm::ConstraintCode::v: // not offsetable    ??
  case InlineAsm::ConstraintCode::m: // memory
  case InlineAsm::ConstraintCode::X:
  case InlineAsm::ConstraintCode::p: // address
    if (!selectAddr(nullptr, Op, Op0, Op1, Op2, Op3, Op4))
      return true;
    break;
  }

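  // A selected x86 memory operand is always the five-tuple
  // (base, scale, index, displacement, segment).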
  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}

/// This pass converts a legalized DAG into an X86-specific DAG,
/// ready for instruction scheduling.
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
                                     CodeGenOptLevel OptLevel) {
  return new X86DAGToDAGISel(TM, OptLevel);
}
