// X86EvexToVex.cpp — LLVM revision 360784
1//===- X86EvexToVex.cpp ---------------------------------------------------===//
2// Compress EVEX instructions to VEX encoding when possible to reduce code size
3//
4// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5// See https://llvm.org/LICENSE.txt for license information.
6// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// This file defines the pass that goes over all AVX-512 instructions which
12/// are encoded using the EVEX prefix and if possible replaces them by their
13/// corresponding VEX encoding which is usually shorter by 2 bytes.
14/// EVEX instructions may be encoded via the VEX prefix when the AVX-512
15/// instruction has a corresponding AVX/AVX2 opcode, when vector length
16/// accessed by instruction is less than 512 bits and when it does not use
/// the zmm or the mask registers, or xmm/ymm registers with indexes higher than 15.
18/// The pass applies code reduction on the generated code for AVX-512 instrs.
19//
20//===----------------------------------------------------------------------===//
21
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86InstComments.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include <atomic>
#include <cassert>
#include <cstdint>
36
37using namespace llvm;
38
39// Including the generated EVEX2VEX tables.
40struct X86EvexToVexCompressTableEntry {
41  uint16_t EvexOpcode;
42  uint16_t VexOpcode;
43
44  bool operator<(const X86EvexToVexCompressTableEntry &RHS) const {
45    return EvexOpcode < RHS.EvexOpcode;
46  }
47
48  friend bool operator<(const X86EvexToVexCompressTableEntry &TE,
49                        unsigned Opc) {
50    return TE.EvexOpcode < Opc;
51  }
52};
53#include "X86GenEVEX2VEXTables.inc"
54
55#define EVEX2VEX_DESC "Compressing EVEX instrs to VEX encoding when possible"
56#define EVEX2VEX_NAME "x86-evex-to-vex-compress"
57
58#define DEBUG_TYPE EVEX2VEX_NAME
59
60namespace {
61
62class EvexToVexInstPass : public MachineFunctionPass {
63
64  /// For EVEX instructions that can be encoded using VEX encoding, replace
65  /// them by the VEX encoding in order to reduce size.
66  bool CompressEvexToVexImpl(MachineInstr &MI) const;
67
68public:
69  static char ID;
70
71  EvexToVexInstPass() : MachineFunctionPass(ID) { }
72
73  StringRef getPassName() const override { return EVEX2VEX_DESC; }
74
75  /// Loop over all of the basic blocks, replacing EVEX instructions
76  /// by equivalent VEX instructions when possible for reducing code size.
77  bool runOnMachineFunction(MachineFunction &MF) override;
78
79  // This pass runs after regalloc and doesn't support VReg operands.
80  MachineFunctionProperties getRequiredProperties() const override {
81    return MachineFunctionProperties().set(
82        MachineFunctionProperties::Property::NoVRegs);
83  }
84
85private:
86  /// Machine instruction info used throughout the class.
87  const X86InstrInfo *TII = nullptr;
88};
89
90} // end anonymous namespace
91
92char EvexToVexInstPass::ID = 0;
93
94bool EvexToVexInstPass::runOnMachineFunction(MachineFunction &MF) {
95  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
96
97  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
98  if (!ST.hasAVX512())
99    return false;
100
101  bool Changed = false;
102
103  /// Go over all basic blocks in function and replace
104  /// EVEX encoded instrs by VEX encoding when possible.
105  for (MachineBasicBlock &MBB : MF) {
106
107    // Traverse the basic block.
108    for (MachineInstr &MI : MBB)
109      Changed |= CompressEvexToVexImpl(MI);
110  }
111
112  return Changed;
113}
114
115static bool usesExtendedRegister(const MachineInstr &MI) {
116  auto isHiRegIdx = [](unsigned Reg) {
117    // Check for XMM register with indexes between 16 - 31.
118    if (Reg >= X86::XMM16 && Reg <= X86::XMM31)
119      return true;
120
121    // Check for YMM register with indexes between 16 - 31.
122    if (Reg >= X86::YMM16 && Reg <= X86::YMM31)
123      return true;
124
125    return false;
126  };
127
128  // Check that operands are not ZMM regs or
129  // XMM/YMM regs with hi indexes between 16 - 31.
130  for (const MachineOperand &MO : MI.explicit_operands()) {
131    if (!MO.isReg())
132      continue;
133
134    Register Reg = MO.getReg();
135
136    assert(!(Reg >= X86::ZMM0 && Reg <= X86::ZMM31) &&
137           "ZMM instructions should not be in the EVEX->VEX tables");
138
139    if (isHiRegIdx(Reg))
140      return true;
141  }
142
143  return false;
144}
145
146// Do any custom cleanup needed to finalize the conversion.
147static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
148  (void)NewOpc;
149  unsigned Opc = MI.getOpcode();
150  switch (Opc) {
151  case X86::VALIGNDZ128rri:
152  case X86::VALIGNDZ128rmi:
153  case X86::VALIGNQZ128rri:
154  case X86::VALIGNQZ128rmi: {
155    assert((NewOpc == X86::VPALIGNRrri || NewOpc == X86::VPALIGNRrmi) &&
156           "Unexpected new opcode!");
157    unsigned Scale = (Opc == X86::VALIGNQZ128rri ||
158                      Opc == X86::VALIGNQZ128rmi) ? 8 : 4;
159    MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
160    Imm.setImm(Imm.getImm() * Scale);
161    break;
162  }
163  case X86::VSHUFF32X4Z256rmi:
164  case X86::VSHUFF32X4Z256rri:
165  case X86::VSHUFF64X2Z256rmi:
166  case X86::VSHUFF64X2Z256rri:
167  case X86::VSHUFI32X4Z256rmi:
168  case X86::VSHUFI32X4Z256rri:
169  case X86::VSHUFI64X2Z256rmi:
170  case X86::VSHUFI64X2Z256rri: {
171    assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr ||
172            NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) &&
173           "Unexpected new opcode!");
174    MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
175    int64_t ImmVal = Imm.getImm();
176    // Set bit 5, move bit 1 to bit 4, copy bit 0.
177    Imm.setImm(0x20 | ((ImmVal & 2) << 3) | (ImmVal & 1));
178    break;
179  }
180  case X86::VRNDSCALEPDZ128rri:
181  case X86::VRNDSCALEPDZ128rmi:
182  case X86::VRNDSCALEPSZ128rri:
183  case X86::VRNDSCALEPSZ128rmi:
184  case X86::VRNDSCALEPDZ256rri:
185  case X86::VRNDSCALEPDZ256rmi:
186  case X86::VRNDSCALEPSZ256rri:
187  case X86::VRNDSCALEPSZ256rmi:
188  case X86::VRNDSCALESDZr:
189  case X86::VRNDSCALESDZm:
190  case X86::VRNDSCALESSZr:
191  case X86::VRNDSCALESSZm:
192  case X86::VRNDSCALESDZr_Int:
193  case X86::VRNDSCALESDZm_Int:
194  case X86::VRNDSCALESSZr_Int:
195  case X86::VRNDSCALESSZm_Int:
196    const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
197    int64_t ImmVal = Imm.getImm();
198    // Ensure that only bits 3:0 of the immediate are used.
199    if ((ImmVal & 0xf) != ImmVal)
200      return false;
201    break;
202  }
203
204  return true;
205}
206
207
208// For EVEX instructions that can be encoded using VEX encoding
209// replace them by the VEX encoding in order to reduce size.
210bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
211  // VEX format.
212  // # of bytes: 0,2,3  1      1      0,1   0,1,2,4  0,1
213  //  [Prefixes] [VEX]  OPCODE ModR/M [SIB] [DISP]  [IMM]
214  //
215  // EVEX format.
216  //  # of bytes: 4    1      1      1      4       / 1         1
217  //  [Prefixes]  EVEX Opcode ModR/M [SIB] [Disp32] / [Disp8*N] [Immediate]
218
219  const MCInstrDesc &Desc = MI.getDesc();
220
221  // Check for EVEX instructions only.
222  if ((Desc.TSFlags & X86II::EncodingMask) != X86II::EVEX)
223    return false;
224
225  // Check for EVEX instructions with mask or broadcast as in these cases
226  // the EVEX prefix is needed in order to carry this information
227  // thus preventing the transformation to VEX encoding.
228  if (Desc.TSFlags & (X86II::EVEX_K | X86II::EVEX_B))
229    return false;
230
231  // Check for EVEX instructions with L2 set. These instructions are 512-bits
232  // and can't be converted to VEX.
233  if (Desc.TSFlags & X86II::EVEX_L2)
234    return false;
235
236#ifndef NDEBUG
237  // Make sure the tables are sorted.
238  static std::atomic<bool> TableChecked(false);
239  if (!TableChecked.load(std::memory_order_relaxed)) {
240    assert(std::is_sorted(std::begin(X86EvexToVex128CompressTable),
241                          std::end(X86EvexToVex128CompressTable)) &&
242           "X86EvexToVex128CompressTable is not sorted!");
243    assert(std::is_sorted(std::begin(X86EvexToVex256CompressTable),
244                          std::end(X86EvexToVex256CompressTable)) &&
245           "X86EvexToVex256CompressTable is not sorted!");
246    TableChecked.store(true, std::memory_order_relaxed);
247  }
248#endif
249
250  // Use the VEX.L bit to select the 128 or 256-bit table.
251  ArrayRef<X86EvexToVexCompressTableEntry> Table =
252    (Desc.TSFlags & X86II::VEX_L) ? makeArrayRef(X86EvexToVex256CompressTable)
253                                  : makeArrayRef(X86EvexToVex128CompressTable);
254
255  auto I = llvm::lower_bound(Table, MI.getOpcode());
256  if (I == Table.end() || I->EvexOpcode != MI.getOpcode())
257    return false;
258
259  unsigned NewOpc = I->VexOpcode;
260
261  if (usesExtendedRegister(MI))
262    return false;
263
264  if (!performCustomAdjustments(MI, NewOpc))
265    return false;
266
267  MI.setDesc(TII->get(NewOpc));
268  MI.setAsmPrinterFlag(X86::AC_EVEX_2_VEX);
269  return true;
270}
271
272INITIALIZE_PASS(EvexToVexInstPass, EVEX2VEX_NAME, EVEX2VEX_DESC, false, false)
273
274FunctionPass *llvm::createX86EvexToVexInsts() {
275  return new EvexToVexInstPass();
276}
277