// IntrinsicsAArch64.td, revision 360784
//===- IntrinsicsAARCH64.td - Defines AARCH64 intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the AARCH64-specific intrinsics.
//
//===----------------------------------------------------------------------===//

let TargetPrefix = "aarch64" in {

// Exclusive load/store (the names map onto the LDXR/LDAXR/STXR/STLXR family).
def int_aarch64_ldxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_ldaxr : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty]>;
def int_aarch64_stxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;
def int_aarch64_stlxr : Intrinsic<[llvm_i32_ty], [llvm_i64_ty, llvm_anyptr_ty]>;

// Exclusive load/store of a pair of i64 values.
def int_aarch64_ldxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_ldaxp : Intrinsic<[llvm_i64_ty, llvm_i64_ty], [llvm_ptr_ty]>;
def int_aarch64_stxp : Intrinsic<[llvm_i32_ty],
                               [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;
def int_aarch64_stlxp : Intrinsic<[llvm_i32_ty],
                                [llvm_i64_ty, llvm_i64_ty, llvm_ptr_ty]>;

// Clear the local monitor's exclusive state.
def int_aarch64_clrex : Intrinsic<[]>;

// Integer division on any integer type; both operands match the result type.
def int_aarch64_sdiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;
def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
                                LLVMMatchType<0>], [IntrNoMem]>;

// FJCVTZS: double -> i32 conversion.
def int_aarch64_fjcvtzs : Intrinsic<[llvm_i32_ty], [llvm_double_ty], [IntrNoMem]>;

// CLS: count leading sign bits (32- and 64-bit scalar forms).
def int_aarch64_cls: Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_aarch64_cls64: Intrinsic<[llvm_i32_ty], [llvm_i64_ty], [IntrNoMem]>;

//===----------------------------------------------------------------------===//
// HINT

def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;

//===----------------------------------------------------------------------===//
// Data Barrier Instructions

def int_aarch64_dmb : GCCBuiltin<"__builtin_arm_dmb">, MSBuiltin<"__dmb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_dsb : GCCBuiltin<"__builtin_arm_dsb">, MSBuiltin<"__dsb">, Intrinsic<[], [llvm_i32_ty]>;
def int_aarch64_isb : GCCBuiltin<"__builtin_arm_isb">, MSBuiltin<"__isb">, Intrinsic<[], [llvm_i32_ty]>;

// A space-consuming intrinsic primarily for testing block and jump table
// placements. The first argument is the number of bytes this "instruction"
// takes up, the second and return value are essentially chains, used to force
// ordering during ISel.
def int_aarch64_space : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty], []>;

}

//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Helper classes describing the common AdvSIMD intrinsic signatures.
  // Naming convention: the arity prefix counts value operands; "Long"/"Wide"/
  // "Narrow" describe the element-width relationship between result and
  // operands via LLVMTruncatedType/LLVMExtendedType.
  class AdvSIMD_2Scalar_Float_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;

  class AdvSIMD_FPToIntRounding_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]>;

  class AdvSIMD_1IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Expand_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMTruncatedType<0>], [IntrNoMem]>;
  class AdvSIMD_1IntArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMExtendedType<0>], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Int_Across_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyvector_ty], [IntrNoMem]>;
  class AdvSIMD_1VectorArg_Float_Across_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyvector_ty], [IntrNoMem]>;

  class AdvSIMD_2IntArg_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2FloatArg_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Compare_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_FloatCompare_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMExtendedType<0>, LLVMExtendedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2Arg_Scalar_Narrow_Intrinsic
    : Intrinsic<[llvm_anyint_ty],
                [LLVMExtendedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Expand_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMTruncatedType<0>, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_2VectorArg_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty],
                [IntrNoMem]>;

  class AdvSIMD_3VectorArg_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty],
               [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Tied_Narrow_Intrinsic
      : Intrinsic<[llvm_anyvector_ty],
               [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty,
                LLVMMatchType<1>], [IntrNoMem]>;
  class AdvSIMD_3VectorArg_Scalar_Tied_Narrow_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMHalfElementsVectorType<0>, llvm_anyvector_ty, llvm_i32_ty],
                [IntrNoMem]>;
  // Fixed-point <-> floating-point conversions; the trailing i32 is the
  // number of fractional bits.
  class AdvSIMD_CvtFxToFP_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty], [llvm_anyint_ty, llvm_i32_ty],
                [IntrNoMem]>;
  class AdvSIMD_CvtFPToFx_Intrinsic
    : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, llvm_i32_ty],
                [IntrNoMem]>;

  class AdvSIMD_1Arg_Intrinsic
    : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>], [IntrNoMem]>;

  class AdvSIMD_Dot_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;

  class AdvSIMD_FP16FML_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
}

// Arithmetic ops

// All of these default to IntrNoMem; int_aarch64_neon_fmulx overrides its
// IntrProperties locally below.
let TargetPrefix = "aarch64", IntrProperties = [IntrNoMem] in {
  // Vector Add Across Lanes
  def int_aarch64_neon_saddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_faddv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Long Add Across Lanes
  def int_aarch64_neon_saddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uaddlv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;

  // Vector Halving Add
  def int_aarch64_neon_shadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Rounding Halving Add
  def int_aarch64_neon_srhadd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_urhadd : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Add
  def int_aarch64_neon_sqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_suqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_usqadd : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqadd : AdvSIMD_2IntArg_Intrinsic;

  // Vector Add High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_addhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Add High-Half
  def int_aarch64_neon_raddhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Saturating Doubling Multiply High
  def int_aarch64_neon_sqdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Doubling Multiply High
  def int_aarch64_neon_sqrdmulh : AdvSIMD_2IntArg_Intrinsic;

  // Vector Polynomial Multiply
  def int_aarch64_neon_pmul : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Long Multiply
  def int_aarch64_neon_smull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_umull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_pmull : AdvSIMD_2VectorArg_Long_Intrinsic;

  // 64-bit polynomial multiply really returns an i128, which is not legal. Fake
  // it with a v16i8.
  def int_aarch64_neon_pmull64 :
        Intrinsic<[llvm_v16i8_ty], [llvm_i64_ty, llvm_i64_ty], [IntrNoMem]>;

  // Vector Extending Multiply
  def int_aarch64_neon_fmulx : AdvSIMD_2FloatArg_Intrinsic {
    let IntrProperties = [IntrNoMem, Commutative];
  }

  // Vector Saturating Doubling Long Multiply
  def int_aarch64_neon_sqdmull : AdvSIMD_2VectorArg_Long_Intrinsic;
  def int_aarch64_neon_sqdmulls_scalar
    : Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

  // Vector Halving Subtract
  def int_aarch64_neon_shsub : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uhsub : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Saturating Subtract
  def int_aarch64_neon_sqsub : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqsub : AdvSIMD_2IntArg_Intrinsic;

  // Vector Subtract High-Half
  // FIXME: this is a legacy intrinsic for aarch64_simd.h. Remove it when that
  // header is no longer supported.
  def int_aarch64_neon_subhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Rounding Subtract High-Half
  def int_aarch64_neon_rsubhn : AdvSIMD_2VectorArg_Narrow_Intrinsic;

  // Vector Compare Absolute Greater-than-or-equal
  def int_aarch64_neon_facge : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Compare Absolute Greater-than
  def int_aarch64_neon_facgt : AdvSIMD_2Arg_FloatCompare_Intrinsic;

  // Vector Absolute Difference
  def int_aarch64_neon_sabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uabd : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fabd : AdvSIMD_2VectorArg_Intrinsic;

  // Scalar Absolute Difference
  def int_aarch64_sisd_fabd : AdvSIMD_2Scalar_Float_Intrinsic;

  // Vector Max
  def int_aarch64_neon_smax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umax : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmax : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Max Across Lanes
  def int_aarch64_neon_smaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_umaxv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fmaxv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fmaxnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Vector Min
  def int_aarch64_neon_smin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umin : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmin : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fminnmp : AdvSIMD_2VectorArg_Intrinsic;

  // Vector Min/Max Number
  def int_aarch64_neon_fminnm : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_fmaxnm : AdvSIMD_2FloatArg_Intrinsic;

  // Vector Min Across Lanes
  def int_aarch64_neon_sminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_uminv : AdvSIMD_1VectorArg_Int_Across_Intrinsic;
  def int_aarch64_neon_fminv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;
  def int_aarch64_neon_fminnmv : AdvSIMD_1VectorArg_Float_Across_Intrinsic;

  // Pairwise Add
  def int_aarch64_neon_addp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_faddp : AdvSIMD_2VectorArg_Intrinsic;

  // Long Pairwise Add
  // FIXME: In theory, we shouldn't need intrinsics for saddlp or
  // uaddlp, but tblgen's type inference currently can't handle the
  // pattern fragments this ends up generating.
  def int_aarch64_neon_saddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;
  def int_aarch64_neon_uaddlp : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Folding Maximum
  def int_aarch64_neon_smaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_umaxp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fmaxp : AdvSIMD_2VectorArg_Intrinsic;

  // Folding Minimum
  def int_aarch64_neon_sminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_uminp : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_fminp : AdvSIMD_2VectorArg_Intrinsic;

  // Reciprocal Estimate/Step
  def int_aarch64_neon_frecps : AdvSIMD_2FloatArg_Intrinsic;
  def int_aarch64_neon_frsqrts : AdvSIMD_2FloatArg_Intrinsic;

  // Reciprocal Exponent
  def int_aarch64_neon_frecpx : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Saturating Shift Left
  def int_aarch64_neon_sqshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Rounding Shift Left
  def int_aarch64_neon_srshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_urshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Saturating Rounding Shift Left
  def int_aarch64_neon_sqrshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_uqrshl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Shift Left by Constant
  def int_aarch64_neon_sqshlu : AdvSIMD_2IntArg_Intrinsic;

  // Vector Signed->Unsigned Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Signed->Unsigned Rounding Narrowing Saturating Shift Right by Const
  def int_aarch64_neon_sqrshrun : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Narrowing Shift Right by Constant
  def int_aarch64_neon_sqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Shift Right by Constant
  def int_aarch64_neon_rshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Rounding Narrowing Saturating Shift Right by Constant
  def int_aarch64_neon_sqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;
  def int_aarch64_neon_uqrshrn : AdvSIMD_2Arg_Scalar_Narrow_Intrinsic;

  // Vector Shift Left
  def int_aarch64_neon_sshl : AdvSIMD_2IntArg_Intrinsic;
  def int_aarch64_neon_ushl : AdvSIMD_2IntArg_Intrinsic;

  // Vector Widening Shift Left by Constant
  def int_aarch64_neon_shll : AdvSIMD_2VectorArg_Scalar_Wide_BySize_Intrinsic;
  def int_aarch64_neon_sshll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;
  def int_aarch64_neon_ushll : AdvSIMD_2VectorArg_Scalar_Wide_Intrinsic;

  // Vector Shift Right by Constant and Insert
  def int_aarch64_neon_vsri : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Shift Left by Constant and Insert
  def int_aarch64_neon_vsli : AdvSIMD_3VectorArg_Scalar_Intrinsic;

  // Vector Saturating Narrow
  def int_aarch64_neon_scalar_sqxtn: AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_scalar_uqxtn : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;
  def int_aarch64_neon_uqxtn : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Saturating Extract and Unsigned Narrow
  def int_aarch64_neon_scalar_sqxtun : AdvSIMD_1IntArg_Narrow_Intrinsic;
  def int_aarch64_neon_sqxtun : AdvSIMD_1VectorArg_Narrow_Intrinsic;

  // Vector Absolute Value
  def int_aarch64_neon_abs : AdvSIMD_1Arg_Intrinsic;

  // Vector Saturating Absolute Value
  def int_aarch64_neon_sqabs : AdvSIMD_1IntArg_Intrinsic;

  // Vector Saturating Negation
  def int_aarch64_neon_sqneg : AdvSIMD_1IntArg_Intrinsic;

  // Vector Count Leading Sign Bits
  def int_aarch64_neon_cls : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Reciprocal Estimate
  def int_aarch64_neon_urecpe : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frecpe : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Square Root Estimate
  def int_aarch64_neon_ursqrte : AdvSIMD_1VectorArg_Intrinsic;
  def int_aarch64_neon_frsqrte : AdvSIMD_1FloatArg_Intrinsic;

  // Vector Bitwise Reverse
  def int_aarch64_neon_rbit : AdvSIMD_1VectorArg_Intrinsic;

  // Vector Conversions Between Half-Precision and Single-Precision.
  def int_aarch64_neon_vcvtfp2hf
    : Intrinsic<[llvm_v4i16_ty], [llvm_v4f32_ty], [IntrNoMem]>;
  def int_aarch64_neon_vcvthf2fp
    : Intrinsic<[llvm_v4f32_ty], [llvm_v4i16_ty], [IntrNoMem]>;

  // Vector Conversions Between Floating-point and Fixed-point.
  def int_aarch64_neon_vcvtfp2fxs : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfp2fxu : AdvSIMD_CvtFPToFx_Intrinsic;
  def int_aarch64_neon_vcvtfxs2fp : AdvSIMD_CvtFxToFP_Intrinsic;
  def int_aarch64_neon_vcvtfxu2fp : AdvSIMD_CvtFxToFP_Intrinsic;

  // Vector FP->Int Conversions
  def int_aarch64_neon_fcvtas : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtau : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtms : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtmu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtns : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtnu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtps : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtpu : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzs : AdvSIMD_FPToIntRounding_Intrinsic;
  def int_aarch64_neon_fcvtzu : AdvSIMD_FPToIntRounding_Intrinsic;

  // Vector FP Rounding: only ties to even is unrepresented by a normal
  // intrinsic.
  def int_aarch64_neon_frintn : AdvSIMD_1FloatArg_Intrinsic;

  // Scalar FP->Int conversions

  // Vector FP Inexact Narrowing
  def int_aarch64_neon_fcvtxn : AdvSIMD_1VectorArg_Expand_Intrinsic;

  // Scalar FP Inexact Narrowing
  def int_aarch64_sisd_fcvtxn : Intrinsic<[llvm_float_ty], [llvm_double_ty],
                                        [IntrNoMem]>;

  // v8.2-A Dot Product
  def int_aarch64_neon_udot : AdvSIMD_Dot_Intrinsic;
  def int_aarch64_neon_sdot : AdvSIMD_Dot_Intrinsic;

  // v8.2-A FP16 Fused Multiply-Add Long
  def int_aarch64_neon_fmlal : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlal2 : AdvSIMD_FP16FML_Intrinsic;
  def int_aarch64_neon_fmlsl2 : AdvSIMD_FP16FML_Intrinsic;

  // v8.3-A Floating-point complex add
  def int_aarch64_neon_vcadd_rot90  : AdvSIMD_2VectorArg_Intrinsic;
  def int_aarch64_neon_vcadd_rot270 : AdvSIMD_2VectorArg_Intrinsic;
}

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Two vectors plus two i64 lane indices.
  class AdvSIMD_2Vector2Index_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyvector_ty, llvm_i64_ty, LLVMMatchType<0>, llvm_i64_ty],
                [IntrNoMem]>;
}

// Vector element to element moves
def int_aarch64_neon_vcopy_lane: AdvSIMD_2Vector2Index_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Helper classes for the NEON structured load/store intrinsics (ld1xN/ldN/
  // ldNr/ldNlane and the matching stores). Loads are IntrReadMem +
  // IntrArgMemOnly; stores are IntrArgMemOnly with NoCapture on the pointer
  // operand (its index differs per class).
  class AdvSIMD_1Vec_Load_Intrinsic
      : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
                  [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_1Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<2>]>;

  class AdvSIMD_2Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_2Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<2>]>;
  class AdvSIMD_2Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<3>]>;

  class AdvSIMD_3Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_3Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                     LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<3>]>;
  class AdvSIMD_3Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<4>]>;

  class AdvSIMD_4Vec_Load_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty],
                [LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Load_Lane_Intrinsic
    : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>],
                [LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMMatchType<0>, llvm_anyvector_ty,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrReadMem, IntrArgMemOnly]>;
  class AdvSIMD_4Vec_Store_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 LLVMAnyPointerType<LLVMMatchType<0>>],
                [IntrArgMemOnly, NoCapture<4>]>;
  class AdvSIMD_4Vec_Store_Lane_Intrinsic
    : Intrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                 LLVMMatchType<0>, LLVMMatchType<0>,
                 llvm_i64_ty, llvm_anyptr_ty],
                [IntrArgMemOnly, NoCapture<5>]>;
}

// Memory ops

def int_aarch64_neon_ld1x2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld1x4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st1x2 : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st1x3 : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st1x4 : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_ld2 : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3 : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4 : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_ld2lane : AdvSIMD_2Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld3lane : AdvSIMD_3Vec_Load_Lane_Intrinsic;
def int_aarch64_neon_ld4lane : AdvSIMD_4Vec_Load_Lane_Intrinsic;

def int_aarch64_neon_ld2r : AdvSIMD_2Vec_Load_Intrinsic;
def int_aarch64_neon_ld3r : AdvSIMD_3Vec_Load_Intrinsic;
def int_aarch64_neon_ld4r : AdvSIMD_4Vec_Load_Intrinsic;

def int_aarch64_neon_st2  : AdvSIMD_2Vec_Store_Intrinsic;
def int_aarch64_neon_st3  : AdvSIMD_3Vec_Store_Intrinsic;
def int_aarch64_neon_st4  : AdvSIMD_4Vec_Store_Intrinsic;

def int_aarch64_neon_st2lane  : AdvSIMD_2Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st3lane  : AdvSIMD_3Vec_Store_Lane_Intrinsic;
def int_aarch64_neon_st4lane  : AdvSIMD_4Vec_Store_Lane_Intrinsic;

let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
  // Table lookup (TBL) helper classes: 1-4 v16i8 table registers followed by
  // the index vector.
  class AdvSIMD_Tbl1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty], [llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>], [IntrNoMem]>;
  class AdvSIMD_Tbl3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbl4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Table extension (TBX) helper classes: like TBL but with an extra leading
  // operand of the result type.
  class AdvSIMD_Tbx1_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx2_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx3_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
  class AdvSIMD_Tbx4_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>, llvm_v16i8_ty, llvm_v16i8_ty,
                 llvm_v16i8_ty, llvm_v16i8_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
}
def int_aarch64_neon_tbl1 : AdvSIMD_Tbl1_Intrinsic;
def int_aarch64_neon_tbl2 : AdvSIMD_Tbl2_Intrinsic;
def int_aarch64_neon_tbl3 : AdvSIMD_Tbl3_Intrinsic;
def int_aarch64_neon_tbl4 : AdvSIMD_Tbl4_Intrinsic;

def int_aarch64_neon_tbx1 : AdvSIMD_Tbx1_Intrinsic;
def int_aarch64_neon_tbx2 : AdvSIMD_Tbx2_Intrinsic;
def int_aarch64_neon_tbx3 : AdvSIMD_Tbx3_Intrinsic;
def int_aarch64_neon_tbx4 : AdvSIMD_Tbx4_Intrinsic;

let TargetPrefix = "aarch64" in {
  // Reads the FPCR register; takes no operands and returns an i64.
  class FPCR_Get_Intrinsic
    : Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>;
}

// FPCR
def int_aarch64_get_fpcr : FPCR_Get_Intrinsic;

let TargetPrefix = "aarch64" in {
  class Crypto_AES_DataKey_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty], [IntrNoMem]>;

  class Crypto_AES_Data_Intrinsic
    : Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;

  // SHA intrinsic taking 5 words of the hash (v4i32, i32) and 4 of the schedule
  // (v4i32).
  class Crypto_SHA_5Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 1 word of the hash (i32) and returning 1 word (i32).
  class Crypto_SHA_1Hash_Intrinsic
    : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the schedule
  class Crypto_SHA_8Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;

  // SHA intrinsic taking 12 words of the schedule
  class Crypto_SHA_12Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;

  // SHA intrinsic taking 8 words of the hash and 4 of the schedule.
  class Crypto_SHA_8Hash4Schedule_Intrinsic
    : Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
                [IntrNoMem]>;
}

// AES
def int_aarch64_crypto_aese   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesd   : Crypto_AES_DataKey_Intrinsic;
def int_aarch64_crypto_aesmc  : Crypto_AES_Data_Intrinsic;
def int_aarch64_crypto_aesimc : Crypto_AES_Data_Intrinsic;

// SHA1
def int_aarch64_crypto_sha1c  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1p  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1m  : Crypto_SHA_5Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha1h  : Crypto_SHA_1Hash_Intrinsic;

def int_aarch64_crypto_sha1su0 : Crypto_SHA_12Schedule_Intrinsic;
def int_aarch64_crypto_sha1su1 : Crypto_SHA_8Schedule_Intrinsic;

// SHA256
def int_aarch64_crypto_sha256h   : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256h2  : Crypto_SHA_8Hash4Schedule_Intrinsic;
def int_aarch64_crypto_sha256su0 : Crypto_SHA_8Schedule_Intrinsic;
def int_aarch64_crypto_sha256su1 : Crypto_SHA_12Schedule_Intrinsic;

674//===----------------------------------------------------------------------===//
675// CRC32
676
let TargetPrefix = "aarch64" in {

// CRC32 accumulate intrinsics: the first operand is the running CRC value,
// the second is the new data.  The b/h/w forms take the data as i32 while
// the x forms take i64; the "c" spellings (crc32cb, ...) select the CRC32C
// instruction variants.
def int_aarch64_crc32b  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cb : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32h  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32ch : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32w  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem]>;
def int_aarch64_crc32x  : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
def int_aarch64_crc32cx : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
    [IntrNoMem]>;
}
696
697//===----------------------------------------------------------------------===//
698// Memory Tagging Extensions (MTE) Intrinsics
let TargetPrefix = "aarch64" in {
// Insert a random tag into a pointer; IntrHasSideEffects models the read of
// the (stateful) random tag source.
def int_aarch64_irg   : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;
// Derive a new tagged pointer from (pointer, i64 offset).
def int_aarch64_addg  : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
// Compute an exclusion mask (i64) from a tagged pointer and an i64 mask.
def int_aarch64_gmi   : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem]>;
// Load the allocation tag for an address; reads memory only.
def int_aarch64_ldg   : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrReadMem]>;
// Store the allocation tag for an address; writes memory only.
def int_aarch64_stg   : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrWriteMem]>;
// Subtract two tagged pointers, producing an i64 difference.
def int_aarch64_subp :  Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
    [IntrNoMem]>;

// The following are codegen-only intrinsics for stack instrumentation.

// Generate a randomly tagged stack base pointer.
def int_aarch64_irg_sp   : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty],
    [IntrNoMem, IntrHasSideEffects]>;

// Transfer pointer tag with offset.
// ptr1 = tagp(ptr0, baseptr, tag_offset) returns a pointer where
// * address is the address in ptr0
// * tag is a function of (tag in baseptr, tag_offset).
// Address bits in baseptr and tag bits in ptr0 are ignored.
// When offset between ptr0 and baseptr is a compile time constant, this can be emitted as
//   ADDG ptr1, baseptr, (ptr0 - baseptr), tag_offset
// It is intended that ptr0 is an alloca address, and baseptr is the direct output of llvm.aarch64.irg.sp.
// Note: tag_offset (operand 2) must be an immediate (ImmArg<2>).
def int_aarch64_tagp : Intrinsic<[llvm_anyptr_ty], [LLVMMatchType<0>, llvm_ptr_ty, llvm_i64_ty],
    [IntrNoMem, ImmArg<2>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument.
def int_aarch64_settag  : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;

// Update allocation tags for the memory range to match the tag in the pointer argument,
// and set memory contents to zero.
def int_aarch64_settag_zero  : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;

// Update allocation tags for 16-aligned, 16-sized memory region, and store a pair 8-byte values.
def int_aarch64_stgp  : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
    [IntrWriteMem, IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
}
743
744// Transactional Memory Extension (TME) Intrinsics
let TargetPrefix = "aarch64" in {
// Start a transaction; returns an i64 status value.
def int_aarch64_tstart  : GCCBuiltin<"__builtin_arm_tstart">,
                         Intrinsic<[llvm_i64_ty]>;

// Commit the current transaction; no operands, no result.
def int_aarch64_tcommit : GCCBuiltin<"__builtin_arm_tcommit">, Intrinsic<[]>;

// Cancel the current transaction with an immediate reason code (ImmArg<0>).
def int_aarch64_tcancel : GCCBuiltin<"__builtin_arm_tcancel">,
                          Intrinsic<[], [llvm_i64_ty], [ImmArg<0>]>;

// Query transactional state; returns i64.  IntrHasSideEffects prevents the
// call from being moved or folded despite touching no memory.
def int_aarch64_ttest   : GCCBuiltin<"__builtin_arm_ttest">,
                          Intrinsic<[llvm_i64_ty], [],
                                    [IntrNoMem, IntrHasSideEffects]>;
}
758
// Scalable-vector (SVE) type aliases used by the intrinsic classes below.
// "nxvNxT" denotes a scalable vector of N x T elements.
def llvm_nxv2i1_ty  : LLVMType<nxv2i1>;
def llvm_nxv4i1_ty  : LLVMType<nxv4i1>;
def llvm_nxv8i1_ty  : LLVMType<nxv8i1>;
def llvm_nxv16i1_ty : LLVMType<nxv16i1>;
def llvm_nxv16i8_ty : LLVMType<nxv16i8>;
def llvm_nxv4i32_ty : LLVMType<nxv4i32>;
def llvm_nxv2i64_ty : LLVMType<nxv2i64>;
def llvm_nxv8f16_ty : LLVMType<nxv8f16>;
def llvm_nxv4f32_ty : LLVMType<nxv4f32>;
def llvm_nxv2f64_ty : LLVMType<nxv2f64>;
769
770let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
771
  // Predicated load: (predicate, pointer-to-vector) -> vector.
  class AdvSIMD_1Vec_PredLoad_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMPointerTo<0>],
                [IntrReadMem, IntrArgMemOnly]>;

  // Predicated store: (data, predicate, pointer-to-vector) -> void.
  // The pointer operand (index 2) is not captured.
  class AdvSIMD_1Vec_PredStore_Intrinsic
    : Intrinsic<[],
                [llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMPointerTo<0>],
                [IntrArgMemOnly, NoCapture<2>]>;
784
  // Merging unary op: (passthru, predicate, operand) -> vector.  Inactive
  // lanes take their value from the passthru operand.
  class AdvSIMD_Merged1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Two vector operands plus a trailing i32 index.
  class AdvSIMD_2VectorArgIndexed_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Three vector operands plus a trailing i32 index.
  class AdvSIMD_3VectorArgIndexed_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;
806
  // Predicated op with one vector operand: (predicate, a) -> vector.
  class AdvSIMD_Pred1VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Predicated op with two vector operands: (predicate, a, b) -> vector.
  class AdvSIMD_Pred2VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Predicated op with three vector operands: (predicate, a, b, c) -> vector.
  class AdvSIMD_Pred3VectorArg_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>],
                [IntrNoMem]>;
827
  // Predicated compare: (predicate, a, b) -> predicate vector result.
  class AdvSIMD_SVE_Compare_Intrinsic
    : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Predicated wide-element compare: second source is always nxv2i64.
  class AdvSIMD_SVE_CompareWide_Intrinsic
    : Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty,
                 llvm_nxv2i64_ty],
                [IntrNoMem]>;
841
  // Saturating op on a vector, counted by a predicate operand.
  class AdvSIMD_SVE_Saturating_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
                [IntrNoMem]>;

  // Saturating op taking two immediate i32 operands (both ImmArg).
  class AdvSIMD_SVE_SaturatingWithPattern_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem, ImmArg<1>, ImmArg<2>]>;

  // Scalar (i32/i64) saturating op counted by a predicate vector.
  class AdvSIMD_SVE_Saturating_N_Intrinsic<LLVMType T>
    : Intrinsic<[T],
                [T, llvm_anyvector_ty],
                [IntrNoMem]>;

  // Scalar (i32/i64) saturating op with two immediate i32 operands.
  class AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<LLVMType T>
    : Intrinsic<[T],
                [T, llvm_i32_ty, llvm_i32_ty],
                [IntrNoMem, ImmArg<1>, ImmArg<2>]>;
864
  // Merging count op: result/passthru use the integer bitcast of the source
  // vector type; operands are (passthru, predicate, source).
  class AdvSIMD_SVE_CNT_Intrinsic
    : Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
                [LLVMVectorOfBitcastsToInt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // FP reduction: (predicate, vector) -> scalar float.
  class AdvSIMD_SVE_FP_Reduce_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty],
                [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // Reduction with an initial scalar value: (predicate, init, vector).
  class AdvSIMD_SVE_ReduceWithInit_Intrinsic
    : Intrinsic<[LLVMVectorElementType<0>],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMVectorElementType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // FP reduction with an initial scalar value: (predicate, init, vector).
  class AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic
    : Intrinsic<[llvm_anyfloat_ty],
                [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;
891
  // Predicated shift by an i32 immediate-style operand.
  class AdvSIMD_SVE_ShiftByImm_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Predicated shift where the shift amounts come from an nxv2i64 vector.
  class AdvSIMD_SVE_ShiftWide_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 llvm_nxv2i64_ty],
                [IntrNoMem]>;
905
  // Widening unpack: result has twice the element width of the source.
  class AdvSIMD_SVE_Unpack_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
               [LLVMSubdivide2VectorType<0>],
               [IntrNoMem]>;

  // Complex add: (predicate, a, b, i32) -> vector.  The trailing i32 is
  // presumably the rotation selector — confirm against the instruction defs.
  class AdvSIMD_SVE_CADD_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Complex multiply-accumulate: (predicate, acc, a, b, i32 rotation).
  class AdvSIMD_SVE_CMLA_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Indexed complex multiply-accumulate: (acc, a, b, i32, i32); unpredicated.
  class AdvSIMD_SVE_CMLA_LANE_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 LLVMMatchType<0>,
                 llvm_i32_ty,
                 llvm_i32_ty],
                [IntrNoMem]>;
936
  // Takes the integer bitcast of the result vector type as its sole operand.
  class AdvSIMD_SVE_EXPA_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  // FP conversion: (passthru, predicate, source vector of a different type).
  class AdvSIMD_SVE_FCVT_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // FP-to-int conversion: passthru is the integer bitcast of the result type.
  class AdvSIMD_SVE_FCVTZS_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMVectorOfBitcastsToInt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // Insert a scalar element into a vector: (vector, scalar) -> vector.
  class AdvSIMD_SVE_INSR_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorElementType<0>],
                [IntrNoMem]>;
961
  // Construct a predicate vector from an immediate i32 pattern (ImmArg<0>).
  class AdvSIMD_SVE_PTRUE_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_i32_ty],
                [IntrNoMem, ImmArg<0>]>;

  // Predicate unpack: result has half the element count of the source.
  class AdvSIMD_SVE_PUNPKHI_Intrinsic
    : Intrinsic<[LLVMHalfElementsVectorType<0>],
                [llvm_anyvector_ty],
                [IntrNoMem]>;

  // Predicated scale: (predicate, a, integer-bitcast exponent vector).
  class AdvSIMD_SVE_SCALE_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;
978
  // Int-to-FP conversion: (passthru, predicate, integer source vector).
  class AdvSIMD_SVE_SCVTF_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;

  // (vector, integer-bitcast selector vector) -> vector; unpredicated.
  class AdvSIMD_SVE_TSMUL_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;

  // Element-count query: immediate i32 pattern (ImmArg<0>) -> i64 count.
  class AdvSIMD_SVE_CNTB_Intrinsic
    : Intrinsic<[llvm_i64_ty],
                [llvm_i32_ty],
                [IntrNoMem, ImmArg<0>]>;

  // Count active predicate elements: (predicate, predicate) -> i64.
  class AdvSIMD_SVE_CNTP_Intrinsic
    : Intrinsic<[llvm_i64_ty],
                [llvm_anyvector_ty, LLVMMatchType<0>],
                [IntrNoMem]>;
1001
  // Dot product: accumulator plus two quarter-element-width source vectors.
  class AdvSIMD_SVE_DOT_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>],
                [IntrNoMem]>;

  // Indexed dot product: as above, with a trailing i32 lane index.
  class AdvSIMD_SVE_DOT_Indexed_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide4VectorType<0>,
                 LLVMSubdivide4VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Predicate test: (governing predicate, predicate) -> scalar i1.
  class AdvSIMD_SVE_PTEST_Intrinsic
    : Intrinsic<[llvm_i1_ty],
                [llvm_anyvector_ty,
                 LLVMMatchType<0>],
                [IntrNoMem]>;

  // Table lookup: (table vector, integer-bitcast index vector) -> vector.
  class AdvSIMD_SVE_TBL_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMVectorOfBitcastsToInt<0>],
                [IntrNoMem]>;
1028
  // Widening accumulate: accumulator plus two half-element-width sources.
  class SVE2_3VectorArg_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>],
                [IntrNoMem]>;

  // Widening accumulate with a trailing i32 lane index.
  class SVE2_3VectorArgIndexed_Long_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [LLVMMatchType<0>,
                 LLVMSubdivide2VectorType<0>,
                 LLVMSubdivide2VectorType<0>,
                 llvm_i32_ty],
                [IntrNoMem]>;

  // Narrowing unary op: result has half the element width of the source.
  class SVE2_1VectorArg_Narrowing_Intrinsic
    : Intrinsic<[LLVMSubdivide2VectorType<0>],
                [llvm_anyvector_ty],
                [IntrNoMem]>;

  // Narrowing unary op that merges into a narrow first operand.
  class SVE2_Merged1VectorArg_Narrowing_Intrinsic
    : Intrinsic<[LLVMSubdivide2VectorType<0>],
                [LLVMSubdivide2VectorType<0>,
                 llvm_anyvector_ty],
                [IntrNoMem]>;
  // Narrowing binary op: two wide sources, narrow result.
  class SVE2_2VectorArg_Narrowing_Intrinsic
      : Intrinsic<
            [LLVMSubdivide2VectorType<0>],
            [llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  // Narrowing binary op that merges into a narrow first operand.
  class SVE2_Merged2VectorArg_Narrowing_Intrinsic
      : Intrinsic<
            [LLVMSubdivide2VectorType<0>],
            [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
            [IntrNoMem]>;

  // Narrowing shift/op by an immediate i32 (ImmArg<1>).
  class SVE2_1VectorArg_Imm_Narrowing_Intrinsic
      : Intrinsic<[LLVMSubdivide2VectorType<0>],
                  [llvm_anyvector_ty, llvm_i32_ty],
                  [IntrNoMem, ImmArg<1>]>;

  // Merging narrowing shift/op by an immediate i32 (ImmArg<2>).
  class SVE2_2VectorArg_Imm_Narrowing_Intrinsic
      : Intrinsic<[LLVMSubdivide2VectorType<0>],
                  [LLVMSubdivide2VectorType<0>, llvm_anyvector_ty,
                   llvm_i32_ty],
                  [IntrNoMem, ImmArg<2>]>;
1076
  // NOTE: There is no relationship between these intrinsics beyond an attempt
  // to reuse currently identical class definitions.
  class AdvSIMD_SVE_LOGB_Intrinsic  : AdvSIMD_SVE_CNT_Intrinsic;

  // This class of intrinsics is not intended to be useful within LLVM IR but
  // is instead here to support some of the more rigid parts of the ACLE.
  class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
  : GCCBuiltin<"__builtin_sve_" # name>,
    Intrinsic<[OUT], [OUT, llvm_nxv16i1_ty, IN], [IntrNoMem]>;
1086}
1087
1088//===----------------------------------------------------------------------===//
1089// SVE
1090
1091let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
1092
// Reduction: (predicate, vector) -> scalar of the vector's element type.
class AdvSIMD_SVE_Reduce_Intrinsic
  : Intrinsic<[LLVMVectorElementType<0>],
              [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
               llvm_anyvector_ty],
              [IntrNoMem]>;

// Widening add reduction: (predicate, vector) -> i64 regardless of element
// type.
class AdvSIMD_SVE_SADDV_Reduce_Intrinsic
  : Intrinsic<[llvm_i64_ty],
              [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
               llvm_anyvector_ty],
              [IntrNoMem]>;

// While-style predicate construction from two scalar bounds of the same
// integer type.
class AdvSIMD_SVE_WHILE_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [llvm_anyint_ty, LLVMMatchType<1>],
                [IntrNoMem]>;
1109
// Gather load: (predicate, element pointer, vector of i64 offsets) -> vector.
class AdvSIMD_GatherLoad_64bitOffset_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  LLVMPointerToElt<0>,
                  LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
                ],
                [IntrReadMem, IntrArgMemOnly]>;

// Gather load: (predicate, element pointer, vector of i32 offsets) -> vector.
class AdvSIMD_GatherLoad_32bitOffset_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  LLVMPointerToElt<0>,
                  LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
                ],
                [IntrReadMem, IntrArgMemOnly]>;

// Gather load: (predicate, vector of base addresses, i64 offset) -> vector.
// NOTE(review): "VecTorBase" capitalization looks like a typo for
// "VectorBase" (cf. AdvSIMD_ScatterStore_VectorBase_Intrinsic below), but the
// name is referenced elsewhere, so renaming must be done file-wide.
class AdvSIMD_GatherLoad_VecTorBase_Intrinsic
    : Intrinsic<[llvm_anyvector_ty],
                [
                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                  llvm_anyvector_ty,
                  llvm_i64_ty
                ],
                [IntrReadMem, IntrArgMemOnly]>;
1136
// Scatter store: (data, predicate, element pointer, vector of i64 offsets).
class AdvSIMD_ScatterStore_64bitOffset_Intrinsic
    : Intrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMPointerToElt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i64_ty>
               ],
               [IntrWriteMem, IntrArgMemOnly]>;

// Scatter store: (data, predicate, element pointer, vector of i32 offsets).
class AdvSIMD_ScatterStore_32bitOffset_Intrinsic
    : Intrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 LLVMPointerToElt<0>,
                 LLVMScalarOrSameVectorWidth<0, llvm_i32_ty>
               ],
               [IntrWriteMem, IntrArgMemOnly]>;

// Scatter store: (data, predicate, vector of base addresses, immediate i64
// offset — ImmArg<3>).
class AdvSIMD_ScatterStore_VectorBase_Intrinsic
    : Intrinsic<[],
               [
                 llvm_anyvector_ty,
                 LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                 llvm_anyvector_ty, llvm_i64_ty
               ],
               [IntrWriteMem, IntrArgMemOnly, ImmArg<3>]>;
1165
1166//
1167// Loads
1168//
1169
1170def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
1171
1172//
1173// Stores
1174//
1175
1176def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
1177
1178//
1179// Integer arithmetic
1180//
1181
1182def int_aarch64_sve_add   : AdvSIMD_Pred2VectorArg_Intrinsic;
1183def int_aarch64_sve_sub   : AdvSIMD_Pred2VectorArg_Intrinsic;
1184def int_aarch64_sve_subr  : AdvSIMD_Pred2VectorArg_Intrinsic;
1185
1186def int_aarch64_sve_mul        : AdvSIMD_Pred2VectorArg_Intrinsic;
1187def int_aarch64_sve_smulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
1188def int_aarch64_sve_umulh      : AdvSIMD_Pred2VectorArg_Intrinsic;
1189
1190def int_aarch64_sve_sdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1191def int_aarch64_sve_udiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1192def int_aarch64_sve_sdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1193def int_aarch64_sve_udivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1194
1195def int_aarch64_sve_smax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1196def int_aarch64_sve_umax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1197def int_aarch64_sve_smin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1198def int_aarch64_sve_umin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1199def int_aarch64_sve_sabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1200def int_aarch64_sve_uabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1201
1202def int_aarch64_sve_mad        : AdvSIMD_Pred3VectorArg_Intrinsic;
1203def int_aarch64_sve_msb        : AdvSIMD_Pred3VectorArg_Intrinsic;
1204def int_aarch64_sve_mla        : AdvSIMD_Pred3VectorArg_Intrinsic;
1205def int_aarch64_sve_mls        : AdvSIMD_Pred3VectorArg_Intrinsic;
1206
1207def int_aarch64_sve_saddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
1208def int_aarch64_sve_uaddv      : AdvSIMD_SVE_SADDV_Reduce_Intrinsic;
1209
1210def int_aarch64_sve_smaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
1211def int_aarch64_sve_umaxv      : AdvSIMD_SVE_Reduce_Intrinsic;
1212def int_aarch64_sve_sminv      : AdvSIMD_SVE_Reduce_Intrinsic;
1213def int_aarch64_sve_uminv      : AdvSIMD_SVE_Reduce_Intrinsic;
1214
1215def int_aarch64_sve_orv        : AdvSIMD_SVE_Reduce_Intrinsic;
1216def int_aarch64_sve_eorv       : AdvSIMD_SVE_Reduce_Intrinsic;
1217def int_aarch64_sve_andv       : AdvSIMD_SVE_Reduce_Intrinsic;
1218
1219def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic;
1220def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic;
1221
1222def int_aarch64_sve_sdot      : AdvSIMD_SVE_DOT_Intrinsic;
1223def int_aarch64_sve_sdot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
1224
1225def int_aarch64_sve_udot      : AdvSIMD_SVE_DOT_Intrinsic;
1226def int_aarch64_sve_udot_lane : AdvSIMD_SVE_DOT_Indexed_Intrinsic;
1227
// Shifts; the _wide forms take their shift amounts from an nxv2i64 vector,
// asrd shifts by an immediate (see AdvSIMD_SVE_ShiftByImm_Intrinsic).

def int_aarch64_sve_asr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_asr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_asrd     : AdvSIMD_SVE_ShiftByImm_Intrinsic;
def int_aarch64_sve_insr     : AdvSIMD_SVE_INSR_Intrinsic;
def int_aarch64_sve_lsl      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsl_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
def int_aarch64_sve_lsr      : AdvSIMD_Pred2VectorArg_Intrinsic;
def int_aarch64_sve_lsr_wide : AdvSIMD_SVE_ShiftWide_Intrinsic;
1238
1239//
1240// Integer comparisons
1241//
1242
1243def int_aarch64_sve_cmpeq : AdvSIMD_SVE_Compare_Intrinsic;
1244def int_aarch64_sve_cmpge : AdvSIMD_SVE_Compare_Intrinsic;
1245def int_aarch64_sve_cmpgt : AdvSIMD_SVE_Compare_Intrinsic;
1246def int_aarch64_sve_cmphi : AdvSIMD_SVE_Compare_Intrinsic;
1247def int_aarch64_sve_cmphs : AdvSIMD_SVE_Compare_Intrinsic;
1248def int_aarch64_sve_cmpne : AdvSIMD_SVE_Compare_Intrinsic;
1249
1250def int_aarch64_sve_cmpeq_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1251def int_aarch64_sve_cmpge_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1252def int_aarch64_sve_cmpgt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1253def int_aarch64_sve_cmphi_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1254def int_aarch64_sve_cmphs_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1255def int_aarch64_sve_cmple_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1256def int_aarch64_sve_cmplo_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1257def int_aarch64_sve_cmpls_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1258def int_aarch64_sve_cmplt_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1259def int_aarch64_sve_cmpne_wide : AdvSIMD_SVE_CompareWide_Intrinsic;
1260
1261//
1262// Counting bits
1263//
1264
1265def int_aarch64_sve_cls : AdvSIMD_Merged1VectorArg_Intrinsic;
1266def int_aarch64_sve_clz : AdvSIMD_Merged1VectorArg_Intrinsic;
1267def int_aarch64_sve_cnt : AdvSIMD_SVE_CNT_Intrinsic;
1268
1269//
1270// Counting elements
1271//
1272
1273def int_aarch64_sve_cntb : AdvSIMD_SVE_CNTB_Intrinsic;
1274def int_aarch64_sve_cnth : AdvSIMD_SVE_CNTB_Intrinsic;
1275def int_aarch64_sve_cntw : AdvSIMD_SVE_CNTB_Intrinsic;
1276def int_aarch64_sve_cntd : AdvSIMD_SVE_CNTB_Intrinsic;
1277
1278def int_aarch64_sve_cntp : AdvSIMD_SVE_CNTP_Intrinsic;
1279
1280//
1281// Saturating scalar arithmetic
1282//
1283
1284def int_aarch64_sve_sqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1285def int_aarch64_sve_sqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1286def int_aarch64_sve_sqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1287def int_aarch64_sve_sqdecp : AdvSIMD_SVE_Saturating_Intrinsic;
1288
1289def int_aarch64_sve_sqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1290def int_aarch64_sve_sqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1291def int_aarch64_sve_sqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1292def int_aarch64_sve_sqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1293def int_aarch64_sve_sqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1294def int_aarch64_sve_sqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1295def int_aarch64_sve_sqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1296def int_aarch64_sve_sqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1297def int_aarch64_sve_sqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1298def int_aarch64_sve_sqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1299
1300def int_aarch64_sve_sqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1301def int_aarch64_sve_sqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1302def int_aarch64_sve_sqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1303def int_aarch64_sve_sqincp : AdvSIMD_SVE_Saturating_Intrinsic;
1304
1305def int_aarch64_sve_sqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1306def int_aarch64_sve_sqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1307def int_aarch64_sve_sqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1308def int_aarch64_sve_sqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1309def int_aarch64_sve_sqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1310def int_aarch64_sve_sqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1311def int_aarch64_sve_sqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1312def int_aarch64_sve_sqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1313def int_aarch64_sve_sqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1314def int_aarch64_sve_sqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1315
1316def int_aarch64_sve_uqdech : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1317def int_aarch64_sve_uqdecw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1318def int_aarch64_sve_uqdecd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1319def int_aarch64_sve_uqdecp : AdvSIMD_SVE_Saturating_Intrinsic;
1320
1321def int_aarch64_sve_uqdecb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1322def int_aarch64_sve_uqdecb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1323def int_aarch64_sve_uqdech_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1324def int_aarch64_sve_uqdech_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1325def int_aarch64_sve_uqdecw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1326def int_aarch64_sve_uqdecw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1327def int_aarch64_sve_uqdecd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1328def int_aarch64_sve_uqdecd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1329def int_aarch64_sve_uqdecp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1330def int_aarch64_sve_uqdecp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1331
1332def int_aarch64_sve_uqinch : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1333def int_aarch64_sve_uqincw : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1334def int_aarch64_sve_uqincd : AdvSIMD_SVE_SaturatingWithPattern_Intrinsic;
1335def int_aarch64_sve_uqincp : AdvSIMD_SVE_Saturating_Intrinsic;
1336
1337def int_aarch64_sve_uqincb_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1338def int_aarch64_sve_uqincb_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1339def int_aarch64_sve_uqinch_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1340def int_aarch64_sve_uqinch_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1341def int_aarch64_sve_uqincw_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1342def int_aarch64_sve_uqincw_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1343def int_aarch64_sve_uqincd_n32 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i32_ty>;
1344def int_aarch64_sve_uqincd_n64 : AdvSIMD_SVE_SaturatingWithPattern_N_Intrinsic<llvm_i64_ty>;
1345def int_aarch64_sve_uqincp_n32 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i32_ty>;
1346def int_aarch64_sve_uqincp_n64 : AdvSIMD_SVE_Saturating_N_Intrinsic<llvm_i64_ty>;
1347
1348//
1349// Reversal
1350//
1351
1352def int_aarch64_sve_rbit : AdvSIMD_Merged1VectorArg_Intrinsic;
1353def int_aarch64_sve_revb : AdvSIMD_Merged1VectorArg_Intrinsic;
1354def int_aarch64_sve_revh : AdvSIMD_Merged1VectorArg_Intrinsic;
1355def int_aarch64_sve_revw : AdvSIMD_Merged1VectorArg_Intrinsic;
1356
1357//
1358// Permutations and selection
1359//
1360
1361def int_aarch64_sve_clasta    : AdvSIMD_Pred2VectorArg_Intrinsic;
1362def int_aarch64_sve_clasta_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
1363def int_aarch64_sve_clastb    : AdvSIMD_Pred2VectorArg_Intrinsic;
1364def int_aarch64_sve_clastb_n  : AdvSIMD_SVE_ReduceWithInit_Intrinsic;
1365def int_aarch64_sve_compact   : AdvSIMD_Pred1VectorArg_Intrinsic;
1366def int_aarch64_sve_ext       : AdvSIMD_2VectorArgIndexed_Intrinsic;
1367def int_aarch64_sve_lasta     : AdvSIMD_SVE_Reduce_Intrinsic;
1368def int_aarch64_sve_lastb     : AdvSIMD_SVE_Reduce_Intrinsic;
1369def int_aarch64_sve_rev       : AdvSIMD_1VectorArg_Intrinsic;
1370def int_aarch64_sve_splice    : AdvSIMD_Pred2VectorArg_Intrinsic;
1371def int_aarch64_sve_sunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
1372def int_aarch64_sve_sunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
1373def int_aarch64_sve_tbl       : AdvSIMD_SVE_TBL_Intrinsic;
1374def int_aarch64_sve_trn1      : AdvSIMD_2VectorArg_Intrinsic;
1375def int_aarch64_sve_trn2      : AdvSIMD_2VectorArg_Intrinsic;
1376def int_aarch64_sve_uunpkhi   : AdvSIMD_SVE_Unpack_Intrinsic;
1377def int_aarch64_sve_uunpklo   : AdvSIMD_SVE_Unpack_Intrinsic;
1378def int_aarch64_sve_uzp1      : AdvSIMD_2VectorArg_Intrinsic;
1379def int_aarch64_sve_uzp2      : AdvSIMD_2VectorArg_Intrinsic;
1380def int_aarch64_sve_zip1      : AdvSIMD_2VectorArg_Intrinsic;
1381def int_aarch64_sve_zip2      : AdvSIMD_2VectorArg_Intrinsic;
1382
1383//
1384// Logical operations
1385//
1386
1387def int_aarch64_sve_and  : AdvSIMD_Pred2VectorArg_Intrinsic;
1388def int_aarch64_sve_bic  : AdvSIMD_Pred2VectorArg_Intrinsic;
1389def int_aarch64_sve_cnot : AdvSIMD_Merged1VectorArg_Intrinsic;
1390def int_aarch64_sve_eor  : AdvSIMD_Pred2VectorArg_Intrinsic;
1391def int_aarch64_sve_not  : AdvSIMD_Merged1VectorArg_Intrinsic;
1392def int_aarch64_sve_orr  : AdvSIMD_Pred2VectorArg_Intrinsic;
1393
1394//
1395// Conversion
1396//
1397
// In-register sign (sxt*) / zero (uxt*) extension from byte, halfword, or
// word sub-elements; merged form keeps inactive lanes from the passthru.
1398def int_aarch64_sve_sxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
1399def int_aarch64_sve_sxth : AdvSIMD_Merged1VectorArg_Intrinsic;
1400def int_aarch64_sve_sxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
1401def int_aarch64_sve_uxtb : AdvSIMD_Merged1VectorArg_Intrinsic;
1402def int_aarch64_sve_uxth : AdvSIMD_Merged1VectorArg_Intrinsic;
1403def int_aarch64_sve_uxtw : AdvSIMD_Merged1VectorArg_Intrinsic;
1404
1405//
1406// While comparisons
1407//
1408
// Predicate-constructing loop comparisons; suffix encodes the condition
// (le/lo/ls/lt and ge/gt/hs/hi — signed vs. unsigned variants). All share
// the common WHILE signature class defined earlier in the file.
1409def int_aarch64_sve_whilele : AdvSIMD_SVE_WHILE_Intrinsic;
1410def int_aarch64_sve_whilelo : AdvSIMD_SVE_WHILE_Intrinsic;
1411def int_aarch64_sve_whilels : AdvSIMD_SVE_WHILE_Intrinsic;
1412def int_aarch64_sve_whilelt : AdvSIMD_SVE_WHILE_Intrinsic;
1413def int_aarch64_sve_whilege : AdvSIMD_SVE_WHILE_Intrinsic;
1414def int_aarch64_sve_whilegt : AdvSIMD_SVE_WHILE_Intrinsic;
1415def int_aarch64_sve_whilehs : AdvSIMD_SVE_WHILE_Intrinsic;
1416def int_aarch64_sve_whilehi : AdvSIMD_SVE_WHILE_Intrinsic;
1417
1418//
1419// Floating-point arithmetic
1420//
1421
// Predicated FP vector arithmetic. Naming conventions in this group:
//   *_lane  — indexed (by-element) variants with an immediate lane operand;
//   *r      — reversed-operand forms (fdivr, fsubr);
//   *_x     — unpredicated helper forms (fexpa_x, frecpe_x, ftmad_x, ...).
1422def int_aarch64_sve_fabd       : AdvSIMD_Pred2VectorArg_Intrinsic;
1423def int_aarch64_sve_fabs       : AdvSIMD_Merged1VectorArg_Intrinsic;
1424def int_aarch64_sve_fadd       : AdvSIMD_Pred2VectorArg_Intrinsic;
// Complex arithmetic: cadd/cmla carry an immediate rotation operand.
1425def int_aarch64_sve_fcadd      : AdvSIMD_SVE_CADD_Intrinsic;
1426def int_aarch64_sve_fcmla      : AdvSIMD_SVE_CMLA_Intrinsic;
1427def int_aarch64_sve_fcmla_lane : AdvSIMD_SVE_CMLA_LANE_Intrinsic;
1428def int_aarch64_sve_fdiv       : AdvSIMD_Pred2VectorArg_Intrinsic;
1429def int_aarch64_sve_fdivr      : AdvSIMD_Pred2VectorArg_Intrinsic;
1430def int_aarch64_sve_fexpa_x    : AdvSIMD_SVE_EXPA_Intrinsic;
// Fused multiply-add family: fmad/fmla/fmls/fmsb and the negated
// fnmad/fnmla/fnmls/fnmsb forms all take predicate + three vectors.
1431def int_aarch64_sve_fmad       : AdvSIMD_Pred3VectorArg_Intrinsic;
1432def int_aarch64_sve_fmax       : AdvSIMD_Pred2VectorArg_Intrinsic;
1433def int_aarch64_sve_fmaxnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
1434def int_aarch64_sve_fmin       : AdvSIMD_Pred2VectorArg_Intrinsic;
1435def int_aarch64_sve_fminnm     : AdvSIMD_Pred2VectorArg_Intrinsic;
1436def int_aarch64_sve_fmla       : AdvSIMD_Pred3VectorArg_Intrinsic;
1437def int_aarch64_sve_fmla_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
1438def int_aarch64_sve_fmls       : AdvSIMD_Pred3VectorArg_Intrinsic;
1439def int_aarch64_sve_fmls_lane  : AdvSIMD_3VectorArgIndexed_Intrinsic;
1440def int_aarch64_sve_fmsb       : AdvSIMD_Pred3VectorArg_Intrinsic;
1441def int_aarch64_sve_fmul       : AdvSIMD_Pred2VectorArg_Intrinsic;
1442def int_aarch64_sve_fmulx      : AdvSIMD_Pred2VectorArg_Intrinsic;
1443def int_aarch64_sve_fneg       : AdvSIMD_Merged1VectorArg_Intrinsic;
1444def int_aarch64_sve_fmul_lane  : AdvSIMD_2VectorArgIndexed_Intrinsic;
1445def int_aarch64_sve_fnmad      : AdvSIMD_Pred3VectorArg_Intrinsic;
1446def int_aarch64_sve_fnmla      : AdvSIMD_Pred3VectorArg_Intrinsic;
1447def int_aarch64_sve_fnmls      : AdvSIMD_Pred3VectorArg_Intrinsic;
1448def int_aarch64_sve_fnmsb      : AdvSIMD_Pred3VectorArg_Intrinsic;
// Reciprocal / reciprocal-sqrt estimate and step refinement helpers.
1449def int_aarch64_sve_frecpe_x   : AdvSIMD_1VectorArg_Intrinsic;
1450def int_aarch64_sve_frecps_x   : AdvSIMD_2VectorArg_Intrinsic;
1451def int_aarch64_sve_frecpx     : AdvSIMD_Merged1VectorArg_Intrinsic;
// Rounding family; suffix selects the rounding mode (a/i/m/n/p/x/z).
1452def int_aarch64_sve_frinta     : AdvSIMD_Merged1VectorArg_Intrinsic;
1453def int_aarch64_sve_frinti     : AdvSIMD_Merged1VectorArg_Intrinsic;
1454def int_aarch64_sve_frintm     : AdvSIMD_Merged1VectorArg_Intrinsic;
1455def int_aarch64_sve_frintn     : AdvSIMD_Merged1VectorArg_Intrinsic;
1456def int_aarch64_sve_frintp     : AdvSIMD_Merged1VectorArg_Intrinsic;
1457def int_aarch64_sve_frintx     : AdvSIMD_Merged1VectorArg_Intrinsic;
1458def int_aarch64_sve_frintz     : AdvSIMD_Merged1VectorArg_Intrinsic;
1459def int_aarch64_sve_frsqrte_x  : AdvSIMD_1VectorArg_Intrinsic;
1460def int_aarch64_sve_frsqrts_x  : AdvSIMD_2VectorArg_Intrinsic;
1461def int_aarch64_sve_fscale     : AdvSIMD_SVE_SCALE_Intrinsic;
1462def int_aarch64_sve_fsqrt      : AdvSIMD_Merged1VectorArg_Intrinsic;
1463def int_aarch64_sve_fsub       : AdvSIMD_Pred2VectorArg_Intrinsic;
1464def int_aarch64_sve_fsubr      : AdvSIMD_Pred2VectorArg_Intrinsic;
// Trigonometric helper ops (ftmad/ftsmul/ftssel), unpredicated.
1465def int_aarch64_sve_ftmad_x    : AdvSIMD_2VectorArgIndexed_Intrinsic;
1466def int_aarch64_sve_ftsmul_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
1467def int_aarch64_sve_ftssel_x   : AdvSIMD_SVE_TSMUL_Intrinsic;
1468
1469//
1470// Floating-point reductions
1471//
1472
// Horizontal FP reductions to a scalar. fadda is the strictly-ordered
// accumulating add and therefore carries an initial value (ReduceWithInit);
// the rest use the plain FP reduce signature.
1473def int_aarch64_sve_fadda   : AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic;
1474def int_aarch64_sve_faddv   : AdvSIMD_SVE_FP_Reduce_Intrinsic;
1475def int_aarch64_sve_fmaxv   : AdvSIMD_SVE_FP_Reduce_Intrinsic;
1476def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
1477def int_aarch64_sve_fminv   : AdvSIMD_SVE_FP_Reduce_Intrinsic;
1478def int_aarch64_sve_fminnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic;
1479
1480//
1481// Floating-point conversions
1482//
1483
// Generic (polymorphic) conversion intrinsics; the fixed source/destination
// type combinations are enumerated separately via Builtin_SVCVT below.
1484def int_aarch64_sve_fcvt   : AdvSIMD_SVE_FCVT_Intrinsic;
1485def int_aarch64_sve_fcvtzs : AdvSIMD_SVE_FCVTZS_Intrinsic;
1486def int_aarch64_sve_fcvtzu : AdvSIMD_SVE_FCVTZS_Intrinsic;
1487def int_aarch64_sve_scvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;
1488def int_aarch64_sve_ucvtf  : AdvSIMD_SVE_SCVTF_Intrinsic;
1489
1490//
1491// Floating-point comparisons
1492//
1493
// Absolute-value comparisons (|a| >= |b|, |a| > |b|).
1494def int_aarch64_sve_facge : AdvSIMD_SVE_Compare_Intrinsic;
1495def int_aarch64_sve_facgt : AdvSIMD_SVE_Compare_Intrinsic;
1496
// Ordinary FP comparisons producing a predicate; fcmpuo tests for
// unordered operands (either input NaN).
1497def int_aarch64_sve_fcmpeq : AdvSIMD_SVE_Compare_Intrinsic;
1498def int_aarch64_sve_fcmpge : AdvSIMD_SVE_Compare_Intrinsic;
1499def int_aarch64_sve_fcmpgt : AdvSIMD_SVE_Compare_Intrinsic;
1500def int_aarch64_sve_fcmpne : AdvSIMD_SVE_Compare_Intrinsic;
1501def int_aarch64_sve_fcmpuo : AdvSIMD_SVE_Compare_Intrinsic;
1502
// Fixed-type conversion builtins. Each record pins one (destination,
// source) scalable-vector type pair and names the corresponding ACLE
// builtin (first template argument); suffix order is destination-then-
// source (e.g. i32f16 = f16 -> i32). The "_m" in the builtin names marks
// the merging form.

// FP -> signed integer, round toward zero.
1503def int_aarch64_sve_fcvtzs_i32f16   : Builtin_SVCVT<"svcvt_s32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
1504def int_aarch64_sve_fcvtzs_i32f64   : Builtin_SVCVT<"svcvt_s32_f64_m", llvm_nxv4i32_ty, llvm_nxv2f64_ty>;
1505def int_aarch64_sve_fcvtzs_i64f16   : Builtin_SVCVT<"svcvt_s64_f16_m", llvm_nxv2i64_ty, llvm_nxv8f16_ty>;
1506def int_aarch64_sve_fcvtzs_i64f32   : Builtin_SVCVT<"svcvt_s64_f32_m", llvm_nxv2i64_ty, llvm_nxv4f32_ty>;
1507
// FP -> unsigned integer, round toward zero.
1508def int_aarch64_sve_fcvtzu_i32f16   : Builtin_SVCVT<"svcvt_u32_f16_m", llvm_nxv4i32_ty, llvm_nxv8f16_ty>;
1509def int_aarch64_sve_fcvtzu_i32f64   : Builtin_SVCVT<"svcvt_u32_f64_m", llvm_nxv4i32_ty, llvm_nxv2f64_ty>;
1510def int_aarch64_sve_fcvtzu_i64f16   : Builtin_SVCVT<"svcvt_u64_f16_m", llvm_nxv2i64_ty, llvm_nxv8f16_ty>;
1511def int_aarch64_sve_fcvtzu_i64f32   : Builtin_SVCVT<"svcvt_u64_f32_m", llvm_nxv2i64_ty, llvm_nxv4f32_ty>;
1512
// FP -> FP narrowing conversions.
1513def int_aarch64_sve_fcvt_f16f32     : Builtin_SVCVT<"svcvt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4f32_ty>;
1514def int_aarch64_sve_fcvt_f16f64     : Builtin_SVCVT<"svcvt_f16_f64_m", llvm_nxv8f16_ty, llvm_nxv2f64_ty>;
1515def int_aarch64_sve_fcvt_f32f64     : Builtin_SVCVT<"svcvt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
1516
// FP -> FP widening conversions.
1517def int_aarch64_sve_fcvt_f32f16     : Builtin_SVCVT<"svcvt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv8f16_ty>;
1518def int_aarch64_sve_fcvt_f64f16     : Builtin_SVCVT<"svcvt_f64_f16_m", llvm_nxv2f64_ty, llvm_nxv8f16_ty>;
1519def int_aarch64_sve_fcvt_f64f32     : Builtin_SVCVT<"svcvt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv4f32_ty>;
1520
// Top/bottom half variants (SVE2): lt = convert-long from even elements,
// nt = convert-narrow writing odd elements.
1521def int_aarch64_sve_fcvtlt_f32f16   : Builtin_SVCVT<"svcvtlt_f32_f16_m", llvm_nxv4f32_ty, llvm_nxv8f16_ty>;
1522def int_aarch64_sve_fcvtlt_f64f32   : Builtin_SVCVT<"svcvtlt_f64_f32_m", llvm_nxv2f64_ty, llvm_nxv4f32_ty>;
1523def int_aarch64_sve_fcvtnt_f16f32   : Builtin_SVCVT<"svcvtnt_f16_f32_m", llvm_nxv8f16_ty, llvm_nxv4f32_ty>;
1524def int_aarch64_sve_fcvtnt_f32f64   : Builtin_SVCVT<"svcvtnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
1525
// Round-to-odd narrowing (fcvtx) and its top-half (nt) variant.
1526def int_aarch64_sve_fcvtx_f32f64    : Builtin_SVCVT<"svcvtx_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
1527def int_aarch64_sve_fcvtxnt_f32f64  : Builtin_SVCVT<"svcvtxnt_f32_f64_m", llvm_nxv4f32_ty, llvm_nxv2f64_ty>;
1528
// Signed integer -> FP.
1529def int_aarch64_sve_scvtf_f16i32    : Builtin_SVCVT<"svcvt_f16_s32_m", llvm_nxv8f16_ty, llvm_nxv4i32_ty>;
1530def int_aarch64_sve_scvtf_f16i64    : Builtin_SVCVT<"svcvt_f16_s64_m", llvm_nxv8f16_ty, llvm_nxv2i64_ty>;
1531def int_aarch64_sve_scvtf_f32i64    : Builtin_SVCVT<"svcvt_f32_s64_m", llvm_nxv4f32_ty, llvm_nxv2i64_ty>;
1532def int_aarch64_sve_scvtf_f64i32    : Builtin_SVCVT<"svcvt_f64_s32_m", llvm_nxv2f64_ty, llvm_nxv4i32_ty>;
1533
// Unsigned integer -> FP.
1534def int_aarch64_sve_ucvtf_f16i32    : Builtin_SVCVT<"svcvt_f16_u32_m", llvm_nxv8f16_ty, llvm_nxv4i32_ty>;
1535def int_aarch64_sve_ucvtf_f16i64    : Builtin_SVCVT<"svcvt_f16_u64_m", llvm_nxv8f16_ty, llvm_nxv2i64_ty>;
1536def int_aarch64_sve_ucvtf_f32i64    : Builtin_SVCVT<"svcvt_f32_u64_m", llvm_nxv4f32_ty, llvm_nxv2i64_ty>;
1537def int_aarch64_sve_ucvtf_f64i32    : Builtin_SVCVT<"svcvt_f64_u32_m", llvm_nxv2f64_ty, llvm_nxv4i32_ty>;
1538
1539//
1540// Predicate creation
1541//
1542
1543def int_aarch64_sve_ptrue : AdvSIMD_SVE_PTRUE_Intrinsic;
1544
1545//
1546// Predicate operations
1547//
1548
// Bitwise operations on predicate registers. The _z suffix marks the
// zeroing forms (inactive lanes cleared).
1549def int_aarch64_sve_and_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
1550def int_aarch64_sve_bic_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
1551def int_aarch64_sve_eor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
1552def int_aarch64_sve_nand_z  : AdvSIMD_Pred2VectorArg_Intrinsic;
1553def int_aarch64_sve_nor_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
1554def int_aarch64_sve_orn_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
1555def int_aarch64_sve_orr_z   : AdvSIMD_Pred2VectorArg_Intrinsic;
// pfirst/pnext iterate over active predicate elements; punpkhi/punpklo
// widen the high/low half of a predicate.
1556def int_aarch64_sve_pfirst  : AdvSIMD_Pred1VectorArg_Intrinsic;
1557def int_aarch64_sve_pnext   : AdvSIMD_Pred1VectorArg_Intrinsic;
1558def int_aarch64_sve_punpkhi : AdvSIMD_SVE_PUNPKHI_Intrinsic;
1559def int_aarch64_sve_punpklo : AdvSIMD_SVE_PUNPKHI_Intrinsic;
1560
1561//
1562// Testing predicates
1563//
1564
// Boolean queries on a predicate under a governing predicate:
// any active element / first element active / last element active.
1565def int_aarch64_sve_ptest_any   : AdvSIMD_SVE_PTEST_Intrinsic;
1566def int_aarch64_sve_ptest_first : AdvSIMD_SVE_PTEST_Intrinsic;
1567def int_aarch64_sve_ptest_last  : AdvSIMD_SVE_PTEST_Intrinsic;
1568
1569//
1570// Gather loads:
1571//
1572
// Addressing modes mirror the scatter-store section below; each mode gets
// its own intrinsic so the backend can select the matching instruction form.

1573// scalar + vector, 64 bit unscaled offsets
1574def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
1575
1576// scalar + vector, 64 bit scaled offsets
1577def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
1578
1579//  scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
1580//  extended to 64 bits
1581def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
1582def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
1583
1584//  scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
1585//  to 64 bits
1586def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
1587def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
1588
1589// vector base + immediate index
// NOTE(review): "VecTorBase" casing is inconsistent with
// AdvSIMD_ScatterStore_VectorBase_Intrinsic used in the scatter section
// below. The class is presumably declared with this exact spelling earlier
// in the file; renaming it must be done at the declaration site and every
// use together, not here alone.
1590def int_aarch64_sve_ld1_gather_imm : AdvSIMD_GatherLoad_VecTorBase_Intrinsic;
1591
1592//
1593// Scatter stores:
1594//
1595
// Same addressing-mode breakdown as the gather loads above.

1596// scalar + vector, 64 bit unscaled offsets
1597def int_aarch64_sve_st1_scatter : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;
1598
1599// scalar + vector, 64 bit scaled offsets
1600def int_aarch64_sve_st1_scatter_index
1601    : AdvSIMD_ScatterStore_64bitOffset_Intrinsic;
1602
1603//  scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (zxtw)
1604//  extended to 64 bits
1605def int_aarch64_sve_st1_scatter_sxtw
1606    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
1607
1608def int_aarch64_sve_st1_scatter_uxtw
1609    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
1610
1611//  scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (zxtw) extended
1612//  to 64 bits
1613def int_aarch64_sve_st1_scatter_sxtw_index
1614    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
1615
1616def int_aarch64_sve_st1_scatter_uxtw_index
1617    : AdvSIMD_ScatterStore_32bitOffset_Intrinsic;
1618
1619// vector base + immediate index
1620def int_aarch64_sve_st1_scatter_imm : AdvSIMD_ScatterStore_VectorBase_Intrinsic;
1621
1622//
1623// SVE2 - Non-widening pairwise arithmetic
1624//
1625
// Predicated pairwise FP ops (add / max / maxnm / min / minnm) combining
// adjacent element pairs; result element type matches the inputs.
1626def int_aarch64_sve_faddp   : AdvSIMD_Pred2VectorArg_Intrinsic;
1627def int_aarch64_sve_fmaxp   : AdvSIMD_Pred2VectorArg_Intrinsic;
1628def int_aarch64_sve_fmaxnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
1629def int_aarch64_sve_fminp   : AdvSIMD_Pred2VectorArg_Intrinsic;
1630def int_aarch64_sve_fminnmp : AdvSIMD_Pred2VectorArg_Intrinsic;
1631
1632//
1633// SVE2 - Floating-point widening multiply-accumulate
1634//
1635
// fml{a,s}l{b,t}: widening multiply-accumulate/subtract of the bottom (b)
// or top (t) half-width elements; _lane variants add an immediate index.
1636def int_aarch64_sve_fmlalb        : SVE2_3VectorArg_Long_Intrinsic;
1637def int_aarch64_sve_fmlalb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
1638def int_aarch64_sve_fmlalt        : SVE2_3VectorArg_Long_Intrinsic;
1639def int_aarch64_sve_fmlalt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
1640def int_aarch64_sve_fmlslb        : SVE2_3VectorArg_Long_Intrinsic;
1641def int_aarch64_sve_fmlslb_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
1642def int_aarch64_sve_fmlslt        : SVE2_3VectorArg_Long_Intrinsic;
1643def int_aarch64_sve_fmlslt_lane   : SVE2_3VectorArgIndexed_Long_Intrinsic;
1644
1645//
1646// SVE2 - Floating-point integer binary logarithm
1647//
1648
1649def int_aarch64_sve_flogb : AdvSIMD_SVE_LOGB_Intrinsic;
1650
1651//
1652// SVE2 - Unary narrowing operations
1653//
1654
// Saturating extract-narrow: signed (sq), unsigned (uq), and
// signed-to-unsigned (squ). The *b forms write the bottom half; the *t
// forms merge into the top half of an existing vector (Merged class).
1655def int_aarch64_sve_sqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
1656def int_aarch64_sve_sqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
1657def int_aarch64_sve_sqxtunb : SVE2_1VectorArg_Narrowing_Intrinsic;
1658def int_aarch64_sve_sqxtunt : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
1659def int_aarch64_sve_uqxtnb  : SVE2_1VectorArg_Narrowing_Intrinsic;
1660def int_aarch64_sve_uqxtnt  : SVE2_Merged1VectorArg_Narrowing_Intrinsic;
1661
1662//
1663// SVE2 - Binary narrowing DSP operations
1664//
// Throughout this group: *b variants produce the bottom half of the
// narrowed result, *t variants merge into the top half of an existing
// vector; a leading r marks rounding forms.
1665def int_aarch64_sve_addhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
1666def int_aarch64_sve_addhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
1667
1668def int_aarch64_sve_raddhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
1669def int_aarch64_sve_raddhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
1670
1671def int_aarch64_sve_subhnb    : SVE2_2VectorArg_Narrowing_Intrinsic;
1672def int_aarch64_sve_subhnt    : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
1673
1674def int_aarch64_sve_rsubhnb   : SVE2_2VectorArg_Narrowing_Intrinsic;
1675def int_aarch64_sve_rsubhnt   : SVE2_Merged2VectorArg_Narrowing_Intrinsic;
1676
1677// Narrowing shift right
1678def int_aarch64_sve_shrnb     : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1679def int_aarch64_sve_shrnt     : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1680
1681def int_aarch64_sve_rshrnb    : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1682def int_aarch64_sve_rshrnt    : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1683
1684// Saturating shift right - signed input/output
1685def int_aarch64_sve_sqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1686def int_aarch64_sve_sqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1687
1688def int_aarch64_sve_sqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1689def int_aarch64_sve_sqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1690
1691// Saturating shift right - unsigned input/output
1692def int_aarch64_sve_uqshrnb   : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1693def int_aarch64_sve_uqshrnt   : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1694
1695def int_aarch64_sve_uqrshrnb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1696def int_aarch64_sve_uqrshrnt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1697
1698// Saturating shift right - signed input, unsigned output
1699def int_aarch64_sve_sqshrunb  : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1700def int_aarch64_sve_sqshrunt  : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1701
1702def int_aarch64_sve_sqrshrunb : SVE2_1VectorArg_Imm_Narrowing_Intrinsic;
1703def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1703def int_aarch64_sve_sqrshrunt : SVE2_2VectorArg_Imm_Narrowing_Intrinsic;
1704}
1705