X86CallingConv.td revision 263508
//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
 : CCIf<!strconcat("State.getTarget().getSubtarget<X86Subtarget>().", F), A>;
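// For reference, a sketch of how the predicate above is used (illustrative,
// not the exact TableGen-generated code): CCIfSubtarget<"hasSSE2()", X>
// guards the action X with a condition built from the string above, roughly
//   if (State.getTarget().getSubtarget<X86Subtarget>().hasSSE2()) { ... }
// inside the generated CC_*/RetCC_* functions.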

//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH, however this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in ST0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
]>;
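// Illustrative only (a sketch of how the rules above play out, not an
// additional rule): on X86-32, returning a 64-bit integer is legalized to
// two i32 parts, which the i32 rule above places in EAX and EDX, giving the
// familiar EAX:EDX pair; with SSE, a v4f32 (__m128) return value lands in
// XMM0.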

// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in ST0, unless they are
  // marked "inreg" (the attribute is borrowed here to select what is really
  // the sse-regparm calling convention), in which case they are returned in
  // XMM0 when SSE2 is available. Everything else follows the common X86
  // return convention.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
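// Illustrative only: under these rules a plain C "double f(void)" comes back
// in ST0, while the same return marked 'inreg' (sse-regparm) on an SSE2
// target comes back in XMM0.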

// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 SSE registers.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register.
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention returns FP values in XMM0 first, then XMM1.
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are returned in XMM0 first, then XMM1.
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;
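// Illustrative only: with these rules a SysV x86-64 function returning a
// single double uses XMM0, a {double, double} pair uses XMM0 and XMM1, and
// integer results still follow RetCC_X86Common (RAX first, then RDX).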

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;
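// Illustrative only: an __m64 return value is bit-converted to i64 here and
// then picked up by the common i64 rule, so it comes back in RAX rather than
// in an MMX register.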

// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;

// X86-64 WebKit_JS return-value convention.
def RetCC_X86_64_WebKit_JS : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: RAX
  CCIfType<[i64], CCAssignToReg<[RAX]>>
]>;

// X86-64 AnyReg return-value convention. No explicit register is specified
// for the return value. The register allocator is allowed and expected to
// choose any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the X86 C calling convention.
def RetCC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle JavaScript calls.
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<RetCC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_X86_64_AnyReg>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasFp256()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;
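// Illustrative only: for a non-vararg SysV call such as
//   void f(int a, long b, double x, __m128 v);
// a goes in EDI, b in RSI (the integer sequence), x in XMM0 and v in XMM1
// (the separate FP/vector sequence); a 7th integer argument would instead
// get an 8-byte, 8-byte-aligned stack slot.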

// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128-bit vectors are passed by pointer.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512-bit vectors are passed by pointer.
  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX; the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;
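// Illustrative only: Win64 argument slots are shared between the GPR and XMM
// sequences (the "shadow" lists above), so for
//   void f(int a, double b, __int64 c, float d);
// a goes in ECX, b in XMM1, c in R8 and d in XMM3; a __m128 argument is
// passed indirectly, as a pointer in the next available GPR slot.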

def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;

def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;

def CC_X86_64_WebKit_JS : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Integer/FP values are always stored in stack slots that are 8 bytes in size
  // and 8-byte aligned.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;

// No explicit register is specified for the AnyReg calling convention. The
// register allocator may assign the arguments to any free register.
//
// This calling convention is currently only supported by the stackmap and
// patchpoint intrinsics. All other uses will result in an assert on Debug
// builds. On Release builds we fall back to the X86 C calling convention.
def CC_X86_64_AnyReg : CallingConv<[
  CCCustom<"CC_X86_AnyReg_Error">
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in mmx registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasFp256()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;

def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
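// Illustrative only: for a plain cdecl call such as
//   int f(int a, double b);
// nothing above matches (no 'inreg', no 'nest'), so CC_X86_32_Common gives a
// a 4-byte stack slot and b an 8-byte, 4-byte-aligned slot; with
// regparm-style 'inreg' arguments, up to three i32 values go in EAX, EDX
// and ECX instead.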

def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
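// Illustrative only (assuming the front end marks the leading integer
// arguments 'inreg', as clang does for fastcall): for
//   void __fastcall f(int a, int b, int c);
// a goes in ECX, b in EDX, and c falls through to a stack slot via
// CC_X86_32_Common.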

def CC_X86_32_ThisCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through the stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  // The first integer argument is passed in ECX.
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;
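// Illustrative only: for a thiscall method the implicit 'this' pointer is the
// first i32 argument and therefore lands in ECX; an sret pointer and the
// remaining integer arguments fall through to stack slots via
// CC_X86_32_Common.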

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;

// X86-64 Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,

  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  CCIfCC<"CallingConv::WebKit_JS", CCDelegateTo<CC_X86_64_WebKit_JS>>,
  CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_X86_64_AnyReg>>,
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;

def CSR_MostRegs_64 : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                           R11, R12, R13, R14, R15, RBP,
                                           (sequence "XMM%u", 0, 15))>;

// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                  R13, R14, R15,
                                                  (sequence "YMM%u", 6, 15))>;

def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
                                                     R12, R13, R14, R15,
                                                     (sequence "ZMM%u", 6, 21),
                                                     K4, K5, K6, K7)>;

// Standard C + XMM 8-15
def CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
                                                 (sequence "XMM%u", 8, 15))>;

// Standard C + YMM 8-15
def CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "YMM%u", 8, 15))>;

def CSR_64_Intel_OCL_BI_AVX512    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "ZMM%u", 16, 31),
                                                  K4, K5, K6, K7)>;