/* Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 9.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#if __STDC_HOSTED__
#include <mm_malloc.h>
#endif

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
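
/* Usage sketch (illustrative): each two-bit field of the selector picks a
   source element, with fp0 choosing result element 0.  For example,
   _mm_shuffle_ps (V, V, _MM_SHUFFLE (0, 1, 2, 3)) reverses the order of
   the four elements of V, the idiom used by _mm_loadr_ps and
   _mm_storer_ps below.  */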

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}
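
/* Usage sketch (illustrative; elements listed low to high): with
   __a = _mm_set_ps (4.0f, 3.0f, 2.0f, 1.0f) giving [1 2 3 4] and
   __b = _mm_set_ps (8.0f, 7.0f, 6.0f, 5.0f) giving [5 6 7 8],
   _mm_add_ss (__a, __b) yields [6 2 3 4]: only element 0 is computed,
   elements 1-3 are taken unchanged from __a.  */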

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}
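
/* Usage sketch (illustrative): the all-ones/all-zeros masks combine with
   the logical operations above into a branchless element-wise select,
   e.g. r[i] = a[i] < b[i] ? x[i] : y[i]:
     __m128 __mask = _mm_cmplt_ps (__a, __b);
     __m128 __r = _mm_or_ps (_mm_and_ps (__mask, __x),
			     _mm_andnot_ps (__mask, __y));
   Here __a, __b, __x and __y stand for caller-provided vectors.  */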

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int __attribute__((__always_inline__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int __attribute__((__always_inline__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 64-bit integer according to the
   current rounding mode.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64 (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 64-bit integer.  */

/* Intel intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64 (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}

/* Microsoft intrinsic.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */

/* Intel intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}

/* Microsoft intrinsic.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128 __attribute__((__always_inline__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
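
/* Usage sketch (illustrative): with MASK built by _MM_SHUFFLE, the low two
   result elements are selected from A and the high two from B.  For
   example, _mm_shuffle_ps (__a, __a, _MM_SHUFFLE (2, 2, 2, 2)) broadcasts
   element 2 of __a to all four positions.  */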


/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64 bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64 bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}
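
/* Usage sketch (illustrative): combined with a packed comparison, the mask
   allows a scalar branch on vector results, e.g.
     if (_mm_movemask_ps (_mm_cmpgt_ps (__a, __b)) == 0xf)
   takes the branch only when every element of __a exceeds the
   corresponding element of __b.  */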

/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
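
/* Usage sketch (illustrative): the helpers above read-modify-write the
   MXCSR, so for instance
     _MM_SET_ROUNDING_MODE (_MM_ROUND_TOWARD_ZERO);
     _MM_SET_FLUSH_ZERO_MODE (_MM_FLUSH_ZERO_ON);
   selects truncation and flush-to-zero, and
     (_MM_GET_EXCEPTION_STATE () & _MM_EXCEPT_OVERFLOW)
   tests whether an overflow exception has been recorded.  */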

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be 16-byte
   aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

static __inline float __attribute__((__always_inline__))
_mm_cvtss_f32 (__m128 __A)
{
  return __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be 16-byte
   aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N)	__builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N)		_mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N)	 _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N)		_mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void __attribute__((__always_inline__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif
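
/* Usage sketch (illustrative; __p is a caller-provided pointer into data
   that will be read soon):
     _mm_prefetch ((char *) __p, _MM_HINT_T0);
   hints that the line containing *__p should be brought into all cache
   levels; _MM_HINT_NTA requests a non-temporal fetch instead.  */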

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}
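
/* Usage sketch (illustrative; __dst and __src are caller-provided,
   16-byte-aligned float arrays of __n elements, __n a multiple of 4):
     int __i;
     for (__i = 0; __i < __n; __i += 4)
       _mm_stream_ps (__dst + __i, _mm_load_ps (__src + __i));
     _mm_sfence ();
   The fence makes the preceding non-temporal stores visible to other
   agents before any later store.  */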

/* The execution of the next instruction is delayed by an
   implementation-specific amount of time.  The instruction does not
   modify the architectural state.  */
static __inline void __attribute__((__always_inline__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_unpcklps (__r0, __r1);			\
  __v4sf __t1 = __builtin_ia32_unpcklps (__r2, __r3);			\
  __v4sf __t2 = __builtin_ia32_unpckhps (__r0, __r1);			\
  __v4sf __t3 = __builtin_ia32_unpckhps (__r2, __r3);			\
  (row0) = __builtin_ia32_movlhps (__t0, __t1);				\
  (row1) = __builtin_ia32_movhlps (__t1, __t0);				\
  (row2) = __builtin_ia32_movlhps (__t2, __t3);				\
  (row3) = __builtin_ia32_movhlps (__t3, __t2);				\
} while (0)
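
/* Usage sketch (illustrative; __m is a caller-provided, 16-byte-aligned
   array of 16 floats holding a row-major 4x4 matrix):
     __m128 __row0 = _mm_load_ps (__m + 0);
     __m128 __row1 = _mm_load_ps (__m + 4);
     __m128 __row2 = _mm_load_ps (__m + 8);
     __m128 __row3 = _mm_load_ps (__m + 12);
     _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
   After the macro the four registers hold the columns of the original
   matrix.  */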

/* For backward source compatibility.  */
#ifdef __SSE2__
#include <emmintrin.h>
#endif

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */
