/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
public:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

protected:
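    // These flag bits are ORed into the X86Assembler condition codes below to
    // request extra handling when emitting double comparisons (see branchDouble()):
    // 'invert' means the operands of the ucomisd comparison are swapped, and
    // 'special' means extra code is emitted to handle the unordered (NaN) case
    // via the parity flag.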
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst).
    // For many operations the source may be a TrustedImm32, and the srcDst
    // operand may often be a memory location (explicitly described using an
    // Address object).
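    //
    // Illustrative usage sketch (not part of the interface; 'masm' denotes a
    // hypothetical instance of a concrete MacroAssembler subclass):
    //     masm.add32(TrustedImm32(1), X86Registers::eax);                // eax += 1 (emits inc)
    //     masm.and32(X86Registers::edx, Address(X86Registers::ebp, 8));  // *(ebp + 8) &= edx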

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }


    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address).  The source for a store may be a TrustedImm32.  Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
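    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above,
    // registers and offsets arbitrary):
    //     masm.load32(Address(X86Registers::ebp, 16), X86Registers::eax);   // eax = *(ebp + 16)
    //     masm.store32(TrustedImm32(42), Address(X86Registers::ebp, 16));   // *(ebp + 16) = 42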

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }


    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
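    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above,
    // registers arbitrary):
    //     masm.loadDouble(Address(X86Registers::ebp, 24), X86Registers::xmm0);
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);  // xmm0 += xmm1
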
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.m_value, dest);
#else
        move(address, scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
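        // cvttsd2si writes 0x80000000 (the x86 'integer indefinite' value) when the
        // source is NaN or out of 32-bit range, so that value serves as the failure
        // sentinel here; this is also why INT_MIN itself is reported as a failure.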
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data.  Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack.  Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
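    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above):
    //     masm.push(X86Registers::eax);  // *--esp = eax
    //     masm.pop(X86Registers::edx);   // edx = *esp++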

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }


    // Register move operations:
    //
    // Move values in registers.
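    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above):
    //     masm.move(TrustedImm32(0), X86Registers::eax);    // eax = 0 (emits xor eax, eax)
    //     masm.swap(X86Registers::eax, X86Registers::edx);  // exchange eax and edx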

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif


    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
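    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above):
    // a branch is emitted forward and linked once the target is known:
    //     Jump notEqual = masm.branch32(NotEqual, X86Registers::eax, TrustedImm32(5));
    //     // ... code executed when eax == 5 ...
    //     notEqual.link(&masm);  // bind the branch target here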

public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
            if (mask.m_value == 0xff)
                m_assembler.testb_rr(reg, reg);
            else
                m_assembler.testb_i8r(mask.m_value, reg);
        } else
            m_assembler.testl_i32r(mask.m_value, reg);
    }

    Jump branch(ResultCondition cond)
    {
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(cond, reg, mask);
        return branch(cond);
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        generateTest32(address, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }


    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation.  The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
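    //
    // Illustrative usage sketch (not part of the interface; 'masm' as above):
    // a checked add that falls through on success and branches on signed overflow:
    //     Jump overflowed = masm.branchAdd32(Overflow, X86Registers::edx, X86Registers::eax);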

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }


    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        generateTest32(address, mask);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
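        // The x86 condition codes come in complementary pairs that differ only in
        // the low bit (e.g. E/NE, L/GE, B/AE), so XOR-ing with 1 flips the condition.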
        return static_cast<RelationalCondition>(cond ^ 1);
    }

    void nop()
    {
        m_assembler.nop();
    }

    void memoryFence()
    {
        m_assembler.mfence();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

#if USE(MASM_PROBE)
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        void dump(const char* indentation = 0);
    private:
        void dumpCPURegisters(const char* indentation);
    };
#endif // USE(MASM_PROBE)

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
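        // Where the mask fits entirely within a single byte of the operand, test
        // just that byte; this shortens the instruction encoding compared to a
        // full 32-bit testl with a 4-byte immediate.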
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else if (!(mask.m_value & ~0xff))
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        else if (!(mask.m_value & ~0xff00))
            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
        else if (!(mask.m_value & ~0xff0000))
            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
        else if (!(mask.m_value & ~0xff000000))
            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h