1/*
2 * Copyright (C) 2008, 2013 Apple Inc.
3 * Copyright (C) 2009, 2010 University of Szeged
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
16 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
19 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
20 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
21 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
22 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
23 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#ifndef MacroAssemblerARM_h
29#define MacroAssemblerARM_h
30
31#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
32
33#include "ARMAssembler.h"
34#include "AbstractMacroAssembler.h"
35
36namespace JSC {
37
38class MacroAssemblerARM : public AbstractMacroAssembler<ARMAssembler> {
39    static const int DoubleConditionMask = 0x0f;
40    static const int DoubleConditionBitSpecial = 0x10;
41    COMPILE_ASSERT(!(DoubleConditionBitSpecial & DoubleConditionMask), DoubleConditionBitSpecial_should_not_interfere_with_ARMAssembler_Condition_codes);
42public:
43    typedef ARMRegisters::FPRegisterID FPRegisterID;
44
    // Conditions for integer comparisons. Values are ARMAssembler's raw ARM
    // condition-field encodings, so they can be passed straight to the assembler.
    enum RelationalCondition {
        Equal = ARMAssembler::EQ,
        NotEqual = ARMAssembler::NE,
        Above = ARMAssembler::HI,
        AboveOrEqual = ARMAssembler::CS,
        Below = ARMAssembler::CC,
        BelowOrEqual = ARMAssembler::LS,
        GreaterThan = ARMAssembler::GT,
        GreaterThanOrEqual = ARMAssembler::GE,
        LessThan = ARMAssembler::LT,
        LessThanOrEqual = ARMAssembler::LE
    };
57
    // Conditions tested against the flags set by a flag-setting ("s"-suffixed)
    // arithmetic/logical operation; used by the branchXXX(ResultCondition, ...) methods.
    enum ResultCondition {
        Overflow = ARMAssembler::VS,
        Signed = ARMAssembler::MI,
        PositiveOrZero = ARMAssembler::PL,
        Zero = ARMAssembler::EQ,
        NonZero = ARMAssembler::NE
    };
65
    // Conditions for floating-point comparisons. DoubleConditionBitSpecial is an
    // extra flag (outside the ARM condition field, see DoubleConditionMask) marking
    // the two conditions whose ordered/unordered behavior needs special handling.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMAssembler::EQ,
        DoubleNotEqual = ARMAssembler::NE | DoubleConditionBitSpecial,
        DoubleGreaterThan = ARMAssembler::GT,
        DoubleGreaterThanOrEqual = ARMAssembler::GE,
        DoubleLessThan = ARMAssembler::CC,
        DoubleLessThanOrEqual = ARMAssembler::LS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMAssembler::EQ | DoubleConditionBitSpecial,
        DoubleNotEqualOrUnordered = ARMAssembler::NE,
        DoubleGreaterThanOrUnordered = ARMAssembler::HI,
        DoubleGreaterThanOrEqualOrUnordered = ARMAssembler::CS,
        DoubleLessThanOrUnordered = ARMAssembler::LT,
        DoubleLessThanOrEqualOrUnordered = ARMAssembler::LE,
    };
82
83    static const RegisterID stackPointerRegister = ARMRegisters::sp;
84    static const RegisterID framePointerRegister = ARMRegisters::fp;
85    static const RegisterID linkRegister = ARMRegisters::lr;
86
87    static const Scale ScalePtr = TimesFour;
88
    // 32-bit add, emitting flag-setting "adds" so a following branchAdd32 can
    // test Overflow/Signed/Zero. Memory forms go through scratch registers S0/S1.

    // dest += src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.adds(dest, dest, src);
    }

    // dest = op1 + op2.
    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.adds(dest, op1, op2);
    }

    // [address] += imm (read-modify-write via S1).
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // dest += imm; getImm may materialize the constant in S0.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest += *src.m_ptr (absolute address loaded via S1).
    void add32(AbsoluteAddress src, RegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
        add32(ARMRegisters::S1, dest);
    }

    // dest += [src].
    void add32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        add32(ARMRegisters::S1, dest);
    }

    // dest = src + imm.
    void add32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.adds(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
128
    // 32-bit bitwise AND (flag-setting). The immediate forms ask getImm for an
    // inverted encoding; if one is returned, "bics" (AND with complement) is
    // emitted instead of "ands" so the constant fits an ARM immediate.

    // dest &= src.
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.bitAnds(dest, dest, src);
    }

    // dest = op1 & op2.
    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.bitAnds(dest, op1, op2);
    }

    // dest &= imm, using bics when the inverted immediate encodes more cheaply.
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(dest, dest, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.bitAnds(dest, dest, w);
    }

    // dest = src & imm.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(dest, src, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.bitAnds(dest, src, w);
    }

    // dest &= [src] (loaded via S1).
    void and32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        and32(ARMRegisters::S1, dest);
    }
162
    // Logical shift left. Register shift amounts are masked to 0..31 (into S0)
    // to match JS semantics; immediate amounts are masked at compile time.

    // dest <<= shiftAmount.
    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    // dest = src << (shiftAmount & 0x1f).
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.lslRegister(src, ARMRegisters::S0));
    }

    // dest <<= (imm & 0x1f).
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsl(dest, imm.m_value & 0x1f));
    }

    // dest = src << (imm & 0x1f).
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsl(src, imm.m_value & 0x1f));
    }
185
    // dest = op1 * op2 (flag-setting "muls"). ARM's MUL disallows the
    // accumulator register being reused as an operand in the patterns handled
    // below, so when op2 aliases dest we either swap the operands or copy op2
    // to scratch S0.
    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            if (op1 == dest) {
                move(op2, ARMRegisters::S0);
                op2 = ARMRegisters::S0;
            } else {
                // Swap the operands.
                RegisterID tmp = op1;
                op1 = op2;
                op2 = tmp;
            }
        }
        m_assembler.muls(dest, op1, op2);
    }

    // dest *= src.
    void mul32(RegisterID src, RegisterID dest)
    {
        mul32(src, dest, dest);
    }

    // dest = src * imm; the immediate is materialized in S0 first.
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, ARMRegisters::S0);
        m_assembler.muls(dest, src, ARMRegisters::S0);
    }
212
    // srcDest = -srcDest, via flag-setting reverse-subtract from 0 (rsbs).
    void neg32(RegisterID srcDest)
    {
        m_assembler.rsbs(srcDest, srcDest, ARMAssembler::getOp2Byte(0));
    }
217
    // 32-bit bitwise OR (flag-setting "orrs"). Memory/immediate forms use S0/S1.

    // dest |= src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orrs(dest, dest, src);
    }

    // *dest.m_ptr |= src (read-modify-write; address in S0, value in S1).
    void or32(RegisterID src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), ARMRegisters::S0);
        load32(Address(ARMRegisters::S0), ARMRegisters::S1);
        or32(src, ARMRegisters::S1);
        store32(ARMRegisters::S1, ARMRegisters::S0);
    }

    // dest |= imm.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orrs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest = src | imm.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.orrs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest = op1 | op2.
    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orrs(dest, op1, op2);
    }
245
    // Arithmetic (sign-extending) shift right; register amounts masked to 0..31.

    // dest >>= shiftAmount (arithmetic).
    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    // dest = src >> (shiftAmount & 0x1f), arithmetic.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.asrRegister(src, ARMRegisters::S0));
    }

    // dest >>= (imm & 0x1f), arithmetic.
    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    // dest = src >> (imm & 0x1f), arithmetic.
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.asr(src, imm.m_value & 0x1f));
    }
268
    // Logical (zero-filling) shift right; register amounts masked to 0..31.

    // dest >>>= shiftAmount.
    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    // dest = src >>> (shiftAmount & 0x1f).
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        ARMWord w = ARMAssembler::getOp2Byte(0x1f);
        m_assembler.bitAnd(ARMRegisters::S0, shiftAmount, w);

        m_assembler.movs(dest, m_assembler.lsrRegister(src, ARMRegisters::S0));
    }

    // dest >>>= (imm & 0x1f).
    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsr(dest, imm.m_value & 0x1f));
    }

    // dest = src >>> (imm & 0x1f).
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.movs(dest, m_assembler.lsr(src, imm.m_value & 0x1f));
    }
291
    // 32-bit subtract (flag-setting "subs"). Memory forms use scratch S1.

    // dest -= src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subs(dest, dest, src);
    }

    // dest -= imm.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // [address] -= imm (read-modify-write via S1).
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // dest -= [src].
    void sub32(Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        sub32(ARMRegisters::S1, dest);
    }

    // dest = src - imm.
    void sub32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subs(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
319
    // 32-bit bitwise XOR (flag-setting "eors"). XOR with -1 is strength-reduced
    // to a single "mvns" (move-not), since -1 has no direct ARM immediate encoding.

    // dest ^= src.
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.eors(dest, dest, src);
    }

    // dest = op1 ^ op2.
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eors(dest, op1, op2);
    }

    // dest ^= imm (bitwise NOT when imm == -1).
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, dest);
        else
            m_assembler.eors(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // dest = src ^ imm (bitwise NOT of src when imm == -1).
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvns(dest, src);
        else
            m_assembler.eors(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }
345
    // dest = number of leading zero bits in src, using the CLZ instruction.
    // CLZ only exists on ARMv5 and later; on older targets this operation is
    // unsupported and must never be reached.
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
#if WTF_ARM_ARCH_AT_LEAST(5)
        m_assembler.clz(dest, src);
#else
        UNUSED_PARAM(src);
        UNUSED_PARAM(dest);
        RELEASE_ASSERT_NOT_REACHED();
#endif
    }
356
    // Zero-extending 8-bit loads (LoadUint8) and a sign-extending variant.

    // dest = zero-extended byte at [address.base + address.offset].
    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset);
    }

    // dest = zero-extended byte at [base + index << scale + offset].
    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // dest = zero-extended byte at the absolute address (address goes via S0).
    void load8(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), ARMRegisters::S0);
        m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, ARMRegisters::S0, 0);
    }

    // dest = sign-extended byte at [base + index << scale + offset].
    // Uses the halfword-transfer emitter: presumably LDRSB shares the
    // "extra load/store" encoding with LDRH in ARMAssembler — verify there.
    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
377
    // 16-bit loads: zero-extending (LoadUint16) and sign-extending (LoadInt16).

    // dest = zero-extended halfword at [address.base + address.offset].
    void load16(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset);
    }

    // dest = zero-extended halfword at [base + index << scale + offset].
    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // dest = sign-extended halfword at [base + index << scale + offset].
    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
392
    // 32-bit loads.

    // dest = word at [address.base + address.offset].
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.offset);
    }

    // dest = word at [base + index << scale + offset].
    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
402
#if CPU(ARMV5_OR_LOWER)
    // On ARMv5 and older, word loads require alignment, so the unaligned case
    // needs a dedicated out-of-line implementation (defined elsewhere).
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest);
#else
    // ARMv6+ supports unaligned word access, so a plain load32 suffices.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }
#endif

    // 16-bit load that tolerates unaligned addresses; delegates to load16.
    // NOTE(review): relies on the target permitting unaligned LDRH — confirm
    // for pre-ARMv6 configurations.
    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
416
    // Traps with a breakpoint, leaving the abort reason in S0 (and optional
    // auxiliary data in S1) so a debugger can inspect why execution stopped.

    void abortWithReason(AbortReason reason)
    {
        move(TrustedImm32(reason), ARMRegisters::S0);
        breakpoint();
    }

    // As above, with an extra diagnostic value placed in S1.
    void abortWithReason(AbortReason reason, intptr_t misc)
    {
        move(TrustedImm32(misc), ARMRegisters::S1);
        abortWithReason(reason);
    }
428
    // Emits a pointer-sized load whose instruction can later be repatched
    // (e.g. converted to a different operation), returning a label marking it.
    // The offset must fit 0..255 so the single emitted instruction stays
    // convertible.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return result;
    }
436
    // Load with a 32-bit offset that will be patched in later: a unique
    // constant-pool load materializes the (initially 0) offset into S0, then a
    // register-offset load uses it. Returns the label used for patching.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0);
        return dataLabel;
    }
444
445    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
446    {
447        return value >= -4095 && value <= 4095;
448    }
449
    // Patchable load whose offset must already fit a single instruction's
    // immediate field (see isCompactPtrAlignedAddressOffset). Chooses the
    // add-offset (dtrUp) or subtract-offset (dtrDown) encoding by sign.
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabelCompact dataLabel(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        if (address.offset >= 0)
            m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        else
            m_assembler.dtrDown(ARMAssembler::LoadUint32, dest, address.base, address.offset);
        return dataLabel;
    }
460
    // Store counterpart of load32WithAddressOffsetPatch: materializes a
    // patchable 32-bit offset into S0 via a unique constant-pool load, then
    // stores src at [address.base + S0].
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, 0);
        m_assembler.dtrUpRegister(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0);
        return dataLabel;
    }
468
    // 8-bit stores. Immediate/absolute forms stage values through S0/S1.

    // [base + index << scale + offset] = low byte of src.
    void store8(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // [address.base + address.offset] = low byte of src.
    void store8(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, address.base, address.offset);
    }

    // *address = low byte of src (absolute address via S0).
    void store8(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), ARMRegisters::S0);
        m_assembler.dtrUp(ARMAssembler::StoreUint8, src, ARMRegisters::S0, 0);
    }

    // [address] = low byte of imm (immediate staged in S1).
    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, ARMRegisters::S1);
        store8(ARMRegisters::S1, address);
    }
490
491    void store8(TrustedImm32 imm, const void* address)
492    {
493        move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0);
494        move(imm, ARMRegisters::S1);
495        m_assembler.dtrUp(ARMAssembler::StoreUint8, ARMRegisters::S1, ARMRegisters::S0, 0);
496    }
497
    // [base + index << scale + offset] = low halfword of src.
    void store16(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }
502
    // 32-bit stores. Immediate values are staged in S1; absolute addresses are
    // materialized into S0 (via a constant-pool load for the const void* forms).

    // [address.base + address.offset] = src.
    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset);
    }

    // [base + index << scale + offset] = src.
    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // [address] = imm (immediate staged in S1).
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
    }

    // [base + index << scale + offset] = imm.
    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, ARMRegisters::S1);
        m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // *address = src (absolute address loaded into S0 from the constant pool).
    void store32(RegisterID src, const void* address)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0);
    }

    // *address = imm (address in S0, immediate in S1).
    void store32(TrustedImm32 imm, const void* address)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.moveImm(imm.m_value, ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0);
    }
537
    // Machine-stack push/pop helpers. The pair forms push src2 first and pop
    // dest1 first, so pushPair(a, b) followed by popPair(x, y) restores x = a,
    // y = b.

    // dest = pop().
    void pop(RegisterID dest)
    {
        m_assembler.pop(dest);
    }

    // dest1 = pop(); dest2 = pop() — mirror order of pushPair.
    void popPair(RegisterID dest1, RegisterID dest2)
    {
        m_assembler.pop(dest1);
        m_assembler.pop(dest2);
    }

    // push(src).
    void push(RegisterID src)
    {
        m_assembler.push(src);
    }

    // push([address]) — value loaded via S1.
    void push(Address address)
    {
        load32(address, ARMRegisters::S1);
        push(ARMRegisters::S1);
    }

    // push(imm) — immediate staged in S0.
    void push(TrustedImm32 imm)
    {
        move(imm, ARMRegisters::S0);
        push(ARMRegisters::S0);
    }

    // push(src2); push(src1).
    void pushPair(RegisterID src1, RegisterID src2)
    {
        m_assembler.push(src2);
        m_assembler.push(src1);
    }
571
    // Register/immediate moves. Register-to-register move is elided when
    // src == dest; pointer immediates are funneled through the 32-bit path
    // (pointers are 32-bit on this target).

    // dest = imm.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.moveImm(imm.m_value, dest);
    }

    // dest = src (no instruction emitted when they alias).
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    // dest = pointer immediate.
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }
587
588    void swap(RegisterID reg1, RegisterID reg2)
589    {
590        xor32(reg1, reg2);
591        xor32(reg2, reg1);
592        xor32(reg1, reg2);
593    }
594
    // On this 32-bit target a pointer is the same width as an int32, so both
    // "extensions" reduce to a plain register move (elided when src == dest).

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
    }
606
    // 8-bit compare-and-branch: load the byte (zero-extended) into S1, then
    // defer to the 32-bit compare. The BaseIndex form asserts the immediate
    // fits in a byte, since the loaded value is zero-extended.

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));
        load8(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        move(TrustedImmPtr(left.m_ptr), ARMRegisters::S1);
        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
626
    // 32-bit compare-and-branch. Memory operands are loaded into S1 first.
    // useConstantPool is forwarded to the assembler's jump emitter (non-zero
    // requests a constant-pool-based branch — see ARMAssembler::jmp).

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right, int useConstantPool = 0)
    {
        m_assembler.cmp(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    // internalCompare32 (defined elsewhere in this class) handles immediate
    // encoding for the comparison.
    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right, int useConstantPool = 0)
    {
        internalCompare32(left, right);
        return Jump(m_assembler.jmp(ARMCondition(cond), useConstantPool));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, ARMRegisters::S1);
        return branch32(cond, left, ARMRegisters::S1);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Same as the BaseIndex form, but tolerates unaligned source data.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        load32WithUnalignedHalfWords(left, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }
668
    // Test-and-branch: AND the operand with a mask (default -1 = test the whole
    // value) and branch on Zero/NonZero. Memory operands are loaded into S1.

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load8(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        load8(Address(ARMRegisters::S1), ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        m_assembler.tst(reg, mask);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Immediate-mask form: if getImm returns an inverted encoding, use "bics"
    // (AND with complement, result discarded into S0) to set the flags; the
    // flags are equivalent for the Zero/NonZero tests asserted above.
    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        ASSERT((cond == Zero) || (cond == NonZero));
        ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
        if (w & ARMAssembler::Op2InvertedImmediate)
            m_assembler.bics(ARMRegisters::S0, reg, w & ~ARMAssembler::Op2InvertedImmediate);
        else
            m_assembler.tst(reg, w);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        load32(address, ARMRegisters::S1);
        return branchTest32(cond, ARMRegisters::S1, mask);
    }
717
    // Unconditional jumps.

    // Emit a (linkable) unconditional branch and return its Jump handle.
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    // Indirect jump to the address in a register.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Indirect jump through memory, by loading the target directly into pc.
    void jump(Address address)
    {
        load32(address, ARMRegisters::pc);
    }

    // Indirect jump through an absolute address (address staged in S0).
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0);
        load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc);
    }
738
    // Transfers between one double FP register and a pair of core registers
    // using vmov; dest1/src1 receives/provides the low word, dest2/src2 the high.

    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    // The trailing FPRegisterID scratch parameter is unused on this backend.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID)
    {
        m_assembler.vmov(dest, src1, src2);
    }
748
    // Add, then branch on the resulting flags (add32 emits flag-setting "adds").

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(op1, op2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // Read-modify-write on an absolute address; flags come from the final add.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero)
            || (cond == NonZero) || (cond == PositiveOrZero));
        add32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    // dest += [src] (value loaded into S0), branching on the add's flags.
    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        load32(src, ARMRegisters::S0);
        return branchAdd32(cond, dest, ARMRegisters::S0, dest);
    }
    // Overflow-detecting multiply: computes the full 64-bit product
    // (high word in S1, low word in dest), then compares the high word
    // against the sign-extension of the low word (asr #31). They are equal
    // iff the product fits in a signed 32-bit value, so a following NE
    // branch fires on overflow. Operand aliasing is resolved as in mul32.
    void mull32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op2 == dest) {
            if (op1 == dest) {
                move(op2, ARMRegisters::S0);
                op2 = ARMRegisters::S0;
            } else {
                // Swap the operands.
                RegisterID tmp = op1;
                op1 = op2;
                op2 = tmp;
            }
        }
        m_assembler.mull(ARMRegisters::S1, dest, op1, op2);
        m_assembler.cmp(ARMRegisters::S1, m_assembler.asr(dest, 31));
    }
810
    // Multiply-and-branch. ARM's MUL does not set the overflow flag, so the
    // Overflow condition is implemented via mull32's 64-bit product check and
    // rewritten to a NonZero (NE) branch on the comparison it emits.

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            mull32(src1, src2, dest);
            cond = NonZero;
        }
        else
            mul32(src1, src2, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    // Immediate form; the constant is staged in S0 for the overflow path.
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        if (cond == Overflow) {
            move(imm, ARMRegisters::S0);
            mull32(ARMRegisters::S0, src, dest);
            cond = NonZero;
        }
        else
            mul32(imm, src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
840
    // Subtract, then branch on the resulting flags (sub32/subs are flag-setting).

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        sub32(src, imm, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        m_assembler.subs(dest, op1, op2);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
868
    // Negate srcDest (flag-setting rsbs), then branch on the resulting flags.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
        neg32(srcDest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
875
    // OR, then branch on the resulting flags. Overflow is not a valid
    // condition here: ORR does not affect the V flag.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
        or32(src, dest);
        return Jump(m_assembler.jmp(ARMCondition(cond)));
    }
882
    // Jumps whose targets can be repatched after code generation.

    // Unconditional patchable jump; the second argument to jmp requests the
    // constant-pool-based (repatchable) branch form.
    PatchableJump patchableJump()
    {
        return PatchableJump(m_assembler.jmp(ARMAssembler::AL, 1));
    }

    // Conditional patchable branch: load the (patchable) target into S1, then
    // conditionally bx to it. Both instructions carry the same condition so
    // the fall-through path skips them entirely.
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        internalCompare32(reg, imm);
        Jump jump(m_assembler.loadBranchTarget(ARMRegisters::S1, ARMCondition(cond), true));
        m_assembler.bx(ARMRegisters::S1, ARMCondition(cond));
        return PatchableJump(jump);
    }
895
    // Emits a BKPT instruction (immediate 0) to trap into the debugger.
    void breakpoint()
    {
        m_assembler.bkpt(0);
    }
900
901    Call nearCall()
902    {
903        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
904        return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear);
905    }
906
907    Call call(RegisterID target)
908    {
909        return Call(m_assembler.blx(target), Call::None);
910    }
911
912    void call(Address address)
913    {
914        call32(address.base, address.offset);
915    }
916
917    void ret()
918    {
919        m_assembler.bx(linkRegister);
920    }
921
    // dest = (left <cond> right) ? 1 : 0. Implemented as CMP, then an
    // unconditional MOV dest,#0 followed by a conditional MOV dest,#1.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Same as above with an immediate right-hand side; getImm may materialize
    // the constant into scratch S0 when it is not encodable as an operand2.
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Byte-sized compare: load the byte into scratch S1, then reuse compare32.
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, ARMRegisters::S1);
        compare32(cond, ARMRegisters::S1, right, dest);
    }

    // dest = ((reg & mask) <cond>) ? 1 : 0. A mask of -1 tests reg itself.
    // NOTE(review): the mask == -1 path emits cmp(0, reg), i.e. the first
    // argument is register index 0 (r0) rather than reg — this looks like it
    // compares r0 with reg instead of testing reg against zero (cf. the
    // tst(reg, reg) idiom used elsewhere). Confirm against
    // ARMAssembler::cmp's operand order before relying on this path.
    void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmp(0, reg);
        else
            m_assembler.tst(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(0));
        m_assembler.mov(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond));
    }

    // Memory variant: load the word into scratch S1, then test as above.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }

    // Byte variant: load the byte into scratch S1, then test as above.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, ARMRegisters::S1);
        test32(cond, ARMRegisters::S1, mask, dest);
    }
963
    // dest = src + imm; getImm materializes a non-encodable constant via S0.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, src, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
    }

    // Read-modify-write add of an immediate to a 32-bit value at an absolute
    // address, using scratch S1 for both the value and the address.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        add32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }

    // 64-bit add of a (sign-extended) 32-bit immediate to a little-endian
    // two-word value at an absolute address. The low word is updated with a
    // flag-setting ADDS/SUBS so the high word can be adjusted with ADC/SBC.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        ARMWord tmp;

        // S1 = address; S0 = low word.
        move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // Prefer a single encodable operand2, trying the negated form too.
        // NOTE(review): for imm.m_value == INT_MIN, -imm.m_value is signed
        // overflow (UB); in practice getOp2 on the wrapped value fails and the
        // fallback path is taken — confirm no compiler exploits the UB here.
        if ((tmp = ARMAssembler::getOp2(imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else if ((tmp = ARMAssembler::getOp2(-imm.m_value)) != ARMAssembler::InvalidImmediate)
            m_assembler.subs(ARMRegisters::S0, ARMRegisters::S0, tmp);
        else {
            // getImm clobbers S1, so the address must be rematerialized.
            m_assembler.adds(ARMRegisters::S0, ARMRegisters::S0, m_assembler.getImm(imm.m_value, ARMRegisters::S1));
            move(TrustedImmPtr(address.m_ptr), ARMRegisters::S1);
        }
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, 0);

        // High word: propagate carry (positive imm) or borrow (negative imm).
        m_assembler.dtrUp(ARMAssembler::LoadUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
        if (imm.m_value >= 0)
            m_assembler.adc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        else
            m_assembler.sbc(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        m_assembler.dtrUp(ARMAssembler::StoreUint32, ARMRegisters::S0, ARMRegisters::S1, sizeof(ARMWord));
    }

    // Read-modify-write subtract of an immediate at an absolute address.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, ARMRegisters::S1);
        sub32(imm, ARMRegisters::S1);
        store32(ARMRegisters::S1, address.m_ptr);
    }
1007
    // Load a 32-bit value from an absolute address: materialize the address
    // into scratch S0 (via a unique constant-pool load), then LDR through it.
    void load32(const void* address, RegisterID dest)
    {
        m_assembler.ldrUniqueImmediate(ARMRegisters::S0, reinterpret_cast<ARMWord>(address));
        m_assembler.dtrUp(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0);
    }

    // Branch comparing a value at an absolute address with a register.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // Branch comparing a value at an absolute address with an immediate.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        load32(left.m_ptr, ARMRegisters::S1);
        return branch32(cond, ARMRegisters::S1, right);
    }

    // PC-relative table jump: pc += index << scale. On ARM, reading PC yields
    // the address of the current instruction + 8, so the jump table is
    // expected to start two instructions after the ADD.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);
        m_assembler.add(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(index, scale));

        // NOP the default prefetching
        m_assembler.mov(ARMRegisters::r0, ARMRegisters::r0);
    }
1034
    // Linkable far call: reserve space so the two-instruction sequence is not
    // split by a constant-pool flush, then load the target into S1 and BLX.
    Call call()
    {
        ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord));
        m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true);
        return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable);
    }

    // A tail call is just a jump recorded as a Call for the link step.
    Call tailRecursiveCall()
    {
        return Call::fromTailJump(jump());
    }

    // Convert a previously emitted jump into a linkable tail call.
    Call makeTailRecursiveCall(Jump oldJump)
    {
        return Call::fromTailJump(oldJump);
    }
1051
    // Move a pointer constant into dest via a unique (non-shared) constant
    // pool slot, so the value can be repatched later through the DataLabelPtr.
    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
    {
        DataLabelPtr dataLabel(this);
        m_assembler.ldrUniqueImmediate(dest, reinterpret_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    // 32-bit immediate variant of the patchable move above.
    DataLabel32 moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
    {
        DataLabel32 dataLabel(this);
        m_assembler.ldrUniqueImmediate(dest, static_cast<ARMWord>(initialValue.m_value));
        return dataLabel;
    }

    // Patchable compare-and-branch: load a repatchable constant into S1, then
    // branch on the comparison. ensureSpace keeps the patchable sequence
    // contiguous (no constant-pool flush in the middle).
    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S1);
        Jump jump = branch32(cond, left, ARMRegisters::S1, true);
        return jump;
    }

    // As above, with the left operand loaded from memory (into S1), so the
    // repatchable constant goes into S0 instead.
    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, ARMRegisters::S1);
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    // 32-bit-immediate variant of the memory-operand patchable branch.
    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        load32(left, ARMRegisters::S1);
        ensureSpace(3 * sizeof(ARMWord), 2 * sizeof(ARMWord));
        dataLabel = moveWithPatch(initialRightValue, ARMRegisters::S0);
        Jump jump = branch32(cond, ARMRegisters::S0, ARMRegisters::S1, true);
        return jump;
    }

    // Store a repatchable pointer constant to memory (via scratch S1).
    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
    {
        DataLabelPtr dataLabel = moveWithPatch(initialValue, ARMRegisters::S1);
        store32(ARMRegisters::S1, address);
        return dataLabel;
    }

    // Convenience overload: store a repatchable null pointer.
    DataLabelPtr storePtrWithPatch(ImplicitAddress address)
    {
        return storePtrWithPatch(TrustedImmPtr(0), address);
    }
1103
    // Floating point operators
    // Capability queries: double-precision support and sqrt require the VFP
    // unit (detected at startup into s_isVFPPresent); truncation and abs are
    // not implemented on this backend, so the JIT must use fallback paths.
    static bool supportsFloatingPoint()
    {
        return s_isVFPPresent;
    }

    static bool supportsFloatingPointTruncate()
    {
        return false;
    }

    static bool supportsFloatingPointSqrt()
    {
        return s_isVFPPresent;
    }
    static bool supportsFloatingPointAbs() { return false; }
1120
    // Load a single-precision float from [base + (index << scale) + offset].
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Load a double from [base + offset].
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset);
    }

    // Load a double from [base + (index << scale) + offset].
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Load a double from an absolute address, via scratch S0.
    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
        m_assembler.doubleDtrUp(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0);
    }

    // Store a single-precision float to [base + (index << scale) + offset].
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Store a double to [base + offset].
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset);
    }

    // Store a double to [base + (index << scale) + offset].
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset);
    }

    // Store a double to an absolute address, via scratch S0.
    void storeDouble(FPRegisterID src, TrustedImmPtr address)
    {
        move(TrustedImm32(reinterpret_cast<ARMWord>(address.m_value)), ARMRegisters::S0);
        m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0);
    }
1162
    // Register-to-register double move; elided when src == dest.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov_f64(dest, src);
    }

    // dest += src
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, dest, src);
    }

    // dest = op1 + op2
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd_f64(dest, op1, op2);
    }

    // dest += [memory]; uses FP scratch SD0.
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    // dest += [absolute address]; uses FP scratch SD0.
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), ARMRegisters::SD0);
        addDouble(ARMRegisters::SD0, dest);
    }

    // dest /= src
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, dest, src);
    }

    // dest = op1 / op2
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv_f64(dest, op1, op2);
    }

    // Memory-operand divide; deliberately asserts because this path has never
    // been exercised on this backend.
    void divDouble(Address src, FPRegisterID dest)
    {
        RELEASE_ASSERT_NOT_REACHED(); // Untested
        loadDouble(src, ARMRegisters::SD0);
        divDouble(ARMRegisters::SD0, dest);
    }

    // dest -= src
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, dest, src);
    }

    // dest = op1 - op2
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub_f64(dest, op1, op2);
    }

    // dest -= [memory]; uses FP scratch SD0.
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        subDouble(ARMRegisters::SD0, dest);
    }

    // dest *= src
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, dest, src);
    }

    // dest *= [memory]; uses FP scratch SD0.
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, ARMRegisters::SD0);
        mulDouble(ARMRegisters::SD0, dest);
    }

    // dest = op1 * op2
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul_f64(dest, op1, op2);
    }

    // dest = sqrt(src)
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt_f64(dest, src);
    }

    // dest = |src| (note: supportsFloatingPointAbs() reports false, so the
    // JIT is not expected to emit this on this backend).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs_f64(dest, src);
    }

    // dest = -src
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg_f64(dest, src);
    }
1254
    // Signed int32 -> double. 'dest << 1' maps the double register number to
    // its low single-precision half: move the GPR bits there, then VCVT into
    // the full double register.
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov_vfp32(dest << 1, src);
        m_assembler.vcvt_f64_s32(dest, dest << 1);
    }

    // Memory variant: load the int32 into scratch S1 first.
    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        load32(src, ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // Absolute-address variant: materialize the pointer, load, then convert.
    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
    {
        move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
        load32(Address(ARMRegisters::S1), ARMRegisters::S1);
        convertInt32ToDouble(ARMRegisters::S1, dest);
    }

    // float -> double widening conversion.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f64_f32(dst, src);
    }

    // double -> float narrowing conversion.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvt_f32_f64(dst, src);
    }
1283
    // Compare two doubles and branch. VCMP sets the FP flags, VMRS copies them
    // to APSR. DoubleConditionBitSpecial marks "…OrUnordered" conditions: a
    // VS-predicated CMP of S0 with itself forces the flags to "equal" when the
    // comparison was unordered (NaN), making the subsequent branch taken.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp_f64(left, right);
        m_assembler.vmrs_apsr();
        if (cond & DoubleConditionBitSpecial)
            m_assembler.cmp(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::VS);
        return Jump(m_assembler.jmp(static_cast<ARMAssembler::Condition>(cond & ~DoubleConditionMask)));
    }
1292
    // Truncates 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToInt32(src, dest);

        // VCVT saturates out-of-range inputs to INT_MIN/INT_MAX. Compute
        // (dest + 1) & ~1 in S0: both saturation values collapse to
        // 0x80000000, which the CMP below detects as failure.
        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        ARMWord w = ARMAssembler::getOp2(0x80000000);
        ASSERT(w != ARMAssembler::InvalidImmediate);
        m_assembler.cmp(ARMRegisters::S0, w);
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }

    // Unsigned variant: saturation values 0 and UINT_MAX both collapse to 0
    // under (dest + 1) & ~1, so failure is detected by comparing S0 with 0.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        truncateDoubleToUint32(src, dest);

        m_assembler.add(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1));
        m_assembler.bic(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1));

        m_assembler.cmp(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE));
    }

    // Result is undefined if the value is outside of the integer range.
    // 'SD0 << 1' addresses the low single-precision half of FP scratch SD0.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }

    // Unsigned truncation; same scratch-register scheme as above.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_u32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);
    }
1334
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.vcvt_s32_f64(ARMRegisters::SD0 << 1, src);
        m_assembler.vmov_arm32(dest, ARMRegisters::SD0 << 1);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_f64_s32(ARMRegisters::SD0, ARMRegisters::SD0 << 1);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));

        // If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
    }

    // Branch if reg != +0.0: materialize 0.0 in 'scratch' and compare.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    // Branch if reg == 0.0 or reg is NaN (unordered compare against 0.0).
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        m_assembler.mov(ARMRegisters::S0, ARMAssembler::getOp2Byte(0));
        convertInt32ToDouble(ARMRegisters::S0, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
1366
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // ARM condition codes live in the top nibble and come in complementary
    // pairs differing only in bit 28, so XOR with 0x10000000 flips the
    // condition. The assert rules out AL/NV, which have no complement.
    static RelationalCondition invert(RelationalCondition cond)
    {
        ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL));
        return static_cast<RelationalCondition>(cond ^ 0x10000000);
    }

    // Emit a no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }

    // Full-system data memory barrier (DMB SY).
    void memoryFence()
    {
        m_assembler.dmbSY();
    }
1383
    // Read back the destination of a previously linked call instruction.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMAssembler::readCallTarget(call.dataLocation())));
    }

    // Overwrite the instruction(s) at instructionStart with a jump to
    // destination (used when repatching compiled code in place).
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }
1393
1394    static ptrdiff_t maxJumpReplacementSize()
1395    {
1396        ARMAssembler::maxJumpReplacementSize();
1397        return 0;
1398    }
1399
    // This backend does not support jump-replacement on the patchable-branch
    // forms, so the related hooks below are unreachable stubs.
    static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
    static bool canJumpReplacePatchableBranch32WithPatch() { return false; }

    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    // The patchable sequence emitted by branchPtrWithPatch() begins exactly at
    // the data label, so no offset adjustment is needed.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        return label.labelAtOffset(0);
    }

    // Undo a replaceWithJump() at a branchPtrWithPatch() site, restoring the
    // original load-constant-and-compare sequence.
    // NOTE(review): only the low 16 bits of initialValue are passed down
    // (& 0xffff) — presumably matching what ARMAssembler::revertBranchPtrWithPatch
    // re-encodes; confirm against that function before relying on the full
    // pointer being restored here.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID reg, void* initialValue)
    {
        ARMAssembler::revertBranchPtrWithPatch(instructionStart.dataLocation(), reg, reinterpret_cast<uintptr_t>(initialValue) & 0xffff);
    }

    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
1434
#if USE(MASM_PROBE)
    // Snapshot of every CPU register, one field per register, generated from
    // the FOR_EACH_CPU_REGISTER x-macro so it stays in sync with the list.
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    // State handed to a probe callback: the function itself, its two user
    // arguments, and the captured register state at the probe point.
    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        // Debug dump of the context; indentation prefixes each output line.
        void dump(const char* indentation = 0);
    private:
        void dumpCPURegisters(const char* indentation);
    };

    // For details about probe(), see comment in MacroAssemblerX86_64.h.
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // USE(MASM_PROBE)
1460
protected:
    // RelationalCondition values are ARM condition codes by construction, so
    // the mapping is a plain cast.
    ARMAssembler::Condition ARMCondition(RelationalCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Likewise for ResultCondition values.
    ARMAssembler::Condition ARMCondition(ResultCondition cond)
    {
        return static_cast<ARMAssembler::Condition>(cond);
    }

    // Guarantee room for insnSpace bytes of instructions and constSpace bytes
    // of constant pool without a pool flush splitting the sequence.
    void ensureSpace(int insnSpace, int constSpace)
    {
        m_assembler.ensureSpace(insnSpace, constSpace);
    }

    int sizeOfConstantPool()
    {
        return m_assembler.sizeOfConstantPool();
    }

    // Indirect call through a function pointer at [base + offset], via S1.
    void call32(RegisterID base, int32_t offset)
    {
        load32(Address(base, offset), ARMRegisters::S1);
        m_assembler.blx(ARMRegisters::S1);
    }
1487
private:
    friend class LinkBuffer;
    friend class RepatchBuffer;

    // Compare a register with an immediate, preferring CMN (compare-negative)
    // when -right is encodable as an operand2. 0x80000000 is special-cased
    // because its negation is itself and cannot go through the CMN path;
    // otherwise fall back to CMP with the constant materialized via S0.
    void internalCompare32(RegisterID left, TrustedImm32 right)
    {
        ARMWord tmp = (static_cast<unsigned>(right.m_value) == 0x80000000) ? ARMAssembler::InvalidImmediate : m_assembler.getOp2(-right.m_value);
        if (tmp != ARMAssembler::InvalidImmediate)
            m_assembler.cmn(left, tmp);
        else
            m_assembler.cmp(left, m_assembler.getImm(right.m_value, ARMRegisters::S0));
    }
1500
    // Resolve a recorded (near/far) call in freshly generated code to its
    // target function. Invoked by LinkBuffer during finalization.
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMAssembler::linkCall(code, call.m_label, function.value());
    }

    // Repoint an already-linked call to a new code location.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Repoint an already-linked call to a new C function.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMAssembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1515
#if USE(MASM_PROBE)
    // Helpers for the probe machinery: funnel various pointer flavors into a
    // TrustedImm32 (valid because pointers are 32-bit on this target).
    inline TrustedImm32 trustedImm32FromPtr(void* ptr)
    {
        return TrustedImm32(TrustedImmPtr(ptr));
    }

    inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
    {
        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }

    inline TrustedImm32 trustedImm32FromPtr(void (*function)())
    {
        return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
    }
#endif

    // Set once at startup: whether the VFP floating-point unit is available.
    static const bool s_isVFPPresent;
1534};
1535
1536}
1537
1538#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)
1539
1540#endif // MacroAssemblerARM_h
1541