/*
 * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef DFGAssemblyHelpers_h
#define DFGAssemblyHelpers_h

#include <wtf/Platform.h>

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGFPRInfo.h"
#include "DFGGPRInfo.h"
#include "DFGNode.h"
#include "MacroAssembler.h"
#include "VM.h"

namespace JSC { namespace DFG {

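// Signature for operations passed to debugCall(), following the JIT
// operation naming convention: V = void return, E = ExecState*,
// P = void* argument.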
typedef void (*V_DFGDebugOperation_EPP)(ExecState*, void*, void*);

class AssemblyHelpers : public MacroAssembler {
public:
    AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
        : m_vm(vm)
        , m_codeBlock(codeBlock)
        , m_baselineCodeBlock(codeBlock ? codeBlock->baselineVersion() : 0)
    {
        if (m_codeBlock) {
            ASSERT(m_baselineCodeBlock);
            ASSERT(!m_baselineCodeBlock->alternative());
            ASSERT(m_baselineCodeBlock->getJITType() == JITCode::BaselineJIT);
        }
    }

    CodeBlock* codeBlock() { return m_codeBlock; }
    VM* vm() { return m_vm; }
    AssemblerType_T& assembler() { return m_assembler; }

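    // On x86 the return address lives on the stack, so preserving it is a
    // pop and restoring it is a push; on ARM and MIPS it lives in a link
    // register (lr / $ra) and is simply moved.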
#if CPU(X86_64) || CPU(X86)
    void preserveReturnAddressAfterCall(GPRReg reg)
    {
        pop(reg);
    }

    void restoreReturnAddressBeforeReturn(GPRReg reg)
    {
        push(reg);
    }

    void restoreReturnAddressBeforeReturn(Address address)
    {
        push(address);
    }
#endif // CPU(X86_64) || CPU(X86)

#if CPU(ARM)
    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(linkRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, linkRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, linkRegister);
    }
#endif

#if CPU(MIPS)
    ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
    {
        move(returnAddressRegister, reg);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
    {
        move(reg, returnAddressRegister);
    }

    ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
    {
        loadPtr(address, returnAddressRegister);
    }
#endif

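    // Accessors for the call frame header, whose entries sit at fixed slot
    // offsets from the call frame register; each slot is sizeof(Register)
    // bytes wide.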
    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
    {
        loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
    }
    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
#if USE(JSVALUE64)
        store64(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
#else
        store32(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
#endif
    }

    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
    {
        storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
    }

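    // On 64-bit, cells are the only JSValues with all tag-mask bits clear,
    // so a single test against the tag mask register suffices; on 32-bit,
    // the tag word is compared against CellTag directly.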
    Jump branchIfNotCell(GPRReg reg)
    {
#if USE(JSVALUE64)
        return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
        return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
    }

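    // Address of a virtual register's stack slot, plus tag/payload variants
    // that address the two 32-bit halves of the slot's EncodedJSValue
    // (meaningful on JSVALUE32_64 targets).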
    static Address addressFor(VirtualRegister virtualRegister)
    {
        return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
    }
    static Address addressFor(int operand)
    {
        return addressFor(static_cast<VirtualRegister>(operand));
    }

    static Address tagFor(VirtualRegister virtualRegister)
    {
        return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }
    static Address tagFor(int operand)
    {
        return tagFor(static_cast<VirtualRegister>(operand));
    }

    static Address payloadFor(VirtualRegister virtualRegister)
    {
        return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }
    static Address payloadFor(int operand)
    {
        return payloadFor(static_cast<VirtualRegister>(operand));
    }

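    // Expects a register holding the cell's Structure; every object type
    // compares greater than or equal to ObjectType in the JSType enum, so
    // a single Below comparison suffices.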
    Jump branchIfNotObject(GPRReg structureReg)
    {
        return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
    }

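    // Returns the lowest-numbered temporary register (regT0..regT4) that is
    // not among the registers the caller asked to preserve.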
    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
            return GPRInfo::regT0;

        if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
            return GPRInfo::regT1;

        if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
            return GPRInfo::regT2;

        if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
            return GPRInfo::regT3;

        return GPRInfo::regT4;
    }

    // Add a debug call. This call has no effect on JIT code execution state.
    void debugCall(V_DFGDebugOperation_EPP function, void* argument)
    {
        size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
        ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
        EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());

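        // Spill every GPR and FPR into the scratch buffer so the callee can
        // clobber registers freely without disturbing JIT state.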
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            store64(GPRInfo::toRegister(i), buffer + i);
#else
            store32(GPRInfo::toRegister(i), buffer + i);
#endif
        }

        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
        }

        // Tell GC mark phase how much of the scratch buffer is active during call.
        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);

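        // Marshal the three arguments (call frame, argument, buffer): in
        // registers on ABIs with register arguments, on the stack for
        // 32-bit x86.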
#if CPU(X86_64) || CPU(ARM) || CPU(MIPS)
        move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
        move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        // The scratch register must avoid all three argument registers;
        // otherwise loading the function pointer could clobber an argument
        // before the call.
        GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
#elif CPU(X86)
        poke(GPRInfo::callFrameRegister, 0);
        poke(TrustedImmPtr(argument), 1);
        poke(TrustedImmPtr(buffer), 2);
        GPRReg scratch = GPRInfo::regT0;
#else
#error "DFG JIT not supported on this platform."
#endif
        move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
        call(scratch);

        move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
        storePtr(TrustedImmPtr(0), GPRInfo::regT0);

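        // Refill every FPR and GPR from the scratch buffer, leaving register
        // state exactly as it was before the call.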
        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
            loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
        }
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
#if USE(JSVALUE64)
            load64(buffer + i, GPRInfo::toRegister(i));
#else
            load32(buffer + i, GPRInfo::toRegister(i));
#endif
        }
    }

    // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
#if DFG_ENABLE(JIT_ASSERT)
    void jitAssertIsInt32(GPRReg);
    void jitAssertIsJSInt32(GPRReg);
    void jitAssertIsJSNumber(GPRReg);
    void jitAssertIsJSDouble(GPRReg);
    void jitAssertIsCell(GPRReg);
    void jitAssertHasValidCallFrame();
#else
    void jitAssertIsInt32(GPRReg) { }
    void jitAssertIsJSInt32(GPRReg) { }
    void jitAssertIsJSNumber(GPRReg) { }
    void jitAssertIsJSDouble(GPRReg) { }
    void jitAssertIsCell(GPRReg) { }
    void jitAssertHasValidCallFrame() { }
#endif

    // These methods convert between raw doubles and doubles boxed as JSValues.
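    // Under JSVALUE64, tagTypeNumberRegister holds 0xFFFF000000000000, so
    // subtracting it is equivalent (mod 2^64) to adding the 2^48 double
    // encode offset; e.g. raw 1.0 = 0x3FF0000000000000 boxes to
    // 0x3FF1000000000000. Unboxing adds the register back to recover the
    // raw IEEE 754 bits.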
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        moveDoubleTo64(fpr, gpr);
        sub64(GPRInfo::tagTypeNumberRegister, gpr);
        jitAssertIsJSDouble(gpr);
        return gpr;
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        jitAssertIsJSDouble(gpr);
        add64(GPRInfo::tagTypeNumberRegister, gpr);
        move64ToDouble(gpr, fpr);
        return fpr;
    }
#endif

#if USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        moveDoubleToInts(fpr, payloadGPR, tagGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
    }
#endif

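    // A pending exception leaves vm->exception non-empty: any nonzero bit
    // pattern on 64-bit, a tag other than EmptyValueTag on 32-bit.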
    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
    {
#if USE(JSVALUE64)
        return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(&vm()->exception));
#elif USE(JSVALUE32_64)
        return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(&vm()->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
    }

#if ENABLE(SAMPLING_COUNTERS)
    static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
    void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
    {
        add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    }
#endif

#if ENABLE(SAMPLING_FLAGS)
    void setSamplingFlag(int32_t);
    void clearSamplingFlag(int32_t);
#endif

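    // Helpers that resolve state relative to a CodeOrigin, which names a
    // bytecode index plus, for inlined code, the inline call frame it
    // belongs to.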
    JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
    {
        return codeBlock()->globalObjectFor(codeOrigin);
    }

    bool strictModeFor(CodeOrigin codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return codeBlock()->isStrictMode();
        return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
    }

    ExecutableBase* executableFor(const CodeOrigin& codeOrigin);

    CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
    }

    CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return baselineCodeBlock();
        return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    }

    CodeBlock* baselineCodeBlock()
    {
        return m_baselineCodeBlock;
    }

    int argumentsRegisterFor(InlineCallFrame* inlineCallFrame)
    {
        if (!inlineCallFrame)
            return codeBlock()->argumentsRegister();

        return baselineCodeBlockForInlineCallFrame(
            inlineCallFrame)->argumentsRegister() + inlineCallFrame->stackOffset;
    }

    int argumentsRegisterFor(const CodeOrigin& codeOrigin)
    {
        return argumentsRegisterFor(codeOrigin.inlineCallFrame);
    }

    SharedSymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
    {
        return baselineCodeBlockFor(codeOrigin)->symbolTable();
    }

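    // Byte offsets of a (possibly inlined) frame's locals and arguments
    // within the machine frame; inlined frames are displaced by their
    // stackOffset.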
    int offsetOfLocals(const CodeOrigin& codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return 0;
        return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
    }

    int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
    {
        if (!codeOrigin.inlineCallFrame)
            return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
        return (codeOrigin.inlineCallFrame->stackOffset + CallFrame::argumentOffsetIncludingThis(0)) * sizeof(Register);
    }

    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);

protected:
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_baselineCodeBlock;

    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> > m_decodedCodeMaps;
};

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)

#endif // DFGAssemblyHelpers_h