1/* 2 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR 17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#ifndef DFGSpeculativeJIT_h
#define DFGSpeculativeJIT_h

#include <wtf/Platform.h>

#if ENABLE(DFG_JIT)

#include "DFGAbstractState.h"
#include "DFGGenerationInfo.h"
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
#include "DFGOSRExitJumpPlaceholder.h"
#include "DFGOperations.h"
#include "DFGSilentRegisterSavePlan.h"
#include "DFGValueSource.h"
#include "MarkedAllocator.h"
#include "ValueRecovery.h"

namespace JSC { namespace DFG {

// Forward declarations for the operand wrapper classes that cooperate with
// SpeculativeJIT to fill node values into machine registers.
class GPRTemporary;
class JSValueOperand;
class SlowPathGenerator;
class SpeculativeJIT;
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;

// Classifies the representation a node's value has been generated into so far
// (used when deciding how to convert an operand, e.g. for ToInt32).
enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue};

// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
// incomplete code path for the dataflow. When code generating
// we may make assumptions about operand types, dynamically check,
// and bail-out to an alternate code path if these checks fail.
// Importantly, the speculative code path cannot be reentered once
// a speculative check has failed. This allows the SpeculativeJIT
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
class SpeculativeJIT {
    friend struct OSRExit;
private:
    // Shorthands for the assembler's immediate-operand types.
    typedef JITCompiler::TrustedImm32 TrustedImm32;
    typedef JITCompiler::Imm32 Imm32;
    typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
    typedef JITCompiler::ImmPtr ImmPtr;
    typedef JITCompiler::TrustedImm64 TrustedImm64;
    typedef JITCompiler::Imm64 Imm64;

    // These constants are used to set priorities for spill order for
    // the register allocator.
#if USE(JSVALUE64)
    // Lower numbers are cheaper to evict; the allocator prefers to spill them
    // first. On 64-bit, cells are already in boxed form so they rank with JS.
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderCell = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
        SpillOrderDouble = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    // 32-bit variant: doubles spill without conversion, but cells and booleans
    // must be boxed (tag written) on spill, so they are more expensive.
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled = 2, // no spill
        SpillOrderJS = 4, // needs spill
        SpillOrderStorage = 4, // needs spill
        SpillOrderDouble = 4, // needs spill
        SpillOrderInteger = 5, // needs spill and box
        SpillOrderCell = 5, // needs spill and box
        SpillOrderBoolean = 5, // needs spill and box
    };
#endif

    // Whether a node's children are consumed by the generic result helpers
    // (CallUseChildren) or were already consumed by the node's own codegen.
    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };

public:
    SpeculativeJIT(JITCompiler&);
    ~SpeculativeJIT();

    // Top-level entry points: generate code for the whole graph and wire up
    // OSR entry points after linking.
    bool compile();
    void createOSREntries();
    void linkOSREntries(LinkBuffer&);

    // Returns the index of the next non-null block after the current one,
    // or NoBlock if the current block is the last one.
    BlockIndex nextBlock()
    {
        for (BlockIndex result = m_block + 1; ; result++) {
            if (result >= m_jit.graph().m_blocks.size())
                return NoBlock;
            if (m_jit.graph().m_blocks[result])
                return result;
        }
    }

    // Non-speculative fills: bring an edge's value into machine register(s)
    // without emitting speculation checks.
    GPRReg fillInteger(Edge, DataFormat& returnFormat);
#if USE(JSVALUE64)
    GPRReg fillJSValue(Edge);
#elif USE(JSVALUE32_64)
    bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
#endif
    GPRReg fillStorage(Edge);

    // lock and unlock GPR & FPR registers.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }

    // Used to check whether a child node is on its last use,
    // and its machine registers may be reused.
    // True if this node's value is on its last use, so its register(s) may be
    // handed to the consumer instead of allocating fresh ones.
    bool canReuse(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.canReuse();
    }
    bool canReuse(Edge nodeUse)
    {
        return canReuse(nodeUse.node());
    }
    // Re-lock an operand's register so it survives as the result register.
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }

    // Allocate a gpr/fpr.
    GPRReg allocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        GPRReg gpr = m_gprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
            // A JSValue occupies two GPRs on 32-bit; evicting one half means
            // the sibling register must be released too before spilling.
            GenerationInfo& info = m_generationInfo[spillMe];
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return gpr;
    }
    // Allocate a specific GPR, evicting (spilling) its current occupant if any.
    GPRReg allocate(GPRReg specific)
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
        if (spillMe != InvalidVirtualRegister) {
#if USE(JSVALUE32_64)
            // See allocate(): release the other half of a two-GPR JSValue.
            GenerationInfo& info = m_generationInfo[spillMe];
            RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
            if ((info.registerFormat() & DataFormatJS))
                m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
            spill(spillMe);
        }
        return specific;
    }
    // Allocate without spilling; may fail (see RegisterBank::tryAllocate).
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    FPRReg fprAllocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister spillMe;
        FPRReg fpr = m_fprs.allocate(spillMe);
        if (spillMe != InvalidVirtualRegister)
            spill(spillMe);
        return fpr;
    }

    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine register before filling those that are not we attempt to
    // avoid spilling values we will need immediately).
    bool isFilled(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() != DataFormatNone;
    }
    bool isFilledDouble(Node* node)
    {
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.registerFormat() == DataFormatDouble;
    }

    // Called on an operand once it has been consumed by a parent node.
    void use(Node* node)
    {
        if (!node->hasResult())
            return;
        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
        if (!info.use(*m_stream))
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            // Two-GPR JSValue: release both tag and payload registers.
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
    void use(Edge nodeUse)
    {
        use(nodeUse.node());
    }

    // Snapshot of every machine register currently holding a live value.
    RegisterSet usedRegisters()
    {
        RegisterSet result;
        for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
            GPRReg gpr = GPRInfo::toRegister(i);
            if (m_gprs.isInUse(gpr))
                result.set(gpr);
        }
        for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
            FPRReg fpr = FPRInfo::toRegister(i);
            if (m_fprs.isInUse(fpr))
                result.set(fpr);
        }
        return result;
    }

    // GC write-barrier emission; the overloads cover owner/value being either
    // a known cell or a register.
    static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);

    void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
    void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);

    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
    }

    // Called by the speculative operand types, below, to fill operand to
    // machine registers, implicitly generating speculation checks as needed.
    GPRReg fillSpeculateInt(Edge, DataFormat& returnFormat);
    GPRReg fillSpeculateIntStrict(Edge);
    FPRReg fillSpeculateDouble(Edge);
    GPRReg fillSpeculateCell(Edge);
    GPRReg fillSpeculateBoolean(Edge);
    GeneratedOperandType checkGeneratedTypeForToInt32(Node*);

    void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
    void runSlowPathGenerators();

    void compile(Node*);
    void noticeOSRBirth(Node*);
    void compile(BasicBlock&);

    void checkArgumentTypes();

    void clearGenerationInfo();

    // These methods are used when generating 'unexpected'
    // calls out from JIT code to C++ helper routines -
    // they spill all live values to the appropriate
    // slots in the JSStack without changing any state
    // in the GenerationInfo.
    SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
    SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
    void silentSpill(const SilentRegisterSavePlan&);
    void silentFill(const SilentRegisterSavePlan&, GPRReg canTrample);

    // Builds (and optionally executes) a save plan for every in-use register,
    // skipping the excluded ones (typically the result registers of the call).
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        ASSERT(plans.isEmpty());
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            GPRReg gpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) {
                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister && iter.regID() != fprExclude) {
                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
        }
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
    }
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
    }
#if USE(JSVALUE32_64)
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
    }
#endif

    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
    }

    // Choose a GPR that silentFill may clobber, avoiding the excluded
    // result register(s).
    static GPRReg pickCanTrample(GPRReg exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude)
            result = GPRInfo::regT1;
        return result;
    }
    static GPRReg pickCanTrample(FPRReg)
    {
        return GPRInfo::regT0;
    }
    static GPRReg pickCanTrample(NoResultTag)
    {
        return GPRInfo::regT0;
    }

#if USE(JSVALUE32_64)
    static GPRReg pickCanTrample(JSValueRegs exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude.tagGPR()) {
            result = GPRInfo::regT1;
            if (result == exclude.payloadGPR())
                result = GPRInfo::regT2;
        } else if (result == exclude.payloadGPR()) {
            result = GPRInfo::regT1;
            if (result == exclude.tagGPR())
                result = GPRInfo::regT2;
        }
        return result;
    }
#endif

    // Restore every register saved by silentSpillAllRegisters, in reverse
    // (LIFO) order, consuming m_plans.
    template<typename RegisterType>
    void silentFillAllRegisters(RegisterType exclude)
    {
        GPRReg canTrample = pickCanTrample(exclude);

        while (!m_plans.isEmpty()) {
            SilentRegisterSavePlan& plan = m_plans.last();
            silentFill(plan, canTrample);
            m_plans.removeLast();
        }
    }

    // These methods convert between doubles, and doubles boxed and JSValues.
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, fpr);
    }
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }
#elif USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif

    // Spill a VirtualRegister to the JSStack.
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
            return;
#endif
        // Check the GenerationInfo to see if this value need writing
        // to the JSStack - if not, mark it as spilled & return.
        if (!info.needsSpill()) {
            info.setSpilled(*m_stream, spillMe);
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatStorage);
            return;
        }

        case DataFormatInteger: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatInteger);
            return;
        }

#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatDouble);
            return;
        }

        default:
            // The following code handles JSValues, int32s, and cells.
            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);

            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            // NOTE(review): DataFormatInteger is already handled by its own
            // case above, so this branch looks unreachable here — confirm
            // before relying on it.
            if (spillFormat == DataFormatInteger)
                m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);

            // Spill the value, and record it as spilled in its boxed form.
            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
            return;
#elif USE(JSVALUE32_64)
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
        }

        case DataFormatDouble:
        case DataFormatJSDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
            info.spill(*m_stream, spillMe, DataFormatJSDouble);
            return;
        }

        default:
            // The following code handles JSValues.
            RELEASE_ASSERT(spillFormat & DataFormatJS);
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
            info.spill(*m_stream, spillMe, spillFormat);
            return;
#endif
        }
    }

    // Abstract-interpreter type queries: "known X" means the proven type set
    // contains nothing outside X; "known not X" means it contains no X at all.
    bool isKnownInteger(Node* node) { return !(m_state.forNode(node).m_type & ~SpecInt32); }
    bool isKnownCell(Node* node) { return !(m_state.forNode(node).m_type & ~SpecCell); }

    bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32); }
    bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecNumber); }
    bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }

    // Checks/accessors for constant values.
    bool isConstant(Node* node) { return m_jit.graph().isConstant(node); }
    bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); }
    bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); }
    bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); }
    bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); }
    bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); }
    bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); }
    int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); }
    double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); }
#if USE(JSVALUE32_64)
    void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); }
#endif
    JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); }
    bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); }
    JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); }
    bool isNullConstant(Node* node)
    {
        if (!isConstant(node))
            return false;
        return valueOfJSConstant(node).isNull();
    }

    Identifier* identifier(unsigned index)
    {
        return &m_jit.codeBlock()->identifier(index);
    }

    // Spill all VirtualRegisters back to the JSStack.
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
    }

#ifndef NDEBUG
    // Used to ASSERT flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        return true;
    }
#endif

#if USE(JSVALUE64)
    MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
    {
        return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node)));
    }
#endif

    // Helper functions to enable code sharing in implementations of bit/shift ops.
    // Emit the 32-bit bitwise op selected by 'op' with an immediate operand.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(Imm32(imm), op1, result);
            break;
        case BitOr:
            m_jit.or32(Imm32(imm), op1, result);
            break;
        case BitXor:
            m_jit.xor32(Imm32(imm), op1, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Emit the 32-bit bitwise op selected by 'op' with two register operands.
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        switch (op) {
        case BitAnd:
            m_jit.and32(op1, op2, result);
            break;
        case BitOr:
            m_jit.or32(op1, op2, result);
            break;
        case BitXor:
            m_jit.xor32(op1, op2, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Emit the 32-bit shift selected by 'op' with an immediate shift amount.
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    // Emit the 32-bit shift selected by 'op' with a register shift amount.
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        switch (op) {
        case BitRShift:
            m_jit.rshift32(op1, shiftAmount, result);
            break;
        case BitLShift:
            m_jit.lshift32(op1, shiftAmount, result);
            break;
        case BitURShift:
            m_jit.urshift32(op1, shiftAmount, result);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
    unsigned detectPeepHoleBranch()
    {
        BasicBlock* block = m_jit.graph().m_blocks[m_block].get();

        // Check that no intervening nodes will be generated.
        for (unsigned index = m_indexInBlock + 1; index < block->size() - 1; ++index) {
            Node* node = block->at(index);
            if (node->shouldGenerate())
                return UINT_MAX;
        }

        // Check if the lastNode is a branch on this node.
        Node* lastNode = block->last();
        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? block->size() - 1 : UINT_MAX;
    }

    void compileMovHint(Node*);
    void compileMovHintAndCheck(Node*);
    void compileInlineStart(Node*);

    void nonSpeculativeUInt32ToNumber(Node*);

    // Inline-cache property access; register layout differs between the
    // one-register (64-bit) and tag/payload (32-bit) value representations.
#if USE(JSVALUE64)
    void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
    void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
    void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif

    // Non-speculative comparison codegen. The "peephole" variants fuse the
    // compare with an immediately following Branch (see detectPeepHoleBranch).
    void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
    void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false);
    bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false);

    void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);

    void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
    void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
    bool nonSpeculativeStrictEq(Node*, bool invert = false);

    void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
    void compileInstanceOf(Node*);

    // Access to our fixed callee CallFrame.
    MacroAssembler::Address callFrameSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
    }

    // Access to our fixed callee CallFrame.
    MacroAssembler::Address argumentSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
    }

    // Tag/payload halves of the slots above, for the split value representation.
    MacroAssembler::Address callFrameTagSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address callFramePayloadSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    MacroAssembler::Address argumentTagSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    MacroAssembler::Address argumentPayloadSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    void emitCall(Node*);

    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the node's result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GenerationInfo).
    void useChildren(Node*);

    // These methods are called to initialize the GenerationInfo
    // to describe the result of an operation.
    void integerResult(GPRReg reg, Node* node, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        if (format == DataFormatInteger) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInteger(node, node->refCount(), reg);
        } else {
#if USE(JSVALUE64)
            RELEASE_ASSERT(format == DataFormatJSInteger);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
    void integerResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        integerResult(reg, node, DataFormatInteger, mode);
    }
    void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == UseChildrenCalledExplicitly)
            return;
        useChildren(node);
    }
    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initCell(node, node->refCount(), reg);
    }
    void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initBoolean(node, node->refCount(), reg);
    }
#if USE(JSVALUE64)
    void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (format == DataFormatJSInteger)
            m_jit.jitAssertIsJSInt32(reg);

        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), reg, format);
    }
    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        jsValueResult(reg, node, DataFormatJS, mode);
    }
#elif USE(JSVALUE32_64)
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), tag, payload, format);
    }
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, node, DataFormatJS, mode);
    }
#endif
    void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initStorage(node, node->refCount(), reg);
    }
    void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initDouble(node, node->refCount(), reg);
    }
    void initConstantInfo(Node* node)
    {
        ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node));
        m_generationInfo[node->virtualRegister()].initConstant(node, node->refCount());
    }

    // These methods add calls to C++ helper functions.
    // These methods are broadly value representation specific (i.e.
    // deal with the fact that a JSValue may be passed in one or two
    // machine registers, and delegate the calling convention specific
    // decision as to how to fill the registers to setupArguments* methods.
    // --- callOperation overloads (representation-agnostic) ---
    //
    // Each overload marshals its arguments per the operation's signature
    // typedef (first letter = result kind, "E" = takes ExecState, remaining
    // letters = argument kinds), emits the call, and — for every visible
    // "E"-taking operation — appends an exception check after the call.

    // P_*: operations returning a pointer in |result|.
    JITCompiler::Call callOperation(P_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EC operation, GPRReg result, GPRReg cell)
    {
        m_jit.setupArgumentsWithExecState(cell);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EO operation, GPRReg result, GPRReg object)
    {
        m_jit.setupArgumentsWithExecState(object);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EOS operation, GPRReg result, GPRReg object, size_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        // Widen the int32 to size_t before passing it as a pointer-width
        // immediate.
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size)));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
    {
        m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_ES operation, GPRReg result, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_ESt operation, GPRReg result, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    // C_*: operations returning a cell pointer in |result|.
    JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_ESt operation, GPRReg result, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EJssJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    // S_*: operations returning a size_t/boolean-ish value in |result|.
    JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    // V_*: void operations — no result register to capture.
    JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame));
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_ECZ operation, GPRReg arg1, int arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_ECC operation, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }

    // Takes no ExecState and appends a raw call — no exception check.
    JITCompiler::Call callOperation(V_DFGOperation_W operation, WatchpointSet* watchpointSet)
    {
        m_jit.setupArguments(TrustedImmPtr(watchpointSet));
        return appendCall(operation);
    }

    // NoResultTag adapters: let callers uniformly pass NoResultTag for
    // void-returning operations; they simply drop the tag and forward.
    template<typename FunctionType, typename ArgumentType1>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
    {
        return callOperation(operation, arg1);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
    {
        return callOperation(operation, arg1, arg2);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3)
    {
        return callOperation(operation, arg1, arg2, arg3);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4)
    {
        return callOperation(operation, arg1, arg2, arg3, arg4);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
    JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
    {
        return callOperation(operation, arg1, arg2, arg3, arg4, arg5);
    }

    // D_*: operations returning a double in FPR |result|; these take no
    // ExecState and append no exception check.
    JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
    {
        m_jit.setupArguments(arg1, arg2);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(Str_DFGOperation_EJss operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EZ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

#if USE(JSVALUE64)
    // --- 64-bit callOperation overloads: a JSValue result comes back in a
    // single GPR ("J_*" operations). ---
    JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        // Raw call, no exception check; zero-extend the helper's 32-bit
        // integer return value into |result|.
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGriJsgI operation, GPRReg result, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg result, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EAZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EC operation, GPRReg result, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg result, int32_t arg1)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZIcfZ operation, GPRReg result, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }


    JITCompiler::Call callOperation(C_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    // No ExecState, no exception check for S_DFGOperation_J.
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArguments(arg1);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
    {
        // Box the int32 immediate as a full EncodedJSValue at compile time.
        m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

#else // USE(JSVALUE32_64)

// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for ARM
// EABI, it must be aligned to an even-numbered register (r0, r2, or [sp]).
// To stop the generated code from using the wrong registers, occupy r1 or r3
// with a dummy argument when necessary.
#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
#define EABI_32BIT_DUMMY_ARG      TrustedImm32(0),
#else
#define EABI_32BIT_DUMMY_ARG
#endif

    JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
    {
        // Raw call, no exception check; zero-extend the helper's 32-bit
        // integer return value into |result|.
        prepareForExternalCall();
        m_jit.setupArguments(arg1);
        JITCompiler::Call call = m_jit.appendCall(operation);
        m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
        return call;
    }
    // --- 32-bit callOperation overloads: a JSValue result comes back split
    // across a tag GPR and a payload GPR; note the payload register is passed
    // to appendCallWithExceptionCheckSetResult first. ---
    JITCompiler::Call callOperation(J_DFGOperation_E operation, GPRReg resultTag, GPRReg resultPayload)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EGriJsgI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EAZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    // JSValue arguments are passed as (payload, tag) pairs, with an EABI
    // alignment dummy inserted where the 64-bit pair must start on an
    // even-numbered register (see EABI_32BIT_DUMMY_ARG above).
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }

    JITCompiler::Call callOperation(J_DFGOperation_EC operation, GPRReg resultTag, GPRReg resultPayload, JSCell* cell)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EDA operation, GPRReg resultTag, GPRReg resultPayload, FPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, TrustedImm32 arg1Tag, GPRReg arg1Payload, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
    {
        m_jit.setupArgumentsWithExecState(arg1);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZIcfZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }


    JITCompiler::Call callOperation(C_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    // No ExecState, no exception check for S_DFGOperation_J.
    JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArguments(arg1Payload, arg1Tag);
        return appendCallSetResult(operation, result);
    }
    JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

    JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
    {
        // An int32 immediate is passed as (payload, Int32Tag).
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }

    JITCompiler::Call callOperation(J_DFGOperation_EIRo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }

    JITCompiler::Call callOperation(J_DFGOperation_EIRoPtbo operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier, ResolveOperations* operations, PutToBaseOperation* putToBaseOperations)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier), TrustedImmPtr(operations), TrustedImmPtr(putToBaseOperations));
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }

    JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }
    JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, Identifier* identifier)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(V_DFGOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }
    JITCompiler::Call callOperation(V_DFGOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, TrustedImm32 arg3Tag, GPRReg arg3Payload)
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
        return appendCallWithExceptionCheck(operation);
    }

    JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
    {
        m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }

#undef EABI_32BIT_DUMMY_ARG

    // JSValueRegs adapters: map a combined JSValueRegs result onto the
    // (tag, payload) overloads above.
    template<typename FunctionType>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR());
    }
    template<typename FunctionType, typename ArgumentType1>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1);
    }
    template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3, typename ArgumentType4>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3, ArgumentType4 arg4)
    {
        return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4);
    }
    template<
        typename FunctionType, typename ArgumentType1, typename ArgumentType2,
        typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
    JITCompiler::Call callOperation(
        FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
        ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
    {
        return callOperation(
            operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4, arg5);
    }
#endif // USE(JSVALUE32_64)

#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS)
    void prepareForExternalCall()
    {
        // We're about to call out to a "native" helper function. The helper
        // function is expected to set topCallFrame itself with the ExecState
        // that is passed to it.
        //
        // We explicitly trash topCallFrame here so that we'll know if some of
        // the helper functions are not setting topCallFrame when they should
        // be doing so. Note: the previous value in topCallFrame was not valid
        // anyway since it was not being updated by JIT'ed code by design.
1606 1607 for (unsigned i = 0; i < sizeof(void*) / 4; i++) 1608 m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.vm()->topCallFrame) + i * 4); 1609 } 1610#else 1611 void prepareForExternalCall() { } 1612#endif 1613 1614 // These methods add call instructions, with optional exception checks & setting results. 1615 JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function) 1616 { 1617 prepareForExternalCall(); 1618 CodeOrigin codeOrigin = m_currentNode->codeOrigin; 1619 CallBeginToken token; 1620 m_jit.beginCall(codeOrigin, token); 1621 JITCompiler::Call call = m_jit.appendCall(function); 1622 m_jit.addExceptionCheck(call, codeOrigin, token); 1623 return call; 1624 } 1625 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result) 1626 { 1627 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1628 m_jit.move(GPRInfo::returnValueGPR, result); 1629 return call; 1630 } 1631 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result) 1632 { 1633 prepareForExternalCall(); 1634 JITCompiler::Call call = m_jit.appendCall(function); 1635 m_jit.move(GPRInfo::returnValueGPR, result); 1636 return call; 1637 } 1638 JITCompiler::Call appendCall(const FunctionPtr& function) 1639 { 1640 prepareForExternalCall(); 1641 return m_jit.appendCall(function); 1642 } 1643 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2) 1644 { 1645 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1646 m_jit.setupResults(result1, result2); 1647 return call; 1648 } 1649#if CPU(X86) 1650 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1651 { 1652 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1653 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister); 1654 m_jit.loadDouble(JITCompiler::stackPointerRegister, result); 1655 return call; 1656 } 
1657 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1658 { 1659 JITCompiler::Call call = m_jit.appendCall(function); 1660 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister); 1661 m_jit.loadDouble(JITCompiler::stackPointerRegister, result); 1662 return call; 1663 } 1664#elif CPU(ARM) 1665#if CPU(ARM_HARDFP) 1666 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1667 { 1668 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1669 m_jit.moveDouble(result, FPRInfo::argumentFPR0); 1670 return call; 1671 } 1672 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1673 { 1674 JITCompiler::Call call = m_jit.appendCall(function); 1675 m_jit.moveDouble(result, FPRInfo::argumentFPR0); 1676 return call; 1677 } 1678#else 1679 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1680 { 1681 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1682 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); 1683 return call; 1684 } 1685 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1686 { 1687 JITCompiler::Call call = m_jit.appendCall(function); 1688 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); 1689 return call; 1690 } 1691#endif // CPU(ARM_HARDFP) 1692#else 1693 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1694 { 1695 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1696 m_jit.moveDouble(FPRInfo::returnValueFPR, result); 1697 return call; 1698 } 1699 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1700 { 1701 JITCompiler::Call call = m_jit.appendCall(function); 1702 m_jit.moveDouble(FPRInfo::returnValueFPR, result); 1703 return call; 1704 } 1705#endif 1706 1707 void 
branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BlockIndex destination) 1708 { 1709 if (!haveEdgeCodeToEmit(destination)) 1710 return addBranch(m_jit.branchDouble(cond, left, right), destination); 1711 1712 JITCompiler::Jump notTaken = m_jit.branchDouble(JITCompiler::invert(cond), left, right); 1713 emitEdgeCode(destination); 1714 addBranch(m_jit.jump(), destination); 1715 notTaken.link(&m_jit); 1716 } 1717 1718 void branchDoubleNonZero(FPRReg value, FPRReg scratch, BlockIndex destination) 1719 { 1720 if (!haveEdgeCodeToEmit(destination)) 1721 return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination); 1722 1723 JITCompiler::Jump notTaken = m_jit.branchDoubleZeroOrNaN(value, scratch); 1724 emitEdgeCode(destination); 1725 addBranch(m_jit.jump(), destination); 1726 notTaken.link(&m_jit); 1727 } 1728 1729 template<typename T, typename U> 1730 void branch32(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination) 1731 { 1732 if (!haveEdgeCodeToEmit(destination)) 1733 return addBranch(m_jit.branch32(cond, left, right), destination); 1734 1735 JITCompiler::Jump notTaken = m_jit.branch32(JITCompiler::invert(cond), left, right); 1736 emitEdgeCode(destination); 1737 addBranch(m_jit.jump(), destination); 1738 notTaken.link(&m_jit); 1739 } 1740 1741 template<typename T, typename U> 1742 void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination) 1743 { 1744 ASSERT(JITCompiler::isInvertible(cond)); 1745 1746 if (!haveEdgeCodeToEmit(destination)) 1747 return addBranch(m_jit.branchTest32(cond, value, mask), destination); 1748 1749 JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value, mask); 1750 emitEdgeCode(destination); 1751 addBranch(m_jit.jump(), destination); 1752 notTaken.link(&m_jit); 1753 } 1754 1755 template<typename T> 1756 void branchTest32(JITCompiler::ResultCondition cond, T value, BlockIndex destination) 1757 { 1758 
ASSERT(JITCompiler::isInvertible(cond)); 1759 1760 if (!haveEdgeCodeToEmit(destination)) 1761 return addBranch(m_jit.branchTest32(cond, value), destination); 1762 1763 JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value); 1764 emitEdgeCode(destination); 1765 addBranch(m_jit.jump(), destination); 1766 notTaken.link(&m_jit); 1767 } 1768 1769#if USE(JSVALUE64) 1770 template<typename T, typename U> 1771 void branch64(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination) 1772 { 1773 if (!haveEdgeCodeToEmit(destination)) 1774 return addBranch(m_jit.branch64(cond, left, right), destination); 1775 1776 JITCompiler::Jump notTaken = m_jit.branch64(JITCompiler::invert(cond), left, right); 1777 emitEdgeCode(destination); 1778 addBranch(m_jit.jump(), destination); 1779 notTaken.link(&m_jit); 1780 } 1781#endif 1782 1783 template<typename T, typename U> 1784 void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination) 1785 { 1786 if (!haveEdgeCodeToEmit(destination)) 1787 return addBranch(m_jit.branchPtr(cond, left, right), destination); 1788 1789 JITCompiler::Jump notTaken = m_jit.branchPtr(JITCompiler::invert(cond), left, right); 1790 emitEdgeCode(destination); 1791 addBranch(m_jit.jump(), destination); 1792 notTaken.link(&m_jit); 1793 } 1794 1795 template<typename T, typename U> 1796 void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination) 1797 { 1798 ASSERT(JITCompiler::isInvertible(cond)); 1799 1800 if (!haveEdgeCodeToEmit(destination)) 1801 return addBranch(m_jit.branchTestPtr(cond, value, mask), destination); 1802 1803 JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value, mask); 1804 emitEdgeCode(destination); 1805 addBranch(m_jit.jump(), destination); 1806 notTaken.link(&m_jit); 1807 } 1808 1809 template<typename T> 1810 void branchTestPtr(JITCompiler::ResultCondition cond, T value, BlockIndex destination) 1811 { 1812 
ASSERT(JITCompiler::isInvertible(cond)); 1813 1814 if (!haveEdgeCodeToEmit(destination)) 1815 return addBranch(m_jit.branchTestPtr(cond, value), destination); 1816 1817 JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value); 1818 emitEdgeCode(destination); 1819 addBranch(m_jit.jump(), destination); 1820 notTaken.link(&m_jit); 1821 } 1822 1823 template<typename T, typename U> 1824 void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination) 1825 { 1826 ASSERT(JITCompiler::isInvertible(cond)); 1827 1828 if (!haveEdgeCodeToEmit(destination)) 1829 return addBranch(m_jit.branchTest8(cond, value, mask), destination); 1830 1831 JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value, mask); 1832 emitEdgeCode(destination); 1833 addBranch(m_jit.jump(), destination); 1834 notTaken.link(&m_jit); 1835 } 1836 1837 template<typename T> 1838 void branchTest8(JITCompiler::ResultCondition cond, T value, BlockIndex destination) 1839 { 1840 ASSERT(JITCompiler::isInvertible(cond)); 1841 1842 if (!haveEdgeCodeToEmit(destination)) 1843 return addBranch(m_jit.branchTest8(cond, value), destination); 1844 1845 JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value); 1846 emitEdgeCode(destination); 1847 addBranch(m_jit.jump(), destination); 1848 notTaken.link(&m_jit); 1849 } 1850 1851 enum FallThroughMode { 1852 AtFallThroughPoint, 1853 ForceJump 1854 }; 1855 void jump(BlockIndex destination, FallThroughMode fallThroughMode = AtFallThroughPoint) 1856 { 1857 if (haveEdgeCodeToEmit(destination)) 1858 emitEdgeCode(destination); 1859 if (destination == nextBlock() 1860 && fallThroughMode == AtFallThroughPoint) 1861 return; 1862 addBranch(m_jit.jump(), destination); 1863 } 1864 1865 inline bool haveEdgeCodeToEmit(BlockIndex) 1866 { 1867 return DFG_ENABLE_EDGE_CODE_VERIFICATION; 1868 } 1869 void emitEdgeCode(BlockIndex destination) 1870 { 1871 if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) 1872 
return; 1873 m_jit.move(TrustedImm32(destination), GPRInfo::regT0); 1874 } 1875 1876 void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination) 1877 { 1878 m_branches.append(BranchRecord(jump, destination)); 1879 } 1880 1881 void linkBranches() 1882 { 1883 for (size_t i = 0; i < m_branches.size(); ++i) { 1884 BranchRecord& branch = m_branches[i]; 1885 branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit); 1886 } 1887 } 1888 1889 BasicBlock* block() 1890 { 1891 return m_jit.graph().m_blocks[m_block].get(); 1892 } 1893 1894#ifndef NDEBUG 1895 void dump(const char* label = 0); 1896#endif 1897 1898#if DFG_ENABLE(CONSISTENCY_CHECK) 1899 void checkConsistency(); 1900#else 1901 void checkConsistency() { } 1902#endif 1903 1904 bool isInteger(Node* node) 1905 { 1906 if (node->hasInt32Result()) 1907 return true; 1908 1909 if (isInt32Constant(node)) 1910 return true; 1911 1912 VirtualRegister virtualRegister = node->virtualRegister(); 1913 GenerationInfo& info = m_generationInfo[virtualRegister]; 1914 1915 return info.isJSInteger(); 1916 } 1917 1918 bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ); 1919 bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ); 1920 void compilePeepHoleIntegerBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition); 1921 void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition); 1922 void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition); 1923 void compilePeepHoleObjectEquality(Node*, Node* branchNode); 1924 void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode); 1925 void compileObjectEquality(Node*); 1926 void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild); 1927 void compileValueAdd(Node*); 1928 void compileObjectOrOtherLogicalNot(Edge value); 1929 
    void compileLogicalNot(Node*);
    void compileStringEquality(Node*);
    void emitObjectOrOtherBranch(Edge value, BlockIndex taken, BlockIndex notTaken);
    void emitBranch(Node*);

    void compileToStringOnCell(Node*);
    void compileNewStringObject(Node*);

    void compileIntegerCompare(Node*, MacroAssembler::RelationalCondition);
    void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
    void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);

    bool compileStrictEqForConstant(Node*, Edge value, JSValue constant);

    bool compileStrictEq(Node*);

    void compileAllocatePropertyStorage(Node*);
    void compileReallocatePropertyStorage(Node*);

#if USE(JSVALUE32_64)
    template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
    void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag);
#endif
    void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property);
    // A put-by-val that may store into a hole needs a scratch register.
    bool putByValWillNeedExtraRegister(ArrayMode arrayMode)
    {
        return arrayMode.mayStoreToHole();
    }
    GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode);
    GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node)
    {
        return temporaryRegisterForPutByVal(temporary, node->arrayMode());
    }

    void compileGetCharCodeAt(Node*);
    void compileGetByValOnString(Node*);
    void compileFromCharCode(Node*);

    void compileGetByValOnArguments(Node*);
    void compileGetArgumentsLength(Node*);

    void compileGetArrayLength(Node*);

    void compileValueToInt32(Node*);
    void compileUInt32ToNumber(Node*);
    void compileDoubleAsInt32(Node*);
    void compileInt32ToDouble(Node*);
    void compileAdd(Node*);
    void compileMakeRope(Node*);
    void compileArithSub(Node*);
    void compileArithNegate(Node*);
    void compileArithMul(Node*);
    void compileArithIMul(Node*);
#if CPU(X86) || CPU(X86_64)
    void compileIntegerArithDivForX86(Node*);
#elif CPU(APPLE_ARMV7S)
    void compileIntegerArithDivForARMv7s(Node*);
#endif
    void compileArithMod(Node*);
    void compileSoftModulo(Node*);
    void compileGetIndexedPropertyStorage(Node*);
    void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node*, size_t elementSize, TypedArraySignedness);
    void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node*, size_t elementSize, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
    void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node*, size_t elementSize);
    void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node*, size_t elementSize);
    void compileNewFunctionNoCheck(Node*);
    void compileNewFunctionExpression(Node*);
    bool compileRegExpExec(Node*);

    // size can be an immediate or a register, and must be in bytes. If size is a register,
    // it must be a different register than resultGPR. Emits code that places a pointer to
    // the end of the allocation. The returned jump is the jump to the slow path.
    // Bump-allocates 'size' bytes from the copied-space allocator, leaving a
    // pointer to the END of the allocation in resultGPR (see comment above).
    template<typename SizeType>
    MacroAssembler::Jump emitAllocateBasicStorage(SizeType size, GPRReg resultGPR)
    {
        CopiedAllocator* copiedAllocator = &m_jit.vm()->heap.storageAllocator();

        m_jit.loadPtr(&copiedAllocator->m_currentRemaining, resultGPR);
        // Subtracting the size from the remaining count goes negative (signed)
        // when the block is exhausted -- that is the slow-path condition.
        MacroAssembler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, size, resultGPR);
        m_jit.storePtr(resultGPR, &copiedAllocator->m_currentRemaining);
        // Convert "remaining after allocation" into an absolute end pointer:
        // payloadEnd - remaining == end of this allocation.
        m_jit.negPtr(resultGPR);
        m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), resultGPR);

        return slowPath;
    }

    // Allocator for a cell of a specific size.
    template <typename StructureType> // StructureType can be GPR or ImmPtr.
    void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
        GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        // Pop the head of the allocator's free list; bail to the slow path if
        // the free list is empty.
        m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR);
        slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));

        // The object is half-allocated: we have what we know is a fresh object, but
        // it's still on the GC's free list.
        m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
        m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()));

        // Initialize the object's Structure.
        m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset()));
    }

    // Allocator for an object of a specific size.
    template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
        StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
    {
        emitAllocateJSCell(resultGPR, allocatorGPR, structure, scratchGPR, slowPath);

        // Initialize the object's property storage pointer.
        m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
    }

    // Convenience allocator for a built-in object.
    template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
    void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
        GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
    {
        // Pick the heap allocator matching the class's destruction needs;
        // these ClassType traits are compile-time constants.
        MarkedAllocator* allocator = 0;
        size_t size = ClassType::allocationSize(0);
        if (ClassType::needsDestruction && ClassType::hasImmortalStructure)
            allocator = &m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(size);
        else if (ClassType::needsDestruction)
            allocator = &m_jit.vm()->heap.allocatorForObjectWithNormalDestructor(size);
        else
            allocator = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(size);
        m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
        emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
    }

    void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements);

#if USE(JSVALUE64)
    JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
#elif USE(JSVALUE32_64)
    JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
#endif

    // Add a backward speculation check.
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);

    // Add a speculation check without additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
    // Add a speculation check without additional recovery, and with a promise to supply a jump later.
    OSRExitJumpPlaceholder backwardSpeculationCheck(ExitKind, JSValueSource, Node*);
    OSRExitJumpPlaceholder backwardSpeculationCheck(ExitKind, JSValueSource, Edge);
    // Add a set of speculation checks without additional recovery.
    void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);
    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
    // Add a speculation check with additional recovery.
    void backwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void backwardSpeculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    // Use this like you would use speculationCheck(), except that you don't pass it a jump
    // (because you don't have to execute a branch; that's kind of the whole point), and you
    // must register the returned Watchpoint with something relevant. In general, this should
    // be used with extreme care. Use speculationCheck() unless you've got an amazing reason
    // not to.
    JumpReplacementWatchpoint* speculationWatchpoint(ExitKind, JSValueSource, Node*);
    // The default for speculation watchpoints is that they're uncounted, because the
    // act of firing a watchpoint invalidates it. So, future recompilations will not
    // attempt to set this watchpoint again.
    JumpReplacementWatchpoint* speculationWatchpoint(ExitKind = UncountableWatchpoint);

    // It is generally a good idea to not use this directly.
    void convertLastOSRExitToForward(const ValueRecovery& = ValueRecovery());

    // Note: not specifying the valueRecovery argument (leaving it as ValueRecovery()) implies
    // that you've ensured that there exists a MovHint prior to your use of forwardSpeculationCheck().
    void forwardSpeculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const ValueRecovery& = ValueRecovery());
    void forwardSpeculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& = ValueRecovery());
    void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
    // Called when we statically determine that a speculation will fail.
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
    void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);

    // Helpers for performing type checks on an edge stored in the given registers.
2109 bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_state.forNode(edge).m_type & ~typesPassedThrough; } 2110 void backwardTypeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail); 2111 void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail); 2112 void forwardTypeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery&); 2113 2114 void speculateInt32(Edge); 2115 void speculateNumber(Edge); 2116 void speculateRealNumber(Edge); 2117 void speculateBoolean(Edge); 2118 void speculateCell(Edge); 2119 void speculateObject(Edge); 2120 void speculateObjectOrOther(Edge); 2121 void speculateString(Edge); 2122 template<typename StructureLocationType> 2123 void speculateStringObjectForStructure(Edge, StructureLocationType); 2124 void speculateStringObject(Edge, GPRReg); 2125 void speculateStringObject(Edge); 2126 void speculateStringOrStringObject(Edge); 2127 void speculateNotCell(Edge); 2128 void speculateOther(Edge); 2129 void speculate(Node*, Edge); 2130 2131 const TypedArrayDescriptor* typedArrayDescriptor(ArrayMode); 2132 2133 JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType); 2134 JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode); 2135 void checkArray(Node*); 2136 void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg); 2137 void arrayify(Node*); 2138 2139 template<bool strict> 2140 GPRReg fillSpeculateIntInternal(Edge, DataFormat& returnFormat); 2141 2142 // It is possible, during speculative generation, to reach a situation in which we 2143 // can statically determine a speculation will fail (for example, when two nodes 2144 // will make conflicting speculations about the same operand). In such cases this 2145 // flag is cleared, indicating no further code generation should take place. 
2146 bool m_compileOkay; 2147 2148 // Tracking for which nodes are currently holding the values of arguments and bytecode 2149 // operand-indexed variables. 2150 2151 ValueSource valueSourceForOperand(int operand) 2152 { 2153 return valueSourceReferenceForOperand(operand); 2154 } 2155 2156 void setNodeForOperand(Node* node, int operand) 2157 { 2158 valueSourceReferenceForOperand(operand) = ValueSource(MinifiedID(node)); 2159 } 2160 2161 // Call this with care, since it both returns a reference into an array 2162 // and potentially resizes the array. So it would not be right to call this 2163 // twice and then perform operands on both references, since the one from 2164 // the first call may no longer be valid. 2165 ValueSource& valueSourceReferenceForOperand(int operand) 2166 { 2167 if (operandIsArgument(operand)) { 2168 int argument = operandToArgument(operand); 2169 return m_arguments[argument]; 2170 } 2171 2172 if ((unsigned)operand >= m_variables.size()) 2173 m_variables.resize(operand + 1); 2174 2175 return m_variables[operand]; 2176 } 2177 2178 void recordSetLocal(int operand, ValueSource valueSource) 2179 { 2180 valueSourceReferenceForOperand(operand) = valueSource; 2181 m_stream->appendAndLog(VariableEvent::setLocal(operand, valueSource.dataFormat())); 2182 } 2183 2184 // The JIT, while also provides MacroAssembler functionality. 2185 JITCompiler& m_jit; 2186 2187 // The current node being generated. 2188 BlockIndex m_block; 2189 Node* m_currentNode; 2190 SpeculationDirection m_speculationDirection; 2191#if !ASSERT_DISABLED 2192 bool m_canExit; 2193#endif 2194 unsigned m_indexInBlock; 2195 // Virtual and physical register maps. 
2196 Vector<GenerationInfo, 32> m_generationInfo; 2197 RegisterBank<GPRInfo> m_gprs; 2198 RegisterBank<FPRInfo> m_fprs; 2199 2200 Vector<MacroAssembler::Label> m_blockHeads; 2201 Vector<MacroAssembler::Label> m_osrEntryHeads; 2202 2203 struct BranchRecord { 2204 BranchRecord(MacroAssembler::Jump jump, BlockIndex destination) 2205 : jump(jump) 2206 , destination(destination) 2207 { 2208 } 2209 2210 MacroAssembler::Jump jump; 2211 BlockIndex destination; 2212 }; 2213 Vector<BranchRecord, 8> m_branches; 2214 2215 Vector<ValueSource, 0> m_arguments; 2216 Vector<ValueSource, 0> m_variables; 2217 int m_lastSetOperand; 2218 CodeOrigin m_codeOriginForOSR; 2219 2220 AbstractState m_state; 2221 2222 VariableEventStream* m_stream; 2223 MinifiedGraph* m_minifiedGraph; 2224 2225 bool m_isCheckingArgumentTypes; 2226 2227 Vector<OwnPtr<SlowPathGenerator>, 8> m_slowPathGenerators; 2228 Vector<SilentRegisterSavePlan> m_plans; 2229 2230 ValueRecovery computeValueRecoveryFor(const ValueSource&); 2231 2232 ValueRecovery computeValueRecoveryFor(int operand) 2233 { 2234 return computeValueRecoveryFor(valueSourceForOperand(operand)); 2235 } 2236}; 2237 2238 2239// === Operand types === 2240// 2241// IntegerOperand and JSValueOperand. 2242// 2243// These classes are used to lock the operands to a node into machine 2244// registers. These classes implement of pattern of locking a value 2245// into register at the point of construction only if it is already in 2246// registers, and otherwise loading it lazily at the point it is first 2247// used. We do so in order to attempt to avoid spilling one operand 2248// in order to make space available for another. 

// Locks the operand of a node already known to be an int32 into a GPR.
// Following the lazy-locking pattern described above, the register is
// claimed eagerly in the constructor only if the value is already filled,
// and otherwise on the first call to gpr().
class IntegerOperand {
public:
    explicit IntegerOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
        , m_format(DataFormatNone)
#endif
    {
        ASSERT(m_jit);
        // Under automatic speculation the edge must already be a known int32;
        // ManualOperandSpeculation callers take on that responsibility themselves.
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == KnownInt32Use);
        if (jit->isFilled(edge.node()))
            gpr();
    }

    ~IntegerOperand()
    {
        // The register must have been claimed (eagerly or lazily) by now;
        // release it back to the register allocator.
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    DataFormat format()
    {
        gpr(); // m_format is set when m_gpr is locked.
        ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
        return m_format;
    }

    GPRReg gpr()
    {
        // Lazily fill and lock the operand into a register on first request.
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillInteger(m_edge, m_format);
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid; // InvalidGPRReg until the operand has been filled.
    DataFormat m_format; // Only meaningful once gpr() has filled the operand.
};

// Locks a JSValue operand into machine registers. On 64-bit platforms a
// JSValue occupies a single GPR; on 32-bit platforms it is either a
// tag/payload GPR pair or, for doubles, an FPR — the representation is
// decided when the value is filled.
class JSValueOperand {
public:
    explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
        : m_jit(jit)
        , m_edge(edge)
#if USE(JSVALUE64)
        , m_gprOrInvalid(InvalidGPRReg)
#elif USE(JSVALUE32_64)
        , m_isDouble(false)
#endif
    {
        ASSERT(m_jit);
        ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
#if USE(JSVALUE64)
        if (jit->isFilled(node()))
            gpr();
#elif USE(JSVALUE32_64)
        m_register.pair.tagGPR = InvalidGPRReg;
        m_register.pair.payloadGPR = InvalidGPRReg;
        if (jit->isFilled(node()))
            fill();
#endif
    }

    ~JSValueOperand()
    {
#if USE(JSVALUE64)
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
#elif USE(JSVALUE32_64)
        // Unlock whichever representation the fill chose.
        if (m_isDouble) {
            ASSERT(m_register.fpr != InvalidFPRReg);
            m_jit->unlock(m_register.fpr);
        } else {
            ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg);
            m_jit->unlock(m_register.pair.tagGPR);
            m_jit->unlock(m_register.pair.payloadGPR);
        }
#endif
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

#if USE(JSVALUE64)
    GPRReg gpr()
    {
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillJSValue(m_edge);
        return m_gprOrInvalid;
    }
    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(gpr());
    }
#elif USE(JSVALUE32_64)
    bool isDouble() { return m_isDouble; }

    void fill()
    {
        // A false return from fillJSValue() means the value was produced as a
        // double in m_register.fpr rather than as a tag/payload pair.
        if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
            m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
    }

    GPRReg tagGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.tagGPR;
    }

    GPRReg payloadGPR()
    {
        fill();
        ASSERT(!m_isDouble);
        return m_register.pair.payloadGPR;
    }

    JSValueRegs jsValueRegs()
    {
        return JSValueRegs(tagGPR(), payloadGPR());
    }

    FPRReg fpr()
    {
        fill();
        ASSERT(m_isDouble);
        return m_register.fpr;
    }
#endif

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
#if USE(JSVALUE64)
    GPRReg m_gprOrInvalid;
#elif USE(JSVALUE32_64)
    // Either a tag/payload GPR pair or an FPR; m_isDouble selects which
    // union member is active once the operand has been filled.
    union {
        struct {
            GPRReg tagGPR;
            GPRReg payloadGPR;
        } pair;
        FPRReg fpr;
    } m_register;
    bool m_isDouble;
#endif
};

// Locks a storage operand (filled via SpeculativeJIT::fillStorage()) into a GPR.
class StorageOperand {
public:
    explicit StorageOperand(SpeculativeJIT* jit, Edge edge)
        : m_jit(jit)
        , m_edge(edge)
        , m_gprOrInvalid(InvalidGPRReg)
    {
        ASSERT(m_jit);
        ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse);
        if (jit->isFilled(node()))
            gpr();
    }

    ~StorageOperand()
    {
        ASSERT(m_gprOrInvalid != InvalidGPRReg);
        m_jit->unlock(m_gprOrInvalid);
    }

    Edge edge() const
    {
        return m_edge;
    }

    Node* node() const
    {
        return edge().node();
    }

    GPRReg gpr()
    {
        // Lazily fill and lock the operand into a register on first request.
        if (m_gprOrInvalid == InvalidGPRReg)
            m_gprOrInvalid = m_jit->fillStorage(edge());
        return m_gprOrInvalid;
    }

    void use()
    {
        m_jit->use(node());
    }

private:
    SpeculativeJIT* m_jit;
    Edge m_edge;
    GPRReg m_gprOrInvalid;
};


// === Temporaries ===
//
// These classes are used to allocate temporary registers.
// A mechanism is provided to attempt to reuse the registers
// currently allocated to child nodes whose value is consumed
// by, and not live after, this operation.

// A temporary general-purpose register, unlocked on destruction. The
// operand-taking constructors attempt to reuse a register already allocated
// to the given operand(s) (see the Temporaries comment above); constructor
// bodies are defined out of line.
class GPRTemporary {
public:
    GPRTemporary();
    GPRTemporary(SpeculativeJIT*);
    GPRTemporary(SpeculativeJIT*, GPRReg specific);
    GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&);
    GPRTemporary(SpeculativeJIT*, IntegerOperand&);
    GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&);
    GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&);
#if USE(JSVALUE64)
    GPRTemporary(SpeculativeJIT*, JSValueOperand&);
#elif USE(JSVALUE32_64)
    GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true);
#endif
    GPRTemporary(SpeculativeJIT*, StorageOperand&);

    // Takes ownership of another temporary's register.
    void adopt(GPRTemporary&);

    ~GPRTemporary()
    {
        // A temporary may own no register (e.g. after being adopted away, or
        // when default-constructed — presumably m_jit is null then; the guard
        // covers both). Only unlock when a register is actually held.
        if (m_jit && m_gpr != InvalidGPRReg)
            m_jit->unlock(gpr());
    }

    GPRReg gpr()
    {
        return m_gpr;
    }

private:
    SpeculativeJIT* m_jit;
    GPRReg m_gpr;
};

// A temporary floating-point register, unlocked on destruction.
class FPRTemporary {
public:
    FPRTemporary(SpeculativeJIT*);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
    FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
#if USE(JSVALUE32_64)
    FPRTemporary(SpeculativeJIT*, JSValueOperand&);
#endif

    ~FPRTemporary()
    {
        m_jit->unlock(fpr());
    }

    FPRReg fpr() const
    {
        ASSERT(m_fpr != InvalidFPRReg);
        return m_fpr;
    }

protected:
    // For subclasses (e.g. FPRResult) that hand in an already-locked FPR.
    FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR)
        : m_jit(jit)
        , m_fpr(lockedFPR)
    {
    }

private:
    SpeculativeJIT* m_jit;
    FPRReg m_fpr;
};


// === Results ===
//
// These classes lock the result of a call to a C++ helper function.

// Locks the GPR in which C++ helper calls return their result.
class GPRResult : public GPRTemporary {
public:
    GPRResult(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR)
    {
    }
};

#if USE(JSVALUE32_64)
// Locks the second return-value GPR (GPRInfo::returnValueGPR2); only
// available on 32-bit builds.
class GPRResult2 : public GPRTemporary {
public:
    GPRResult2(SpeculativeJIT* jit)
        : GPRTemporary(jit, GPRInfo::returnValueGPR2)
    {
    }
};
#endif

// Locks the FPR in which C++ helper calls return a double result.
class FPRResult : public FPRTemporary {
public:
    FPRResult(SpeculativeJIT* jit)
        : FPRTemporary(jit, lockedResult(jit))
    {
    }

private:
    // Locks the return-value FPR before handing it to the protected
    // FPRTemporary constructor, which expects an already-locked register.
    static FPRReg lockedResult(SpeculativeJIT* jit)
    {
        jit->lock(FPRInfo::returnValueFPR);
        return FPRInfo::returnValueFPR;
    }
};


// === Speculative Operand types ===
//
// SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
//
// These are used to lock the operands to a node into machine registers within the
// SpeculativeJIT. The classes operate like those above, however these will
// perform a speculative check for a more restrictive type than we can statically
// determine the operand to have. If the operand does not have the requested type,
// a bail-out to the non-speculative path will be taken.
2600 2601class SpeculateIntegerOperand { 2602public: 2603 explicit SpeculateIntegerOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) 2604 : m_jit(jit) 2605 , m_edge(edge) 2606 , m_gprOrInvalid(InvalidGPRReg) 2607#ifndef NDEBUG 2608 , m_format(DataFormatNone) 2609#endif 2610 { 2611 ASSERT(m_jit); 2612 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use)); 2613 if (jit->isFilled(node())) 2614 gpr(); 2615 } 2616 2617 ~SpeculateIntegerOperand() 2618 { 2619 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2620 m_jit->unlock(m_gprOrInvalid); 2621 } 2622 2623 Edge edge() const 2624 { 2625 return m_edge; 2626 } 2627 2628 Node* node() const 2629 { 2630 return edge().node(); 2631 } 2632 2633 DataFormat format() 2634 { 2635 gpr(); // m_format is set when m_gpr is locked. 2636 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger); 2637 return m_format; 2638 } 2639 2640 GPRReg gpr() 2641 { 2642 if (m_gprOrInvalid == InvalidGPRReg) 2643 m_gprOrInvalid = m_jit->fillSpeculateInt(edge(), m_format); 2644 return m_gprOrInvalid; 2645 } 2646 2647 void use() 2648 { 2649 m_jit->use(node()); 2650 } 2651 2652private: 2653 SpeculativeJIT* m_jit; 2654 Edge m_edge; 2655 GPRReg m_gprOrInvalid; 2656 DataFormat m_format; 2657}; 2658 2659class SpeculateStrictInt32Operand { 2660public: 2661 explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) 2662 : m_jit(jit) 2663 , m_edge(edge) 2664 , m_gprOrInvalid(InvalidGPRReg) 2665 { 2666 ASSERT(m_jit); 2667 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use)); 2668 if (jit->isFilled(node())) 2669 gpr(); 2670 } 2671 2672 ~SpeculateStrictInt32Operand() 2673 { 2674 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2675 m_jit->unlock(m_gprOrInvalid); 2676 } 2677 2678 Edge edge() const 2679 { 2680 return 
m_edge; 2681 } 2682 2683 Node* node() const 2684 { 2685 return edge().node(); 2686 } 2687 2688 GPRReg gpr() 2689 { 2690 if (m_gprOrInvalid == InvalidGPRReg) 2691 m_gprOrInvalid = m_jit->fillSpeculateIntStrict(edge()); 2692 return m_gprOrInvalid; 2693 } 2694 2695 void use() 2696 { 2697 m_jit->use(node()); 2698 } 2699 2700private: 2701 SpeculativeJIT* m_jit; 2702 Edge m_edge; 2703 GPRReg m_gprOrInvalid; 2704}; 2705 2706class SpeculateDoubleOperand { 2707public: 2708 explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) 2709 : m_jit(jit) 2710 , m_edge(edge) 2711 , m_fprOrInvalid(InvalidFPRReg) 2712 { 2713 ASSERT(m_jit); 2714 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == NumberUse || edge.useKind() == KnownNumberUse || edge.useKind() == RealNumberUse)); 2715 if (jit->isFilled(node())) 2716 fpr(); 2717 } 2718 2719 ~SpeculateDoubleOperand() 2720 { 2721 ASSERT(m_fprOrInvalid != InvalidFPRReg); 2722 m_jit->unlock(m_fprOrInvalid); 2723 } 2724 2725 Edge edge() const 2726 { 2727 return m_edge; 2728 } 2729 2730 Node* node() const 2731 { 2732 return edge().node(); 2733 } 2734 2735 FPRReg fpr() 2736 { 2737 if (m_fprOrInvalid == InvalidFPRReg) 2738 m_fprOrInvalid = m_jit->fillSpeculateDouble(edge()); 2739 return m_fprOrInvalid; 2740 } 2741 2742 void use() 2743 { 2744 m_jit->use(node()); 2745 } 2746 2747private: 2748 SpeculativeJIT* m_jit; 2749 Edge m_edge; 2750 FPRReg m_fprOrInvalid; 2751}; 2752 2753class SpeculateCellOperand { 2754public: 2755 explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) 2756 : m_jit(jit) 2757 , m_edge(edge) 2758 , m_gprOrInvalid(InvalidGPRReg) 2759 { 2760 ASSERT(m_jit); 2761 if (!edge) 2762 return; 2763 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == CellUse || edge.useKind() == KnownCellUse || edge.useKind() == ObjectUse || edge.useKind() == StringUse || 
edge.useKind() == KnownStringUse || edge.useKind() == StringObjectUse || edge.useKind() == StringOrStringObjectUse)); 2764 if (jit->isFilled(node())) 2765 gpr(); 2766 } 2767 2768 ~SpeculateCellOperand() 2769 { 2770 if (!m_edge) 2771 return; 2772 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2773 m_jit->unlock(m_gprOrInvalid); 2774 } 2775 2776 Edge edge() const 2777 { 2778 return m_edge; 2779 } 2780 2781 Node* node() const 2782 { 2783 return edge().node(); 2784 } 2785 2786 GPRReg gpr() 2787 { 2788 ASSERT(m_edge); 2789 if (m_gprOrInvalid == InvalidGPRReg) 2790 m_gprOrInvalid = m_jit->fillSpeculateCell(edge()); 2791 return m_gprOrInvalid; 2792 } 2793 2794 void use() 2795 { 2796 ASSERT(m_edge); 2797 m_jit->use(node()); 2798 } 2799 2800private: 2801 SpeculativeJIT* m_jit; 2802 Edge m_edge; 2803 GPRReg m_gprOrInvalid; 2804}; 2805 2806class SpeculateBooleanOperand { 2807public: 2808 explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation) 2809 : m_jit(jit) 2810 , m_edge(edge) 2811 , m_gprOrInvalid(InvalidGPRReg) 2812 { 2813 ASSERT(m_jit); 2814 ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse); 2815 if (jit->isFilled(node())) 2816 gpr(); 2817 } 2818 2819 ~SpeculateBooleanOperand() 2820 { 2821 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2822 m_jit->unlock(m_gprOrInvalid); 2823 } 2824 2825 Edge edge() const 2826 { 2827 return m_edge; 2828 } 2829 2830 Node* node() const 2831 { 2832 return edge().node(); 2833 } 2834 2835 GPRReg gpr() 2836 { 2837 if (m_gprOrInvalid == InvalidGPRReg) 2838 m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge()); 2839 return m_gprOrInvalid; 2840 } 2841 2842 void use() 2843 { 2844 m_jit->use(node()); 2845 } 2846 2847private: 2848 SpeculativeJIT* m_jit; 2849 Edge m_edge; 2850 GPRReg m_gprOrInvalid; 2851}; 2852 2853template<typename StructureLocationType> 2854void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType 
structureLocation) 2855{ 2856 Structure* stringObjectStructure = 2857 m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure(); 2858 Structure* stringPrototypeStructure = stringObjectStructure->storedPrototype().asCell()->structure(); 2859 ASSERT(stringPrototypeStructure->transitionWatchpointSetIsStillValid()); 2860 2861 if (!m_state.forNode(edge).m_currentKnownStructure.isSubsetOf(StructureSet(m_jit.globalObjectFor(m_currentNode->codeOrigin)->stringObjectStructure()))) { 2862 speculationCheck( 2863 NotStringObject, JSValueRegs(), 0, 2864 m_jit.branchPtr( 2865 JITCompiler::NotEqual, structureLocation, TrustedImmPtr(stringObjectStructure))); 2866 } 2867 stringPrototypeStructure->addTransitionWatchpoint(speculationWatchpoint(NotStringObject)); 2868} 2869 2870#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) do { \ 2871 if (!needsTypeCheck((edge), (typesPassedThrough))) \ 2872 break; \ 2873 typeCheck((source), (edge), (typesPassedThrough), (jumpToFail)); \ 2874 } while (0) 2875 2876} } // namespace JSC::DFG 2877 2878#endif 2879#endif 2880 2881