Changeset 95484 in webkit
- Timestamp:
- Sep 19, 2011 3:27:38 PM (13 years ago)
- Location:
- trunk/Source/JavaScriptCore
- Files:
-
- 14 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r95480 r95484 1 2011-09-19 Filip Pizlo <fpizlo@apple.com> 2 3 DFG speculation failures should act as additional value profiles 4 https://bugs.webkit.org/show_bug.cgi?id=68335 5 6 Reviewed by Oliver Hunt. 7 8 This adds slow-case counters to the old JIT. It also ensures that 9 negative zero in multiply is handled carefully. The old JIT 10 previously took slow path if the result of a multiply was zero, 11 which, without any changes, would cause the DFG to think that 12 every such multiply produced a double result. 13 14 This also fixes a bug in the old JIT's handling of decrements. It 15 would take the slow path if the result was zero, but not if it 16 underflowed. 17 18 By itself, this would be a 1% slow-down on V8 and Kraken. But then 19 I wrote optimizations in the DFG that take advantage of this new 20 information. It's no longer the case that every multiply needs to 21 do a check for negative zero; it only happens if the negative 22 zero is ignored. 23 24 This results in a 12% speed-up on v8-crypto, for a 1.4% geomean 25 speed-up in V8. It's mostly neutral on Kraken. I can see an 26 0.5% slow-down and it appears to be significant. 
27 28 * bytecode/CodeBlock.cpp: 29 (JSC::CodeBlock::resetRareCaseProfiles): 30 (JSC::CodeBlock::dumpValueProfiles): 31 * bytecode/CodeBlock.h: 32 * bytecode/ValueProfile.h: 33 (JSC::RareCaseProfile::RareCaseProfile): 34 (JSC::getRareCaseProfileBytecodeOffset): 35 * dfg/DFGByteCodeParser.cpp: 36 (JSC::DFG::ByteCodeParser::toInt32): 37 (JSC::DFG::ByteCodeParser::makeSafe): 38 (JSC::DFG::ByteCodeParser::parseBlock): 39 * dfg/DFGJITCodeGenerator.cpp: 40 (JSC::DFG::GPRTemporary::GPRTemporary): 41 * dfg/DFGJITCodeGenerator.h: 42 * dfg/DFGNode.h: 43 * dfg/DFGPropagator.cpp: 44 (JSC::DFG::Propagator::propagateNode): 45 (JSC::DFG::Propagator::fixupNode): 46 (JSC::DFG::Propagator::clobbersWorld): 47 (JSC::DFG::Propagator::performNodeCSE): 48 * dfg/DFGSpeculativeJIT.cpp: 49 (JSC::DFG::SpeculativeJIT::compile): 50 (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor): 51 * jit/JIT.cpp: 52 (JSC::JIT::privateCompileSlowCases): 53 * jit/JIT.h: 54 (JSC::JIT::linkDummySlowCase): 55 * jit/JITArithmetic.cpp: 56 (JSC::JIT::emit_op_post_dec): 57 (JSC::JIT::emit_op_pre_dec): 58 (JSC::JIT::compileBinaryArithOp): 59 (JSC::JIT::emit_op_add): 60 (JSC::JIT::emitSlow_op_add): 61 * jit/JITInlineMethods.h: 62 (JSC::JIT::addSlowCase): 63 1 64 2011-09-19 Adam Roben <aroben@apple.com> 2 65 -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp
r95134 r95484 1985 1985 #endif 1986 1986 1987 #if ENABLE(VALUE_PROFILER) 1988 void CodeBlock::resetRareCaseProfiles() 1989 { 1990 for (unsigned i = 0; i < numberOfSlowCaseProfiles(); ++i) 1991 slowCaseProfile(i)->m_counter = 0; 1992 for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) 1993 specialFastCaseProfile(i)->m_counter = 0; 1994 } 1995 #endif 1996 1987 1997 #if ENABLE(VERBOSE_VALUE_PROFILE) 1988 1998 void CodeBlock::dumpValueProfiles() … … 2003 2013 fprintf(stderr, "\n"); 2004 2014 } 2015 fprintf(stderr, "SlowCaseProfile for %p:\n", this); 2016 for (unsigned i = 0; i < numberOfSlowCaseProfiles(); ++i) { 2017 SlowCaseProfile* profile = slowCaseProfile(i); 2018 fprintf(stderr, " bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); 2019 } 2005 2020 } 2006 2021 #endif -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
r95273 r95484 489 489 return result; 490 490 } 491 492 RareCaseProfile* addSlowCaseProfile(int bytecodeOffset) 493 { 494 m_slowCaseProfiles.append(RareCaseProfile(bytecodeOffset)); 495 return &m_slowCaseProfiles.last(); 496 } 497 unsigned numberOfSlowCaseProfiles() { return m_slowCaseProfiles.size(); } 498 RareCaseProfile* slowCaseProfile(int index) { return &m_slowCaseProfiles[index]; } 499 RareCaseProfile* slowCaseProfileForBytecodeOffset(int bytecodeOffset) 500 { 501 return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_slowCaseProfiles, m_slowCaseProfiles.size(), bytecodeOffset); 502 } 503 504 static uint32_t slowCaseThreshold() { return 100; } 505 bool likelyToTakeSlowCase(int bytecodeOffset) 506 { 507 return slowCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter >= slowCaseThreshold(); 508 } 509 510 RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) 511 { 512 m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset)); 513 return &m_specialFastCaseProfiles.last(); 514 } 515 unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); } 516 RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; } 517 RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset) 518 { 519 return WTF::genericBinarySearch<RareCaseProfile, int, getRareCaseProfileBytecodeOffset>(m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset); 520 } 521 522 bool likelyToTakeDeepestSlowCase(int bytecodeOffset) 523 { 524 unsigned slowCaseCount = slowCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; 525 unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; 526 return (slowCaseCount - specialFastCaseCount) >= slowCaseThreshold(); 527 } 528 529 void resetRareCaseProfiles(); 491 530 #endif 492 531 … … 792 831 #if ENABLE(VALUE_PROFILER) 793 832 SegmentedVector<ValueProfile, 8> 
m_valueProfiles; 833 SegmentedVector<RareCaseProfile, 8> m_slowCaseProfiles; 834 SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles; 794 835 #endif 795 836 -
trunk/Source/JavaScriptCore/bytecode/ValueProfile.h
r95134 r95484 360 360 return valueProfile->m_bytecodeOffset; 361 361 } 362 363 // This is a mini value profile to catch pathologies. It is a counter that gets 364 // incremented when we take the slow path on any instruction. 365 struct RareCaseProfile { 366 RareCaseProfile(int bytecodeOffset) 367 : m_bytecodeOffset(bytecodeOffset) 368 , m_counter(0) 369 { 370 } 371 372 int m_bytecodeOffset; 373 uint32_t m_counter; 374 }; 375 376 inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile) 377 { 378 return rareCaseProfile->m_bytecodeOffset; 379 } 362 380 #endif 363 381 -
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r95399 r95484 194 194 return node.child1(); 195 195 196 if (node.op == UInt32ToNumberSafe) 197 return node.child1(); 198 196 199 // Check for numeric constants boxed as JSValues. 197 200 if (node.op == JSConstant) { … … 484 487 case ArithAdd: 485 488 case ArithSub: 486 case ArithMul: 489 case ArithMulIgnoreZero: 490 case ArithMulSpecNotNegZero: 491 case ArithMulPossiblyNegZero: 492 case ArithMulSafe: 487 493 case ValueAdd: 488 494 m_reusableNodeStack.append(&m_graph[nodePtr->child1()]); … … 518 524 { 519 525 stronglyPredict(nodeIndex, m_currentIndex); 526 } 527 528 NodeType makeSafe(NodeType op) 529 { 530 if (!m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) 531 return op; 532 533 #if ENABLE(DFG_DEBUG_VERBOSE) 534 printf("Making %s @%lu safe at bc#%u because slow-case counter is at %u\n", Graph::opName(op), m_graph.size(), m_currentIndex, m_profiledBlock->slowCaseProfileForBytecodeOffset(m_currentIndex)->m_counter); 535 #endif 536 537 switch (op) { 538 case UInt32ToNumber: 539 return UInt32ToNumberSafe; 540 case ArithAdd: 541 return ArithAddSafe; 542 case ArithSub: 543 return ArithSubSafe; 544 545 // We initialize ArithMul to ArithMulIgnoreZero and push it towards 546 // safer variants as we learn more, unless we already know that it 547 // can overflow. If it doesn't overflow, we first turn it into 548 // PossiblyNegZero if we see that it had been -0 during old JIT 549 // execution. 550 case ArithMulIgnoreZero: 551 if (m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)) 552 return ArithMulSafe; 553 else 554 return ArithMulPossiblyNegZero; 555 556 case ValueAdd: 557 return ValueAddSafe; 558 default: 559 ASSERT_NOT_REACHED(); 560 return Phantom; // Have to return something. 
561 } 562 } 563 564 NodeIndex getTrueResult(NodeIndex nodeIndex) 565 { 566 switch (m_graph[nodeIndex].op) { 567 case ArithMulIgnoreZero: 568 m_graph[nodeIndex].op = ArithMulSpecNotNegZero; 569 break; 570 case ArithMulPossiblyNegZero: 571 m_graph[nodeIndex].op = ArithMulSafe; 572 break; 573 default: 574 break; 575 } 576 return nodeIndex; 577 } 578 579 bool isMultiply(NodeIndex nodeIndex) 580 { 581 switch (m_graph[nodeIndex].op) { 582 case ArithMulSpecNotNegZero: 583 case ArithMulIgnoreZero: 584 case ArithMulPossiblyNegZero: 585 case ArithMulSafe: 586 return true; 587 default: 588 return false; 589 } 520 590 } 521 591 … … 788 858 result = addToGraph(BitURShift, op1, op2); 789 859 else 790 result = addToGraph( UInt32ToNumber, op1);860 result = addToGraph(makeSafe(UInt32ToNumber), op1); 791 861 } else { 792 862 // Cannot optimize at this stage; shift & potentially rebox as a double. 793 863 result = addToGraph(BitURShift, op1, op2); 794 result = addToGraph( UInt32ToNumber, result);864 result = addToGraph(makeSafe(UInt32ToNumber), result); 795 865 } 796 866 set(currentInstruction[1].u.operand, result, PredictInt32); … … 804 874 NodeIndex op = getToNumber(srcDst); 805 875 weaklyPredictInt32(op); 806 set(srcDst, addToGraph( ArithAdd, op, one()));876 set(srcDst, addToGraph(makeSafe(ArithAdd), op, one())); 807 877 NEXT_OPCODE(op_pre_inc); 808 878 } … … 814 884 weaklyPredictInt32(op); 815 885 set(result, op); 816 set(srcDst, addToGraph( ArithAdd, op, one()));886 set(srcDst, addToGraph(makeSafe(ArithAdd), op, one())); 817 887 NEXT_OPCODE(op_post_inc); 818 888 } … … 822 892 NodeIndex op = getToNumber(srcDst); 823 893 weaklyPredictInt32(op); 824 set(srcDst, addToGraph( ArithSub, op, one()));894 set(srcDst, addToGraph(makeSafe(ArithSub), op, one())); 825 895 NEXT_OPCODE(op_pre_dec); 826 896 } … … 832 902 weaklyPredictInt32(op); 833 903 set(result, op); 834 set(srcDst, addToGraph( ArithSub, op, one()));904 set(srcDst, addToGraph(makeSafe(ArithSub), op, one())); 835 905 
NEXT_OPCODE(op_post_dec); 836 906 } … … 847 917 weaklyPredictInt32(op2); 848 918 } 919 920 // If both inputs are multiplies, then we need to make sure to play 921 // it safe with -0. 922 if (isMultiply(op1) && isMultiply(op2)) { 923 getTrueResult(op1); 924 getTrueResult(op2); 925 } 926 849 927 if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult()) 850 set(currentInstruction[1].u.operand, addToGraph( ArithAdd, toNumber(op1), toNumber(op2)));928 set(currentInstruction[1].u.operand, addToGraph(makeSafe(ArithAdd), toNumber(op1), toNumber(op2))); 851 929 else 852 set(currentInstruction[1].u.operand, addToGraph( ValueAdd, op1, op2));930 set(currentInstruction[1].u.operand, addToGraph(makeSafe(ValueAdd), op1, op2)); 853 931 NEXT_OPCODE(op_add); 854 932 } … … 861 939 weaklyPredictInt32(op2); 862 940 } 863 set(currentInstruction[1].u.operand, addToGraph(ArithSub, op1, op2)); 941 942 // For the left child we need to be careful about negative zero, 943 // since -0 - 0 is -0. If the right child is -0 then it doesn't 944 // matter, since 0 - -0 is 0 and 0 - 0 is 0. 945 getTrueResult(op1); 946 947 set(currentInstruction[1].u.operand, addToGraph(makeSafe(ArithSub), op1, op2)); 864 948 NEXT_OPCODE(op_sub); 865 949 } 866 950 867 951 case op_mul: { 868 NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); 869 NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); 870 set(currentInstruction[1].u.operand, addToGraph(ArithMul, op1, op2)); 952 // We could be fancy here and skip the getTrueResult, instead making 953 // getTrueResult operate transitively. But we do this the simple way, for 954 // now. 
955 NodeIndex op1 = getTrueResult(getToNumber(currentInstruction[2].u.operand)); 956 NodeIndex op2 = getTrueResult(getToNumber(currentInstruction[3].u.operand)); 957 set(currentInstruction[1].u.operand, addToGraph(makeSafe(ArithMulIgnoreZero), op1, op2)); 871 958 NEXT_OPCODE(op_mul); 872 959 } … … 1003 1090 NodeIndex base = get(currentInstruction[1].u.operand); 1004 1091 NodeIndex property = get(currentInstruction[2].u.operand); 1005 NodeIndex value = get (currentInstruction[3].u.operand);1092 NodeIndex value = getTrueResult(get(currentInstruction[3].u.operand)); 1006 1093 weaklyPredictArray(base); 1007 1094 weaklyPredictInt32(property); … … 1060 1147 1061 1148 case op_put_by_id: { 1062 NodeIndex value = get (currentInstruction[3].u.operand);1149 NodeIndex value = getTrueResult(get(currentInstruction[3].u.operand)); 1063 1150 NodeIndex base = get(currentInstruction[1].u.operand); 1064 1151 unsigned identifier = currentInstruction[2].u.operand; … … 1081 1168 1082 1169 case op_put_global_var: { 1083 NodeIndex value = get (currentInstruction[2].u.operand);1170 NodeIndex value = getTrueResult(get(currentInstruction[2].u.operand)); 1084 1171 addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value); 1085 1172 NEXT_OPCODE(op_put_global_var); … … 1363 1450 1364 1451 Node* phiNode = &m_graph[entry.m_phi]; 1365 if (phiNode->refCount()) 1452 if (phiNode->refCount()) { 1453 if (m_graph[valueInPredecessor].op == SetLocal) { 1454 // Any live SetLocal should ensure that it gets the true value. 1455 // Currently this means that ArithMuls distinguish between 1456 // negative zero and positive zero. 1457 getTrueResult(m_graph[valueInPredecessor].child1()); 1458 } 1366 1459 m_graph.ref(valueInPredecessor); 1460 } 1367 1461 1368 1462 if (phiNode->child1() == NoNode) { -
trunk/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp
r95273 r95484 34 34 namespace JSC { namespace DFG { 35 35 36 const double twoToThe32 = (double)0x100000000ull;36 const double JITCodeGenerator::twoToThe32 = (double)0x100000000ull; 37 37 38 38 void JITCodeGenerator::clearGenerationInfo() … … 730 730 } 731 731 732 case ArithMul: { 732 case ArithMulIgnoreZero: 733 case ArithMulPossiblyNegZero: 734 case ArithMulSpecNotNegZero: 735 case ArithMulSafe: { 733 736 overflow.append(m_jit.branchMul32(MacroAssembler::Overflow, arg1GPR, arg2GPR, resultGPR)); 734 737 overflow.append(m_jit.branchTest32(MacroAssembler::Zero, resultGPR)); … … 821 824 break; 822 825 823 case ArithMul: 826 case ArithMulIgnoreZero: 827 case ArithMulPossiblyNegZero: 828 case ArithMulSpecNotNegZero: 829 case ArithMulSafe: 824 830 m_jit.mulDouble(tmp2FPR, tmp1FPR); 825 831 break; … … 1990 1996 } 1991 1997 1998 GPRTemporary::GPRTemporary(JITCodeGenerator* jit, SpeculateStrictInt32Operand& op1) 1999 : m_jit(jit) 2000 , m_gpr(InvalidGPRReg) 2001 { 2002 if (m_jit->canReuse(op1.index())) 2003 m_gpr = m_jit->reuse(op1.gpr()); 2004 else 2005 m_gpr = m_jit->allocate(); 2006 } 2007 1992 2008 GPRTemporary::GPRTemporary(JITCodeGenerator* jit, IntegerOperand& op1) 1993 2009 : m_jit(jit) -
trunk/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h
r95310 r95484 69 69 70 70 enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly }; 71 71 72 static const double twoToThe32; 72 73 73 74 public: … … 1174 1175 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&); 1175 1176 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&); 1177 GPRTemporary(JITCodeGenerator*, SpeculateStrictInt32Operand&); 1176 1178 GPRTemporary(JITCodeGenerator*, IntegerOperand&); 1177 1179 GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&); -
trunk/Source/JavaScriptCore/dfg/DFGNode.h
r95399 r95484 148 148 /* Used to box the result of URShift nodes (result has range 0..2^32-1). */\ 149 149 macro(UInt32ToNumber, NodeResultNumber) \ 150 macro(UInt32ToNumberSafe, NodeResultNumber) \ 150 151 \ 151 152 /* Nodes for arithmetic operations. */\ 152 153 macro(ArithAdd, NodeResultNumber) \ 153 154 macro(ArithSub, NodeResultNumber) \ 154 macro(ArithMul, NodeResultNumber) \ 155 macro(ArithAddSafe, NodeResultNumber) /* Safe variants are those that are known to take old JIT slow path */\ 156 macro(ArithSubSafe, NodeResultNumber) \ 157 macro(ArithMulSpecNotNegZero, NodeResultNumber) /* Speculate that the result is not negative zero. */ \ 158 macro(ArithMulIgnoreZero, NodeResultNumber) /* If it's negative zero, ignore it. */ \ 159 macro(ArithMulPossiblyNegZero, NodeResultNumber) /* It definitely may be negative zero but we haven't decided what to do about it yet. No code generation for this node; it either turns into ArithMulIgnoreZero or ArithMulSafe. */ \ 160 macro(ArithMulSafe, NodeResultNumber) /* It may be negative zero, or it may produce other forms of double, so speculate double. */\ 155 161 macro(ArithDiv, NodeResultNumber) \ 156 162 macro(ArithMod, NodeResultNumber) \ … … 167 173 /* Add of values may either be arithmetic, or result in string concatenation. */\ 168 174 macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeMightClobber) \ 175 macro(ValueAddSafe, NodeResultJS | NodeMustGenerate | NodeMightClobber) \ 169 176 \ 170 177 /* Property access. */\ -
trunk/Source/JavaScriptCore/dfg/DFGPropagator.cpp
r95399 r95484 226 226 case ArithAdd: 227 227 case ArithSub: 228 case ArithMul: 228 case ArithMulIgnoreZero: 229 case ArithMulPossiblyNegZero: 230 case ArithMulSpecNotNegZero: 229 231 case ArithMin: 230 232 case ArithMax: { … … 251 253 if (isStrongPrediction(child)) 252 254 changed |= mergePrediction(child); 255 break; 256 } 257 258 case ArithAddSafe: 259 case ArithSubSafe: 260 case ArithMulSafe: 261 case UInt32ToNumberSafe: { 262 changed |= setPrediction(makePrediction(PredictDouble, StrongPrediction)); 263 break; 264 } 265 266 case ValueAddSafe: { 267 PredictedType left = m_predictions[node.child1()]; 268 PredictedType right = m_predictions[node.child2()]; 269 270 if (isStrongPrediction(left) && isStrongPrediction(right)) { 271 if (isNumberPrediction(left) && isNumberPrediction(right)) 272 changed |= mergePrediction(makePrediction(PredictDouble, StrongPrediction)); 273 else if (!(left & PredictNumber) || !(right & PredictNumber)) { 274 // left or right is definitely something other than a number. 
275 changed |= mergePrediction(makePrediction(PredictString, StrongPrediction)); 276 } else 277 changed |= mergePrediction(makePrediction(PredictString | PredictInt32 | PredictDouble, StrongPrediction)); 278 } 253 279 break; 254 280 } … … 409 435 case ArithAdd: 410 436 case ArithSub: 411 case ArithMul: 437 case ArithMulIgnoreZero: 438 case ArithMulPossiblyNegZero: 439 case ArithMulSpecNotNegZero: 412 440 case ArithMin: 413 441 case ArithMax: { … … 442 470 } 443 471 472 case ArithAddSafe: 473 case ArithSubSafe: 474 case ArithMulSafe: { 475 toDouble(node.child1()); 476 toDouble(node.child2()); 477 break; 478 } 479 480 case ValueAddSafe: { 481 PredictedType left = m_predictions[node.child1()]; 482 PredictedType right = m_predictions[node.child2()]; 483 484 if (isStrongPrediction(left) && isStrongPrediction(right) && isNumberPrediction(left) && isNumberPrediction(right)) { 485 toDouble(node.child2()); 486 toDouble(node.child1()); 487 } 488 break; 489 } 490 444 491 default: 445 492 break; … … 597 644 switch (node.op) { 598 645 case ValueAdd: 646 case ValueAddSafe: 599 647 case CompareLess: 600 648 case CompareLessEq: … … 744 792 case ArithAdd: 745 793 case ArithSub: 746 case ArithMul: 794 case ArithMulIgnoreZero: 795 case ArithMulPossiblyNegZero: 796 case ArithMulSpecNotNegZero: 747 797 case ArithMod: 748 798 case ArithDiv: … … 751 801 case ArithMax: 752 802 case ArithSqrt: 803 case ArithAddSafe: 804 case ArithSubSafe: 805 case ArithMulSafe: 753 806 setReplacement(pureCSE(node)); 754 807 break; -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r95399 r95484 841 841 break; 842 842 } 843 844 case UInt32ToNumberSafe: { 845 // We know that this sometimes produces doubles. So produce a double every 846 // time. This at least allows subsequent code to not have weird conditionals. 847 848 IntegerOperand op1(this, node.child1()); 849 FPRTemporary result(this); 850 851 GPRReg inputGPR = op1.gpr(); 852 FPRReg outputFPR = result.fpr(); 853 854 m_jit.convertInt32ToDouble(inputGPR, outputFPR); 855 856 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0)); 857 m_jit.addDouble(JITCompiler::AbsoluteAddress(&twoToThe32), outputFPR); 858 positive.link(&m_jit); 859 860 doubleResult(outputFPR, m_compileIndex); 861 break; 862 } 843 863 844 864 case ValueToInt32: { … … 917 937 break; 918 938 } 919 939 940 // Fall through to safe cases. 941 } 942 943 case ValueAddSafe: 944 case ArithAddSafe: { 920 945 if (shouldSpeculateNumber(node.child1(), node.child2())) { 921 946 SpeculateDoubleOperand op1(this, node.child1()); … … 931 956 } 932 957 933 ASSERT(op == ValueAdd );958 ASSERT(op == ValueAdd || op == ValueAddSafe); 934 959 935 960 JSValueOperand op1(this, node.child1()); … … 973 998 break; 974 999 } 975 1000 1001 // Fall through to safe case. 
1002 } 1003 1004 case ArithSubSafe: { 976 1005 SpeculateDoubleOperand op1(this, node.child1()); 977 1006 SpeculateDoubleOperand op2(this, node.child2()); … … 986 1015 } 987 1016 988 case ArithMul: { 1017 case ArithMulSpecNotNegZero: 1018 case ArithMulPossiblyNegZero: 1019 case ArithMulIgnoreZero: { 989 1020 if (shouldSpeculateInteger(node.child1(), node.child2())) { 990 1021 SpeculateIntegerOperand op1(this, node.child1()); … … 996 1027 speculationCheck(m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr())); 997 1028 998 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr()); 999 speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0))); 1000 speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0))); 1001 resultNonZero.link(&m_jit); 1029 // Only the ArithMulSpecNotNegZero opcode requires that we actually turn 1030 // negative zero into a double. The other two mean that all users of this 1031 // result don't care if it's a positive or negative zero. 1032 if (op == ArithMulSpecNotNegZero) { 1033 MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr()); 1034 speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0))); 1035 speculationCheck(m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0))); 1036 resultNonZero.link(&m_jit); 1037 } 1002 1038 1003 1039 integerResult(result.gpr(), m_compileIndex); 1004 1040 break; 1005 1041 } 1006 1042 1043 // Fall through to safe case. 
1044 } 1045 1046 case ArithMulSafe: { 1007 1047 SpeculateDoubleOperand op1(this, node.child1()); 1008 1048 SpeculateDoubleOperand op2(this, node.child2()); … … 1088 1128 case ArithMax: { 1089 1129 if (shouldSpeculateInteger(node.child1(), node.child2())) { 1090 Speculate IntegerOperand op1(this, node.child1());1091 Speculate IntegerOperand op2(this, node.child2());1130 SpeculateStrictInt32Operand op1(this, node.child1()); 1131 SpeculateStrictInt32Operand op2(this, node.child2()); 1092 1132 GPRTemporary result(this, op1); 1093 1133 … … 1837 1877 bool found = false; 1838 1878 1839 if (nodePtr->op == UInt32ToNumber ) {1879 if (nodePtr->op == UInt32ToNumber || nodePtr->op == UInt32ToNumberSafe) { 1840 1880 NodeIndex nodeIndex = nodePtr->child1(); 1841 1881 nodePtr = &m_jit.graph()[nodeIndex]; … … 1868 1908 break; 1869 1909 case UInt32ToNumber: 1910 case UInt32ToNumberSafe: 1870 1911 uint32ToNumberIndex = info.nodeIndex(); 1871 1912 break; -
trunk/Source/JavaScriptCore/jit/JIT.cpp
r95273 r95484 414 414 #endif 415 415 Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; 416 417 #if ENABLE(VALUE_PROFILER) 418 RareCaseProfile* slowCaseProfile = 0; 419 if (m_canBeOptimized) 420 slowCaseProfile = m_codeBlock->addSlowCaseProfile(m_bytecodeOffset); 421 #endif 416 422 417 423 switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { … … 486 492 ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen."); 487 493 ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen."); 494 495 #if ENABLE(VALUE_PROFILER) 496 if (m_canBeOptimized) 497 add32(Imm32(1), AbsoluteAddress(&slowCaseProfile->m_counter)); 498 #endif 488 499 489 500 emitJumpSlowToHot(jump(), 0); -
trunk/Source/JavaScriptCore/jit/JIT.h
r95397 r95484 296 296 void addSlowCase(Jump); 297 297 void addSlowCase(JumpList); 298 void addSlowCase(); 298 299 void addJump(Jump, int); 299 300 void emitJumpSlowToHot(Jump, int); … … 973 974 ++iter; 974 975 } 976 void linkDummySlowCase(Vector<SlowCaseEntry>::iterator& iter) 977 { 978 ASSERT(!iter->from.isSet()); 979 ++iter; 980 } 975 981 void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg); 976 982 -
trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp
r90371 r95484 628 628 move(regT0, regT1); 629 629 emitJumpSlowCaseIfNotImmediateInteger(regT0); 630 addSlowCase(branchSub32( Zero, TrustedImm32(1), regT1));630 addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1)); 631 631 emitFastArithIntToImmNoCheck(regT1, regT1); 632 632 emitPutVirtualRegister(srcDst, regT1); … … 677 677 emitGetVirtualRegister(srcDst, regT0); 678 678 emitJumpSlowCaseIfNotImmediateInteger(regT0); 679 addSlowCase(branchSub32( Zero, TrustedImm32(1), regT0));679 addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0)); 680 680 emitFastArithIntToImmNoCheck(regT0, regT0); 681 681 emitPutVirtualRegister(srcDst); … … 785 785 else { 786 786 ASSERT(opcodeID == op_mul); 787 #if ENABLE(VALUE_PROFILER) 788 if (m_canBeOptimized) { 789 // We want to be able to measure if this is taking the slow case just 790 // because of negative zero. If this produces positive zero, then we 791 // don't want the slow case to be taken because that will throw off 792 // speculative compilation. 793 move(regT0, regT2); 794 addSlowCase(branchMul32(Overflow, regT1, regT2)); 795 JumpList done; 796 done.append(branchTest32(NonZero, regT2)); 797 Jump negativeZero = branch32(LessThan, regT0, Imm32(0)); 798 done.append(branch32(GreaterThanOrEqual, regT1, Imm32(0))); 799 negativeZero.link(this); 800 // We only get here if we have a genuine negative zero. Record this, 801 // so that the speculative JIT knows that we failed speculation 802 // because of a negative zero. 
803 add32(Imm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter)); 804 addSlowCase(jump()); 805 done.link(this); 806 move(regT2, regT0); 807 } else { 808 addSlowCase(branchMul32(Overflow, regT1, regT0)); 809 addSlowCase(branchTest32(Zero, regT0)); 810 } 811 #else 787 812 addSlowCase(branchMul32(Overflow, regT1, regT0)); 788 813 addSlowCase(branchTest32(Zero, regT0)); 814 #endif 789 815 } 790 816 emitFastArithIntToImmNoCheck(regT0, regT0); … … 888 914 889 915 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) { 916 addSlowCase(); 890 917 JITStubCall stubCall(this, cti_op_add); 891 918 stubCall.addArgument(op1, regT2); … … 918 945 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); 919 946 920 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) 947 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) { 948 linkDummySlowCase(iter); 921 949 return; 950 } 922 951 923 952 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1); -
trunk/Source/JavaScriptCore/jit/JITInlineMethods.h
r95450 r95484 300 300 } 301 301 302 ALWAYS_INLINE void JIT::addSlowCase() 303 { 304 ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 305 306 Jump emptyJump; // Doing it this way to make Windows happy. 307 m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset)); 308 } 309 302 310 ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset) 303 311 {
Note: See TracChangeset for help on using the changeset viewer.