Changeset 194613 in WebKit
- Timestamp:
- Jan 5, 2016 3:08:58 PM (8 years ago)
- Location:
- trunk/Source/JavaScriptCore
- Files:
-
- 16 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r194612 r194613 1 2016-01-04 Mark Lam <mark.lam@apple.com> 2 3 Profiling should detect when multiplication overflows but does not create negative zero. 4 https://bugs.webkit.org/show_bug.cgi?id=132470 5 6 Reviewed by Geoffrey Garen. 7 8 * assembler/MacroAssemblerARM64.h: 9 (JSC::MacroAssemblerARM64::or32): 10 * assembler/MacroAssemblerARMv7.h: 11 (JSC::MacroAssemblerARMv7::or32): 12 - New or32 emitter needed by the mul snippet. 13 14 * bytecode/CodeBlock.cpp: 15 (JSC::CodeBlock::resultProfileForBytecodeOffset): 16 (JSC::CodeBlock::updateResultProfileForBytecodeOffset): Deleted. 17 * bytecode/CodeBlock.h: 18 (JSC::CodeBlock::ensureResultProfile): 19 (JSC::CodeBlock::addResultProfile): Deleted. 20 (JSC::CodeBlock::likelyToTakeDeepestSlowCase): Deleted. 21 - Added a m_bytecodeOffsetToResultProfileIndexMap because we can now add result 22 profiles in any order (based on runtime execution), not necessarily in bytecode 23 order at baseline compilation time. 24 25 * bytecode/ValueProfile.cpp: 26 (WTF::printInternal): 27 * bytecode/ValueProfile.h: 28 (JSC::ResultProfile::didObserveInt52Overflow): 29 (JSC::ResultProfile::setObservedInt52Overflow): 30 - Add new Int52Overflow flags. 31 32 * dfg/DFGByteCodeParser.cpp: 33 (JSC::DFG::ByteCodeParser::makeSafe): 34 - Now with more straightforward mapping of profiling info. 35 36 * dfg/DFGCommon.h: 37 - Fixed a typo in a comment. 38 39 * dfg/DFGNode.h: 40 (JSC::DFG::Node::arithNodeFlags): 41 (JSC::DFG::Node::mayHaveNonIntResult): 42 (JSC::DFG::Node::hasConstantBuffer): 43 * dfg/DFGNodeFlags.cpp: 44 (JSC::DFG::dumpNodeFlags): 45 * dfg/DFGNodeFlags.h: 46 (JSC::DFG::nodeMayOverflowInt52): 47 (JSC::DFG::nodeCanSpeculateInt52): 48 * dfg/DFGPredictionPropagationPhase.cpp: 49 (JSC::DFG::PredictionPropagationPhase::propagate): 50 - We now have profiling info for whether the result was ever seen to be a non-Int. 51 Use this to make a better prediction. 
52 53 * jit/JITArithmetic.cpp: 54 (JSC::JIT::emit_op_div): 55 (JSC::JIT::emit_op_mul): 56 - Switch to using CodeBlock::ensureResultProfile(). ResultProfiles can now be 57 created at any time (including the slow path), not just in bytecode order 58 during baseline compilation. 59 60 * jit/JITMulGenerator.cpp: 61 (JSC::JITMulGenerator::generateFastPath): 62 - Removed the fast path profiling code for NegZero because we'll go to the slow 63 path anyway. Let the slow path do the profiling for us. 64 - Added profiling for NegZero and potential Int52 overflows in the fast path 65 that does double math. 66 67 * runtime/CommonSlowPaths.cpp: 68 (JSC::updateResultProfileForBinaryArithOp): 69 - Removed the RETURN_WITH_RESULT_PROFILING macro (2 fewer macros), and just use 70 the RETURN_WITH_PROFILING macro instead with a call to 71 updateResultProfileForBinaryArithOp(). This makes it clear what we're doing 72 to profile in each case, and also allows us to do custom profiling for 73 each opcode if needed. However, so far, we always call 74 updateResultProfileForBinaryArithOp(). 75 1 76 2016-01-05 Keith Miller <keith_miller@apple.com> 2 77 -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64.h
r194530 r194613 524 524 } 525 525 526 ASSERT(src != dataTempRegister); 526 527 move(imm, getCachedDataTempRegisterIDAndInvalidate()); 527 528 m_assembler.orr<32>(dest, src, dataTempRegister); … … 533 534 m_assembler.orr<32>(dataTempRegister, dataTempRegister, src); 534 535 store32(dataTempRegister, address.m_ptr); 536 } 537 538 void or32(TrustedImm32 imm, AbsoluteAddress address) 539 { 540 load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate()); 541 or32(imm, memoryTempRegister, memoryTempRegister); 542 store32(memoryTempRegister, address.m_ptr); 535 543 } 536 544 -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
r194062 r194613 353 353 } 354 354 355 void or32(TrustedImm32 imm, AbsoluteAddress address) 356 { 357 move(TrustedImmPtr(address.m_ptr), addressTempRegister); 358 load32(addressTempRegister, dataTempRegister); 359 or32(imm, dataTempRegister, dataTempRegister); 360 store32(dataTempRegister, addressTempRegister); 361 } 362 355 363 void or32(TrustedImm32 imm, Address address) 356 364 { -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp
r194496 r194613 4189 4189 ResultProfile* CodeBlock::resultProfileForBytecodeOffset(int bytecodeOffset) 4190 4190 { 4191 return tryBinarySearch<ResultProfile, int>( 4192 m_resultProfiles, m_resultProfiles.size(), bytecodeOffset, 4193 getResultProfileBytecodeOffset); 4194 } 4195 4196 void CodeBlock::updateResultProfileForBytecodeOffset(int bytecodeOffset, JSValue result) 4197 { 4198 #if ENABLE(DFG_JIT) 4199 ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset); 4200 if (!profile) 4201 profile = addResultProfile(bytecodeOffset); 4202 4203 if (result.isNumber()) { 4204 if (!result.isInt32()) { 4205 double doubleVal = result.asNumber(); 4206 if (!doubleVal && std::signbit(doubleVal)) 4207 profile->setObservedNegZeroDouble(); 4208 else 4209 profile->setObservedNonNegZeroDouble(); 4210 } 4211 } else 4212 profile->setObservedNonNumber(); 4213 #else 4214 UNUSED_PARAM(bytecodeOffset); 4215 UNUSED_PARAM(result); 4216 #endif 4191 auto iterator = m_bytecodeOffsetToResultProfileIndexMap.find(bytecodeOffset); 4192 if (iterator == m_bytecodeOffsetToResultProfileIndexMap.end()) 4193 return nullptr; 4194 return &m_resultProfiles[iterator->value]; 4217 4195 } 4218 4196 -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
r194496 r194613 453 453 } 454 454 455 ResultProfile* addResultProfile(int bytecodeOffset) 456 { 457 m_resultProfiles.append(ResultProfile(bytecodeOffset)); 458 return &m_resultProfiles.last(); 455 ResultProfile* ensureResultProfile(int bytecodeOffset) 456 { 457 ResultProfile* profile = resultProfileForBytecodeOffset(bytecodeOffset); 458 if (!profile) { 459 m_resultProfiles.append(ResultProfile(bytecodeOffset)); 460 profile = &m_resultProfiles.last(); 461 ASSERT(&m_resultProfiles.last() == &m_resultProfiles[m_resultProfiles.size() - 1]); 462 m_bytecodeOffsetToResultProfileIndexMap.add(bytecodeOffset, m_resultProfiles.size() - 1); 463 } 464 return profile; 459 465 } 460 466 unsigned numberOfResultProfiles() { return m_resultProfiles.size(); } 461 467 ResultProfile* resultProfileForBytecodeOffset(int bytecodeOffset); 462 463 void updateResultProfileForBytecodeOffset(int bytecodeOffset, JSValue result);464 468 465 469 unsigned specialFastCaseProfileCountForBytecodeOffset(int bytecodeOffset) … … 477 481 unsigned specialFastCaseCount = specialFastCaseProfileCountForBytecodeOffset(bytecodeOffset); 478 482 return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount(); 479 }480 481 bool likelyToTakeDeepestSlowCase(int bytecodeOffset)482 {483 if (!hasBaselineJITProfiling())484 return false;485 unsigned slowCaseCount = rareCaseProfileCountForBytecodeOffset(bytecodeOffset);486 unsigned specialFastCaseCount = specialFastCaseProfileCountForBytecodeOffset(bytecodeOffset);487 unsigned value = slowCaseCount - specialFastCaseCount;488 return value >= Options::likelyToTakeSlowCaseMinimumCount();489 483 } 490 484 … … 1069 1063 SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; 1070 1064 SegmentedVector<ResultProfile, 8> m_resultProfiles; 1065 HashMap<unsigned, unsigned, IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> m_bytecodeOffsetToResultProfileIndexMap; 1071 1066 Vector<ArrayAllocationProfile> m_arrayAllocationProfiles; 1072 1067 ArrayProfileVector 
m_arrayProfiles; -
trunk/Source/JavaScriptCore/bytecode/ValueProfile.cpp
r194294 r194613 55 55 separator = "|"; 56 56 } 57 if (profile.didObserveInt52Overflow()) { 58 out.print("Int52Overflow"); 59 separator = "|"; 60 } 57 61 } 58 62 if (profile.specialFastPathCount()) { -
trunk/Source/JavaScriptCore/bytecode/ValueProfile.h
r194294 r194613 209 209 struct ResultProfile { 210 210 private: 211 static const int numberOfFlagBits = 4;211 static const int numberOfFlagBits = 5; 212 212 213 213 public: … … 223 223 NonNumber = 1 << 2, 224 224 Int32Overflow = 1 << 3, 225 Int52Overflow = 1 << 4, 225 226 }; 226 227 … … 234 235 bool didObserveNonNumber() const { return hasBits(NonNumber); } 235 236 bool didObserveInt32Overflow() const { return hasBits(Int32Overflow); } 237 bool didObserveInt52Overflow() const { return hasBits(Int52Overflow); } 236 238 237 239 void setObservedNonNegZeroDouble() { setBit(NonNegZeroDouble); } … … 239 241 void setObservedNonNumber() { setBit(NonNumber); } 240 242 void setObservedInt32Overflow() { setBit(Int32Overflow); } 243 void setObservedInt52Overflow() { setBit(Int52Overflow); } 241 244 242 245 void* addressOfFlags() { return &m_bytecodeOffsetAndFlags; } -
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r194565 r194613 910 910 break; 911 911 912 case ArithMul: 913 // FIXME: We should detect cases where we only overflowed but never created 914 // negative zero. 915 // https://bugs.webkit.org/show_bug.cgi?id=132470 916 if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex) 917 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) 918 node->mergeFlags(NodeMayOverflowInt32InBaseline | NodeMayNegZeroInBaseline); 919 else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) 920 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) 912 case ArithMul: { 913 ResultProfile& resultProfile = *m_inlineStackTop->m_profiledBlock->resultProfileForBytecodeOffset(m_currentIndex); 914 if (resultProfile.didObserveInt52Overflow()) 915 node->mergeFlags(NodeMayOverflowInt52); 916 if (resultProfile.didObserveInt32Overflow() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) 917 node->mergeFlags(NodeMayOverflowInt32InBaseline); 918 if (resultProfile.didObserveNegZeroDouble() || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) 921 919 node->mergeFlags(NodeMayNegZeroInBaseline); 920 if (resultProfile.didObserveNonInt32()) 921 node->mergeFlags(NodeMayHaveNonIntResult); 922 922 break; 923 923 } 924 924 925 default: 925 926 RELEASE_ASSERT_NOT_REACHED(); -
trunk/Source/JavaScriptCore/dfg/DFGCommon.h
r194543 r194613 108 108 // being done by the separate FixuPhase. 109 109 enum PredictionPass { 110 // We're converging in a stra ght-forward forward flow fixpoint. This is the110 // We're converging in a straight-forward forward flow fixpoint. This is the 111 111 // most conventional part of the propagator - it makes only monotonic decisions 112 112 // based on value profiles and rare case profiles. It ignores baseline JIT rare -
trunk/Source/JavaScriptCore/dfg/DFGNode.h
r194248 r194613 927 927 return result & ~NodeBytecodeNeedsNegZero; 928 928 } 929 929 930 bool mayHaveNonIntResult() 931 { 932 return m_flags & NodeMayHaveNonIntResult; 933 } 934 930 935 bool hasConstantBuffer() 931 936 { -
trunk/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
r194423 r194613 86 86 } 87 87 88 if (flags & NodeMayHaveNonIntResult) 89 out.print(comma, "MayHaveNonIntResult"); 90 91 if (flags & NodeMayOverflowInt52) 92 out.print(comma, "MayOverflowInt52"); 93 88 94 if (flags & NodeMayOverflowInt32InBaseline) 89 95 out.print(comma, "MayOverflowInt32InBaseline"); -
trunk/Source/JavaScriptCore/dfg/DFGNodeFlags.h
r194423 r194613 47 47 #define NodeMustGenerate 0x0008 // set on nodes that have side effects, and may not trivially be removed by DCE. 48 48 #define NodeHasVarArgs 0x0010 49 // 0x0020 and 0x0040 are free. 50 51 #define NodeBehaviorMask 0x0780 49 50 #define NodeBehaviorMask 0x07e0 51 #define NodeMayHaveNonIntResult 0x0020 52 #define NodeMayOverflowInt52 0x0040 52 53 #define NodeMayOverflowInt32InBaseline 0x0080 53 54 #define NodeMayOverflowInt32InDFG 0x0100 … … 94 95 AllRareCases 95 96 }; 97 98 static inline bool nodeMayOverflowInt52(NodeFlags flags, RareCaseProfilingSource) 99 { 100 return !!(flags & NodeMayOverflowInt52); 101 } 96 102 97 103 static inline bool nodeMayOverflowInt32(NodeFlags flags, RareCaseProfilingSource source) … … 142 148 static inline bool nodeCanSpeculateInt52(NodeFlags flags, RareCaseProfilingSource source) 143 149 { 150 if (nodeMayOverflowInt52(flags, source)) 151 return false; 152 144 153 if (nodeMayNegZero(flags, source)) 145 154 return bytecodeCanIgnoreNegativeZero(flags); -
trunk/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
r194248 r194613 344 344 else 345 345 changed |= mergePrediction(speculatedDoubleTypeForPredictions(left, right)); 346 } else 347 changed |= mergePrediction(SpecInt32 | SpecBytecodeDouble); 346 } else { 347 if (node->mayHaveNonIntResult()) 348 changed |= mergePrediction(SpecInt32 | SpecBytecodeDouble); 349 else 350 changed |= mergePrediction(SpecInt32); 351 } 348 352 } 349 353 break; -
trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp
r194363 r194613 761 761 ResultProfile* resultProfile = nullptr; 762 762 if (shouldEmitProfiling()) 763 resultProfile = m_codeBlock-> addResultProfile(m_bytecodeOffset);763 resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset); 764 764 765 765 SnippetOperand leftOperand(types.first()); … … 836 836 ResultProfile* resultProfile = nullptr; 837 837 if (shouldEmitProfiling()) 838 resultProfile = m_codeBlock-> addResultProfile(m_bytecodeOffset);838 resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset); 839 839 840 840 SnippetOperand leftOperand(types.first()); -
trunk/Source/JavaScriptCore/jit/JITMulGenerator.cpp
r194294 r194613 36 36 ASSERT(m_scratchGPR != m_left.payloadGPR()); 37 37 ASSERT(m_scratchGPR != m_right.payloadGPR()); 38 ASSERT(m_scratchGPR != m_result.payloadGPR()); 38 39 #if USE(JSVALUE32_64) 39 40 ASSERT(m_scratchGPR != m_left.tagGPR()); … … 92 93 93 94 m_slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR)); 94 if (!m_resultProfile) { 95 m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero. 96 97 } else { 98 CCallHelpers::JumpList notNegativeZero; 99 notNegativeZero.append(jit.branchTest32(CCallHelpers::NonZero, m_scratchGPR)); 100 101 CCallHelpers::Jump negativeZero = jit.branch32(CCallHelpers::LessThan, m_left.payloadGPR(), CCallHelpers::TrustedImm32(0)); 102 notNegativeZero.append(jit.branch32(CCallHelpers::GreaterThanOrEqual, m_right.payloadGPR(), CCallHelpers::TrustedImm32(0))); 103 104 negativeZero.link(&jit); 105 // Record this, so that the speculative JIT knows that we failed speculation 106 // because of a negative zero. 107 jit.add32(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfSpecialFastPathCount())); 108 m_slowPathJumpList.append(jit.jump()); 109 110 notNegativeZero.link(&jit); 111 } 95 m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero. 112 96 113 97 jit.boxInt32(m_scratchGPR, m_result); … … 148 132 // Do doubleVar * doubleVar. 149 133 jit.mulDouble(m_rightFPR, m_leftFPR); 150 jit.boxDouble(m_leftFPR, m_result); 134 135 if (!m_resultProfile) 136 jit.boxDouble(m_leftFPR, m_result); 137 else { 138 // The Int52 overflow check below intentionally omits 1ll << 51 as a valid negative Int52 value. 139 // Therefore, we will get a false positive if the result is that value. This is intentionally 140 // done to simplify the checking algorithm. 
141 142 const int64_t negativeZeroBits = 1ll << 63; 143 #if USE(JSVALUE64) 144 jit.moveDoubleTo64(m_leftFPR, m_result.payloadGPR()); 145 CCallHelpers::Jump notNegativeZero = jit.branch64(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm64(negativeZeroBits)); 146 147 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 148 CCallHelpers::Jump done = jit.jump(); 149 150 notNegativeZero.link(&jit); 151 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 152 153 jit.move(m_result.payloadGPR(), m_scratchGPR); 154 jit.urshiftPtr(CCallHelpers::Imm32(52), m_scratchGPR); 155 jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR); 156 CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431)); 157 158 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 159 noInt52Overflow.link(&jit); 160 161 done.link(&jit); 162 jit.sub64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR()); // Box the double. 
163 #else 164 jit.boxDouble(m_leftFPR, m_result); 165 CCallHelpers::JumpList notNegativeZero; 166 notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm32(0))); 167 notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.tagGPR(), CCallHelpers::TrustedImm32(negativeZeroBits >> 32))); 168 169 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 170 CCallHelpers::Jump done = jit.jump(); 171 172 notNegativeZero.link(&jit); 173 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 174 175 jit.move(m_result.tagGPR(), m_scratchGPR); 176 jit.urshiftPtr(CCallHelpers::Imm32(52 - 32), m_scratchGPR); 177 jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR); 178 CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431)); 179 180 jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags())); 181 182 m_endJumpList.append(noInt52Overflow); 183 m_endJumpList.append(done); 184 #endif 185 } 151 186 } 152 187 -
trunk/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
r194294 r194613 136 136 } while (false) 137 137 138 #define RETURN_WITH_RESULT_PROFILING(value__) \139 RETURN_WITH_PROFILING(value__, PROFILE_RESULT(returnValue__))140 141 #define PROFILE_RESULT(value__) do { \142 CodeBlock* codeBlock = exec->codeBlock(); \143 unsigned bytecodeOffset = codeBlock->bytecodeOffset(pc); \144 codeBlock->updateResultProfileForBytecodeOffset(bytecodeOffset, value__); \145 } while (false)146 147 138 #define CALL_END_IMPL(exec, callTarget) RETURN_TWO((callTarget), (exec)) 148 139 … … 359 350 } 360 351 352 #if ENABLE(DFG_JIT) 353 static void updateResultProfileForBinaryArithOp(ExecState* exec, Instruction* pc, JSValue result, JSValue left, JSValue right) 354 { 355 CodeBlock* codeBlock = exec->codeBlock(); 356 unsigned bytecodeOffset = codeBlock->bytecodeOffset(pc); 357 ResultProfile* profile = codeBlock->ensureResultProfile(bytecodeOffset); 358 359 if (result.isNumber()) { 360 if (!result.isInt32()) { 361 if (left.isInt32() && right.isInt32()) 362 profile->setObservedInt32Overflow(); 363 364 double doubleVal = result.asNumber(); 365 if (!doubleVal && std::signbit(doubleVal)) 366 profile->setObservedNegZeroDouble(); 367 else { 368 profile->setObservedNonNegZeroDouble(); 369 370 // The Int52 overflow check here intentionally omits 1ll << 51 as a valid negative Int52 value. 371 // Therefore, we will get a false positive if the result is that value. This is intentionally 372 // done to simplify the checking algorithm. 
373 static const int64_t int52OverflowPoint = (1ll << 51); 374 int64_t int64Val = static_cast<int64_t>(std::abs(doubleVal)); 375 if (int64Val >= int52OverflowPoint) 376 profile->setObservedInt52Overflow(); 377 } 378 } 379 } else 380 profile->setObservedNonNumber(); 381 } 382 #else 383 static void updateResultProfileForBinaryArithOp(ExecState*, Instruction*, JSValue, JSValue, JSValue) { } 384 #endif 385 361 386 SLOW_PATH_DECL(slow_path_add) 362 387 { … … 364 389 JSValue v1 = OP_C(2).jsValue(); 365 390 JSValue v2 = OP_C(3).jsValue(); 366 391 JSValue result; 392 367 393 if (v1.isString() && !v2.isObject()) 368 RETURN_WITH_RESULT_PROFILING(jsString(exec, asString(v1), v2.toString(exec))); 369 370 if (v1.isNumber() && v2.isNumber()) 371 RETURN_WITH_RESULT_PROFILING(jsNumber(v1.asNumber() + v2.asNumber())); 372 373 RETURN_WITH_RESULT_PROFILING(jsAddSlowCase(exec, v1, v2)); 394 result = jsString(exec, asString(v1), v2.toString(exec)); 395 else if (v1.isNumber() && v2.isNumber()) 396 result = jsNumber(v1.asNumber() + v2.asNumber()); 397 else 398 result = jsAddSlowCase(exec, v1, v2); 399 400 RETURN_WITH_PROFILING(result, { 401 updateResultProfileForBinaryArithOp(exec, pc, result, v1, v2); 402 }); 374 403 } 375 404 … … 381 410 { 382 411 BEGIN(); 383 double a = OP_C(2).jsValue().toNumber(exec); 384 double b = OP_C(3).jsValue().toNumber(exec); 385 RETURN_WITH_RESULT_PROFILING(jsNumber(a * b)); 412 JSValue left = OP_C(2).jsValue(); 413 JSValue right = OP_C(3).jsValue(); 414 double a = left.toNumber(exec); 415 double b = right.toNumber(exec); 416 JSValue result = jsNumber(a * b); 417 RETURN_WITH_PROFILING(result, { 418 updateResultProfileForBinaryArithOp(exec, pc, result, left, right); 419 }); 386 420 } 387 421 … … 389 423 { 390 424 BEGIN(); 391 double a = OP_C(2).jsValue().toNumber(exec); 392 double b = OP_C(3).jsValue().toNumber(exec); 393 RETURN_WITH_RESULT_PROFILING(jsNumber(a - b)); 425 JSValue left = OP_C(2).jsValue(); 426 JSValue right = OP_C(3).jsValue(); 427 double a = 
left.toNumber(exec); 428 double b = right.toNumber(exec); 429 JSValue result = jsNumber(a - b); 430 RETURN_WITH_PROFILING(result, { 431 updateResultProfileForBinaryArithOp(exec, pc, result, left, right); 432 }); 394 433 } 395 434 … … 397 436 { 398 437 BEGIN(); 399 double a = OP_C(2).jsValue().toNumber(exec); 400 double b = OP_C(3).jsValue().toNumber(exec); 401 RETURN_WITH_RESULT_PROFILING(jsNumber(a / b)); 438 JSValue left = OP_C(2).jsValue(); 439 JSValue right = OP_C(3).jsValue(); 440 double a = left.toNumber(exec); 441 double b = right.toNumber(exec); 442 JSValue result = jsNumber(a / b); 443 RETURN_WITH_PROFILING(result, { 444 updateResultProfileForBinaryArithOp(exec, pc, result, left, right); 445 }); 402 446 } 403 447
Note: See TracChangeset
for help on using the changeset viewer.