Changeset 208326 in webkit
- Timestamp:
- Nov 3, 2016 7:39:47 AM (7 years ago)
- Location:
- trunk
- Files:
-
- 1 added
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JSTests/ChangeLog
r208311 r208326 1 2016-11-03 Saam Barati <sbarati@apple.com> 2 3 Asking for a value profile prediction should be defensive against not finding a value profile 4 https://bugs.webkit.org/show_bug.cgi?id=164306 5 6 Reviewed by Mark Lam. 7 8 * stress/inlined-tail-call-in-inlined-setter-should-not-crash-when-getting-value-profile.js: Added. 9 (let.o.set foo): 10 (bar): 11 1 12 2016-11-02 Saam Barati <sbarati@apple.com> 2 13 -
trunk/Source/JavaScriptCore/ChangeLog
r208323 r208326 1 2016-11-03 Saam Barati <sbarati@apple.com> 2 3 Asking for a value profile prediction should be defensive against not finding a value profile 4 https://bugs.webkit.org/show_bug.cgi?id=164306 5 6 Reviewed by Mark Lam. 7 8 Currently, the code that calls CodeBlock::valueProfilePredictionForBytecodeOffset 9 in the DFG assumes it will always be at a value producing node. However, this isn't 10 true if we tail call from an inlined setter. When we're at a tail call, we try 11 to find the first caller that isn't a tail call to see what value the 12 tail_call produces. If we inline a setter, however, we will end up finding 13 the put_by_id as our first non-tail-called "caller", and that won't have a 14 value profile associated with it since it's not a value producing node. 15 CodeBlock::valueProfilePredictionForBytecodeOffset should be defensive 16 against finding a null value profile. 17 18 * bytecode/CodeBlock.h: 19 (JSC::CodeBlock::valueProfilePredictionForBytecodeOffset): 20 * dfg/DFGByteCodeParser.cpp: 21 (JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit): 22 1 23 2016-11-02 Yusuke Suzuki <utatane.tea@gmail.com> 2 24 -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
r208309 r208326 415 415 SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset) 416 416 { 417 return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker); 417 if (ValueProfile* valueProfile = valueProfileForBytecodeOffset(bytecodeOffset)) 418 return valueProfile->computeUpdatedPrediction(locker); 419 return SpecNone; 418 420 } 419 421 -
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r208320 r208326 852 852 { 853 853 SpeculatedType prediction; 854 CodeBlock* profiledBlock = nullptr; 855 856 854 { 857 855 ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 858 856 prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); 859 860 if (prediction == SpecNone) { 861 // If we have no information about the values this 862 // node generates, we check if by any chance it is 863 // a tail call opcode. In that case, we walk up the 864 // inline frames to find a call higher in the call 865 // chain and use its prediction. If we only have 866 // inlined tail call frames, we use SpecFullTop 867 // to avoid a spurious OSR exit. 868 Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex; 869 OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode); 870 871 switch (opcodeID) { 872 case op_tail_call: 873 case op_tail_call_varargs: 874 case op_tail_call_forward_arguments: { 875 if (!inlineCallFrame()) { 876 prediction = SpecFullTop; 877 break; 878 } 879 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls(); 880 if (!codeOrigin) { 881 prediction = SpecFullTop; 882 break; 883 } 884 InlineStackEntry* stack = m_inlineStackTop; 885 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame) 886 stack = stack->m_caller; 887 bytecodeIndex = codeOrigin->bytecodeIndex; 888 profiledBlock = stack->m_profiledBlock; 889 break; 890 } 891 892 default: 893 break; 894 } 895 } 896 } 897 898 if (profiledBlock) { 857 } 858 859 if (prediction != SpecNone) 860 return prediction; 861 862 // If we have no information about the values this 863 // node generates, we check if by any chance it is 864 // a tail call opcode. In that case, we walk up the 865 // inline frames to find a call higher in the call 866 // chain and use its prediction. If we only have 867 // inlined tail call frames, we use SpecFullTop 868 // to avoid a spurious OSR exit. 869 Instruction* instruction = m_inlineStackTop->m_profiledBlock->instructions().begin() + bytecodeIndex; 870 OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(instruction->u.opcode); 871 872 switch (opcodeID) { 873 case op_tail_call: 874 case op_tail_call_varargs: 875 case op_tail_call_forward_arguments: { 876 // Things should be more permissive to us returning BOTTOM instead of TOP here. 877 // Currently, this will cause us to Force OSR exit. This is bad because returning 878 // TOP will cause anything that transitively touches this speculated type to 879 // also become TOP during prediction propagation. 880 // https://bugs.webkit.org/show_bug.cgi?id=164337 881 if (!inlineCallFrame()) 882 return SpecFullTop; 883 884 CodeOrigin* codeOrigin = inlineCallFrame()->getCallerSkippingTailCalls(); 885 if (!codeOrigin) 886 return SpecFullTop; 887 888 InlineStackEntry* stack = m_inlineStackTop; 889 while (stack->m_inlineCallFrame != codeOrigin->inlineCallFrame) 890 stack = stack->m_caller; 891 892 bytecodeIndex = codeOrigin->bytecodeIndex; 893 CodeBlock* profiledBlock = stack->m_profiledBlock; 899 894 ConcurrentJITLocker locker(profiledBlock->m_lock); 900 prediction = profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); 901 } 902 903 return prediction; 895 return profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); 896 } 897 898 default: 899 return SpecNone; 900 } 901 902 RELEASE_ASSERT_NOT_REACHED(); 903 return SpecNone; 904 904 } 905 905
Note: See TracChangeset
for help on using the changeset viewer.