Changeset 223989 in webkit
- Timestamp: Oct 25, 2017 3:15:39 PM
- Location: trunk
- Files: 1 deleted, 8 edited
trunk/JSTests/ChangeLog
(r223961 → r223989)

+2017-10-25  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r223691 and r223729.
+        https://bugs.webkit.org/show_bug.cgi?id=178834
+
+        Broke Speedometer 2 React-Redux-TodoMVC test case (Requested
+        by rniwa on #webkit).
+
+        Reverted changesets:
+
+        "Turn recursive tail calls into loops"
+        https://bugs.webkit.org/show_bug.cgi?id=176601
+        https://trac.webkit.org/changeset/223691
+
+        "REGRESSION(r223691): DFGByteCodeParser.cpp:1483:83: warning:
+        comparison is always false due to limited range of data type
+        [-Wtype-limits]"
+        https://bugs.webkit.org/show_bug.cgi?id=178543
+        https://trac.webkit.org/changeset/223729
+
 2017-10-25  Ryan Haddad  <ryanhaddad@apple.com>
trunk/Source/JavaScriptCore/ChangeLog
(r223980 → r223989)

+2017-10-25  Commit Queue  <commit-queue@webkit.org>
+
+        Unreviewed, rolling out r223691 and r223729.
+        https://bugs.webkit.org/show_bug.cgi?id=178834
+
+        Broke Speedometer 2 React-Redux-TodoMVC test case (Requested
+        by rniwa on #webkit).
+
+        Reverted changesets:
+
+        "Turn recursive tail calls into loops"
+        https://bugs.webkit.org/show_bug.cgi?id=176601
+        https://trac.webkit.org/changeset/223691
+
+        "REGRESSION(r223691): DFGByteCodeParser.cpp:1483:83: warning:
+        comparison is always false due to limited range of data type
+        [-Wtype-limits]"
+        https://bugs.webkit.org/show_bug.cgi?id=178543
+        https://trac.webkit.org/changeset/223729
+
 2017-10-25  Michael Saboff  <msaboff@apple.com>
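The change being rolled out, "Turn recursive tail calls into loops" (r223691), taught the bytecode generator and the DFG to treat a self-recursive call in tail position as a jump back to the start of the function rather than as a real call. As a rough illustration of what that transformation amounts to, here is a minimal C++ sketch; it is illustrative only, not JSC code, and the function names are made up:

    #include <cstdint>

    // Recursive form: the call to sum() is the last thing the function does.
    std::uint64_t sum(std::uint64_t n, std::uint64_t accumulator)
    {
        if (!n)
            return accumulator;
        return sum(n - 1, accumulator + n); // tail call
    }

    // Loop form: roughly what the optimization turned such a call into. The
    // "arguments" are overwritten in place and control jumps back to the point
    // just after the function prologue instead of pushing a new frame.
    std::uint64_t sumAsLoop(std::uint64_t n, std::uint64_t accumulator)
    {
        while (true) {
            if (!n)
                return accumulator;
            std::uint64_t nextAccumulator = accumulator + n;
            n = n - 1;
            accumulator = nextAccumulator;
        }
    }

The files below are the pieces of that machinery being removed: the hasTailCalls() bookkeeping, the extra jump target after op_enter, and the DFG's handleRecursiveTailCall().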
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
(r223691 → r223989)

     // Call this to cause an optimization trigger to fire soon, but
     // not necessarily the next one. This makes sense if optimization
-    // succeeds. Successful optimization means that all calls are
+    // succeeds. Successfuly optimization means that all calls are
     // relinked to the optimized code, so this only affects call
     // frames that are still executing this CodeBlock. The value here
…
     std::optional<CodeOrigin> findPC(void* pc);
 #endif
-
-    bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); }
 
 protected:
trunk/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
(r223691 → r223989)

     if (opcodeID == op_loop_hint)
         out.append(bytecodeOffset);
-    else if (opcodeID == op_enter && codeBlock->hasTailCalls()) {
-        // We need to insert a jump after op_enter, so recursive tail calls have somewhere to jump to.
-        // But we only want to pay that price for functions that have at least one tail call.
-        out.append(bytecodeOffset + opcodeLengths[op_enter]);
-    }
 }
…
 {
     ASSERT(out.isEmpty());
 
-    // The code block has a superset of the jump targets. So if it claims to have none, we are done.
+    // We will derive a superset of the jump targets that the code block thinks it has.
+    // So, if the code block claims there are none, then we are done.
     if (Mode == ComputePreciseJumpTargetsMode::FollowCodeBlockClaim && !codeBlock->numberOfJumpTargets())
         return;
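The removed branch made every code block containing at least one tail call publish the offset just past op_enter as a jump target, so an optimized recursive tail call later had somewhere to jump back to. A self-contained sketch of that collection pattern follows; the OpcodeID enum and Instruction struct are simplified stand-ins, not the JSC bytecode representation:

    #include <cstdint>
    #include <vector>

    // Simplified stand-ins for the bytecode stream; not the JSC types.
    enum OpcodeID { op_enter, op_loop_hint, op_tail_call, op_ret };

    struct Instruction {
        OpcodeID opcode;
        std::uint32_t offset; // bytecode offset of this instruction
        std::uint32_t length; // size of this instruction in the stream
    };

    // Collect every offset a jump may target. Loop headers always qualify; the offset
    // just past op_enter qualifies only when the function contains at least one tail
    // call, mirroring the "only pay for it if needed" condition in the removed code.
    std::vector<std::uint32_t> collectJumpTargets(const std::vector<Instruction>& bytecode, bool hasTailCalls)
    {
        std::vector<std::uint32_t> targets;
        for (const Instruction& instruction : bytecode) {
            if (instruction.opcode == op_loop_hint)
                targets.push_back(instruction.offset);
            else if (instruction.opcode == op_enter && hasTailCalls)
                targets.push_back(instruction.offset + instruction.length);
        }
        return targets;
    }

The hasTailCalls condition is what keeps the extra basic-block split from costing anything in functions that cannot benefit from it.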
trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
(r223691 → r223989)

     , m_derivedContextType(static_cast<unsigned>(info.derivedContextType()))
     , m_evalContextType(static_cast<unsigned>(info.evalContextType()))
-    , m_hasTailCalls(false)
     , m_lineCount(0)
     , m_endColumn(UINT_MAX)
trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
(r223691 → r223989)

     bool isArrowFunctionContext() const { return m_isArrowFunctionContext; }
     bool isClassContext() const { return m_isClassContext; }
-    bool hasTailCalls() const { return m_hasTailCalls; }
-    void setHasTailCalls() { m_hasTailCalls = true; }
 
     void addExpressionInfo(unsigned instructionOffset, int divot,
…
     unsigned m_derivedContextType : 2;
     unsigned m_evalContextType : 2;
-    unsigned m_hasTailCalls : 1;
     unsigned m_lineCount;
     unsigned m_endColumn;
trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
(r223691 → r223989)

 {
     emitOpcode(op_enter);
-
-    // We must add the end of op_enter as a potential jump target, because the bytecode parser may decide to split its basic block
-    // to have somewhere to jump to if there is a recursive tail-call that points to this function.
-    m_codeBlock->addJumpTarget(instructions().size());
-    // This disables peephole optimizations when an instruction is a jump target
-    m_lastOpcodeID = op_end;
 }
…
 RegisterID* BytecodeGenerator::emitCallInTailPosition(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall)
 {
-    if (m_inTailPosition) {
-        m_codeBlock->setHasTailCalls();
-        return emitCall(op_tail_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
-    }
-    return emitCall(op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
+    return emitCall(m_inTailPosition ? op_tail_call : op_call, dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);
 }
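Two details of the reverted emitEnter() are worth spelling out: the generator recorded the position right after op_enter as a jump target, and it then pretended the last emitted opcode was op_end so that peephole rewriting could not fuse an instruction across that target. A toy emitter showing the same trick under assumed, simplified types; the opcode names and the Emitter struct are illustrative, not the BytecodeGenerator API:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum OpcodeID : std::uint8_t { op_enter, op_mov, op_add, op_jmp, op_end };

    struct Emitter {
        std::vector<std::uint8_t> stream;
        std::vector<std::size_t> jumpTargets;
        OpcodeID lastOpcodeID { op_end };

        void emit(OpcodeID op)
        {
            // A real peephole pass would inspect lastOpcodeID here and possibly rewrite
            // the previously emitted instruction; op_end matches no rewrite pattern.
            stream.push_back(op);
            lastOpcodeID = op;
        }

        void emitEnter()
        {
            emit(op_enter);
            // Everything after op_enter may become the target of a backwards jump
            // (an optimized recursive tail call), so record that offset and make sure
            // no peephole rewrite can merge an instruction across it.
            jumpTargets.push_back(stream.size());
            lastOpcodeID = op_end;
        }
    };

Pretending the last opcode was one that participates in no fusion is a cheap way to force a barrier at a jump target without threading an explicit flag through every peephole rule.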
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
(r223729 → r223989)

     void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
     Node* getArgumentCount();
-    bool handleRecursiveTailCall(Node* callTargetNode, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis);
     unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
     // Handle inlining. Return true if it succeeded, false if we need to plant a call.
…
     template<typename ChecksFunctor>
     void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);
+    void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
     template<typename ChecksFunctor>
…
     Vector<BasicBlock*> m_blockLinkingTargets;
 
+    // This is set by op_enter in parseBlock(), and indicates the first block of the function.
+    BasicBlock* m_entryBlock;
+
     // Optional: a continuation block for returns to jump to. It is set by early returns if it does not exist.
     BasicBlock* m_continuationBlock;
…
     Ref<BasicBlock> block = adoptRef(*new BasicBlock(bytecodeIndex, m_numArguments, m_numLocals, 1));
     BasicBlock* blockPtr = block.ptr();
-    // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
-    if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
     m_inlineStackTop->m_blockLinkingTargets.append(blockPtr);
     m_graph.appendBlock(WTFMove(block));
…
     ASSERT(block->bytecodeBegin == UINT_MAX);
     block->bytecodeBegin = bytecodeIndex;
-    // m_blockLinkingTargets must always be sorted in increasing order of bytecodeBegin
-    if (m_inlineStackTop->m_blockLinkingTargets.size())
-        ASSERT(m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < bytecodeIndex);
     m_inlineStackTop->m_blockLinkingTargets.append(block);
…
     if (callLinkStatus.canOptimize()) {
         VirtualRegister thisArgument = virtualRegisterForArgument(0, registerOffset);
-
-        if (op == TailCall && handleRecursiveTailCall(callTarget, callLinkStatus, registerOffset, thisArgument, argumentCountIncludingThis))
-            return Terminal;
 
         // Inlining is quite complex, and managed by a pipeline of functions:
…
     for (int i = 0; i < argumentCountIncludingThis; ++i)
         addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
 }
-
-bool ByteCodeParser::handleRecursiveTailCall(Node* callTargetNode, const CallLinkStatus& callLinkStatus, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis)
-{
-    // FIXME: We currently only do this optimisation in the simple, non-polymorphic case.
-    // https://bugs.webkit.org/show_bug.cgi?id=178390
-    if (callLinkStatus.couldTakeSlowPath() || callLinkStatus.size() != 1)
-        return false;
-
-    auto targetExecutable = callLinkStatus[0].executable();
-    InlineStackEntry* stackEntry = m_inlineStackTop;
-    do {
-        if (targetExecutable != stackEntry->executable())
-            continue;
-        VERBOSE_LOG("   We found a recursive tail call, trying to optimize it into a jump.\n");
-
-        if (auto* callFrame = stackEntry->m_inlineCallFrame) {
-            // Some code may statically use the argument count from the InlineCallFrame, so it would be invalid to loop back if it does not match.
-            // We "continue" instead of returning false in case another stack entry further on the stack has the right number of arguments.
-            if (argumentCountIncludingThis != static_cast<int>(callFrame->argumentCountIncludingThis))
-                continue;
-        } else {
-            // We are in the machine code entry (i.e. the original caller).
-            // If we have more arguments than the number of parameters to the function, it is not clear where we could put them on the stack.
-            if (argumentCountIncludingThis > m_codeBlock->numParameters())
-                return false;
-        }
-
-        // We must add some check that the profiling information was correct and the target of this call is what we thought
-        emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);
-        // We flush everything, as if we were in the backedge of a loop (see treatment of op_jmp in parseBlock).
-        flushForTerminal();
-
-        // We must set the arguments to the right values
-        int argIndex = 0;
-        for (; argIndex < argumentCountIncludingThis; ++argIndex) {
-            Node* value = get(virtualRegisterForArgument(argIndex, registerOffset));
-            setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), value, NormalSet);
-        }
-        Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
-        for (; argIndex < stackEntry->m_codeBlock->numParameters(); ++argIndex)
-            setDirect(stackEntry->remapOperand(virtualRegisterForArgument(argIndex)), undefined, NormalSet);
-
-        // We must repeat the work of op_enter here as we will jump right after it.
-        // We jump right after it and not before it, because of some invariant saying that a CFG root cannot have predecessors in the IR.
-        for (int i = 0; i < stackEntry->m_codeBlock->m_numVars; ++i)
-            setDirect(stackEntry->remapOperand(virtualRegisterForLocal(i)), undefined, NormalSet);
-
-        // We want to emit the SetLocals with an exit origin that points to the place we are jumping to.
-        unsigned oldIndex = m_currentIndex;
-        auto oldStackTop = m_inlineStackTop;
-        m_inlineStackTop = stackEntry;
-        m_currentIndex = OPCODE_LENGTH(op_enter);
-        m_exitOK = true;
-        processSetLocalQueue();
-        m_currentIndex = oldIndex;
-        m_inlineStackTop = oldStackTop;
-        m_exitOK = false;
-
-        BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), OPCODE_LENGTH(op_enter), getBytecodeBeginForBlock);
-        RELEASE_ASSERT(entryBlockPtr);
-        addJumpTo(*entryBlockPtr);
-        return true;
-        // It would be unsound to jump over a non-tail call: the "tail" call is not really a tail call in that case.
-    } while (stackEntry->m_inlineCallFrame && stackEntry->m_inlineCallFrame->kind == InlineCallFrame::TailCall && (stackEntry = stackEntry->m_caller));
-
-    // The tail call was not recursive
-    return false;
-}
…
                 set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
 
+            m_inlineStackTop->m_entryBlock = m_currentBlock;
+
             NEXT_OPCODE(op_enter);
         }
…
             NEXT_OPCODE(op_tail_call);
         else
-            LAST_OPCODE_LINKED(op_tail_call);
-        // We use LAST_OPCODE_LINKED instead of LAST_OPCODE because if the tail call was optimized, it may now be a jump to a bytecode index in a different InlineStackEntry.
+            LAST_OPCODE(op_tail_call);
     }
…
     m_graph.determineReachability();
+    if (Options::verboseDFGBytecodeParsing())
+        m_graph.dump();
     m_graph.killUnreachableBlocks();
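The heart of the reverted DFG change is handleRecursiveTailCall() above: it walks the inline stack from the innermost entry outwards, looking for a frame whose executable matches the call target and whose argument count is compatible, and it only keeps walking while the intervening frames were themselves entered through tail calls. Below is a highly simplified model of that walk, using stand-in types rather than the DFG's InlineStackEntry; it is illustrative only, and it omits the parts of the real code that emit function checks, flush live values, materialize undefined for missing arguments and locals, and binary-search m_blockLinkingTargets for the block just after op_enter:

    // Stand-in for the DFG's InlineStackEntry; illustrative only.
    struct Frame {
        const void* executable { nullptr };  // identity of the function this frame is parsing
        int argumentCountIncludingThis { 0 };
        bool enteredByTailCall { false };    // this frame was created by inlining a tail call
        Frame* caller { nullptr };
    };

    // Return the innermost frame that a tail call of `target` with `argumentCount`
    // arguments may legally loop back to, or nullptr if a real call must be planted.
    Frame* findRecursiveTailCallTarget(Frame* top, const void* target, int argumentCount)
    {
        for (Frame* frame = top; frame; frame = frame->caller) {
            // A frame for the same executable with a compatible argument count means the
            // call re-enters a function already on the (inline) stack: it can become a jump.
            if (frame->executable == target && frame->argumentCountIncludingThis == argumentCount)
                return frame;
            // It is only sound to keep walking outwards if this frame was itself entered
            // through a tail call; otherwise the "tail" call is not really in tail position
            // with respect to the outer frames.
            if (!frame->enteredByTailCall)
                return nullptr;
        }
        return nullptr;
    }

Once such a frame is found, the real parser overwrites that frame's arguments and locals with setDirect() and emits a jump to its entry block, which is why the bytecode generator had to guarantee a jump target right after op_enter in the first place.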