Changeset 242989 in WebKit
- Timestamp:
- Mar 14, 2019 9:31:52 PM (5 years ago)
- Location:
- trunk
- Files:
-
- 2 added
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JSTests/ChangeLog
r242955 → r242989

2019-03-14  Saam barati  <sbarati@apple.com>

        We can't remove code after ForceOSRExit until after FixupPhase
        https://bugs.webkit.org/show_bug.cgi?id=186916
        <rdar://problem/41396612>

        Reviewed by Yusuke Suzuki.

        * stress/movhint-backwards-propagation-must-merge-use-as-value-add.js: Added.
        (foo):
        * stress/movhint-backwards-propagation-must-merge-use-as-value.js: Added.
        (foo):

2019-03-13  Michael Saboff  <msaboff@apple.com>
trunk/Source/JavaScriptCore/ChangeLog
r242982 → r242989

2019-03-14  Saam barati  <sbarati@apple.com>

        We can't remove code after ForceOSRExit until after FixupPhase
        https://bugs.webkit.org/show_bug.cgi?id=186916
        <rdar://problem/41396612>

        Reviewed by Yusuke Suzuki.

        There was an optimization in the bytecode parser I added in r232742 that converted blocks
        with ForceOSRExit in them to remove all IR after the ForceOSRExit. However,
        this is incorrect because it breaks backwards propagation. For example, it
        could incorrectly lead us to think it's safe to not check for overflow in
        an Add because such Add has no non-int uses. Backwards propagation relies on
        having a view over bytecode uses, and this optimization broke that. This patch
        rolls out that optimization, as initial perf data shows it may no longer be
        needed.

        * dfg/DFGByteCodeParser.cpp:
        (JSC::DFG::ByteCodeParser::addToGraph):
        (JSC::DFG::ByteCodeParser::parse):

2019-03-14  Saam barati  <sbarati@apple.com>
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r242715 → r242989

@@ -716,6 +716,4 @@ (JSC::DFG::ByteCodeParser::addToGraph)
     VERBOSE_LOG("        appended ", node, " ", Graph::opName(node->op()), "\n");

-    m_hasAnyForceOSRExits |= (node->op() == ForceOSRExit);
-
     m_currentBlock->append(node);
     if (clobbersExitState(m_graph, node))

@@ -1179,5 +1177,4 @@
     const Instruction* m_currentInstruction;
     bool m_hasDebuggerEnabled;
-    bool m_hasAnyForceOSRExits { false };
 };

@@ -7288,102 +7285,4 @@ (JSC::DFG::ByteCodeParser::parse)
         linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);

-    if (m_hasAnyForceOSRExits) {
-        BlockSet blocksToIgnore;
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            if (block->isOSRTarget && block->bytecodeBegin == m_graph.m_plan.osrEntryBytecodeIndex()) {
-                blocksToIgnore.add(block);
-                break;
-            }
-        }
-
-        {
-            bool isSafeToValidate = false;
-            auto postOrder = m_graph.blocksInPostOrder(isSafeToValidate); // This algorithm doesn't rely on the predecessors list, which is not yet built.
-            bool changed;
-            do {
-                changed = false;
-                for (BasicBlock* block : postOrder) {
-                    for (BasicBlock* successor : block->successors()) {
-                        if (blocksToIgnore.contains(successor)) {
-                            changed |= blocksToIgnore.add(block);
-                            break;
-                        }
-                    }
-                }
-            } while (changed);
-        }
-
-        InsertionSet insertionSet(m_graph);
-        Operands<VariableAccessData*> mapping(OperandsLike, m_graph.block(0)->variablesAtHead);
-
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            if (blocksToIgnore.contains(block))
-                continue;
-
-            mapping.fill(nullptr);
-            if (validationEnabled()) {
-                // Verify that it's correct to fill mapping with nullptr.
-                for (unsigned i = 0; i < block->variablesAtHead.size(); ++i) {
-                    Node* node = block->variablesAtHead.at(i);
-                    RELEASE_ASSERT(!node);
-                }
-            }
-
-            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
-                Node* node = block->at(nodeIndex);
-
-                if (node->hasVariableAccessData(m_graph))
-                    mapping.operand(node->local()) = node->variableAccessData();
-
-                if (node->op() == ForceOSRExit) {
-                    NodeOrigin endOrigin = node->origin.withExitOK(true);
-
-                    if (validationEnabled()) {
-                        // This verifies that we don't need to change any of the successors's predecessor
-                        // list after planting the Unreachable below. At this point in the bytecode
-                        // parser, we haven't linked up the predecessor lists yet.
-                        for (BasicBlock* successor : block->successors())
-                            RELEASE_ASSERT(successor->predecessors.isEmpty());
-                    }
-
-                    block->resize(nodeIndex + 1);
-
-                    insertionSet.insertNode(block->size(), SpecNone, ExitOK, endOrigin);
-
-                    auto insertLivenessPreservingOp = [&] (InlineCallFrame* inlineCallFrame, NodeType op, VirtualRegister operand) {
-                        VariableAccessData* variable = mapping.operand(operand);
-                        if (!variable) {
-                            variable = newVariableAccessData(operand);
-                            mapping.operand(operand) = variable;
-                        }
-
-                        VirtualRegister argument = operand - (inlineCallFrame ? inlineCallFrame->stackOffset : 0);
-                        if (argument.isArgument() && !argument.isHeader()) {
-                            const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame);
-                            arguments[argument.toArgument()]->addVariable(variable);
-                        }
-                        insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable));
-                    };
-                    auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
-                        insertLivenessPreservingOp(inlineCallFrame, Flush, operand);
-                    };
-                    auto addPhantomLocalDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) {
-                        insertLivenessPreservingOp(inlineCallFrame, PhantomLocal, operand);
-                    };
-                    flushForTerminalImpl(endOrigin.semantic, addFlushDirect, addPhantomLocalDirect);
-
-                    insertionSet.insertNode(block->size(), SpecNone, Unreachable, endOrigin);
-                    insertionSet.execute(block);
-                    break;
-                }
-            }
-        }
-    } else if (validationEnabled()) {
-        // Ensure our bookkeeping for ForceOSRExit nodes is working.
-        for (BasicBlock* block : m_graph.blocksInNaturalOrder()) {
-            for (Node* node : *block)
-                RELEASE_ASSERT(node->op() != ForceOSRExit);
-        }
-    }
-
     m_graph.determineReachability();
     m_graph.killUnreachableBlocks();
Note: See TracChangeset
for help on using the changeset viewer.