Changeset 94996 in webkit
- Timestamp:
- Sep 12, 2011 6:33:43 PM (13 years ago)
- Location:
- trunk/Source/JavaScriptCore
- Files:
-
- 1 added
- 20 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r94992 r94996 1 2011-09-09 Filip Pizlo <fpizlo@apple.com> 2 3 JavaScriptCore does not have speculative->baseline OSR 4 https://bugs.webkit.org/show_bug.cgi?id=67826 5 6 Reviewed by Oliver Hunt. 7 8 This adds the ability to bail out of DFG speculative JIT execution by 9 performing an on-stack replacement (OSR) that results in the control 10 flow going to the equivalent code generated by the old JIT. 11 12 This required a number of new features, as well as taking advantage of 13 some features that happened to already be present: 14 15 We already had a policy of storing the bytecode index for which a DFG 16 node was generated inside the DFG::Node class. This was previously 17 called exceptionInfo. It's now renamed to codeOrigin to reflect that 18 it's used for more than just exceptions. OSR uses this to figure out 19 which bytecode index to use to look up the machine code location in 20 the code generated by the old JIT that we should be jumping to. 21 22 CodeBlock now stores a mapping between bytecode indices and machine 23 code offsets for code generated by the old JIT. This is implemented 24 by CompactJITCodeMap, which tries to compress this data a bit. The 25 OSR compiler decodes this and uses it to find the machine code 26 locations it should be jumping to. 27 28 We already had a mechanism that emitted SetLocal nodes in the DFG graph 29 that told us the time at which the old JIT would have stored something 30 into its register file, and the DFG::Node that corresponds to the value 31 that it would have stored. These SetLocal's were mostly dead-code- 32 eliminated, but our DCE leaves the nodes intact except for making them 33 have 0 as the ref count. This allows the OSR compiler to construct a 34 mapping between the state as it would have been seen by the old JIT 35 and the state as the DFG JIT sees it. The OSR compiler uses this to 36 generate code that reshapes the call frame so that it is like what the 37 old JIT would expect. 
38 39 Finally, when DFG_OSR is enabled (the default for TIERED_COMPILATION) 40 we no longer emit the non-speculative path. 41 42 * JavaScriptCore.xcodeproj/project.pbxproj: 43 * bytecode/CodeBlock.h: 44 * dfg/DFGByteCodeParser.cpp: 45 (JSC::DFG::ByteCodeParser::currentCodeOrigin): 46 (JSC::DFG::ByteCodeParser::addToGraph): 47 * dfg/DFGGPRInfo.h: 48 * dfg/DFGGenerationInfo.h: 49 (JSC::DFG::GenerationInfo::alive): 50 * dfg/DFGGraph.cpp: 51 (JSC::DFG::Graph::dump): 52 * dfg/DFGJITCodeGenerator.cpp: 53 (JSC::DFG::JITCodeGenerator::emitCall): 54 * dfg/DFGJITCodeGenerator.h: 55 (JSC::DFG::JITCodeGenerator::appendCallWithExceptionCheck): 56 * dfg/DFGJITCompiler.cpp: 57 (JSC::DFG::JITCompiler::exitSpeculativeWithOSR): 58 (JSC::DFG::JITCompiler::linkOSRExits): 59 (JSC::DFG::JITCompiler::compileBody): 60 (JSC::DFG::JITCompiler::link): 61 * dfg/DFGJITCompiler.h: 62 (JSC::DFG::CallRecord::CallRecord): 63 (JSC::DFG::JITCompiler::notifyCall): 64 (JSC::DFG::JITCompiler::appendCallWithExceptionCheck): 65 (JSC::DFG::JITCompiler::appendCallWithFastExceptionCheck): 66 (JSC::DFG::JITCompiler::addJSCall): 67 (JSC::DFG::JITCompiler::JSCallRecord::JSCallRecord): 68 * dfg/DFGNode.h: 69 (JSC::DFG::CodeOrigin::CodeOrigin): 70 (JSC::DFG::CodeOrigin::isSet): 71 (JSC::DFG::CodeOrigin::bytecodeIndex): 72 (JSC::DFG::Node::Node): 73 (JSC::DFG::Node::child1Unchecked): 74 * dfg/DFGNonSpeculativeJIT.cpp: 75 (JSC::DFG::NonSpeculativeJIT::compile): 76 * dfg/DFGSpeculativeJIT.cpp: 77 (JSC::DFG::ValueSource::dump): 78 (JSC::DFG::ValueRecovery::dump): 79 (JSC::DFG::OSRExit::OSRExit): 80 (JSC::DFG::SpeculativeJIT::compile): 81 (JSC::DFG::SpeculativeJIT::compileMovHint): 82 (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor): 83 * dfg/DFGSpeculativeJIT.h: 84 (JSC::DFG::ValueSource::ValueSource): 85 (JSC::DFG::ValueSource::isSet): 86 (JSC::DFG::ValueSource::nodeIndex): 87 (JSC::DFG::ValueRecovery::ValueRecovery): 88 (JSC::DFG::ValueRecovery::alreadyInRegisterFile): 89 (JSC::DFG::ValueRecovery::inGPR): 90 
(JSC::DFG::ValueRecovery::inFPR): 91 (JSC::DFG::ValueRecovery::displacedInRegisterFile): 92 (JSC::DFG::ValueRecovery::constant): 93 (JSC::DFG::ValueRecovery::technique): 94 (JSC::DFG::ValueRecovery::gpr): 95 (JSC::DFG::ValueRecovery::fpr): 96 (JSC::DFG::ValueRecovery::virtualRegister): 97 (JSC::DFG::OSRExit::numberOfRecoveries): 98 (JSC::DFG::OSRExit::valueRecovery): 99 (JSC::DFG::OSRExit::isArgument): 100 (JSC::DFG::OSRExit::argumentForIndex): 101 (JSC::DFG::OSRExit::variableForIndex): 102 (JSC::DFG::OSRExit::operandForIndex): 103 (JSC::DFG::SpeculativeJIT::osrExits): 104 (JSC::DFG::SpeculativeJIT::speculationCheck): 105 (JSC::DFG::SpeculativeJIT::valueSourceForOperand): 106 (JSC::DFG::SpeculativeJIT::setNodeIndexForOperand): 107 (JSC::DFG::SpeculativeJIT::valueSourceReferenceForOperand): 108 (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor): 109 (JSC::DFG::SpeculationCheckIndexIterator::SpeculationCheckIndexIterator): 110 (JSC::DFG::SpeculativeJIT::SpeculativeJIT): 111 * jit/CompactJITCodeMap.h: Added. 
112 (JSC::BytecodeAndMachineOffset::BytecodeAndMachineOffset): 113 (JSC::BytecodeAndMachineOffset::getBytecodeIndex): 114 (JSC::BytecodeAndMachineOffset::getMachineCodeOffset): 115 (JSC::CompactJITCodeMap::~CompactJITCodeMap): 116 (JSC::CompactJITCodeMap::decode): 117 (JSC::CompactJITCodeMap::CompactJITCodeMap): 118 (JSC::CompactJITCodeMap::at): 119 (JSC::CompactJITCodeMap::decodeNumber): 120 (JSC::CompactJITCodeMap::Encoder::Encoder): 121 (JSC::CompactJITCodeMap::Encoder::~Encoder): 122 (JSC::CompactJITCodeMap::Encoder::append): 123 (JSC::CompactJITCodeMap::Encoder::finish): 124 (JSC::CompactJITCodeMap::Encoder::appendByte): 125 (JSC::CompactJITCodeMap::Encoder::encodeNumber): 126 (JSC::CompactJITCodeMap::Encoder::ensureCapacityFor): 127 * jit/JIT.cpp: 128 (JSC::JIT::privateCompileMainPass): 129 (JSC::JIT::privateCompile): 130 * jit/JIT.h: 131 * runtime/JSGlobalData.cpp: 132 (JSC::JSGlobalData::JSGlobalData): 133 (JSC::JSGlobalData::~JSGlobalData): 134 * runtime/JSGlobalData.h: 135 (JSC::JSGlobalData::osrScratchBufferForSize): 136 * runtime/JSValue.cpp: 137 (JSC::JSValue::description): 138 1 139 2011-09-12 Geoffrey Garen <ggaren@apple.com> 2 140 -
trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
r94920 r94996 66 66 0FD3C82814115D4F00FD81CB /* DFGDriver.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD3C82214115D0E00FD81CB /* DFGDriver.h */; }; 67 67 0FD82E2114172CE300179C94 /* DFGCapabilities.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E1E14172C2F00179C94 /* DFGCapabilities.cpp */; }; 68 0FD82E39141AB14D00179C94 /* CompactJITCodeMap.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82E37141AB14200179C94 /* CompactJITCodeMap.h */; settings = {ATTRIBUTES = (Private, ); }; }; 68 69 1400067712A6F7830064D123 /* OSAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1400067612A6F7830064D123 /* OSAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; }; 69 70 1400069312A6F9E10064D123 /* OSAllocatorPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */; }; … … 798 799 0FD82E1E14172C2F00179C94 /* DFGCapabilities.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCapabilities.cpp; path = dfg/DFGCapabilities.cpp; sourceTree = "<group>"; }; 799 800 0FD82E1F14172C2F00179C94 /* DFGCapabilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCapabilities.h; path = dfg/DFGCapabilities.h; sourceTree = "<group>"; }; 801 0FD82E37141AB14200179C94 /* CompactJITCodeMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CompactJITCodeMap.h; sourceTree = "<group>"; }; 800 802 1400067612A6F7830064D123 /* OSAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OSAllocator.h; sourceTree = "<group>"; }; 801 803 1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OSAllocatorPosix.cpp; sourceTree = "<group>"; }; … … 1577 1579 isa = PBXGroup; 1578 1580 children = ( 1581 0FD82E37141AB14200179C94 /* CompactJITCodeMap.h */, 1579 1582 
A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */, 1580 1583 A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */, … … 2395 2398 86D3B2C410156BDE002865E7 /* ARMAssembler.h in Headers */, 2396 2399 86ADD1450FDDEA980006EEC2 /* ARMv7Assembler.h in Headers */, 2400 0FD82E39141AB14D00179C94 /* CompactJITCodeMap.h in Headers */, 2397 2401 BC18C3E60E16F5CD00B34460 /* ArrayConstructor.h in Headers */, 2398 2402 BC18C46E0E16F5CD00B34460 /* TCSpinLock.h in Headers */, -
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
r94920 r94996 31 31 #define CodeBlock_h 32 32 33 #include "CompactJITCodeMap.h" 33 34 #include "EvalCodeCache.h" 34 35 #include "Instruction.h" … … 299 300 #endif 300 301 302 #if ENABLE(TIERED_COMPILATION) 303 void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap) 304 { 305 m_jitCodeMap = jitCodeMap; 306 } 307 CompactJITCodeMap* jitCodeMap() 308 { 309 return m_jitCodeMap.get(); 310 } 311 #endif 312 301 313 #if ENABLE(INTERPRETER) 302 314 unsigned bytecodeOffset(Instruction* returnAddress) … … 652 664 SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls; 653 665 #endif 666 #if ENABLE(TIERED_COMPILATION) 667 OwnPtr<CompactJITCodeMap> m_jitCodeMap; 668 #endif 654 669 #if ENABLE(VALUE_PROFILER) 655 670 SegmentedVector<ValueProfile, 8> m_valueProfiles; … … 667 682 SymbolTable* m_symbolTable; 668 683 669 OwnPtr<CodeBlock> m_alternative; // FIXME make this do something684 OwnPtr<CodeBlock> m_alternative; 670 685 671 686 struct RareData { -
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r94942 r94996 368 368 return getJSConstant(m_constant1); 369 369 } 370 370 371 CodeOrigin currentCodeOrigin() 372 { 373 return CodeOrigin(m_currentIndex); 374 } 371 375 372 376 // These methods create a node and add it to the graph. If nodes of this type are … … 375 379 { 376 380 NodeIndex resultIndex = (NodeIndex)m_graph.size(); 377 m_graph.append(Node(op, m_currentIndex, child1, child2, child3));381 m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3)); 378 382 379 383 if (op & NodeMustGenerate) … … 384 388 { 385 389 NodeIndex resultIndex = (NodeIndex)m_graph.size(); 386 m_graph.append(Node(op, m_currentIndex, info, child1, child2, child3));390 m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3)); 387 391 388 392 if (op & NodeMustGenerate) … … 393 397 { 394 398 NodeIndex resultIndex = (NodeIndex)m_graph.size(); 395 m_graph.append(Node(op, m_currentIndex, info1, info2, child1, child2, child3));399 m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3)); 396 400 397 401 if (op & NodeMustGenerate) … … 403 407 { 404 408 NodeIndex resultIndex = (NodeIndex)m_graph.size(); 405 m_graph.append(Node(Node::VarArg, op, m_currentIndex, info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));409 m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs)); 406 410 407 411 m_numPassedVarArgs = 0; -
trunk/Source/JavaScriptCore/dfg/DFGGPRInfo.h
r85271 r94996 43 43 44 44 // These registers match the old JIT. 45 static const GPRReg cachedResultRegister = X86Registers::eax; 45 46 static const GPRReg timeoutCheckRegister = X86Registers::r12; 46 47 static const GPRReg callFrameRegister = X86Registers::r13; -
trunk/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
r94629 r94996 275 275 } 276 276 277 #ifndef NDEBUG278 277 bool alive() 279 278 { 280 279 return m_useCount; 281 280 } 282 #endif283 281 284 282 private: -
trunk/Source/JavaScriptCore/dfg/DFGGraph.cpp
r94942 r94996 53 53 54 54 unsigned refCount = node.refCount(); 55 if (!refCount) { 56 printf("% 4d:\tskipped %s\n", (int)nodeIndex, opName(op)); 57 return; 58 } 55 bool skipped = !refCount; 59 56 bool mustGenerate = node.mustGenerate(); 60 if (mustGenerate) 57 if (mustGenerate) { 58 ASSERT(refCount); 61 59 --refCount; 60 } 62 61 63 62 // Example/explanation of dataflow dump output … … 78 77 // id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }. 79 78 // var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations. 80 printf("% 4d: \t<%c%u:", (int)nodeIndex, mustGenerate ? '!' : ' ', refCount);81 if (node.hasResult() )79 printf("% 4d:%s<%c%u:", (int)nodeIndex, skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount); 80 if (node.hasResult() && !skipped) 82 81 printf("%u", node.virtualRegister()); 83 82 else -
trunk/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp
r94942 r94996 1127 1127 1128 1128 JITCompiler::Call fastCall = m_jit.nearCall(); 1129 m_jit.notifyCall(fastCall, m_jit.graph()[m_compileIndex]. exceptionInfo);1129 m_jit.notifyCall(fastCall, m_jit.graph()[m_compileIndex].codeOrigin); 1130 1130 1131 1131 JITCompiler::Jump done = m_jit.jump(); … … 1134 1134 1135 1135 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1136 JITCompiler::Call slowCall = m_jit.appendCallWithFastExceptionCheck(slowCallFunction, m_jit.graph()[m_compileIndex]. exceptionInfo);1136 JITCompiler::Call slowCall = m_jit.appendCallWithFastExceptionCheck(slowCallFunction, m_jit.graph()[m_compileIndex].codeOrigin); 1137 1137 m_jit.move(Imm32(numArgs), GPRInfo::regT1); 1138 1138 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 1139 m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), m_jit.graph()[m_compileIndex]. exceptionInfo);1139 m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), m_jit.graph()[m_compileIndex].codeOrigin); 1140 1140 1141 1141 done.link(&m_jit); … … 1145 1145 jsValueResult(resultGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly); 1146 1146 1147 m_jit.addJSCall(fastCall, slowCall, targetToCheck, isCall, m_jit.graph()[m_compileIndex]. exceptionInfo);1147 m_jit.addJSCall(fastCall, slowCall, targetToCheck, isCall, m_jit.graph()[m_compileIndex].codeOrigin); 1148 1148 } 1149 1149 -
trunk/Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h
r94942 r94996 33 33 #include <dfg/DFGGraph.h> 34 34 #include <dfg/DFGJITCompiler.h> 35 #include <dfg/DFGNode.h> 35 36 #include <dfg/DFGOperations.h> 36 37 #include <dfg/DFGRegisterBank.h> … … 908 909 JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function) 909 910 { 910 return m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex]. exceptionInfo);911 return m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].codeOrigin); 911 912 } 912 913 -
trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
r94942 r94996 101 101 } 102 102 103 #if ENABLE(DFG_OSR_EXIT) 104 void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecovery* recovery, Vector<BytecodeAndMachineOffset>& decodedCodeMap) 105 { 106 // 1) Pro-forma stuff. 107 exit.m_check.link(this); 108 109 #if ENABLE(DFG_DEBUG_VERBOSE) 110 fprintf(stderr, "OSR exit for Node @%d (bc#%u) at JIT offset 0x%x ", (int)exit.m_nodeIndex, exit.m_bytecodeIndex, debugOffset()); 111 exit.dump(stderr); 112 #endif 113 #if ENABLE(DFG_JIT_BREAK_ON_SPECULATION_FAILURE) 114 breakpoint(); 115 #endif 116 117 #if ENABLE(DFG_VERBOSE_SPECULATION_FAILURE) 118 SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo; 119 debugInfo->codeBlock = m_codeBlock; 120 debugInfo->debugOffset = debugOffset(); 121 122 debugCall(debugOperationPrintSpeculationFailure, debugInfo); 123 #endif 124 125 // 2) Perform speculation recovery. This only comes into play when an operation 126 // starts mutating state before verifying the speculation it has already made. 127 128 GPRReg alreadyBoxed = InvalidGPRReg; 129 130 if (recovery) { 131 switch (recovery->type()) { 132 case SpeculativeAdd: 133 sub32(recovery->src(), recovery->dest()); 134 orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest()); 135 alreadyBoxed = recovery->dest(); 136 break; 137 138 case BooleanSpeculationCheck: 139 xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest()); 140 break; 141 142 default: 143 break; 144 } 145 } 146 147 // 3) Figure out how many scratch slots we'll need. We need one for every GPR/FPR 148 // whose destination is now occupied by a DFG virtual register, and we need 149 // one for every displaced virtual register if there are more than 150 // GPRInfo::numberOfRegisters of them. Also see if there are any constants, 151 // any undefined slots, any FPR slots, and any unboxed ints. 
152 153 Vector<bool> poisonedVirtualRegisters(exit.m_variables.size()); 154 for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i) 155 poisonedVirtualRegisters[i] = false; 156 157 unsigned numberOfPoisonedVirtualRegisters = 0; 158 unsigned numberOfDisplacedVirtualRegisters = 0; 159 160 // Booleans for fast checks. We expect that most OSR exits do not have to rebox 161 // Int32s, have no FPRs, and have no constants. If there are constants, we 162 // expect most of them to be jsUndefined(); if that's true then we handle that 163 // specially to minimize code size and execution time. 164 bool haveUnboxedInt32s = false; 165 bool haveFPRs = false; 166 bool haveConstants = false; 167 bool haveUndefined = false; 168 169 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 170 const ValueRecovery& recovery = exit.valueRecovery(index); 171 switch (recovery.technique()) { 172 case DisplacedInRegisterFile: 173 numberOfDisplacedVirtualRegisters++; 174 ASSERT((int)recovery.virtualRegister() >= 0); 175 176 // See if we might like to store to this virtual register before doing 177 // virtual register shuffling. If so, we say that the virtual register 178 // is poisoned: it cannot be stored to until after displaced virtual 179 // registers are handled. We track poisoned virtual register carefully 180 // to ensure this happens efficiently. Note that we expect this case 181 // to be rare, so the handling of it is optimized for the cases in 182 // which it does not happen. 
183 if (recovery.virtualRegister() < (int)exit.m_variables.size()) { 184 switch (exit.m_variables[recovery.virtualRegister()].technique()) { 185 case InGPR: 186 case UnboxedInt32InGPR: 187 case InFPR: 188 if (!poisonedVirtualRegisters[recovery.virtualRegister()]) { 189 poisonedVirtualRegisters[recovery.virtualRegister()] = true; 190 numberOfPoisonedVirtualRegisters++; 191 } 192 break; 193 default: 194 break; 195 } 196 } 197 break; 198 199 case UnboxedInt32InGPR: 200 haveUnboxedInt32s = true; 201 break; 202 203 case InFPR: 204 haveFPRs = true; 205 break; 206 207 case Constant: 208 haveConstants = true; 209 if (recovery.constant().isUndefined()) 210 haveUndefined = true; 211 break; 212 213 default: 214 break; 215 } 216 } 217 218 EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(globalData()->osrScratchBufferForSize(sizeof(EncodedJSValue) * (numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters)))); 219 220 // From here on, the code assumes that it is profitable to maximize the distance 221 // between when something is computed and when it is stored. 222 223 // 4) Perform all reboxing of integers. 224 225 if (haveUnboxedInt32s) { 226 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 227 const ValueRecovery& recovery = exit.valueRecovery(index); 228 if (recovery.technique() == UnboxedInt32InGPR && recovery.gpr() != alreadyBoxed) 229 orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr()); 230 } 231 } 232 233 // 5) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage. 234 // Note that GPRs do not have a fast change (like haveFPRs) because we expect that 235 // most OSR failure points will have at least one GPR that needs to be dumped. 
236 237 unsigned scratchIndex = 0; 238 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 239 const ValueRecovery& recovery = exit.valueRecovery(index); 240 int operand = exit.operandForIndex(index); 241 switch (recovery.technique()) { 242 case InGPR: 243 case UnboxedInt32InGPR: 244 if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) 245 storePtr(recovery.gpr(), scratchBuffer + scratchIndex++); 246 else 247 storePtr(recovery.gpr(), addressFor((VirtualRegister)operand)); 248 break; 249 default: 250 break; 251 } 252 } 253 254 // At this point all GPRs are available for scratch use. 255 256 if (haveFPRs) { 257 // 6) Box all doubles (relies on there being more GPRs than FPRs) 258 259 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 260 const ValueRecovery& recovery = exit.valueRecovery(index); 261 if (recovery.technique() != InFPR) 262 continue; 263 FPRReg fpr = recovery.fpr(); 264 GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr)); 265 boxDouble(fpr, gpr); 266 } 267 268 // 7) Dump all doubles into the register file, or to the scratch storage if 269 // the destination virtual register is poisoned. 270 271 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 272 const ValueRecovery& recovery = exit.valueRecovery(index); 273 if (recovery.technique() != InFPR) 274 continue; 275 GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr())); 276 if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) 277 storePtr(gpr, scratchBuffer + scratchIndex++); 278 else 279 storePtr(gpr, addressFor((VirtualRegister)exit.operandForIndex(index))); 280 } 281 } 282 283 ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters); 284 285 // 8) Reshuffle displaced virtual registers. Optimize for the case that 286 // the number of displaced virtual registers is not more than the number 287 // of available physical registers. 
288 289 if (numberOfDisplacedVirtualRegisters) { 290 if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) { 291 // So far this appears to be the case that triggers all the time, but 292 // that is far from guaranteed. 293 294 unsigned displacementIndex = 0; 295 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 296 const ValueRecovery& recovery = exit.valueRecovery(index); 297 if (recovery.technique() != DisplacedInRegisterFile) 298 continue; 299 loadPtr(addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); 300 } 301 302 displacementIndex = 0; 303 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 304 const ValueRecovery& recovery = exit.valueRecovery(index); 305 if (recovery.technique() != DisplacedInRegisterFile) 306 continue; 307 storePtr(GPRInfo::toRegister(displacementIndex++), addressFor((VirtualRegister)exit.operandForIndex(index))); 308 } 309 } else { 310 // FIXME: This should use the shuffling algorithm that we use 311 // for speculative->non-speculative jumps, if we ever discover that 312 // some hot code with lots of live values that get displaced and 313 // spilled really enjoys frequently failing speculation. 314 315 // For now this code is engineered to be correct but probably not 316 // super. In particular, it correctly handles cases where for example 317 // the displacements are a permutation of the destination values, like 318 // 319 // 1 -> 2 320 // 2 -> 1 321 // 322 // It accomplishes this by simply lifting all of the virtual registers 323 // from their old (DFG JIT) locations and dropping them in a scratch 324 // location in memory, and then transferring from that scratch location 325 // to their new (old JIT) locations. 
326 327 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 328 const ValueRecovery& recovery = exit.valueRecovery(index); 329 if (recovery.technique() != DisplacedInRegisterFile) 330 continue; 331 loadPtr(addressFor(recovery.virtualRegister()), GPRInfo::regT0); 332 storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++); 333 } 334 335 scratchIndex = numberOfPoisonedVirtualRegisters; 336 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 337 const ValueRecovery& recovery = exit.valueRecovery(index); 338 if (recovery.technique() != DisplacedInRegisterFile) 339 continue; 340 loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0); 341 storePtr(GPRInfo::regT0, addressFor((VirtualRegister)exit.operandForIndex(index))); 342 } 343 344 ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters); 345 } 346 } 347 348 // 9) Dump all poisoned virtual registers. 349 350 scratchIndex = 0; 351 if (numberOfPoisonedVirtualRegisters) { 352 for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) { 353 if (!poisonedVirtualRegisters[virtualRegister]) 354 continue; 355 356 const ValueRecovery& recovery = exit.m_variables[virtualRegister]; 357 switch (recovery.technique()) { 358 case InGPR: 359 case UnboxedInt32InGPR: 360 case InFPR: 361 loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0); 362 storePtr(GPRInfo::regT0, addressFor((VirtualRegister)virtualRegister)); 363 break; 364 365 default: 366 break; 367 } 368 } 369 } 370 ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters); 371 372 // 10) Dump all constants. Optimize for Undefined, since that's a constant we see 373 // often. 
374 375 if (haveConstants) { 376 if (haveUndefined) 377 move(TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0); 378 379 for (int index = 0; index < exit.numberOfRecoveries(); ++index) { 380 const ValueRecovery& recovery = exit.valueRecovery(index); 381 if (recovery.technique() != Constant) 382 continue; 383 if (recovery.constant().isUndefined()) 384 storePtr(GPRInfo::regT0, addressFor((VirtualRegister)exit.operandForIndex(index))); 385 else 386 storePtr(TrustedImmPtr(JSValue::encode(recovery.constant())), addressFor((VirtualRegister)exit.operandForIndex(index))); 387 } 388 } 389 390 // 11) Load the result of the last bytecode operation into regT0. 391 392 if (exit.m_lastSetOperand != std::numeric_limits<int>::max()) 393 loadPtr(addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister); 394 395 // 12) Fix call frame. 396 397 ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT); 398 storePtr(TrustedImmPtr(codeBlock()->alternative()), addressFor((VirtualRegister)RegisterFile::CodeBlock)); 399 400 // 13) Jump into the corresponding baseline JIT code. 
401 402 BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_bytecodeIndex); 403 404 ASSERT(mapping); 405 ASSERT(mapping->m_bytecodeIndex == exit.m_bytecodeIndex); 406 407 void* jumpTarget = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock()->alternative()->getJITCode().start()) + mapping->m_machineCodeOffset); 408 409 ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister); 410 411 move(TrustedImmPtr(jumpTarget), GPRInfo::regT1); 412 jump(GPRInfo::regT1); 413 414 #if ENABLE(DFG_DEBUG_VERBOSE) 415 fprintf(stderr, " -> %p\n", jumpTarget); 416 #endif 417 } 418 419 void JITCompiler::linkOSRExits(SpeculativeJIT& speculative) 420 { 421 Vector<BytecodeAndMachineOffset> decodedCodeMap; 422 ASSERT(codeBlock()->alternative()); 423 ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT); 424 ASSERT(codeBlock()->alternative()->jitCodeMap()); 425 codeBlock()->alternative()->jitCodeMap()->decode(decodedCodeMap); 426 427 OSRExitVector::Iterator exitsIter = speculative.osrExits().begin(); 428 OSRExitVector::Iterator exitsEnd = speculative.osrExits().end(); 429 430 while (exitsIter != exitsEnd) { 431 const OSRExit& exit = *exitsIter; 432 exitSpeculativeWithOSR(exit, speculative.speculationRecovery(exit.m_recoveryIndex), decodedCodeMap); 433 ++exitsIter; 434 } 435 } 436 #else // ENABLE(DFG_OSR_EXIT) 103 437 class GeneralizedRegister { 104 438 public: … … 804 1138 ASSERT(!(entriesIter != entriesEnd)); 805 1139 } 1140 #endif // ENABLE(DFG_OSR_EXIT) 806 1141 807 1142 void JITCompiler::compileEntry() … … 845 1180 // non-speculative path. 
846 1181 if (compiledSpeculative) { 1182 #if ENABLE(DFG_OSR_EXIT) 1183 linkOSRExits(speculative); 1184 #else 847 1185 SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks()); 848 1186 NonSpeculativeJIT nonSpeculative(*this); … … 851 1189 // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one. 852 1190 linkSpeculationChecks(speculative, nonSpeculative); 1191 #endif 853 1192 } else { 854 1193 // If compilation through the SpeculativeJIT failed, throw away the code we generated. … … 859 1198 rewindToLabel(speculativePathBegin); 860 1199 1200 #if ENABLE(DFG_OSR_EXIT) 1201 SpeculationCheckIndexIterator checkIterator; 1202 #else 861 1203 SpeculationCheckVector noChecks; 862 1204 SpeculationCheckIndexIterator checkIterator(noChecks); 1205 #endif 863 1206 NonSpeculativeJIT nonSpeculative(*this); 864 1207 nonSpeculative.compile(checkIterator); … … 908 1251 if (m_calls[i].m_handlesExceptions) { 909 1252 unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call); 910 unsigned exceptionInfo = m_calls[i].m_ exceptionInfo;1253 unsigned exceptionInfo = m_calls[i].m_codeOrigin.bytecodeIndex(); 911 1254 m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo)); 912 1255 } -
trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.h
r94942 r94996 54 54 struct EntryLocation; 55 55 struct SpeculationCheck; 56 struct OSRExit; 56 57 57 58 #ifndef NDEBUG … … 83 84 84 85 // Constructor for a call with an exception handler. 85 CallRecord(MacroAssembler::Call call, FunctionPtr function, MacroAssembler::Jump exceptionCheck, ExceptionInfo exceptionInfo)86 CallRecord(MacroAssembler::Call call, FunctionPtr function, MacroAssembler::Jump exceptionCheck, CodeOrigin codeOrigin) 86 87 : m_call(call) 87 88 , m_function(function) 88 89 , m_exceptionCheck(exceptionCheck) 89 , m_ exceptionInfo(exceptionInfo)90 , m_codeOrigin(codeOrigin) 90 91 , m_handlesExceptions(true) 91 92 { … … 94 95 // Constructor for a call that may cause exceptions, but which are handled 95 96 // through some mechanism other than the in-line exception handler. 96 CallRecord(MacroAssembler::Call call, FunctionPtr function, ExceptionInfo exceptionInfo)97 CallRecord(MacroAssembler::Call call, FunctionPtr function, CodeOrigin codeOrigin) 97 98 : m_call(call) 98 99 , m_function(function) 99 , m_ exceptionInfo(exceptionInfo)100 , m_codeOrigin(codeOrigin) 100 101 , m_handlesExceptions(true) 101 102 { … … 105 106 FunctionPtr m_function; 106 107 MacroAssembler::Jump m_exceptionCheck; 107 ExceptionInfo m_exceptionInfo;108 CodeOrigin m_codeOrigin; 108 109 bool m_handlesExceptions; 109 110 }; … … 193 194 194 195 // Notify the JIT of a call that does not require linking. 195 void notifyCall(Call call, unsigned exceptionInfo)196 { 197 m_calls.append(CallRecord(call, FunctionPtr(), exceptionInfo));196 void notifyCall(Call call, CodeOrigin codeOrigin) 197 { 198 m_calls.append(CallRecord(call, FunctionPtr(), codeOrigin)); 198 199 } 199 200 … … 206 207 207 208 // Add a call out from JIT code, with an exception check. 
208 Call appendCallWithExceptionCheck(const FunctionPtr& function, unsigned exceptionInfo)209 Call appendCallWithExceptionCheck(const FunctionPtr& function, CodeOrigin codeOrigin) 209 210 { 210 211 Call functionCall = call(); 211 212 Jump exceptionCheck = branchTestPtr(NonZero, AbsoluteAddress(&globalData()->exception)); 212 m_calls.append(CallRecord(functionCall, function, exceptionCheck, exceptionInfo));213 m_calls.append(CallRecord(functionCall, function, exceptionCheck, codeOrigin)); 213 214 return functionCall; 214 215 } 215 216 216 217 // Add a call out from JIT code, with a fast exception check that tests if the return value is zero. 217 Call appendCallWithFastExceptionCheck(const FunctionPtr& function, unsigned exceptionInfo)218 Call appendCallWithFastExceptionCheck(const FunctionPtr& function, CodeOrigin codeOrigin) 218 219 { 219 220 Call functionCall = call(); 220 221 Jump exceptionCheck = branchTestPtr(Zero, GPRInfo::returnValueGPR); 221 m_calls.append(CallRecord(functionCall, function, exceptionCheck, exceptionInfo));222 m_calls.append(CallRecord(functionCall, function, exceptionCheck, codeOrigin)); 222 223 return functionCall; 223 224 } … … 312 313 } 313 314 314 void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, bool isCall, unsigned exceptionInfo)315 { 316 m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, isCall, exceptionInfo));315 void addJSCall(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, bool isCall, CodeOrigin codeOrigin) 316 { 317 m_jsCalls.append(JSCallRecord(fastCall, slowCall, targetToCheck, isCall, codeOrigin)); 317 318 } 318 319 … … 327 328 void fillInt32ToInteger(NodeIndex, GPRReg); 328 329 void fillToJS(NodeIndex, GPRReg); 330 331 #if ENABLE(DFG_OSR_EXIT) 332 void exitSpeculativeWithOSR(const OSRExit&, SpeculationRecovery*, Vector<BytecodeAndMachineOffset>& decodedCodeMap); 333 void linkOSRExits(SpeculativeJIT&); 334 #else 329 335 void jumpFromSpeculativeToNonSpeculative(const 
SpeculationCheck&, const EntryLocation&, SpeculationRecovery*, NodeToRegisterMap& checkNodeToRegisterMap, NodeToRegisterMap& entryNodeToRegisterMap); 330 336 void linkSpeculationChecks(SpeculativeJIT&, NonSpeculativeJIT&); 337 #endif 331 338 332 339 // The globalData, used to access constants such as the vPtrs. … … 387 394 388 395 struct JSCallRecord { 389 JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, bool isCall, unsigned exceptionInfo)396 JSCallRecord(Call fastCall, Call slowCall, DataLabelPtr targetToCheck, bool isCall, CodeOrigin codeOrigin) 390 397 : m_fastCall(fastCall) 391 398 , m_slowCall(slowCall) 392 399 , m_targetToCheck(targetToCheck) 393 400 , m_isCall(isCall) 394 , m_ exceptionInfo(exceptionInfo)401 , m_codeOrigin(codeOrigin) 395 402 { 396 403 } … … 400 407 DataLabelPtr m_targetToCheck; 401 408 bool m_isCall; 402 unsigned m_exceptionInfo;409 CodeOrigin m_codeOrigin; 403 410 }; 404 411 -
trunk/Source/JavaScriptCore/dfg/DFGNode.h
r94942 r94996 27 27 #define DFGNode_h 28 28 29 #include <wtf/Platform.h> 30 29 31 // Emit various logging information for debugging, including dumping the dataflow graphs. 30 32 #define ENABLE_DFG_DEBUG_VERBOSE 0 33 // Emit logging for OSR exit value recoveries at every node, not just nodes that 34 // actually has speculation checks. 35 #define ENABLE_DFG_VERBOSE_VALUE_RECOVERIES 0 31 36 // Enable generation of dynamic checks into the instruction stream. 32 37 #if !ASSERT_DISABLED … … 51 56 // Disable the SpeculativeJIT without having to touch Platform.h! 52 57 #define DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE 0 58 // Disable the non-speculative JIT and use OSR instead. 59 #define ENABLE_DFG_OSR_EXIT ENABLE_TIERED_COMPILATION 53 60 // Generate stats on how successful we were in making use of the DFG jit, and remaining on the hot path. 54 61 #define ENABLE_DFG_SUCCESS_STATS 0 … … 73 80 static const NodeIndex NoNode = UINT_MAX; 74 81 75 // Information used to map back from an exception to any handler/source information. 82 // Information used to map back from an exception to any handler/source information, 83 // and to implement OSR. 76 84 // (Presently implemented as a bytecode index). 77 typedef uint32_t ExceptionInfo; 85 class CodeOrigin { 86 public: 87 CodeOrigin() 88 : m_bytecodeIndex(std::numeric_limits<uint32_t>::max()) 89 { 90 } 91 92 explicit CodeOrigin(uint32_t bytecodeIndex) 93 : m_bytecodeIndex(bytecodeIndex) 94 { 95 } 96 97 bool isSet() const { return m_bytecodeIndex != std::numeric_limits<uint32_t>::max(); } 98 99 uint32_t bytecodeIndex() const 100 { 101 ASSERT(isSet()); 102 return m_bytecodeIndex; 103 } 104 105 private: 106 uint32_t m_bytecodeIndex; 107 }; 78 108 79 109 // Entries in the NodeType enum (below) are composed of an id, a result type (possibly none) … … 358 388 359 389 // Construct a node with up to 3 children, no immediate value. 
360 Node(NodeType op, ExceptionInfo exceptionInfo, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)390 Node(NodeType op, CodeOrigin codeOrigin, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) 361 391 : op(op) 362 , exceptionInfo(exceptionInfo)392 , codeOrigin(codeOrigin) 363 393 , m_virtualRegister(InvalidVirtualRegister) 364 394 , m_refCount(0) … … 371 401 372 402 // Construct a node with up to 3 children and an immediate value. 373 Node(NodeType op, ExceptionInfo exceptionInfo, OpInfo imm, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)403 Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) 374 404 : op(op) 375 , exceptionInfo(exceptionInfo)405 , codeOrigin(codeOrigin) 376 406 , m_virtualRegister(InvalidVirtualRegister) 377 407 , m_refCount(0) … … 385 415 386 416 // Construct a node with up to 3 children and two immediate values. 387 Node(NodeType op, ExceptionInfo exceptionInfo, OpInfo imm1, OpInfo imm2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)417 Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) 388 418 : op(op) 389 , exceptionInfo(exceptionInfo)419 , codeOrigin(codeOrigin) 390 420 , m_virtualRegister(InvalidVirtualRegister) 391 421 , m_refCount(0) … … 400 430 401 431 // Construct a node with a variable number of children and two immediate values. 
402 Node(VarArgTag, NodeType op, ExceptionInfo exceptionInfo, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)432 Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren) 403 433 : op(op) 404 , exceptionInfo(exceptionInfo)434 , codeOrigin(codeOrigin) 405 435 , m_virtualRegister(InvalidVirtualRegister) 406 436 , m_refCount(0) … … 644 674 return children.fixed.child1; 645 675 } 676 677 // This is useful if you want to do a fast check on the first child 678 // before also doing a check on the opcode. Use this with care and 679 // avoid it if possible. 680 NodeIndex child1Unchecked() 681 { 682 return children.fixed.child1; 683 } 646 684 647 685 NodeIndex child2() … … 672 710 NodeType op; 673 711 // Used to look up exception handling information (currently implemented as a bytecode index). 674 ExceptionInfo exceptionInfo;712 CodeOrigin codeOrigin; 675 713 // References to up to 3 children (0 for no child). 676 714 union { -
trunk/Source/JavaScriptCore/dfg/DFGNonSpeculativeJIT.cpp
r94942 r94996 435 435 void NonSpeculativeJIT::compile(SpeculationCheckIndexIterator& checkIterator, Node& node) 436 436 { 437 #if ENABLE(DFG_OSR_EXIT) 438 UNUSED_PARAM(checkIterator); 439 #else 437 440 // Check for speculation checks from the corresponding instruction in the 438 441 // speculative path. Do not check for NodeIndex 0, since this is checked … … 443 446 if (m_compileIndex && checkIterator.hasCheckAtIndex(m_compileIndex)) 444 447 trackEntry(m_jit.label()); 448 #endif 445 449 446 450 NodeType op = node.op; … … 1284 1288 { 1285 1289 // Check for speculation checks added at function entry (checking argument types). 1290 #if !ENABLE(DFG_OSR_EXIT) 1286 1291 if (checkIterator.hasCheckAtIndex(m_compileIndex)) 1287 1292 trackEntry(m_jit.label()); 1293 #endif 1288 1294 1289 1295 ASSERT(!m_compileIndex); -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r94942 r94996 145 145 } 146 146 147 #if !ENABLE(DFG_OSR_EXIT) 147 148 SpeculationCheck::SpeculationCheck(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex) 148 149 : m_check(check) … … 171 172 } 172 173 } 174 #endif 175 176 #ifndef NDEBUG 177 void ValueSource::dump(FILE* out) const 178 { 179 fprintf(out, "Node(%d)", m_nodeIndex); 180 } 181 182 void ValueRecovery::dump(FILE* out) const 183 { 184 switch (technique()) { 185 case AlreadyInRegisterFile: 186 fprintf(out, "-"); 187 break; 188 case InGPR: 189 fprintf(out, "%%%s", GPRInfo::debugName(gpr())); 190 break; 191 case UnboxedInt32InGPR: 192 fprintf(out, "int32(%%%s)", GPRInfo::debugName(gpr())); 193 break; 194 case InFPR: 195 fprintf(out, "%%%s", FPRInfo::debugName(fpr())); 196 break; 197 case DisplacedInRegisterFile: 198 fprintf(out, "*%d", virtualRegister()); 199 break; 200 case Constant: 201 fprintf(out, "[%s]", constant().description()); 202 break; 203 case DontKnow: 204 fprintf(out, "!"); 205 break; 206 default: 207 fprintf(out, "?%d", technique()); 208 break; 209 } 210 } 211 #endif 212 213 #if ENABLE(DFG_OSR_EXIT) 214 OSRExit::OSRExit(MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex) 215 : m_check(check) 216 , m_nodeIndex(jit->m_compileIndex) 217 , m_bytecodeIndex(jit->m_bytecodeIndexForOSR) 218 , m_recoveryIndex(recoveryIndex) 219 , m_arguments(jit->m_arguments.size()) 220 , m_variables(jit->m_variables.size()) 221 , m_lastSetOperand(jit->m_lastSetOperand) 222 { 223 ASSERT(m_bytecodeIndex != std::numeric_limits<uint32_t>::max()); 224 for (unsigned argument = 0; argument < m_arguments.size(); ++argument) 225 m_arguments[argument] = jit->computeValueRecoveryFor(jit->m_arguments[argument]); 226 for (unsigned variable = 0; variable < m_variables.size(); ++variable) 227 m_variables[variable] = jit->computeValueRecoveryFor(jit->m_variables[variable]); 228 } 229 230 #ifndef NDEBUG 231 void OSRExit::dump(FILE* out) const 232 { 233 for (unsigned argument = 0; argument < 
m_arguments.size(); ++argument) 234 m_arguments[argument].dump(out); 235 fprintf(out, " : "); 236 for (unsigned variable = 0; variable < m_variables.size(); ++variable) 237 m_variables[variable].dump(out); 238 } 239 #endif 240 #endif 173 241 174 242 GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat) … … 651 719 652 720 case SetLocal: { 721 // SetLocal doubles as a hint as to where a node will be stored and 722 // as a speculation point. So before we speculate make sure that we 723 // know where the child of this node needs to go in the virtual 724 // register file. 725 compileMovHint(node); 726 727 // As far as OSR is concerned, we're on the bytecode index corresponding 728 // to the *next* instruction, since we've already "executed" the 729 // SetLocal and whatever other DFG Nodes are associated with the same 730 // bytecode index as the SetLocal. 731 ASSERT(m_bytecodeIndexForOSR == node.codeOrigin.bytecodeIndex()); 732 Node& nextNode = m_jit.graph()[m_compileIndex+1]; 733 734 // This assertion will fail if we ever emit multiple SetLocal's for 735 // a single bytecode instruction. That's unlikely to happen. But if 736 // it does, the solution is to to have this perform a search until 737 // it finds a Node with a different bytecode index from the one we've 738 // got, and to abstractly execute the SetLocal's along the way. Or, 739 // better yet, handle all of the SetLocal's at once: abstract interpret 740 // all of them, then emit code for all of them, with OSR exiting to 741 // the next non-SetLocal instruction. Note the special case for when 742 // both this SetLocal and the next op have a bytecode index of 0; this 743 // occurs for SetLocal's generated at the top of the code block to 744 // initialize locals to undefined. Ideally, we'd have a way of marking 745 // in the CodeOrigin that a SetLocal is synthetic. This will make the 746 // assertion more sensible-looking. 
We should then also assert that 747 // synthetic SetLocal's don't have speculation checks, since they 748 // should only be dropping values that we statically know we are 749 // allowed to drop into the variables. DFGPropagator will guarantee 750 // this, since it should have at least an approximation (if not 751 // exact knowledge) of the type of the SetLocal's child node, and 752 // should merge that information into the local that is being set. 753 ASSERT(m_bytecodeIndexForOSR != nextNode.codeOrigin.bytecodeIndex() 754 || (!m_bytecodeIndexForOSR && !nextNode.codeOrigin.bytecodeIndex())); 755 m_bytecodeIndexForOSR = nextNode.codeOrigin.bytecodeIndex(); 756 653 757 PredictedType predictedType = m_jit.graph().getPrediction(node.local()); 654 758 if (isInt32Prediction(predictedType)) { … … 671 775 noResult(m_compileIndex); 672 776 } 777 778 // Indicate that it's no longer necessary to retrieve the value of 779 // this bytecode variable from registers or other locations in the register file. 
780 valueSourceReferenceForOperand(node.local()) = ValueSource(); 673 781 break; 674 782 } … … 1370 1478 } 1371 1479 } 1372 1480 1373 1481 if (node.hasResult() && node.mustGenerate()) 1374 1482 use(m_compileIndex); 1483 } 1484 1485 void SpeculativeJIT::compileMovHint(Node& node) 1486 { 1487 ASSERT(node.op == SetLocal); 1488 1489 setNodeIndexForOperand(node.child1(), node.local()); 1490 m_lastSetOperand = node.local(); 1375 1491 } 1376 1492 … … 1383 1499 m_jit.breakpoint(); 1384 1500 #endif 1501 1502 for (size_t i = 0; i < m_arguments.size(); ++i) 1503 m_arguments[i] = ValueSource(); 1504 for (size_t i = 0; i < m_variables.size(); ++i) 1505 m_variables[i] = ValueSource(); 1506 m_lastSetOperand = std::numeric_limits<int>::max(); 1507 m_bytecodeIndexForOSR = std::numeric_limits<uint32_t>::max(); 1385 1508 1386 1509 for (; m_compileIndex < block.end; ++m_compileIndex) { 1387 1510 Node& node = m_jit.graph()[m_compileIndex]; 1388 if (!node.shouldGenerate()) 1389 continue; 1390 1511 m_bytecodeIndexForOSR = node.codeOrigin.bytecodeIndex(); 1512 if (!node.shouldGenerate()) { 1391 1513 #if ENABLE(DFG_DEBUG_VERBOSE) 1392 fprintf(stderr, "SpeculativeJIT generating Node @%d at JIT offset 0x%x ", (int)m_compileIndex, m_jit.debugOffset()); 1514 fprintf(stderr, "SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex(), m_jit.debugOffset()); 1515 #endif 1516 if (node.op == SetLocal) 1517 compileMovHint(node); 1518 } else { 1519 1520 #if ENABLE(DFG_DEBUG_VERBOSE) 1521 fprintf(stderr, "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex(), m_jit.debugOffset()); 1393 1522 #endif 1394 1523 #if ENABLE(DFG_JIT_BREAK_ON_EVERY_NODE) 1395 m_jit.breakpoint();1524 m_jit.breakpoint(); 1396 1525 #endif 1397 checkConsistency();1398 compile(node);1399 if (!m_compileOkay) {1526 checkConsistency(); 1527 compile(node); 1528 if (!m_compileOkay) { 1400 1529 #if 
ENABLE(DYNAMIC_TERMINATE_SPECULATION) 1401 m_compileOkay = true;1402 m_compileIndex = block.end;1403 clearGenerationInfo();1530 m_compileOkay = true; 1531 m_compileIndex = block.end; 1532 clearGenerationInfo(); 1404 1533 #endif 1405 return; 1406 } 1534 return; 1535 } 1536 1407 1537 #if ENABLE(DFG_DEBUG_VERBOSE) 1408 if (node.hasResult()) 1409 fprintf(stderr, "-> %s\n", dataFormatToString(m_generationInfo[node.virtualRegister()].registerFormat())); 1410 else 1411 fprintf(stderr, "\n"); 1538 if (node.hasResult()) { 1539 GenerationInfo& info = m_generationInfo[node.virtualRegister()]; 1540 fprintf(stderr, "-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister()); 1541 if (info.registerFormat() != DataFormatNone) { 1542 if (info.registerFormat() == DataFormatDouble) 1543 fprintf(stderr, ", %s", FPRInfo::debugName(info.fpr())); 1544 else 1545 fprintf(stderr, ", %s", GPRInfo::debugName(info.gpr())); 1546 } 1547 fprintf(stderr, " "); 1548 } else 1549 fprintf(stderr, " "); 1412 1550 #endif 1413 checkConsistency(); 1551 } 1552 1553 #if ENABLE(DFG_VERBOSE_VALUE_RECOVERIES) 1554 for (int operand = -m_arguments.size() - RegisterFile::CallFrameHeaderSize; operand < -RegisterFile::CallFrameHeaderSize; ++operand) 1555 computeValueRecoveryFor(operand).dump(stderr); 1556 1557 fprintf(stderr, " : "); 1558 1559 for (int operand = 0; operand < (int)m_variables.size(); ++operand) 1560 computeValueRecoveryFor(operand).dump(stderr); 1561 #endif 1562 1563 #if ENABLE(DFG_DEBUG_VERBOSE) 1564 fprintf(stderr, "\n"); 1565 #endif 1566 1567 if (node.shouldGenerate()) 1568 checkConsistency(); 1414 1569 } 1415 1570 } … … 1420 1575 { 1421 1576 ASSERT(!m_compileIndex); 1577 m_bytecodeIndexForOSR = 0; 1422 1578 for (int i = 0; i < m_jit.codeBlock()->m_numParameters; ++i) { 1423 1579 VirtualRegister virtualRegister = (VirtualRegister)(m_jit.codeBlock()->thisRegister() + i); … … 1455 1611 for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) { 1456 1612 
compile(*m_jit.graph().m_blocks[m_block]); 1457 #if !ENABLE(DYNAMIC_ OPTIMIZATION)1613 #if !ENABLE(DYNAMIC_TERMINATE_SPECULATION) 1458 1614 if (!m_compileOkay) 1459 1615 return false; … … 1464 1620 } 1465 1621 1622 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource) 1623 { 1624 if (!valueSource.isSet()) 1625 return ValueRecovery::alreadyInRegisterFile(); 1626 1627 if (m_jit.isConstant(valueSource.nodeIndex())) 1628 return ValueRecovery::constant(m_jit.valueOfJSConstant(valueSource.nodeIndex())); 1629 1630 Node* nodePtr = &m_jit.graph()[valueSource.nodeIndex()]; 1631 if (!nodePtr->shouldGenerate()) { 1632 // It's legitimately dead. As in, nobody will ever use this node, or operand, 1633 // ever. Set it to Undefined to make the GC happy after the OSR. 1634 return ValueRecovery::constant(jsUndefined()); 1635 } 1636 1637 GenerationInfo* infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; 1638 if (!infoPtr->alive() || infoPtr->nodeIndex() != valueSource.nodeIndex()) { 1639 // Try to see if there is an alternate node that would contain the value we want. 1640 // There are four possibilities: 1641 // 1642 // ValueToNumber: If the only live version of the value is a ValueToNumber node 1643 // then it means that all remaining uses of the value would have performed a 1644 // ValueToNumber conversion anyway. Thus, we can substitute ValueToNumber. 1645 // 1646 // ValueToInt32: Likewise, if the only remaining live version of the value is 1647 // ValueToInt32, then we can use it. But if there is both a ValueToInt32 1648 // and a ValueToNumber, then we better go with ValueToNumber because it 1649 // means that some remaining uses would have converted to number while 1650 // others would have converted to Int32. 1651 // 1652 // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber 1653 // then the only remaining uses are ones that want a properly formed number 1654 // rather than a UInt32 intermediate. 
1655 // 1656 // The reverse of the above: This node could be a UInt32ToNumber, but its 1657 // alternative is still alive. This means that the only remaining uses of 1658 // the number would be fine with a UInt32 intermediate. 1659 1660 bool found = false; 1661 1662 if (nodePtr->op == UInt32ToNumber) { 1663 NodeIndex nodeIndex = nodePtr->child1(); 1664 nodePtr = &m_jit.graph()[nodeIndex]; 1665 infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; 1666 if (infoPtr->alive() && infoPtr->nodeIndex() == nodeIndex) 1667 found = true; 1668 } 1669 1670 if (!found) { 1671 NodeIndex valueToNumberIndex = NoNode; 1672 NodeIndex valueToInt32Index = NoNode; 1673 NodeIndex uint32ToNumberIndex = NoNode; 1674 1675 for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) { 1676 GenerationInfo& info = m_generationInfo[virtualRegister]; 1677 if (!info.alive()) 1678 continue; 1679 if (info.nodeIndex() == NoNode) 1680 continue; 1681 Node& node = m_jit.graph()[info.nodeIndex()]; 1682 if (node.child1Unchecked() != valueSource.nodeIndex()) 1683 continue; 1684 switch (node.op) { 1685 case ValueToNumber: 1686 valueToNumberIndex = info.nodeIndex(); 1687 break; 1688 case ValueToInt32: 1689 valueToInt32Index = info.nodeIndex(); 1690 break; 1691 case UInt32ToNumber: 1692 uint32ToNumberIndex = info.nodeIndex(); 1693 break; 1694 default: 1695 break; 1696 } 1697 } 1698 1699 NodeIndex nodeIndexToUse; 1700 if (valueToNumberIndex != NoNode) 1701 nodeIndexToUse = valueToNumberIndex; 1702 else if (valueToInt32Index != NoNode) 1703 nodeIndexToUse = valueToInt32Index; 1704 else if (uint32ToNumberIndex != NoNode) 1705 nodeIndexToUse = uint32ToNumberIndex; 1706 else 1707 nodeIndexToUse = NoNode; 1708 1709 if (nodeIndexToUse != NoNode) { 1710 nodePtr = &m_jit.graph()[nodeIndexToUse]; 1711 infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; 1712 ASSERT(infoPtr->alive() && infoPtr->nodeIndex() == nodeIndexToUse); 1713 found = true; 1714 } 1715 } 1716 1717 if 
(!found) 1718 return ValueRecovery::constant(jsUndefined()); 1719 } 1720 1721 ASSERT(infoPtr->alive()); 1722 1723 if (infoPtr->registerFormat() != DataFormatNone) { 1724 if (infoPtr->registerFormat() == DataFormatDouble) 1725 return ValueRecovery::inFPR(infoPtr->fpr()); 1726 return ValueRecovery::inGPR(infoPtr->gpr(), infoPtr->registerFormat()); 1727 } 1728 if (infoPtr->spillFormat() != DataFormatNone) 1729 return ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(nodePtr->virtualRegister())); 1730 1731 ASSERT_NOT_REACHED(); 1732 return ValueRecovery(); 1733 } 1734 1466 1735 } } // namespace JSC::DFG 1467 1736 -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
r94942 r94996 67 67 }; 68 68 69 #if !ENABLE(DFG_OSR_EXIT) 69 70 // === SpeculationCheck === 70 71 // … … 92 93 }; 93 94 typedef SegmentedVector<SpeculationCheck, 16> SpeculationCheckVector; 94 95 #endif // !ENABLE(DFG_OSR_EXIT) 96 97 class ValueSource { 98 public: 99 ValueSource() 100 : m_nodeIndex(NoNode) 101 { 102 } 103 104 explicit ValueSource(NodeIndex nodeIndex) 105 : m_nodeIndex(nodeIndex) 106 { 107 } 108 109 bool isSet() const 110 { 111 return m_nodeIndex != NoNode; 112 } 113 114 NodeIndex nodeIndex() const 115 { 116 ASSERT(isSet()); 117 return m_nodeIndex; 118 } 119 120 #ifndef NDEBUG 121 void dump(FILE* out) const; 122 #endif 123 124 private: 125 NodeIndex m_nodeIndex; 126 }; 127 128 // Describes how to recover a given bytecode virtual register at a given 129 // code point. 130 enum ValueRecoveryTechnique { 131 // It's already in the register file at the right location. 132 AlreadyInRegisterFile, 133 // It's in a register. 134 InGPR, 135 UnboxedInt32InGPR, 136 InFPR, 137 // It's in the register file, but at a different location. 138 DisplacedInRegisterFile, 139 // It's a constant. 140 Constant, 141 // Don't know how to recover it. 
142 DontKnow 143 }; 144 145 class ValueRecovery { 146 public: 147 ValueRecovery() 148 : m_technique(DontKnow) 149 { 150 } 151 152 static ValueRecovery alreadyInRegisterFile() 153 { 154 ValueRecovery result; 155 result.m_technique = AlreadyInRegisterFile; 156 return result; 157 } 158 159 static ValueRecovery inGPR(GPRReg gpr, DataFormat dataFormat) 160 { 161 ASSERT(dataFormat != DataFormatNone); 162 ValueRecovery result; 163 if (dataFormat == DataFormatInteger) 164 result.m_technique = UnboxedInt32InGPR; 165 else 166 result.m_technique = InGPR; 167 result.m_source.gpr = gpr; 168 return result; 169 } 170 171 static ValueRecovery inFPR(FPRReg fpr) 172 { 173 ValueRecovery result; 174 result.m_technique = InFPR; 175 result.m_source.fpr = fpr; 176 return result; 177 } 178 179 static ValueRecovery displacedInRegisterFile(VirtualRegister virtualReg) 180 { 181 ValueRecovery result; 182 result.m_technique = DisplacedInRegisterFile; 183 result.m_source.virtualReg = virtualReg; 184 return result; 185 } 186 187 static ValueRecovery constant(JSValue value) 188 { 189 ValueRecovery result; 190 result.m_technique = Constant; 191 result.m_source.constant = JSValue::encode(value); 192 return result; 193 } 194 195 ValueRecoveryTechnique technique() const { return m_technique; } 196 197 GPRReg gpr() const 198 { 199 ASSERT(m_technique == InGPR || m_technique == UnboxedInt32InGPR); 200 return m_source.gpr; 201 } 202 203 FPRReg fpr() const 204 { 205 ASSERT(m_technique == InFPR); 206 return m_source.fpr; 207 } 208 209 VirtualRegister virtualRegister() const 210 { 211 ASSERT(m_technique == DisplacedInRegisterFile); 212 return m_source.virtualReg; 213 } 214 215 JSValue constant() const 216 { 217 ASSERT(m_technique == Constant); 218 return JSValue::decode(m_source.constant); 219 } 220 221 #ifndef NDEBUG 222 void dump(FILE* out) const; 223 #endif 224 225 private: 226 ValueRecoveryTechnique m_technique; 227 union { 228 GPRReg gpr; 229 FPRReg fpr; 230 VirtualRegister virtualReg; 231 
EncodedJSValue constant; 232 } m_source; 233 }; 234 235 #if ENABLE(DFG_OSR_EXIT) 236 // === OSRExit === 237 // 238 // This structure describes how to exit the speculative path by 239 // going into baseline code. 240 struct OSRExit { 241 OSRExit(MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0); 242 243 MacroAssembler::Jump m_check; 244 NodeIndex m_nodeIndex; 245 unsigned m_bytecodeIndex; 246 247 unsigned m_recoveryIndex; 248 249 // Convenient way of iterating over ValueRecoveries while being 250 // generic over argument versus variable. 251 int numberOfRecoveries() const { return m_arguments.size() + m_variables.size(); } 252 const ValueRecovery& valueRecovery(int index) const 253 { 254 if (index < (int)m_arguments.size()) 255 return m_arguments[index]; 256 return m_variables[index - m_arguments.size()]; 257 } 258 bool isArgument(int index) const { return index < (int)m_arguments.size(); } 259 bool isVariable(int index) const { return !isArgument(index); } 260 int argumentForIndex(int index) const 261 { 262 return index; 263 } 264 int variableForIndex(int index) const 265 { 266 return index - m_arguments.size(); 267 } 268 int operandForIndex(int index) const 269 { 270 if (index < (int)m_arguments.size()) 271 return index - m_arguments.size() - RegisterFile::CallFrameHeaderSize; 272 return index - m_arguments.size(); 273 } 274 275 #ifndef NDEBUG 276 void dump(FILE* out) const; 277 #endif 278 279 Vector<ValueRecovery, 0> m_arguments; 280 Vector<ValueRecovery, 0> m_variables; 281 int m_lastSetOperand; 282 }; 283 typedef SegmentedVector<OSRExit, 16> OSRExitVector; 284 #endif // ENABLE(DFG_OSR_EXIT) 95 285 96 286 // === SpeculativeJIT === … … 106 296 class SpeculativeJIT : public JITCodeGenerator { 107 297 friend struct SpeculationCheck; 108 public: 109 SpeculativeJIT(JITCompiler& jit) 110 : JITCodeGenerator(jit, true) 111 , m_compileOkay(true) 112 { 113 } 298 friend struct OSRExit; 299 public: 300 SpeculativeJIT(JITCompiler&); 114 301 115 302 bool 
compile(); … … 117 304 // Retrieve the list of bail-outs from the speculative path, 118 305 // and additional recovery information. 306 #if !ENABLE(DFG_OSR_EXIT) 119 307 SpeculationCheckVector& speculationChecks() 120 308 { 121 309 return m_speculationChecks; 122 310 } 311 #else 312 OSRExitVector& osrExits() 313 { 314 return m_osrExits; 315 } 316 #endif 123 317 SpeculationRecovery* speculationRecovery(size_t index) 124 318 { … … 140 334 141 335 void compile(Node&); 336 void compileMovHint(Node&); 142 337 void compile(BasicBlock&); 143 338 … … 204 399 if (!m_compileOkay) 205 400 return; 401 #if !ENABLE(DFG_OSR_EXIT) 206 402 m_speculationChecks.append(SpeculationCheck(jumpToFail, this)); 403 #else 404 m_osrExits.append(OSRExit(jumpToFail, this)); 405 #endif 207 406 } 208 407 // Add a speculation check with additional recovery. … … 212 411 return; 213 412 m_speculationRecoveryList.append(recovery); 413 #if !ENABLE(DFG_OSR_EXIT) 214 414 m_speculationChecks.append(SpeculationCheck(jumpToFail, this, m_speculationRecoveryList.size())); 415 #else 416 m_osrExits.append(OSRExit(jumpToFail, this, m_speculationRecoveryList.size())); 417 #endif 215 418 } 216 419 … … 235 438 template<bool strict> 236 439 GPRReg fillSpeculateIntInternal(NodeIndex, DataFormat& returnFormat); 237 440 238 441 // It is possible, during speculative generation, to reach a situation in which we 239 442 // can statically determine a speculation will fail (for example, when two nodes … … 241 444 // flag is cleared, indicating no further code generation should take place. 242 445 bool m_compileOkay; 446 #if !ENABLE(DFG_OSR_EXIT) 243 447 // This vector tracks bail-outs from the speculative path to the non-speculative one. 244 448 SpeculationCheckVector m_speculationChecks; 449 #else 450 // This vector tracks bail-outs from the speculative path to the old JIT. 
451 OSRExitVector m_osrExits; 452 #endif 245 453 // Some bail-outs need to record additional information recording specific recovery 246 454 // to be performed (for example, on detected overflow from an add, we may need to 247 455 // reverse the addition if an operand is being overwritten). 248 456 Vector<SpeculationRecovery, 16> m_speculationRecoveryList; 457 458 // Tracking for which nodes are currently holding the values of arguments and bytecode 459 // operand-indexed variables. 460 461 ValueSource valueSourceForOperand(int operand) 462 { 463 return valueSourceReferenceForOperand(operand); 464 } 465 466 void setNodeIndexForOperand(NodeIndex nodeIndex, int operand) 467 { 468 valueSourceReferenceForOperand(operand) = ValueSource(nodeIndex); 469 } 470 471 // Call this with care, since it both returns a reference into an array 472 // and potentially resizes the array. So it would not be right to call this 473 // twice and then perform operands on both references, since the one from 474 // the first call may no longer be valid. 
475 ValueSource& valueSourceReferenceForOperand(int operand) 476 { 477 if (operandIsArgument(operand)) { 478 int argument = operand + m_arguments.size() + RegisterFile::CallFrameHeaderSize; 479 return m_arguments[argument]; 480 } 481 482 if ((unsigned)operand >= m_variables.size()) 483 m_variables.resize(operand + 1); 484 485 return m_variables[operand]; 486 } 487 488 Vector<ValueSource, 0> m_arguments; 489 Vector<ValueSource, 0> m_variables; 490 int m_lastSetOperand; 491 uint32_t m_bytecodeIndexForOSR; 492 493 ValueRecovery computeValueRecoveryFor(const ValueSource&); 494 495 ValueRecovery computeValueRecoveryFor(int operand) 496 { 497 return computeValueRecoveryFor(valueSourceForOperand(operand)); 498 } 249 499 }; 250 500 … … 466 716 }; 467 717 468 469 718 // === SpeculationCheckIndexIterator === 470 719 // 471 720 // This class is used by the non-speculative JIT to check which 472 721 // nodes require entry points from the speculative path. 722 #if ENABLE(DFG_OSR_EXIT) 723 // This becomes a stub if OSR is enabled. 724 class SpeculationCheckIndexIterator { 725 public: 726 SpeculationCheckIndexIterator() { } 727 }; 728 #else 473 729 class SpeculationCheckIndexIterator { 474 730 public: … … 496 752 SpeculationCheckVector::Iterator m_end; 497 753 }; 498 754 #endif 755 756 inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit) 757 : JITCodeGenerator(jit, true) 758 , m_compileOkay(true) 759 , m_arguments(jit.codeBlock()->m_numParameters) 760 , m_variables(jit.codeBlock()->m_numVars) 761 , m_lastSetOperand(std::numeric_limits<int>::max()) 762 , m_bytecodeIndexForOSR(std::numeric_limits<uint32_t>::max()) 763 { 764 } 499 765 500 766 } } // namespace JSC::DFG -
trunk/Source/JavaScriptCore/jit/JIT.cpp
r94942 r94996 209 209 210 210 m_labels[m_bytecodeOffset] = label(); 211 212 #if ENABLE(TIERED_COMPILATION) 213 if (m_canBeOptimized) 214 m_jitCodeMapEncoder.append(m_bytecodeOffset, differenceBetween(m_startOfCode, label())); 215 #endif 211 216 212 217 switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { … … 374 379 } 375 380 376 377 381 void JIT::privateCompileLinkPass() 378 382 { … … 503 507 #if ENABLE(TIERED_COMPILATION) 504 508 m_canBeOptimized = m_codeBlock->canCompileWithDFG(); 509 if (m_canBeOptimized) 510 m_startOfCode = label(); 505 511 #endif 506 512 … … 642 648 info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation; 643 649 } 650 651 #if ENABLE(TIERED_COMPILATION) 652 if (m_canBeOptimized) 653 m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish()); 654 #endif 644 655 645 656 if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) -
trunk/Source/JavaScriptCore/jit/JIT.h
r94920 r94996 41 41 42 42 #include "CodeBlock.h" 43 #include "CompactJITCodeMap.h" 43 44 #include "Interpreter.h" 44 45 #include "JSInterfaceJIT.h" … … 1055 1056 #if ENABLE(TIERED_COMPILATION) 1056 1057 bool m_canBeOptimized; 1058 Label m_startOfCode; 1059 CompactJITCodeMap::Encoder m_jitCodeMapEncoder; 1057 1060 #endif 1058 1061 } JIT_CLASS_ALIGNMENT; -
trunk/Source/JavaScriptCore/runtime/JSGlobalData.cpp
r94920 r94996 192 192 , interpreter(0) 193 193 , heap(this, heapSize) 194 #if ENABLE(TIERED_COMPILATION) 195 , sizeOfLastOSRScratchBuffer(0) 196 #endif 194 197 , dynamicGlobalObject(0) 195 198 , cachedUTCOffset(std::numeric_limits<double>::quiet_NaN()) … … 356 359 #if ENABLE(REGEXP_TRACING) 357 360 delete m_rtTraceList; 361 #endif 362 363 #if ENABLE(TIERED_COMPILATION) 364 for (unsigned i = 0; i < osrScratchBuffers.size(); ++i) 365 fastFree(osrScratchBuffers[i]); 358 366 #endif 359 367 } -
trunk/Source/JavaScriptCore/runtime/JSGlobalData.h
r94920 r94996 235 235 int64_t debugDataBuffer[64]; 236 236 #endif 237 #if ENABLE(TIERED_COMPILATION) 238 Vector<void*> osrScratchBuffers; 239 size_t sizeOfLastOSRScratchBuffer; 240 241 void* osrScratchBufferForSize(size_t size) 242 { 243 if (!size) 244 return 0; 245 246 if (size > sizeOfLastOSRScratchBuffer) { 247 // Protect against a N^2 memory usage pathology by ensuring 248 // that at worst, we get a geometric series, meaning that the 249 // total memory usage is somewhere around 250 // max(scratch buffer size) * 4. 251 sizeOfLastOSRScratchBuffer = size * 2; 252 253 osrScratchBuffers.append(fastMalloc(sizeOfLastOSRScratchBuffer)); 254 } 255 256 return osrScratchBuffers.last(); 257 } 258 #endif 237 259 #endif 238 260 -
trunk/Source/JavaScriptCore/runtime/JSValue.cpp
r94701 r94996 120 120 char* JSValue::description() 121 121 { 122 static const size_t size = 32;122 static const size_t size = 64; 123 123 static char description[size]; 124 124
Note: See TracChangeset
for help on using the changeset viewer.