Changeset 100244 in webkit

- Timestamp: Nov 14, 2011, 11:04:10 PM
- Location: trunk/Source/JavaScriptCore
- Files: 4 deleted, 12 edited
trunk/Source/JavaScriptCore/CMakeLists.txt
Diff r100205 → r100244:

     dfg/DFGDriver.cpp
     dfg/DFGGraph.cpp
-    dfg/DFGJITCodeGenerator.cpp
-    dfg/DFGJITCodeGenerator32_64.cpp
-    dfg/DFGJITCodeGenerator64.cpp
     dfg/DFGJITCompiler.cpp
     dfg/DFGOSREntry.cpp
trunk/Source/JavaScriptCore/ChangeLog
Diff r100242 → r100244 — new entry added above the existing ones:

2011-11-14  Filip Pizlo  <fpizlo@apple.com>

        DFG::SpeculativeJIT and DFG::JITCodeGenerator should be combined
        https://bugs.webkit.org/show_bug.cgi?id=72348

        Reviewed by Gavin Barraclough.

        Moved all of JITCodeGenerator into SpeculativeJIT.

        * CMakeLists.txt:
        * GNUmakefile.list.am:
        * JavaScriptCore.xcodeproj/project.pbxproj:
        * Target.pri:
        * dfg/DFGJITCodeGenerator.cpp: Removed.
        * dfg/DFGJITCodeGenerator.h: Removed.
        * dfg/DFGJITCodeGenerator32_64.cpp: Removed.
        * dfg/DFGJITCodeGenerator64.cpp: Removed.
        * dfg/DFGJITCompiler.cpp:
        * dfg/DFGRepatch.cpp:
        (JSC::DFG::generateProtoChainAccessStub):
        (JSC::DFG::tryCacheGetByID):
        (JSC::DFG::tryCachePutByID):
        * dfg/DFGSpeculativeJIT.cpp:
        (JSC::DFG::SpeculativeJIT::clearGenerationInfo):
        (JSC::DFG::SpeculativeJIT::fillStorage):
        (JSC::DFG::SpeculativeJIT::useChildren):
        (JSC::DFG::SpeculativeJIT::isStrictInt32):
        (JSC::DFG::SpeculativeJIT::isKnownInteger):
        (JSC::DFG::SpeculativeJIT::isKnownNumeric):
        (JSC::DFG::SpeculativeJIT::isKnownCell):
        (JSC::DFG::SpeculativeJIT::isKnownNotCell):
        (JSC::DFG::SpeculativeJIT::isKnownNotInteger):
        (JSC::DFG::SpeculativeJIT::isKnownNotNumber):
        (JSC::DFG::SpeculativeJIT::isKnownBoolean):
        (JSC::DFG::SpeculativeJIT::writeBarrier):
        (JSC::DFG::SpeculativeJIT::markCellCard):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeCompare):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeStrictEq):
        (JSC::DFG::dataFormatString):
        (JSC::DFG::SpeculativeJIT::dump):
        (JSC::DFG::SpeculativeJIT::checkConsistency):
        (JSC::DFG::GPRTemporary::GPRTemporary):
        (JSC::DFG::GPRTemporary::adopt):
        (JSC::DFG::FPRTemporary::FPRTemporary):
        * dfg/DFGSpeculativeJIT.h:
        (JSC::DFG::SpeculativeJIT::at):
        (JSC::DFG::SpeculativeJIT::lock):
        (JSC::DFG::SpeculativeJIT::unlock):
        (JSC::DFG::SpeculativeJIT::canReuse):
        (JSC::DFG::SpeculativeJIT::reuse):
        (JSC::DFG::SpeculativeJIT::allocate):
        (JSC::DFG::SpeculativeJIT::tryAllocate):
        (JSC::DFG::SpeculativeJIT::fprAllocate):
        (JSC::DFG::SpeculativeJIT::isFilled):
        (JSC::DFG::SpeculativeJIT::isFilledDouble):
        (JSC::DFG::SpeculativeJIT::use):
        (JSC::DFG::SpeculativeJIT::selectScratchGPR):
        (JSC::DFG::SpeculativeJIT::silentSpillGPR):
        (JSC::DFG::SpeculativeJIT::silentSpillFPR):
        (JSC::DFG::SpeculativeJIT::silentFillGPR):
        (JSC::DFG::SpeculativeJIT::silentFillFPR):
        (JSC::DFG::SpeculativeJIT::silentSpillAllRegisters):
        (JSC::DFG::SpeculativeJIT::silentFillAllRegisters):
        (JSC::DFG::SpeculativeJIT::boxDouble):
        (JSC::DFG::SpeculativeJIT::unboxDouble):
        (JSC::DFG::SpeculativeJIT::spill):
        (JSC::DFG::SpeculativeJIT::isConstant):
        (JSC::DFG::SpeculativeJIT::isJSConstant):
        (JSC::DFG::SpeculativeJIT::isInt32Constant):
        (JSC::DFG::SpeculativeJIT::isDoubleConstant):
        (JSC::DFG::SpeculativeJIT::isNumberConstant):
        (JSC::DFG::SpeculativeJIT::isBooleanConstant):
        (JSC::DFG::SpeculativeJIT::isFunctionConstant):
        (JSC::DFG::SpeculativeJIT::valueOfInt32Constant):
        (JSC::DFG::SpeculativeJIT::valueOfNumberConstant):
        (JSC::DFG::SpeculativeJIT::addressOfDoubleConstant):
        (JSC::DFG::SpeculativeJIT::valueOfJSConstant):
        (JSC::DFG::SpeculativeJIT::valueOfBooleanConstant):
        (JSC::DFG::SpeculativeJIT::valueOfFunctionConstant):
        (JSC::DFG::SpeculativeJIT::isNullConstant):
        (JSC::DFG::SpeculativeJIT::identifier):
        (JSC::DFG::SpeculativeJIT::flushRegisters):
        (JSC::DFG::SpeculativeJIT::isFlushed):
        (JSC::DFG::SpeculativeJIT::valueOfJSConstantAsImmPtr):
        (JSC::DFG::SpeculativeJIT::bitOp):
        (JSC::DFG::SpeculativeJIT::shiftOp):
        (JSC::DFG::SpeculativeJIT::detectPeepHoleBranch):
        (JSC::DFG::SpeculativeJIT::addressOfCallData):
        (JSC::DFG::SpeculativeJIT::tagOfCallData):
        (JSC::DFG::SpeculativeJIT::payloadOfCallData):
        (JSC::DFG::SpeculativeJIT::integerResult):
        (JSC::DFG::SpeculativeJIT::noResult):
        (JSC::DFG::SpeculativeJIT::cellResult):
        (JSC::DFG::SpeculativeJIT::booleanResult):
        (JSC::DFG::SpeculativeJIT::jsValueResult):
        (JSC::DFG::SpeculativeJIT::storageResult):
        (JSC::DFG::SpeculativeJIT::doubleResult):
        (JSC::DFG::SpeculativeJIT::initConstantInfo):
        (JSC::DFG::SpeculativeJIT::resetCallArguments):
        (JSC::DFG::SpeculativeJIT::addCallArgument):
        (JSC::DFG::SpeculativeJIT::setupArguments):
        (JSC::DFG::SpeculativeJIT::setupArgumentsExecState):
        (JSC::DFG::SpeculativeJIT::setupArgumentsWithExecState):
        (JSC::DFG::SpeculativeJIT::setupTwoStubArgs):
        (JSC::DFG::SpeculativeJIT::setupStubArguments):
        (JSC::DFG::SpeculativeJIT::callOperation):
        (JSC::DFG::SpeculativeJIT::appendCallWithExceptionCheck):
        (JSC::DFG::SpeculativeJIT::appendCallWithExceptionCheckSetResult):
        (JSC::DFG::SpeculativeJIT::setupResults):
        (JSC::DFG::SpeculativeJIT::appendCallSetResult):
        (JSC::DFG::SpeculativeJIT::addBranch):
        (JSC::DFG::SpeculativeJIT::linkBranches):
        (JSC::DFG::SpeculativeJIT::block):
        (JSC::DFG::SpeculativeJIT::checkConsistency):
        (JSC::DFG::SpeculativeJIT::BranchRecord::BranchRecord):
        (JSC::DFG::IntegerOperand::IntegerOperand):
        (JSC::DFG::IntegerOperand::~IntegerOperand):
        (JSC::DFG::IntegerOperand::index):
        (JSC::DFG::IntegerOperand::format):
        (JSC::DFG::IntegerOperand::gpr):
        (JSC::DFG::IntegerOperand::use):
        (JSC::DFG::DoubleOperand::DoubleOperand):
        (JSC::DFG::DoubleOperand::~DoubleOperand):
        (JSC::DFG::DoubleOperand::index):
        (JSC::DFG::DoubleOperand::fpr):
        (JSC::DFG::DoubleOperand::use):
        (JSC::DFG::JSValueOperand::JSValueOperand):
        (JSC::DFG::JSValueOperand::~JSValueOperand):
        (JSC::DFG::JSValueOperand::index):
        (JSC::DFG::JSValueOperand::gpr):
        (JSC::DFG::JSValueOperand::jsValueRegs):
        (JSC::DFG::JSValueOperand::isDouble):
        (JSC::DFG::JSValueOperand::fill):
        (JSC::DFG::JSValueOperand::tagGPR):
        (JSC::DFG::JSValueOperand::payloadGPR):
        (JSC::DFG::JSValueOperand::fpr):
        (JSC::DFG::JSValueOperand::use):
        (JSC::DFG::StorageOperand::StorageOperand):
        (JSC::DFG::StorageOperand::~StorageOperand):
        (JSC::DFG::StorageOperand::index):
        (JSC::DFG::StorageOperand::gpr):
        (JSC::DFG::StorageOperand::use):
        (JSC::DFG::GPRTemporary::~GPRTemporary):
        (JSC::DFG::GPRTemporary::gpr):
        (JSC::DFG::FPRTemporary::~FPRTemporary):
        (JSC::DFG::FPRTemporary::fpr):
        (JSC::DFG::FPRTemporary::FPRTemporary):
        (JSC::DFG::GPRResult::GPRResult):
        (JSC::DFG::GPRResult2::GPRResult2):
        (JSC::DFG::FPRResult::FPRResult):
        (JSC::DFG::FPRResult::lockedResult):
        (JSC::DFG::SpeculativeJIT::SpeculativeJIT):
        * dfg/DFGSpeculativeJIT32_64.cpp:
        (JSC::DFG::SpeculativeJIT::fillInteger):
        (JSC::DFG::SpeculativeJIT::fillDouble):
        (JSC::DFG::SpeculativeJIT::fillJSValue):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToNumber):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToInt32):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeUInt32ToNumber):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeKnownConstantArithOp):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeBasicArithOp):
        (JSC::DFG::SpeculativeJIT::cachedGetById):
        (JSC::DFG::SpeculativeJIT::cachedPutById):
        (JSC::DFG::SpeculativeJIT::cachedGetMethod):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq):
        (JSC::DFG::SpeculativeJIT::emitCall):
        * dfg/DFGSpeculativeJIT64.cpp:
        (JSC::DFG::SpeculativeJIT::fillInteger):
        (JSC::DFG::SpeculativeJIT::fillDouble):
        (JSC::DFG::SpeculativeJIT::fillJSValue):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToNumber):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeValueToInt32):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeUInt32ToNumber):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeKnownConstantArithOp):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeBasicArithOp):
        (JSC::DFG::SpeculativeJIT::cachedGetById):
        (JSC::DFG::SpeculativeJIT::cachedPutById):
        (JSC::DFG::SpeculativeJIT::cachedGetMethod):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeCompare):
        (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
        (JSC::DFG::SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq):
        (JSC::DFG::SpeculativeJIT::emitCall):
        * runtime/JSFunction.h:

2011-11-14  Filip Pizlo  <fpizlo@apple.com>
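The net effect described above is a fold-a-base-class-into-its-only-subclass refactoring: SpeculativeJIT stops deriving from JITCodeGenerator and absorbs its register-allocation state and helper methods directly (see the DFGSpeculativeJIT.h hunk further down). A minimal, self-contained sketch of that pattern, using hypothetical stand-in types rather than the real WebKit classes:

    // Hypothetical stand-ins (not the WebKit types) illustrating the pattern:
    // a helper base class with a single subclass is folded into that subclass.
    #include <cstdio>

    // Before: shared code-generation state lives in a base class.
    struct CodeGeneratorBase {
        int scratchInUse = 0;                      // stand-in for register-allocation state
        int selectScratch() { return scratchInUse++; }
    };
    struct SpeculativeBefore : CodeGeneratorBase {
        int compile() { return selectScratch(); }  // helpers reached via the base class
    };

    // After: the single subclass owns the state and helpers directly, so external
    // callers name the subclass instead of the base (cf. the DFGRepatch.cpp hunk below).
    struct SpeculativeAfter {
        int scratchInUse = 0;
        int selectScratch() { return scratchInUse++; }
        int compile() { return selectScratch(); }
    };

    int main() {
        SpeculativeBefore before;
        SpeculativeAfter after;
        std::printf("%d %d\n", before.compile(), after.compile()); // prints "0 0"
        return 0;
    }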
trunk/Source/JavaScriptCore/GNUmakefile.list.am
Diff r100205 → r100244:

     Source/JavaScriptCore/dfg/DFGGraph.h \
     Source/JavaScriptCore/dfg/DFGIntrinsic.h \
-    Source/JavaScriptCore/dfg/DFGJITCodeGenerator32_64.cpp \
-    Source/JavaScriptCore/dfg/DFGJITCodeGenerator64.cpp \
-    Source/JavaScriptCore/dfg/DFGJITCodeGenerator.cpp \
-    Source/JavaScriptCore/dfg/DFGJITCodeGenerator.h \
     Source/JavaScriptCore/dfg/DFGJITCompiler.cpp \
     Source/JavaScriptCore/dfg/DFGJITCompiler.h \
trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
Diff r100205 → r100244:

     86704B8912DBA33700A9FE7B /* YarrPattern.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86704B8212DBA33700A9FE7B /* YarrPattern.cpp */; };
     86704B8A12DBA33700A9FE7B /* YarrPattern.h in Headers */ = {isa = PBXBuildFile; fileRef = 86704B8312DBA33700A9FE7B /* YarrPattern.h */; settings = {ATTRIBUTES = (Private, ); }; };
-    86880F1C14328BB900B08D42 /* DFGJITCodeGenerator32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F1814328BB900B08D42 /* DFGJITCodeGenerator32_64.cpp */; };
     86880F1E14328BB900B08D42 /* DFGJITCompilerInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = 86880F1A14328BB900B08D42 /* DFGJITCompilerInlineMethods.h */; };
     86880F1F14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F1B14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp */; };
-    86880F44143531A800B08D42 /* DFGJITCodeGenerator64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F43143531A700B08D42 /* DFGJITCodeGenerator64.cpp */; };
     86880F4D14353B2100B08D42 /* DFGSpeculativeJIT64.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86880F4C14353B2100B08D42 /* DFGSpeculativeJIT64.cpp */; };
     868BFA08117CEFD100B908B1 /* AtomicString.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 868BFA00117CEFD100B908B1 /* AtomicString.cpp */; };
…
     86EC9DC71328DF82002B2AD7 /* DFGGraph.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DB71328DF82002B2AD7 /* DFGGraph.cpp */; };
     86EC9DC81328DF82002B2AD7 /* DFGGraph.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DB81328DF82002B2AD7 /* DFGGraph.h */; };
-    86EC9DC91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DB91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp */; };
-    86EC9DCA1328DF82002B2AD7 /* DFGJITCodeGenerator.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DBA1328DF82002B2AD7 /* DFGJITCodeGenerator.h */; };
     86EC9DCB1328DF82002B2AD7 /* DFGJITCompiler.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */; };
     86EC9DCC1328DF82002B2AD7 /* DFGJITCompiler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */; };
…
     867FC35F11B763950025105E /* JavaScriptCore.JSVALUE32_64only.exp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.exports; path = JavaScriptCore.JSVALUE32_64only.exp; sourceTree = "<group>"; };
     867FC36111B763950025105E /* JavaScriptCore.JSVALUE64only.exp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.exports; path = JavaScriptCore.JSVALUE64only.exp; sourceTree = "<group>"; };
-    86880F1814328BB900B08D42 /* DFGJITCodeGenerator32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGJITCodeGenerator32_64.cpp; path = dfg/DFGJITCodeGenerator32_64.cpp; sourceTree = "<group>"; };
     86880F1A14328BB900B08D42 /* DFGJITCompilerInlineMethods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGJITCompilerInlineMethods.h; path = dfg/DFGJITCompilerInlineMethods.h; sourceTree = "<group>"; };
     86880F1B14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGSpeculativeJIT32_64.cpp; path = dfg/DFGSpeculativeJIT32_64.cpp; sourceTree = "<group>"; };
-    86880F43143531A700B08D42 /* DFGJITCodeGenerator64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGJITCodeGenerator64.cpp; path = dfg/DFGJITCodeGenerator64.cpp; sourceTree = "<group>"; };
     86880F4C14353B2100B08D42 /* DFGSpeculativeJIT64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGSpeculativeJIT64.cpp; path = dfg/DFGSpeculativeJIT64.cpp; sourceTree = "<group>"; };
     868BFA00117CEFD100B908B1 /* AtomicString.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = AtomicString.cpp; path = text/AtomicString.cpp; sourceTree = "<group>"; };
…
     86EC9DB71328DF82002B2AD7 /* DFGGraph.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGGraph.cpp; path = dfg/DFGGraph.cpp; sourceTree = "<group>"; };
     86EC9DB81328DF82002B2AD7 /* DFGGraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGGraph.h; path = dfg/DFGGraph.h; sourceTree = "<group>"; };
-    86EC9DB91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGJITCodeGenerator.cpp; path = dfg/DFGJITCodeGenerator.cpp; sourceTree = "<group>"; };
-    86EC9DBA1328DF82002B2AD7 /* DFGJITCodeGenerator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGJITCodeGenerator.h; path = dfg/DFGJITCodeGenerator.h; sourceTree = "<group>"; };
     86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGJITCompiler.cpp; path = dfg/DFGJITCompiler.cpp; sourceTree = "<group>"; };
     86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGJITCompiler.h; path = dfg/DFGJITCompiler.h; sourceTree = "<group>"; };
…
     86EC9DB81328DF82002B2AD7 /* DFGGraph.h */,
     0FD82EF31423073900179C94 /* DFGIntrinsic.h */,
-    86EC9DB91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp */,
-    86EC9DBA1328DF82002B2AD7 /* DFGJITCodeGenerator.h */,
-    86880F1814328BB900B08D42 /* DFGJITCodeGenerator32_64.cpp */,
-    86880F43143531A700B08D42 /* DFGJITCodeGenerator64.cpp */,
     86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */,
     86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */,
…
     86EC9DC61328DF82002B2AD7 /* DFGGenerationInfo.h in Headers */,
     86EC9DC81328DF82002B2AD7 /* DFGGraph.h in Headers */,
-    86EC9DCA1328DF82002B2AD7 /* DFGJITCodeGenerator.h in Headers */,
     86EC9DCC1328DF82002B2AD7 /* DFGJITCompiler.h in Headers */,
     86ECA3EA132DEF1C002B2AD7 /* DFGNode.h in Headers */,
…
     86EC9DC41328DF82002B2AD7 /* DFGByteCodeParser.cpp in Sources */,
     86EC9DC71328DF82002B2AD7 /* DFGGraph.cpp in Sources */,
-    86EC9DC91328DF82002B2AD7 /* DFGJITCodeGenerator.cpp in Sources */,
     86EC9DCB1328DF82002B2AD7 /* DFGJITCompiler.cpp in Sources */,
     86EC9DCF1328DF82002B2AD7 /* DFGOperations.cpp in Sources */,
…
     A70456B11427FB950037DA68 /* AllocationSpace.cpp in Sources */,
     86FA9E91142BBB2E001773B7 /* JSBoundFunction.cpp in Sources */,
-    86880F1C14328BB900B08D42 /* DFGJITCodeGenerator32_64.cpp in Sources */,
     86880F1F14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp in Sources */,
-    86880F44143531A800B08D42 /* DFGJITCodeGenerator64.cpp in Sources */,
     86880F4D14353B2100B08D42 /* DFGSpeculativeJIT64.cpp in Sources */,
     0FE228EE1436AB2C00196C48 /* Heuristics.cpp in Sources */,
trunk/Source/JavaScriptCore/Target.pri
Diff r100205 → r100244:

     dfg/DFGDriver.cpp \
     dfg/DFGGraph.cpp \
-    dfg/DFGJITCodeGenerator.cpp \
-    dfg/DFGJITCodeGenerator32_64.cpp \
-    dfg/DFGJITCodeGenerator64.cpp \
     dfg/DFGJITCompiler.cpp \
     dfg/DFGOperations.cpp \
trunk/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
Diff r100165 → r100244:

     #include "CodeBlock.h"
-    #include "DFGJITCodeGenerator.h"
     #include "DFGOSRExitCompiler.h"
     #include "DFGOperations.h"
trunk/Source/JavaScriptCore/dfg/DFGRepatch.cpp
Diff r100165 → r100244:

     #if ENABLE(DFG_JIT)

-    #include "DFGJITCodeGenerator.h"
+    #include "DFGSpeculativeJIT.h"
     #include "LinkBuffer.h"
     #include "Operations.h"
…
         if (scratchGPR == InvalidGPRReg) {
-            scratchGPR = JITCodeGenerator::selectScratchGPR(baseGPR, resultGPR);
+            scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
             stubJit.push(scratchGPR);
             needToRestoreScratch = true;
…
         if (scratchGPR == InvalidGPRReg) {
-            scratchGPR = JITCodeGenerator::selectScratchGPR(baseGPR, resultGPR);
+            scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
             stubJit.push(scratchGPR);
             needToRestoreScratch = true;
…
         if (scratchGPR == InvalidGPRReg) {
-            scratchGPR = JITCodeGenerator::selectScratchGPR(baseGPR, valueGPR);
+            scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
             stubJit.push(scratchGPR);
             needToRestoreScratch = true;
…
     #if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
         // Must always emit this write barrier as the structure transition itself requires it
-        GPRReg scratch2 = JITCodeGenerator::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+        GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
         stubJit.push(scratch2);
-        JITCodeGenerator::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
+        SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
         stubJit.pop(scratch2);
     #endif
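The repatch stubs only use the static helpers selectScratchGPR and writeBarrier, so the hunks above are pure renames of the qualifying class; the helper bodies themselves move unchanged into SpeculativeJIT (see the DFGSpeculativeJIT.h hunk further down). For reference, selectScratchGPR simply returns the first of a few fixed registers that is not among the ones the caller needs preserved; a simplified, self-contained sketch of that idea, with plain ints standing in for GPRReg/GPRInfo:

    #include <cassert>

    constexpr int kInvalidReg = -1;

    // Stand-in for selectScratchGPR: scan candidate "registers" r0..r3 and return
    // the first one that is not among the (at most three) registers to preserve.
    int selectScratch(int preserve1 = kInvalidReg, int preserve2 = kInvalidReg, int preserve3 = kInvalidReg)
    {
        for (int candidate = 0; candidate < 4; ++candidate) {
            if (candidate != preserve1 && candidate != preserve2 && candidate != preserve3)
                return candidate;
        }
        return kInvalidReg; // unreachable: four candidates, at most three preserved
    }

    int main()
    {
        assert(selectScratch() == 0);       // nothing preserved: first candidate wins
        assert(selectScratch(0, 1) == 2);   // preserved registers are skipped
        return 0;
    }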
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r99917 r100244 25 25 26 26 #include "config.h" 27 28 27 #include "DFGSpeculativeJIT.h" 28 29 #if ENABLE(DFG_JIT) 30 31 #include "DFGJITCompilerInlineMethods.h" 29 32 #include "JSByteArray.h" 30 31 #if ENABLE(DFG_JIT) 32 33 #include "LinkBuffer.h" 34 35 namespace JSC { namespace DFG { 36 37 const double SpeculativeJIT::twoToThe32 = (double)0x100000000ull; 38 39 void SpeculativeJIT::clearGenerationInfo() 40 { 41 for (unsigned i = 0; i < m_generationInfo.size(); ++i) 42 m_generationInfo[i] = GenerationInfo(); 43 m_gprs = RegisterBank<GPRInfo>(); 44 m_fprs = RegisterBank<FPRInfo>(); 45 } 46 47 GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex) 48 { 49 Node& node = m_jit.graph()[nodeIndex]; 50 VirtualRegister virtualRegister = node.virtualRegister(); 51 GenerationInfo& info = m_generationInfo[virtualRegister]; 52 53 switch (info.registerFormat()) { 54 case DataFormatNone: { 55 GPRReg gpr = allocate(); 56 ASSERT(info.spillFormat() == DataFormatStorage); 57 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 58 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); 59 info.fillStorage(gpr); 60 return gpr; 61 } 62 63 case DataFormatStorage: { 64 GPRReg gpr = info.gpr(); 65 m_gprs.lock(gpr); 66 return gpr; 67 } 68 69 default: 70 ASSERT_NOT_REACHED(); 71 } 72 73 return InvalidGPRReg; 74 } 75 76 void SpeculativeJIT::useChildren(Node& node) 77 { 78 if (node.op & NodeHasVarArgs) { 79 for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++) 80 use(m_jit.graph().m_varArgChildren[childIdx]); 81 } else { 82 NodeIndex child1 = node.child1(); 83 if (child1 == NoNode) { 84 ASSERT(node.child2() == NoNode && node.child3() == NoNode); 85 return; 86 } 87 use(child1); 88 89 NodeIndex child2 = node.child2(); 90 if (child2 == NoNode) { 91 ASSERT(node.child3() == NoNode); 92 return; 93 } 94 use(child2); 95 96 NodeIndex child3 = node.child3(); 97 if (child3 == NoNode) 98 return; 99 use(child3); 100 } 101 } 102 103 bool SpeculativeJIT::isStrictInt32(NodeIndex nodeIndex) 104 { 105 if (isInt32Constant(nodeIndex)) 106 return true; 107 108 Node& node = m_jit.graph()[nodeIndex]; 109 GenerationInfo& info = m_generationInfo[node.virtualRegister()]; 110 111 return info.registerFormat() == DataFormatInteger; 112 } 113 114 bool SpeculativeJIT::isKnownInteger(NodeIndex nodeIndex) 115 { 116 if (isInt32Constant(nodeIndex)) 117 return true; 118 119 Node& node = m_jit.graph()[nodeIndex]; 120 121 if (node.hasInt32Result()) 122 return true; 123 124 GenerationInfo& info = m_generationInfo[node.virtualRegister()]; 125 126 return info.isJSInteger(); 127 } 128 129 bool SpeculativeJIT::isKnownNumeric(NodeIndex nodeIndex) 130 { 131 if (isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex)) 132 return true; 133 134 Node& node = m_jit.graph()[nodeIndex]; 135 136 if (node.hasNumberResult()) 137 return true; 138 139 GenerationInfo& info = m_generationInfo[node.virtualRegister()]; 140 141 return info.isJSInteger() || info.isJSDouble(); 142 } 143 144 bool SpeculativeJIT::isKnownCell(NodeIndex nodeIndex) 145 { 146 return m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister()].isJSCell(); 147 } 148 149 bool SpeculativeJIT::isKnownNotCell(NodeIndex nodeIndex) 150 { 151 Node& node = m_jit.graph()[nodeIndex]; 152 VirtualRegister virtualRegister = node.virtualRegister(); 153 GenerationInfo& info = m_generationInfo[virtualRegister]; 154 if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isCell()) 155 return true; 156 return !(info.isJSCell() || info.isUnknownJS()); 157 } 158 
159 bool SpeculativeJIT::isKnownNotInteger(NodeIndex nodeIndex) 160 { 161 Node& node = m_jit.graph()[nodeIndex]; 162 VirtualRegister virtualRegister = node.virtualRegister(); 163 GenerationInfo& info = m_generationInfo[virtualRegister]; 164 165 return info.isJSDouble() || info.isJSCell() || info.isJSBoolean() 166 || (node.hasConstant() && !valueOfJSConstant(nodeIndex).isInt32()); 167 } 168 169 bool SpeculativeJIT::isKnownNotNumber(NodeIndex nodeIndex) 170 { 171 Node& node = m_jit.graph()[nodeIndex]; 172 VirtualRegister virtualRegister = node.virtualRegister(); 173 GenerationInfo& info = m_generationInfo[virtualRegister]; 174 175 return (!info.isJSDouble() && !info.isJSInteger() && !info.isUnknownJS()) 176 || (node.hasConstant() && !isNumberConstant(nodeIndex)); 177 } 178 179 bool SpeculativeJIT::isKnownBoolean(NodeIndex nodeIndex) 180 { 181 Node& node = m_jit.graph()[nodeIndex]; 182 if (node.hasBooleanResult()) 183 return true; 184 185 if (isBooleanConstant(nodeIndex)) 186 return true; 187 188 VirtualRegister virtualRegister = node.virtualRegister(); 189 GenerationInfo& info = m_generationInfo[virtualRegister]; 190 191 return info.isJSBoolean(); 192 } 193 194 void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind) 195 { 196 UNUSED_PARAM(jit); 197 UNUSED_PARAM(owner); 198 UNUSED_PARAM(scratch1); 199 UNUSED_PARAM(scratch2); 200 UNUSED_PARAM(useKind); 201 ASSERT(owner != scratch1); 202 ASSERT(owner != scratch2); 203 ASSERT(scratch1 != scratch2); 204 205 #if ENABLE(WRITE_BARRIER_PROFILING) 206 JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind)); 207 #endif 208 markCellCard(jit, owner, scratch1, scratch2); 209 } 210 211 void SpeculativeJIT::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2) 212 { 213 UNUSED_PARAM(jit); 214 UNUSED_PARAM(owner); 215 UNUSED_PARAM(scratch1); 216 UNUSED_PARAM(scratch2); 217 218 #if ENABLE(GGC) 219 jit.move(owner, scratch1); 220 jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1); 221 jit.move(owner, scratch2); 222 // consume additional 8 bits as we're using an approximate filter 223 jit.rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2); 224 jit.andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2); 225 MacroAssembler::Jump filter = jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks())); 226 jit.move(owner, scratch2); 227 jit.rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2); 228 jit.andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2); 229 jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards())); 230 filter.link(&jit); 231 #endif 232 } 233 234 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2) 235 { 236 UNUSED_PARAM(ownerGPR); 237 UNUSED_PARAM(valueGPR); 238 UNUSED_PARAM(scratch1); 239 UNUSED_PARAM(scratch2); 240 UNUSED_PARAM(useKind); 241 242 if (isKnownNotCell(valueIndex)) 243 return; 244 245 #if ENABLE(WRITE_BARRIER_PROFILING) 246 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind)); 247 #endif 248 249 #if ENABLE(GGC) 250 GPRTemporary temp1; 251 GPRTemporary temp2; 252 if (scratch1 == InvalidGPRReg) { 253 GPRTemporary scratchGPR(this); 254 temp1.adopt(scratchGPR); 255 scratch1 = temp1.gpr(); 256 } 257 if 
(scratch2 == InvalidGPRReg) { 258 GPRTemporary scratchGPR(this); 259 temp2.adopt(scratchGPR); 260 scratch2 = temp2.gpr(); 261 } 262 263 JITCompiler::Jump rhsNotCell; 264 bool hadCellCheck = false; 265 if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.getPrediction(valueIndex))) { 266 hadCellCheck = true; 267 rhsNotCell = m_jit.branchIfNotCell(valueGPR); 268 } 269 270 markCellCard(m_jit, ownerGPR, scratch1, scratch2); 271 272 if (hadCellCheck) 273 rhsNotCell.link(&m_jit); 274 #endif 275 } 276 277 void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2) 278 { 279 UNUSED_PARAM(ownerGPR); 280 UNUSED_PARAM(value); 281 UNUSED_PARAM(scratch1); 282 UNUSED_PARAM(scratch2); 283 UNUSED_PARAM(useKind); 284 285 if (Heap::isMarked(value)) 286 return; 287 288 #if ENABLE(WRITE_BARRIER_PROFILING) 289 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind)); 290 #endif 291 292 #if ENABLE(GGC) 293 GPRTemporary temp1; 294 GPRTemporary temp2; 295 if (scratch1 == InvalidGPRReg) { 296 GPRTemporary scratchGPR(this); 297 temp1.adopt(scratchGPR); 298 scratch1 = temp1.gpr(); 299 } 300 if (scratch2 == InvalidGPRReg) { 301 GPRTemporary scratchGPR(this); 302 temp2.adopt(scratchGPR); 303 scratch2 = temp2.gpr(); 304 } 305 306 markCellCard(m_jit, ownerGPR, scratch1, scratch2); 307 #endif 308 } 309 310 void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind useKind, GPRReg scratch) 311 { 312 UNUSED_PARAM(owner); 313 UNUSED_PARAM(valueGPR); 314 UNUSED_PARAM(scratch); 315 UNUSED_PARAM(useKind); 316 317 if (isKnownNotCell(valueIndex)) 318 return; 319 320 #if ENABLE(WRITE_BARRIER_PROFILING) 321 JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind)); 322 #endif 323 324 #if ENABLE(GGC) 325 JITCompiler::Jump rhsNotCell; 326 bool hadCellCheck = false; 327 if (!isKnownCell(valueIndex) && !isCellPrediction(m_jit.getPrediction(valueIndex))) { 328 hadCellCheck = true; 329 rhsNotCell = m_jit.branchIfNotCell(valueGPR); 330 } 331 332 GPRTemporary temp; 333 if (scratch == InvalidGPRReg) { 334 GPRTemporary scratchGPR(this); 335 temp.adopt(scratchGPR); 336 scratch = temp.gpr(); 337 } 338 339 uint8_t* cardAddress = Heap::addressOfCardFor(owner); 340 m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch); 341 m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch)); 342 343 if (hadCellCheck) 344 rhsNotCell.link(&m_jit); 345 #endif 346 } 347 348 bool SpeculativeJIT::nonSpeculativeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) 349 { 350 NodeIndex branchNodeIndex = detectPeepHoleBranch(); 351 if (branchNodeIndex != NoNode) { 352 ASSERT(node.adjustedRefCount() == 1); 353 354 nonSpeculativePeepholeBranch(node, branchNodeIndex, cond, helperFunction); 355 356 m_compileIndex = branchNodeIndex; 357 358 return true; 359 } 360 361 nonSpeculativeNonPeepholeCompare(node, cond, helperFunction); 362 363 return false; 364 } 365 366 bool SpeculativeJIT::nonSpeculativeStrictEq(Node& node, bool invert) 367 { 368 if (!invert && (isKnownNumeric(node.child1()) || isKnownNumeric(node.child2()))) 369 return nonSpeculativeCompare(node, MacroAssembler::Equal, operationCompareStrictEq); 370 371 NodeIndex branchNodeIndex = detectPeepHoleBranch(); 372 if (branchNodeIndex != NoNode) { 373 ASSERT(node.adjustedRefCount() == 1); 374 375 nonSpeculativePeepholeStrictEq(node, branchNodeIndex, invert); 376 377 m_compileIndex = 
branchNodeIndex; 378 379 return true; 380 } 381 382 nonSpeculativeNonPeepholeStrictEq(node, invert); 383 384 return false; 385 } 386 387 #ifndef NDEBUG 388 static const char* dataFormatString(DataFormat format) 389 { 390 // These values correspond to the DataFormat enum. 391 const char* strings[] = { 392 "[ ]", 393 "[ i]", 394 "[ d]", 395 "[ c]", 396 "Err!", 397 "Err!", 398 "Err!", 399 "Err!", 400 "[J ]", 401 "[Ji]", 402 "[Jd]", 403 "[Jc]", 404 "Err!", 405 "Err!", 406 "Err!", 407 "Err!", 408 }; 409 return strings[format]; 410 } 411 412 void SpeculativeJIT::dump(const char* label) 413 { 414 if (label) 415 fprintf(stderr, "<%s>\n", label); 416 417 fprintf(stderr, " gprs:\n"); 418 m_gprs.dump(); 419 fprintf(stderr, " fprs:\n"); 420 m_fprs.dump(); 421 fprintf(stderr, " VirtualRegisters:\n"); 422 for (unsigned i = 0; i < m_generationInfo.size(); ++i) { 423 GenerationInfo& info = m_generationInfo[i]; 424 if (info.alive()) 425 fprintf(stderr, " % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat())); 426 else 427 fprintf(stderr, " % 3d:[__][__]", i); 428 if (info.registerFormat() == DataFormatDouble) 429 fprintf(stderr, ":fpr%d\n", info.fpr()); 430 else if (info.registerFormat() != DataFormatNone 33 431 #if USE(JSVALUE32_64) 34 #include "DFGJITCompilerInlineMethods.h" 35 #endif 36 37 namespace JSC { namespace DFG { 432 && !(info.registerFormat() & DataFormatJS) 433 #endif 434 ) { 435 ASSERT(info.gpr() != InvalidGPRReg); 436 fprintf(stderr, ":%s\n", GPRInfo::debugName(info.gpr())); 437 } else 438 fprintf(stderr, "\n"); 439 } 440 if (label) 441 fprintf(stderr, "</%s>\n", label); 442 } 443 #endif 444 445 446 #if DFG_ENABLE(CONSISTENCY_CHECK) 447 void SpeculativeJIT::checkConsistency() 448 { 449 bool failed = false; 450 451 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 452 if (iter.isLocked()) { 453 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName()); 454 failed = true; 455 } 456 } 457 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 458 if (iter.isLocked()) { 459 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName()); 460 failed = true; 461 } 462 } 463 464 for (unsigned i = 0; i < m_generationInfo.size(); ++i) { 465 VirtualRegister virtualRegister = (VirtualRegister)i; 466 GenerationInfo& info = m_generationInfo[virtualRegister]; 467 if (!info.alive()) 468 continue; 469 switch (info.registerFormat()) { 470 case DataFormatNone: 471 break; 472 case DataFormatJS: 473 case DataFormatJSInteger: 474 case DataFormatJSDouble: 475 case DataFormatJSCell: 476 case DataFormatJSBoolean: 477 #if USE(JSVALUE32_64) 478 break; 479 #endif 480 case DataFormatInteger: 481 case DataFormatCell: 482 case DataFormatBoolean: 483 case DataFormatStorage: { 484 GPRReg gpr = info.gpr(); 485 ASSERT(gpr != InvalidGPRReg); 486 if (m_gprs.name(gpr) != virtualRegister) { 487 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr)); 488 failed = true; 489 } 490 break; 491 } 492 case DataFormatDouble: { 493 FPRReg fpr = info.fpr(); 494 ASSERT(fpr != InvalidFPRReg); 495 if (m_fprs.name(fpr) != virtualRegister) { 496 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr)); 497 failed = true; 498 } 499 break; 500 } 501 } 502 } 503 504 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 505 
VirtualRegister virtualRegister = iter.name(); 506 if (virtualRegister == InvalidVirtualRegister) 507 continue; 508 509 GenerationInfo& info = m_generationInfo[virtualRegister]; 510 #if USE(JSVALUE64) 511 if (iter.regID() != info.gpr()) { 512 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister); 513 failed = true; 514 } 515 #else 516 if (!(info.registerFormat() & DataFormatJS)) { 517 if (iter.regID() != info.gpr()) { 518 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister); 519 failed = true; 520 } 521 } else { 522 if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) { 523 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister); 524 failed = true; 525 } 526 } 527 #endif 528 } 529 530 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 531 VirtualRegister virtualRegister = iter.name(); 532 if (virtualRegister == InvalidVirtualRegister) 533 continue; 534 535 GenerationInfo& info = m_generationInfo[virtualRegister]; 536 if (iter.regID() != info.fpr()) { 537 fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister); 538 failed = true; 539 } 540 } 541 542 if (failed) { 543 dump(); 544 CRASH(); 545 } 546 } 547 #endif 548 549 GPRTemporary::GPRTemporary() 550 : m_jit(0) 551 , m_gpr(InvalidGPRReg) 552 { 553 } 554 555 GPRTemporary::GPRTemporary(SpeculativeJIT* jit) 556 : m_jit(jit) 557 , m_gpr(InvalidGPRReg) 558 { 559 #if CPU(X86) 560 // we currenty lazily allocate the reg, as the number of regs on X86 is limited. 
561 #else 562 m_gpr = m_jit->allocate(); 563 #endif 564 } 565 566 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific) 567 : m_jit(jit) 568 , m_gpr(InvalidGPRReg) 569 { 570 m_gpr = m_jit->allocate(specific); 571 } 572 573 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1) 574 : m_jit(jit) 575 , m_gpr(InvalidGPRReg) 576 { 577 if (m_jit->canReuse(op1.index())) 578 m_gpr = m_jit->reuse(op1.gpr()); 579 else 580 m_gpr = m_jit->allocate(); 581 } 582 583 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2) 584 : m_jit(jit) 585 , m_gpr(InvalidGPRReg) 586 { 587 if (m_jit->canReuse(op1.index())) 588 m_gpr = m_jit->reuse(op1.gpr()); 589 else if (m_jit->canReuse(op2.index())) 590 m_gpr = m_jit->reuse(op2.gpr()); 591 else 592 m_gpr = m_jit->allocate(); 593 } 594 595 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1) 596 : m_jit(jit) 597 , m_gpr(InvalidGPRReg) 598 { 599 if (m_jit->canReuse(op1.index())) 600 m_gpr = m_jit->reuse(op1.gpr()); 601 else 602 m_gpr = m_jit->allocate(); 603 } 604 605 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1) 606 : m_jit(jit) 607 , m_gpr(InvalidGPRReg) 608 { 609 if (m_jit->canReuse(op1.index())) 610 m_gpr = m_jit->reuse(op1.gpr()); 611 else 612 m_gpr = m_jit->allocate(); 613 } 614 615 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2) 616 : m_jit(jit) 617 , m_gpr(InvalidGPRReg) 618 { 619 if (m_jit->canReuse(op1.index())) 620 m_gpr = m_jit->reuse(op1.gpr()); 621 else if (m_jit->canReuse(op2.index())) 622 m_gpr = m_jit->reuse(op2.gpr()); 623 else 624 m_gpr = m_jit->allocate(); 625 } 626 627 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1) 628 : m_jit(jit) 629 , m_gpr(InvalidGPRReg) 630 { 631 if (m_jit->canReuse(op1.index())) 632 m_gpr = m_jit->reuse(op1.gpr()); 633 else 634 m_gpr = m_jit->allocate(); 635 } 636 637 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1) 638 : m_jit(jit) 639 , m_gpr(InvalidGPRReg) 640 { 641 if (m_jit->canReuse(op1.index())) 642 m_gpr = m_jit->reuse(op1.gpr()); 643 else 644 m_gpr = m_jit->allocate(); 645 } 646 647 #if USE(JSVALUE64) 648 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1) 649 : m_jit(jit) 650 , m_gpr(InvalidGPRReg) 651 { 652 if (m_jit->canReuse(op1.index())) 653 m_gpr = m_jit->reuse(op1.gpr()); 654 else 655 m_gpr = m_jit->allocate(); 656 } 657 #else 658 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag) 659 : m_jit(jit) 660 , m_gpr(InvalidGPRReg) 661 { 662 if (!op1.isDouble() && m_jit->canReuse(op1.index())) 663 m_gpr = m_jit->reuse(tag ? 
op1.tagGPR() : op1.payloadGPR()); 664 else 665 m_gpr = m_jit->allocate(); 666 } 667 #endif 668 669 GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1) 670 : m_jit(jit) 671 , m_gpr(InvalidGPRReg) 672 { 673 if (m_jit->canReuse(op1.index())) 674 m_gpr = m_jit->reuse(op1.gpr()); 675 else 676 m_gpr = m_jit->allocate(); 677 } 678 679 void GPRTemporary::adopt(GPRTemporary& other) 680 { 681 ASSERT(!m_jit); 682 ASSERT(m_gpr == InvalidGPRReg); 683 ASSERT(other.m_jit); 684 ASSERT(other.m_gpr != InvalidGPRReg); 685 m_jit = other.m_jit; 686 m_gpr = other.m_gpr; 687 other.m_jit = 0; 688 other.m_gpr = InvalidGPRReg; 689 } 690 691 FPRTemporary::FPRTemporary(SpeculativeJIT* jit) 692 : m_jit(jit) 693 , m_fpr(InvalidFPRReg) 694 { 695 m_fpr = m_jit->fprAllocate(); 696 } 697 698 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1) 699 : m_jit(jit) 700 , m_fpr(InvalidFPRReg) 701 { 702 if (m_jit->canReuse(op1.index())) 703 m_fpr = m_jit->reuse(op1.fpr()); 704 else 705 m_fpr = m_jit->fprAllocate(); 706 } 707 708 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1, DoubleOperand& op2) 709 : m_jit(jit) 710 , m_fpr(InvalidFPRReg) 711 { 712 if (m_jit->canReuse(op1.index())) 713 m_fpr = m_jit->reuse(op1.fpr()); 714 else if (m_jit->canReuse(op2.index())) 715 m_fpr = m_jit->reuse(op2.fpr()); 716 else 717 m_fpr = m_jit->fprAllocate(); 718 } 719 720 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1) 721 : m_jit(jit) 722 , m_fpr(InvalidFPRReg) 723 { 724 if (m_jit->canReuse(op1.index())) 725 m_fpr = m_jit->reuse(op1.fpr()); 726 else 727 m_fpr = m_jit->fprAllocate(); 728 } 729 730 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2) 731 : m_jit(jit) 732 , m_fpr(InvalidFPRReg) 733 { 734 if (m_jit->canReuse(op1.index())) 735 m_fpr = m_jit->reuse(op1.fpr()); 736 else if (m_jit->canReuse(op2.index())) 737 m_fpr = m_jit->reuse(op2.fpr()); 738 else 739 m_fpr = m_jit->fprAllocate(); 740 } 741 742 #if USE(JSVALUE32_64) 743 FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1) 744 : m_jit(jit) 745 , m_fpr(InvalidFPRReg) 746 { 747 if (op1.isDouble() && m_jit->canReuse(op1.index())) 748 m_fpr = m_jit->reuse(op1.fpr()); 749 else 750 m_fpr = m_jit->fprAllocate(); 751 } 752 #endif 38 753 39 754 #ifndef NDEBUG -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
r99904 r100244 30 30 31 31 #include "DFGAbstractState.h" 32 #include "DFGJITCodeGenerator.h" 32 #include "DFGGenerationInfo.h" 33 #include "DFGJITCompiler.h" 33 34 #include "DFGOSRExit.h" 35 #include "DFGOperations.h" 34 36 #include "ValueRecovery.h" 35 37 36 38 namespace JSC { namespace DFG { 37 39 40 class JSValueOperand; 38 41 class SpeculativeJIT; 42 class SpeculateIntegerOperand; 43 class SpeculateStrictInt32Operand; 44 class SpeculateDoubleOperand; 45 class SpeculateCellOperand; 46 class SpeculateBooleanOperand; 47 39 48 40 49 enum ValueSourceKind { … … 126 135 // to propagate type information (including information that has 127 136 // only speculatively been asserted) through the dataflow. 128 class SpeculativeJIT : public JITCodeGenerator{137 class SpeculativeJIT { 129 138 friend struct OSRExit; 139 private: 140 typedef JITCompiler::TrustedImm32 TrustedImm32; 141 typedef JITCompiler::Imm32 Imm32; 142 typedef JITCompiler::TrustedImmPtr TrustedImmPtr; 143 typedef JITCompiler::ImmPtr ImmPtr; 144 145 // These constants are used to set priorities for spill order for 146 // the register allocator. 147 #if USE(JSVALUE64) 148 enum SpillOrder { 149 SpillOrderConstant = 1, // no spill, and cheap fill 150 SpillOrderSpilled = 2, // no spill 151 SpillOrderJS = 4, // needs spill 152 SpillOrderCell = 4, // needs spill 153 SpillOrderStorage = 4, // needs spill 154 SpillOrderInteger = 5, // needs spill and box 155 SpillOrderBoolean = 5, // needs spill and box 156 SpillOrderDouble = 6, // needs spill and convert 157 }; 158 #elif USE(JSVALUE32_64) 159 enum SpillOrder { 160 SpillOrderConstant = 1, // no spill, and cheap fill 161 SpillOrderSpilled = 2, // no spill 162 SpillOrderJS = 4, // needs spill 163 SpillOrderStorage = 4, // needs spill 164 SpillOrderDouble = 4, // needs spill 165 SpillOrderInteger = 5, // needs spill and box 166 SpillOrderCell = 5, // needs spill and box 167 SpillOrderBoolean = 5, // needs spill and box 168 }; 169 #endif 170 171 enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly }; 172 173 static const double twoToThe32; 174 130 175 public: 131 176 SpeculativeJIT(JITCompiler&); … … 133 178 bool compile(); 134 179 void linkOSREntries(LinkBuffer&); 180 181 Node& at(NodeIndex nodeIndex) 182 { 183 return m_jit.graph()[nodeIndex]; 184 } 185 186 GPRReg fillInteger(NodeIndex, DataFormat& returnFormat); 187 FPRReg fillDouble(NodeIndex); 188 #if USE(JSVALUE64) 189 GPRReg fillJSValue(NodeIndex); 190 #elif USE(JSVALUE32_64) 191 bool fillJSValue(NodeIndex, GPRReg&, GPRReg&, FPRReg&); 192 #endif 193 GPRReg fillStorage(NodeIndex); 194 195 // lock and unlock GPR & FPR registers. 196 void lock(GPRReg reg) 197 { 198 m_gprs.lock(reg); 199 } 200 void lock(FPRReg reg) 201 { 202 m_fprs.lock(reg); 203 } 204 void unlock(GPRReg reg) 205 { 206 m_gprs.unlock(reg); 207 } 208 void unlock(FPRReg reg) 209 { 210 m_fprs.unlock(reg); 211 } 212 213 // Used to check whether a child node is on its last use, 214 // and its machine registers may be reused. 215 bool canReuse(NodeIndex nodeIndex) 216 { 217 VirtualRegister virtualRegister = at(nodeIndex).virtualRegister(); 218 GenerationInfo& info = m_generationInfo[virtualRegister]; 219 return info.canReuse(); 220 } 221 GPRReg reuse(GPRReg reg) 222 { 223 m_gprs.lock(reg); 224 return reg; 225 } 226 FPRReg reuse(FPRReg reg) 227 { 228 m_fprs.lock(reg); 229 return reg; 230 } 231 232 // Allocate a gpr/fpr. 
233 GPRReg allocate() 234 { 235 VirtualRegister spillMe; 236 GPRReg gpr = m_gprs.allocate(spillMe); 237 if (spillMe != InvalidVirtualRegister) { 238 #if USE(JSVALUE32_64) 239 GenerationInfo& info = m_generationInfo[spillMe]; 240 ASSERT(info.registerFormat() != DataFormatJSDouble); 241 if ((info.registerFormat() & DataFormatJS)) 242 m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR()); 243 #endif 244 spill(spillMe); 245 } 246 return gpr; 247 } 248 GPRReg allocate(GPRReg specific) 249 { 250 VirtualRegister spillMe = m_gprs.allocateSpecific(specific); 251 if (spillMe != InvalidVirtualRegister) { 252 #if USE(JSVALUE32_64) 253 GenerationInfo& info = m_generationInfo[spillMe]; 254 ASSERT(info.registerFormat() != DataFormatJSDouble); 255 if ((info.registerFormat() & DataFormatJS)) 256 m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR()); 257 #endif 258 spill(spillMe); 259 } 260 return specific; 261 } 262 GPRReg tryAllocate() 263 { 264 return m_gprs.tryAllocate(); 265 } 266 FPRReg fprAllocate() 267 { 268 VirtualRegister spillMe; 269 FPRReg fpr = m_fprs.allocate(spillMe); 270 if (spillMe != InvalidVirtualRegister) 271 spill(spillMe); 272 return fpr; 273 } 274 275 // Check whether a VirtualRegsiter is currently in a machine register. 276 // We use this when filling operands to fill those that are already in 277 // machine registers first (by locking VirtualRegsiters that are already 278 // in machine register before filling those that are not we attempt to 279 // avoid spilling values we will need immediately). 280 bool isFilled(NodeIndex nodeIndex) 281 { 282 VirtualRegister virtualRegister = at(nodeIndex).virtualRegister(); 283 GenerationInfo& info = m_generationInfo[virtualRegister]; 284 return info.registerFormat() != DataFormatNone; 285 } 286 bool isFilledDouble(NodeIndex nodeIndex) 287 { 288 VirtualRegister virtualRegister = at(nodeIndex).virtualRegister(); 289 GenerationInfo& info = m_generationInfo[virtualRegister]; 290 return info.registerFormat() == DataFormatDouble; 291 } 292 293 // Called on an operand once it has been consumed by a parent node. 294 void use(NodeIndex nodeIndex) 295 { 296 VirtualRegister virtualRegister = at(nodeIndex).virtualRegister(); 297 GenerationInfo& info = m_generationInfo[virtualRegister]; 298 299 // use() returns true when the value becomes dead, and any 300 // associated resources may be freed. 301 if (!info.use()) 302 return; 303 304 // Release the associated machine registers. 
305 DataFormat registerFormat = info.registerFormat(); 306 #if USE(JSVALUE64) 307 if (registerFormat == DataFormatDouble) 308 m_fprs.release(info.fpr()); 309 else if (registerFormat != DataFormatNone) 310 m_gprs.release(info.gpr()); 311 #elif USE(JSVALUE32_64) 312 if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble) 313 m_fprs.release(info.fpr()); 314 else if (registerFormat & DataFormatJS) { 315 m_gprs.release(info.tagGPR()); 316 m_gprs.release(info.payloadGPR()); 317 } else if (registerFormat != DataFormatNone) 318 m_gprs.release(info.gpr()); 319 #endif 320 } 321 322 static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2); 323 static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind); 324 325 void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg); 326 void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg); 327 void writeBarrier(JSCell* owner, GPRReg valueGPR, NodeIndex valueIndex, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg); 328 329 static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg) 330 { 331 if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0) 332 return GPRInfo::regT0; 333 334 if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1) 335 return GPRInfo::regT1; 336 337 if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2) 338 return GPRInfo::regT2; 339 340 return GPRInfo::regT3; 341 } 135 342 136 343 // Called by the speculative operand types, below, to fill operand to … … 143 350 144 351 private: 145 friend class JITCodeGenerator;146 147 352 void compile(Node&); 148 353 void compileMovHint(Node&); … … 150 355 151 356 void checkArgumentTypes(); 357 358 void clearGenerationInfo(); 359 360 // These methods are used when generating 'unexpected' 361 // calls out from JIT code to C++ helper routines - 362 // they spill all live values to the appropriate 363 // slots in the RegisterFile without changing any state 364 // in the GenerationInfo. 365 void silentSpillGPR(VirtualRegister spillMe, GPRReg source) 366 { 367 GenerationInfo& info = m_generationInfo[spillMe]; 368 ASSERT(info.registerFormat() != DataFormatNone); 369 ASSERT(info.registerFormat() != DataFormatDouble); 370 371 if (!info.needsSpill()) 372 return; 373 374 DataFormat registerFormat = info.registerFormat(); 375 376 #if USE(JSVALUE64) 377 ASSERT(info.gpr() == source); 378 if (registerFormat == DataFormatInteger) 379 m_jit.store32(source, JITCompiler::addressFor(spillMe)); 380 else { 381 ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage); 382 m_jit.storePtr(source, JITCompiler::addressFor(spillMe)); 383 } 384 #elif USE(JSVALUE32_64) 385 if (registerFormat & DataFormatJS) { 386 ASSERT(info.tagGPR() == source || info.payloadGPR() == source); 387 m_jit.store32(source, source == info.tagGPR() ? 
JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe)); 388 } else { 389 ASSERT(info.gpr() == source); 390 m_jit.store32(source, JITCompiler::payloadFor(spillMe)); 391 } 392 #endif 393 } 394 void silentSpillFPR(VirtualRegister spillMe, FPRReg source) 395 { 396 GenerationInfo& info = m_generationInfo[spillMe]; 397 ASSERT(info.registerFormat() == DataFormatDouble); 398 399 if (!info.needsSpill()) { 400 // it's either a constant or it's already been spilled 401 ASSERT(at(info.nodeIndex()).hasConstant() || info.spillFormat() != DataFormatNone); 402 return; 403 } 404 405 // it's neither a constant nor has it been spilled. 406 ASSERT(!at(info.nodeIndex()).hasConstant()); 407 ASSERT(info.spillFormat() == DataFormatNone); 408 ASSERT(info.fpr() == source); 409 410 m_jit.storeDouble(source, JITCompiler::addressFor(spillMe)); 411 } 412 413 void silentFillGPR(VirtualRegister spillMe, GPRReg target) 414 { 415 GenerationInfo& info = m_generationInfo[spillMe]; 416 417 NodeIndex nodeIndex = info.nodeIndex(); 418 Node& node = at(nodeIndex); 419 ASSERT(info.registerFormat() != DataFormatNone); 420 ASSERT(info.registerFormat() != DataFormatDouble); 421 DataFormat registerFormat = info.registerFormat(); 422 423 if (registerFormat == DataFormatInteger) { 424 ASSERT(info.gpr() == target); 425 ASSERT(isJSInteger(info.registerFormat())); 426 if (node.hasConstant()) { 427 ASSERT(isInt32Constant(nodeIndex)); 428 m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), target); 429 } else 430 m_jit.load32(JITCompiler::payloadFor(spillMe), target); 431 return; 432 } 433 434 if (registerFormat == DataFormatBoolean) { 435 #if USE(JSVALUE64) 436 ASSERT_NOT_REACHED(); 437 #elif USE(JSVALUE32_64) 438 ASSERT(info.gpr() == target); 439 if (node.hasConstant()) { 440 ASSERT(isBooleanConstant(nodeIndex)); 441 m_jit.move(Imm32(valueOfBooleanConstant(nodeIndex)), target); 442 } else 443 m_jit.load32(JITCompiler::payloadFor(spillMe), target); 444 #endif 445 return; 446 } 447 448 if (registerFormat == DataFormatCell) { 449 ASSERT(info.gpr() == target); 450 if (node.isConstant()) { 451 JSValue value = valueOfJSConstant(nodeIndex); 452 ASSERT(value.isCell()); 453 m_jit.move(ImmPtr(value.asCell()), target); 454 } else 455 m_jit.loadPtr(JITCompiler::payloadFor(spillMe), target); 456 return; 457 } 458 459 if (registerFormat == DataFormatStorage) { 460 ASSERT(info.gpr() == target); 461 m_jit.loadPtr(JITCompiler::addressFor(spillMe), target); 462 return; 463 } 464 465 ASSERT(registerFormat & DataFormatJS); 466 #if USE(JSVALUE64) 467 ASSERT(info.gpr() == target); 468 if (node.hasConstant()) 469 m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), target); 470 else if (info.spillFormat() == DataFormatInteger) { 471 ASSERT(registerFormat == DataFormatJSInteger); 472 m_jit.load32(JITCompiler::payloadFor(spillMe), target); 473 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, target); 474 } else if (info.spillFormat() == DataFormatDouble) { 475 ASSERT(registerFormat == DataFormatJSDouble); 476 m_jit.loadPtr(JITCompiler::addressFor(spillMe), target); 477 m_jit.subPtr(GPRInfo::tagTypeNumberRegister, target); 478 } else 479 m_jit.loadPtr(JITCompiler::addressFor(spillMe), target); 480 #else 481 ASSERT(info.tagGPR() == target || info.payloadGPR() == target); 482 if (node.hasConstant()) { 483 JSValue v = valueOfJSConstant(nodeIndex); 484 m_jit.move(info.tagGPR() == target ? 
Imm32(v.tag()) : Imm32(v.payload()), target); 485 } else if (info.spillFormat() == DataFormatInteger) { 486 ASSERT(registerFormat == DataFormatJSInteger); 487 if (info.payloadGPR() == target) 488 m_jit.load32(JITCompiler::payloadFor(spillMe), target); 489 else 490 m_jit.move(TrustedImm32(JSValue::Int32Tag), target); 491 } else 492 m_jit.load32(info.tagGPR() == target ? JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe), target); 493 #endif 494 } 495 496 void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg target) 497 { 498 GenerationInfo& info = m_generationInfo[spillMe]; 499 ASSERT(info.fpr() == target); 500 501 NodeIndex nodeIndex = info.nodeIndex(); 502 #if USE(JSVALUE64) 503 Node& node = at(nodeIndex); 504 ASSERT(info.registerFormat() == DataFormatDouble); 505 506 if (node.hasConstant()) { 507 ASSERT(isNumberConstant(nodeIndex)); 508 m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample); 509 m_jit.movePtrToDouble(canTrample, target); 510 return; 511 } 512 513 if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) { 514 // it was already spilled previously and not as a double, which means we need unboxing. 515 ASSERT(info.spillFormat() & DataFormatJS); 516 m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample); 517 unboxDouble(canTrample, target); 518 return; 519 } 520 521 m_jit.loadDouble(JITCompiler::addressFor(spillMe), target); 522 #elif USE(JSVALUE32_64) 523 UNUSED_PARAM(canTrample); 524 ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble); 525 m_jit.emitLoadDouble(nodeIndex, target); 526 #endif 527 } 528 529 void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg) 530 { 531 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 532 GPRReg gpr = iter.regID(); 533 if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) 534 silentSpillGPR(iter.name(), gpr); 535 } 536 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 537 if (iter.name() != InvalidVirtualRegister) 538 silentSpillFPR(iter.name(), iter.regID()); 539 } 540 } 541 void silentSpillAllRegisters(FPRReg exclude) 542 { 543 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 544 if (iter.name() != InvalidVirtualRegister) 545 silentSpillGPR(iter.name(), iter.regID()); 546 } 547 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 548 FPRReg fpr = iter.regID(); 549 if (iter.name() != InvalidVirtualRegister && fpr != exclude) 550 silentSpillFPR(iter.name(), fpr); 551 } 552 } 553 554 void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg) 555 { 556 GPRReg canTrample = GPRInfo::regT0; 557 if (exclude == GPRInfo::regT0) 558 canTrample = GPRInfo::regT1; 559 560 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 561 if (iter.name() != InvalidVirtualRegister) 562 silentFillFPR(iter.name(), canTrample, iter.regID()); 563 } 564 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 565 GPRReg gpr = iter.regID(); 566 if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) 567 silentFillGPR(iter.name(), gpr); 568 } 569 } 570 void silentFillAllRegisters(FPRReg exclude) 571 { 572 GPRReg canTrample = GPRInfo::regT0; 573 574 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 575 FPRReg fpr = iter.regID(); 576 if (iter.name() != InvalidVirtualRegister && fpr != exclude) 577 
silentFillFPR(iter.name(), canTrample, fpr); 578 } 579 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 580 if (iter.name() != InvalidVirtualRegister) 581 silentFillGPR(iter.name(), iter.regID()); 582 } 583 } 584 585 // These methods convert between doubles, and doubles boxed and JSValues. 586 #if USE(JSVALUE64) 587 GPRReg boxDouble(FPRReg fpr, GPRReg gpr) 588 { 589 return m_jit.boxDouble(fpr, gpr); 590 } 591 FPRReg unboxDouble(GPRReg gpr, FPRReg fpr) 592 { 593 return m_jit.unboxDouble(gpr, fpr); 594 } 595 GPRReg boxDouble(FPRReg fpr) 596 { 597 return boxDouble(fpr, allocate()); 598 } 599 #elif USE(JSVALUE32_64) 600 void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR) 601 { 602 m_jit.boxDouble(fpr, tagGPR, payloadGPR); 603 } 604 void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR) 605 { 606 m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR); 607 } 608 #endif 609 610 // Spill a VirtualRegister to the RegisterFile. 611 void spill(VirtualRegister spillMe) 612 { 613 GenerationInfo& info = m_generationInfo[spillMe]; 614 615 #if USE(JSVALUE32_64) 616 if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here 617 return; 618 #endif 619 // Check the GenerationInfo to see if this value need writing 620 // to the RegisterFile - if not, mark it as spilled & return. 621 if (!info.needsSpill()) { 622 info.setSpilled(); 623 return; 624 } 625 626 DataFormat spillFormat = info.registerFormat(); 627 switch (spillFormat) { 628 case DataFormatStorage: { 629 // This is special, since it's not a JS value - as in it's not visible to JS 630 // code. 631 m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe)); 632 info.spill(DataFormatStorage); 633 return; 634 } 635 636 #if USE(JSVALUE64) 637 case DataFormatDouble: { 638 m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe)); 639 info.spill(DataFormatDouble); 640 return; 641 } 642 643 case DataFormatInteger: { 644 m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe)); 645 info.spill(DataFormatInteger); 646 return; 647 } 648 649 default: 650 // The following code handles JSValues, int32s, and cells. 651 ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS); 652 653 GPRReg reg = info.gpr(); 654 // We need to box int32 and cell values ... 655 // but on JSVALUE64 boxing a cell is a no-op! 656 if (spillFormat == DataFormatInteger) 657 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg); 658 659 // Spill the value, and record it as spilled in its boxed form. 660 m_jit.storePtr(reg, JITCompiler::addressFor(spillMe)); 661 info.spill((DataFormat)(spillFormat | DataFormatJS)); 662 return; 663 #elif USE(JSVALUE32_64) 664 case DataFormatDouble: 665 case DataFormatJSDouble: { 666 // On JSVALUE32_64 boxing a double is a no-op. 667 m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe)); 668 info.spill(DataFormatJSDouble); 669 return; 670 } 671 default: 672 // The following code handles JSValues, int32s, cells and booleans. 673 ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat == DataFormatBoolean || (spillFormat & DataFormatJS)); 674 675 if (spillFormat & DataFormatJS) { // JSValue 676 m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe)); 677 m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe)); 678 } else { 679 GPRReg reg = info.gpr(); 680 m_jit.store32(reg, JITCompiler::payloadFor(spillMe)); 681 // We need to box int32s, booleans and cells. 
682 if (spillFormat == DataFormatInteger) 683 m_jit.store32(TrustedImm32(JSValue::Int32Tag), JITCompiler::tagFor(spillMe)); 684 else if (spillFormat == DataFormatCell) 685 m_jit.store32(TrustedImm32(JSValue::CellTag), JITCompiler::tagFor(spillMe)); 686 else 687 m_jit.store32(TrustedImm32(JSValue::BooleanTag), JITCompiler::tagFor(spillMe)); 688 } 689 info.spill((DataFormat)(spillFormat | DataFormatJS)); 690 return; 691 #endif 692 } 693 } 694 695 bool isStrictInt32(NodeIndex); 696 697 bool isKnownInteger(NodeIndex); 698 bool isKnownNumeric(NodeIndex); 699 bool isKnownCell(NodeIndex); 700 701 bool isKnownNotInteger(NodeIndex); 702 bool isKnownNotNumber(NodeIndex); 703 704 bool isKnownBoolean(NodeIndex); 705 706 bool isKnownNotCell(NodeIndex); 707 708 // Checks/accessors for constant values. 709 bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); } 710 bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); } 711 bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); } 712 bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); } 713 bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.isNumberConstant(nodeIndex); } 714 bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.isBooleanConstant(nodeIndex); } 715 bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.isFunctionConstant(nodeIndex); } 716 int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); } 717 double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); } 718 #if USE(JSVALUE32_64) 719 void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); } 720 #endif 721 JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); } 722 bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.valueOfBooleanConstant(nodeIndex); } 723 JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.valueOfFunctionConstant(nodeIndex); } 724 bool isNullConstant(NodeIndex nodeIndex) 725 { 726 if (!isConstant(nodeIndex)) 727 return false; 728 return valueOfJSConstant(nodeIndex).isNull(); 729 } 730 731 Identifier* identifier(unsigned index) 732 { 733 return &m_jit.codeBlock()->identifier(index); 734 } 735 736 // Spill all VirtualRegisters back to the RegisterFile. 737 void flushRegisters() 738 { 739 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 740 if (iter.name() != InvalidVirtualRegister) { 741 spill(iter.name()); 742 iter.release(); 743 } 744 } 745 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 746 if (iter.name() != InvalidVirtualRegister) { 747 spill(iter.name()); 748 iter.release(); 749 } 750 } 751 } 752 753 #ifndef NDEBUG 754 // Used to ASSERT flushRegisters() has been called prior to 755 // calling out from JIT code to a C helper function. 
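// (Illustrative sketch only, not code from this changeset; the operation and
// register names are hypothetical. A caller would typically pair the two as
//     flushRegisters();
//     ...
//     ASSERT(isFlushed());
//     callOperation(operationSomething, resultGPR, arg1GPR);
// so that a C++ helper can never observe a value that lives only in a
// machine register.)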
756 bool isFlushed() 757 { 758 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) { 759 if (iter.name() != InvalidVirtualRegister) 760 return false; 761 } 762 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) { 763 if (iter.name() != InvalidVirtualRegister) 764 return false; 765 } 766 return true; 767 } 768 #endif 769 770 #if USE(JSVALUE64) 771 MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex) 772 { 773 return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex))); 774 } 775 #endif 776 777 // Helper functions to enable code sharing in implementations of bit/shift ops. 778 void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result) 779 { 780 switch (op) { 781 case BitAnd: 782 m_jit.and32(Imm32(imm), op1, result); 783 break; 784 case BitOr: 785 m_jit.or32(Imm32(imm), op1, result); 786 break; 787 case BitXor: 788 m_jit.xor32(Imm32(imm), op1, result); 789 break; 790 default: 791 ASSERT_NOT_REACHED(); 792 } 793 } 794 void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result) 795 { 796 switch (op) { 797 case BitAnd: 798 m_jit.and32(op1, op2, result); 799 break; 800 case BitOr: 801 m_jit.or32(op1, op2, result); 802 break; 803 case BitXor: 804 m_jit.xor32(op1, op2, result); 805 break; 806 default: 807 ASSERT_NOT_REACHED(); 808 } 809 } 810 void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result) 811 { 812 switch (op) { 813 case BitRShift: 814 m_jit.rshift32(op1, Imm32(shiftAmount), result); 815 break; 816 case BitLShift: 817 m_jit.lshift32(op1, Imm32(shiftAmount), result); 818 break; 819 case BitURShift: 820 m_jit.urshift32(op1, Imm32(shiftAmount), result); 821 break; 822 default: 823 ASSERT_NOT_REACHED(); 824 } 825 } 826 void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result) 827 { 828 switch (op) { 829 case BitRShift: 830 m_jit.rshift32(op1, shiftAmount, result); 831 break; 832 case BitLShift: 833 m_jit.lshift32(op1, shiftAmount, result); 834 break; 835 case BitURShift: 836 m_jit.urshift32(op1, shiftAmount, result); 837 break; 838 default: 839 ASSERT_NOT_REACHED(); 840 } 841 } 842 843 // Returns the node index of the branch node if peephole is okay, NoNode otherwise. 844 NodeIndex detectPeepHoleBranch() 845 { 846 NodeIndex lastNodeIndex = m_jit.graph().m_blocks[m_block]->end - 1; 847 848 // Check that no intervening nodes will be generated. 849 for (NodeIndex index = m_compileIndex + 1; index < lastNodeIndex; ++index) { 850 if (at(index).shouldGenerate()) 851 return NoNode; 852 } 853 854 // Check if the lastNode is a branch on this node. 855 Node& lastNode = at(lastNodeIndex); 856 return lastNode.op == Branch && lastNode.child1() == m_compileIndex ? 
lastNodeIndex : NoNode; 857 } 858 859 void nonSpeculativeValueToNumber(Node&); 860 void nonSpeculativeValueToInt32(Node&); 861 void nonSpeculativeUInt32ToNumber(Node&); 862 863 void nonSpeculativeKnownConstantArithOp(NodeType op, NodeIndex regChild, NodeIndex immChild, bool commute); 864 void nonSpeculativeBasicArithOp(NodeType op, Node&); 865 866 #if USE(JSVALUE64) 867 JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById); 868 void cachedPutById(GPRReg base, GPRReg value, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); 869 void cachedGetMethod(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); 870 #elif USE(JSVALUE32_64) 871 JITCompiler::Call cachedGetById(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById); 872 void cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); 873 void cachedGetMethod(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump()); 874 #endif 875 876 void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false); 877 void nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert = false); 878 bool nonSpeculativeCompareNull(Node&, NodeIndex operand, bool invert = false); 879 880 void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction); 881 void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction); 882 bool nonSpeculativeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction); 883 884 void nonSpeculativePeepholeStrictEq(Node&, NodeIndex branchNodeIndex, bool invert = false); 885 void nonSpeculativeNonPeepholeStrictEq(Node&, bool invert = false); 886 bool nonSpeculativeStrictEq(Node&, bool invert = false); 887 888 MacroAssembler::Address addressOfCallData(int idx) 889 { 890 return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + idx) * static_cast<int>(sizeof(Register))); 891 } 892 893 #if USE(JSVALUE32_64) 894 MacroAssembler::Address tagOfCallData(int idx) 895 { 896 return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + idx) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); 897 } 898 899 MacroAssembler::Address payloadOfCallData(int idx) 900 { 901 return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + idx) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); 902 } 903 #endif 904 905 void emitCall(Node&); 906 907 // Called once a node has completed code generation but prior to setting 908 // its result, to free up its children. 
(This must happen prior to setting 909 // the nodes result, since the node may have the same VirtualRegister as 910 // a child, and as such will use the same GeneratioInfo). 911 void useChildren(Node&); 912 913 // These method called to initialize the the GenerationInfo 914 // to describe the result of an operation. 915 void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren) 916 { 917 Node& node = at(nodeIndex); 918 if (mode == CallUseChildren) 919 useChildren(node); 920 921 VirtualRegister virtualRegister = node.virtualRegister(); 922 GenerationInfo& info = m_generationInfo[virtualRegister]; 923 924 if (format == DataFormatInteger) { 925 m_jit.jitAssertIsInt32(reg); 926 m_gprs.retain(reg, virtualRegister, SpillOrderInteger); 927 info.initInteger(nodeIndex, node.refCount(), reg); 928 } else { 929 #if USE(JSVALUE64) 930 ASSERT(format == DataFormatJSInteger); 931 m_jit.jitAssertIsJSInt32(reg); 932 m_gprs.retain(reg, virtualRegister, SpillOrderJS); 933 info.initJSValue(nodeIndex, node.refCount(), reg, format); 934 #elif USE(JSVALUE32_64) 935 ASSERT_NOT_REACHED(); 936 #endif 937 } 938 } 939 void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode) 940 { 941 integerResult(reg, nodeIndex, DataFormatInteger, mode); 942 } 943 void noResult(NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren) 944 { 945 if (mode == UseChildrenCalledExplicitly) 946 return; 947 Node& node = at(nodeIndex); 948 useChildren(node); 949 } 950 void cellResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren) 951 { 952 Node& node = at(nodeIndex); 953 if (mode == CallUseChildren) 954 useChildren(node); 955 956 VirtualRegister virtualRegister = node.virtualRegister(); 957 m_gprs.retain(reg, virtualRegister, SpillOrderCell); 958 GenerationInfo& info = m_generationInfo[virtualRegister]; 959 info.initCell(nodeIndex, node.refCount(), reg); 960 } 961 void booleanResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren) 962 { 963 Node& node = at(nodeIndex); 964 if (mode == CallUseChildren) 965 useChildren(node); 966 967 VirtualRegister virtualRegister = node.virtualRegister(); 968 m_gprs.retain(reg, virtualRegister, SpillOrderBoolean); 969 GenerationInfo& info = m_generationInfo[virtualRegister]; 970 info.initBoolean(nodeIndex, node.refCount(), reg); 971 } 972 #if USE(JSVALUE64) 973 void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) 974 { 975 if (format == DataFormatJSInteger) 976 m_jit.jitAssertIsJSInt32(reg); 977 978 Node& node = at(nodeIndex); 979 if (mode == CallUseChildren) 980 useChildren(node); 981 982 VirtualRegister virtualRegister = node.virtualRegister(); 983 m_gprs.retain(reg, virtualRegister, SpillOrderJS); 984 GenerationInfo& info = m_generationInfo[virtualRegister]; 985 info.initJSValue(nodeIndex, node.refCount(), reg, format); 986 } 987 void jsValueResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode) 988 { 989 jsValueResult(reg, nodeIndex, DataFormatJS, mode); 990 } 991 #elif USE(JSVALUE32_64) 992 void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren) 993 { 994 Node& node = at(nodeIndex); 995 if (mode == CallUseChildren) 996 useChildren(node); 997 998 VirtualRegister virtualRegister = node.virtualRegister(); 999 m_gprs.retain(tag, virtualRegister, SpillOrderJS); 1000 m_gprs.retain(payload, virtualRegister, 
SpillOrderJS); 1001 GenerationInfo& info = m_generationInfo[virtualRegister]; 1002 info.initJSValue(nodeIndex, node.refCount(), tag, payload, format); 1003 } 1004 void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, UseChildrenMode mode) 1005 { 1006 jsValueResult(tag, payload, nodeIndex, DataFormatJS, mode); 1007 } 1008 #endif 1009 void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren) 1010 { 1011 Node& node = at(nodeIndex); 1012 if (mode == CallUseChildren) 1013 useChildren(node); 1014 1015 VirtualRegister virtualRegister = node.virtualRegister(); 1016 m_gprs.retain(reg, virtualRegister, SpillOrderStorage); 1017 GenerationInfo& info = m_generationInfo[virtualRegister]; 1018 info.initStorage(nodeIndex, node.refCount(), reg); 1019 } 1020 void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren) 1021 { 1022 Node& node = at(nodeIndex); 1023 if (mode == CallUseChildren) 1024 useChildren(node); 1025 1026 VirtualRegister virtualRegister = node.virtualRegister(); 1027 m_fprs.retain(reg, virtualRegister, SpillOrderDouble); 1028 GenerationInfo& info = m_generationInfo[virtualRegister]; 1029 info.initDouble(nodeIndex, node.refCount(), reg); 1030 } 1031 void initConstantInfo(NodeIndex nodeIndex) 1032 { 1033 ASSERT(isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex) || isJSConstant(nodeIndex)); 1034 Node& node = at(nodeIndex); 1035 m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount()); 1036 } 1037 1038 // These methods used to sort arguments into the correct registers. 1039 // On X86 we use cdecl calling conventions, which pass all arguments on the 1040 // stack. On other architectures we may need to sort values into the 1041 // correct registers. 1042 #if !NUMBER_OF_ARGUMENT_REGISTERS 1043 unsigned m_callArgumentIndex; 1044 void resetCallArguments() { m_callArgumentIndex = 0; } 1045 1046 // These methods are using internally to implement the callOperation methods. 
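// (Illustrative note, not part of this changeset: on an all-stack convention
// each argument is simply poke()d into the next free slot, so a call such as
// setupArgumentsWithExecState(arg1, arg2) expands to roughly
//     poke(callFrameRegister, 0); poke(arg1, 1); poke(arg2, 2);
// while an FPR argument is written with storeDouble and advances the slot
// index by sizeof(double) / sizeof(void*).)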
1047 void addCallArgument(GPRReg value) 1048 { 1049 m_jit.poke(value, m_callArgumentIndex++); 1050 } 1051 void addCallArgument(TrustedImm32 imm) 1052 { 1053 m_jit.poke(imm, m_callArgumentIndex++); 1054 } 1055 void addCallArgument(TrustedImmPtr pointer) 1056 { 1057 m_jit.poke(pointer, m_callArgumentIndex++); 1058 } 1059 void addCallArgument(FPRReg value) 1060 { 1061 m_jit.storeDouble(value, JITCompiler::Address(JITCompiler::stackPointerRegister, m_callArgumentIndex * sizeof(void*))); 1062 m_callArgumentIndex += sizeof(double) / sizeof(void*); 1063 } 1064 1065 ALWAYS_INLINE void setupArguments(FPRReg arg1) 1066 { 1067 resetCallArguments(); 1068 addCallArgument(arg1); 1069 } 1070 1071 ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) 1072 { 1073 resetCallArguments(); 1074 addCallArgument(arg1); 1075 addCallArgument(arg2); 1076 } 1077 1078 ALWAYS_INLINE void setupArgumentsExecState() 1079 { 1080 resetCallArguments(); 1081 addCallArgument(GPRInfo::callFrameRegister); 1082 } 1083 1084 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1) 1085 { 1086 resetCallArguments(); 1087 addCallArgument(GPRInfo::callFrameRegister); 1088 addCallArgument(arg1); 1089 } 1090 1091 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1) 1092 { 1093 resetCallArguments(); 1094 addCallArgument(GPRInfo::callFrameRegister); 1095 addCallArgument(arg1); 1096 } 1097 1098 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) 1099 { 1100 resetCallArguments(); 1101 addCallArgument(GPRInfo::callFrameRegister); 1102 addCallArgument(arg1); 1103 addCallArgument(arg2); 1104 } 1105 1106 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2) 1107 { 1108 resetCallArguments(); 1109 addCallArgument(GPRInfo::callFrameRegister); 1110 addCallArgument(arg1); 1111 addCallArgument(arg2); 1112 } 1113 1114 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2) 1115 { 1116 resetCallArguments(); 1117 addCallArgument(GPRInfo::callFrameRegister); 1118 addCallArgument(arg1); 1119 addCallArgument(arg2); 1120 } 1121 1122 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2) 1123 { 1124 resetCallArguments(); 1125 addCallArgument(GPRInfo::callFrameRegister); 1126 addCallArgument(arg1); 1127 addCallArgument(arg2); 1128 } 1129 1130 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3) 1131 { 1132 resetCallArguments(); 1133 addCallArgument(GPRInfo::callFrameRegister); 1134 addCallArgument(arg1); 1135 addCallArgument(arg2); 1136 addCallArgument(arg3); 1137 } 1138 1139 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3) 1140 { 1141 resetCallArguments(); 1142 addCallArgument(GPRInfo::callFrameRegister); 1143 addCallArgument(arg1); 1144 addCallArgument(arg2); 1145 addCallArgument(arg3); 1146 } 1147 1148 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) 1149 { 1150 resetCallArguments(); 1151 addCallArgument(GPRInfo::callFrameRegister); 1152 addCallArgument(arg1); 1153 addCallArgument(arg2); 1154 addCallArgument(arg3); 1155 addCallArgument(arg4); 1156 } 1157 1158 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) 1159 { 1160 resetCallArguments(); 1161 addCallArgument(GPRInfo::callFrameRegister); 1162 addCallArgument(arg1); 1163 addCallArgument(arg2); 1164 addCallArgument(arg3); 1165 addCallArgument(arg4); 1166 } 1167 1168 ALWAYS_INLINE void 
setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) 1169 { 1170 resetCallArguments(); 1171 addCallArgument(GPRInfo::callFrameRegister); 1172 addCallArgument(arg1); 1173 addCallArgument(arg2); 1174 addCallArgument(arg3); 1175 addCallArgument(arg4); 1176 } 1177 1178 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) 1179 { 1180 resetCallArguments(); 1181 addCallArgument(GPRInfo::callFrameRegister); 1182 addCallArgument(arg1); 1183 addCallArgument(arg2); 1184 addCallArgument(arg3); 1185 addCallArgument(arg4); 1186 } 1187 1188 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) 1189 { 1190 resetCallArguments(); 1191 addCallArgument(GPRInfo::callFrameRegister); 1192 addCallArgument(arg1); 1193 addCallArgument(arg2); 1194 addCallArgument(arg3); 1195 addCallArgument(arg4); 1196 addCallArgument(arg5); 1197 } 1198 #endif // !NUMBER_OF_ARGUMENT_REGISTERS 1199 // These methods are suitable for any calling convention that provides for 1200 // at least 4 argument registers, e.g. X86_64, ARMv7. 1201 #if NUMBER_OF_ARGUMENT_REGISTERS >= 4 1202 template<GPRReg destA, GPRReg destB> 1203 void setupTwoStubArgs(GPRReg srcA, GPRReg srcB) 1204 { 1205 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: 1206 // (1) both are already in arg regs, the right way around. 1207 // (2) both are already in arg regs, the wrong way around. 1208 // (3) neither are currently in arg registers. 1209 // (4) srcA in in its correct reg. 1210 // (5) srcA in in the incorrect reg. 1211 // (6) srcB in in its correct reg. 1212 // (7) srcB in in the incorrect reg. 1213 // 1214 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in 1215 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in 1216 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2 1217 // (requires a swap) and 7 (must move srcB first, to avoid trampling.) 1218 1219 if (srcB != destA) { 1220 // Handle the easy cases - two simple moves. 1221 m_jit.move(srcA, destA); 1222 m_jit.move(srcB, destB); 1223 } else if (srcA != destB) { 1224 // Handle the non-swap case - just put srcB in place first. 1225 m_jit.move(srcB, destB); 1226 m_jit.move(srcA, destA); 1227 } else 1228 m_jit.swap(destA, destB); 1229 } 1230 #if CPU(X86_64) 1231 template<FPRReg destA, FPRReg destB> 1232 void setupTwoStubArgs(FPRReg srcA, FPRReg srcB) 1233 { 1234 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: 1235 // (1) both are already in arg regs, the right way around. 1236 // (2) both are already in arg regs, the wrong way around. 1237 // (3) neither are currently in arg registers. 1238 // (4) srcA in in its correct reg. 1239 // (5) srcA in in the incorrect reg. 1240 // (6) srcB in in its correct reg. 1241 // (7) srcB in in the incorrect reg. 1242 // 1243 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in 1244 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in 1245 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2 1246 // (requires a swap) and 7 (must move srcB first, to avoid trampling.) 1247 1248 if (srcB != destA) { 1249 // Handle the easy cases - two simple moves. 
1250 m_jit.moveDouble(srcA, destA); 1251 m_jit.moveDouble(srcB, destB); 1252 return; 1253 } 1254 1255 if (srcA != destB) { 1256 // Handle the non-swap case - just put srcB in place first. 1257 m_jit.moveDouble(srcB, destB); 1258 m_jit.moveDouble(srcA, destA); 1259 return; 1260 } 1261 1262 ASSERT(srcB == destA && srcA == destB); 1263 // Need to swap; pick a temporary register. 1264 FPRReg temp; 1265 if (destA != FPRInfo::argumentFPR3 && destA != FPRInfo::argumentFPR3) 1266 temp = FPRInfo::argumentFPR3; 1267 else if (destA != FPRInfo::argumentFPR2 && destA != FPRInfo::argumentFPR2) 1268 temp = FPRInfo::argumentFPR2; 1269 else { 1270 ASSERT(destA != FPRInfo::argumentFPR1 && destA != FPRInfo::argumentFPR1); 1271 temp = FPRInfo::argumentFPR1; 1272 } 1273 m_jit.moveDouble(destA, temp); 1274 m_jit.moveDouble(destB, destA); 1275 m_jit.moveDouble(temp, destB); 1276 } 1277 #endif 1278 void setupStubArguments(GPRReg arg1, GPRReg arg2) 1279 { 1280 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2); 1281 } 1282 void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) 1283 { 1284 // If neither of arg2/arg3 are in our way, then we can move arg1 into place. 1285 // Then we can use setupTwoStubArgs to fix arg2/arg3. 1286 if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) { 1287 m_jit.move(arg1, GPRInfo::argumentGPR1); 1288 setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3); 1289 return; 1290 } 1291 1292 // If neither of arg1/arg3 are in our way, then we can move arg2 into place. 1293 // Then we can use setupTwoStubArgs to fix arg1/arg3. 1294 if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) { 1295 m_jit.move(arg2, GPRInfo::argumentGPR2); 1296 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3); 1297 return; 1298 } 1299 1300 // If neither of arg1/arg2 are in our way, then we can move arg3 into place. 1301 // Then we can use setupTwoStubArgs to fix arg1/arg2. 1302 if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) { 1303 m_jit.move(arg3, GPRInfo::argumentGPR3); 1304 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2); 1305 return; 1306 } 1307 1308 // If we get here, we haven't been able to move any of arg1/arg2/arg3. 1309 // Since all three are blocked, then all three must already be in the argument register. 1310 // But are they in the right ones? 1311 1312 // First, ensure arg1 is in place. 1313 if (arg1 != GPRInfo::argumentGPR1) { 1314 m_jit.swap(arg1, GPRInfo::argumentGPR1); 1315 1316 // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be. 1317 ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1); 1318 // If arg2 was in argumentGPR1 it no longer is (due to the swap). 1319 // Otherwise arg3 must have been. Mark him as moved. 1320 if (arg2 == GPRInfo::argumentGPR1) 1321 arg2 = arg1; 1322 else 1323 arg3 = arg1; 1324 } 1325 1326 // Either arg2 & arg3 need swapping, or we're all done. 
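// (Worked example, added for illustration only: suppose arg1 arrives in
// argumentGPR2, arg2 in argumentGPR3 and arg3 in argumentGPR1. None of the
// three shortcuts above apply, so arg1 is swapped into argumentGPR1, which
// leaves arg3's value sitting in argumentGPR2; the final swap of
// argumentGPR2 with argumentGPR3 then puts both arg2 and arg3 into their
// proper registers.)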
1327 ASSERT((arg2 == GPRInfo::argumentGPR2 || arg3 == GPRInfo::argumentGPR3) 1328 || (arg2 == GPRInfo::argumentGPR3 || arg3 == GPRInfo::argumentGPR2)); 1329 1330 if (arg2 != GPRInfo::argumentGPR2) 1331 m_jit.swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3); 1332 } 1333 1334 #if CPU(X86_64) 1335 ALWAYS_INLINE void setupArguments(FPRReg arg1) 1336 { 1337 m_jit.moveDouble(arg1, FPRInfo::argumentFPR0); 1338 } 1339 1340 ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) 1341 { 1342 setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2); 1343 } 1344 #else 1345 ALWAYS_INLINE void setupArguments(FPRReg arg1) 1346 { 1347 m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); 1348 } 1349 1350 ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) 1351 { 1352 m_jit.assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); 1353 m_jit.assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2); 1354 } 1355 #endif 1356 1357 ALWAYS_INLINE void setupArgumentsExecState() 1358 { 1359 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1360 } 1361 1362 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1) 1363 { 1364 m_jit.move(arg1, GPRInfo::argumentGPR1); 1365 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1366 } 1367 1368 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1) 1369 { 1370 m_jit.move(arg1, GPRInfo::argumentGPR1); 1371 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1372 } 1373 1374 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) 1375 { 1376 setupStubArguments(arg1, arg2); 1377 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1378 } 1379 1380 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2) 1381 { 1382 m_jit.move(arg1, GPRInfo::argumentGPR1); 1383 m_jit.move(arg2, GPRInfo::argumentGPR2); 1384 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1385 } 1386 1387 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2) 1388 { 1389 m_jit.move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample! 
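// (Clarifying note, illustrative only: arg2 may currently live in
// argumentGPR1, so the immediate must only be written into argumentGPR1
// after arg2 has been copied out of the way.)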
1390 m_jit.move(arg1, GPRInfo::argumentGPR1); 1391 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1392 } 1393 1394 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2) 1395 { 1396 m_jit.move(arg1, GPRInfo::argumentGPR1); 1397 m_jit.move(arg2, GPRInfo::argumentGPR2); 1398 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1399 } 1400 1401 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2) 1402 { 1403 m_jit.move(arg1, GPRInfo::argumentGPR1); 1404 m_jit.move(arg2, GPRInfo::argumentGPR2); 1405 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1406 } 1407 1408 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3) 1409 { 1410 setupStubArguments(arg1, arg2, arg3); 1411 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1412 } 1413 1414 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3) 1415 { 1416 setupStubArguments(arg1, arg2); 1417 m_jit.move(arg3, GPRInfo::argumentGPR3); 1418 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1419 } 1420 1421 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3) 1422 { 1423 setupStubArguments(arg1, arg2); 1424 m_jit.move(arg3, GPRInfo::argumentGPR3); 1425 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1426 } 1427 1428 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3) 1429 { 1430 m_jit.move(arg1, GPRInfo::argumentGPR1); 1431 m_jit.move(arg2, GPRInfo::argumentGPR2); 1432 m_jit.move(arg3, GPRInfo::argumentGPR3); 1433 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1434 } 1435 1436 #endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4 1437 // These methods are suitable for any calling convention that provides for 1438 // exactly 4 argument registers, e.g. ARMv7. 1439 #if NUMBER_OF_ARGUMENT_REGISTERS == 4 1440 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) 1441 { 1442 m_jit.poke(arg4); 1443 setupArgumentsWithExecState(arg1, arg2, arg3); 1444 } 1445 1446 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) 1447 { 1448 m_jit.poke(arg4); 1449 setupArgumentsWithExecState(arg1, arg2, arg3); 1450 } 1451 1452 ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) 1453 { 1454 m_jit.poke(arg4); 1455 setupArgumentsWithExecState(arg1, arg2, arg3); 1456 } 1457 1458 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) 1459 { 1460 m_jit.poke(arg4); 1461 setupArgumentsWithExecState(arg1, arg2, arg3); 1462 } 1463 1464 ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) 1465 { 1466 m_jit.poke(arg5, 1); 1467 m_jit.poke(arg4); 1468 setupArgumentsWithExecState(arg1, arg2, arg3); 1469 } 1470 #endif // NUMBER_OF_ARGUMENT_REGISTERS == 4 1471 1472 // These methods add calls to C++ helper functions. 1473 // These methods are broadly value representation specific (i.e. 1474 // deal with the fact that a JSValue may be passed in one or two 1475 // machine registers, and delegate the calling convention specific 1476 // decision as to how to fill the regsiters to setupArguments* methods. 
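// (Illustrative sketch, not code from this changeset: the same helper type is
// reached quite differently under the two value representations. For an
// S_DFGOperation_EJJ comparison helper, JSVALUE64 passes each JSValue in a
// single GPR,
//     callOperation(compareHelper, resultGPR, arg1GPR, arg2GPR);
// whereas JSVALUE32_64 passes each value as a tag/payload register pair,
//     callOperation(compareHelper, resultGPR,
//                   arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
// "compareHelper" and the register names here are hypothetical stand-ins.)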
1477 #if USE(JSVALUE64) 1478 JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer) 1479 { 1480 setupArgumentsWithExecState(TrustedImmPtr(pointer)); 1481 return appendCallWithExceptionCheckSetResult(operation, result); 1482 } 1483 JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1) 1484 { 1485 setupArguments(arg1); 1486 JITCompiler::Call call = m_jit.appendCall(operation); 1487 m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result); 1488 return call; 1489 } 1490 JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg result, GPRReg arg1, Identifier* identifier) 1491 { 1492 setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier)); 1493 return appendCallWithExceptionCheckSetResult(operation, result); 1494 } 1495 JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier) 1496 { 1497 setupArgumentsWithExecState(TrustedImmPtr(identifier)); 1498 return appendCallWithExceptionCheckSetResult(operation, result); 1499 } 1500 JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1) 1501 { 1502 setupArgumentsWithExecState(arg1); 1503 return appendCallWithExceptionCheckSetResult(operation, result); 1504 } 1505 JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size) 1506 { 1507 setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size)); 1508 return appendCallWithExceptionCheckSetResult(operation, result); 1509 } 1510 JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants) 1511 { 1512 setupArgumentsWithExecState(Imm32(startConstant), Imm32(numConstants)); 1513 return appendCallWithExceptionCheckSetResult(operation, result); 1514 } 1515 JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer) 1516 { 1517 setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer)); 1518 return appendCallWithExceptionCheckSetResult(operation, result); 1519 } 1520 JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier) 1521 { 1522 setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier)); 1523 return appendCallWithExceptionCheckSetResult(operation, result); 1524 } 1525 JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2) 1526 { 1527 setupArgumentsWithExecState(arg1, arg2); 1528 return appendCallWithExceptionCheckSetResult(operation, result); 1529 } 1530 JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1) 1531 { 1532 setupArgumentsWithExecState(arg1); 1533 return appendCallWithExceptionCheckSetResult(operation, result); 1534 } 1535 JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result) 1536 { 1537 setupArgumentsExecState(); 1538 return appendCallWithExceptionCheckSetResult(operation, result); 1539 } 1540 JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1) 1541 { 1542 setupArgumentsWithExecState(arg1); 1543 return appendCallWithExceptionCheckSetResult(operation, result); 1544 } 1545 JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell) 1546 { 1547 setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); 1548 return appendCallWithExceptionCheckSetResult(operation, result); 1549 } 1550 JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg 
result, GPRReg arg1) 1551 { 1552 setupArgumentsWithExecState(arg1); 1553 return appendCallWithExceptionCheckSetResult(operation, result); 1554 } 1555 JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) 1556 { 1557 setupArgumentsWithExecState(arg1, arg2); 1558 return appendCallWithExceptionCheckSetResult(operation, result); 1559 } 1560 JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2) 1561 { 1562 setupArgumentsWithExecState(arg1, arg2); 1563 return appendCallWithExceptionCheckSetResult(operation, result); 1564 } 1565 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::Imm32 imm) 1566 { 1567 setupArgumentsWithExecState(arg1, MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value))))); 1568 return appendCallWithExceptionCheckSetResult(operation, result); 1569 } 1570 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::Imm32 imm, GPRReg arg2) 1571 { 1572 setupArgumentsWithExecState(MacroAssembler::ImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2); 1573 return appendCallWithExceptionCheckSetResult(operation, result); 1574 } 1575 JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2) 1576 { 1577 setupArgumentsWithExecState(arg1, arg2); 1578 return appendCallWithExceptionCheckSetResult(operation, result); 1579 } 1580 JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer) 1581 { 1582 setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer)); 1583 return appendCallWithExceptionCheck(operation); 1584 } 1585 JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier) 1586 { 1587 setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier)); 1588 return appendCallWithExceptionCheck(operation); 1589 } 1590 JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) 1591 { 1592 setupArgumentsWithExecState(arg1, arg2, arg3); 1593 return appendCallWithExceptionCheck(operation); 1594 } 1595 JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) 1596 { 1597 setupArgumentsWithExecState(arg1, arg2, arg3); 1598 return appendCallWithExceptionCheck(operation); 1599 } 1600 JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) 1601 { 1602 setupArgumentsWithExecState(arg1, arg2, arg3); 1603 return appendCallWithExceptionCheck(operation); 1604 } 1605 JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) 1606 { 1607 setupArgumentsWithExecState(arg1, arg2, arg3); 1608 return appendCallWithExceptionCheck(operation); 1609 } 1610 JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1) 1611 { 1612 setupArgumentsWithExecState(arg1); 1613 return appendCallWithExceptionCheckSetResult(operation, result); 1614 } 1615 JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2) 1616 { 1617 setupArguments(arg1, arg2); 1618 return appendCallSetResult(operation, result); 1619 } 1620 #else 1621 JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1) 1622 { 1623 setupArguments(arg1); 1624 JITCompiler::Call call = 
m_jit.appendCall(operation); 1625 m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result); 1626 return call; 1627 } 1628 JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer) 1629 { 1630 setupArgumentsWithExecState(TrustedImmPtr(pointer)); 1631 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1632 } 1633 JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer) 1634 { 1635 setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer)); 1636 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1637 } 1638 JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier) 1639 { 1640 setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier)); 1641 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1642 } 1643 JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1) 1644 { 1645 setupArgumentsWithExecState(arg1); 1646 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1647 } 1648 JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier) 1649 { 1650 setupArgumentsWithExecState(TrustedImmPtr(identifier)); 1651 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1652 } 1653 JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1) 1654 { 1655 setupArgumentsWithExecState(arg1); 1656 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1657 } 1658 JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size) 1659 { 1660 setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size)); 1661 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1662 } 1663 JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants) 1664 { 1665 setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants)); 1666 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1667 } 1668 JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer) 1669 { 1670 setupArgumentsWithExecState(arg1Payload, arg1Tag, ImmPtr(pointer)); 1671 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1672 } 1673 JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2) 1674 { 1675 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2); 1676 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1677 } 1678 JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier) 1679 { 1680 setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier)); 1681 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1682 } 1683 JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, 
GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2) 1684 { 1685 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2); 1686 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1687 } 1688 JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload) 1689 { 1690 setupArgumentsWithExecState(arg1Payload, arg1Tag); 1691 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1692 } 1693 JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result) 1694 { 1695 setupArgumentsExecState(); 1696 return appendCallWithExceptionCheckSetResult(operation, result); 1697 } 1698 JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1) 1699 { 1700 setupArgumentsWithExecState(arg1); 1701 return appendCallWithExceptionCheckSetResult(operation, result); 1702 } 1703 JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell) 1704 { 1705 setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); 1706 return appendCallWithExceptionCheckSetResult(operation, result); 1707 } 1708 JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) 1709 { 1710 setupArgumentsWithExecState(arg1Payload, arg1Tag); 1711 return appendCallWithExceptionCheckSetResult(operation, result); 1712 } 1713 JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) 1714 { 1715 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag); 1716 return appendCallWithExceptionCheckSetResult(operation, result); 1717 } 1718 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) 1719 { 1720 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag); 1721 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1722 } 1723 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::Imm32 imm) 1724 { 1725 setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag)); 1726 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1727 } 1728 JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::Imm32 imm, GPRReg arg2Tag, GPRReg arg2Payload) 1729 { 1730 setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag); 1731 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1732 } 1733 JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload) 1734 { 1735 setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag); 1736 return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); 1737 } 1738 JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer) 1739 { 1740 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer)); 1741 return appendCallWithExceptionCheck(operation); 1742 } 1743 JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, 
GPRReg arg2, Identifier* identifier) 1744 { 1745 setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier)); 1746 return appendCallWithExceptionCheck(operation); 1747 } 1748 JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload) 1749 { 1750 setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag); 1751 return appendCallWithExceptionCheck(operation); 1752 } 1753 JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) 1754 { 1755 setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag); 1756 return appendCallWithExceptionCheck(operation); 1757 } 1758 JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload) 1759 { 1760 setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag); 1761 return appendCallWithExceptionCheck(operation); 1762 } 1763 1764 JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload) 1765 { 1766 setupArgumentsWithExecState(arg1Payload, arg1Tag); 1767 return appendCallWithExceptionCheckSetResult(operation, result); 1768 } 1769 1770 JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2) 1771 { 1772 setupArguments(arg1, arg2); 1773 return appendCallSetResult(operation, result); 1774 } 1775 #endif 1776 1777 // These methods add call instructions, with optional exception checks & setting results. 1778 JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function) 1779 { 1780 return m_jit.addExceptionCheck(m_jit.appendCall(function), at(m_compileIndex).codeOrigin); 1781 } 1782 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result) 1783 { 1784 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1785 m_jit.move(GPRInfo::returnValueGPR, result); 1786 return call; 1787 } 1788 void setupResults(GPRReg destA, GPRReg destB) 1789 { 1790 GPRReg srcA = GPRInfo::returnValueGPR; 1791 GPRReg srcB = GPRInfo::returnValueGPR2; 1792 1793 if (srcB != destA) { 1794 // Handle the easy cases - two simple moves. 1795 m_jit.move(srcA, destA); 1796 m_jit.move(srcB, destB); 1797 } else if (srcA != destB) { 1798 // Handle the non-swap case - just put srcB in place first. 
1799 m_jit.move(srcB, destB); 1800 m_jit.move(srcA, destA); 1801 } else 1802 m_jit.swap(destA, destB); 1803 } 1804 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2) 1805 { 1806 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1807 setupResults(result1, result2); 1808 return call; 1809 } 1810 #if CPU(X86) 1811 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1812 { 1813 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1814 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister); 1815 m_jit.loadDouble(JITCompiler::stackPointerRegister, result); 1816 return call; 1817 } 1818 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1819 { 1820 JITCompiler::Call call = m_jit.appendCall(function); 1821 m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister); 1822 m_jit.loadDouble(JITCompiler::stackPointerRegister, result); 1823 return call; 1824 } 1825 #elif CPU(ARM) 1826 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1827 { 1828 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1829 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); 1830 return call; 1831 } 1832 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1833 { 1834 JITCompiler::Call call = m_jit.appendCall(function); 1835 m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2); 1836 return call; 1837 } 1838 #else 1839 JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result) 1840 { 1841 JITCompiler::Call call = appendCallWithExceptionCheck(function); 1842 m_jit.moveDouble(FPRInfo::returnValueFPR, result); 1843 return call; 1844 } 1845 JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result) 1846 { 1847 JITCompiler::Call call = m_jit.appendCall(function); 1848 m_jit.moveDouble(FPRInfo::returnValueFPR, result); 1849 return call; 1850 } 1851 #endif 1852 1853 void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination) 1854 { 1855 m_branches.append(BranchRecord(jump, destination)); 1856 } 1857 1858 void linkBranches() 1859 { 1860 for (size_t i = 0; i < m_branches.size(); ++i) { 1861 BranchRecord& branch = m_branches[i]; 1862 branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit); 1863 } 1864 } 1865 1866 BasicBlock* block() 1867 { 1868 return m_jit.graph().m_blocks[m_block].get(); 1869 } 1870 1871 #ifndef NDEBUG 1872 void dump(const char* label = 0); 1873 #endif 1874 1875 #if DFG_ENABLE(CONSISTENCY_CHECK) 1876 void checkConsistency(); 1877 #else 1878 void checkConsistency() { } 1879 #endif 152 1880 153 1881 bool isInteger(NodeIndex nodeIndex) … … 296 2024 } 297 2025 2026 // The JIT, while also provides MacroAssembler functionality. 2027 JITCompiler& m_jit; 2028 // The current node being generated. 2029 BlockIndex m_block; 2030 NodeIndex m_compileIndex; 2031 // Virtual and physical register maps. 
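// (Descriptive note, added for illustration: m_generationInfo is indexed by
// VirtualRegister and records where each live value currently is, whether in
// a GPR or FPR, spilled to the RegisterFile, or held as a constant, while
// m_gprs and m_fprs map each machine register back to the virtual register
// it currently holds.)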
2032 Vector<GenerationInfo, 32> m_generationInfo; 2033 RegisterBank<GPRInfo> m_gprs; 2034 RegisterBank<FPRInfo> m_fprs; 2035 2036 Vector<MacroAssembler::Label> m_blockHeads; 2037 struct BranchRecord { 2038 BranchRecord(MacroAssembler::Jump jump, BlockIndex destination) 2039 : jump(jump) 2040 , destination(destination) 2041 { 2042 } 2043 2044 MacroAssembler::Jump jump; 2045 BlockIndex destination; 2046 }; 2047 Vector<BranchRecord, 8> m_branches; 2048 298 2049 Vector<ValueSource, 0> m_arguments; 299 2050 Vector<ValueSource, 0> m_variables; … … 308 2059 { 309 2060 return computeValueRecoveryFor(valueSourceForOperand(operand)); 2061 } 2062 }; 2063 2064 2065 // === Operand types === 2066 // 2067 // IntegerOperand, DoubleOperand and JSValueOperand. 2068 // 2069 // These classes are used to lock the operands to a node into machine 2070 // registers. These classes implement of pattern of locking a value 2071 // into register at the point of construction only if it is already in 2072 // registers, and otherwise loading it lazily at the point it is first 2073 // used. We do so in order to attempt to avoid spilling one operand 2074 // in order to make space available for another. 2075 2076 class IntegerOperand { 2077 public: 2078 explicit IntegerOperand(SpeculativeJIT* jit, NodeIndex index) 2079 : m_jit(jit) 2080 , m_index(index) 2081 , m_gprOrInvalid(InvalidGPRReg) 2082 #ifndef NDEBUG 2083 , m_format(DataFormatNone) 2084 #endif 2085 { 2086 ASSERT(m_jit); 2087 if (jit->isFilled(index)) 2088 gpr(); 2089 } 2090 2091 ~IntegerOperand() 2092 { 2093 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2094 m_jit->unlock(m_gprOrInvalid); 2095 } 2096 2097 NodeIndex index() const 2098 { 2099 return m_index; 2100 } 2101 2102 DataFormat format() 2103 { 2104 gpr(); // m_format is set when m_gpr is locked. 
2105 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger); 2106 return m_format; 2107 } 2108 2109 GPRReg gpr() 2110 { 2111 if (m_gprOrInvalid == InvalidGPRReg) 2112 m_gprOrInvalid = m_jit->fillInteger(index(), m_format); 2113 return m_gprOrInvalid; 2114 } 2115 2116 void use() 2117 { 2118 m_jit->use(m_index); 2119 } 2120 2121 private: 2122 SpeculativeJIT* m_jit; 2123 NodeIndex m_index; 2124 GPRReg m_gprOrInvalid; 2125 DataFormat m_format; 2126 }; 2127 2128 class DoubleOperand { 2129 public: 2130 explicit DoubleOperand(SpeculativeJIT* jit, NodeIndex index) 2131 : m_jit(jit) 2132 , m_index(index) 2133 , m_fprOrInvalid(InvalidFPRReg) 2134 { 2135 ASSERT(m_jit); 2136 if (jit->isFilledDouble(index)) 2137 fpr(); 2138 } 2139 2140 ~DoubleOperand() 2141 { 2142 ASSERT(m_fprOrInvalid != InvalidFPRReg); 2143 m_jit->unlock(m_fprOrInvalid); 2144 } 2145 2146 NodeIndex index() const 2147 { 2148 return m_index; 2149 } 2150 2151 FPRReg fpr() 2152 { 2153 if (m_fprOrInvalid == InvalidFPRReg) 2154 m_fprOrInvalid = m_jit->fillDouble(index()); 2155 return m_fprOrInvalid; 2156 } 2157 2158 void use() 2159 { 2160 m_jit->use(m_index); 2161 } 2162 2163 private: 2164 SpeculativeJIT* m_jit; 2165 NodeIndex m_index; 2166 FPRReg m_fprOrInvalid; 2167 }; 2168 2169 class JSValueOperand { 2170 public: 2171 explicit JSValueOperand(SpeculativeJIT* jit, NodeIndex index) 2172 : m_jit(jit) 2173 , m_index(index) 2174 #if USE(JSVALUE64) 2175 , m_gprOrInvalid(InvalidGPRReg) 2176 #elif USE(JSVALUE32_64) 2177 , m_isDouble(false) 2178 #endif 2179 { 2180 ASSERT(m_jit); 2181 #if USE(JSVALUE64) 2182 if (jit->isFilled(index)) 2183 gpr(); 2184 #elif USE(JSVALUE32_64) 2185 m_register.pair.tagGPR = InvalidGPRReg; 2186 m_register.pair.payloadGPR = InvalidGPRReg; 2187 if (jit->isFilled(index)) 2188 fill(); 2189 #endif 2190 } 2191 2192 ~JSValueOperand() 2193 { 2194 #if USE(JSVALUE64) 2195 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2196 m_jit->unlock(m_gprOrInvalid); 2197 #elif USE(JSVALUE32_64) 2198 if (m_isDouble) { 2199 ASSERT(m_register.fpr != InvalidFPRReg); 2200 m_jit->unlock(m_register.fpr); 2201 } else { 2202 ASSERT(m_register.pair.tagGPR != InvalidGPRReg && m_register.pair.payloadGPR != InvalidGPRReg); 2203 m_jit->unlock(m_register.pair.tagGPR); 2204 m_jit->unlock(m_register.pair.payloadGPR); 2205 } 2206 #endif 2207 } 2208 2209 NodeIndex index() const 2210 { 2211 return m_index; 2212 } 2213 2214 #if USE(JSVALUE64) 2215 GPRReg gpr() 2216 { 2217 if (m_gprOrInvalid == InvalidGPRReg) 2218 m_gprOrInvalid = m_jit->fillJSValue(index()); 2219 return m_gprOrInvalid; 2220 } 2221 JSValueRegs jsValueRegs() 2222 { 2223 return JSValueRegs(gpr()); 2224 } 2225 #elif USE(JSVALUE32_64) 2226 bool isDouble() { return m_isDouble; } 2227 2228 void fill() 2229 { 2230 if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg) 2231 m_isDouble = !m_jit->fillJSValue(index(), m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr); 2232 } 2233 2234 GPRReg tagGPR() 2235 { 2236 fill(); 2237 ASSERT(!m_isDouble); 2238 return m_register.pair.tagGPR; 2239 } 2240 2241 GPRReg payloadGPR() 2242 { 2243 fill(); 2244 ASSERT(!m_isDouble); 2245 return m_register.pair.payloadGPR; 2246 } 2247 2248 JSValueRegs jsValueRegs() 2249 { 2250 return JSValueRegs(tagGPR(), payloadGPR()); 2251 } 2252 2253 FPRReg fpr() 2254 { 2255 fill(); 2256 ASSERT(m_isDouble); 2257 return m_register.fpr; 2258 } 2259 #endif 2260 2261 void use() 2262 { 2263 m_jit->use(m_index); 2264 } 2265 2266 private: 2267 SpeculativeJIT* m_jit; 2268 NodeIndex 
m_index; 2269 #if USE(JSVALUE64) 2270 GPRReg m_gprOrInvalid; 2271 #elif USE(JSVALUE32_64) 2272 union { 2273 struct { 2274 GPRReg tagGPR; 2275 GPRReg payloadGPR; 2276 } pair; 2277 FPRReg fpr; 2278 } m_register; 2279 bool m_isDouble; 2280 #endif 2281 }; 2282 2283 class StorageOperand { 2284 public: 2285 explicit StorageOperand(SpeculativeJIT* jit, NodeIndex index) 2286 : m_jit(jit) 2287 , m_index(index) 2288 , m_gprOrInvalid(InvalidGPRReg) 2289 { 2290 ASSERT(m_jit); 2291 if (jit->isFilled(index)) 2292 gpr(); 2293 } 2294 2295 ~StorageOperand() 2296 { 2297 ASSERT(m_gprOrInvalid != InvalidGPRReg); 2298 m_jit->unlock(m_gprOrInvalid); 2299 } 2300 2301 NodeIndex index() const 2302 { 2303 return m_index; 2304 } 2305 2306 GPRReg gpr() 2307 { 2308 if (m_gprOrInvalid == InvalidGPRReg) 2309 m_gprOrInvalid = m_jit->fillStorage(index()); 2310 return m_gprOrInvalid; 2311 } 2312 2313 void use() 2314 { 2315 m_jit->use(m_index); 2316 } 2317 2318 private: 2319 SpeculativeJIT* m_jit; 2320 NodeIndex m_index; 2321 GPRReg m_gprOrInvalid; 2322 }; 2323 2324 2325 // === Temporaries === 2326 // 2327 // These classes are used to allocate temporary registers. 2328 // A mechanism is provided to attempt to reuse the registers 2329 // currently allocated to child nodes whose value is consumed 2330 // by, and not live after, this operation. 2331 2332 class GPRTemporary { 2333 public: 2334 GPRTemporary(); 2335 GPRTemporary(SpeculativeJIT*); 2336 GPRTemporary(SpeculativeJIT*, GPRReg specific); 2337 GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&); 2338 GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&); 2339 GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&); 2340 GPRTemporary(SpeculativeJIT*, IntegerOperand&); 2341 GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&); 2342 GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&); 2343 GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&); 2344 #if USE(JSVALUE64) 2345 GPRTemporary(SpeculativeJIT*, JSValueOperand&); 2346 #elif USE(JSVALUE32_64) 2347 GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true); 2348 #endif 2349 GPRTemporary(SpeculativeJIT*, StorageOperand&); 2350 2351 void adopt(GPRTemporary&); 2352 2353 ~GPRTemporary() 2354 { 2355 if (m_jit && m_gpr != InvalidGPRReg) 2356 m_jit->unlock(gpr()); 2357 } 2358 2359 GPRReg gpr() 2360 { 2361 // In some cases we have lazy allocation. 2362 if (m_jit && m_gpr == InvalidGPRReg) 2363 m_gpr = m_jit->allocate(); 2364 return m_gpr; 2365 } 2366 2367 private: 2368 SpeculativeJIT* m_jit; 2369 GPRReg m_gpr; 2370 }; 2371 2372 class FPRTemporary { 2373 public: 2374 FPRTemporary(SpeculativeJIT*); 2375 FPRTemporary(SpeculativeJIT*, DoubleOperand&); 2376 FPRTemporary(SpeculativeJIT*, DoubleOperand&, DoubleOperand&); 2377 FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&); 2378 FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&); 2379 #if USE(JSVALUE32_64) 2380 FPRTemporary(SpeculativeJIT*, JSValueOperand&); 2381 #endif 2382 2383 ~FPRTemporary() 2384 { 2385 m_jit->unlock(fpr()); 2386 } 2387 2388 FPRReg fpr() const 2389 { 2390 ASSERT(m_fpr != InvalidFPRReg); 2391 return m_fpr; 2392 } 2393 2394 protected: 2395 FPRTemporary(SpeculativeJIT* jit, FPRReg lockedFPR) 2396 : m_jit(jit) 2397 , m_fpr(lockedFPR) 2398 { 2399 } 2400 2401 private: 2402 SpeculativeJIT* m_jit; 2403 FPRReg m_fpr; 2404 }; 2405 2406 2407 // === Results === 2408 // 2409 // These classes lock the result of a call to a C++ helper function. 
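// (Usage sketch, illustrative only and not part of this changeset; the
// operation name is hypothetical. A typical JSVALUE64 slow path combines
// these with flushRegisters() and callOperation():
//
//     flushRegisters();
//     GPRResult result(this);
//     callOperation(operationSomethingSlow, result.gpr(), baseGPR);
//     jsValueResult(result.gpr(), m_compileIndex);
//
// GPRResult pins returnValueGPR, so the helper's return value is already in
// the locked register when jsValueResult() records it.)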
2410 2411 class GPRResult : public GPRTemporary { 2412 public: 2413 GPRResult(SpeculativeJIT* jit) 2414 : GPRTemporary(jit, GPRInfo::returnValueGPR) 2415 { 2416 } 2417 }; 2418 2419 #if USE(JSVALUE32_64) 2420 class GPRResult2 : public GPRTemporary { 2421 public: 2422 GPRResult2(SpeculativeJIT* jit) 2423 : GPRTemporary(jit, GPRInfo::returnValueGPR2) 2424 { 2425 } 2426 }; 2427 #endif 2428 2429 class FPRResult : public FPRTemporary { 2430 public: 2431 FPRResult(SpeculativeJIT* jit) 2432 : FPRTemporary(jit, lockedResult(jit)) 2433 { 2434 } 2435 2436 private: 2437 static FPRReg lockedResult(SpeculativeJIT* jit) 2438 { 2439 jit->lock(FPRInfo::returnValueFPR); 2440 return FPRInfo::returnValueFPR; 310 2441 } 311 2442 }; … … 317 2448 // 318 2449 // These are used to lock the operands to a node into machine registers within the 319 // SpeculativeJIT. The classes operate like those provided by the JITCodeGenerator,320 // however these will perform a speculative check for a more restrictive type than321 // we can statically determine the operand to have. If the operand does not have322 // the requested type,a bail-out to the non-speculative path will be taken.2450 // SpeculativeJIT. The classes operate like those above, however these will 2451 // perform a speculative check for a more restrictive type than we can statically 2452 // determine the operand to have. If the operand does not have the requested type, 2453 // a bail-out to the non-speculative path will be taken. 323 2454 324 2455 class SpeculateIntegerOperand { … … 529 2660 530 2661 inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit) 531 : JITCodeGenerator(jit) 532 , m_compileOkay(true) 2662 : m_compileOkay(true) 2663 , m_jit(jit) 2664 , m_compileIndex(0) 2665 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters) 2666 , m_blockHeads(jit.graph().m_blocks.size()) 533 2667 , m_arguments(jit.codeBlock()->m_numParameters) 534 2668 , m_variables(jit.graph().m_localVars) -
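
The DFGSpeculativeJIT32_64.cpp hunks that follow repeatedly inspect the 32-bit tag word of a value (Int32Tag, CellTag, NullTag, LowestTag) before touching the 32-bit payload. As a reminder of the representation those checks assume, here is a small standalone sketch; it is not JSC source, and the concrete tag values are assumptions chosen only to satisfy the properties the generated code depends on. In the fillDouble hunk below, for example, the same Int32Tag test chooses between loadDouble and convertInt32ToDouble when refilling a spilled value.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Tag values are assumptions; only the tests below mirror the diff.
static const uint32_t Int32Tag     = 0xffffffff;
static const uint32_t BooleanTag   = 0xfffffffe;
static const uint32_t NullTag      = 0xfffffffd;
static const uint32_t UndefinedTag = 0xfffffffc;
static const uint32_t CellTag      = 0xfffffffb;
static const uint32_t LowestTag    = CellTag;

struct EncodedValue { uint32_t payload; uint32_t tag; }; // little-endian layout assumed

static EncodedValue encodeInt32(int32_t i)
{
    EncodedValue v = { static_cast<uint32_t>(i), Int32Tag };
    return v;
}

static EncodedValue encodeDouble(double d)
{
    EncodedValue v;
    std::memcpy(&v, &d, sizeof v); // an ordinary double's high word stays below LowestTag
    return v;
}

// Mirrors branch32(Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)).
static bool isInt32(EncodedValue v) { return v.tag == Int32Tag; }
// Mirrors branch32(AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)), which
// takes the non-double path; anything below LowestTag is an unboxed double.
static bool isDouble(EncodedValue v) { return v.tag < LowestTag; }

int main()
{
    assert(isInt32(encodeInt32(42)) && !isDouble(encodeInt32(42)));
    assert(isDouble(encodeDouble(3.14)));
    assert((UndefinedTag | 1) == NullTag); // the COMPILE_ASSERT relied on by the compare-null paths
    assert(BooleanTag >= LowestTag && NullTag >= LowestTag && CellTag >= LowestTag);
    return 0;
}
```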
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
r99904 r100244 47 47 48 48 #if USE(JSVALUE32_64) 49 50 GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat) 51 { 52 Node& node = at(nodeIndex); 53 VirtualRegister virtualRegister = node.virtualRegister(); 54 GenerationInfo& info = m_generationInfo[virtualRegister]; 55 56 if (info.registerFormat() == DataFormatNone) { 57 GPRReg gpr = allocate(); 58 59 if (node.hasConstant()) { 60 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 61 if (isInt32Constant(nodeIndex)) 62 m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); 63 else if (isNumberConstant(nodeIndex)) 64 ASSERT_NOT_REACHED(); 65 else { 66 ASSERT(isJSConstant(nodeIndex)); 67 JSValue jsValue = valueOfJSConstant(nodeIndex); 68 m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr); 69 } 70 } else { 71 ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger); 72 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 73 m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); 74 } 75 76 info.fillInteger(gpr); 77 returnFormat = DataFormatInteger; 78 return gpr; 79 } 80 81 switch (info.registerFormat()) { 82 case DataFormatNone: 83 // Should have filled, above. 84 case DataFormatJSDouble: 85 case DataFormatDouble: 86 case DataFormatJS: 87 case DataFormatCell: 88 case DataFormatJSCell: 89 case DataFormatBoolean: 90 case DataFormatJSBoolean: 91 case DataFormatStorage: 92 // Should only be calling this function if we know this operand to be integer. 93 ASSERT_NOT_REACHED(); 94 95 case DataFormatJSInteger: { 96 GPRReg tagGPR = info.tagGPR(); 97 GPRReg payloadGPR = info.payloadGPR(); 98 m_gprs.lock(tagGPR); 99 m_jit.jitAssertIsJSInt32(tagGPR); 100 m_gprs.unlock(tagGPR); 101 m_gprs.lock(payloadGPR); 102 m_gprs.release(tagGPR); 103 m_gprs.release(payloadGPR); 104 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); 105 info.fillInteger(payloadGPR); 106 returnFormat = DataFormatInteger; 107 return payloadGPR; 108 } 109 110 case DataFormatInteger: { 111 GPRReg gpr = info.gpr(); 112 m_gprs.lock(gpr); 113 m_jit.jitAssertIsInt32(gpr); 114 returnFormat = DataFormatInteger; 115 return gpr; 116 } 117 } 118 119 ASSERT_NOT_REACHED(); 120 return InvalidGPRReg; 121 } 122 123 FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) 124 { 125 Node& node = at(nodeIndex); 126 VirtualRegister virtualRegister = node.virtualRegister(); 127 GenerationInfo& info = m_generationInfo[virtualRegister]; 128 129 if (info.registerFormat() == DataFormatNone) { 130 131 if (node.hasConstant()) { 132 if (isInt32Constant(nodeIndex)) { 133 // FIXME: should not be reachable? 134 GPRReg gpr = allocate(); 135 m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); 136 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 137 info.fillInteger(gpr); 138 unlock(gpr); 139 } else if (isNumberConstant(nodeIndex)) { 140 FPRReg fpr = fprAllocate(); 141 m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr); 142 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 143 info.fillDouble(fpr); 144 return fpr; 145 } else { 146 // FIXME: should not be reachable? 
147 ASSERT_NOT_REACHED(); 148 } 149 } else { 150 DataFormat spillFormat = info.spillFormat(); 151 ASSERT(spillFormat & DataFormatJS); 152 if (spillFormat == DataFormatJSDouble) { 153 FPRReg fpr = fprAllocate(); 154 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); 155 m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); 156 info.fillDouble(fpr); 157 return fpr; 158 } 159 160 FPRReg fpr = fprAllocate(); 161 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)); 162 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); 163 JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); 164 165 isInteger.link(&m_jit); 166 m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr); 167 168 hasUnboxedDouble.link(&m_jit); 169 m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); 170 info.fillDouble(fpr); 171 return fpr; 172 } 173 } 174 175 switch (info.registerFormat()) { 176 case DataFormatNone: 177 // Should have filled, above. 178 case DataFormatCell: 179 case DataFormatJSCell: 180 case DataFormatBoolean: 181 case DataFormatJSBoolean: 182 case DataFormatStorage: 183 // Should only be calling this function if we know this operand to be numeric. 184 ASSERT_NOT_REACHED(); 185 186 case DataFormatJSInteger: 187 case DataFormatJS: { 188 GPRReg tagGPR = info.tagGPR(); 189 GPRReg payloadGPR = info.payloadGPR(); 190 FPRReg fpr = fprAllocate(); 191 m_gprs.lock(tagGPR); 192 m_gprs.lock(payloadGPR); 193 194 JITCompiler::Jump hasUnboxedDouble; 195 196 if (info.registerFormat() != DataFormatJSInteger) { 197 FPRTemporary scratch(this); 198 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); 199 m_jit.jitAssertIsJSDouble(tagGPR); 200 unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr()); 201 hasUnboxedDouble = m_jit.jump(); 202 isInteger.link(&m_jit); 203 } 204 205 m_jit.convertInt32ToDouble(payloadGPR, fpr); 206 207 if (info.registerFormat() != DataFormatJSInteger) 208 hasUnboxedDouble.link(&m_jit); 209 210 m_gprs.release(tagGPR); 211 m_gprs.release(payloadGPR); 212 m_gprs.unlock(tagGPR); 213 m_gprs.unlock(payloadGPR); 214 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 215 info.fillDouble(fpr); 216 info.killSpilled(); 217 return fpr; 218 } 219 220 case DataFormatInteger: { 221 FPRReg fpr = fprAllocate(); 222 GPRReg gpr = info.gpr(); 223 m_gprs.lock(gpr); 224 m_jit.convertInt32ToDouble(gpr, fpr); 225 m_gprs.unlock(gpr); 226 return fpr; 227 } 228 229 case DataFormatJSDouble: 230 case DataFormatDouble: { 231 FPRReg fpr = info.fpr(); 232 m_fprs.lock(fpr); 233 return fpr; 234 } 235 } 236 237 ASSERT_NOT_REACHED(); 238 return InvalidFPRReg; 239 } 240 241 bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr) 242 { 243 // FIXME: For double we could fill with a FPR. 
244 UNUSED_PARAM(fpr); 245 246 Node& node = at(nodeIndex); 247 VirtualRegister virtualRegister = node.virtualRegister(); 248 GenerationInfo& info = m_generationInfo[virtualRegister]; 249 250 switch (info.registerFormat()) { 251 case DataFormatNone: { 252 253 if (node.hasConstant()) { 254 if (isInt32Constant(nodeIndex)) { 255 tagGPR = allocate(); 256 payloadGPR = allocate(); 257 m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR); 258 info.fillJSValue(tagGPR, payloadGPR, DataFormatJSInteger); 259 } else { 260 tagGPR = allocate(); 261 payloadGPR = allocate(); 262 m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR); 263 info.fillJSValue(tagGPR, payloadGPR, DataFormatJS); 264 } 265 266 m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant); 267 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant); 268 } else { 269 DataFormat spillFormat = info.spillFormat(); 270 ASSERT(spillFormat & DataFormatJS); 271 tagGPR = allocate(); 272 payloadGPR = allocate(); 273 m_jit.emitLoad(nodeIndex, tagGPR, payloadGPR); 274 m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled); 275 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled); 276 info.fillJSValue(tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat); 277 } 278 279 return true; 280 } 281 282 case DataFormatInteger: 283 case DataFormatCell: 284 case DataFormatBoolean: { 285 GPRReg gpr = info.gpr(); 286 // If the register has already been locked we need to take a copy. 287 if (m_gprs.isLocked(gpr)) { 288 payloadGPR = allocate(); 289 m_jit.move(gpr, payloadGPR); 290 } else { 291 payloadGPR = gpr; 292 m_gprs.lock(gpr); 293 } 294 tagGPR = allocate(); 295 uint32_t tag = JSValue::EmptyValueTag; 296 DataFormat fillFormat = DataFormatJS; 297 switch (info.registerFormat()) { 298 case DataFormatInteger: 299 tag = JSValue::Int32Tag; 300 fillFormat = DataFormatJSInteger; 301 break; 302 case DataFormatCell: 303 tag = JSValue::CellTag; 304 fillFormat = DataFormatJSCell; 305 break; 306 case DataFormatBoolean: 307 tag = JSValue::BooleanTag; 308 fillFormat = DataFormatJSBoolean; 309 break; 310 default: 311 ASSERT_NOT_REACHED(); 312 break; 313 } 314 m_jit.move(TrustedImm32(tag), tagGPR); 315 m_gprs.release(gpr); 316 m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); 317 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); 318 info.fillJSValue(tagGPR, payloadGPR, fillFormat); 319 return true; 320 } 321 322 case DataFormatJSDouble: 323 case DataFormatDouble: { 324 FPRReg oldFPR = info.fpr(); 325 m_fprs.lock(oldFPR); 326 tagGPR = allocate(); 327 payloadGPR = allocate(); 328 boxDouble(oldFPR, tagGPR, payloadGPR); 329 m_fprs.unlock(oldFPR); 330 m_fprs.release(oldFPR); 331 m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); 332 m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); 333 info.fillJSValue(tagGPR, payloadGPR, DataFormatJS); 334 return true; 335 } 336 337 case DataFormatJS: 338 case DataFormatJSInteger: 339 case DataFormatJSCell: 340 case DataFormatJSBoolean: { 341 tagGPR = info.tagGPR(); 342 payloadGPR = info.payloadGPR(); 343 m_gprs.lock(tagGPR); 344 m_gprs.lock(payloadGPR); 345 return true; 346 } 347 348 case DataFormatStorage: 349 // this type currently never occurs 350 ASSERT_NOT_REACHED(); 351 } 352 353 ASSERT_NOT_REACHED(); 354 return true; 355 } 356 357 void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node) 358 { 359 if (isKnownNumeric(node.child1())) { 360 JSValueOperand op1(this, node.child1()); 361 op1.fill(); 362 if (op1.isDouble()) { 363 FPRTemporary result(this, op1); 364 
m_jit.moveDouble(op1.fpr(), result.fpr()); 365 doubleResult(result.fpr(), m_compileIndex); 366 } else { 367 GPRTemporary resultTag(this, op1); 368 GPRTemporary resultPayload(this, op1, false); 369 m_jit.move(op1.tagGPR(), resultTag.gpr()); 370 m_jit.move(op1.payloadGPR(), resultPayload.gpr()); 371 jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); 372 } 373 return; 374 } 375 376 JSValueOperand op1(this, node.child1()); 377 GPRTemporary resultTag(this, op1); 378 GPRTemporary resultPayload(this, op1, false); 379 380 ASSERT(!isInt32Constant(node.child1())); 381 ASSERT(!isNumberConstant(node.child1())); 382 383 GPRReg tagGPR = op1.tagGPR(); 384 GPRReg payloadGPR = op1.payloadGPR(); 385 GPRReg resultTagGPR = resultTag.gpr(); 386 GPRReg resultPayloadGPR = resultPayload.gpr(); 387 op1.use(); 388 389 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); 390 JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)); 391 392 // First, if we get here we have a double encoded as a JSValue 393 JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); 394 395 // Next handle cells (& other JS immediates) 396 nonNumeric.link(&m_jit); 397 silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); 398 callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, tagGPR, payloadGPR); 399 boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR); 400 silentFillAllRegisters(resultTagGPR, resultPayloadGPR); 401 JITCompiler::Jump hasCalledToNumber = m_jit.jump(); 402 403 // Finally, handle integers. 404 isInteger.link(&m_jit); 405 hasUnboxedDouble.link(&m_jit); 406 m_jit.move(tagGPR, resultTagGPR); 407 m_jit.move(payloadGPR, resultPayloadGPR); 408 hasCalledToNumber.link(&m_jit); 409 jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 410 } 411 412 void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node) 413 { 414 ASSERT(!isInt32Constant(node.child1())); 415 416 if (isKnownInteger(node.child1())) { 417 IntegerOperand op1(this, node.child1()); 418 GPRTemporary result(this, op1); 419 m_jit.move(op1.gpr(), result.gpr()); 420 integerResult(result.gpr(), m_compileIndex); 421 return; 422 } 423 424 GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()]; 425 if (childInfo.isJSDouble()) { 426 DoubleOperand op1(this, node.child1()); 427 GPRTemporary result(this); 428 FPRReg fpr = op1.fpr(); 429 GPRReg gpr = result.gpr(); 430 op1.use(); 431 JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful); 432 433 silentSpillAllRegisters(gpr); 434 callOperation(toInt32, gpr, fpr); 435 silentFillAllRegisters(gpr); 436 437 truncatedToInteger.link(&m_jit); 438 integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly); 439 return; 440 } 441 442 JSValueOperand op1(this, node.child1()); 443 GPRTemporary result(this); 444 GPRReg tagGPR = op1.tagGPR(); 445 GPRReg payloadGPR = op1.payloadGPR(); 446 GPRReg resultGPR = result.gpr(); 447 op1.use(); 448 449 JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); 450 451 // First handle non-integers 452 silentSpillAllRegisters(resultGPR); 453 callOperation(dfgConvertJSValueToInt32, GPRInfo::returnValueGPR, tagGPR, payloadGPR); 454 m_jit.move(GPRInfo::returnValueGPR, resultGPR); 455 silentFillAllRegisters(resultGPR); 456 JITCompiler::Jump hasCalledToInt32 = 
m_jit.jump(); 457 458 // Then handle integers. 459 isInteger.link(&m_jit); 460 m_jit.move(payloadGPR, resultGPR); 461 hasCalledToInt32.link(&m_jit); 462 integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); 463 } 464 465 void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node) 466 { 467 IntegerOperand op1(this, node.child1()); 468 FPRTemporary boxer(this); 469 GPRTemporary resultTag(this, op1); 470 GPRTemporary resultPayload(this); 471 472 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0)); 473 474 m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr()); 475 m_jit.move(JITCompiler::TrustedImmPtr(&twoToThe32), resultPayload.gpr()); // reuse resultPayload register here. 476 m_jit.addDouble(JITCompiler::Address(resultPayload.gpr(), 0), boxer.fpr()); 477 478 boxDouble(boxer.fpr(), resultTag.gpr(), resultPayload.gpr()); 479 480 JITCompiler::Jump done = m_jit.jump(); 481 482 positive.link(&m_jit); 483 484 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTag.gpr()); 485 m_jit.move(op1.gpr(), resultPayload.gpr()); 486 487 done.link(&m_jit); 488 489 jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); 490 } 491 492 void SpeculativeJIT::nonSpeculativeKnownConstantArithOp(NodeType op, NodeIndex regChild, NodeIndex immChild, bool commute) 493 { 494 JSValueOperand regArg(this, regChild); 495 regArg.fill(); 496 497 if (regArg.isDouble()) { 498 FPRReg regArgFPR = regArg.fpr(); 499 FPRTemporary imm(this); 500 FPRTemporary result(this, regArg); 501 GPRTemporary scratch(this); 502 FPRReg immFPR = imm.fpr(); 503 FPRReg resultFPR = result.fpr(); 504 GPRReg scratchGPR = scratch.gpr(); 505 use(regChild); 506 use(immChild); 507 508 int32_t imm32 = valueOfInt32Constant(immChild); 509 m_jit.move(TrustedImm32(imm32), scratchGPR); 510 m_jit.convertInt32ToDouble(scratchGPR, immFPR); 511 512 switch (op) { 513 case ValueAdd: 514 case ArithAdd: 515 m_jit.addDouble(regArgFPR, immFPR, resultFPR); 516 break; 517 518 case ArithSub: 519 m_jit.subDouble(regArgFPR, immFPR, resultFPR); 520 break; 521 522 default: 523 ASSERT_NOT_REACHED(); 524 } 525 526 doubleResult(resultFPR, m_compileIndex, UseChildrenCalledExplicitly); 527 return; 528 } 529 530 GPRReg regArgTagGPR = regArg.tagGPR(); 531 GPRReg regArgPayloadGPR = regArg.payloadGPR(); 532 GPRTemporary resultTag(this, regArg); 533 GPRTemporary resultPayload(this, regArg, false); 534 GPRReg resultTagGPR = resultTag.gpr(); 535 GPRReg resultPayloadGPR = resultPayload.gpr(); 536 FPRTemporary tmp1(this); 537 FPRTemporary tmp2(this); 538 FPRReg tmp1FPR = tmp1.fpr(); 539 FPRReg tmp2FPR = tmp2.fpr(); 540 use(regChild); 541 use(immChild); 542 543 JITCompiler::Jump notInt; 544 int32_t imm = valueOfInt32Constant(immChild); 545 546 if (!isKnownNumeric(regChild)) 547 notInt = m_jit.branch32(MacroAssembler::NotEqual, regArgTagGPR, TrustedImm32(JSValue::Int32Tag)); 548 549 JITCompiler::Jump overflow; 550 551 switch (op) { 552 case ValueAdd: 553 case ArithAdd: 554 overflow = m_jit.branchAdd32(MacroAssembler::Overflow, regArgPayloadGPR, Imm32(imm), resultPayloadGPR); 555 break; 556 557 case ArithSub: 558 overflow = m_jit.branchSub32(MacroAssembler::Overflow, regArgPayloadGPR, Imm32(imm), resultPayloadGPR); 559 break; 560 561 default: 562 ASSERT_NOT_REACHED(); 563 } 564 565 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); 566 JITCompiler::Jump done = m_jit.jump(); 567 568 overflow.link(&m_jit); 569 // first deal with overflow case 570 m_jit.convertInt32ToDouble(regArgPayloadGPR, tmp2FPR); 571 
m_jit.move(TrustedImm32(imm), resultPayloadGPR); 572 m_jit.convertInt32ToDouble(resultPayloadGPR, tmp1FPR); 573 switch (op) { 574 case ValueAdd: 575 case ArithAdd: 576 m_jit.addDouble(tmp1FPR, tmp2FPR); 577 break; 578 579 case ArithSub: 580 m_jit.subDouble(tmp1FPR, tmp2FPR); 581 break; 582 583 default: 584 ASSERT_NOT_REACHED(); 585 } 586 587 JITCompiler::Jump doneCaseConvertedToInt; 588 589 if (op == ValueAdd) { 590 JITCompiler::JumpList failureCases; 591 m_jit.branchConvertDoubleToInt32(tmp2FPR, resultPayloadGPR, failureCases, tmp1FPR); 592 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); 593 doneCaseConvertedToInt = m_jit.jump(); 594 595 failureCases.link(&m_jit); 596 } 597 598 boxDouble(tmp2FPR, resultTagGPR, resultPayloadGPR); 599 600 if (!isKnownNumeric(regChild)) { 601 ASSERT(notInt.isSet()); 602 ASSERT(op == ValueAdd); 603 604 JITCompiler::Jump doneCaseWasNumber = m_jit.jump(); 605 606 notInt.link(&m_jit); 607 608 silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); 609 if (commute) 610 callOperation(operationValueAddNotNumber, resultTagGPR, resultPayloadGPR, MacroAssembler::Imm32(imm), regArgTagGPR, regArgPayloadGPR); 611 else 612 callOperation(operationValueAddNotNumber, resultTagGPR, resultPayloadGPR, regArgTagGPR, regArgPayloadGPR, MacroAssembler::Imm32(imm)); 613 silentFillAllRegisters(resultTagGPR, resultPayloadGPR); 614 615 doneCaseWasNumber.link(&m_jit); 616 } 617 618 done.link(&m_jit); 619 if (doneCaseConvertedToInt.isSet()) 620 doneCaseConvertedToInt.link(&m_jit); 621 622 jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 623 } 624 625 void SpeculativeJIT::nonSpeculativeBasicArithOp(NodeType op, Node &node) 626 { 627 JSValueOperand arg1(this, node.child1()); 628 JSValueOperand arg2(this, node.child2()); 629 arg1.fill(); 630 arg2.fill(); 631 632 if (arg1.isDouble() && arg2.isDouble()) { 633 FPRReg arg1FPR = arg1.fpr(); 634 FPRReg arg2FPR = arg2.fpr(); 635 FPRTemporary result(this, arg1); 636 arg1.use(); 637 arg2.use(); 638 switch (op) { 639 case ValueAdd: 640 case ArithAdd: 641 m_jit.addDouble(arg1FPR, arg2FPR, result.fpr()); 642 break; 643 644 case ArithSub: 645 m_jit.subDouble(arg1FPR, arg2FPR, result.fpr()); 646 break; 647 648 case ArithMul: 649 m_jit.mulDouble(arg1FPR, arg2FPR, result.fpr()); 650 break; 651 652 default: 653 ASSERT_NOT_REACHED(); 654 } 655 656 doubleResult(result.fpr(), m_compileIndex, UseChildrenCalledExplicitly); 657 return; 658 } 659 660 FPRTemporary tmp1(this); 661 FPRTemporary tmp2(this); 662 FPRReg tmp1FPR = tmp1.fpr(); 663 FPRReg tmp2FPR = tmp2.fpr(); 664 665 GPRTemporary resultTag(this, arg1.isDouble() ? arg2 : arg1); 666 GPRTemporary resultPayload(this, arg1.isDouble() ? 
arg2 : arg1, false); 667 GPRReg resultTagGPR = resultTag.gpr(); 668 GPRReg resultPayloadGPR = resultPayload.gpr(); 669 670 GPRReg arg1TagGPR = InvalidGPRReg; 671 GPRReg arg1PayloadGPR = InvalidGPRReg; 672 GPRReg arg2TagGPR = InvalidGPRReg; 673 GPRReg arg2PayloadGPR = InvalidGPRReg; 674 GPRTemporary tmpTag(this); 675 GPRTemporary tmpPayload(this); 676 677 if (arg1.isDouble()) { 678 arg1TagGPR = tmpTag.gpr(); 679 arg1PayloadGPR = tmpPayload.gpr(); 680 boxDouble(arg1.fpr(), arg1TagGPR, arg1PayloadGPR); 681 arg2TagGPR = arg2.tagGPR(); 682 arg2PayloadGPR = arg2.payloadGPR(); 683 } else if (arg2.isDouble()) { 684 arg1TagGPR = arg1.tagGPR(); 685 arg1PayloadGPR = arg1.payloadGPR(); 686 arg2TagGPR = tmpTag.gpr(); 687 arg2PayloadGPR = tmpPayload.gpr(); 688 boxDouble(arg2.fpr(), arg2TagGPR, arg2PayloadGPR); 689 } else { 690 arg1TagGPR = arg1.tagGPR(); 691 arg1PayloadGPR = arg1.payloadGPR(); 692 arg2TagGPR = arg2.tagGPR(); 693 arg2PayloadGPR = arg2.payloadGPR(); 694 } 695 696 arg1.use(); 697 arg2.use(); 698 699 JITCompiler::Jump child1NotInt; 700 JITCompiler::Jump child2NotInt; 701 JITCompiler::JumpList overflow; 702 703 if (!isKnownInteger(node.child1())) 704 child1NotInt = m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::Int32Tag)); 705 706 if (!isKnownInteger(node.child2())) 707 child2NotInt = m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, TrustedImm32(JSValue::Int32Tag)); 708 709 switch (op) { 710 case ValueAdd: 711 case ArithAdd: { 712 overflow.append(m_jit.branchAdd32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR)); 713 break; 714 } 715 716 case ArithSub: { 717 overflow.append(m_jit.branchSub32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR)); 718 break; 719 } 720 721 case ArithMul: { 722 overflow.append(m_jit.branchMul32(MacroAssembler::Overflow, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR)); 723 overflow.append(m_jit.branchTest32(MacroAssembler::Zero, resultPayloadGPR)); 724 break; 725 } 726 727 default: 728 ASSERT_NOT_REACHED(); 729 } 730 731 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); 732 733 JITCompiler::Jump done = m_jit.jump(); 734 735 JITCompiler::JumpList haveFPRArguments; 736 737 overflow.link(&m_jit); 738 739 // both arguments are integers 740 m_jit.convertInt32ToDouble(arg1PayloadGPR, tmp1FPR); 741 m_jit.convertInt32ToDouble(arg2PayloadGPR, tmp2FPR); 742 743 haveFPRArguments.append(m_jit.jump()); 744 745 JITCompiler::JumpList notNumbers; 746 747 JITCompiler::Jump child2NotInt2; 748 749 if (!isKnownInteger(node.child1())) { 750 FPRTemporary scratch(this); 751 child1NotInt.link(&m_jit); 752 753 if (!isKnownNumeric(node.child1())) { 754 ASSERT(op == ValueAdd); 755 notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg1TagGPR, TrustedImm32(JSValue::LowestTag))); 756 } 757 758 if (arg1.isDouble()) 759 m_jit.moveDouble(arg1.fpr(), tmp1FPR); 760 else 761 unboxDouble(arg1TagGPR, arg1PayloadGPR, tmp1FPR, scratch.fpr()); 762 763 // child1 is converted to a double; child2 may either be an int or 764 // a boxed double 765 766 if (!isKnownInteger(node.child2())) { 767 if (isKnownNumeric(node.child2())) 768 child2NotInt2 = m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, TrustedImm32(JSValue::Int32Tag)); 769 else { 770 ASSERT(op == ValueAdd); 771 JITCompiler::Jump child2IsInt = m_jit.branch32(MacroAssembler::Equal, arg2TagGPR, TrustedImm32(JSValue::Int32Tag)); 772 notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg2TagGPR, TrustedImm32(JSValue::LowestTag))); 
773 child2NotInt2 = m_jit.jump(); 774 child2IsInt.link(&m_jit); 775 } 776 } 777 778 // child 2 is definitely an integer 779 m_jit.convertInt32ToDouble(arg2PayloadGPR, tmp2FPR); 780 781 haveFPRArguments.append(m_jit.jump()); 782 } 783 784 if (!isKnownInteger(node.child2())) { 785 FPRTemporary scratch(this); 786 child2NotInt.link(&m_jit); 787 788 if (!isKnownNumeric(node.child2())) { 789 ASSERT(op == ValueAdd); 790 notNumbers.append(m_jit.branch32(MacroAssembler::AboveOrEqual, arg2TagGPR, TrustedImm32(JSValue::LowestTag))); 791 } 792 793 // child1 is definitely an integer, and child 2 is definitely not 794 m_jit.convertInt32ToDouble(arg1PayloadGPR, tmp1FPR); 795 796 if (child2NotInt2.isSet()) 797 child2NotInt2.link(&m_jit); 798 799 if (arg2.isDouble()) 800 m_jit.moveDouble(arg2.fpr(), tmp2FPR); 801 else 802 unboxDouble(arg2TagGPR, arg2PayloadGPR, tmp2FPR, scratch.fpr()); 803 } 804 805 haveFPRArguments.link(&m_jit); 806 807 switch (op) { 808 case ValueAdd: 809 case ArithAdd: 810 m_jit.addDouble(tmp2FPR, tmp1FPR); 811 break; 812 813 case ArithSub: 814 m_jit.subDouble(tmp2FPR, tmp1FPR); 815 break; 816 817 case ArithMul: 818 m_jit.mulDouble(tmp2FPR, tmp1FPR); 819 break; 820 821 default: 822 ASSERT_NOT_REACHED(); 823 } 824 825 JITCompiler::Jump doneCaseConvertedToInt; 826 827 if (op == ValueAdd) { 828 JITCompiler::JumpList failureCases; 829 m_jit.branchConvertDoubleToInt32(tmp1FPR, resultPayloadGPR, failureCases, tmp2FPR); 830 m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); 831 832 doneCaseConvertedToInt = m_jit.jump(); 833 834 failureCases.link(&m_jit); 835 } 836 837 boxDouble(tmp1FPR, resultTagGPR, resultPayloadGPR); 838 839 if (!notNumbers.empty()) { 840 ASSERT(op == ValueAdd); 841 842 JITCompiler::Jump doneCaseWasNumber = m_jit.jump(); 843 844 notNumbers.link(&m_jit); 845 846 silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); 847 callOperation(operationValueAddNotNumber, resultTagGPR, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 848 silentFillAllRegisters(resultTagGPR, resultPayloadGPR); 849 850 doneCaseWasNumber.link(&m_jit); 851 } 852 853 done.link(&m_jit); 854 if (doneCaseConvertedToInt.isSet()) 855 doneCaseConvertedToInt.link(&m_jit); 856 857 jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 858 } 859 860 JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, NodeType nodeType) 861 { 862 ASSERT(nodeType == GetById || nodeType == GetMethod); 863 864 m_jit.beginUninterruptedSequence(); 865 JITCompiler::DataLabelPtr structureToCompare; 866 JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 867 m_jit.endUninterruptedSequence(); 868 869 m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR); 870 JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); 871 JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); 872 873 JITCompiler::Jump done = 
m_jit.jump(); 874 875 structureCheck.link(&m_jit); 876 877 if (slowPathTarget.isSet()) 878 slowPathTarget.link(&m_jit); 879 880 JITCompiler::Label slowCase = m_jit.label(); 881 882 silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); 883 JITCompiler::Call functionCall = callOperation(nodeType == GetById ? operationGetByIdOptimize : operationGetMethodOptimize, resultTagGPR, resultPayloadGPR, basePayloadGPR, identifier(identifierNumber)); 884 silentFillAllRegisters(resultTagGPR, resultPayloadGPR); 885 886 done.link(&m_jit); 887 888 JITCompiler::Label doneLabel = m_jit.label(); 889 890 m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR))); 891 892 return functionCall; 893 } 894 895 void SpeculativeJIT::cachedPutById(GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget) 896 { 897 m_jit.beginUninterruptedSequence(); 898 JITCompiler::DataLabelPtr structureToCompare; 899 JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 900 m_jit.endUninterruptedSequence(); 901 902 writeBarrier(basePayloadGPR, valueTagGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR); 903 904 m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); 905 JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); 906 JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); 907 908 JITCompiler::Jump done = m_jit.jump(); 909 910 structureCheck.link(&m_jit); 911 912 if (slowPathTarget.isSet()) 913 slowPathTarget.link(&m_jit); 914 915 JITCompiler::Label slowCase = m_jit.label(); 916 917 silentSpillAllRegisters(InvalidGPRReg); 918 V_DFGOperation_EJCI optimizedCall; 919 if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) { 920 if (putKind == Direct) 921 optimizedCall = operationPutByIdDirectStrictOptimize; 922 else 923 optimizedCall = operationPutByIdStrictOptimize; 924 } else { 925 if (putKind == Direct) 926 optimizedCall = operationPutByIdDirectNonStrictOptimize; 927 else 928 optimizedCall = operationPutByIdNonStrictOptimize; 929 } 930 JITCompiler::Call functionCall = callOperation(optimizedCall, valueTagGPR, valuePayloadGPR, basePayloadGPR, identifier(identifierNumber)); 931 silentFillAllRegisters(InvalidGPRReg); 932 933 done.link(&m_jit); 934 JITCompiler::Label doneLabel = m_jit.label(); 935 936 m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR))); 937 } 938 939 void SpeculativeJIT::cachedGetMethod(GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg 
scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget) 940 { 941 JITCompiler::Call slowCall; 942 JITCompiler::DataLabelPtr structToCompare, protoObj, protoStructToCompare, putFunction; 943 944 // m_jit.emitLoadPayload(baseIndex, scratchGPR); 945 JITCompiler::Jump wrongStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 946 protoObj = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultPayloadGPR); 947 JITCompiler::Jump wrongProtoStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(resultPayloadGPR, JSCell::structureOffset()), protoStructToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 948 949 putFunction = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultPayloadGPR); 950 m_jit.move(TrustedImm32(JSValue::CellTag), resultTagGPR); 951 952 JITCompiler::Jump done = m_jit.jump(); 953 954 wrongStructure.link(&m_jit); 955 wrongProtoStructure.link(&m_jit); 956 957 slowCall = cachedGetById(basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, identifierNumber, slowPathTarget, GetMethod); 958 959 done.link(&m_jit); 960 961 m_jit.addMethodGet(slowCall, structToCompare, protoObj, protoStructToCompare, putFunction); 962 } 963 964 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert) 965 { 966 JSValueOperand arg(this, operand); 967 GPRReg argTagGPR = arg.tagGPR(); 968 GPRReg argPayloadGPR = arg.payloadGPR(); 969 970 GPRTemporary resultPayload(this, arg, false); 971 GPRReg resultPayloadGPR = resultPayload.gpr(); 972 973 JITCompiler::Jump notCell; 974 if (!isKnownCell(operand)) 975 notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); 976 977 m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR); 978 m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultPayloadGPR); 979 980 if (!isKnownCell(operand)) { 981 JITCompiler::Jump done = m_jit.jump(); 982 983 notCell.link(&m_jit); 984 // null or undefined? 985 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); 986 m_jit.move(argTagGPR, resultPayloadGPR); 987 m_jit.or32(TrustedImm32(1), resultPayloadGPR); 988 m_jit.compare32(invert ? 
JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR); 989 990 done.link(&m_jit); 991 } 992 993 booleanResult(resultPayloadGPR, m_compileIndex); 994 } 995 996 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert) 997 { 998 Node& branchNode = at(branchNodeIndex); 999 BlockIndex taken = branchNode.takenBlockIndex(); 1000 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 1001 1002 if (taken == (m_block + 1)) { 1003 invert = !invert; 1004 BlockIndex tmp = taken; 1005 taken = notTaken; 1006 notTaken = tmp; 1007 } 1008 1009 JSValueOperand arg(this, operand); 1010 GPRReg argTagGPR = arg.tagGPR(); 1011 GPRReg argPayloadGPR = arg.payloadGPR(); 1012 1013 GPRTemporary result(this, arg); 1014 GPRReg resultGPR = result.gpr(); 1015 1016 JITCompiler::Jump notCell; 1017 1018 if (!isKnownCell(operand)) 1019 notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); 1020 1021 m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR); 1022 addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken); 1023 1024 if (!isKnownCell(operand)) { 1025 addBranch(m_jit.jump(), notTaken); 1026 1027 notCell.link(&m_jit); 1028 // null or undefined? 1029 COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); 1030 m_jit.move(argTagGPR, resultGPR); 1031 m_jit.or32(TrustedImm32(1), resultGPR); 1032 addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag)), taken); 1033 } 1034 1035 if (notTaken != (m_block + 1)) 1036 addBranch(m_jit.jump(), notTaken); 1037 } 1038 1039 bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeIndex operand, bool invert) 1040 { 1041 NodeIndex branchNodeIndex = detectPeepHoleBranch(); 1042 if (branchNodeIndex != NoNode) { 1043 ASSERT(node.adjustedRefCount() == 1); 1044 1045 nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert); 1046 1047 use(node.child1()); 1048 use(node.child2()); 1049 m_compileIndex = branchNodeIndex; 1050 1051 return true; 1052 } 1053 1054 nonSpeculativeNonPeepholeCompareNull(operand, invert); 1055 1056 return false; 1057 } 1058 1059 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) 1060 { 1061 Node& branchNode = at(branchNodeIndex); 1062 BlockIndex taken = branchNode.takenBlockIndex(); 1063 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 1064 1065 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero; 1066 1067 // The branch instruction will branch to the taken block. 1068 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 
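
The rewrite described in that comment, performed by the code just below, can be seen in isolation in this standalone sketch; the block numbers and the condition are hypothetical, and invertCondition stands in for JITCompiler::invert().

```cpp
#include <cassert>
#include <utility>

enum Condition { LessThan, GreaterThanOrEqual };
static Condition invertCondition(Condition c) { return c == LessThan ? GreaterThanOrEqual : LessThan; }

int main()
{
    unsigned currentBlock = 5;
    unsigned taken = 6, notTaken = 7;   // Branch(x < y): taken -> block #6, notTaken -> block #7
    Condition cond = LessThan;

    // If the taken block is emitted immediately after the current one, swap the targets
    // and invert the condition so that the common case falls through without a jump.
    if (taken == currentBlock + 1) {
        cond = invertCondition(cond);
        std::swap(taken, notTaken);
    }

    assert(cond == GreaterThanOrEqual && taken == 7 && notTaken == 6);
    // Emit: branch(cond) -> taken; the trailing unconditional jump to notTaken is only
    // emitted when notTaken is not the next block, which is exactly the case skipped here.
    assert(notTaken == currentBlock + 1);
    return 0;
}
```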
1069 if (taken == (m_block + 1)) { 1070 cond = JITCompiler::invert(cond); 1071 callResultCondition = JITCompiler::Zero; 1072 BlockIndex tmp = taken; 1073 taken = notTaken; 1074 notTaken = tmp; 1075 } 1076 1077 JSValueOperand arg1(this, node.child1()); 1078 JSValueOperand arg2(this, node.child2()); 1079 GPRReg arg1TagGPR = arg1.tagGPR(); 1080 GPRReg arg1PayloadGPR = arg1.payloadGPR(); 1081 GPRReg arg2TagGPR = arg2.tagGPR(); 1082 GPRReg arg2PayloadGPR = arg2.payloadGPR(); 1083 1084 JITCompiler::JumpList slowPath; 1085 1086 if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) { 1087 GPRResult result(this); 1088 GPRReg resultGPR = result.gpr(); 1089 1090 arg1.use(); 1091 arg2.use(); 1092 1093 flushRegisters(); 1094 callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1095 1096 addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken); 1097 } else { 1098 GPRTemporary result(this); 1099 GPRReg resultGPR = result.gpr(); 1100 1101 arg1.use(); 1102 arg2.use(); 1103 1104 if (!isKnownInteger(node.child1())) 1105 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); 1106 if (!isKnownInteger(node.child2())) 1107 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); 1108 1109 addBranch(m_jit.branch32(cond, arg1PayloadGPR, arg2PayloadGPR), taken); 1110 1111 if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) { 1112 addBranch(m_jit.jump(), notTaken); 1113 1114 slowPath.link(&m_jit); 1115 1116 silentSpillAllRegisters(resultGPR); 1117 callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1118 silentFillAllRegisters(resultGPR); 1119 1120 addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken); 1121 } 1122 } 1123 1124 if (notTaken != (m_block + 1)) 1125 addBranch(m_jit.jump(), notTaken); 1126 } 1127 1128 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) 1129 { 1130 JSValueOperand arg1(this, node.child1()); 1131 JSValueOperand arg2(this, node.child2()); 1132 GPRReg arg1TagGPR = arg1.tagGPR(); 1133 GPRReg arg1PayloadGPR = arg1.payloadGPR(); 1134 GPRReg arg2TagGPR = arg2.tagGPR(); 1135 GPRReg arg2PayloadGPR = arg2.payloadGPR(); 1136 1137 JITCompiler::JumpList slowPath; 1138 1139 if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) { 1140 GPRResult result(this); 1141 GPRReg resultPayloadGPR = result.gpr(); 1142 1143 arg1.use(); 1144 arg2.use(); 1145 1146 flushRegisters(); 1147 callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1148 1149 booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 1150 } else { 1151 GPRTemporary resultPayload(this, arg1, false); 1152 GPRReg resultPayloadGPR = resultPayload.gpr(); 1153 1154 arg1.use(); 1155 arg2.use(); 1156 1157 if (!isKnownInteger(node.child1())) 1158 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); 1159 if (!isKnownInteger(node.child2())) 1160 slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); 1161 1162 m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR); 1163 1164 if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) { 1165 JITCompiler::Jump 
haveResult = m_jit.jump(); 1166 1167 slowPath.link(&m_jit); 1168 1169 silentSpillAllRegisters(resultPayloadGPR); 1170 callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1171 silentFillAllRegisters(resultPayloadGPR); 1172 1173 m_jit.andPtr(TrustedImm32(1), resultPayloadGPR); 1174 1175 haveResult.link(&m_jit); 1176 } 1177 1178 booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 1179 } 1180 } 1181 1182 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert) 1183 { 1184 Node& branchNode = at(branchNodeIndex); 1185 BlockIndex taken = branchNode.takenBlockIndex(); 1186 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 1187 1188 // The branch instruction will branch to the taken block. 1189 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 1190 if (taken == (m_block + 1)) { 1191 invert = !invert; 1192 BlockIndex tmp = taken; 1193 taken = notTaken; 1194 notTaken = tmp; 1195 } 1196 1197 JSValueOperand arg1(this, node.child1()); 1198 JSValueOperand arg2(this, node.child2()); 1199 GPRReg arg1TagGPR = arg1.tagGPR(); 1200 GPRReg arg1PayloadGPR = arg1.payloadGPR(); 1201 GPRReg arg2TagGPR = arg2.tagGPR(); 1202 GPRReg arg2PayloadGPR = arg2.payloadGPR(); 1203 1204 GPRTemporary resultPayload(this, arg1, false); 1205 GPRReg resultPayloadGPR = resultPayload.gpr(); 1206 1207 arg1.use(); 1208 arg2.use(); 1209 1210 if (isKnownCell(node.child1()) && isKnownCell(node.child2())) { 1211 // see if we get lucky: if the arguments are cells and they reference the same 1212 // cell, then they must be strictly equal. 1213 addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR), invert ? notTaken : taken); 1214 1215 silentSpillAllRegisters(resultPayloadGPR); 1216 callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1217 silentFillAllRegisters(resultPayloadGPR); 1218 1219 addBranch(m_jit.branchTest32(invert ? JITCompiler::NonZero : JITCompiler::Zero, resultPayloadGPR), taken); 1220 } else { 1221 // FIXME: Add fast paths for twoCells, number etc. 1222 1223 silentSpillAllRegisters(resultPayloadGPR); 1224 callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1225 silentFillAllRegisters(resultPayloadGPR); 1226 1227 addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken); 1228 } 1229 1230 if (notTaken != (m_block + 1)) 1231 addBranch(m_jit.jump(), notTaken); 1232 } 1233 1234 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert) 1235 { 1236 JSValueOperand arg1(this, node.child1()); 1237 JSValueOperand arg2(this, node.child2()); 1238 GPRReg arg1TagGPR = arg1.tagGPR(); 1239 GPRReg arg1PayloadGPR = arg1.payloadGPR(); 1240 GPRReg arg2TagGPR = arg2.tagGPR(); 1241 GPRReg arg2PayloadGPR = arg2.payloadGPR(); 1242 1243 GPRTemporary resultPayload(this, arg1, false); 1244 GPRReg resultPayloadGPR = resultPayload.gpr(); 1245 1246 arg1.use(); 1247 arg2.use(); 1248 1249 if (isKnownCell(node.child1()) && isKnownCell(node.child2())) { 1250 // see if we get lucky: if the arguments are cells and they reference the same 1251 // cell, then they must be strictly equal. 
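
The inline fast path below exploits only one direction of that observation: equal cell pointers imply strict equality, but unequal pointers prove nothing, because two distinct string cells can still compare strictly equal, which is why the unequal case falls back to operationCompareStrictEqCell. A tiny standalone illustration of that asymmetry (not JSC code; Cell is a stand-in for a string cell):

```cpp
#include <cassert>
#include <string>

struct Cell { std::string characters; };   // stand-in for a JSString cell

static bool strictEqualCells(const Cell* a, const Cell* b)
{
    if (a == b)
        return true;                        // same cell: trivially strict-equal (the inline fast path)
    return a->characters == b->characters;  // different cells may still be equal, hence the slow helper
}

int main()
{
    Cell s1 = { "dfg" }, s2 = { "dfg" };
    assert(strictEqualCells(&s1, &s1));
    assert(strictEqualCells(&s1, &s2));     // equal contents despite different identities
    return 0;
}
```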
1252 JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR); 1253 1254 m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR); 1255 JITCompiler::Jump done = m_jit.jump(); 1256 1257 notEqualCase.link(&m_jit); 1258 1259 silentSpillAllRegisters(resultPayloadGPR); 1260 callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1261 silentFillAllRegisters(resultPayloadGPR); 1262 1263 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); 1264 1265 done.link(&m_jit); 1266 } else { 1267 // FIXME: Add fast paths. 1268 1269 silentSpillAllRegisters(resultPayloadGPR); 1270 callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); 1271 silentFillAllRegisters(resultPayloadGPR); 1272 1273 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); 1274 } 1275 1276 booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); 1277 } 1278 1279 void SpeculativeJIT::emitCall(Node& node) 1280 { 1281 P_DFGOperation_E slowCallFunction; 1282 1283 if (node.op == Call) 1284 slowCallFunction = operationLinkCall; 1285 else { 1286 ASSERT(node.op == Construct); 1287 slowCallFunction = operationLinkConstruct; 1288 } 1289 1290 // For constructors, the this argument is not passed but we have to make space 1291 // for it. 1292 int dummyThisArgument = node.op == Call ? 0 : 1; 1293 1294 CallLinkInfo::CallType callType = node.op == Call ? CallLinkInfo::Call : CallLinkInfo::Construct; 1295 1296 NodeIndex calleeNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild()]; 1297 JSValueOperand callee(this, calleeNodeIndex); 1298 GPRReg calleeTagGPR = callee.tagGPR(); 1299 GPRReg calleePayloadGPR = callee.payloadGPR(); 1300 use(calleeNodeIndex); 1301 1302 // the call instruction's first child is either the function (normal call) or the 1303 // receiver (method call). subsequent children are the arguments. 1304 int numArgs = node.numChildren() - 1; 1305 1306 // For constructors, the this argument is not passed but we have to make space 1307 // for it. 1308 int numPassedArgs = numArgs + dummyThisArgument; 1309 1310 // amount of stuff (in units of sizeof(Register)) that we need to place at the 1311 // top of the JS stack. 
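
The sizing computed just below follows directly from the two comments above. Here is a worked example with assumed numbers; only the formula and the argument-offset expression come from the code, and the value of RegisterFile::CallFrameHeaderSize is an assumption made for illustration.

```cpp
#include <cassert>

int main()
{
    const int callFrameHeaderSize = 6;   // assumed header slot count, for illustration only
    int numArgs = 2;                     // e.g. new F(a, b): the call node's children minus the callee
    int dummyThisArgument = 1;           // Construct reserves a slot for |this| even though it is not passed
    int numPassedArgs = numArgs + dummyThisArgument;
    int callDataSize = numPassedArgs + callFrameHeaderSize;   // Registers placed on top of the JS stack
    assert(callDataSize == 9);
    // Argument i is then stored at offset (-callDataSize + i + dummyThisArgument) from the new
    // frame, and the header slots (ArgumentCount, CallerFrame, Callee, ScopeChain) are filled in
    // through tagOfCallData()/payloadOfCallData() before the near call.
    return 0;
}
```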
1312 int callDataSize = 0; 1313 1314 // first there are the arguments 1315 callDataSize += numPassedArgs; 1316 1317 // and then there is the call frame header 1318 callDataSize += RegisterFile::CallFrameHeaderSize; 1319 1320 m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), payloadOfCallData(RegisterFile::ArgumentCount)); 1321 m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag), tagOfCallData(RegisterFile::ArgumentCount)); 1322 m_jit.storePtr(GPRInfo::callFrameRegister, payloadOfCallData(RegisterFile::CallerFrame)); 1323 m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), tagOfCallData(RegisterFile::CallerFrame)); 1324 1325 for (int argIdx = 0; argIdx < numArgs; argIdx++) { 1326 NodeIndex argNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + argIdx]; 1327 JSValueOperand arg(this, argNodeIndex); 1328 GPRReg argTagGPR = arg.tagGPR(); 1329 GPRReg argPayloadGPR = arg.payloadGPR(); 1330 use(argNodeIndex); 1331 1332 m_jit.store32(argTagGPR, tagOfCallData(-callDataSize + argIdx + dummyThisArgument)); 1333 m_jit.store32(argPayloadGPR, payloadOfCallData(-callDataSize + argIdx + dummyThisArgument)); 1334 } 1335 1336 m_jit.store32(calleeTagGPR, tagOfCallData(RegisterFile::Callee)); 1337 m_jit.store32(calleePayloadGPR, payloadOfCallData(RegisterFile::Callee)); 1338 1339 flushRegisters(); 1340 1341 GPRResult resultPayload(this); 1342 GPRResult2 resultTag(this); 1343 GPRReg resultPayloadGPR = resultPayload.gpr(); 1344 GPRReg resultTagGPR = resultTag.gpr(); 1345 1346 JITCompiler::DataLabelPtr targetToCheck; 1347 JITCompiler::Jump slowPath; 1348 1349 slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck); 1350 m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultPayloadGPR); 1351 m_jit.storePtr(resultPayloadGPR, payloadOfCallData(RegisterFile::ScopeChain)); 1352 m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), tagOfCallData(RegisterFile::ScopeChain)); 1353 1354 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 1355 1356 JITCompiler::Call fastCall = m_jit.nearCall(); 1357 m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin); 1358 1359 JITCompiler::Jump done = m_jit.jump(); 1360 1361 slowPath.link(&m_jit); 1362 1363 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1364 m_jit.poke(GPRInfo::argumentGPR0); 1365 JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin); 1366 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 1367 m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin); 1368 1369 done.link(&m_jit); 1370 1371 setupResults(resultPayloadGPR, resultTagGPR); 1372 1373 jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly); 1374 1375 m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin); 1376 } 49 1377 50 1378 template<bool strict> -
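
The DFGSpeculativeJIT64.cpp hunks that follow keep a whole JSValue in a single 64-bit register and manipulate numbers through GPRInfo::tagTypeNumberRegister: orPtr tags an int32, subPtr boxes a double, and an unsigned AboveOrEqual compare against the tag register recognizes int32s. A standalone sketch of that encoding follows; the constant is an assumption consistent with those operations, not a quote from JSC.

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

static const uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed value of tagTypeNumberRegister

static uint64_t boxInt32(int32_t i)     // mirrors fillInteger/fillJSValue: orPtr(tagTypeNumberRegister, gpr)
{
    return TagTypeNumber | static_cast<uint32_t>(i);
}

static uint64_t boxDouble(double d)     // mirrors fillJSValue: subPtr(tagTypeNumberRegister, gpr)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits - TagTypeNumber;        // subtracting the tag is adding 2^48 modulo 2^64
}

// branchPtr(AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister): the int32 fast path.
static bool isInt32(uint64_t v) { return v >= TagTypeNumber; }
// branchTestPtr(Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister): zero means not a number.
static bool isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; }

int main()
{
    assert(isInt32(boxInt32(-7)) && isNumber(boxInt32(-7)));
    assert(!isInt32(boxDouble(3.5)) && isNumber(boxDouble(3.5)));
    return 0;
}
```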
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
r99898 r100244 34 34 35 35 #if USE(JSVALUE64) 36 37 GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat) 38 { 39 Node& node = at(nodeIndex); 40 VirtualRegister virtualRegister = node.virtualRegister(); 41 GenerationInfo& info = m_generationInfo[virtualRegister]; 42 43 if (info.registerFormat() == DataFormatNone) { 44 GPRReg gpr = allocate(); 45 46 if (node.hasConstant()) { 47 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 48 if (isInt32Constant(nodeIndex)) { 49 m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); 50 info.fillInteger(gpr); 51 returnFormat = DataFormatInteger; 52 return gpr; 53 } 54 if (isNumberConstant(nodeIndex)) { 55 JSValue jsValue = jsNumber(valueOfNumberConstant(nodeIndex)); 56 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 57 } else { 58 ASSERT(isJSConstant(nodeIndex)); 59 JSValue jsValue = valueOfJSConstant(nodeIndex); 60 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 61 } 62 } else if (info.spillFormat() == DataFormatInteger) { 63 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 64 m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); 65 // Tag it, since fillInteger() is used when we want a boxed integer. 66 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); 67 } else { 68 ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger); 69 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 70 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); 71 } 72 73 // Since we statically know that we're filling an integer, and values 74 // in the RegisterFile are boxed, this must be DataFormatJSInteger. 75 // We will check this with a jitAssert below. 76 info.fillJSValue(gpr, DataFormatJSInteger); 77 unlock(gpr); 78 } 79 80 switch (info.registerFormat()) { 81 case DataFormatNone: 82 // Should have filled, above. 83 case DataFormatJSDouble: 84 case DataFormatDouble: 85 case DataFormatJS: 86 case DataFormatCell: 87 case DataFormatJSCell: 88 case DataFormatBoolean: 89 case DataFormatJSBoolean: 90 case DataFormatStorage: 91 // Should only be calling this function if we know this operand to be integer. 92 ASSERT_NOT_REACHED(); 93 94 case DataFormatJSInteger: { 95 GPRReg gpr = info.gpr(); 96 m_gprs.lock(gpr); 97 m_jit.jitAssertIsJSInt32(gpr); 98 returnFormat = DataFormatJSInteger; 99 return gpr; 100 } 101 102 case DataFormatInteger: { 103 GPRReg gpr = info.gpr(); 104 m_gprs.lock(gpr); 105 m_jit.jitAssertIsInt32(gpr); 106 returnFormat = DataFormatInteger; 107 return gpr; 108 } 109 } 110 111 ASSERT_NOT_REACHED(); 112 return InvalidGPRReg; 113 } 114 115 FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) 116 { 117 Node& node = at(nodeIndex); 118 VirtualRegister virtualRegister = node.virtualRegister(); 119 GenerationInfo& info = m_generationInfo[virtualRegister]; 120 121 if (info.registerFormat() == DataFormatNone) { 122 if (node.hasConstant()) { 123 GPRReg gpr = allocate(); 124 125 if (isInt32Constant(nodeIndex)) { 126 // FIXME: should not be reachable? 
127 m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); 128 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 129 info.fillInteger(gpr); 130 unlock(gpr); 131 } else if (isNumberConstant(nodeIndex)) { 132 FPRReg fpr = fprAllocate(); 133 m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), gpr); 134 m_jit.movePtrToDouble(gpr, fpr); 135 unlock(gpr); 136 137 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 138 info.fillDouble(fpr); 139 return fpr; 140 } else { 141 // FIXME: should not be reachable? 142 ASSERT(isJSConstant(nodeIndex)); 143 JSValue jsValue = valueOfJSConstant(nodeIndex); 144 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 145 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 146 info.fillJSValue(gpr, DataFormatJS); 147 unlock(gpr); 148 } 149 } else { 150 DataFormat spillFormat = info.spillFormat(); 151 switch (spillFormat) { 152 case DataFormatDouble: { 153 FPRReg fpr = fprAllocate(); 154 m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); 155 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 156 info.fillDouble(fpr); 157 return fpr; 158 } 159 160 case DataFormatInteger: { 161 GPRReg gpr = allocate(); 162 163 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 164 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); 165 info.fillInteger(gpr); 166 unlock(gpr); 167 break; 168 } 169 170 default: 171 GPRReg gpr = allocate(); 172 173 ASSERT(spillFormat & DataFormatJS); 174 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 175 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); 176 info.fillJSValue(gpr, spillFormat); 177 unlock(gpr); 178 break; 179 } 180 } 181 } 182 183 switch (info.registerFormat()) { 184 case DataFormatNone: 185 // Should have filled, above. 186 case DataFormatCell: 187 case DataFormatJSCell: 188 case DataFormatBoolean: 189 case DataFormatJSBoolean: 190 case DataFormatStorage: 191 // Should only be calling this function if we know this operand to be numeric. 192 ASSERT_NOT_REACHED(); 193 194 case DataFormatJS: { 195 GPRReg jsValueGpr = info.gpr(); 196 m_gprs.lock(jsValueGpr); 197 FPRReg fpr = fprAllocate(); 198 GPRReg tempGpr = allocate(); // FIXME: can we skip this allocation on the last use of the virtual register? 199 200 JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister); 201 202 m_jit.jitAssertIsJSDouble(jsValueGpr); 203 204 // First, if we get here we have a double encoded as a JSValue 205 m_jit.move(jsValueGpr, tempGpr); 206 unboxDouble(tempGpr, fpr); 207 JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); 208 209 // Finally, handle integers. 210 isInteger.link(&m_jit); 211 m_jit.convertInt32ToDouble(jsValueGpr, fpr); 212 hasUnboxedDouble.link(&m_jit); 213 214 m_gprs.release(jsValueGpr); 215 m_gprs.unlock(jsValueGpr); 216 m_gprs.unlock(tempGpr); 217 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 218 info.fillDouble(fpr); 219 info.killSpilled(); 220 return fpr; 221 } 222 223 case DataFormatJSInteger: 224 case DataFormatInteger: { 225 FPRReg fpr = fprAllocate(); 226 GPRReg gpr = info.gpr(); 227 m_gprs.lock(gpr); 228 m_jit.convertInt32ToDouble(gpr, fpr); 229 m_gprs.unlock(gpr); 230 return fpr; 231 } 232 233 // Unbox the double 234 case DataFormatJSDouble: { 235 GPRReg gpr = info.gpr(); 236 FPRReg fpr = fprAllocate(); 237 if (m_gprs.isLocked(gpr)) { 238 // Make sure we don't trample gpr if it is in use. 
239 GPRReg temp = allocate(); 240 m_jit.move(gpr, temp); 241 unboxDouble(temp, fpr); 242 unlock(temp); 243 } else 244 unboxDouble(gpr, fpr); 245 246 m_gprs.release(gpr); 247 m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); 248 249 info.fillDouble(fpr); 250 return fpr; 251 } 252 253 case DataFormatDouble: { 254 FPRReg fpr = info.fpr(); 255 m_fprs.lock(fpr); 256 return fpr; 257 } 258 } 259 260 ASSERT_NOT_REACHED(); 261 return InvalidFPRReg; 262 } 263 264 GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) 265 { 266 Node& node = at(nodeIndex); 267 VirtualRegister virtualRegister = node.virtualRegister(); 268 GenerationInfo& info = m_generationInfo[virtualRegister]; 269 270 switch (info.registerFormat()) { 271 case DataFormatNone: { 272 GPRReg gpr = allocate(); 273 274 if (node.hasConstant()) { 275 if (isInt32Constant(nodeIndex)) { 276 info.fillJSValue(gpr, DataFormatJSInteger); 277 JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex)); 278 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 279 } else if (isNumberConstant(nodeIndex)) { 280 info.fillJSValue(gpr, DataFormatJSDouble); 281 JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex)); 282 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 283 } else { 284 ASSERT(isJSConstant(nodeIndex)); 285 JSValue jsValue = valueOfJSConstant(nodeIndex); 286 m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); 287 info.fillJSValue(gpr, DataFormatJS); 288 } 289 290 m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); 291 } else { 292 DataFormat spillFormat = info.spillFormat(); 293 m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 294 if (spillFormat == DataFormatInteger) { 295 m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); 296 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); 297 spillFormat = DataFormatJSInteger; 298 } else { 299 m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); 300 if (spillFormat == DataFormatDouble) { 301 // Need to box the double, since we want a JSValue. 302 m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr); 303 spillFormat = DataFormatJSDouble; 304 } else 305 ASSERT(spillFormat & DataFormatJS); 306 } 307 info.fillJSValue(gpr, spillFormat); 308 } 309 return gpr; 310 } 311 312 case DataFormatInteger: { 313 GPRReg gpr = info.gpr(); 314 // If the register has already been locked we need to take a copy. 315 // If not, we'll zero extend in place, so mark on the info that this is now type DataFormatInteger, not DataFormatJSInteger. 316 if (m_gprs.isLocked(gpr)) { 317 GPRReg result = allocate(); 318 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr, result); 319 return result; 320 } 321 m_gprs.lock(gpr); 322 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); 323 info.fillJSValue(gpr, DataFormatJSInteger); 324 return gpr; 325 } 326 327 case DataFormatDouble: { 328 FPRReg fpr = info.fpr(); 329 GPRReg gpr = boxDouble(fpr); 330 331 // Update all info 332 info.fillJSValue(gpr, DataFormatJSDouble); 333 m_fprs.release(fpr); 334 m_gprs.retain(gpr, virtualRegister, SpillOrderJS); 335 336 return gpr; 337 } 338 339 case DataFormatCell: 340 // No retag required on JSVALUE64! 
341 case DataFormatJS: 342 case DataFormatJSInteger: 343 case DataFormatJSDouble: 344 case DataFormatJSCell: 345 case DataFormatJSBoolean: { 346 GPRReg gpr = info.gpr(); 347 m_gprs.lock(gpr); 348 return gpr; 349 } 350 351 case DataFormatBoolean: 352 case DataFormatStorage: 353 // this type currently never occurs 354 ASSERT_NOT_REACHED(); 355 } 356 357 ASSERT_NOT_REACHED(); 358 return InvalidGPRReg; 359 } 360 361 void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node) 362 { 363 if (isKnownNumeric(node.child1())) { 364 JSValueOperand op1(this, node.child1()); 365 GPRTemporary result(this, op1); 366 m_jit.move(op1.gpr(), result.gpr()); 367 jsValueResult(result.gpr(), m_compileIndex); 368 return; 369 } 370 371 JSValueOperand op1(this, node.child1()); 372 GPRTemporary result(this); 373 374 ASSERT(!isInt32Constant(node.child1())); 375 ASSERT(!isNumberConstant(node.child1())); 376 377 GPRReg jsValueGpr = op1.gpr(); 378 GPRReg gpr = result.gpr(); 379 op1.use(); 380 381 JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister); 382 JITCompiler::Jump nonNumeric = m_jit.branchTestPtr(MacroAssembler::Zero, jsValueGpr, GPRInfo::tagTypeNumberRegister); 383 384 // First, if we get here we have a double encoded as a JSValue 385 m_jit.move(jsValueGpr, gpr); 386 JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); 387 388 // Next handle cells (& other JS immediates) 389 nonNumeric.link(&m_jit); 390 silentSpillAllRegisters(gpr); 391 callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, jsValueGpr); 392 boxDouble(FPRInfo::returnValueFPR, gpr); 393 silentFillAllRegisters(gpr); 394 JITCompiler::Jump hasCalledToNumber = m_jit.jump(); 395 396 // Finally, handle integers. 397 isInteger.link(&m_jit); 398 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, jsValueGpr, gpr); 399 hasUnboxedDouble.link(&m_jit); 400 hasCalledToNumber.link(&m_jit); 401 402 jsValueResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly); 403 } 404 405 void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node) 406 { 407 ASSERT(!isInt32Constant(node.child1())); 408 409 if (isKnownInteger(node.child1())) { 410 IntegerOperand op1(this, node.child1()); 411 GPRTemporary result(this, op1); 412 m_jit.move(op1.gpr(), result.gpr()); 413 integerResult(result.gpr(), m_compileIndex); 414 return; 415 } 416 417 GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()]; 418 if (childInfo.isJSDouble()) { 419 DoubleOperand op1(this, node.child1()); 420 GPRTemporary result(this); 421 FPRReg fpr = op1.fpr(); 422 GPRReg gpr = result.gpr(); 423 op1.use(); 424 JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful); 425 426 silentSpillAllRegisters(gpr); 427 callOperation(toInt32, gpr, fpr); 428 silentFillAllRegisters(gpr); 429 430 truncatedToInteger.link(&m_jit); 431 integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly); 432 return; 433 } 434 435 JSValueOperand op1(this, node.child1()); 436 GPRTemporary result(this, op1); 437 GPRReg jsValueGpr = op1.gpr(); 438 GPRReg resultGPR = result.gpr(); 439 op1.use(); 440 441 JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, jsValueGpr, GPRInfo::tagTypeNumberRegister); 442 443 // First handle non-integers 444 silentSpillAllRegisters(resultGPR); 445 callOperation(dfgConvertJSValueToInt32, resultGPR, jsValueGpr); 446 silentFillAllRegisters(resultGPR); 447 JITCompiler::Jump hasCalledToInt32 = m_jit.jump(); 448 449 // Then 
handle integers. 450 isInteger.link(&m_jit); 451 m_jit.zeroExtend32ToPtr(jsValueGpr, resultGPR); 452 hasCalledToInt32.link(&m_jit); 453 integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); 454 } 455 456 void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node) 457 { 458 IntegerOperand op1(this, node.child1()); 459 FPRTemporary boxer(this); 460 GPRTemporary result(this, op1); 461 462 JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0)); 463 464 m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr()); 465 m_jit.addDouble(JITCompiler::AbsoluteAddress(&twoToThe32), boxer.fpr()); 466 467 boxDouble(boxer.fpr(), result.gpr()); 468 469 JITCompiler::Jump done = m_jit.jump(); 470 471 positive.link(&m_jit); 472 473 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, op1.gpr(), result.gpr()); 474 475 done.link(&m_jit); 476 477 jsValueResult(result.gpr(), m_compileIndex); 478 } 479 480 void SpeculativeJIT::nonSpeculativeKnownConstantArithOp(NodeType op, NodeIndex regChild, NodeIndex immChild, bool commute) 481 { 482 JSValueOperand regArg(this, regChild); 483 GPRReg regArgGPR = regArg.gpr(); 484 GPRTemporary result(this); 485 GPRReg resultGPR = result.gpr(); 486 FPRTemporary tmp1(this); 487 FPRTemporary tmp2(this); 488 FPRReg tmp1FPR = tmp1.fpr(); 489 FPRReg tmp2FPR = tmp2.fpr(); 490 491 regArg.use(); 492 use(immChild); 493 494 JITCompiler::Jump notInt; 495 496 int32_t imm = valueOfInt32Constant(immChild); 497 498 if (!isKnownInteger(regChild)) 499 notInt = m_jit.branchPtr(MacroAssembler::Below, regArgGPR, GPRInfo::tagTypeNumberRegister); 500 501 JITCompiler::Jump overflow; 502 503 switch (op) { 504 case ValueAdd: 505 case ArithAdd: 506 overflow = m_jit.branchAdd32(MacroAssembler::Overflow, regArgGPR, Imm32(imm), resultGPR); 507 break; 508 509 case ArithSub: 510 overflow = m_jit.branchSub32(MacroAssembler::Overflow, regArgGPR, Imm32(imm), resultGPR); 511 break; 512 513 default: 514 ASSERT_NOT_REACHED(); 515 } 516 517 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 518 519 JITCompiler::Jump done = m_jit.jump(); 520 521 overflow.link(&m_jit); 522 523 JITCompiler::Jump notNumber; 524 525 // first deal with overflow case 526 m_jit.convertInt32ToDouble(regArgGPR, tmp2FPR); 527 528 // now deal with not-int case, if applicable 529 if (!isKnownInteger(regChild)) { 530 JITCompiler::Jump haveValue = m_jit.jump(); 531 532 notInt.link(&m_jit); 533 534 if (!isKnownNumeric(regChild)) { 535 ASSERT(op == ValueAdd); 536 notNumber = m_jit.branchTestPtr(MacroAssembler::Zero, regArgGPR, GPRInfo::tagTypeNumberRegister); 537 } 538 539 m_jit.move(regArgGPR, resultGPR); 540 m_jit.addPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 541 m_jit.movePtrToDouble(resultGPR, tmp2FPR); 542 543 haveValue.link(&m_jit); 544 } 545 546 m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(immChild)))), resultGPR); 547 m_jit.movePtrToDouble(resultGPR, tmp1FPR); 548 switch (op) { 549 case ValueAdd: 550 case ArithAdd: 551 m_jit.addDouble(tmp1FPR, tmp2FPR); 552 break; 553 554 case ArithSub: 555 m_jit.subDouble(tmp1FPR, tmp2FPR); 556 break; 557 558 default: 559 ASSERT_NOT_REACHED(); 560 } 561 562 JITCompiler::Jump doneCaseConvertedToInt; 563 564 if (op == ValueAdd) { 565 JITCompiler::JumpList failureCases; 566 m_jit.branchConvertDoubleToInt32(tmp2FPR, resultGPR, failureCases, tmp1FPR); 567 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 568 569 doneCaseConvertedToInt = m_jit.jump(); 570 571 failureCases.link(&m_jit); 
572 } 573 574 m_jit.moveDoubleToPtr(tmp2FPR, resultGPR); 575 m_jit.subPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 576 577 if (!isKnownNumeric(regChild)) { 578 ASSERT(notNumber.isSet()); 579 ASSERT(op == ValueAdd); 580 581 JITCompiler::Jump doneCaseWasNumber = m_jit.jump(); 582 583 notNumber.link(&m_jit); 584 585 silentSpillAllRegisters(resultGPR); 586 if (commute) 587 callOperation(operationValueAddNotNumber, resultGPR, MacroAssembler::Imm32(imm), regArgGPR); 588 else 589 callOperation(operationValueAddNotNumber, resultGPR, regArgGPR, MacroAssembler::Imm32(imm)); 590 silentFillAllRegisters(resultGPR); 591 592 doneCaseWasNumber.link(&m_jit); 593 } 594 595 done.link(&m_jit); 596 if (doneCaseConvertedToInt.isSet()) 597 doneCaseConvertedToInt.link(&m_jit); 598 599 jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); 600 } 601 602 void SpeculativeJIT::nonSpeculativeBasicArithOp(NodeType op, Node &node) 603 { 604 JSValueOperand arg1(this, node.child1()); 605 JSValueOperand arg2(this, node.child2()); 606 607 FPRTemporary tmp1(this); 608 FPRTemporary tmp2(this); 609 FPRReg tmp1FPR = tmp1.fpr(); 610 FPRReg tmp2FPR = tmp2.fpr(); 611 612 GPRTemporary result(this); 613 614 GPRReg arg1GPR = arg1.gpr(); 615 GPRReg arg2GPR = arg2.gpr(); 616 617 GPRReg resultGPR = result.gpr(); 618 619 arg1.use(); 620 arg2.use(); 621 622 JITCompiler::Jump child1NotInt; 623 JITCompiler::Jump child2NotInt; 624 JITCompiler::JumpList overflow; 625 626 if (!isKnownInteger(node.child1())) 627 child1NotInt = m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister); 628 if (!isKnownInteger(node.child2())) 629 child2NotInt = m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister); 630 631 switch (op) { 632 case ValueAdd: 633 case ArithAdd: { 634 overflow.append(m_jit.branchAdd32(MacroAssembler::Overflow, arg1GPR, arg2GPR, resultGPR)); 635 break; 636 } 637 638 case ArithSub: { 639 overflow.append(m_jit.branchSub32(MacroAssembler::Overflow, arg1GPR, arg2GPR, resultGPR)); 640 break; 641 } 642 643 case ArithMul: { 644 overflow.append(m_jit.branchMul32(MacroAssembler::Overflow, arg1GPR, arg2GPR, resultGPR)); 645 overflow.append(m_jit.branchTest32(MacroAssembler::Zero, resultGPR)); 646 break; 647 } 648 649 default: 650 ASSERT_NOT_REACHED(); 651 } 652 653 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 654 655 JITCompiler::Jump done = m_jit.jump(); 656 657 JITCompiler::JumpList haveFPRArguments; 658 659 overflow.link(&m_jit); 660 661 // both arguments are integers 662 m_jit.convertInt32ToDouble(arg1GPR, tmp1FPR); 663 m_jit.convertInt32ToDouble(arg2GPR, tmp2FPR); 664 665 haveFPRArguments.append(m_jit.jump()); 666 667 JITCompiler::JumpList notNumbers; 668 669 JITCompiler::Jump child2NotInt2; 670 671 if (!isKnownInteger(node.child1())) { 672 child1NotInt.link(&m_jit); 673 674 if (!isKnownNumeric(node.child1())) { 675 ASSERT(op == ValueAdd); 676 notNumbers.append(m_jit.branchTestPtr(MacroAssembler::Zero, arg1GPR, GPRInfo::tagTypeNumberRegister)); 677 } 678 679 m_jit.move(arg1GPR, resultGPR); 680 unboxDouble(resultGPR, tmp1FPR); 681 682 // child1 is converted to a double; child2 may either be an int or 683 // a boxed double 684 685 if (!isKnownInteger(node.child2())) { 686 if (isKnownNumeric(node.child2())) 687 child2NotInt2 = m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister); 688 else { 689 ASSERT(op == ValueAdd); 690 JITCompiler::Jump child2IsInt = m_jit.branchPtr(MacroAssembler::AboveOrEqual, arg2GPR, GPRInfo::tagTypeNumberRegister); 
691 notNumbers.append(m_jit.branchTestPtr(MacroAssembler::Zero, arg2GPR, GPRInfo::tagTypeNumberRegister)); 692 child2NotInt2 = m_jit.jump(); 693 child2IsInt.link(&m_jit); 694 } 695 } 696 697 // child 2 is definitely an integer 698 m_jit.convertInt32ToDouble(arg2GPR, tmp2FPR); 699 700 haveFPRArguments.append(m_jit.jump()); 701 } 702 703 if (!isKnownInteger(node.child2())) { 704 child2NotInt.link(&m_jit); 705 706 if (!isKnownNumeric(node.child2())) { 707 ASSERT(op == ValueAdd); 708 notNumbers.append(m_jit.branchTestPtr(MacroAssembler::Zero, arg2GPR, GPRInfo::tagTypeNumberRegister)); 709 } 710 711 // child1 is definitely an integer, and child 2 is definitely not 712 713 m_jit.convertInt32ToDouble(arg1GPR, tmp1FPR); 714 715 if (child2NotInt2.isSet()) 716 child2NotInt2.link(&m_jit); 717 718 m_jit.move(arg2GPR, resultGPR); 719 unboxDouble(resultGPR, tmp2FPR); 720 } 721 722 haveFPRArguments.link(&m_jit); 723 724 switch (op) { 725 case ValueAdd: 726 case ArithAdd: 727 m_jit.addDouble(tmp2FPR, tmp1FPR); 728 break; 729 730 case ArithSub: 731 m_jit.subDouble(tmp2FPR, tmp1FPR); 732 break; 733 734 case ArithMul: 735 m_jit.mulDouble(tmp2FPR, tmp1FPR); 736 break; 737 738 default: 739 ASSERT_NOT_REACHED(); 740 } 741 742 JITCompiler::Jump doneCaseConvertedToInt; 743 744 if (op == ValueAdd) { 745 JITCompiler::JumpList failureCases; 746 m_jit.branchConvertDoubleToInt32(tmp1FPR, resultGPR, failureCases, tmp2FPR); 747 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR); 748 749 doneCaseConvertedToInt = m_jit.jump(); 750 751 failureCases.link(&m_jit); 752 } 753 754 boxDouble(tmp1FPR, resultGPR); 755 756 if (!notNumbers.empty()) { 757 ASSERT(op == ValueAdd); 758 759 JITCompiler::Jump doneCaseWasNumber = m_jit.jump(); 760 761 notNumbers.link(&m_jit); 762 763 silentSpillAllRegisters(resultGPR); 764 callOperation(operationValueAddNotNumber, resultGPR, arg1GPR, arg2GPR); 765 silentFillAllRegisters(resultGPR); 766 767 doneCaseWasNumber.link(&m_jit); 768 } 769 770 done.link(&m_jit); 771 if (doneCaseConvertedToInt.isSet()) 772 doneCaseConvertedToInt.link(&m_jit); 773 774 jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); 775 } 776 777 JITCompiler::Call SpeculativeJIT::cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, NodeType nodeType) 778 { 779 ASSERT(nodeType == GetById || nodeType == GetMethod); 780 781 JITCompiler::DataLabelPtr structureToCompare; 782 JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 783 784 m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); 785 JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR); 786 787 JITCompiler::Jump done = m_jit.jump(); 788 789 structureCheck.link(&m_jit); 790 791 if (slowPathTarget.isSet()) 792 slowPathTarget.link(&m_jit); 793 794 JITCompiler::Label slowCase = m_jit.label(); 795 796 silentSpillAllRegisters(resultGPR); 797 JITCompiler::Call functionCall = callOperation(nodeType == GetById ? 
operationGetByIdOptimize : operationGetMethodOptimize, resultGPR, baseGPR, identifier(identifierNumber)); 798 silentFillAllRegisters(resultGPR); 799 800 done.link(&m_jit); 801 802 JITCompiler::Label doneLabel = m_jit.label(); 803 804 m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, loadWithPatch, slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), safeCast<int8_t>(scratchGPR))); 805 806 if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg) 807 unlock(scratchGPR); 808 809 return functionCall; 810 } 811 812 void SpeculativeJIT::cachedPutById(GPRReg baseGPR, GPRReg valueGPR, NodeIndex valueIndex, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget) 813 { 814 815 JITCompiler::DataLabelPtr structureToCompare; 816 JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 817 818 writeBarrier(baseGPR, valueGPR, valueIndex, WriteBarrierForPropertyAccess, scratchGPR); 819 820 m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); 821 JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0)); 822 823 JITCompiler::Jump done = m_jit.jump(); 824 825 structureCheck.link(&m_jit); 826 827 if (slowPathTarget.isSet()) 828 slowPathTarget.link(&m_jit); 829 830 JITCompiler::Label slowCase = m_jit.label(); 831 832 silentSpillAllRegisters(InvalidGPRReg); 833 V_DFGOperation_EJCI optimizedCall; 834 if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) { 835 if (putKind == Direct) 836 optimizedCall = operationPutByIdDirectStrictOptimize; 837 else 838 optimizedCall = operationPutByIdStrictOptimize; 839 } else { 840 if (putKind == Direct) 841 optimizedCall = operationPutByIdDirectNonStrictOptimize; 842 else 843 optimizedCall = operationPutByIdNonStrictOptimize; 844 } 845 JITCompiler::Call functionCall = callOperation(optimizedCall, valueGPR, baseGPR, identifier(identifierNumber)); 846 silentFillAllRegisters(InvalidGPRReg); 847 848 done.link(&m_jit); 849 JITCompiler::Label doneLabel = m_jit.label(); 850 851 m_jit.addPropertyAccess(PropertyAccessRecord(structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR))); 852 } 853 854 void SpeculativeJIT::cachedGetMethod(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget) 855 { 856 JITCompiler::Call slowCall; 857 JITCompiler::DataLabelPtr structToCompare, protoObj, protoStructToCompare, putFunction; 858 859 JITCompiler::Jump wrongStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 860 protoObj = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultGPR); 861 JITCompiler::Jump wrongProtoStructure = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(resultGPR, JSCell::structureOffset()), protoStructToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); 862 863 putFunction = m_jit.moveWithPatch(JITCompiler::TrustedImmPtr(0), resultGPR); 864 865 JITCompiler::Jump done = m_jit.jump(); 866 867 
wrongStructure.link(&m_jit); 868 wrongProtoStructure.link(&m_jit); 869 870 slowCall = cachedGetById(baseGPR, resultGPR, scratchGPR, identifierNumber, slowPathTarget, GetMethod); 871 872 done.link(&m_jit); 873 874 m_jit.addMethodGet(slowCall, structToCompare, protoObj, protoStructToCompare, putFunction); 875 } 876 877 void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert) 878 { 879 JSValueOperand arg(this, operand); 880 GPRReg argGPR = arg.gpr(); 881 882 GPRTemporary result(this, arg); 883 GPRReg resultGPR = result.gpr(); 884 885 JITCompiler::Jump notCell; 886 887 if (!isKnownCell(operand)) 888 notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); 889 890 m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR); 891 m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultGPR); 892 893 if (!isKnownCell(operand)) { 894 JITCompiler::Jump done = m_jit.jump(); 895 896 notCell.link(&m_jit); 897 898 m_jit.move(argGPR, resultGPR); 899 m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR); 900 m_jit.comparePtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(ValueNull), resultGPR); 901 902 done.link(&m_jit); 903 } 904 905 m_jit.or32(TrustedImm32(ValueFalse), resultGPR); 906 jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean); 907 } 908 909 void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert) 910 { 911 Node& branchNode = at(branchNodeIndex); 912 BlockIndex taken = branchNode.takenBlockIndex(); 913 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 914 915 if (taken == (m_block + 1)) { 916 invert = !invert; 917 BlockIndex tmp = taken; 918 taken = notTaken; 919 notTaken = tmp; 920 } 921 922 JSValueOperand arg(this, operand); 923 GPRReg argGPR = arg.gpr(); 924 925 GPRTemporary result(this, arg); 926 GPRReg resultGPR = result.gpr(); 927 928 JITCompiler::Jump notCell; 929 930 if (!isKnownCell(operand)) 931 notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister); 932 933 m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR); 934 addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken); 935 936 if (!isKnownCell(operand)) { 937 addBranch(m_jit.jump(), notTaken); 938 939 notCell.link(&m_jit); 940 941 m_jit.move(argGPR, resultGPR); 942 m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR); 943 addBranch(m_jit.branchPtr(invert ? 
JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))), taken); 944 } 945 946 if (notTaken != (m_block + 1)) 947 addBranch(m_jit.jump(), notTaken); 948 } 949 950 bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeIndex operand, bool invert) 951 { 952 NodeIndex branchNodeIndex = detectPeepHoleBranch(); 953 if (branchNodeIndex != NoNode) { 954 ASSERT(node.adjustedRefCount() == 1); 955 956 nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert); 957 958 use(node.child1()); 959 use(node.child2()); 960 m_compileIndex = branchNodeIndex; 961 962 return true; 963 } 964 965 nonSpeculativeNonPeepholeCompareNull(operand, invert); 966 967 return false; 968 } 969 970 void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) 971 { 972 Node& branchNode = at(branchNodeIndex); 973 BlockIndex taken = branchNode.takenBlockIndex(); 974 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 975 976 JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero; 977 978 // The branch instruction will branch to the taken block. 979 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 980 if (taken == (m_block + 1)) { 981 cond = JITCompiler::invert(cond); 982 callResultCondition = JITCompiler::Zero; 983 BlockIndex tmp = taken; 984 taken = notTaken; 985 notTaken = tmp; 986 } 987 988 JSValueOperand arg1(this, node.child1()); 989 JSValueOperand arg2(this, node.child2()); 990 GPRReg arg1GPR = arg1.gpr(); 991 GPRReg arg2GPR = arg2.gpr(); 992 993 JITCompiler::JumpList slowPath; 994 995 if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) { 996 GPRResult result(this); 997 GPRReg resultGPR = result.gpr(); 998 999 arg1.use(); 1000 arg2.use(); 1001 1002 flushRegisters(); 1003 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); 1004 1005 addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken); 1006 } else { 1007 GPRTemporary result(this, arg2); 1008 GPRReg resultGPR = result.gpr(); 1009 1010 arg1.use(); 1011 arg2.use(); 1012 1013 if (!isKnownInteger(node.child1())) 1014 slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister)); 1015 if (!isKnownInteger(node.child2())) 1016 slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister)); 1017 1018 addBranch(m_jit.branch32(cond, arg1GPR, arg2GPR), taken); 1019 1020 if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) { 1021 addBranch(m_jit.jump(), notTaken); 1022 1023 slowPath.link(&m_jit); 1024 1025 silentSpillAllRegisters(resultGPR); 1026 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); 1027 silentFillAllRegisters(resultGPR); 1028 1029 addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken); 1030 } 1031 } 1032 1033 if (notTaken != (m_block + 1)) 1034 addBranch(m_jit.jump(), notTaken); 1035 } 1036 1037 void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) 1038 { 1039 JSValueOperand arg1(this, node.child1()); 1040 JSValueOperand arg2(this, node.child2()); 1041 GPRReg arg1GPR = arg1.gpr(); 1042 GPRReg arg2GPR = arg2.gpr(); 1043 1044 JITCompiler::JumpList slowPath; 1045 1046 if (isKnownNotInteger(node.child1()) || isKnownNotInteger(node.child2())) { 1047 GPRResult result(this); 1048 GPRReg 
resultGPR = result.gpr(); 1049 1050 arg1.use(); 1051 arg2.use(); 1052 1053 flushRegisters(); 1054 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); 1055 1056 m_jit.or32(TrustedImm32(ValueFalse), resultGPR); 1057 jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly); 1058 } else { 1059 GPRTemporary result(this, arg2); 1060 GPRReg resultGPR = result.gpr(); 1061 1062 arg1.use(); 1063 arg2.use(); 1064 1065 if (!isKnownInteger(node.child1())) 1066 slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg1GPR, GPRInfo::tagTypeNumberRegister)); 1067 if (!isKnownInteger(node.child2())) 1068 slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister)); 1069 1070 m_jit.compare32(cond, arg1GPR, arg2GPR, resultGPR); 1071 1072 if (!isKnownInteger(node.child1()) || !isKnownInteger(node.child2())) { 1073 JITCompiler::Jump haveResult = m_jit.jump(); 1074 1075 slowPath.link(&m_jit); 1076 1077 silentSpillAllRegisters(resultGPR); 1078 callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR); 1079 silentFillAllRegisters(resultGPR); 1080 1081 m_jit.andPtr(TrustedImm32(1), resultGPR); 1082 1083 haveResult.link(&m_jit); 1084 } 1085 1086 m_jit.or32(TrustedImm32(ValueFalse), resultGPR); 1087 1088 jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly); 1089 } 1090 } 1091 1092 void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert) 1093 { 1094 Node& branchNode = at(branchNodeIndex); 1095 BlockIndex taken = branchNode.takenBlockIndex(); 1096 BlockIndex notTaken = branchNode.notTakenBlockIndex(); 1097 1098 // The branch instruction will branch to the taken block. 1099 // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 1100 if (taken == (m_block + 1)) { 1101 invert = !invert; 1102 BlockIndex tmp = taken; 1103 taken = notTaken; 1104 notTaken = tmp; 1105 } 1106 1107 JSValueOperand arg1(this, node.child1()); 1108 JSValueOperand arg2(this, node.child2()); 1109 GPRReg arg1GPR = arg1.gpr(); 1110 GPRReg arg2GPR = arg2.gpr(); 1111 1112 GPRTemporary result(this); 1113 GPRReg resultGPR = result.gpr(); 1114 1115 arg1.use(); 1116 arg2.use(); 1117 1118 if (isKnownCell(node.child1()) && isKnownCell(node.child2())) { 1119 // see if we get lucky: if the arguments are cells and they reference the same 1120 // cell, then they must be strictly equal. 1121 addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? notTaken : taken); 1122 1123 silentSpillAllRegisters(resultGPR); 1124 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR); 1125 silentFillAllRegisters(resultGPR); 1126 1127 addBranch(m_jit.branchTest32(invert ? JITCompiler::NonZero : JITCompiler::Zero, resultGPR), taken); 1128 } else { 1129 m_jit.orPtr(arg1GPR, arg2GPR, resultGPR); 1130 1131 JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister); 1132 1133 JITCompiler::Jump numberCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, GPRInfo::tagTypeNumberRegister); 1134 1135 addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR), taken); 1136 addBranch(m_jit.jump(), notTaken); 1137 1138 twoCellsCase.link(&m_jit); 1139 addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? 
notTaken : taken); 1140 1141 numberCase.link(&m_jit); 1142 1143 silentSpillAllRegisters(resultGPR); 1144 callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR); 1145 silentFillAllRegisters(resultGPR); 1146 1147 addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR), taken); 1148 } 1149 1150 if (notTaken != (m_block + 1)) 1151 addBranch(m_jit.jump(), notTaken); 1152 } 1153 1154 void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert) 1155 { 1156 JSValueOperand arg1(this, node.child1()); 1157 JSValueOperand arg2(this, node.child2()); 1158 GPRReg arg1GPR = arg1.gpr(); 1159 GPRReg arg2GPR = arg2.gpr(); 1160 1161 GPRTemporary result(this); 1162 GPRReg resultGPR = result.gpr(); 1163 1164 arg1.use(); 1165 arg2.use(); 1166 1167 if (isKnownCell(node.child1()) && isKnownCell(node.child2())) { 1168 // see if we get lucky: if the arguments are cells and they reference the same 1169 // cell, then they must be strictly equal. 1170 JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR); 1171 1172 m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR); 1173 1174 JITCompiler::Jump done = m_jit.jump(); 1175 1176 notEqualCase.link(&m_jit); 1177 1178 silentSpillAllRegisters(resultGPR); 1179 callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR); 1180 silentFillAllRegisters(resultGPR); 1181 1182 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR); 1183 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR); 1184 1185 done.link(&m_jit); 1186 } else { 1187 m_jit.orPtr(arg1GPR, arg2GPR, resultGPR); 1188 1189 JITCompiler::Jump twoCellsCase = m_jit.branchTestPtr(JITCompiler::Zero, resultGPR, GPRInfo::tagMaskRegister); 1190 1191 JITCompiler::Jump numberCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, GPRInfo::tagTypeNumberRegister); 1192 1193 m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, resultGPR); 1194 1195 JITCompiler::Jump done1 = m_jit.jump(); 1196 1197 twoCellsCase.link(&m_jit); 1198 JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1GPR, arg2GPR); 1199 1200 m_jit.move(JITCompiler::TrustedImmPtr(JSValue::encode(jsBoolean(!invert))), resultGPR); 1201 1202 JITCompiler::Jump done2 = m_jit.jump(); 1203 1204 numberCase.link(&m_jit); 1205 notEqualCase.link(&m_jit); 1206 1207 silentSpillAllRegisters(resultGPR); 1208 callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR); 1209 silentFillAllRegisters(resultGPR); 1210 1211 m_jit.andPtr(JITCompiler::TrustedImm32(1), resultGPR); 1212 1213 done1.link(&m_jit); 1214 1215 m_jit.or32(JITCompiler::TrustedImm32(ValueFalse), resultGPR); 1216 1217 done2.link(&m_jit); 1218 } 1219 1220 jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly); 1221 } 1222 1223 void SpeculativeJIT::emitCall(Node& node) 1224 { 1225 P_DFGOperation_E slowCallFunction; 1226 1227 if (node.op == Call) 1228 slowCallFunction = operationLinkCall; 1229 else { 1230 ASSERT(node.op == Construct); 1231 slowCallFunction = operationLinkConstruct; 1232 } 1233 1234 // For constructors, the this argument is not passed but we have to make space 1235 // for it. 1236 int dummyThisArgument = node.op == Call ? 0 : 1; 1237 1238 CallLinkInfo::CallType callType = node.op == Call ? 
CallLinkInfo::Call : CallLinkInfo::Construct; 1239 1240 NodeIndex calleeNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild()]; 1241 JSValueOperand callee(this, calleeNodeIndex); 1242 GPRReg calleeGPR = callee.gpr(); 1243 use(calleeNodeIndex); 1244 1245 // the call instruction's first child is either the function (normal call) or the 1246 // receiver (method call). subsequent children are the arguments. 1247 int numArgs = node.numChildren() - 1; 1248 1249 int numPassedArgs = numArgs + dummyThisArgument; 1250 1251 // amount of stuff (in units of sizeof(Register)) that we need to place at the 1252 // top of the JS stack. 1253 int callDataSize = 0; 1254 1255 // first there are the arguments 1256 callDataSize += numPassedArgs; 1257 1258 // and then there is the call frame header 1259 callDataSize += RegisterFile::CallFrameHeaderSize; 1260 1261 m_jit.storePtr(MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(numPassedArgs))), addressOfCallData(RegisterFile::ArgumentCount)); 1262 m_jit.storePtr(GPRInfo::callFrameRegister, addressOfCallData(RegisterFile::CallerFrame)); 1263 1264 for (int argIdx = 0; argIdx < numArgs; argIdx++) { 1265 NodeIndex argNodeIndex = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + argIdx]; 1266 JSValueOperand arg(this, argNodeIndex); 1267 GPRReg argGPR = arg.gpr(); 1268 use(argNodeIndex); 1269 1270 m_jit.storePtr(argGPR, addressOfCallData(-callDataSize + argIdx + dummyThisArgument)); 1271 } 1272 1273 m_jit.storePtr(calleeGPR, addressOfCallData(RegisterFile::Callee)); 1274 1275 flushRegisters(); 1276 1277 GPRResult result(this); 1278 GPRReg resultGPR = result.gpr(); 1279 1280 JITCompiler::DataLabelPtr targetToCheck; 1281 JITCompiler::Jump slowPath; 1282 1283 slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue()))); 1284 m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultGPR); 1285 m_jit.storePtr(resultGPR, addressOfCallData(RegisterFile::ScopeChain)); 1286 1287 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 1288 1289 JITCompiler::Call fastCall = m_jit.nearCall(); 1290 m_jit.notifyCall(fastCall, at(m_compileIndex).codeOrigin); 1291 1292 JITCompiler::Jump done = m_jit.jump(); 1293 1294 slowPath.link(&m_jit); 1295 1296 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); 1297 JITCompiler::Call slowCall = m_jit.addFastExceptionCheck(m_jit.appendCall(slowCallFunction), at(m_compileIndex).codeOrigin); 1298 m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); 1299 m_jit.notifyCall(m_jit.call(GPRInfo::returnValueGPR), at(m_compileIndex).codeOrigin); 1300 1301 done.link(&m_jit); 1302 1303 m_jit.move(GPRInfo::returnValueGPR, resultGPR); 1304 1305 jsValueResult(resultGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly); 1306 1307 m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin); 1308 } 36 1309 37 1310 template<bool strict> -
trunk/Source/JavaScriptCore/runtime/JSFunction.h
r100113 r100244
38 38 class VPtrHackExecutable;
39 39 namespace DFG {
40 class JITCodeGenerator;
40 class SpeculativeJIT;
41 41 class JITCompiler;
42 42 }
… …
48 48 class JSFunction : public JSNonFinalObject {
49 49 friend class JIT;
50 friend class DFG::JITCodeGenerator;
50 friend class DFG::SpeculativeJIT;
51 51 friend class DFG::JITCompiler;
52 52 friend class JSGlobalData;