Changeset 237547 in webkit
- Timestamp: Oct 29, 2018, 6:16:03 AM
- Location: trunk
- Files: 21 added, 4 deleted, 125 edited, 9 copied
trunk/ChangeLog
r237491 → r237547

2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>

        New bytecode format for JSC
        https://bugs.webkit.org/show_bug.cgi?id=187373
        <rdar://problem/44186758>

        Reviewed by Filip Pizlo.

        Disable JIT by default on 32-bit platforms.

        * Source/cmake/WebKitFeatures.cmake:

2018-10-27  Charlie Turner  <cturner@igalia.com>
trunk/JSTests/ChangeLog
r237486 → r237547

2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>

        New bytecode format for JSC
        https://bugs.webkit.org/show_bug.cgi?id=187373
        <rdar://problem/44186758>

        Reviewed by Filip Pizlo.

        Add tests to ensure that the inferred inline capacity for a narrow op_new_object will be
        capped at 255.

        * stress/maximum-inline-capacity.js: Added.
        (test1):
        (test3.Foo):
        (test3):

2018-10-26  Commit Queue  <commit-queue@webkit.org>
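The cap exists because a narrow instruction encodes each operand in a single byte, so 255 is the
largest inline capacity the encoding can represent. A minimal sketch of the clamping idea, with
hypothetical names (in JSC the real check lives in the bytecode generator and its Fits machinery):

#include <algorithm>

// Sketch: a narrow operand occupies one byte, so any inferred inline
// capacity above 255 must be capped (or the op emitted in wide form).
static constexpr unsigned maxNarrowOperand = 255;

unsigned cappedInlineCapacity(unsigned inferredInlineCapacity)
{
    return std::min(inferredInlineCapacity, maxNarrowOperand);
}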
trunk/LayoutTests/ChangeLog
r237531 → r237547

2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>

        New bytecode format for JSC
        https://bugs.webkit.org/show_bug.cgi?id=187373
        <rdar://problem/44186758>

        Reviewed by Filip Pizlo.

        Don't use recursion in `equal`, to avoid premature stack overflows when testing deep arrays.

        * fast/dom/Window/resources/postmessage-test.js:

2018-10-28  Michael Catanzaro  <mcatanzaro@igalia.com>
trunk/LayoutTests/fast/dom/Window/resources/postmessage-test.js
r237486 → r237547

The recursive implementation of `equal` is replaced with an iterative one that walks a pair of
work queues, so deeply nested arrays no longer overflow the stack:

function equal(actual, expected)
{
    var actualQueue = [actual];
    var expectedQueue = [expected];
    while (actualQueue.length && expectedQueue.length) {
        var actual = actualQueue.shift();
        var expected = expectedQueue.shift();

        if (actual === expected)
            continue;
        if (typeof actual !== typeof expected)
            return false;
        if ((actual instanceof Date) || (expected instanceof Date)) {
            if ((actual instanceof Date) && (expected instanceof Date) && actual.getTime() == expected.getTime())
                continue;
            return false;
        }
        if ((actual instanceof Number) || (expected instanceof Number)) {
            if ((actual instanceof Number) && (expected instanceof Number) && (expected.valueOf() == actual.valueOf()))
                continue;
            return false;
        }
        if ((actual instanceof Boolean) || (expected instanceof Boolean)) {
            if ((actual instanceof Boolean) && (expected instanceof Boolean) && (expected.valueOf() == actual.valueOf()))
                continue;
            return false;
        }
        if ((actual instanceof String) || (expected instanceof String)) {
            if ((actual instanceof String) && (expected instanceof String) && (expected.valueOf() == actual.valueOf()))
                continue;
            return false;
        }
        if (Array.isArray(actual) || Array.isArray(expected)) {
            if (!Array.isArray(actual) || !Array.isArray(expected))
                return false;
            if (actual.length != expected.length)
                return false;
            for (var i = 0; i < actual.length; i++) {
                if ((i in actual) ^ (i in expected))
                    return false;
                actualQueue.push(actual[i]);
                expectedQueue.push(expected[i]);
            }
            continue;
        }
        if (actual.constructor !== expected.constructor)
            return false;
        try {
            var keys = Object.keys(actual);
        } catch (e) {
            return false;
        }
        try {
            if (!equal(keys, Object.keys(expected)))
                return false;
        } catch (e) {
            return false;
        }
        for (var i = 0; i < keys.length; i++) {
            actualQueue.push(actual[keys[i]]);
            expectedQueue.push(expected[keys[i]]);
        }
    }
    return true;
}
trunk/Source/JavaScriptCore/CMakeLists.txt
r237486 → r237547

The Python-based generate-bytecode-files custom command (driven by bytecode/BytecodeList.json) is
replaced with the new Ruby generator (driven by bytecode/BytecodeList.rb):

set(GENERATOR
    generator/Argument.rb
    generator/Assertion.rb
    generator/DSL.rb
    generator/Fits.rb
    generator/GeneratedFile.rb
    generator/Metadata.rb
    generator/Opcode.rb
    generator/OpcodeGroup.rb
    generator/Options.rb
    generator/Section.rb
    generator/Template.rb
    generator/Type.rb
    generator/main.rb
)

add_custom_command(
    OUTPUT ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeIndices.h
    MAIN_DEPENDENCY ${JAVASCRIPTCORE_DIR}/generator/main.rb
    DEPENDS ${GENERATOR} bytecode/BytecodeList.rb
    COMMAND ${RUBY_EXECUTABLE} ${JAVASCRIPTCORE_DIR}/generator/main.rb --bytecodes_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/Bytecodes.h --init_bytecodes_asm ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/InitBytecodes.asm --bytecode_structs_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeStructs.h --bytecode_indices_h ${DERIVED_SOURCES_JAVASCRIPTCORE_DIR}/BytecodeIndices.h ${JAVASCRIPTCORE_DIR}/bytecode/BytecodeList.rb
    VERBATIM)

New headers are installed from bytecode/ and llint/:

    bytecode/InstructionStream.h
    bytecode/MetadataTable.h
    bytecode/Opcode.h
    bytecode/OpcodeSize.h
    bytecode/UnlinkedMetadataTable.h
    llint/LLIntOpcode.h
trunk/Source/JavaScriptCore/ChangeLog
r237492 → r237547

2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>

        New bytecode format for JSC
        https://bugs.webkit.org/show_bug.cgi?id=187373
        <rdar://problem/44186758>

        Reviewed by Filip Pizlo.

        Replace unlinked and linked bytecode with a new immutable bytecode that does not embed
        any addresses. Instructions can be encoded as narrow (1-byte operands) or wide (4-byte
        operands) and might contain an extra operand, the metadataID. The metadataID is used to
        access the instruction's mutable data in a side table in the CodeBlock (the MetadataTable).

        Bytecodes now must be structs declared in the new BytecodeList.rb. All bytecodes give names
        and types to all of their operands. Additionally, reading a bytecode from the instruction
        stream requires decoding the whole bytecode, i.e. it's no longer possible to access
        arbitrary operands directly from the stream. (A decoding sketch follows this entry.)

        * CMakeLists.txt:
        * DerivedSources.make:
        * JavaScriptCore.xcodeproj/project.pbxproj:
        * Sources.txt:
        * assembler/MacroAssemblerCodeRef.h:
        * bytecode/ArithProfile.h:
        * bytecode/ArrayAllocationProfile.h:
        * bytecode/ArrayProfile.h:
        * bytecode/BytecodeBasicBlock.cpp:
        * bytecode/BytecodeBasicBlock.h:
        * bytecode/BytecodeDumper.cpp:
        * bytecode/BytecodeDumper.h:
        * bytecode/BytecodeGeneratorification.cpp:
        * bytecode/BytecodeGeneratorification.h:
        * bytecode/BytecodeGraph.h:
        * bytecode/BytecodeKills.h:
        * bytecode/BytecodeList.json: Removed.
        * bytecode/BytecodeList.rb: Added.
        * bytecode/BytecodeLivenessAnalysis.cpp:
        * bytecode/BytecodeLivenessAnalysis.h:
        * bytecode/BytecodeLivenessAnalysisInlines.h:
        * bytecode/BytecodeRewriter.cpp:
        * bytecode/BytecodeRewriter.h:
        * bytecode/BytecodeUseDef.h:
        * bytecode/CallLinkStatus.cpp:
        * bytecode/CodeBlock.cpp:
        * bytecode/CodeBlock.h:
        * bytecode/CodeBlockInlines.h: Added.
        * bytecode/Fits.h: Added.
        * bytecode/GetByIdMetadata.h: Copied from Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h.
        * bytecode/GetByIdStatus.cpp:
        * bytecode/Instruction.h:
        * bytecode/InstructionStream.cpp: Copied from Source/JavaScriptCore/bytecode/SpecialPointer.cpp.
        * bytecode/InstructionStream.h: Added.
        * bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp:
        * bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h:
        * bytecode/MetadataTable.cpp: Copied from Source/JavaScriptCore/bytecode/SpecialPointer.cpp.
        * bytecode/MetadataTable.h: Copied from Source/JavaScriptCore/runtime/Watchdog.h.
        * bytecode/Opcode.cpp:
        * bytecode/Opcode.h:
        * bytecode/OpcodeInlines.h:
        * bytecode/OpcodeSize.h: Copied from Source/JavaScriptCore/bytecode/SpecialPointer.cpp.
        * bytecode/PreciseJumpTargets.cpp:
        * bytecode/PreciseJumpTargets.h:
        * bytecode/PreciseJumpTargetsInlines.h:
        * bytecode/PutByIdStatus.cpp:
        * bytecode/SpecialPointer.cpp:
        * bytecode/SpecialPointer.h:
        * bytecode/UnlinkedCodeBlock.cpp:
        * bytecode/UnlinkedCodeBlock.h:
        * bytecode/UnlinkedInstructionStream.cpp: Removed.
        * bytecode/UnlinkedInstructionStream.h: Removed.
        * bytecode/UnlinkedMetadataTable.h: Copied from Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h.
        * bytecode/UnlinkedMetadataTableInlines.h: Added.
        * bytecode/VirtualRegister.cpp:
        * bytecode/VirtualRegister.h:
        * bytecompiler/BytecodeGenerator.cpp:
        * bytecompiler/BytecodeGenerator.h:
        * bytecompiler/Label.h:
        * bytecompiler/NodesCodegen.cpp:
        * bytecompiler/ProfileTypeBytecodeFlag.cpp: Copied from Source/JavaScriptCore/bytecode/VirtualRegister.cpp.
        * bytecompiler/ProfileTypeBytecodeFlag.h: Copied from Source/JavaScriptCore/bytecode/SpecialPointer.cpp.
        * bytecompiler/RegisterID.h:
        * bytecompiler/StaticPropertyAnalysis.h:
        * bytecompiler/StaticPropertyAnalyzer.h:
        * dfg/DFGByteCodeParser.cpp:
        * dfg/DFGCapabilities.cpp:
        * dfg/DFGCapabilities.h:
        * dfg/DFGOSREntry.cpp:
        * dfg/DFGSpeculativeJIT.cpp:
        * ftl/FTLLowerDFGToB3.cpp:
        * ftl/FTLOperations.cpp:
        * generate-bytecode-files: Removed.
        * generator/Argument.rb: Added.
        * generator/Assertion.rb: Added.
        * generator/DSL.rb: Added.
        * generator/Fits.rb: Added.
        * generator/GeneratedFile.rb: Added.
        * generator/Metadata.rb: Added.
        * generator/Opcode.rb: Added.
        * generator/OpcodeGroup.rb: Added.
        * generator/Options.rb: Added.
        * generator/Section.rb: Added.
        * generator/Template.rb: Added.
        * generator/Type.rb: Added.
        * generator/main.rb: Added.
        * interpreter/AbstractPC.h:
        * interpreter/CallFrame.cpp:
        * interpreter/CallFrame.h:
        * interpreter/Interpreter.cpp:
        * interpreter/Interpreter.h:
        * interpreter/InterpreterInlines.h:
        * interpreter/StackVisitor.cpp:
        * interpreter/VMEntryRecord.h:
        * jit/JIT.cpp:
        * jit/JIT.h:
        * jit/JITArithmetic.cpp:
        * jit/JITCall.cpp:
        * jit/JITDisassembler.cpp:
        * jit/JITExceptions.cpp:
        * jit/JITInlines.h:
        * jit/JITMathIC.h:
        * jit/JITOpcodes.cpp:
        * jit/JITOpcodes32_64.cpp:
        * jit/JITOperations.cpp:
        * jit/JITOperations.h:
        * jit/JITPropertyAccess.cpp:
        * jit/RegisterSet.cpp:
        * jit/SlowPathCall.h:
        * llint/LLIntData.cpp:
        * llint/LLIntData.h:
        * llint/LLIntOffsetsExtractor.cpp:
        * llint/LLIntSlowPaths.cpp:
        * llint/LLIntSlowPaths.h:
        * llint/LowLevelInterpreter.asm:
        * llint/LowLevelInterpreter.cpp:
        * llint/LowLevelInterpreter32_64.asm:
        * llint/LowLevelInterpreter64.asm:
        * offlineasm/arm64.rb:
        * offlineasm/asm.rb:
        * offlineasm/ast.rb:
        * offlineasm/cloop.rb:
        * offlineasm/generate_offset_extractor.rb:
        * offlineasm/instructions.rb:
        * offlineasm/offsets.rb:
        * offlineasm/parser.rb:
        * offlineasm/transform.rb:
        * offlineasm/x86.rb:
        * parser/ResultType.h:
        * profiler/ProfilerBytecodeSequence.cpp:
        * runtime/CommonSlowPaths.cpp:
        * runtime/CommonSlowPaths.h:
        * runtime/ExceptionFuzz.cpp:
        * runtime/ExceptionFuzz.h:
        * runtime/GetPutInfo.cpp: Copied from Source/JavaScriptCore/bytecode/SpecialPointer.cpp.
        * runtime/GetPutInfo.h:
        * runtime/JSCPoison.h:
        * runtime/JSType.cpp: Added.
        * runtime/JSType.h:
        * runtime/SamplingProfiler.cpp:
        * runtime/SamplingProfiler.h:
        * runtime/SlowPathReturnType.h:
        * runtime/VM.h:
        * runtime/Watchdog.h:
        * tools/HeapVerifier.cpp:

The earlier entries for the roll-out and partial roll-in of this change move below the new entry,
otherwise unchanged:

2018-10-27  Yusuke Suzuki  <yusukesuzuki@slowstart.org>

        Unreviewed, partial rolling in r237254
        https://bugs.webkit.org/show_bug.cgi?id=190340

        We do not use the added function right now; it exists to investigate the cause of the
        regression. This patch also omits the Parser.{h,cpp} changes, to check whether Parser.cpp's
        inlining decisions are the culprit of the regression on iOS devices.

        * bytecode/UnlinkedFunctionExecutable.cpp:
        (JSC::UnlinkedFunctionExecutable::fromGlobalCode):
        * bytecode/UnlinkedFunctionExecutable.h:
        * parser/SourceCodeKey.h:
        (JSC::SourceCodeKey::SourceCodeKey):
        (JSC::SourceCodeKey::operator== const):
        * runtime/CodeCache.cpp:
        (JSC::CodeCache::getUnlinkedGlobalCodeBlock):
        (JSC::CodeCache::getUnlinkedGlobalFunctionExecutable):
        * runtime/CodeCache.h:
        * runtime/FunctionConstructor.cpp:
        (JSC::constructFunctionSkippingEvalEnabledCheck):
        * runtime/FunctionExecutable.cpp:
        (JSC::FunctionExecutable::fromGlobalCode):
        * runtime/FunctionExecutable.h:

2018-10-26  Commit Queue  <commit-queue@webkit.org>

        Unreviewed, rolling out r237479 and r237484.
        https://bugs.webkit.org/show_bug.cgi?id=190978

        broke JSC on iOS (Requested by tadeuzagallo on #webkit).

        Reverted changesets:

        "New bytecode format for JSC"
        https://bugs.webkit.org/show_bug.cgi?id=187373
        https://trac.webkit.org/changeset/237479

        "Gardening: Build fix after r237479."
        https://bugs.webkit.org/show_bug.cgi?id=187373
        https://trac.webkit.org/changeset/237484

2018-10-26  Tadeu Zagallo  <tzagallo@apple.com>

        Gardening: Build fix after r237479.
        https://bugs.webkit.org/show_bug.cgi?id=187373

        Unreviewed.

        * Configurations/JSC.xcconfig:
        * JavaScriptCore.xcodeproj/project.pbxproj:
        * llint/LLIntData.cpp:
        (JSC::LLInt::initialize):

2018-10-26  Tadeu Zagallo  <tzagallo@apple.com>

        New bytecode format for JSC
        https://bugs.webkit.org/show_bug.cgi?id=187373
        <rdar://problem/44186758>

        Reviewed by Filip Pizlo.

        [Description and file list identical to the 2018-10-29 entry above.]

2018-10-26  Commit Queue  <commit-queue@webkit.org>
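The entry above is the heart of the change: instructions are immutable and position-independent,
each exists in a narrow (1-byte operands) and a wide (4-byte operands) encoding, and mutable
profiling state is reached through a metadataID into the CodeBlock's MetadataTable. Below is a
minimal, self-contained sketch of that decoding scheme. Every opcode, layout, and name in it is
hypothetical; the real structs and dispatch are generated from BytecodeList.rb and differ in
detail.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Hypothetical opcodes: op_wide prefixes a wide-encoded instruction.
enum Opcode : uint8_t { op_wide, op_add, op_end };

// Decoded form of a hypothetical two-operand add with a metadataID.
struct OpAdd { uint32_t dst, lhs, rhs, metadataID; };

// Mutable data lives in a side table, not in the instruction stream.
struct AddMetadata { uint32_t executionCount; };

// Decode one op_add starting at ip. Narrow: opcode + 1-byte operands.
// Wide: opcode + 4-byte operands (ip points past the op_wide prefix).
static OpAdd decodeAdd(const uint8_t* ip, bool wide)
{
    uint32_t operand[4];
    for (size_t i = 0; i < 4; i++) {
        if (wide)
            std::memcpy(&operand[i], ip + 1 + 4 * i, 4);
        else
            operand[i] = ip[1 + i];
    }
    return { operand[0], operand[1], operand[2], operand[3] };
}

int main()
{
    // Narrow encoding: [op_add, dst, lhs, rhs, metadataID], then op_end.
    std::vector<uint8_t> stream = { op_add, 0, 1, 2, 0, op_end };
    std::vector<AddMetadata> metadata = { { 0 } }; // entry for metadataID 0
    int32_t reg[3] = { 0, 20, 22 };

    size_t pc = 0;
    while (stream[pc] != op_end) {
        bool wide = stream[pc] == op_wide;
        const uint8_t* ip = stream.data() + pc + (wide ? 1 : 0);
        switch (ip[0]) {
        case op_add: {
            // The whole bytecode is decoded at once; operands are never
            // read by raw offset from the stream.
            OpAdd op = decodeAdd(ip, wide);
            reg[op.dst] = reg[op.lhs] + reg[op.rhs];
            metadata[op.metadataID].executionCount++;
            pc += (wide ? 1 : 0) + 1 + 4 * (wide ? 4 : 1);
            break;
        }
        }
    }
    printf("r0 = %d\n", reg[0]); // prints: r0 = 42
}

Keeping the mutable profiling data behind the metadataID is what lets the instruction stream
itself stay immutable and free of embedded addresses.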
trunk/Source/JavaScriptCore/Configurations/JSC.xcconfig
r237486 → r237547

The derived-sources directory is appended to the header search paths:

// Explicitly add the PrivateHeaders directory to the search path so that generated header files can be found in production builds.
HEADER_SEARCH_PATHS = "$(JAVASCRIPTCORE_FRAMEWORKS_DIR)/JavaScriptCore.framework/PrivateHeaders" $(inherited);
HEADER_SEARCH_PATHS = "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore" $(HEADER_SEARCH_PATHS);
trunk/Source/JavaScriptCore/DerivedSources.make
r237486 → r237547

The three separate Python rules (one each for Bytecodes.h, BytecodeStructs.h, and
InitBytecodes.asm, all invoking generate-bytecode-files on BytecodeList.json) are replaced by a
single Ruby rule that generates everything in one pass:

# Bytecode files

Bytecodes.h BytecodeOffsets.h BytecodeStructs.h InitBytecodes.asm: $(wildcard $(JavaScriptCore)/generator/*.rb) $(JavaScriptCore)/bytecode/BytecodeList.rb
	$(RUBY) $(JavaScriptCore)/generator/main.rb $(JavaScriptCore)/bytecode/BytecodeList.rb --bytecode_structs_h BytecodeStructs.h --init_bytecodes_asm InitBytecodes.asm --bytecodes_h Bytecodes.h --bytecode_indices_h BytecodeIndices.h
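The generated BytecodeStructs.h gives every bytecode a struct with named, typed operands, which is
what makes "decode the whole bytecode" workable. A hand-written sketch of the shape such a
generated struct might take (hypothetical fields and values, not the generator's actual output):

#include <cstdint>

// Hypothetical sketch of one generated bytecode struct, in the spirit of
// BytecodeStructs.h. The real structs are emitted by generator/main.rb
// from the declarations in bytecode/BytecodeList.rb.
struct OpGetById {
    static constexpr uint8_t opcodeID = 42; // made-up opcode number

    // Every operand has a name and a type; nothing is accessed by raw
    // stream offset anymore.
    uint32_t m_dst;        // destination virtual register
    uint32_t m_base;       // base object virtual register
    uint32_t m_property;   // identifier table index
    uint32_t m_metadataID; // index into the CodeBlock's MetadataTable

    uint32_t dst() const { return m_dst; }
    uint32_t base() const { return m_base; }
    uint32_t property() const { return m_property; }
    uint32_t metadataID() const { return m_metadataID; }
};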
trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
r237486 → r237547

The project file is updated for the new and removed sources:

- The target's "Derived Sources" dependency is rewired: the old PBXTargetDependency
  (65442D5018EBB744007AF92F) and its container item proxy (65442D4F18EBB744007AF92F) are removed,
  and a new container item proxy for "Derived Sources" (14D9D9D9218462B5009126C2) is added.
- New PBXBuildFile entries install the new private headers: MetadataTable.h,
  UnlinkedMetadataTable.h, GetByIdMetadata.h, Instruction.h, OpcodeSize.h, InstructionStream.h,
  and Opcode.h (now marked Private); the old Instruction.h and Opcode.h build-file entries
  (969A07990ED1D3AE00F1F681, 969A079B0ED1D3AE00F1F681) are removed.
- New PBXFileReference entries are added for UnlinkedMetadataTableInlines.h,
  UnlinkedMetadataTable.h, MetadataTable.h, CodeBlockInlines.h, ProfileTypeBytecodeFlag.cpp,
  JSType.cpp, GetPutInfo.cpp, libWTF.a, GetByIdMetadata.h, Fits.h, Instruction.h, OpcodeSize.h,
  BytecodeList.rb, and 14BA7752211A8E5F008D0B05 /*
ProfileTypeBytecodeFlag.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ProfileTypeBytecodeFlag.h; sourceTree = "<group>"; }; 3217 3235 14BA78F013AAB88F005B7C2C /* SlotVisitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SlotVisitor.h; sourceTree = "<group>"; }; 3218 3236 14BA7A9513AADFF8005B7C2C /* Heap.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Heap.cpp; sourceTree = "<group>"; }; … … 3229 3247 14CA958A16AB50DE00938A06 /* StaticPropertyAnalyzer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StaticPropertyAnalyzer.h; sourceTree = "<group>"; }; 3230 3248 14CA958C16AB50FA00938A06 /* ObjectAllocationProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ObjectAllocationProfile.h; sourceTree = "<group>"; }; 3231 14D2F3D8139F4BE200491031 /* MarkedSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkedSpace.cpp; sourceTree = "<group>"; }; 3249 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InstructionStream.cpp; sourceTree = "<group>"; }; 3250 14CC3BA22138A238002D58B6 /* InstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InstructionStream.h; sourceTree = "<group>"; }; 3232 3251 14D2F3D9139F4BE200491031 /* MarkedSpace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MarkedSpace.h; sourceTree = "<group>"; }; 3233 3252 14D792640DAA03FB001A9F05 /* CLoopStack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CLoopStack.h; sourceTree = "<group>"; }; … … 3244 3263 14F7256314EE265E00B1652B /* WeakHandleOwner.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WeakHandleOwner.cpp; sourceTree = "<group>"; }; 3245 3264 14F7256414EE265E00B1652B /* WeakHandleOwner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakHandleOwner.h; sourceTree = "<group>"; }; 3265 14F79F6E216EAD5000046D39 /* MetadataTable.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MetadataTable.cpp; sourceTree = "<group>"; }; 3246 3266 169948EDE68D4054B01EF797 /* DefinePropertyAttributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DefinePropertyAttributes.h; sourceTree = "<group>"; }; 3247 3267 1879510614C540FFB561C124 /* JSModuleLoader.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSModuleLoader.cpp; sourceTree = "<group>"; }; … … 3598 3618 6514F21718B3E1670098FF8B /* Bytecodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Bytecodes.h; sourceTree = "<group>"; }; 3599 3619 6514F21818B3E1670098FF8B /* InitBytecodes.asm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.asm.asm; path = InitBytecodes.asm; sourceTree = "<group>"; }; 3600 6529FB3018B2D63900C61102 /* generate-bytecode-files */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = "generate-bytecode-files"; sourceTree = "<group>"; };3601 6529FB3118B2D99900C61102 /* BytecodeList.json */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = text; path = BytecodeList.json; sourceTree = "<group>"; };3602 3620 652A3A201651C66100A80AFE /* ARM64Disassembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = ARM64Disassembler.cpp; path = disassembler/ARM64Disassembler.cpp; sourceTree = "<group>"; }; 3603 3621 652A3A221651C69700A80AFE /* A64DOpcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = A64DOpcode.cpp; path = disassembler/ARM64/A64DOpcode.cpp; sourceTree = "<group>"; }; … … 3949 3967 969A07910ED1D3AE00F1F681 /* CodeBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeBlock.h; sourceTree = "<group>"; }; 3950 3968 969A07920ED1D3AE00F1F681 /* DirectEvalCodeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DirectEvalCodeCache.h; sourceTree = "<group>"; }; 3951 969A07930ED1D3AE00F1F681 /* Instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Instruction.h; sourceTree = "<group>"; };3952 3969 969A07940ED1D3AE00F1F681 /* Opcode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Opcode.cpp; sourceTree = "<group>"; }; 3953 3970 969A07950ED1D3AE00F1F681 /* Opcode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Opcode.h; sourceTree = "<group>"; }; … … 4432 4449 ADE802971E08F1C90058DE78 /* WebAssemblyLinkErrorPrototype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = WebAssemblyLinkErrorPrototype.h; path = js/WebAssemblyLinkErrorPrototype.h; sourceTree = "<group>"; }; 4433 4450 ADE8029D1E08F2260058DE78 /* WebAssemblyLinkErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = WebAssemblyLinkErrorConstructor.cpp; path = js/WebAssemblyLinkErrorConstructor.cpp; sourceTree = "<group>"; }; 4434 B59F89371891AD3300D5CCDC /* UnlinkedInstructionStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnlinkedInstructionStream.h; sourceTree = "<group>"; };4435 B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UnlinkedInstructionStream.cpp; sourceTree = "<group>"; };4436 4451 BC021BF2136900C300FC5467 /* ToolExecutable.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = ToolExecutable.xcconfig; sourceTree = "<group>"; }; 4437 4452 BC02E9040E1839DB000F9297 /* ErrorConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ErrorConstructor.cpp; sourceTree = "<group>"; }; … … 5005 5020 937B63CC09E766D200A671DD /* DerivedSources.make */, 5006 5021 0F93275A1C20BCDF00CF6564 /* dynbench.cpp */, 5007 6529FB3018B2D63900C61102 /* generate-bytecode-files */,5008 5022 F5C290E60284F98E018635CA /* JavaScriptCorePrefix.h */, 5009 5023 45E12D8806A49B0F00E9DF84 /* jsc.cpp */, … … 5055 5069 9322A00306C341D3009067BB /* libicucore.dylib */, 5056 5070 51F0EC0705C86C9A00E6DF1B /* libobjc.dylib */, 5071 1498CAD3214656C400710879 /* libWTF.a */, 5057 5072 A8A4748D151A8306004123FF /* libWTF.a */, 5058 5073 371D842C17C98B6E00ECF994 /* libz.dylib */, … … 5837 5852 0F7C5FB71D888A010044F5E2 /* MarkedBlockInlines.h */, 5838 5853 141448CA13A176EC00F5BA1A /* 
MarkedBlockSet.h */, 5839 14D2F3D8139F4BE200491031 /* MarkedSpace.cpp */,5840 5854 14D2F3D9139F4BE200491031 /* MarkedSpace.h */, 5841 5855 0F7DF1301E2970D50095951B /* MarkedSpaceInlines.h */, … … 6441 6455 960097A50EBABB58007A7297 /* LabelScope.h */, 6442 6456 655EB29A10CE2581001A990E /* NodesCodegen.cpp */, 6457 14788EE221501AF700A561C8 /* ProfileTypeBytecodeFlag.cpp */, 6458 14BA7752211A8E5F008D0B05 /* ProfileTypeBytecodeFlag.h */, 6443 6459 969A07280ED1CE6900F1F681 /* RegisterID.h */, 6444 6460 14DF04D916B3996D0016A513 /* StaticPropertyAnalysis.h */, … … 6667 6683 0F2B66B217B6B5AB00A7AE3F /* GenericTypedArrayView.h */, 6668 6684 0F2B66B317B6B5AB00A7AE3F /* GenericTypedArrayViewInlines.h */, 6685 14788EE521501B2900A561C8 /* GetPutInfo.cpp */, 6669 6686 796465681B952FF0003059EE /* GetPutInfo.h */, 6670 6687 BC02E9B80E184545000F9297 /* GetterSetter.cpp */, … … 6889 6906 70ECA6001AFDBEA200449739 /* JSTemplateObjectDescriptor.cpp */, 6890 6907 70ECA6011AFDBEA200449739 /* JSTemplateObjectDescriptor.h */, 6908 14788EE421501B2800A561C8 /* JSType.cpp */, 6891 6909 14ABB454099C2A0F00E2A24F /* JSType.h */, 6892 6910 0F2B66CC17B6B5AB00A7AE3F /* JSTypedArrayConstructors.cpp */, … … 7666 7684 7094C4DD1AE439530041A2EE /* BytecodeIntrinsicRegistry.h */, 7667 7685 0F2DD80A1AB3D85800BBB8E8 /* BytecodeKills.h */, 7668 6529FB3118B2D99900C61102 /* BytecodeList.json*/,7686 14BA7751211086A0008D0B05 /* BytecodeList.rb */, 7669 7687 C2FCAE0E17A9C24E0034C735 /* BytecodeLivenessAnalysis.cpp */, 7670 7688 C2FCAE0F17A9C24E0034C735 /* BytecodeLivenessAnalysis.h */, … … 7688 7706 0F8F943D1667632D00D61971 /* CodeBlockHash.cpp */, 7689 7707 0F8F943E1667632D00D61971 /* CodeBlockHash.h */, 7708 142F16E921583B5E003D49C9 /* CodeBlockInlines.h */, 7690 7709 0FC97F2F182020D7002C9B26 /* CodeBlockJettisoningWatchpoint.cpp */, 7691 7710 0FC97F30182020D7002C9B26 /* CodeBlockJettisoningWatchpoint.h */, … … 7723 7742 0FB105831675480C00F8AB6E /* ExitKind.h */, 7724 7743 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */, 7744 14A46809216FA534000D2B1A /* Fits.h */, 7725 7745 0F666EBF183566F900D017F1 /* FullBytecodeLiveness.h */, 7726 7746 AD4252521E5D0F22009D2A97 /* FullCodeOrigin.cpp */, … … 7728 7748 14AD91161DCA97FD0014F9FE /* FunctionCodeBlock.cpp */, 7729 7749 14AD91071DCA92940014F9FE /* FunctionCodeBlock.h */, 7750 1498CAD5214BF36D00710879 /* GetByIdMetadata.h */, 7730 7751 0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */, 7731 7752 0F93329614CA7DC10085F3C6 /* GetByIdStatus.h */, … … 7752 7773 0FB399BC20AF6B2A0017E213 /* InstanceOfVariant.cpp */, 7753 7774 0FB399B920AF6B2A0017E213 /* InstanceOfVariant.h */, 7754 969A07930ED1D3AE00F1F681 /* Instruction.h */, 7775 14A4680A216FA535000D2B1A /* Instruction.h */, 7776 14CC3BA12138A238002D58B6 /* InstructionStream.cpp */, 7777 14CC3BA22138A238002D58B6 /* InstructionStream.h */, 7755 7778 53F6BF6C1C3F060A00F41E5D /* InternalFunctionAllocationProfile.h */, 7756 7779 BCFD8C900EEB2EE700283848 /* JumpTable.cpp */, … … 7761 7784 53FA2AE21CF380390022711D /* LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp */, 7762 7785 53FA2AE01CF37F3F0022711D /* LLIntPrototypeLoadAdaptiveStructureWatchpoint.h */, 7786 14F79F6E216EAD5000046D39 /* MetadataTable.cpp */, 7787 142F16DF215585C8003D49C9 /* MetadataTable.h */, 7763 7788 0FB5467C14F5CFD3002C2989 /* MethodOfGettingAValueProfile.cpp */, 7764 7789 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */, … … 7776 7801 969A07950ED1D3AE00F1F681 /* Opcode.h */, 7777 7802 FE64872D2141D04800AB0D3E /* OpcodeInlines.h */, 7803 
14A4680B216FA535000D2B1A /* OpcodeSize.h */, 7778 7804 0F2BDC2B151FDE8B00CD8910 /* Operands.h */, 7779 7805 A70447E917A0BD4600F5898E /* OperandsInlines.h */, … … 7831 7857 14142E501B796ECE00F4BF4B /* UnlinkedFunctionExecutable.h */, 7832 7858 14AD911C1DCA9FA40014F9FE /* UnlinkedGlobalCodeBlock.h */, 7833 B59F89381891ADB500D5CCDC /* UnlinkedInstructionStream.cpp*/,7834 B59F89371891AD3300D5CCDC /* UnlinkedInstructionStream.h */,7859 142D52BE21762958002DB086 /* UnlinkedMetadataTable.h */, 7860 142D52BD21762957002DB086 /* UnlinkedMetadataTableInlines.h */, 7835 7861 14AD912A1DCAAAB00014F9FE /* UnlinkedModuleProgramCodeBlock.cpp */, 7836 7862 14AD911F1DCA9FA40014F9FE /* UnlinkedModuleProgramCodeBlock.h */, … … 8952 8978 0F2B66E017B6B5AB00A7AE3F /* GenericTypedArrayView.h in Headers */, 8953 8979 0F2B66E117B6B5AB00A7AE3F /* GenericTypedArrayViewInlines.h in Headers */, 8980 1498CAD6214BF36D00710879 /* GetByIdMetadata.h in Headers */, 8954 8981 0F9332A014CA7DCD0085F3C6 /* GetByIdStatus.h in Headers */, 8955 8982 0F0332C418B01763005F979A /* GetByIdVariant.h in Headers */, … … 9034 9061 0FB399BF20AF6B3F0017E213 /* InstanceOfStatus.h in Headers */, 9035 9062 0FB399C020AF6B430017E213 /* InstanceOfVariant.h in Headers */, 9036 969A07990ED1D3AE00F1F681 /* Instruction.h in Headers */, 9063 14A4680C216FA565000D2B1A /* Instruction.h in Headers */, 9064 14C25B9E216EA36A00137764 /* InstructionStream.h in Headers */, 9037 9065 A7A8AF3B17ADB5F3005AB174 /* Int16Array.h in Headers */, 9038 9066 A7A8AF3C17ADB5F3005AB174 /* Int32Array.h in Headers */, … … 9347 9375 E328C6C71DA4304500D255FD /* MaxFrameExtentForSlowPathCall.h in Headers */, 9348 9376 90213E3E123A40C200D422F3 /* MemoryStatistics.h in Headers */, 9377 142F16E021558802003D49C9 /* MetadataTable.h in Headers */, 9349 9378 0FB5467B14F5C7E1002C2989 /* MethodOfGettingAValueProfile.h in Headers */, 9350 9379 7C008CE7187631B600955C24 /* Microtask.h in Headers */, … … 9386 9415 BC18C4460E16F5CD00B34460 /* ObjectPrototype.h in Headers */, 9387 9416 E124A8F70E555775003091F1 /* OpaqueJSString.h in Headers */, 9388 969A079B0ED1D3AE00F1F681/* Opcode.h in Headers */,9417 14F79F70216EAFD200046D39 /* Opcode.h in Headers */, 9389 9418 FE64872E2141D04800AB0D3E /* OpcodeInlines.h in Headers */, 9419 14A4680D216FA56A000D2B1A /* OpcodeSize.h in Headers */, 9390 9420 0F2BDC2C151FDE9100CD8910 /* Operands.h in Headers */, 9391 9421 A70447EA17A0BD4600F5898E /* OperandsInlines.h in Headers */, … … 9622 9652 14142E511B796ECE00F4BF4B /* UnlinkedFunctionExecutable.h in Headers */, 9623 9653 14AD91221DCA9FA40014F9FE /* UnlinkedGlobalCodeBlock.h in Headers */, 9654 146C384B2177ACDF0079F6D9 /* UnlinkedMetadataTable.h in Headers */, 9624 9655 14AD91251DCA9FA40014F9FE /* UnlinkedModuleProgramCodeBlock.h in Headers */, 9625 9656 14AD91261DCA9FA40014F9FE /* UnlinkedProgramCodeBlock.h in Headers */, … … 9950 9981 ); 9951 9982 dependencies = ( 9983 14D9D9DA218462B5009126C2 /* PBXTargetDependency */, 9952 9984 ); 9953 9985 name = jsc; … … 10108 10140 runOnlyForDeploymentPostprocessing = 0; 10109 10141 shellPath = /bin/sh; 10110 shellScript = "exec ${SRCROOT}/postprocess-headers.sh ";10142 shellScript = "exec ${SRCROOT}/postprocess-headers.sh\n"; 10111 10143 }; 10112 10144 374F95C9205F9975002BF68F /* Make libWTF.a Symbolic Link */ = { … … 10585 10617 targetProxy = 14BD689A215191740050DAFF /* PBXContainerItemProxy */; 10586 10618 }; 10619 14D9D9DA218462B5009126C2 /* PBXTargetDependency */ = { 10620 isa = PBXTargetDependency; 10621 target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; 
10622 targetProxy = 14D9D9D9218462B5009126C2 /* PBXContainerItemProxy */; 10623 }; 10587 10624 53B4BD141F68C2AA00D2BEA3 /* PBXTargetDependency */ = { 10588 10625 isa = PBXTargetDependency; … … 10619 10656 target = 65FB3F6609D11E9100F49DEB /* Derived Sources */; 10620 10657 targetProxy = 65244BD218ECB5000010B708 /* PBXContainerItemProxy */; 10621 };10622 65442D5018EBB744007AF92F /* PBXTargetDependency */ = {10623 isa = PBXTargetDependency;10624 target = 65FB3F6609D11E9100F49DEB /* Derived Sources */;10625 targetProxy = 65442D4F18EBB744007AF92F /* PBXContainerItemProxy */;10626 10658 }; 10627 10659 65788A9E18B409EB00C189FF /* PBXTargetDependency */ = { -
trunk/Source/JavaScriptCore/Sources.txt
r237486 r237547 234 234 bytecode/InstanceOfStatus.cpp 235 235 bytecode/InstanceOfVariant.cpp 236 bytecode/InstructionStream.cpp 236 237 bytecode/IntrinsicGetterAccessCase.cpp 237 238 bytecode/JumpTable.cpp 238 239 bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp 239 240 bytecode/LazyOperandValueProfile.cpp 241 bytecode/MetadataTable.cpp 240 242 bytecode/MethodOfGettingAValueProfile.cpp 241 243 bytecode/ModuleNamespaceAccessCase.cpp … … 269 271 bytecode/UnlinkedFunctionCodeBlock.cpp 270 272 bytecode/UnlinkedFunctionExecutable.cpp 271 bytecode/UnlinkedInstructionStream.cpp272 273 bytecode/UnlinkedModuleProgramCodeBlock.cpp 273 274 bytecode/UnlinkedProgramCodeBlock.cpp … … 279 280 bytecompiler/BytecodeGenerator.cpp 280 281 bytecompiler/NodesCodegen.cpp 282 bytecompiler/ProfileTypeBytecodeFlag.cpp 281 283 282 284 debugger/Debugger.cpp … … 760 762 runtime/GeneratorFunctionPrototype.cpp 761 763 runtime/GeneratorPrototype.cpp 764 runtime/GetPutInfo.cpp 762 765 runtime/GetterSetter.cpp 763 766 runtime/HashMapImpl.cpp … … 852 855 runtime/JSSymbolTableObject.cpp 853 856 runtime/JSTemplateObjectDescriptor.cpp 857 runtime/JSType.cpp 854 858 runtime/JSTypedArrayConstructors.cpp 855 859 runtime/JSTypedArrayPrototypes.cpp -
trunk/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
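The hunks below const-qualify the raw pointers held by ReturnAddressPtr and MacroAssemblerCodePtr, since callers should never write through a code address, and on ARM Thumb-2 the low bit of the stored pointer marks Thumb code. A minimal standalone sketch of that shape, with invented names (CodePtr, tagThumbBit) rather than the real JSC declarations:

#include <cassert>
#include <cstdio>

// Simplified stand-in for MacroAssemblerCodePtr: it stores a const void*
// because callers are never allowed to write through a code pointer.
class CodePtr {
public:
    CodePtr() = default;

    // On ARM Thumb-2, bit 0 of a branch target distinguishes Thumb code;
    // the real class compiles the decoration in only for that CPU.
    static constexpr bool tagThumbBit = false; // illustrative toggle

    explicit CodePtr(const void* value)
        : m_value(tagThumbBit ? static_cast<const char*>(value) + 1 : value)
    {
        assert(value);
    }

    const void* executableAddress() const { return m_value; }

private:
    const void* m_value { nullptr };
};

int main()
{
    static const unsigned char code[] = { 0xc3 }; // pretend machine code
    CodePtr ptr(code);
    std::printf("code lives at %p\n", ptr.executableAddress());
}

The ternary mirrors the real #if CPU(ARM_THUMB2) decoration: adding 1 sets the low bit that the CPU interprets as a branch to Thumb code.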
r237486 r237547 177 177 ReturnAddressPtr() { } 178 178 179 explicit ReturnAddressPtr(void* value)179 explicit ReturnAddressPtr(const void* value) 180 180 : m_value(value) 181 181 { … … 192 192 } 193 193 194 void* value() const194 const void* value() const 195 195 { 196 196 PoisonedMasmPtr::assertIsNotPoisoned(m_value); … … 204 204 205 205 private: 206 void* m_value { nullptr };206 const void* m_value { nullptr }; 207 207 }; 208 208 … … 223 223 MacroAssemblerCodePtr(std::nullptr_t) : m_value(nullptr) { } 224 224 225 explicit MacroAssemblerCodePtr(void* value)225 explicit MacroAssemblerCodePtr(const void* value) 226 226 #if CPU(ARM_THUMB2) 227 227 // Decorate the pointer as a thumb code pointer. 228 : m_value(reinterpret_cast<char*>(value) + 1)228 : m_value(reinterpret_cast<const char*>(value) + 1) 229 229 #else 230 230 : m_value(value) … … 240 240 } 241 241 242 static MacroAssemblerCodePtr createFromExecutableAddress(void* value)242 static MacroAssemblerCodePtr createFromExecutableAddress(const void* value) 243 243 { 244 244 ASSERT(value); -
trunk/Source/JavaScriptCore/bytecode/ArithProfile.h
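The new overload added below just unpacks an OperandTypes pair and delegates to the existing two-argument constructor, so call sites holding a pair can construct a profile in one step. A hedged sketch of the same delegating-constructor pattern, using invented stand-ins for ResultType and OperandTypes:

enum class ResultType { Unknown, Int32, Number };

// Stand-in for OperandTypes: a pair of observed operand types.
struct OperandTypes {
    ResultType first() const { return m_first; }
    ResultType second() const { return m_second; }
    ResultType m_first { ResultType::Unknown };
    ResultType m_second { ResultType::Unknown };
};

class ArithProfileSketch {
public:
    ArithProfileSketch(ResultType lhs, ResultType rhs)
        : m_lhs(lhs), m_rhs(rhs) { }

    // Delegating constructor, mirroring the shape of the new overload:
    // unpack the pair and forward to the two-argument constructor.
    ArithProfileSketch(OperandTypes types)
        : ArithProfileSketch(types.first(), types.second()) { }

    ArithProfileSketch() = default;

private:
    ResultType m_lhs { ResultType::Unknown };
    ResultType m_rhs { ResultType::Unknown };
};

int main()
{
    OperandTypes types { ResultType::Int32, ResultType::Number };
    ArithProfileSketch profile(types); // one-step construction
    (void)profile;
}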
r237486 r237547 106 106 ASSERT(rhsObservedType().isEmpty()); 107 107 } 108 109 ArithProfile(OperandTypes types) 110 : ArithProfile(types.first(), types.second()) 111 { } 112 108 113 ArithProfile() = default; 109 114 -
trunk/Source/JavaScriptCore/bytecode/ArrayAllocationProfile.h
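The constructor added below lets callers seed a fresh profile with a recommended indexing mode instead of default-constructing and then calling initializeIndexingMode() separately. A small sketch of that convenience shape, with IndexingType reduced to a plain integer for illustration:

#include <cstdint>
#include <cstdio>

using IndexingType = uint8_t; // stand-in for JSC's IndexingType

class AllocationProfileSketch {
public:
    AllocationProfileSketch() = default;

    // Convenience constructor: forward the recommendation to the same
    // initializer that later callers would otherwise invoke by hand.
    explicit AllocationProfileSketch(IndexingType recommended)
    {
        initializeIndexingMode(recommended);
    }

    void initializeIndexingMode(IndexingType mode) { m_mode = mode; }
    IndexingType selectIndexingType() const { return m_mode; }

private:
    IndexingType m_mode { 0 };
};

int main()
{
    AllocationProfileSketch profile(/* recommended mode */ 2);
    std::printf("mode = %u\n", profile.selectIndexingType());
}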
r237486 r237547 33 33 class ArrayAllocationProfile { 34 34 public: 35 ArrayAllocationProfile() = default; 36 37 ArrayAllocationProfile(IndexingType recommendedIndexingMode) 38 { 39 initializeIndexingMode(recommendedIndexingMode); 40 } 41 35 42 IndexingType selectIndexingType() 36 43 { -
trunk/Source/JavaScriptCore/bytecode/ArrayProfile.h
r237486 r237547 211 211 212 212 class ArrayProfile { 213 friend class CodeBlock; 214 213 215 public: 214 216 ArrayProfile() -
trunk/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.cpp
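The hunks below rewrite basic-block construction to walk the new InstructionStream rather than doing raw Instruction* arithmetic: each ref carries its own offset() and size(), so the loop no longer advances by opcodeLengths[opcodeID]. A much-reduced sketch of the leader-splitting idea, using invented stand-in types (Inst, Block) instead of the real stream classes: a block starts at every jump target and immediately after every block-ending instruction.

#include <cstdio>
#include <vector>

// Stand-in for a variable-width instruction in a stream.
struct Inst {
    unsigned offset;     // byte offset in the stream
    unsigned size;       // instructions are variable-width in the new format
    bool isJumpTarget;   // precomputed, as computePreciseJumpTargets does
    bool endsBlock;      // terminal, throw, or branch
};

struct Block {
    unsigned leaderOffset;
    unsigned totalLength;
};

// Split a stream into basic blocks: a new block starts at every jump
// target and right after every block-ending instruction.
std::vector<Block> computeBlocks(const std::vector<Inst>& stream)
{
    std::vector<Block> blocks;
    bool nextIsLeader = true;
    for (const Inst& inst : stream) {
        if (inst.isJumpTarget || nextIsLeader) {
            blocks.push_back({ inst.offset, 0 });
            nextIsLeader = false;
        }
        blocks.back().totalLength += inst.size;
        if (inst.endsBlock)
            nextIsLeader = true;
    }
    return blocks;
}

int main()
{
    // Offsets/sizes mimic a stream of variable-width instructions.
    std::vector<Inst> stream = {
        { 0, 2, false, false },
        { 2, 6, false, true },  // e.g. a branch: ends the block
        { 8, 3, true,  false }, // its target: leads a new block
        { 11, 1, false, true },
    };
    for (const Block& b : computeBlocks(stream))
        std::printf("block at %u, length %u\n", b.leaderOffset, b.totalLength);
}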
r237486 r237547 40 40 } 41 41 42 static bool isJumpTarget(OpcodeID opcodeID, const Vector< unsigned, 32>& jumpTargets, unsigned bytecodeOffset)42 static bool isJumpTarget(OpcodeID opcodeID, const Vector<InstructionStream::Offset, 32>& jumpTargets, unsigned bytecodeOffset) 43 43 { 44 44 if (opcodeID == op_catch) … … 48 48 } 49 49 50 template<typename Block , typename Instruction>51 void BytecodeBasicBlock::computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)52 { 53 Vector< unsigned, 32> jumpTargets;54 computePreciseJumpTargets(codeBlock, instructions Begin, instructionCount, jumpTargets);50 template<typename Block> 51 void BytecodeBasicBlock::computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) 52 { 53 Vector<InstructionStream::Offset, 32> jumpTargets; 54 computePreciseJumpTargets(codeBlock, instructions, jumpTargets); 55 55 56 56 auto appendBlock = [&] (std::unique_ptr<BytecodeBasicBlock>&& block) { … … 67 67 68 68 auto entry = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); 69 auto firstBlock = std::make_unique<BytecodeBasicBlock>( 0, 0);69 auto firstBlock = std::make_unique<BytecodeBasicBlock>(BytecodeBasicBlock::EntryBlock); 70 70 linkBlocks(entry.get(), firstBlock.get()); 71 71 … … 78 78 bool nextInstructionIsLeader = false; 79 79 80 for ( unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {81 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]);82 unsigned opcodeLength = opcodeLengths[opcodeID];80 for (const auto& instruction : instructions) { 81 auto bytecodeOffset = instruction.offset(); 82 OpcodeID opcodeID = instruction->opcodeID(); 83 83 84 84 bool createdBlock = false; 85 85 // If the current bytecode is a jump target, then it's the leader of its own basic block. 86 86 if (isJumpTarget(opcodeID, jumpTargets, bytecodeOffset) || nextInstructionIsLeader) { 87 auto newBlock = std::make_unique<BytecodeBasicBlock>( bytecodeOffset, opcodeLength);87 auto newBlock = std::make_unique<BytecodeBasicBlock>(instruction); 88 88 current = newBlock.get(); 89 89 appendBlock(WTFMove(newBlock)); 90 90 createdBlock = true; 91 91 nextInstructionIsLeader = false; 92 bytecodeOffset += opcodeLength;93 92 } 94 93 … … 101 100 102 101 // Otherwise, just add to the length of the current block. 103 current->addLength(opcodeLength); 104 bytecodeOffset += opcodeLength; 102 current->addLength(instruction->size()); 105 103 } 106 104 … … 112 110 continue; 113 111 114 bool fallsThrough = true; 115 for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { 116 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); 117 unsigned opcodeLength = opcodeLengths[opcodeID]; 112 bool fallsThrough = true; 113 for (auto bytecodeOffset : block->offsets()) { 114 auto instruction = instructions.at(bytecodeOffset); 115 OpcodeID opcodeID = instruction->opcodeID(); 116 118 117 // If we found a terminal bytecode, link to the exit block. 
119 118 if (isTerminal(opcodeID)) { 120 ASSERT(bytecodeOffset + opcodeLength== block->leaderOffset() + block->totalLength());119 ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); 121 120 linkBlocks(block, exit.get()); 122 121 fallsThrough = false; … … 124 123 } 125 124 126 // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. 125 // If we found a throw, get the HandlerInfo for this instruction to see where we will jump. 127 126 // If there isn't one, treat this throw as a terminal. This is true even if we have a finally 128 127 // block because the finally block will create its own catch, which will generate a HandlerInfo. 129 128 if (isThrow(opcodeID)) { 130 ASSERT(bytecodeOffset + opcodeLength== block->leaderOffset() + block->totalLength());131 auto* handler = codeBlock->handlerForBytecodeOffset( bytecodeOffset);129 ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); 130 auto* handler = codeBlock->handlerForBytecodeOffset(instruction.offset()); 132 131 fallsThrough = false; 133 132 if (!handler) { … … 147 146 // If we found a branch, link to the block(s) that we jump to. 148 147 if (isBranch(opcodeID)) { 149 ASSERT(bytecodeOffset + opcodeLength== block->leaderOffset() + block->totalLength());150 Vector< unsigned, 1> bytecodeOffsetsJumpedTo;151 findJumpTargetsFor BytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, bytecodeOffsetsJumpedTo);148 ASSERT(bytecodeOffset + instruction->size() == block->leaderOffset() + block->totalLength()); 149 Vector<InstructionStream::Offset, 1> bytecodeOffsetsJumpedTo; 150 findJumpTargetsForInstruction(codeBlock, instruction, bytecodeOffsetsJumpedTo); 152 151 153 152 size_t numberOfJumpTargets = bytecodeOffsetsJumpedTo.size(); … … 173 172 break; 174 173 } 175 bytecodeOffset += opcodeLength;176 174 } 177 175 … … 185 183 186 184 appendBlock(WTFMove(exit)); 187 185 188 186 for (auto& basicBlock : basicBlocks) 189 187 basicBlock->shrinkToFit(); 190 188 } 191 189 192 void BytecodeBasicBlock::compute(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)193 { 194 computeImpl(codeBlock, instructions Begin, instructionCount, basicBlocks);195 } 196 197 void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks)198 { 199 BytecodeBasicBlock::computeImpl(codeBlock, instructionsBegin, instructionCount, basicBlocks);190 void BytecodeBasicBlock::compute(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) 191 { 192 computeImpl(codeBlock, instructions, basicBlocks); 193 } 194 195 void BytecodeBasicBlock::compute(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks) 196 { 197 computeImpl(codeBlock, instructions, basicBlocks); 200 198 } 201 199 -
trunk/Source/JavaScriptCore/bytecode/BytecodeBasicBlock.h
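In the header below, blocks now record InstructionStream::Offset values and compute() takes the stream itself. A hedged sketch of how a client might traverse the resulting control-flow graph via successors(), with an illustrative BlockNode type standing in for BytecodeBasicBlock:

#include <cstdio>
#include <vector>

// Illustrative block with successor edges, in the spirit of
// BytecodeBasicBlock::successors().
struct BlockNode {
    unsigned leaderOffset;
    std::vector<const BlockNode*> successors;
};

// Depth-first walk over the control-flow graph, the kind of traversal a
// client might do after compute() has filled in the successor edges.
void dumpReachable(const BlockNode& block, std::vector<const BlockNode*>& seen)
{
    for (const BlockNode* visited : seen) {
        if (visited == &block)
            return; // already dumped; the CFG can contain cycles
    }
    seen.push_back(&block);
    std::printf("block leader at offset %u\n", block.leaderOffset);
    for (const BlockNode* succ : block.successors)
        dumpReachable(*succ, seen);
}

int main()
{
    BlockNode exit { 11, {} };
    BlockNode target { 8, { &exit } };
    BlockNode entry { 0, { &target } };

    std::vector<const BlockNode*> seen;
    dumpReachable(entry, seen);
}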
r237486 r237547 26 26 #pragma once 27 27 28 #include "InstructionStream.h" 28 29 #include <limits.h> 29 30 #include <wtf/FastBitVector.h> … … 35 36 class UnlinkedCodeBlock; 36 37 struct Instruction; 37 struct UnlinkedInstruction;38 38 39 39 class BytecodeBasicBlock { … … 41 41 public: 42 42 enum SpecialBlockType { EntryBlock, ExitBlock }; 43 BytecodeBasicBlock( unsigned start, unsigned length);43 BytecodeBasicBlock(const InstructionStream::Ref&); 44 44 BytecodeBasicBlock(SpecialBlockType); 45 45 void shrinkToFit(); … … 48 48 bool isExitBlock() { return m_leaderOffset == UINT_MAX && m_totalLength == UINT_MAX; } 49 49 50 unsigned leaderOffset() { return m_leaderOffset; }51 unsigned totalLength() { return m_totalLength; }50 unsigned leaderOffset() const { return m_leaderOffset; } 51 unsigned totalLength() const { return m_totalLength; } 52 52 53 const Vector< unsigned>& offsets() const { return m_offsets; }53 const Vector<InstructionStream::Offset>& offsets() const { return m_offsets; } 54 54 55 55 const Vector<BytecodeBasicBlock*>& successors() const { return m_successors; } … … 60 60 unsigned index() const { return m_index; } 61 61 62 static void compute(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);63 static void compute(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>&);62 static void compute(CodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); 63 static void compute(UnlinkedCodeBlock*, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>&); 64 64 65 65 private: 66 template<typename Block , typename Instruction> static void computeImpl(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks);66 template<typename Block> static void computeImpl(Block* codeBlock, const InstructionStream& instructions, Vector<std::unique_ptr<BytecodeBasicBlock>>& basicBlocks); 67 67 68 68 void addSuccessor(BytecodeBasicBlock* block) { m_successors.append(block); } … … 70 70 void addLength(unsigned); 71 71 72 unsignedm_leaderOffset;72 InstructionStream::Offset m_leaderOffset; 73 73 unsigned m_totalLength; 74 74 unsigned m_index; 75 75 76 Vector< unsigned> m_offsets;76 Vector<InstructionStream::Offset> m_offsets; 77 77 Vector<BytecodeBasicBlock*> m_successors; 78 78 … … 81 81 }; 82 82 83 inline BytecodeBasicBlock::BytecodeBasicBlock( unsigned start, unsigned length)84 : m_leaderOffset( start)85 , m_totalLength( length)83 inline BytecodeBasicBlock::BytecodeBasicBlock(const InstructionStream::Ref& instruction) 84 : m_leaderOffset(instruction.offset()) 85 , m_totalLength(instruction->size()) 86 86 { 87 87 m_offsets.append(m_leaderOffset); -
trunk/Source/JavaScriptCore/bytecode/BytecodeDumper.cpp
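The diff that follows deletes most of BytecodeDumper's hand-written printers: helpers such as printUnaryOp and printBinaryOp decoded operands by repeatedly bumping a raw cursor ((++it)->u.operand), which only works when every opcode's length is known up front; with the variable-width format the dumper leans on generated structures instead (note the new BytecodeStructs.h include). A sketch of the fragile cursor-walking pattern being removed, with an invented two-opcode layout:

#include <cstdio>
#include <vector>

// Old-style dumping: the instruction stream is a flat array of words and
// the dumper must "know" how many operands each opcode has, advancing a
// raw cursor past each one. Opcodes and layout here are invented.
enum Opcode { op_mov = 0, op_add = 1 };

union Word {
    Opcode opcode;
    int operand;
};

static void printBinaryOp(const Word*& it, const char* name)
{
    int dst = (++it)->operand;
    int lhs = (++it)->operand;
    int rhs = (++it)->operand;
    std::printf("%-8s loc%d, loc%d, loc%d\n", name, dst, lhs, rhs);
}

int main()
{
    // [op_add, dst, lhs, rhs] encoded as raw words.
    std::vector<Word> stream(4);
    stream[0].opcode = op_add;
    stream[1].operand = 0;
    stream[2].operand = 1;
    stream[3].operand = 2;

    const Word* it = stream.data();
    switch (it->opcode) {
    case op_add:
        printBinaryOp(it, "add"); // advances the cursor past 3 operands
        break;
    default:
        break;
    }
}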
r237486 r237547 29 29 30 30 #include "ArithProfile.h" 31 #include "BytecodeStructs.h" 31 32 #include "CallLinkStatus.h" 32 33 #include "CodeBlock.h" … … 39 40 #include "ToThisStatus.h" 40 41 #include "UnlinkedCodeBlock.h" 42 #include "UnlinkedMetadataTableInlines.h" 41 43 42 44 namespace JSC { 43 45 44 static StructureID getStructureID(const Instruction& instruction)45 {46 return instruction.u.structureID;47 }48 49 static StructureID getStructureID(const UnlinkedInstruction&)50 {51 return 0;52 }53 54 static Special::Pointer getSpecialPointer(const Instruction& instruction)55 {56 return instruction.u.specialPointer;57 }58 59 static Special::Pointer getSpecialPointer(const UnlinkedInstruction& instruction)60 {61 return static_cast<Special::Pointer>(instruction.u.operand);62 }63 64 static PutByIdFlags getPutByIdFlags(const Instruction& instruction)65 {66 return instruction.u.putByIdFlags;67 }68 69 static PutByIdFlags getPutByIdFlags(const UnlinkedInstruction& instruction)70 {71 return static_cast<PutByIdFlags>(instruction.u.operand);72 }73 74 static ToThisStatus getToThisStatus(const Instruction& instruction)75 {76 return instruction.u.toThisStatus;77 }78 79 static ToThisStatus getToThisStatus(const UnlinkedInstruction& instruction)80 {81 return static_cast<ToThisStatus>(instruction.u.operand);82 }83 84 static void* getPointer(const Instruction& instruction)85 {86 return instruction.u.pointer;87 }88 89 static void* getPointer(const UnlinkedInstruction&)90 {91 return nullptr;92 }93 94 static StructureChain* getStructureChain(const Instruction& instruction)95 {96 return instruction.u.structureChain.get();97 }98 99 static StructureChain* getStructureChain(const UnlinkedInstruction&)100 {101 return nullptr;102 }103 104 static Structure* getStructure(const Instruction& instruction)105 {106 return instruction.u.structure.get();107 }108 109 static Structure* getStructure(const UnlinkedInstruction&)110 {111 return nullptr;112 }113 114 static LLIntCallLinkInfo* getCallLinkInfo(const Instruction& instruction)115 {116 return instruction.u.callLinkInfo;117 }118 119 static LLIntCallLinkInfo* getCallLinkInfo(const UnlinkedInstruction&)120 {121 return nullptr;122 }123 124 static BasicBlockLocation* getBasicBlockLocation(const Instruction& instruction)125 {126 return instruction.u.basicBlockLocation;127 }128 129 static BasicBlockLocation* getBasicBlockLocation(const UnlinkedInstruction&)130 {131 return nullptr;132 }133 134 template<class Block>135 void* BytecodeDumper<Block>::actualPointerFor(Special::Pointer) const136 {137 return nullptr;138 }139 140 template<>141 void* BytecodeDumper<CodeBlock>::actualPointerFor(Special::Pointer pointer) const142 {143 return block()->globalObject()->actualPointerFor(pointer);144 }145 146 static void beginDumpProfiling(PrintStream& out, bool& hasPrintedProfiling)147 {148 if (hasPrintedProfiling) {149 out.print("; ");150 return;151 }152 153 out.print(" ");154 hasPrintedProfiling = true;155 }156 157 template<class Block>158 void BytecodeDumper<Block>::dumpValueProfiling(PrintStream&, const typename Block::Instruction*& it, bool&)159 {160 ++it;161 }162 163 template<>164 void BytecodeDumper<CodeBlock>::dumpValueProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling)165 {166 ConcurrentJSLocker locker(block()->m_lock);167 168 ++it;169 CString description = it->u.profile->briefDescription(locker);170 if (!description.length())171 return;172 beginDumpProfiling(out, hasPrintedProfiling);173 out.print(description);174 }175 176 template<class 
Block>177 void BytecodeDumper<Block>::dumpArrayProfiling(PrintStream&, const typename Block::Instruction*& it, bool&)178 {179 ++it;180 }181 182 template<>183 void BytecodeDumper<CodeBlock>::dumpArrayProfiling(PrintStream& out, const typename CodeBlock::Instruction*& it, bool& hasPrintedProfiling)184 {185 ConcurrentJSLocker locker(block()->m_lock);186 187 ++it;188 if (!it->u.arrayProfile)189 return;190 CString description = it->u.arrayProfile->briefDescription(locker, block());191 if (!description.length())192 return;193 beginDumpProfiling(out, hasPrintedProfiling);194 out.print(description);195 }196 197 template<class Block>198 void BytecodeDumper<Block>::dumpProfilesForBytecodeOffset(PrintStream&, unsigned, bool&)199 {200 }201 202 static void dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)203 {204 if (!profile || !profile->m_counter)205 return;206 207 beginDumpProfiling(out, hasPrintedProfiling);208 out.print(name, profile->m_counter);209 }210 211 static void dumpArithProfile(PrintStream& out, ArithProfile* profile, bool& hasPrintedProfiling)212 {213 if (!profile)214 return;215 216 beginDumpProfiling(out, hasPrintedProfiling);217 out.print("results: ", *profile);218 }219 220 template<>221 void BytecodeDumper<CodeBlock>::dumpProfilesForBytecodeOffset(PrintStream& out, unsigned location, bool& hasPrintedProfiling)222 {223 dumpRareCaseProfile(out, "rare case: ", block()->rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);224 {225 dumpArithProfile(out, block()->arithProfileForBytecodeOffset(location), hasPrintedProfiling);226 }227 228 #if ENABLE(DFG_JIT)229 Vector<DFG::FrequentExitSite> exitSites = block()->unlinkedCodeBlock()->exitProfile().exitSitesFor(location);230 if (!exitSites.isEmpty()) {231 out.print(" !! 
frequent exits: ");232 CommaPrinter comma;233 for (auto& exitSite : exitSites)234 out.print(comma, exitSite.kind(), " ", exitSite.jitType());235 }236 #else // ENABLE(DFG_JIT)237 UNUSED_PARAM(location);238 #endif // ENABLE(DFG_JIT)239 }240 241 46 template<class Block> 242 47 VM* BytecodeDumper<Block>::vm() const … … 251 56 } 252 57 253 template<class Instruction>254 static void printLocationAndOp(PrintStream& out, int location, const Instruction*&, const char* op)255 {256 out.printf("[%4d] %-17s ", location, op);257 }258 259 58 static ALWAYS_INLINE bool isConstantRegisterIndex(int index) 260 59 { 261 60 return index >= FirstConstantRegisterIndex; 262 }263 264 NEVER_INLINE static const char* debugHookName(int debugHookType)265 {266 switch (static_cast<DebugHookType>(debugHookType)) {267 case DidEnterCallFrame:268 return "didEnterCallFrame";269 case WillLeaveCallFrame:270 return "willLeaveCallFrame";271 case WillExecuteStatement:272 return "willExecuteStatement";273 case WillExecuteExpression:274 return "willExecuteExpression";275 case WillExecuteProgram:276 return "willExecuteProgram";277 case DidExecuteProgram:278 return "didExecuteProgram";279 case DidReachBreakpoint:280 return "didReachBreakpoint";281 }282 283 RELEASE_ASSERT_NOT_REACHED();284 return "";285 61 } 286 62 … … 294 70 } 295 71 296 static CString idName(int id0, const Identifier& ident)297 {298 return toCString(ident.impl(), "(@id", id0, ")");299 }300 301 72 template<class Block> 302 73 CString BytecodeDumper<Block>::constantName(int index) const … … 307 78 308 79 template<class Block> 309 void BytecodeDumper<Block>::printUnaryOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) 310 { 311 int r0 = (++it)->u.operand; 312 int r1 = (++it)->u.operand; 313 314 printLocationAndOp(out, location, it, op); 315 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); 316 } 317 318 template<class Block> 319 void BytecodeDumper<Block>::printBinaryOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) 320 { 321 int r0 = (++it)->u.operand; 322 int r1 = (++it)->u.operand; 323 int r2 = (++it)->u.operand; 324 printLocationAndOp(out, location, it, op); 325 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 326 } 327 328 template<class Block> 329 void BytecodeDumper<Block>::printConditionalJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) 330 { 331 int r0 = (++it)->u.operand; 332 int offset = (++it)->u.operand; 333 printLocationAndOp(out, location, it, op); 334 out.printf("%s, %d(->%d)", registerName(r0).data(), offset, location + offset); 335 } 336 337 template<class Block> 338 void BytecodeDumper<Block>::printCompareJump(PrintStream& out, const typename Block::Instruction*, const typename Block::Instruction*& it, int location, const char* op) 339 { 340 int r0 = (++it)->u.operand; 341 int r1 = (++it)->u.operand; 342 int offset = (++it)->u.operand; 343 printLocationAndOp(out, location, it, op); 344 out.printf("%s, %s, %d(->%d)", registerName(r0).data(), registerName(r1).data(), offset, location + offset); 345 } 346 347 template<class Block> 348 void BytecodeDumper<Block>::printGetByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it) 349 { 350 const char* op; 351 switch (Interpreter::getOpcodeID(*it)) { 352 case op_get_by_id: 353 op = "get_by_id"; 354 break; 355 case op_get_by_id_proto_load: 356 op = 
"get_by_id_proto_load"; 357 break; 358 case op_get_by_id_unset: 359 op = "get_by_id_unset"; 360 break; 361 case op_get_array_length: 362 op = "array_length"; 363 break; 364 default: 365 RELEASE_ASSERT_NOT_REACHED(); 366 #if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE) 367 op = 0; 368 #endif 369 } 370 int r0 = (++it)->u.operand; 371 int r1 = (++it)->u.operand; 372 int id0 = (++it)->u.operand; 373 printLocationAndOp(out, location, it, op); 374 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); 375 it += 4; // Increment up to the value profiler. 376 } 377 378 static void dumpStructure(PrintStream& out, const char* name, Structure* structure, const Identifier& ident) 379 { 380 if (!structure) 381 return; 382 383 out.printf("%s = %p", name, structure); 384 385 PropertyOffset offset = structure->getConcurrently(ident.impl()); 386 if (offset != invalidOffset) 387 out.printf(" (offset = %d)", offset); 388 } 389 390 static void dumpChain(PrintStream& out, StructureChain* chain, const Identifier& ident) 391 { 392 out.printf("chain = %p: [", chain); 393 bool first = true; 394 for (WriteBarrier<Structure>* currentStructure = chain->head(); *currentStructure; ++currentStructure) { 395 if (first) 396 first = false; 397 else 398 out.printf(", "); 399 dumpStructure(out, "struct", currentStructure->get(), ident); 400 } 401 out.printf("]"); 402 } 403 404 template<class Block> 405 void BytecodeDumper<Block>::printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) 406 { 407 const auto* instruction = instructionsBegin() + location; 408 409 const Identifier& ident = identifier(instruction[3].u.operand); 410 411 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
412 413 if (Interpreter::getOpcodeID(instruction[0]) == op_get_array_length) 414 out.printf(" llint(array_length)"); 415 else if (StructureID structureID = getStructureID(instruction[4])) { 416 Structure* structure = vm()->heap.structureIDTable().get(structureID); 417 out.printf(" llint("); 418 dumpStructure(out, "struct", structure, ident); 419 out.printf(")"); 420 if (Interpreter::getOpcodeID(instruction[0]) == op_get_by_id_proto_load) 421 out.printf(" proto(%p)", getPointer(instruction[6])); 422 } 423 424 #if ENABLE(JIT) 425 if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { 426 StructureStubInfo& stubInfo = *stubPtr; 427 if (stubInfo.resetByGC) 428 out.print(" (Reset By GC)"); 429 430 out.printf(" jit("); 431 432 Structure* baseStructure = nullptr; 433 PolymorphicAccess* stub = nullptr; 434 435 switch (stubInfo.cacheType) { 436 case CacheType::GetByIdSelf: 437 out.printf("self"); 438 baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); 439 break; 440 case CacheType::Stub: 441 out.printf("stub"); 442 stub = stubInfo.u.stub; 443 break; 444 case CacheType::Unset: 445 out.printf("unset"); 446 break; 447 case CacheType::ArrayLength: 448 out.printf("ArrayLength"); 449 break; 450 case CacheType::StringLength: 451 out.printf("StringLength"); 452 break; 453 default: 454 RELEASE_ASSERT_NOT_REACHED(); 455 break; 456 } 457 458 if (baseStructure) { 459 out.printf(", "); 460 dumpStructure(out, "struct", baseStructure, ident); 461 } 462 463 if (stub) 464 out.print(", ", *stub); 465 466 out.printf(")"); 467 } 468 #else 469 UNUSED_PARAM(statusMap); 470 #endif 471 } 472 473 template<class Block> 474 void BytecodeDumper<Block>::printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) 475 { 476 const auto* instruction = instructionsBegin() + location; 477 478 const Identifier& ident = identifier(instruction[2].u.operand); 479 480 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
481 482 out.print(", ", getPutByIdFlags(instruction[8])); 483 484 if (StructureID structureID = getStructureID(instruction[4])) { 485 Structure* structure = vm()->heap.structureIDTable().get(structureID); 486 out.print(" llint("); 487 if (StructureID newStructureID = getStructureID(instruction[6])) { 488 Structure* newStructure = vm()->heap.structureIDTable().get(newStructureID); 489 dumpStructure(out, "prev", structure, ident); 490 out.print(", "); 491 dumpStructure(out, "next", newStructure, ident); 492 if (StructureChain* chain = getStructureChain(instruction[7])) { 493 out.print(", "); 494 dumpChain(out, chain, ident); 495 } 496 } else 497 dumpStructure(out, "struct", structure, ident); 498 out.print(")"); 499 } 500 501 #if ENABLE(JIT) 502 if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { 503 StructureStubInfo& stubInfo = *stubPtr; 504 if (stubInfo.resetByGC) 505 out.print(" (Reset By GC)"); 506 507 out.printf(" jit("); 508 509 switch (stubInfo.cacheType) { 510 case CacheType::PutByIdReplace: 511 out.print("replace, "); 512 dumpStructure(out, "struct", stubInfo.u.byIdSelf.baseObjectStructure.get(), ident); 513 break; 514 case CacheType::Stub: { 515 out.print("stub, ", *stubInfo.u.stub); 516 break; 517 } 518 case CacheType::Unset: 519 out.printf("unset"); 520 break; 521 default: 522 RELEASE_ASSERT_NOT_REACHED(); 523 break; 524 } 525 out.printf(")"); 526 } 527 #else 528 UNUSED_PARAM(statusMap); 529 #endif 530 } 531 532 template<class Block> 533 void BytecodeDumper<Block>::printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap& statusMap) 534 { 535 const auto* instruction = instructionsBegin() + location; 536 537 const Identifier& ident = identifier(instruction[3].u.operand); 538 539 UNUSED_PARAM(ident); // tell the compiler to shut up in certain platform configurations. 
540 541 #if ENABLE(JIT) 542 if (StructureStubInfo* stubPtr = statusMap.get(CodeOrigin(location)).stubInfo) { 543 StructureStubInfo& stubInfo = *stubPtr; 544 if (stubInfo.resetByGC) 545 out.print(" (Reset By GC)"); 546 547 out.printf(" jit("); 548 549 Structure* baseStructure = nullptr; 550 PolymorphicAccess* stub = nullptr; 551 552 switch (stubInfo.cacheType) { 553 case CacheType::InByIdSelf: 554 out.printf("self"); 555 baseStructure = stubInfo.u.byIdSelf.baseObjectStructure.get(); 556 break; 557 case CacheType::Stub: 558 out.printf("stub"); 559 stub = stubInfo.u.stub; 560 break; 561 case CacheType::Unset: 562 out.printf("unset"); 563 break; 564 default: 565 RELEASE_ASSERT_NOT_REACHED(); 566 break; 567 } 568 569 if (baseStructure) { 570 out.printf(", "); 571 dumpStructure(out, "struct", baseStructure, ident); 572 } 573 574 if (stub) 575 out.print(", ", *stub); 576 577 out.printf(")"); 578 } 579 #else 580 UNUSED_PARAM(out); 581 UNUSED_PARAM(statusMap); 582 #endif 583 } 584 585 #if ENABLE(JIT) 586 template<typename Block> 587 void BytecodeDumper<Block>::dumpCallLinkStatus(PrintStream&, unsigned, const ICStatusMap&) 588 { 589 } 590 591 template<> 592 void BytecodeDumper<CodeBlock>::dumpCallLinkStatus(PrintStream& out, unsigned location, const ICStatusMap& statusMap) 593 { 594 if (block()->jitType() != JITCode::FTLJIT) 595 out.print(" status(", CallLinkStatus::computeFor(block(), location, statusMap), ")"); 596 } 597 #endif 598 599 template<class Block> 600 void BytecodeDumper<Block>::printCallOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, CacheDumpMode cacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap& statusMap) 601 { 602 int dst = (++it)->u.operand; 603 int func = (++it)->u.operand; 604 int argCount = (++it)->u.operand; 605 int registerOffset = (++it)->u.operand; 606 printLocationAndOp(out, location, it, op); 607 out.print(registerName(dst), ", ", registerName(func), ", ", argCount, ", ", registerOffset); 608 out.print(" (this at ", virtualRegisterForArgument(0, -registerOffset), ")"); 609 if (cacheDumpMode == DumpCaches) { 610 LLIntCallLinkInfo* callLinkInfo = getCallLinkInfo(it[1]); 611 if (callLinkInfo->lastSeenCallee) { 612 JSObject* object = callLinkInfo->lastSeenCallee.get(); 613 if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) 614 out.printf(" llint(%p, exec %p)", function, function->executable()); 615 else 616 out.printf(" llint(%p)", object); 617 } 618 #if ENABLE(JIT) 619 if (CallLinkInfo* info = statusMap.get(CodeOrigin(location)).callLinkInfo) { 620 if (!info->isDirect() && info->haveLastSeenCallee()) { 621 JSObject* object = info->lastSeenCallee(); 622 if (auto* function = jsDynamicCast<JSFunction*>(*vm(), object)) 623 out.printf(" jit(%p, exec %p)", function, function->executable()); 624 else 625 out.printf(" jit(%p)", object); 626 } 627 } 628 629 dumpCallLinkStatus(out, location, statusMap); 630 #else 631 UNUSED_PARAM(statusMap); 632 #endif 633 } 634 ++it; 635 ++it; 636 dumpArrayProfiling(out, it, hasPrintedProfiling); 637 dumpValueProfiling(out, it, hasPrintedProfiling); 638 } 639 640 template<class Block> 641 void BytecodeDumper<Block>::printPutByIdOp(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op) 642 { 643 int r0 = (++it)->u.operand; 644 int id0 = (++it)->u.operand; 645 int r1 = (++it)->u.operand; 646 printLocationAndOp(out, location, it, op); 647 out.printf("%s, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), registerName(r1).data()); 648 it 
+= 5; 649 } 650 651 template<class Block> 652 void BytecodeDumper<Block>::printLocationOpAndRegisterOperand(PrintStream& out, int location, const typename Block::Instruction*& it, const char* op, int operand) 653 { 654 printLocationAndOp(out, location, it, op); 655 out.printf("%s", registerName(operand).data()); 656 } 657 658 template<class Block> 659 void BytecodeDumper<Block>::dumpBytecode(PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) 660 { 661 int location = it - begin; 662 bool hasPrintedProfiling = false; 663 OpcodeID opcode = Interpreter::getOpcodeID(*it); 664 switch (opcode) { 665 case op_enter: { 666 printLocationAndOp(out, location, it, "enter"); 667 break; 668 } 669 case op_get_scope: { 670 int r0 = (++it)->u.operand; 671 printLocationOpAndRegisterOperand(out, location, it, "get_scope", r0); 672 break; 673 } 674 case op_create_direct_arguments: { 675 int r0 = (++it)->u.operand; 676 printLocationAndOp(out, location, it, "create_direct_arguments"); 677 out.printf("%s", registerName(r0).data()); 678 break; 679 } 680 case op_create_scoped_arguments: { 681 int r0 = (++it)->u.operand; 682 int r1 = (++it)->u.operand; 683 printLocationAndOp(out, location, it, "create_scoped_arguments"); 684 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); 685 break; 686 } 687 case op_create_cloned_arguments: { 688 int r0 = (++it)->u.operand; 689 printLocationAndOp(out, location, it, "create_cloned_arguments"); 690 out.printf("%s", registerName(r0).data()); 691 break; 692 } 693 case op_argument_count: { 694 int r0 = (++it)->u.operand; 695 printLocationOpAndRegisterOperand(out, location, it, "argument_count", r0); 696 break; 697 } 698 case op_get_argument: { 699 int r0 = (++it)->u.operand; 700 int index = (++it)->u.operand; 701 printLocationOpAndRegisterOperand(out, location, it, "argument", r0); 702 out.printf(", %d", index); 703 dumpValueProfiling(out, it, hasPrintedProfiling); 704 break; 705 } 706 case op_create_rest: { 707 int r0 = (++it)->u.operand; 708 int r1 = (++it)->u.operand; 709 unsigned argumentOffset = (++it)->u.unsignedValue; 710 printLocationAndOp(out, location, it, "create_rest"); 711 out.printf("%s, %s, ", registerName(r0).data(), registerName(r1).data()); 712 out.printf("ArgumentsOffset: %u", argumentOffset); 713 break; 714 } 715 case op_get_rest_length: { 716 int r0 = (++it)->u.operand; 717 printLocationAndOp(out, location, it, "get_rest_length"); 718 out.printf("%s, ", registerName(r0).data()); 719 unsigned argumentOffset = (++it)->u.unsignedValue; 720 out.printf("ArgumentsOffset: %u", argumentOffset); 721 break; 722 } 723 case op_create_this: { 724 int r0 = (++it)->u.operand; 725 int r1 = (++it)->u.operand; 726 unsigned inferredInlineCapacity = (++it)->u.operand; 727 unsigned cachedFunction = (++it)->u.operand; 728 printLocationAndOp(out, location, it, "create_this"); 729 out.printf("%s, %s, %u, %u", registerName(r0).data(), registerName(r1).data(), inferredInlineCapacity, cachedFunction); 730 break; 731 } 732 case op_to_this: { 733 int r0 = (++it)->u.operand; 734 printLocationOpAndRegisterOperand(out, location, it, "to_this", r0); 735 Structure* structure = getStructure(*(++it)); 736 if (structure) 737 out.print(", cache(struct = ", RawPointer(structure), ")"); 738 out.print(", ", getToThisStatus(*(++it))); 739 dumpValueProfiling(out, it, hasPrintedProfiling); 740 break; 741 } 742 case op_check_tdz: { 743 int r0 = (++it)->u.operand; 744 printLocationOpAndRegisterOperand(out, 
location, it, "op_check_tdz", r0); 745 break; 746 } 747 case op_new_object: { 748 int r0 = (++it)->u.operand; 749 unsigned inferredInlineCapacity = (++it)->u.operand; 750 printLocationAndOp(out, location, it, "new_object"); 751 out.printf("%s, %u", registerName(r0).data(), inferredInlineCapacity); 752 ++it; // Skip object allocation profile. 753 break; 754 } 755 case op_new_array: { 756 int dst = (++it)->u.operand; 757 int argv = (++it)->u.operand; 758 int argc = (++it)->u.operand; 759 printLocationAndOp(out, location, it, "new_array"); 760 out.printf("%s, %s, %d", registerName(dst).data(), registerName(argv).data(), argc); 761 ++it; // Skip array allocation profile. 762 break; 763 } 764 case op_new_array_with_spread: { 765 int dst = (++it)->u.operand; 766 int argv = (++it)->u.operand; 767 int argc = (++it)->u.operand; 768 printLocationAndOp(out, location, it, "new_array_with_spread"); 769 out.printf("%s, %s, %d, ", registerName(dst).data(), registerName(argv).data(), argc); 770 unsigned bitVectorIndex = (++it)->u.unsignedValue; 771 const BitVector& bitVector = block()->bitVector(bitVectorIndex); 772 out.print("BitVector:", bitVectorIndex, ":"); 773 for (unsigned i = 0; i < static_cast<unsigned>(argc); i++) { 774 if (bitVector.get(i)) 775 out.print("1"); 776 else 777 out.print("0"); 778 } 779 break; 780 } 781 case op_spread: { 782 int dst = (++it)->u.operand; 783 int arg = (++it)->u.operand; 784 printLocationAndOp(out, location, it, "spread"); 785 out.printf("%s, %s", registerName(dst).data(), registerName(arg).data()); 786 break; 787 } 788 case op_new_array_with_size: { 789 int dst = (++it)->u.operand; 790 int length = (++it)->u.operand; 791 printLocationAndOp(out, location, it, "new_array_with_size"); 792 out.printf("%s, %s", registerName(dst).data(), registerName(length).data()); 793 ++it; // Skip array allocation profile. 794 break; 795 } 796 case op_new_array_buffer: { 797 int dst = (++it)->u.operand; 798 int array = (++it)->u.operand; 799 printLocationAndOp(out, location, it, "new_array_buffer"); 800 out.printf("%s, %s", registerName(dst).data(), registerName(array).data()); 801 ++it; // Skip array allocation profile. 
802 break; 803 } 804 case op_new_regexp: { 805 int r0 = (++it)->u.operand; 806 int re0 = (++it)->u.operand; 807 printLocationAndOp(out, location, it, "new_regexp"); 808 out.printf("%s, %s", registerName(r0).data(), registerName(re0).data()); 809 break; 810 } 811 case op_mov: { 812 int r0 = (++it)->u.operand; 813 int r1 = (++it)->u.operand; 814 printLocationAndOp(out, location, it, "mov"); 815 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); 816 break; 817 } 818 case op_profile_type: { 819 int r0 = (++it)->u.operand; 820 ++it; 821 ++it; 822 ++it; 823 ++it; 824 printLocationAndOp(out, location, it, "op_profile_type"); 825 out.printf("%s", registerName(r0).data()); 826 break; 827 } 828 case op_profile_control_flow: { 829 BasicBlockLocation* basicBlockLocation = getBasicBlockLocation(*(++it)); 830 printLocationAndOp(out, location, it, "profile_control_flow"); 831 if (basicBlockLocation) 832 out.printf("[%d, %d]", basicBlockLocation->startOffset(), basicBlockLocation->endOffset()); 833 break; 834 } 835 case op_not: { 836 printUnaryOp(out, location, it, "not"); 837 break; 838 } 839 case op_eq: { 840 printBinaryOp(out, location, it, "eq"); 841 break; 842 } 843 case op_eq_null: { 844 printUnaryOp(out, location, it, "eq_null"); 845 break; 846 } 847 case op_neq: { 848 printBinaryOp(out, location, it, "neq"); 849 break; 850 } 851 case op_neq_null: { 852 printUnaryOp(out, location, it, "neq_null"); 853 break; 854 } 855 case op_stricteq: { 856 printBinaryOp(out, location, it, "stricteq"); 857 break; 858 } 859 case op_nstricteq: { 860 printBinaryOp(out, location, it, "nstricteq"); 861 break; 862 } 863 case op_less: { 864 printBinaryOp(out, location, it, "less"); 865 break; 866 } 867 case op_lesseq: { 868 printBinaryOp(out, location, it, "lesseq"); 869 break; 870 } 871 case op_greater: { 872 printBinaryOp(out, location, it, "greater"); 873 break; 874 } 875 case op_greatereq: { 876 printBinaryOp(out, location, it, "greatereq"); 877 break; 878 } 879 case op_below: { 880 printBinaryOp(out, location, it, "below"); 881 break; 882 } 883 case op_beloweq: { 884 printBinaryOp(out, location, it, "beloweq"); 885 break; 886 } 887 case op_inc: { 888 int r0 = (++it)->u.operand; 889 printLocationOpAndRegisterOperand(out, location, it, "inc", r0); 890 break; 891 } 892 case op_dec: { 893 int r0 = (++it)->u.operand; 894 printLocationOpAndRegisterOperand(out, location, it, "dec", r0); 895 break; 896 } 897 case op_to_number: { 898 printUnaryOp(out, location, it, "to_number"); 899 dumpValueProfiling(out, it, hasPrintedProfiling); 900 break; 901 } 902 case op_to_string: { 903 printUnaryOp(out, location, it, "to_string"); 904 break; 905 } 906 case op_to_object: { 907 printUnaryOp(out, location, it, "to_object"); 908 int id0 = (++it)->u.operand; 909 out.printf(" %s", idName(id0, identifier(id0)).data()); 910 dumpValueProfiling(out, it, hasPrintedProfiling); 911 break; 912 } 913 case op_negate: { 914 printUnaryOp(out, location, it, "negate"); 915 ++it; // op_negate has an extra operand for the ArithProfile. 
916 break; 917 } 918 case op_add: { 919 printBinaryOp(out, location, it, "add"); 920 ++it; 921 break; 922 } 923 case op_mul: { 924 printBinaryOp(out, location, it, "mul"); 925 ++it; 926 break; 927 } 928 case op_div: { 929 printBinaryOp(out, location, it, "div"); 930 ++it; 931 break; 932 } 933 case op_mod: { 934 printBinaryOp(out, location, it, "mod"); 935 break; 936 } 937 case op_pow: { 938 printBinaryOp(out, location, it, "pow"); 939 break; 940 } 941 case op_sub: { 942 printBinaryOp(out, location, it, "sub"); 943 ++it; 944 break; 945 } 946 case op_lshift: { 947 printBinaryOp(out, location, it, "lshift"); 948 break; 949 } 950 case op_rshift: { 951 printBinaryOp(out, location, it, "rshift"); 952 break; 953 } 954 case op_urshift: { 955 printBinaryOp(out, location, it, "urshift"); 956 break; 957 } 958 case op_bitand: { 959 printBinaryOp(out, location, it, "bitand"); 960 ++it; 961 break; 962 } 963 case op_bitxor: { 964 printBinaryOp(out, location, it, "bitxor"); 965 ++it; 966 break; 967 } 968 case op_bitor: { 969 printBinaryOp(out, location, it, "bitor"); 970 ++it; 971 break; 972 } 973 case op_overrides_has_instance: { 974 int r0 = (++it)->u.operand; 975 int r1 = (++it)->u.operand; 976 int r2 = (++it)->u.operand; 977 printLocationAndOp(out, location, it, "overrides_has_instance"); 978 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 979 break; 980 } 981 case op_instanceof: { 982 int r0 = (++it)->u.operand; 983 int r1 = (++it)->u.operand; 984 int r2 = (++it)->u.operand; 985 printLocationAndOp(out, location, it, "instanceof"); 986 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 987 break; 988 } 989 case op_instanceof_custom: { 990 int r0 = (++it)->u.operand; 991 int r1 = (++it)->u.operand; 992 int r2 = (++it)->u.operand; 993 int r3 = (++it)->u.operand; 994 printLocationAndOp(out, location, it, "instanceof_custom"); 995 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); 996 break; 997 } 998 case op_unsigned: { 999 printUnaryOp(out, location, it, "unsigned"); 1000 break; 1001 } 1002 case op_typeof: { 1003 printUnaryOp(out, location, it, "typeof"); 1004 break; 1005 } 1006 case op_is_empty: { 1007 printUnaryOp(out, location, it, "is_empty"); 1008 break; 1009 } 1010 case op_is_undefined: { 1011 printUnaryOp(out, location, it, "is_undefined"); 1012 break; 1013 } 1014 case op_is_boolean: { 1015 printUnaryOp(out, location, it, "is_boolean"); 1016 break; 1017 } 1018 case op_is_number: { 1019 printUnaryOp(out, location, it, "is_number"); 1020 break; 1021 } 1022 case op_is_cell_with_type: { 1023 int r0 = (++it)->u.operand; 1024 int r1 = (++it)->u.operand; 1025 int type = (++it)->u.operand; 1026 printLocationAndOp(out, location, it, "is_cell_with_type"); 1027 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), type); 1028 break; 1029 } 1030 case op_is_object: { 1031 printUnaryOp(out, location, it, "is_object"); 1032 break; 1033 } 1034 case op_is_object_or_null: { 1035 printUnaryOp(out, location, it, "is_object_or_null"); 1036 break; 1037 } 1038 case op_is_function: { 1039 printUnaryOp(out, location, it, "is_function"); 1040 break; 1041 } 1042 case op_in_by_id: { 1043 int r0 = (++it)->u.operand; 1044 int r1 = (++it)->u.operand; 1045 int id0 = (++it)->u.operand; 1046 printLocationAndOp(out, location, it, "in_by_id"); 1047 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), 
idName(id0, identifier(id0)).data()); 1048 printInByIdCacheStatus(out, location, statusMap); 1049 break; 1050 } 1051 case op_in_by_val: { 1052 printBinaryOp(out, location, it, "in_by_val"); 1053 dumpArrayProfiling(out, it, hasPrintedProfiling); 1054 break; 1055 } 1056 case op_try_get_by_id: { 1057 int r0 = (++it)->u.operand; 1058 int r1 = (++it)->u.operand; 1059 int id0 = (++it)->u.operand; 1060 printLocationAndOp(out, location, it, "try_get_by_id"); 1061 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); 1062 dumpValueProfiling(out, it, hasPrintedProfiling); 1063 break; 1064 } 1065 case op_get_by_id_direct: { 1066 int r0 = (++it)->u.operand; 1067 int r1 = (++it)->u.operand; 1068 int id0 = (++it)->u.operand; 1069 printLocationAndOp(out, location, it, "get_by_id_direct"); 1070 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); 1071 it += 2; // Increment up to the value profiler. 1072 printGetByIdCacheStatus(out, location, statusMap); 1073 dumpValueProfiling(out, it, hasPrintedProfiling); 1074 break; 1075 } 1076 case op_get_by_id: 1077 case op_get_by_id_proto_load: 1078 case op_get_by_id_unset: 1079 case op_get_array_length: { 1080 printGetByIdOp(out, location, it); 1081 printGetByIdCacheStatus(out, location, statusMap); 1082 dumpValueProfiling(out, it, hasPrintedProfiling); 1083 break; 1084 } 1085 case op_get_by_id_with_this: { 1086 printLocationAndOp(out, location, it, "get_by_id_with_this"); 1087 int r0 = (++it)->u.operand; 1088 int r1 = (++it)->u.operand; 1089 int r2 = (++it)->u.operand; 1090 int id0 = (++it)->u.operand; 1091 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), idName(id0, identifier(id0)).data()); 1092 dumpValueProfiling(out, it, hasPrintedProfiling); 1093 break; 1094 } 1095 case op_get_by_val_with_this: { 1096 int r0 = (++it)->u.operand; 1097 int r1 = (++it)->u.operand; 1098 int r2 = (++it)->u.operand; 1099 int r3 = (++it)->u.operand; 1100 printLocationAndOp(out, location, it, "get_by_val_with_this"); 1101 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); 1102 dumpValueProfiling(out, it, hasPrintedProfiling); 1103 break; 1104 } 1105 case op_put_by_id: { 1106 printPutByIdOp(out, location, it, "put_by_id"); 1107 printPutByIdCacheStatus(out, location, statusMap); 1108 break; 1109 } 1110 case op_put_by_id_with_this: { 1111 int r0 = (++it)->u.operand; 1112 int r1 = (++it)->u.operand; 1113 int id0 = (++it)->u.operand; 1114 int r2 = (++it)->u.operand; 1115 printLocationAndOp(out, location, it, "put_by_id_with_this"); 1116 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data(), registerName(r2).data()); 1117 break; 1118 } 1119 case op_put_by_val_with_this: { 1120 int r0 = (++it)->u.operand; 1121 int r1 = (++it)->u.operand; 1122 int r2 = (++it)->u.operand; 1123 int r3 = (++it)->u.operand; 1124 printLocationAndOp(out, location, it, "put_by_val_with_this"); 1125 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); 1126 break; 1127 } 1128 case op_put_getter_by_id: { 1129 int r0 = (++it)->u.operand; 1130 int id0 = (++it)->u.operand; 1131 int n0 = (++it)->u.operand; 1132 int r1 = (++it)->u.operand; 1133 printLocationAndOp(out, location, it, "put_getter_by_id"); 1134 out.printf("%s, %s, %d, %s", 
registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); 1135 break; 1136 } 1137 case op_put_setter_by_id: { 1138 int r0 = (++it)->u.operand; 1139 int id0 = (++it)->u.operand; 1140 int n0 = (++it)->u.operand; 1141 int r1 = (++it)->u.operand; 1142 printLocationAndOp(out, location, it, "put_setter_by_id"); 1143 out.printf("%s, %s, %d, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data()); 1144 break; 1145 } 1146 case op_put_getter_setter_by_id: { 1147 int r0 = (++it)->u.operand; 1148 int id0 = (++it)->u.operand; 1149 int n0 = (++it)->u.operand; 1150 int r1 = (++it)->u.operand; 1151 int r2 = (++it)->u.operand; 1152 printLocationAndOp(out, location, it, "put_getter_setter_by_id"); 1153 out.printf("%s, %s, %d, %s, %s", registerName(r0).data(), idName(id0, identifier(id0)).data(), n0, registerName(r1).data(), registerName(r2).data()); 1154 break; 1155 } 1156 case op_put_getter_by_val: { 1157 int r0 = (++it)->u.operand; 1158 int r1 = (++it)->u.operand; 1159 int n0 = (++it)->u.operand; 1160 int r2 = (++it)->u.operand; 1161 printLocationAndOp(out, location, it, "put_getter_by_val"); 1162 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); 1163 break; 1164 } 1165 case op_put_setter_by_val: { 1166 int r0 = (++it)->u.operand; 1167 int r1 = (++it)->u.operand; 1168 int n0 = (++it)->u.operand; 1169 int r2 = (++it)->u.operand; 1170 printLocationAndOp(out, location, it, "put_setter_by_val"); 1171 out.printf("%s, %s, %d, %s", registerName(r0).data(), registerName(r1).data(), n0, registerName(r2).data()); 1172 break; 1173 } 1174 case op_define_data_property: { 1175 int r0 = (++it)->u.operand; 1176 int r1 = (++it)->u.operand; 1177 int r2 = (++it)->u.operand; 1178 int r3 = (++it)->u.operand; 1179 printLocationAndOp(out, location, it, "define_data_property"); 1180 out.printf("%s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data()); 1181 break; 1182 } 1183 case op_define_accessor_property: { 1184 int r0 = (++it)->u.operand; 1185 int r1 = (++it)->u.operand; 1186 int r2 = (++it)->u.operand; 1187 int r3 = (++it)->u.operand; 1188 int r4 = (++it)->u.operand; 1189 printLocationAndOp(out, location, it, "define_accessor_property"); 1190 out.printf("%s, %s, %s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data(), registerName(r3).data(), registerName(r4).data()); 1191 break; 1192 } 1193 case op_del_by_id: { 1194 int r0 = (++it)->u.operand; 1195 int r1 = (++it)->u.operand; 1196 int id0 = (++it)->u.operand; 1197 printLocationAndOp(out, location, it, "del_by_id"); 1198 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), idName(id0, identifier(id0)).data()); 1199 break; 1200 } 1201 case op_get_by_val: { 1202 int r0 = (++it)->u.operand; 1203 int r1 = (++it)->u.operand; 1204 int r2 = (++it)->u.operand; 1205 printLocationAndOp(out, location, it, "get_by_val"); 1206 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 1207 dumpArrayProfiling(out, it, hasPrintedProfiling); 1208 dumpValueProfiling(out, it, hasPrintedProfiling); 1209 break; 1210 } 1211 case op_put_by_val: { 1212 int r0 = (++it)->u.operand; 1213 int r1 = (++it)->u.operand; 1214 int r2 = (++it)->u.operand; 1215 printLocationAndOp(out, location, it, "put_by_val"); 1216 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 1217 
dumpArrayProfiling(out, it, hasPrintedProfiling); 1218 break; 1219 } 1220 case op_put_by_val_direct: { 1221 int r0 = (++it)->u.operand; 1222 int r1 = (++it)->u.operand; 1223 int r2 = (++it)->u.operand; 1224 printLocationAndOp(out, location, it, "put_by_val_direct"); 1225 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 1226 dumpArrayProfiling(out, it, hasPrintedProfiling); 1227 break; 1228 } 1229 case op_del_by_val: { 1230 int r0 = (++it)->u.operand; 1231 int r1 = (++it)->u.operand; 1232 int r2 = (++it)->u.operand; 1233 printLocationAndOp(out, location, it, "del_by_val"); 1234 out.printf("%s, %s, %s", registerName(r0).data(), registerName(r1).data(), registerName(r2).data()); 1235 break; 1236 } 1237 case op_jmp: { 1238 int offset = (++it)->u.operand; 1239 printLocationAndOp(out, location, it, "jmp"); 1240 out.printf("%d(->%d)", offset, location + offset); 1241 break; 1242 } 1243 case op_jtrue: { 1244 printConditionalJump(out, begin, it, location, "jtrue"); 1245 break; 1246 } 1247 case op_jfalse: { 1248 printConditionalJump(out, begin, it, location, "jfalse"); 1249 break; 1250 } 1251 case op_jeq_null: { 1252 printConditionalJump(out, begin, it, location, "jeq_null"); 1253 break; 1254 } 1255 case op_jneq_null: { 1256 printConditionalJump(out, begin, it, location, "jneq_null"); 1257 break; 1258 } 1259 case op_jneq_ptr: { 1260 int r0 = (++it)->u.operand; 1261 Special::Pointer pointer = getSpecialPointer(*(++it)); 1262 int offset = (++it)->u.operand; 1263 printLocationAndOp(out, location, it, "jneq_ptr"); 1264 out.printf("%s, %d (%p), %d(->%d)", registerName(r0).data(), pointer, actualPointerFor(pointer), offset, location + offset); 1265 ++it; 1266 break; 1267 } 1268 case op_jless: { 1269 printCompareJump(out, begin, it, location, "jless"); 1270 break; 1271 } 1272 case op_jlesseq: { 1273 printCompareJump(out, begin, it, location, "jlesseq"); 1274 break; 1275 } 1276 case op_jgreater: { 1277 printCompareJump(out, begin, it, location, "jgreater"); 1278 break; 1279 } 1280 case op_jgreatereq: { 1281 printCompareJump(out, begin, it, location, "jgreatereq"); 1282 break; 1283 } 1284 case op_jnless: { 1285 printCompareJump(out, begin, it, location, "jnless"); 1286 break; 1287 } 1288 case op_jnlesseq: { 1289 printCompareJump(out, begin, it, location, "jnlesseq"); 1290 break; 1291 } 1292 case op_jngreater: { 1293 printCompareJump(out, begin, it, location, "jngreater"); 1294 break; 1295 } 1296 case op_jngreatereq: { 1297 printCompareJump(out, begin, it, location, "jngreatereq"); 1298 break; 1299 } 1300 case op_jeq: { 1301 printCompareJump(out, begin, it, location, "jeq"); 1302 break; 1303 } 1304 case op_jneq: { 1305 printCompareJump(out, begin, it, location, "jneq"); 1306 break; 1307 } 1308 case op_jstricteq: { 1309 printCompareJump(out, begin, it, location, "jstricteq"); 1310 break; 1311 } 1312 case op_jnstricteq: { 1313 printCompareJump(out, begin, it, location, "jnstricteq"); 1314 break; 1315 } 1316 case op_jbelow: { 1317 printCompareJump(out, begin, it, location, "jbelow"); 1318 break; 1319 } 1320 case op_jbeloweq: { 1321 printCompareJump(out, begin, it, location, "jbeloweq"); 1322 break; 1323 } 1324 case op_loop_hint: { 1325 printLocationAndOp(out, location, it, "loop_hint"); 1326 break; 1327 } 1328 case op_check_traps: { 1329 printLocationAndOp(out, location, it, "check_traps"); 1330 break; 1331 } 1332 case op_nop: { 1333 printLocationAndOp(out, location, it, "nop"); 1334 break; 1335 } 1336 case op_super_sampler_begin: { 1337 printLocationAndOp(out, 
location, it, "super_sampler_begin"); 1338 break; 1339 } 1340 case op_super_sampler_end: { 1341 printLocationAndOp(out, location, it, "super_sampler_end"); 1342 break; 1343 } 1344 case op_log_shadow_chicken_prologue: { 1345 int r0 = (++it)->u.operand; 1346 printLocationAndOp(out, location, it, "log_shadow_chicken_prologue"); 1347 out.printf("%s", registerName(r0).data()); 1348 break; 1349 } 1350 case op_log_shadow_chicken_tail: { 1351 int r0 = (++it)->u.operand; 1352 int r1 = (++it)->u.operand; 1353 printLocationAndOp(out, location, it, "log_shadow_chicken_tail"); 1354 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); 1355 break; 1356 } 1357 case op_switch_imm: { 1358 int tableIndex = (++it)->u.operand; 1359 int defaultTarget = (++it)->u.operand; 1360 int scrutineeRegister = (++it)->u.operand; 1361 printLocationAndOp(out, location, it, "switch_imm"); 1362 out.printf("%d, default:%d(->%d)", tableIndex, defaultTarget, location + defaultTarget); 1363 const auto& table = block()->switchJumpTable(tableIndex); 1364 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) 1365 out.printf(", %d:%d(->%d)", i, table.branchOffsets[i], location + table.branchOffsets[i]); 1366 out.print(", ", registerName(scrutineeRegister).data()); 1367 break; 1368 } 1369 case op_switch_char: { 1370 int tableIndex = (++it)->u.operand; 1371 int defaultTarget = (++it)->u.operand; 1372 int scrutineeRegister = (++it)->u.operand; 1373 printLocationAndOp(out, location, it, "switch_char"); 1374 out.printf("%d, default:%d(->%d)", tableIndex, defaultTarget, location + defaultTarget); 1375 const auto& table = block()->switchJumpTable(tableIndex); 1376 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) 1377 out.printf(", %c:%d(->%d)", i, table.branchOffsets[i], location + table.branchOffsets[i]); 1378 out.print(", ", registerName(scrutineeRegister).data()); 1379 break; 1380 } 1381 case op_switch_string: { 1382 int tableIndex = (++it)->u.operand; 1383 int defaultTarget = (++it)->u.operand; 1384 int scrutineeRegister = (++it)->u.operand; 1385 printLocationAndOp(out, location, it, "switch_string"); 1386 out.printf("%d, default:%d(->%d)", tableIndex, defaultTarget, location + defaultTarget); 1387 const auto& table = block()->stringSwitchJumpTable(tableIndex); 1388 for (const auto& offset : table.offsetTable) { 1389 out.print(", ", Identifier::fromUid(vm(), static_cast<UniquedStringImpl*>(offset.key.get()))); 1390 out.printf(":%d(%d)", offset.value.branchOffset, location + offset.value.branchOffset); 1391 } 1392 out.print(", ", registerName(scrutineeRegister).data()); 1393 break; 1394 } 1395 case op_new_func: { 1396 int r0 = (++it)->u.operand; 1397 int r1 = (++it)->u.operand; 1398 int f0 = (++it)->u.operand; 1399 printLocationAndOp(out, location, it, "new_func"); 1400 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1401 break; 1402 } 1403 case op_new_generator_func: { 1404 int r0 = (++it)->u.operand; 1405 int r1 = (++it)->u.operand; 1406 int f0 = (++it)->u.operand; 1407 printLocationAndOp(out, location, it, "new_generator_func"); 1408 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1409 break; 1410 } 1411 case op_new_async_func: { 1412 int r0 = (++it)->u.operand; 1413 int r1 = (++it)->u.operand; 1414 int f0 = (++it)->u.operand; 1415 printLocationAndOp(out, location, it, "new_async_func"); 1416 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1417 break; 1418 } 1419 case op_new_async_generator_func: { 1420 
int r0 = (++it)->u.operand; 1421 int r1 = (++it)->u.operand; 1422 int f0 = (++it)->u.operand; 1423 printLocationAndOp(out, location, it, "new_async_generator_func"); 1424 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1425 break; 1426 } 1427 case op_new_func_exp: { 1428 int r0 = (++it)->u.operand; 1429 int r1 = (++it)->u.operand; 1430 int f0 = (++it)->u.operand; 1431 printLocationAndOp(out, location, it, "new_func_exp"); 1432 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1433 break; 1434 } 1435 case op_new_generator_func_exp: { 1436 int r0 = (++it)->u.operand; 1437 int r1 = (++it)->u.operand; 1438 int f0 = (++it)->u.operand; 1439 printLocationAndOp(out, location, it, "new_generator_func_exp"); 1440 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1441 break; 1442 } 1443 case op_new_async_func_exp: { 1444 int r0 = (++it)->u.operand; 1445 int r1 = (++it)->u.operand; 1446 int f0 = (++it)->u.operand; 1447 printLocationAndOp(out, location, it, "new_async_func_exp"); 1448 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1449 break; 1450 } 1451 case op_new_async_generator_func_exp: { 1452 int r0 = (++it)->u.operand; 1453 int r1 = (++it)->u.operand; 1454 int f0 = (++it)->u.operand; 1455 printLocationAndOp(out, location, it, "op_new_async_generator_func_exp"); 1456 out.printf("%s, %s, f%d", registerName(r0).data(), registerName(r1).data(), f0); 1457 break; 1458 } 1459 case op_set_function_name: { 1460 int funcReg = (++it)->u.operand; 1461 int nameReg = (++it)->u.operand; 1462 printLocationAndOp(out, location, it, "set_function_name"); 1463 out.printf("%s, %s", registerName(funcReg).data(), registerName(nameReg).data()); 1464 break; 1465 } 1466 case op_call: { 1467 printCallOp(out, location, it, "call", DumpCaches, hasPrintedProfiling, statusMap); 1468 break; 1469 } 1470 case op_tail_call: { 1471 printCallOp(out, location, it, "tail_call", DumpCaches, hasPrintedProfiling, statusMap); 1472 break; 1473 } 1474 case op_call_eval: { 1475 printCallOp(out, location, it, "call_eval", DontDumpCaches, hasPrintedProfiling, statusMap); 1476 break; 1477 } 1478 1479 case op_construct_varargs: 1480 case op_call_varargs: 1481 case op_tail_call_varargs: 1482 case op_tail_call_forward_arguments: { 1483 int result = (++it)->u.operand; 1484 int callee = (++it)->u.operand; 1485 int thisValue = (++it)->u.operand; 1486 int arguments = (++it)->u.operand; 1487 int firstFreeRegister = (++it)->u.operand; 1488 int varArgOffset = (++it)->u.operand; 1489 ++it; 1490 const char* opName; 1491 if (opcode == op_call_varargs) 1492 opName = "call_varargs"; 1493 else if (opcode == op_construct_varargs) 1494 opName = "construct_varargs"; 1495 else if (opcode == op_tail_call_varargs) 1496 opName = "tail_call_varargs"; 1497 else if (opcode == op_tail_call_forward_arguments) 1498 opName = "tail_call_forward_arguments"; 1499 else 1500 RELEASE_ASSERT_NOT_REACHED(); 1501 1502 printLocationAndOp(out, location, it, opName); 1503 out.printf("%s, %s, %s, %s, %d, %d", registerName(result).data(), registerName(callee).data(), registerName(thisValue).data(), registerName(arguments).data(), firstFreeRegister, varArgOffset); 1504 dumpValueProfiling(out, it, hasPrintedProfiling); 1505 break; 1506 } 1507 1508 case op_ret: { 1509 int r0 = (++it)->u.operand; 1510 printLocationOpAndRegisterOperand(out, location, it, "ret", r0); 1511 break; 1512 } 1513 case op_construct: { 1514 printCallOp(out, location, it, "construct", 
DumpCaches, hasPrintedProfiling, statusMap); 1515 break; 1516 } 1517 case op_strcat: { 1518 int r0 = (++it)->u.operand; 1519 int r1 = (++it)->u.operand; 1520 int count = (++it)->u.operand; 1521 printLocationAndOp(out, location, it, "strcat"); 1522 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), count); 1523 break; 1524 } 1525 case op_to_primitive: { 1526 int r0 = (++it)->u.operand; 1527 int r1 = (++it)->u.operand; 1528 printLocationAndOp(out, location, it, "to_primitive"); 1529 out.printf("%s, %s", registerName(r0).data(), registerName(r1).data()); 1530 break; 1531 } 1532 case op_get_enumerable_length: { 1533 int dst = it[1].u.operand; 1534 int base = it[2].u.operand; 1535 printLocationAndOp(out, location, it, "op_get_enumerable_length"); 1536 out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); 1537 it += OPCODE_LENGTH(op_get_enumerable_length) - 1; 1538 break; 1539 } 1540 case op_has_indexed_property: { 1541 int dst = (++it)->u.operand; 1542 int base = (++it)->u.operand; 1543 int propertyName = (++it)->u.operand; 1544 printLocationAndOp(out, location, it, "op_has_indexed_property"); 1545 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); 1546 dumpArrayProfiling(out, it, hasPrintedProfiling); 1547 break; 1548 } 1549 case op_has_structure_property: { 1550 int dst = it[1].u.operand; 1551 int base = it[2].u.operand; 1552 int propertyName = it[3].u.operand; 1553 int enumerator = it[4].u.operand; 1554 printLocationAndOp(out, location, it, "op_has_structure_property"); 1555 out.printf("%s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(enumerator).data()); 1556 it += OPCODE_LENGTH(op_has_structure_property) - 1; 1557 break; 1558 } 1559 case op_has_generic_property: { 1560 int dst = it[1].u.operand; 1561 int base = it[2].u.operand; 1562 int propertyName = it[3].u.operand; 1563 printLocationAndOp(out, location, it, "op_has_generic_property"); 1564 out.printf("%s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data()); 1565 it += OPCODE_LENGTH(op_has_generic_property) - 1; 1566 break; 1567 } 1568 case op_get_direct_pname: { 1569 int dst = (++it)->u.operand; 1570 int base = (++it)->u.operand; 1571 int propertyName = (++it)->u.operand; 1572 int index = (++it)->u.operand; 1573 int enumerator = (++it)->u.operand; 1574 printLocationAndOp(out, location, it, "op_get_direct_pname"); 1575 out.printf("%s, %s, %s, %s, %s", registerName(dst).data(), registerName(base).data(), registerName(propertyName).data(), registerName(index).data(), registerName(enumerator).data()); 1576 dumpValueProfiling(out, it, hasPrintedProfiling); 1577 break; 1578 1579 } 1580 case op_get_property_enumerator: { 1581 int dst = it[1].u.operand; 1582 int base = it[2].u.operand; 1583 printLocationAndOp(out, location, it, "op_get_property_enumerator"); 1584 out.printf("%s, %s", registerName(dst).data(), registerName(base).data()); 1585 it += OPCODE_LENGTH(op_get_property_enumerator) - 1; 1586 break; 1587 } 1588 case op_enumerator_structure_pname: { 1589 int dst = it[1].u.operand; 1590 int enumerator = it[2].u.operand; 1591 int index = it[3].u.operand; 1592 printLocationAndOp(out, location, it, "op_enumerator_structure_pname"); 1593 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); 1594 it += OPCODE_LENGTH(op_enumerator_structure_pname) - 1; 1595 break; 1596 } 
1597 case op_enumerator_generic_pname: { 1598 int dst = it[1].u.operand; 1599 int enumerator = it[2].u.operand; 1600 int index = it[3].u.operand; 1601 printLocationAndOp(out, location, it, "op_enumerator_generic_pname"); 1602 out.printf("%s, %s, %s", registerName(dst).data(), registerName(enumerator).data(), registerName(index).data()); 1603 it += OPCODE_LENGTH(op_enumerator_generic_pname) - 1; 1604 break; 1605 } 1606 case op_to_index_string: { 1607 int dst = it[1].u.operand; 1608 int index = it[2].u.operand; 1609 printLocationAndOp(out, location, it, "op_to_index_string"); 1610 out.printf("%s, %s", registerName(dst).data(), registerName(index).data()); 1611 it += OPCODE_LENGTH(op_to_index_string) - 1; 1612 break; 1613 } 1614 case op_push_with_scope: { 1615 int dst = (++it)->u.operand; 1616 int newScope = (++it)->u.operand; 1617 int currentScope = (++it)->u.operand; 1618 printLocationAndOp(out, location, it, "push_with_scope"); 1619 out.printf("%s, %s, %s", registerName(dst).data(), registerName(newScope).data(), registerName(currentScope).data()); 1620 break; 1621 } 1622 case op_get_parent_scope: { 1623 int dst = (++it)->u.operand; 1624 int parentScope = (++it)->u.operand; 1625 printLocationAndOp(out, location, it, "get_parent_scope"); 1626 out.printf("%s, %s", registerName(dst).data(), registerName(parentScope).data()); 1627 break; 1628 } 1629 case op_create_lexical_environment: { 1630 int dst = (++it)->u.operand; 1631 int scope = (++it)->u.operand; 1632 int symbolTable = (++it)->u.operand; 1633 int initialValue = (++it)->u.operand; 1634 printLocationAndOp(out, location, it, "create_lexical_environment"); 1635 out.printf("%s, %s, %s, %s", 1636 registerName(dst).data(), registerName(scope).data(), registerName(symbolTable).data(), registerName(initialValue).data()); 1637 break; 1638 } 1639 case op_catch: { 1640 int r0 = (++it)->u.operand; 1641 int r1 = (++it)->u.operand; 1642 void* pointer = getPointer(*(++it)); 1643 printLocationAndOp(out, location, it, "catch"); 1644 out.printf("%s, %s, %p", registerName(r0).data(), registerName(r1).data(), pointer); 1645 break; 1646 } 1647 case op_throw: { 1648 int r0 = (++it)->u.operand; 1649 printLocationOpAndRegisterOperand(out, location, it, "throw", r0); 1650 break; 1651 } 1652 case op_throw_static_error: { 1653 int r0 = (++it)->u.operand; 1654 ErrorType k1 = static_cast<ErrorType>((++it)->u.unsignedValue); 1655 printLocationAndOp(out, location, it, "throw_static_error"); 1656 out.printf("%s, ", registerName(r0).data()); 1657 out.print(k1); 1658 break; 1659 } 1660 case op_debug: { 1661 int debugHookType = (++it)->u.operand; 1662 int hasBreakpointFlag = (++it)->u.operand; 1663 printLocationAndOp(out, location, it, "debug"); 1664 out.printf("%s, %d", debugHookName(debugHookType), hasBreakpointFlag); 1665 break; 1666 } 1667 case op_identity_with_profile: { 1668 int r0 = (++it)->u.operand; 1669 ++it; // Profile top half 1670 ++it; // Profile bottom half 1671 printLocationAndOp(out, location, it, "identity_with_profile"); 1672 out.printf("%s", registerName(r0).data()); 1673 break; 1674 } 1675 case op_unreachable: { 1676 printLocationAndOp(out, location, it, "unreachable"); 1677 break; 1678 } 1679 case op_end: { 1680 int r0 = (++it)->u.operand; 1681 printLocationOpAndRegisterOperand(out, location, it, "end", r0); 1682 break; 1683 } 1684 case op_resolve_scope_for_hoisting_func_decl_in_eval: { 1685 int r0 = (++it)->u.operand; 1686 int scope = (++it)->u.operand; 1687 int id0 = (++it)->u.operand; 1688 printLocationAndOp(out, location, it, 
"resolve_scope_for_hoisting_func_decl_in_eval"); 1689 out.printf("%s, %s, %s", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data()); 1690 break; 1691 } 1692 case op_resolve_scope: { 1693 int r0 = (++it)->u.operand; 1694 int scope = (++it)->u.operand; 1695 int id0 = (++it)->u.operand; 1696 ResolveType resolveType = static_cast<ResolveType>((++it)->u.operand); 1697 int depth = (++it)->u.operand; 1698 void* pointer = getPointer(*(++it)); 1699 printLocationAndOp(out, location, it, "resolve_scope"); 1700 out.printf("%s, %s, %s, <%s>, %d, %p", registerName(r0).data(), registerName(scope).data(), idName(id0, identifier(id0)).data(), resolveTypeName(resolveType), depth, pointer); 1701 break; 1702 } 1703 case op_get_from_scope: { 1704 int r0 = (++it)->u.operand; 1705 int r1 = (++it)->u.operand; 1706 int id0 = (++it)->u.operand; 1707 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); 1708 ++it; // Structure 1709 int operand = (++it)->u.operand; // Operand 1710 printLocationAndOp(out, location, it, "get_from_scope"); 1711 out.print(registerName(r0), ", ", registerName(r1)); 1712 if (static_cast<unsigned>(id0) == UINT_MAX) 1713 out.print(", anonymous"); 1714 else 1715 out.print(", ", idName(id0, identifier(id0))); 1716 out.print(", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, ", operand); 1717 dumpValueProfiling(out, it, hasPrintedProfiling); 1718 break; 1719 } 1720 case op_put_to_scope: { 1721 int r0 = (++it)->u.operand; 1722 int id0 = (++it)->u.operand; 1723 int r1 = (++it)->u.operand; 1724 GetPutInfo getPutInfo = GetPutInfo((++it)->u.operand); 1725 ++it; // Structure 1726 int operand = (++it)->u.operand; // Operand 1727 printLocationAndOp(out, location, it, "put_to_scope"); 1728 out.print(registerName(r0)); 1729 if (static_cast<unsigned>(id0) == UINT_MAX) 1730 out.print(", anonymous"); 1731 else 1732 out.print(", ", idName(id0, identifier(id0))); 1733 out.print(", ", registerName(r1), ", ", getPutInfo.operand(), "<", resolveModeName(getPutInfo.resolveMode()), "|", resolveTypeName(getPutInfo.resolveType()), "|", initializationModeName(getPutInfo.initializationMode()), ">, <structure>, ", operand); 1734 break; 1735 } 1736 case op_get_from_arguments: { 1737 int r0 = (++it)->u.operand; 1738 int r1 = (++it)->u.operand; 1739 int offset = (++it)->u.operand; 1740 printLocationAndOp(out, location, it, "get_from_arguments"); 1741 out.printf("%s, %s, %d", registerName(r0).data(), registerName(r1).data(), offset); 1742 dumpValueProfiling(out, it, hasPrintedProfiling); 1743 break; 1744 } 1745 case op_put_to_arguments: { 1746 int r0 = (++it)->u.operand; 1747 int offset = (++it)->u.operand; 1748 int r1 = (++it)->u.operand; 1749 printLocationAndOp(out, location, it, "put_to_arguments"); 1750 out.printf("%s, %d, %s", registerName(r0).data(), offset, registerName(r1).data()); 1751 break; 1752 } 1753 case op_yield: { 1754 int r0 = (++it)->u.operand; 1755 unsigned yieldPoint = (++it)->u.unsignedValue; 1756 int r1 = (++it)->u.operand; 1757 printLocationAndOp(out, location, it, "yield"); 1758 out.printf("%s, %u, %s", registerName(r0).data(), yieldPoint, registerName(r1).data()); 1759 break; 1760 } 1761 default: 1762 RELEASE_ASSERT_NOT_REACHED(); 1763 } 1764 dumpProfilesForBytecodeOffset(out, location, hasPrintedProfiling); 1765 out.print("\n"); 1766 } 1767 1768 template<class Block> 1769 void BytecodeDumper<Block>::dumpBytecode(Block* 
block, PrintStream& out, const typename Block::Instruction* begin, const typename Block::Instruction*& it, const ICStatusMap& statusMap) 1770 { 1771 BytecodeDumper dumper(block, begin); 1772 dumper.dumpBytecode(out, begin, it, statusMap); 1773 } 1774 1775 template<class Block> 1776 void BytecodeDumper<Block>::dumpIdentifiers(PrintStream& out) 80 void BytecodeDumper<Block>::printLocationAndOp(InstructionStream::Offset location, const char* op) 81 { 82 m_out.printf("[%4u] %-18s ", location, op); 83 } 84 85 template<class Block> 86 void BytecodeDumper<Block>::dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&) 87 { 88 ::JSC::dumpBytecode(this, it.offset(), it.ptr()); 89 m_out.print("\n"); 90 } 91 92 template<class Block> 93 void BytecodeDumper<Block>::dumpBytecode(Block* block, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) 94 { 95 BytecodeDumper dumper(block, out); 96 dumper.dumpBytecode(it, statusMap); 97 } 98 99 template<class Block> 100 void BytecodeDumper<Block>::dumpIdentifiers() 1777 101 { 1778 102 if (size_t count = block()->numberOfIdentifiers()) { 1779 out.printf("\nIdentifiers:\n");103 m_out.printf("\nIdentifiers:\n"); 1780 104 size_t i = 0; 1781 105 do { 1782 out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data());106 m_out.printf(" id%u = %s\n", static_cast<unsigned>(i), identifier(i).string().utf8().data()); 1783 107 ++i; 1784 108 } while (i != count); … … 1787 111 1788 112 template<class Block> 1789 void BytecodeDumper<Block>::dumpConstants( PrintStream& out)113 void BytecodeDumper<Block>::dumpConstants() 1790 114 { 1791 115 if (!block()->constantRegisters().isEmpty()) { 1792 out.printf("\nConstants:\n");116 m_out.printf("\nConstants:\n"); 1793 117 size_t i = 0; 1794 118 for (const auto& constant : block()->constantRegisters()) { … … 1805 129 break; 1806 130 } 1807 out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription);131 m_out.printf(" k%u = %s%s\n", static_cast<unsigned>(i), toCString(constant.get()).data(), sourceCodeRepresentationDescription); 1808 132 ++i; 1809 133 } … … 1812 136 1813 137 template<class Block> 1814 void BytecodeDumper<Block>::dumpExceptionHandlers( PrintStream& out)138 void BytecodeDumper<Block>::dumpExceptionHandlers() 1815 139 { 1816 140 if (unsigned count = block()->numberOfExceptionHandlers()) { 1817 out.printf("\nException Handlers:\n");141 m_out.printf("\nException Handlers:\n"); 1818 142 unsigned i = 0; 1819 143 do { 1820 144 const auto& handler = block()->exceptionHandler(i); 1821 out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName());145 m_out.printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] } %s\n", i + 1, handler.start, handler.end, handler.target, handler.typeName()); 1822 146 ++i; 1823 147 } while (i < count); … … 1826 150 1827 151 template<class Block> 1828 void BytecodeDumper<Block>::dumpSwitchJumpTables( PrintStream& out)152 void BytecodeDumper<Block>::dumpSwitchJumpTables() 1829 153 { 1830 154 if (unsigned count = block()->numberOfSwitchJumpTables()) { 1831 out.printf("Switch Jump Tables:\n");155 m_out.printf("Switch Jump Tables:\n"); 1832 156 unsigned i = 0; 1833 157 do { 1834 out.printf(" %1d = {\n", i);158 m_out.printf(" %1d = {\n", i); 1835 159 const auto& switchJumpTable = block()->switchJumpTable(i); 1836 160 int entry = 0; … … 1839 163 if (!*iter) 1840 164 continue; 1841 out.printf("\t\t%4d 
=> %04d\n", entry + switchJumpTable.min, *iter);165 m_out.printf("\t\t%4d => %04d\n", entry + switchJumpTable.min, *iter); 1842 166 } 1843 out.printf(" }\n");167 m_out.printf(" }\n"); 1844 168 ++i; 1845 169 } while (i < count); … … 1848 172 1849 173 template<class Block> 1850 void BytecodeDumper<Block>::dumpStringSwitchJumpTables( PrintStream& out)174 void BytecodeDumper<Block>::dumpStringSwitchJumpTables() 1851 175 { 1852 176 if (unsigned count = block()->numberOfStringSwitchJumpTables()) { 1853 out.printf("\nString Switch Jump Tables:\n");177 m_out.printf("\nString Switch Jump Tables:\n"); 1854 178 unsigned i = 0; 1855 179 do { 1856 out.printf(" %1d = {\n", i);180 m_out.printf(" %1d = {\n", i); 1857 181 const auto& stringSwitchJumpTable = block()->stringSwitchJumpTable(i); 1858 182 auto end = stringSwitchJumpTable.offsetTable.end(); 1859 183 for (auto iter = stringSwitchJumpTable.offsetTable.begin(); iter != end; ++iter) 1860 out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset);1861 out.printf(" }\n");184 m_out.printf("\t\t\"%s\" => %04d\n", iter->key->utf8().data(), iter->value.branchOffset); 185 m_out.printf(" }\n"); 1862 186 ++i; 1863 187 } while (i < count); … … 1866 190 1867 191 template<class Block> 1868 void BytecodeDumper<Block>::dumpBlock(Block* block, const typename Block::UnpackedInstructions& instructions, PrintStream& out, const ICStatusMap& statusMap)192 void BytecodeDumper<Block>::dumpBlock(Block* block, const InstructionStream& instructions, PrintStream& out, const ICStatusMap& statusMap) 1869 193 { 1870 194 size_t instructionCount = 0; 1871 1872 for (size_t i = 0; i < instructions.size(); i += opcodeLengths[Interpreter::getOpcodeID(instructions[i])]) 195 size_t wideInstructionCount = 0; 196 size_t instructionWithMetadataCount = 0; 197 198 for (const auto& instruction : instructions) { 199 if (instruction->isWide()) 200 ++wideInstructionCount; 201 if (instruction->opcodeID() < NUMBER_OF_BYTECODE_WITH_METADATA) 202 ++instructionWithMetadataCount; 1873 203 ++instructionCount; 204 } 1874 205 1875 206 out.print(*block); 1876 207 out.printf( 1877 ": %lu m_instructions; %lu bytes; %d parameter(s); %d callee register(s); %d variable(s)", 1878 static_cast<unsigned long>(instructions.size()), 1879 static_cast<unsigned long>(instructions.size() * sizeof(Instruction)), 208 ": %lu instructions (%lu wide instructions, %lu instructions with metadata); %lu bytes (%lu metadata bytes); %d parameter(s); %d callee register(s); %d variable(s)", 209 static_cast<unsigned long>(instructionCount), 210 static_cast<unsigned long>(wideInstructionCount), 211 static_cast<unsigned long>(instructionWithMetadataCount), 212 static_cast<unsigned long>(instructions.sizeInBytes() + block->metadataSizeInBytes()), 213 static_cast<unsigned long>(block->metadataSizeInBytes()), 1880 214 block->numParameters(), block->numCalleeLocals(), block->numVars()); 1881 215 out.print("; scope at ", block->scopeRegister()); 1882 216 out.printf("\n"); 1883 217 1884 const auto* begin = instructions.begin(); 1885 const auto* end = instructions.end(); 1886 BytecodeDumper<Block> dumper(block, begin); 1887 for (const auto* it = begin; it != end; ++it) 1888 dumper.dumpBytecode(out, begin, it, statusMap); 1889 1890 dumper.dumpIdentifiers(out); 1891 dumper.dumpConstants(out); 1892 dumper.dumpExceptionHandlers(out); 1893 dumper.dumpSwitchJumpTables(out); 1894 dumper.dumpStringSwitchJumpTables(out); 218 BytecodeDumper<Block> dumper(block, out); 219 for (const auto& it : instructions) 220 
dumper.dumpBytecode(it, statusMap); 221 222 dumper.dumpIdentifiers(); 223 dumper.dumpConstants(); 224 dumper.dumpExceptionHandlers(); 225 dumper.dumpSwitchJumpTables(); 226 dumper.dumpStringSwitchJumpTables(); 1895 227 1896 228 out.printf("\n"); -
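Note on this file: the hand-written per-opcode switch deleted above becomes unnecessary because the new format generates a struct and a dump routine per opcode, so the dumper just forwards to ::JSC::dumpBytecode and prints summary statistics (instruction count, wide-instruction count, metadata size). The isWide() statistic refers to the new narrow/wide operand encoding. A minimal sketch of that idea, assuming a one-byte op_wide prefix and a simplified layout rather than JSC's exact encoding:

    #include <cstdint>
    #include <initializer_list>
    #include <vector>

    enum : uint8_t { op_wide = 0xff }; // hypothetical prefix value

    // Emit one instruction: narrow (1-byte operands) when every operand
    // fits in a signed byte, otherwise an op_wide prefix followed by
    // 4-byte little-endian operands.
    static void emit(std::vector<uint8_t>& out, uint8_t opcode, std::initializer_list<int32_t> operands)
    {
        bool narrow = true;
        for (int32_t operand : operands) {
            if (operand < INT8_MIN || operand > INT8_MAX)
                narrow = false;
        }
        if (!narrow)
            out.push_back(op_wide);
        out.push_back(opcode);
        for (int32_t operand : operands) {
            if (narrow)
                out.push_back(static_cast<uint8_t>(operand));
            else {
                for (unsigned shift = 0; shift < 32; shift += 8)
                    out.push_back(static_cast<uint8_t>(operand >> shift));
            }
        }
    }

Most operands are small, so most instructions stay narrow; the dumper's new "(%lu wide instructions...)" line makes that ratio visible per code block.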
trunk/Source/JavaScriptCore/bytecode/BytecodeDumper.h
r237486 r237547 29 29 #include "CallLinkInfo.h" 30 30 #include "ICStatusMap.h" 31 #include "InstructionStream.h" 31 32 #include "StructureStubInfo.h" 32 33 … … 38 39 class BytecodeDumper { 39 40 public: 40 typedef typename Block::Instruction Instruction; 41 static void dumpBytecode(Block*, PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap()); 42 static void dumpBlock(Block*, const InstructionStream&, PrintStream& out, const ICStatusMap& = ICStatusMap()); 41 43 42 static void dumpBytecode(Block*, PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap& statusMap = ICStatusMap()); 43 static void dumpBlock(Block*, const typename Block::UnpackedInstructions&, PrintStream& out, const ICStatusMap& statusMap = ICStatusMap()); 44 void printLocationAndOp(InstructionStream::Offset location, const char* op); 45 46 template<typename T> 47 void dumpOperand(T operand, bool isFirst = false) 48 { 49 if (!isFirst) 50 m_out.print(", "); 51 dumpValue(operand); 52 } 53 54 void dumpValue(VirtualRegister reg) { m_out.printf("%s", registerName(reg.offset()).data()); } 55 template<typename T> 56 void dumpValue(T v) { m_out.print(v); } 44 57 45 58 private: 46 BytecodeDumper(Block* block, const Instruction* instructionsBegin)59 BytecodeDumper(Block* block, PrintStream& out) 47 60 : m_block(block) 48 , m_ instructionsBegin(instructionsBegin)61 , m_out(out) 49 62 { 50 63 } 51 64 52 65 Block* block() const { return m_block; } 53 const Instruction* instructionsBegin() const { return m_instructionsBegin; }54 66 55 67 ALWAYS_INLINE VM* vm() const; … … 60 72 const Identifier& identifier(int index) const; 61 73 62 void dumpIdentifiers( PrintStream& out);63 void dumpConstants( PrintStream& out);64 void dumpExceptionHandlers( PrintStream& out);65 void dumpSwitchJumpTables( PrintStream& out);66 void dumpStringSwitchJumpTables( PrintStream& out);74 void dumpIdentifiers(); 75 void dumpConstants(); 76 void dumpExceptionHandlers(); 77 void dumpSwitchJumpTables(); 78 void dumpStringSwitchJumpTables(); 67 79 68 void printUnaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); 69 void printBinaryOp(PrintStream& out, int location, const Instruction*& it, const char* op); 70 void printConditionalJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); 71 void printCompareJump(PrintStream& out, const Instruction*, const Instruction*& it, int location, const char* op); 72 void printGetByIdOp(PrintStream& out, int location, const Instruction*& it); 73 void printGetByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); 74 void printPutByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); 75 void printInByIdCacheStatus(PrintStream& out, int location, const ICStatusMap&); 76 enum CacheDumpMode { DumpCaches, DontDumpCaches }; 77 void printCallOp(PrintStream& out, int location, const Instruction*& it, const char* op, CacheDumpMode, bool& hasPrintedProfiling, const ICStatusMap&); 78 void printPutByIdOp(PrintStream& out, int location, const Instruction*& it, const char* op); 79 void printLocationOpAndRegisterOperand(PrintStream& out, int location, const Instruction*& it, const char* op, int operand); 80 void dumpBytecode(PrintStream& out, const Instruction* begin, const Instruction*& it, const ICStatusMap&); 81 82 void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); 83 void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling); 
84 void dumpProfilesForBytecodeOffset(PrintStream&, unsigned location, bool& hasPrintedProfiling); 85 86 void* actualPointerFor(Special::Pointer) const; 87 88 #if ENABLE(JIT) 89 void dumpCallLinkStatus(PrintStream&, unsigned location, const ICStatusMap&); 90 #endif 80 void dumpBytecode(const InstructionStream::Ref& it, const ICStatusMap&); 91 81 92 82 Block* m_block; 93 const Instruction* m_instructionsBegin;83 PrintStream& m_out; 94 84 }; 95 85 -
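Note on this file: the header drops the Instruction*-cursor helpers and instead stores a PrintStream& (m_out) and prints operands through overload resolution, so a VirtualRegister gets a symbolic name while any other operand falls through to the generic printer. A self-contained sketch of the idiom, with registerName and JSC's real VirtualRegister semantics simplified away:

    #include <cstdio>

    struct VirtualRegister { int offset; };

    struct Dumper {
        // The non-template overload wins for registers; everything else
        // uses the generic template.
        void dumpValue(VirtualRegister reg) { std::printf("loc%d", reg.offset); }
        template<typename T> void dumpValue(T value) { std::printf("%d", static_cast<int>(value)); }

        template<typename T>
        void dumpOperand(T operand, bool isFirst = false)
        {
            if (!isFirst)
                std::printf(", ");
            dumpValue(operand);
        }
    };

    int main()
    {
        Dumper dumper;
        dumper.dumpOperand(VirtualRegister { 3 }, true);
        dumper.dumpOperand(255);
        std::printf("\n"); // prints "loc3, 255"
    }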
trunk/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.cpp
r237486 r237547 31 31 #include "BytecodeLivenessAnalysisInlines.h" 32 32 #include "BytecodeRewriter.h" 33 #include "BytecodeStructs.h" 33 34 #include "BytecodeUseDef.h" 34 35 #include "IdentifierInlines.h" … … 39 40 #include "StrongInlines.h" 40 41 #include "UnlinkedCodeBlock.h" 42 #include "UnlinkedMetadataTableInlines.h" 41 43 #include <wtf/Optional.h> 42 44 … … 44 46 45 47 struct YieldData { 46 size_t point { 0 };47 intargument { 0 };48 InstructionStream::Offset point { 0 }; 49 VirtualRegister argument { 0 }; 48 50 FastBitVector liveness; 49 51 }; … … 53 55 typedef Vector<YieldData> Yields; 54 56 55 BytecodeGeneratorification(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) 56 : m_codeBlock(codeBlock) 57 BytecodeGeneratorification(BytecodeGenerator& bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) 58 : m_bytecodeGenerator(bytecodeGenerator) 59 , m_codeBlock(codeBlock) 57 60 , m_instructions(instructions) 58 61 , m_graph(m_codeBlock, m_instructions) … … 61 64 { 62 65 for (BytecodeBasicBlock* block : m_graph) { 63 for ( unsigned bytecodeOffset : block->offsets()) {64 const UnlinkedInstruction* pc = &instructions[bytecodeOffset];65 switch ( pc->u.opcode) {66 for (const auto offset : block->offsets()) { 67 const auto instruction = m_instructions.at(offset); 68 switch (instruction->opcodeID()) { 66 69 case op_enter: { 67 m_enterPoint = bytecodeOffset;70 m_enterPoint = instruction.offset(); 68 71 break; 69 72 } 70 73 71 74 case op_yield: { 72 unsigned liveCalleeLocalsIndex = pc[2].u.unsignedValue; 75 auto bytecode = instruction->as<OpYield>(); 76 unsigned liveCalleeLocalsIndex = bytecode.yieldPoint; 73 77 if (liveCalleeLocalsIndex >= m_yields.size()) 74 78 m_yields.resize(liveCalleeLocalsIndex + 1); 75 79 YieldData& data = m_yields[liveCalleeLocalsIndex]; 76 data.point = bytecodeOffset;77 data.argument = pc[3].u.operand;80 data.point = instruction.offset(); 81 data.argument = bytecode.argument; 78 82 break; 79 83 } … … 106 110 } 107 111 108 unsigned enterPoint() const 109 { 110 return m_enterPoint; 112 InstructionStream::Ref enterPoint() const 113 { 114 return m_instructions.at(m_enterPoint); 115 } 116 117 const InstructionStream& instructions() const 118 { 119 return m_instructions; 111 120 } 112 121 … … 139 148 } 140 149 141 unsigned m_enterPoint { 0 }; 150 BytecodeGenerator& m_bytecodeGenerator; 151 InstructionStream::Offset m_enterPoint; 142 152 UnlinkedCodeBlock* m_codeBlock; 143 UnlinkedCodeBlock::UnpackedInstructions& m_instructions;153 InstructionStreamWriter& m_instructions; 144 154 BytecodeGraph m_graph; 145 155 Vector<std::optional<Storage>> m_storages; … … 156 166 } 157 167 158 void run(UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions)168 void run(UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions) 159 169 { 160 170 // Perform modified liveness analysis to determine which locals are live at the merge points. 
… … 164 174 165 175 for (YieldData& data : m_generatorification.yields()) 166 data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), data.point + opcodeLength(op_yield));176 data.liveness = getLivenessInfoAtBytecodeOffset(codeBlock, instructions, m_generatorification.graph(), m_generatorification.instructions().at(data.point).next().offset()); 167 177 } 168 178 … … 180 190 } 181 191 182 BytecodeRewriter rewriter(m_ graph, m_codeBlock, m_instructions);192 BytecodeRewriter rewriter(m_bytecodeGenerator, m_graph, m_codeBlock, m_instructions); 183 193 184 194 // Setup the global switch for the generator. 185 195 { 186 unsigned nextToEnterPoint = enterPoint() + opcodeLength(op_enter);196 auto nextToEnterPoint = enterPoint().next(); 187 197 unsigned switchTableIndex = m_codeBlock->numberOfSwitchJumpTables(); 188 198 VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State)); … … 191 201 jumpTable.branchOffsets.resize(m_yields.size() + 1); 192 202 jumpTable.branchOffsets.fill(0); 193 jumpTable.add(0, nextToEnterPoint );203 jumpTable.add(0, nextToEnterPoint.offset()); 194 204 for (unsigned i = 0; i < m_yields.size(); ++i) 195 205 jumpTable.add(i + 1, m_yields[i].point); 196 206 197 207 rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) { 198 fragment.appendInstruction (op_switch_imm, switchTableIndex, nextToEnterPoint, state.offset());208 fragment.appendInstruction<OpSwitchImm>(switchTableIndex, nextToEnterPoint.offset(), state); 199 209 }); 200 210 } … … 203 213 VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame)); 204 214 215 auto instruction = m_instructions.at(data.point); 205 216 // Emit save sequence. 206 rewriter.insertFragmentBefore( data.point, [&](BytecodeRewriter::Fragment& fragment) {217 rewriter.insertFragmentBefore(instruction, [&](BytecodeRewriter::Fragment& fragment) { 207 218 data.liveness.forEachSetBit([&](size_t index) { 208 219 VirtualRegister operand = virtualRegisterForLocal(index); 209 220 Storage storage = storageForGeneratorLocal(index); 210 221 211 fragment.appendInstruction( 212 op_put_to_scope, 213 scope.offset(), // scope 222 fragment.appendInstruction<OpPutToScope>( 223 scope, // scope 214 224 storage.identifierIndex, // identifier 215 operand .offset(), // value216 GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization) .operand(), // info225 operand, // value 226 GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info 217 227 m_generatorFrameSymbolTableIndex, // symbol table constant index 218 228 storage.scopeOffset.offset() // scope offset … … 221 231 222 232 // Insert op_ret just after save sequence. 223 fragment.appendInstruction (op_ret,data.argument);233 fragment.appendInstruction<OpRet>(data.argument); 224 234 }); 225 235 226 236 // Emit resume sequence. 227 rewriter.insertFragmentAfter( data.point, [&](BytecodeRewriter::Fragment& fragment) {237 rewriter.insertFragmentAfter(instruction, [&](BytecodeRewriter::Fragment& fragment) { 228 238 data.liveness.forEachSetBit([&](size_t index) { 229 239 VirtualRegister operand = virtualRegisterForLocal(index); 230 240 Storage storage = storageForGeneratorLocal(index); 231 241 232 UnlinkedValueProfile profile = m_codeBlock->vm()->canUseJIT() 233 ? 
m_codeBlock->addValueProfile() 234 : static_cast<UnlinkedValueProfile>(-1); 235 fragment.appendInstruction( 236 op_get_from_scope, 237 operand.offset(), // dst 238 scope.offset(), // scope 242 fragment.appendInstruction<OpGetFromScope>( 243 operand, // dst 244 scope, // scope 239 245 storage.identifierIndex, // identifier 240 GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization) .operand(), // info246 GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info 241 247 0, // local scope depth 242 storage.scopeOffset.offset(), // scope offset 243 profile // profile 248 storage.scopeOffset.offset() // scope offset 244 249 ); 245 250 }); … … 247 252 248 253 // Clip the unnecessary bytecodes. 249 rewriter.removeBytecode( data.point);254 rewriter.removeBytecode(instruction); 250 255 } 251 256 … … 253 258 } 254 259 255 void performGeneratorification( UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex)260 void performGeneratorification(BytecodeGenerator& bytecodeGenerator, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& instructions, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex) 256 261 { 257 262 if (Options::dumpBytecodesBeforeGeneratorification()) 258 263 BytecodeDumper<UnlinkedCodeBlock>::dumpBlock(codeBlock, instructions, WTF::dataFile()); 259 264 260 BytecodeGeneratorification pass( codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex);265 BytecodeGeneratorification pass(bytecodeGenerator, codeBlock, instructions, generatorFrameSymbolTable, generatorFrameSymbolTableIndex); 261 266 pass.run(); 262 267 } -
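Note on this file: the generatorification pass stops poking at raw operand words (pc[2].u.unsignedValue, pc[3].u.operand) and instead decodes through generated structs (instruction->as<OpYield>()) and emits fragments with typed operands (appendInstruction<OpPutToScope>(...)). A toy version of the accessor pattern, with an invented opcode ID and layout standing in for the generated code:

    #include <cassert>
    #include <cstdint>

    struct OpYield {
        static constexpr uint8_t opcodeID = 7; // hypothetical ID
        int generator;
        unsigned yieldPoint;
        int argument;
    };

    struct Instruction {
        uint8_t opcode;
        int32_t operands[3];

        // as<Op>() checks the opcode, then exposes the operands by name
        // instead of by positional index.
        template<typename Op>
        Op as() const
        {
            assert(opcode == Op::opcodeID);
            return Op { operands[0], static_cast<unsigned>(operands[1]), operands[2] };
        }
    };

    int main()
    {
        Instruction yield { OpYield::opcodeID, { 1, 0, 2 } };
        auto bytecode = yield.as<OpYield>();
        assert(bytecode.yieldPoint == 0 && bytecode.argument == 2);
    }

Typed access also explains why the explicit UnlinkedValueProfile argument disappears from the rewritten op_get_from_scope fragment: profile slots move into per-opcode metadata instead of being an extra operand word.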
trunk/Source/JavaScriptCore/bytecode/BytecodeGeneratorification.h
r237486 r237547 27 27 #pragma once 28 28 29 #include "UnlinkedCodeBlock.h"30 31 29 namespace JSC { 32 30 31 class BytecodeGenerator; 32 class InstructionStreamWriter; 33 class SymbolTable; 34 class UnlinkedCodeBlock; 33 35 class SymbolTable; 34 36 35 void performGeneratorification( UnlinkedCodeBlock*, UnlinkedCodeBlock::UnpackedInstructions&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex);37 void performGeneratorification(BytecodeGenerator&, UnlinkedCodeBlock*, InstructionStreamWriter&, SymbolTable* generatorFrameSymbolTable, int generatorFrameSymbolTableIndex); 36 38 37 39 } // namespace JSC -
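Note on this file: the include of UnlinkedCodeBlock.h is replaced with forward declarations, which works because the declaration uses these types only by reference or pointer. Sketch of the idiom:

    // Header side: no definitions needed when types appear only as & or *.
    class BytecodeGenerator;
    class InstructionStreamWriter;
    class SymbolTable;
    class UnlinkedCodeBlock;

    void performGeneratorification(BytecodeGenerator&, UnlinkedCodeBlock*,
        InstructionStreamWriter&, SymbolTable*, int symbolTableIndex);

    // Only the .cpp that implements or calls it includes the full headers.

The payoff is smaller rebuild cascades: edits to UnlinkedCodeBlock.h no longer recompile everything that pulls in this header.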
trunk/Source/JavaScriptCore/bytecode/BytecodeGraph.h
r237486 r237547 45 45 46 46 template <typename CodeBlockType> 47 inline BytecodeGraph(CodeBlockType*, typename CodeBlockType::UnpackedInstructions&);47 inline BytecodeGraph(CodeBlockType*, const InstructionStream&); 48 48 49 49 WTF::IteratorRange<BasicBlocksVector::reverse_iterator> basicBlocksInReverseOrder() … … 52 52 } 53 53 54 static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, unsignedbytecodeOffset)54 static bool blockContainsBytecodeOffset(BytecodeBasicBlock* block, InstructionStream::Offset bytecodeOffset) 55 55 { 56 56 unsigned leaderOffset = block->leaderOffset(); … … 58 58 } 59 59 60 BytecodeBasicBlock* findBasicBlockForBytecodeOffset( unsignedbytecodeOffset)60 BytecodeBasicBlock* findBasicBlockForBytecodeOffset(InstructionStream::Offset bytecodeOffset) 61 61 { 62 62 /* … … 86 86 } 87 87 88 BytecodeBasicBlock* findBasicBlockWithLeaderOffset( unsignedleaderOffset)88 BytecodeBasicBlock* findBasicBlockWithLeaderOffset(InstructionStream::Offset leaderOffset) 89 89 { 90 90 return (*tryBinarySearch<std::unique_ptr<BytecodeBasicBlock>, unsigned>(m_basicBlocks, m_basicBlocks.size(), leaderOffset, [] (std::unique_ptr<BytecodeBasicBlock>* basicBlock) { return (*basicBlock)->leaderOffset(); })).get(); … … 106 106 107 107 template<typename CodeBlockType> 108 BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, typename CodeBlockType::UnpackedInstructions& instructions)108 BytecodeGraph::BytecodeGraph(CodeBlockType* codeBlock, const InstructionStream& instructions) 109 109 { 110 BytecodeBasicBlock::compute(codeBlock, instructions .begin(), instructions.size(), m_basicBlocks);110 BytecodeBasicBlock::compute(codeBlock, instructions, m_basicBlocks); 111 111 ASSERT(m_basicBlocks.size()); 112 112 } -
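Note on this file: findBasicBlockForBytecodeOffset relies on the blocks being sorted by leaderOffset, so a block can be located by binary search over half-open ranges [leaderOffset, leaderOffset + totalLength), matching blockContainsBytecodeOffset above. A standalone sketch of that lookup:

    #include <algorithm>
    #include <vector>

    struct Block { unsigned leaderOffset; unsigned totalLength; };

    static const Block* findBlockForOffset(const std::vector<Block>& blocks, unsigned offset)
    {
        // Find the first block whose leader is strictly greater than offset...
        auto it = std::upper_bound(blocks.begin(), blocks.end(), offset,
            [](unsigned off, const Block& block) { return off < block.leaderOffset; });
        if (it == blocks.begin())
            return nullptr; // offset precedes the first block
        --it; // ...so the candidate is the block just before it.
        if (offset < it->leaderOffset + it->totalLength)
            return &*it;
        return nullptr;
    }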
trunk/Source/JavaScriptCore/bytecode/BytecodeKills.h
r237486 r237547 39 39 : m_codeBlock(nullptr) 40 40 { 41 } 42 43 // By convention, we say that non-local operands are never killed. 44 bool operandIsKilled(unsigned bytecodeIndex, int operand) const 45 { 46 ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); 47 VirtualRegister reg(operand); 48 if (reg.isLocal()) 49 return m_killSets[bytecodeIndex].contains(operand); 50 return false; 51 } 52 53 bool operandIsKilled(Instruction* instruction, int operand) const 54 { 55 return operandIsKilled(m_codeBlock->bytecodeOffset(instruction), operand); 56 } 57 58 template<typename Functor> 59 void forEachOperandKilledAt(unsigned bytecodeIndex, const Functor& functor) const 60 { 61 ASSERT_WITH_SECURITY_IMPLICATION(bytecodeIndex < m_codeBlock->instructions().size()); 62 m_killSets[bytecodeIndex].forEachLocal( 63 [&] (unsigned local) { 64 functor(virtualRegisterForLocal(local)); 65 }); 66 } 67 68 template<typename Functor> 69 void forEachOperandKilledAt(Instruction* pc, const Functor& functor) const 70 { 71 forEachOperandKilledAt(m_codeBlock->bytecodeOffset(pc), functor); 72 41 }
trunk/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.cpp
r237486 r237547 120 120 { 121 121 dataLog("\nDumping bytecode liveness for ", *codeBlock, ":\n"); 122 Instruction* instructionsBegin = codeBlock->instructions().begin();122 const auto& instructions = codeBlock->instructions(); 123 123 unsigned i = 0; 124 124 … … 168 168 } 169 169 for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { 170 const Instruction* currentInstruction = &instructionsBegin[bytecodeOffset];170 const auto currentInstruction = instructions.at(bytecodeOffset); 171 171 172 172 dataLogF("Live variables:"); … … 174 174 dumpBitVector(liveBefore); 175 175 dataLogF("\n"); 176 codeBlock->dumpBytecode(WTF::dataFile(), instructionsBegin,currentInstruction);176 codeBlock->dumpBytecode(WTF::dataFile(), currentInstruction); 177 177 178 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode); 179 unsigned opcodeLength = opcodeLengths[opcodeID]; 180 bytecodeOffset += opcodeLength; 178 bytecodeOffset += currentInstruction->size(); 181 179 } 182 180 -
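The dump loop here now advances by currentInstruction->size() instead of indexing a fixed opcodeLengths table, because instructions in the new stream are variable-width (note the op_wide handling elsewhere in this patch). A toy model of that iteration; the encoding below, a 0xFF prefix marking a wide instruction with 4-byte operands, is made up purely for illustration:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Hypothetical encoding: 0xFF prefixes a wide instruction whose two
    // operands take 4 bytes each; narrow operands take 1 byte each.
    struct ToyInstruction {
        const uint8_t* pc;
        bool isWide() const { return pc[0] == 0xFF; }
        uint8_t opcode() const { return isWide() ? pc[1] : pc[0]; }
        size_t size() const
        {
            const size_t operandCount = 2; // every toy opcode has two operands
            return isWide() ? 2 + 4 * operandCount : 1 + operandCount;
        }
    };

    int main()
    {
        std::vector<uint8_t> stream {
            0x01, 0x00, 0x01,                   // narrow instruction
            0xFF, 0x02, 0, 0, 0, 0, 0, 0, 0, 1, // wide instruction
        };
        for (size_t offset = 0; offset < stream.size();) {
            ToyInstruction insn { stream.data() + offset };
            std::printf("offset %zu: opcode %u, size %zu\n",
                offset, static_cast<unsigned>(insn.opcode()), insn.size());
            offset += insn.size(); // advance by this instruction's own size
        }
        return 0;
    }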
trunk/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysis.h
r237486 r237547 38 38 class BytecodeLivenessPropagation { 39 39 protected: 40 template<typename CodeBlockType, typename Instructions, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsignedbytecodeOffset, const UseFunctor&, const DefFunctor&);40 template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, const UseFunctor&, const DefFunctor&); 41 41 42 template<typename CodeBlockType , typename Instructions> void stepOverInstruction(CodeBlockType*, const Instructions&, BytecodeGraph&, unsignedbytecodeOffset, FastBitVector& out);42 template<typename CodeBlockType> void stepOverInstruction(CodeBlockType*, const InstructionStream&, BytecodeGraph&, InstructionStream::Offset bytecodeOffset, FastBitVector& out); 43 43 44 44 template<typename CodeBlockType, typename Instructions> bool computeLocalLivenessForBytecodeOffset(CodeBlockType*, const Instructions&, BytecodeGraph&, BytecodeBasicBlock*, unsigned targetOffset, FastBitVector& result); -
trunk/Source/JavaScriptCore/bytecode/BytecodeLivenessAnalysisInlines.h
r237486 r237547 52 52 } 53 53 54 inline bool isValidRegisterForLiveness( intoperand)54 inline bool isValidRegisterForLiveness(VirtualRegister operand) 55 55 { 56 VirtualRegister virtualReg(operand); 57 if (virtualReg.isConstant()) 56 if (operand.isConstant()) 58 57 return false; 59 return virtualReg.isLocal();58 return operand.isLocal(); 60 59 } 61 60 62 61 // Simplified interface to bytecode use/def, which determines defs first and then uses, and includes 63 62 // exception handlers in the uses. 64 template<typename CodeBlockType, typename Instructions, typenameUseFunctor, typename DefFunctor>65 inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instruction s& instructions, BytecodeGraph& graph, unsignedbytecodeOffset, const UseFunctor& use, const DefFunctor& def)63 template<typename CodeBlockType, typename UseFunctor, typename DefFunctor> 64 inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, const UseFunctor& use, const DefFunctor& def) 66 65 { 67 66 // This abstractly execute the instruction in reverse. Instructions logically first use operands and … … 80 79 // first add it to the out set (the use), and then we'd remove it (the def). 81 80 82 auto* instructionsBegin = instructions.begin(); 83 auto* instruction = &instructionsBegin[bytecodeOffset]; 84 OpcodeID opcodeID = Interpreter::getOpcodeID(*instruction); 81 auto* instruction = instructions.at(bytecodeOffset).ptr(); 82 OpcodeID opcodeID = instruction->opcodeID(); 85 83 86 84 computeDefsForBytecodeOffset( 87 85 codeBlock, opcodeID, instruction, 88 [&] ( CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, intoperand) {86 [&] (VirtualRegister operand) { 89 87 if (isValidRegisterForLiveness(operand)) 90 def( VirtualRegister(operand).toLocal());88 def(operand.toLocal()); 91 89 }); 92 90 93 91 computeUsesForBytecodeOffset( 94 92 codeBlock, opcodeID, instruction, 95 [&] ( CodeBlockType*, const typename CodeBlockType::Instruction*, OpcodeID, intoperand) {93 [&] (VirtualRegister operand) { 96 94 if (isValidRegisterForLiveness(operand)) 97 use( VirtualRegister(operand).toLocal());95 use(operand.toLocal()); 98 96 }); 99 97 … … 107 105 } 108 106 109 template<typename CodeBlockType , typename Instructions>110 inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const Instruction s& instructions, BytecodeGraph& graph, unsignedbytecodeOffset, FastBitVector& out)107 template<typename CodeBlockType> 108 inline void BytecodeLivenessPropagation::stepOverInstruction(CodeBlockType* codeBlock, const InstructionStream& instructions, BytecodeGraph& graph, InstructionStream::Offset bytecodeOffset, FastBitVector& out) 111 109 { 112 110 stepOverInstruction( -
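As the comment in stepOverInstruction explains, one backward step of liveness computes out = (out - defs) ∪ uses: defs are cleared first, then uses are set, so an operand both used and defined by the instruction stays live. A self-contained sketch of that order on a toy bit set, with made-up register indices:

    #include <bitset>
    #include <cstdio>
    #include <vector>

    // One backward liveness step over a toy 32-local frame:
    // out = (out - defs) ∪ uses. Clearing defs before setting uses keeps
    // an operand that is both used and defined live, as required.
    void stepOverToyInstruction(std::bitset<32>& live,
                                const std::vector<unsigned>& defs,
                                const std::vector<unsigned>& uses)
    {
        for (unsigned local : defs)
            live.reset(local);
        for (unsigned local : uses)
            live.set(local);
    }

    int main()
    {
        std::bitset<32> live;
        live.set(0); // local 0 is live after the instruction
        // Toy instruction "local0 = local1 + 1": defs {0}, uses {1}.
        stepOverToyInstruction(live, { 0 }, { 1 });
        std::printf("live before: local0=%d local1=%d\n", (int)live.test(0), (int)live.test(1));
        return 0;
    }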
trunk/Source/JavaScriptCore/bytecode/BytecodeRewriter.cpp
r237486 r237547 39 39 Insertion& insertion = m_insertions[insertionIndex]; 40 40 if (insertion.type == Insertion::Type::Remove) 41 m_ instructions.remove(insertion.index.bytecodeOffset, insertion.length());41 m_writer.m_instructions.remove(insertion.index.bytecodeOffset, insertion.length()); 42 42 else { 43 43 if (insertion.includeBranch == IncludeBranch::Yes) { … … 45 45 adjustJumpTargetsInFragment(finalOffset, insertion); 46 46 } 47 m_ instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions);47 m_writer.m_instructions.insertVector(insertion.index.bytecodeOffset, insertion.instructions.m_instructions); 48 48 } 49 49 } … … 57 57 }); 58 58 59 m_codeBlock->applyModification(*this, m_ instructions);59 m_codeBlock->applyModification(*this, m_writer); 60 60 } 61 61 62 62 void BytecodeRewriter::adjustJumpTargetsInFragment(unsigned finalOffset, Insertion& insertion) 63 63 { 64 auto& fragment = insertion.instructions; 65 UnlinkedInstruction* instructionsBegin = fragment.data(); 66 for (unsigned fragmentOffset = 0, fragmentCount = fragment.size(); fragmentOffset < fragmentCount;) { 67 UnlinkedInstruction& instruction = fragment[fragmentOffset]; 68 OpcodeID opcodeID = instruction.u.opcode; 69 if (isBranch(opcodeID)) { 70 unsigned bytecodeOffset = finalOffset + fragmentOffset; 71 extractStoredJumpTargetsForBytecodeOffset(m_codeBlock, instructionsBegin, fragmentOffset, [&](int32_t& label) { 64 for (auto& instruction : insertion.instructions) { 65 if (isBranch(instruction->opcodeID())) { 66 unsigned bytecodeOffset = finalOffset + instruction.offset(); 67 updateStoredJumpTargetsForInstruction(m_codeBlock, finalOffset, instruction, [&](int32_t label) { 72 68 int absoluteOffset = adjustAbsoluteOffset(label); 73 label =absoluteOffset - static_cast<int>(bytecodeOffset);69 return absoluteOffset - static_cast<int>(bytecodeOffset); 74 70 }); 75 71 } 76 fragmentOffset += opcodeLength(opcodeID);77 72 } 78 73 } 79 74 80 void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, Vector<UnlinkedInstruction>&& fragment)75 void BytecodeRewriter::insertImpl(InsertionPoint insertionPoint, IncludeBranch includeBranch, InstructionStreamWriter&& writer) 81 76 { 82 77 ASSERT(insertionPoint.position == Position::Before || insertionPoint.position == Position::After); … … 86 81 includeBranch, 87 82 0, 88 WTFMove( fragment)83 WTFMove(writer) 89 84 }); 90 85 } 91 86 92 int BytecodeRewriter::adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint)87 int32_t BytecodeRewriter::adjustJumpTarget(InsertionPoint startPoint, InsertionPoint jumpTargetPoint) 93 88 { 94 89 if (startPoint < jumpTargetPoint) { … … 112 107 } 113 108 109 // FIXME: unit test the logic in this method 110 // https://bugs.webkit.org/show_bug.cgi?id=190950 111 void BytecodeRewriter::adjustJumpTargets() 112 { 113 auto currentInsertion = m_insertions.begin(); 114 auto outOfLineJumpTargets = m_codeBlock->replaceOutOfLineJumpTargets(); 115 116 int offset = 0; 117 for (InstructionStream::Offset i = 0; i < m_writer.size();) { 118 int before = 0; 119 int after = 0; 120 int remove = 0; 121 while (currentInsertion != m_insertions.end() && static_cast<InstructionStream::Offset>(currentInsertion->index.bytecodeOffset) == i) { 122 auto size = currentInsertion->length(); 123 if (currentInsertion->type == Insertion::Type::Remove) 124 remove += size; 125 else if (currentInsertion->index.position == Position::Before) 126 before += size; 127 else if (currentInsertion->index.position == Position::After) 
128 after += size; 129 ++currentInsertion; 130 } 131 132 offset += before; 133 134 if (!remove) { 135 auto instruction = m_writer.ref(i); 136 updateStoredJumpTargetsForInstruction(m_codeBlock, offset, instruction, [&](int32_t relativeOffset) { 137 return adjustJumpTarget(instruction.offset(), instruction.offset() + relativeOffset); 138 }, outOfLineJumpTargets); 139 i += instruction->size(); 140 } else { 141 offset -= remove; 142 i += remove; 143 } 144 145 offset += after; 146 } 147 } 148 114 149 } // namespace JSC -
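The FIXME above asks for unit tests of adjustJumpTargets. A deliberately simplified, standalone model of the underlying offset arithmetic, which collapses the Position::Before/After distinction and the per-offset bookkeeping the real method tracks, but shows why a jump spanning an edited region must grow or shrink by the net number of slots inserted or removed between its source and target:

    #include <cstdio>
    #include <vector>

    // Toy edit list: each entry inserts (delta > 0) or removes (delta < 0)
    // that many instruction slots at bytecodeOffset.
    struct ToyEdit {
        unsigned bytecodeOffset;
        int delta;
    };

    // New absolute position of an original offset: shift it by every edit
    // that lands at or before it in the original stream.
    int adjustAbsoluteOffset(const std::vector<ToyEdit>& edits, unsigned originalOffset)
    {
        int adjusted = static_cast<int>(originalOffset);
        for (const ToyEdit& edit : edits) {
            if (edit.bytecodeOffset <= originalOffset)
                adjusted += edit.delta;
        }
        return adjusted;
    }

    int main()
    {
        // Insert 3 slots at offset 4 and remove 2 slots at offset 10.
        std::vector<ToyEdit> edits { { 4, 3 }, { 10, -2 } };
        int from = adjustAbsoluteOffset(edits, 2);  // unchanged: 2
        int to = adjustAbsoluteOffset(edits, 12);   // 12 + 3 - 2 = 13
        std::printf("jump span was %d, becomes %d\n", 12 - 2, to - from);
        return 0;
    }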
trunk/Source/JavaScriptCore/bytecode/BytecodeRewriter.h
r237486 r237547 27 27 #pragma once 28 28 29 #include "BytecodeGenerator.h" 29 30 #include "BytecodeGraph.h" 30 31 #include "Bytecodes.h" … … 94 95 95 96 struct InsertionPoint { 96 int bytecodeOffset;97 int32_t bytecodeOffset; 97 98 Position position; 98 99 99 InsertionPoint( int offset, Position pos)100 InsertionPoint(InstructionStream::Offset offset, Position pos) 100 101 : bytecodeOffset(offset) 101 102 , position(pos) … … 131 132 IncludeBranch includeBranch; 132 133 size_t removeLength; 133 Vector<UnlinkedInstruction>instructions;134 InstructionStreamWriter instructions; 134 135 }; 135 136 … … 138 139 WTF_MAKE_NONCOPYABLE(Fragment); 139 140 public: 140 Fragment(Vector<UnlinkedInstruction>& fragment, IncludeBranch& includeBranch) 141 : m_fragment(fragment) 141 Fragment(BytecodeGenerator& bytecodeGenerator, InstructionStreamWriter& writer, IncludeBranch& includeBranch) 142 : m_bytecodeGenerator(bytecodeGenerator) 143 , m_writer(writer) 142 144 , m_includeBranch(includeBranch) 143 145 { 144 146 } 145 147 146 template<class ... Args>147 void appendInstruction( OpcodeID opcodeID,Args... args)148 { 149 if (isBranch( opcodeID))148 template<class Op, class... Args> 149 void appendInstruction(Args... args) 150 { 151 if (isBranch(Op::opcodeID)) 150 152 m_includeBranch = IncludeBranch::Yes; 151 153 152 UnlinkedInstruction instructions[sizeof...(args) + 1] = { 153 UnlinkedInstruction(opcodeID), 154 UnlinkedInstruction(args)... 155 }; 156 m_fragment.append(instructions, sizeof...(args) + 1); 154 m_bytecodeGenerator.withWriter(m_writer, [&] { 155 Op::emit(&m_bytecodeGenerator, std::forward<Args>(args)...); 156 }); 157 157 } 158 158 159 159 private: 160 Vector<UnlinkedInstruction>& m_fragment; 160 BytecodeGenerator& m_bytecodeGenerator; 161 InstructionStreamWriter& m_writer; 161 162 IncludeBranch& m_includeBranch; 162 163 }; 163 164 164 BytecodeRewriter(BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, UnlinkedCodeBlock::UnpackedInstructions& instructions) 165 : m_graph(graph) 165 BytecodeRewriter(BytecodeGenerator& bytecodeGenerator, BytecodeGraph& graph, UnlinkedCodeBlock* codeBlock, InstructionStreamWriter& writer) 166 : m_bytecodeGenerator(bytecodeGenerator) 167 , m_graph(graph) 166 168 , m_codeBlock(codeBlock) 167 , m_ instructions(instructions)169 , m_writer(writer) 168 170 { 169 171 } 170 172 171 173 template<class Function> 172 void insertFragmentBefore( unsigned bytecodeOffset, Function function)174 void insertFragmentBefore(const InstructionStream::Ref& instruction, Function function) 173 175 { 174 176 IncludeBranch includeBranch = IncludeBranch::No; 175 Vector<UnlinkedInstruction> instructions;176 Fragment fragment( instructions, includeBranch);177 InstructionStreamWriter writer; 178 Fragment fragment(m_bytecodeGenerator, writer, includeBranch); 177 179 function(fragment); 178 insertImpl(InsertionPoint( bytecodeOffset, Position::Before), includeBranch, WTFMove(instructions));180 insertImpl(InsertionPoint(instruction.offset(), Position::Before), includeBranch, WTFMove(writer)); 179 181 } 180 182 181 183 template<class Function> 182 void insertFragmentAfter( unsigned bytecodeOffset, Function function)184 void insertFragmentAfter(const InstructionStream::Ref& instruction, Function function) 183 185 { 184 186 IncludeBranch includeBranch = IncludeBranch::No; 185 Vector<UnlinkedInstruction> instructions;186 Fragment fragment( instructions, includeBranch);187 InstructionStreamWriter writer; 188 Fragment fragment(m_bytecodeGenerator, writer, includeBranch); 187 189 function(fragment); 188 
insertImpl(InsertionPoint( bytecodeOffset, Position::After), includeBranch, WTFMove(instructions));189 } 190 191 void removeBytecode( unsigned bytecodeOffset)192 { 193 m_insertions.append(Insertion { InsertionPoint( bytecodeOffset, Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, opcodeLength(m_instructions[bytecodeOffset].u.opcode), { } });190 insertImpl(InsertionPoint(instruction.offset(), Position::After), includeBranch, WTFMove(writer)); 191 } 192 193 void removeBytecode(const InstructionStream::Ref& instruction) 194 { 195 m_insertions.append(Insertion { InsertionPoint(instruction.offset(), Position::OriginalBytecodePoint), Insertion::Type::Remove, IncludeBranch::No, instruction->size(), { } }); 194 196 } 195 197 … … 198 200 BytecodeGraph& graph() { return m_graph; } 199 201 200 int adjustAbsoluteOffset(int absoluteOffset)202 int32_t adjustAbsoluteOffset(InstructionStream::Offset absoluteOffset) 201 203 { 202 204 return adjustJumpTarget(InsertionPoint(0, Position::EntryPoint), InsertionPoint(absoluteOffset, Position::LabelPoint)); 203 205 } 204 206 205 int adjustJumpTarget(int originalBytecodeOffset, int originalJumpTarget)207 int32_t adjustJumpTarget(InstructionStream::Offset originalBytecodeOffset, int32_t originalJumpTarget) 206 208 { 207 209 return adjustJumpTarget(InsertionPoint(originalBytecodeOffset, Position::LabelPoint), InsertionPoint(originalJumpTarget, Position::LabelPoint)); 208 210 } 209 211 212 void adjustJumpTargets(); 213 210 214 private: 211 void insertImpl(InsertionPoint, IncludeBranch, Vector<UnlinkedInstruction>&& fragment);215 void insertImpl(InsertionPoint, IncludeBranch, InstructionStreamWriter&& fragment); 212 216 213 217 friend class UnlinkedCodeBlock; … … 218 222 template<typename Iterator> int calculateDifference(Iterator begin, Iterator end); 219 223 224 BytecodeGenerator& m_bytecodeGenerator; 220 225 BytecodeGraph& m_graph; 221 226 UnlinkedCodeBlock* m_codeBlock; 222 UnlinkedCodeBlock::UnpackedInstructions& m_instructions;227 InstructionStreamWriter& m_writer; 223 228 Vector<Insertion, 8> m_insertions; 224 229 }; -
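Worth noting in Fragment::appendInstruction above: the opcode is now a template parameter routed through BytecodeGenerator::withWriter, so Op::opcodeID is a compile-time constant and the isBranch check no longer inspects a runtime argument. A toy version of that shape, with hypothetical ops standing in for JSC's generated structs:

    #include <cstdio>
    #include <vector>

    struct OpToyJump { static constexpr int opcodeID = 1; };
    struct OpToyAdd { static constexpr int opcodeID = 2; };

    constexpr bool isBranch(int opcodeID) { return opcodeID == OpToyJump::opcodeID; }

    struct ToyFragment {
        std::vector<int> opcodes;
        bool includeBranch = false;

        template<class Op, class... Args>
        void appendInstruction(Args&&...)
        {
            // The branch check is resolved from the Op type itself, not
            // from a runtime opcode argument as in the old interface.
            if (isBranch(Op::opcodeID))
                includeBranch = true;
            opcodes.push_back(Op::opcodeID);
        }
    };

    int main()
    {
        ToyFragment fragment;
        fragment.appendInstruction<OpToyAdd>();
        fragment.appendInstruction<OpToyJump>();
        std::printf("%zu ops, includeBranch=%d\n", fragment.opcodes.size(), (int)fragment.includeBranch);
        return 0;
    }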
trunk/Source/JavaScriptCore/bytecode/BytecodeUseDef.h
r237486 r237547 27 27 28 28 #include "CodeBlock.h" 29 #include "Instruction.h" 30 #include <wtf/Forward.h> 29 31 30 32 namespace JSC { 31 33 32 template<typename Block, typename Functor, typename Instruction> 33 void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor) 34 #define CALL_FUNCTOR(__arg) \ 35 functor(__bytecode.__arg); 36 37 #define USES_OR_DEFS(__opcode, ...) \ 38 case __opcode::opcodeID: { \ 39 auto __bytecode = instruction->as<__opcode>(); \ 40 WTF_LAZY_FOR_EACH_TERM(CALL_FUNCTOR, __VA_ARGS__) \ 41 return; \ 42 } 43 44 #define USES USES_OR_DEFS 45 #define DEFS USES_OR_DEFS 46 47 template<typename Block, typename Functor> 48 void computeUsesForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) 34 49 { 35 50 if (opcodeID != op_enter && (codeBlock->wasCompiledWithDebuggingOpcodes() || codeBlock->usesEval()) && codeBlock->scopeRegister().isValid()) 36 functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); 51 functor(codeBlock->scopeRegister()); 52 53 auto handleNewArrayLike = [&](auto op) { 54 int base = op.argv.offset(); 55 for (int i = 0; i < static_cast<int>(op.argc); i++) 56 functor(VirtualRegister { base - i }); 57 }; 58 59 auto handleOpCallLike = [&](auto op) { 60 functor(op.callee); 61 int lastArg = -static_cast<int>(op.argv) + CallFrame::thisArgumentOffset(); 62 for (int i = 0; i < static_cast<int>(op.argc); i++) 63 functor(VirtualRegister { lastArg + i }); 64 if (opcodeID == op_call_eval) 65 functor(codeBlock->scopeRegister()); 66 return; 67 }; 37 68 38 69 switch (opcodeID) { 70 case op_wide: 71 RELEASE_ASSERT_NOT_REACHED(); 72 39 73 // No uses. 40 74 case op_new_regexp: … … 58 92 case op_super_sampler_end: 59 93 return; 60 case op_get_scope: 61 case op_to_this: 62 case op_check_tdz: 63 case op_identity_with_profile: 64 case op_profile_type: 65 case op_throw: 66 case op_throw_static_error: 67 case op_end: 68 case op_ret: 69 case op_jtrue: 70 case op_jfalse: 71 case op_jeq_null: 72 case op_jneq_null: 73 case op_dec: 74 case op_inc: 75 case op_log_shadow_chicken_prologue: { 76 ASSERT(opcodeLengths[opcodeID] > 1); 77 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 94 95 USES(OpGetScope, dst) 96 USES(OpToThis, srcDst) 97 USES(OpCheckTdz, target) 98 USES(OpIdentityWithProfile, srcDst) 99 USES(OpProfileType, target); 100 USES(OpThrow, value) 101 USES(OpThrowStaticError, message) 102 USES(OpEnd, value) 103 USES(OpRet, value) 104 USES(OpJtrue, condition) 105 USES(OpJfalse, condition) 106 USES(OpJeqNull, value) 107 USES(OpJneqNull, value) 108 USES(OpDec, srcDst) 109 USES(OpInc, srcDst) 110 USES(OpLogShadowChickenPrologue, scope) 111 112 USES(OpJless, lhs, rhs) 113 USES(OpJlesseq, lhs, rhs) 114 USES(OpJgreater, lhs, rhs) 115 USES(OpJgreatereq, lhs, rhs) 116 USES(OpJnless, lhs, rhs) 117 USES(OpJnlesseq, lhs, rhs) 118 USES(OpJngreater, lhs, rhs) 119 USES(OpJngreatereq, lhs, rhs) 120 USES(OpJeq, lhs, rhs) 121 USES(OpJneq, lhs, rhs) 122 USES(OpJstricteq, lhs, rhs) 123 USES(OpJnstricteq, lhs, rhs) 124 USES(OpJbelow, lhs, rhs) 125 USES(OpJbeloweq, lhs, rhs) 126 USES(OpSetFunctionName, function, name) 127 USES(OpLogShadowChickenTail, thisValue, scope) 128 129 USES(OpPutByVal, base, property, value) 130 USES(OpPutByValDirect, base, property, value) 131 132 USES(OpPutById, base, value) 133 USES(OpPutToScope, scope, value) 134 USES(OpPutToArguments, arguments, value) 135 136 USES(OpPutByIdWithThis, base, thisValue, value) 137 
138 USES(OpPutByValWithThis, base, thisValue, property, value) 139 140 USES(OpPutGetterById, base, accessor) 141 USES(OpPutSetterById, base, accessor) 142 143 USES(OpPutGetterSetterById, base, getter, setter) 144 145 USES(OpPutGetterByVal, base, property, accessor) 146 USES(OpPutSetterByVal, base, property, accessor) 147 148 USES(OpDefineDataProperty, base, property, value, attributes) 149 150 USES(OpDefineAccessorProperty, base, property, getter, setter, attributes) 151 152 USES(OpSpread, argument) 153 USES(OpGetPropertyEnumerator, base) 154 USES(OpGetEnumerableLength, base) 155 USES(OpNewFuncExp, scope) 156 USES(OpNewGeneratorFuncExp, scope) 157 USES(OpNewAsyncFuncExp, scope) 158 USES(OpToIndexString, index) 159 USES(OpCreateLexicalEnvironment, scope) 160 USES(OpResolveScope, scope) 161 USES(OpResolveScopeForHoistingFuncDeclInEval, scope) 162 USES(OpGetFromScope, scope) 163 USES(OpToPrimitive, src) 164 USES(OpTryGetById, base) 165 USES(OpGetById, base) 166 USES(OpGetByIdDirect, base) 167 USES(OpInById, base) 168 USES(OpTypeof, value) 169 USES(OpIsEmpty, operand) 170 USES(OpIsUndefined, operand) 171 USES(OpIsBoolean, operand) 172 USES(OpIsNumber, operand) 173 USES(OpIsObject, operand) 174 USES(OpIsObjectOrNull, operand) 175 USES(OpIsCellWithType, operand) 176 USES(OpIsFunction, operand) 177 USES(OpToNumber, operand) 178 USES(OpToString, operand) 179 USES(OpToObject, operand) 180 USES(OpNegate, operand) 181 USES(OpEqNull, operand) 182 USES(OpNeqNull, operand) 183 USES(OpNot, operand) 184 USES(OpUnsigned, operand) 185 USES(OpMov, src) 186 USES(OpNewArrayWithSize, length) 187 USES(OpCreateThis, callee) 188 USES(OpDelById, base) 189 USES(OpNewFunc, scope) 190 USES(OpNewAsyncGeneratorFunc, scope) 191 USES(OpNewAsyncGeneratorFuncExp, scope) 192 USES(OpNewGeneratorFunc, scope) 193 USES(OpNewAsyncFunc, scope) 194 USES(OpGetParentScope, scope) 195 USES(OpCreateScopedArguments, scope) 196 USES(OpCreateRest, arraySize) 197 USES(OpGetFromArguments, arguments) 198 USES(OpNewArrayBuffer, immutableButterfly) 199 200 USES(OpHasGenericProperty, base, property) 201 USES(OpHasIndexedProperty, base, property) 202 USES(OpEnumeratorStructurePname, enumerator, index) 203 USES(OpEnumeratorGenericPname, enumerator, index) 204 USES(OpGetByVal, base, property) 205 USES(OpInByVal, base, property) 206 USES(OpOverridesHasInstance, constructor, hasInstanceValue) 207 USES(OpInstanceof, value, prototype) 208 USES(OpAdd, lhs, rhs) 209 USES(OpMul, lhs, rhs) 210 USES(OpDiv, lhs, rhs) 211 USES(OpMod, lhs, rhs) 212 USES(OpSub, lhs, rhs) 213 USES(OpPow, lhs, rhs) 214 USES(OpLshift, lhs, rhs) 215 USES(OpRshift, lhs, rhs) 216 USES(OpUrshift, lhs, rhs) 217 USES(OpBitand, lhs, rhs) 218 USES(OpBitxor, lhs, rhs) 219 USES(OpBitor, lhs, rhs) 220 USES(OpLess, lhs, rhs) 221 USES(OpLesseq, lhs, rhs) 222 USES(OpGreater, lhs, rhs) 223 USES(OpGreatereq, lhs, rhs) 224 USES(OpBelow, lhs, rhs) 225 USES(OpBeloweq, lhs, rhs) 226 USES(OpNstricteq, lhs, rhs) 227 USES(OpStricteq, lhs, rhs) 228 USES(OpNeq, lhs, rhs) 229 USES(OpEq, lhs, rhs) 230 USES(OpPushWithScope, currentScope, newScope) 231 USES(OpGetByIdWithThis, base, thisValue) 232 USES(OpDelByVal, base, property) 233 USES(OpTailCallForwardArguments, callee, thisValue) 234 235 USES(OpGetByValWithThis, base, thisValue, property) 236 USES(OpInstanceofCustom, value, constructor, hasInstanceValue) 237 USES(OpHasStructureProperty, base, property, enumerator) 238 USES(OpConstructVarargs, callee, thisValue, arguments) 239 USES(OpCallVarargs, callee, thisValue, arguments) 240 USES(OpTailCallVarargs, 
callee, thisValue, arguments) 241 242 USES(OpGetDirectPname, base, property, index, enumerator) 243 244 USES(OpSwitchString, scrutinee) 245 USES(OpSwitchChar, scrutinee) 246 USES(OpSwitchImm, scrutinee) 247 248 USES(OpYield, generator, argument) 249 250 case op_new_array_with_spread: 251 handleNewArrayLike(instruction->as<OpNewArrayWithSpread>()); 252 return; 253 case op_new_array: 254 handleNewArrayLike(instruction->as<OpNewArray>()); 255 return; 256 257 case op_strcat: { 258 auto bytecode = instruction->as<OpStrcat>(); 259 int base = bytecode.src.offset(); 260 for (int i = 0; i < bytecode.count; i++) 261 functor(VirtualRegister { base - i }); 78 262 return; 79 263 } 80 case op_jlesseq: 81 case op_jgreater: 82 case op_jgreatereq: 83 case op_jnless: 84 case op_jnlesseq: 85 case op_jngreater: 86 case op_jngreatereq: 87 case op_jless: 88 case op_jeq: 89 case op_jneq: 90 case op_jstricteq: 91 case op_jnstricteq: 92 case op_jbelow: 93 case op_jbeloweq: 94 case op_set_function_name: 95 case op_log_shadow_chicken_tail: { 96 ASSERT(opcodeLengths[opcodeID] > 2); 97 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 98 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 99 return; 100 } 101 case op_put_by_val_direct: 102 case op_put_by_val: { 103 ASSERT(opcodeLengths[opcodeID] > 3); 104 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 105 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 106 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 107 return; 108 } 109 case op_put_by_id: 110 case op_put_to_scope: 111 case op_put_to_arguments: { 112 ASSERT(opcodeLengths[opcodeID] > 3); 113 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 114 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 115 return; 116 } 117 case op_put_by_id_with_this: { 118 ASSERT(opcodeLengths[opcodeID] > 4); 119 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 120 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 121 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 122 return; 123 } 124 case op_put_by_val_with_this: { 125 ASSERT(opcodeLengths[opcodeID] > 4); 126 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 127 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 128 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 129 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 130 return; 131 } 132 case op_put_getter_by_id: 133 case op_put_setter_by_id: { 134 ASSERT(opcodeLengths[opcodeID] > 4); 135 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 136 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 137 return; 138 } 139 case op_put_getter_setter_by_id: { 140 ASSERT(opcodeLengths[opcodeID] > 5); 141 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 142 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 143 functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); 144 return; 145 } 146 case op_put_getter_by_val: 147 case op_put_setter_by_val: { 148 ASSERT(opcodeLengths[opcodeID] > 4); 149 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 150 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 151 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 152 return; 153 } 154 case op_define_data_property: { 155 ASSERT(opcodeLengths[opcodeID] > 
4); 156 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 157 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 158 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 159 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 160 return; 161 } 162 case op_define_accessor_property: { 163 ASSERT(opcodeLengths[opcodeID] > 5); 164 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 165 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 166 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 167 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 168 functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); 169 return; 170 } 171 case op_spread: 172 case op_get_property_enumerator: 173 case op_get_enumerable_length: 174 case op_new_func_exp: 175 case op_new_generator_func_exp: 176 case op_new_async_func_exp: 177 case op_to_index_string: 178 case op_create_lexical_environment: 179 case op_resolve_scope: 180 case op_resolve_scope_for_hoisting_func_decl_in_eval: 181 case op_get_from_scope: 182 case op_to_primitive: 183 case op_try_get_by_id: 184 case op_get_by_id: 185 case op_get_by_id_proto_load: 186 case op_get_by_id_unset: 187 case op_get_by_id_direct: 188 case op_get_array_length: 189 case op_in_by_id: 190 case op_typeof: 191 case op_is_empty: 192 case op_is_undefined: 193 case op_is_boolean: 194 case op_is_number: 195 case op_is_object: 196 case op_is_object_or_null: 197 case op_is_cell_with_type: 198 case op_is_function: 199 case op_to_number: 200 case op_to_string: 201 case op_to_object: 202 case op_negate: 203 case op_neq_null: 204 case op_eq_null: 205 case op_not: 206 case op_mov: 207 case op_new_array_with_size: 208 case op_create_this: 209 case op_del_by_id: 210 case op_unsigned: 211 case op_new_func: 212 case op_new_async_generator_func: 213 case op_new_async_generator_func_exp: 214 case op_new_generator_func: 215 case op_new_async_func: 216 case op_get_parent_scope: 217 case op_create_scoped_arguments: 218 case op_create_rest: 219 case op_get_from_arguments: 220 case op_new_array_buffer: { 221 ASSERT(opcodeLengths[opcodeID] > 2); 222 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 223 return; 224 } 225 case op_has_generic_property: 226 case op_has_indexed_property: 227 case op_enumerator_structure_pname: 228 case op_enumerator_generic_pname: 229 case op_get_by_val: 230 case op_in_by_val: 231 case op_overrides_has_instance: 232 case op_instanceof: 233 case op_add: 234 case op_mul: 235 case op_div: 236 case op_mod: 237 case op_sub: 238 case op_pow: 239 case op_lshift: 240 case op_rshift: 241 case op_urshift: 242 case op_bitand: 243 case op_bitxor: 244 case op_bitor: 245 case op_less: 246 case op_lesseq: 247 case op_greater: 248 case op_greatereq: 249 case op_below: 250 case op_beloweq: 251 case op_nstricteq: 252 case op_stricteq: 253 case op_neq: 254 case op_eq: 255 case op_push_with_scope: 256 case op_get_by_id_with_this: 257 case op_del_by_val: 258 case op_tail_call_forward_arguments: { 259 ASSERT(opcodeLengths[opcodeID] > 3); 260 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 261 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 262 return; 263 } 264 case op_get_by_val_with_this: { 265 ASSERT(opcodeLengths[opcodeID] > 4); 266 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 267 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 268 
functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 269 return; 270 } 271 case op_instanceof_custom: 272 case op_has_structure_property: 273 case op_construct_varargs: 274 case op_call_varargs: 275 case op_tail_call_varargs: { 276 ASSERT(opcodeLengths[opcodeID] > 4); 277 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 278 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 279 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 280 return; 281 } 282 case op_get_direct_pname: { 283 ASSERT(opcodeLengths[opcodeID] > 5); 284 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 285 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 286 functor(codeBlock, instruction, opcodeID, instruction[4].u.operand); 287 functor(codeBlock, instruction, opcodeID, instruction[5].u.operand); 288 return; 289 } 290 case op_switch_string: 291 case op_switch_char: 292 case op_switch_imm: { 293 ASSERT(opcodeLengths[opcodeID] > 3); 294 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 295 return; 296 } 297 case op_new_array_with_spread: 298 case op_new_array: 299 case op_strcat: { 300 int base = instruction[2].u.operand; 301 int count = instruction[3].u.operand; 302 for (int i = 0; i < count; i++) 303 functor(codeBlock, instruction, opcodeID, base - i); 304 return; 305 } 264 306 265 case op_construct: 266 handleOpCallLike(instruction->as<OpConstruct>()); 267 return; 307 268 case op_call_eval: 269 handleOpCallLike(instruction->as<OpCallEval>()); 270 return; 308 271 case op_call: 309 case op_tail_call: { 310 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 311 int argCount = instruction[3].u.operand; 312 int registerOffset = -instruction[4].u.operand; 313 int lastArg = registerOffset + CallFrame::thisArgumentOffset(); 314 for (int i = 0; i < argCount; i++) 315 functor(codeBlock, instruction, opcodeID, lastArg + i); 316 if (opcodeID == op_call_eval) 317 functor(codeBlock, instruction, opcodeID, codeBlock->scopeRegister().offset()); 318 return; 319 } 320 case op_yield: { 321 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 322 functor(codeBlock, instruction, opcodeID, instruction[3].u.operand); 323 return; 324 } 272 handleOpCallLike(instruction->as<OpCall>()); 273 return; 274 case op_tail_call: 275 handleOpCallLike(instruction->as<OpTailCall>()); 276 return; 277 325 278 default: 326 279 RELEASE_ASSERT_NOT_REACHED(); … … 329 282 } 330 283 331 template<typename Block, typename Instruction, typenameFunctor>332 void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, Instruction* instruction, const Functor& functor)284 template<typename Block, typename Functor> 285 void computeDefsForBytecodeOffset(Block* codeBlock, OpcodeID opcodeID, const Instruction* instruction, const Functor& functor) 333 286 { 334 287 switch (opcodeID) { 288 case op_wide: 289 RELEASE_ASSERT_NOT_REACHED(); 290 335 291 // These don't define anything. 336 292 case op_put_to_scope: … … 393 349 return; 394 350 // These all have a single destination for the first argument. 
395 case op_argument_count: 396 case op_to_index_string: 397 case op_get_enumerable_length: 398 case op_has_indexed_property: 399 case op_has_structure_property: 400 case op_has_generic_property: 401 case op_get_direct_pname: 402 case op_get_property_enumerator: 403 case op_enumerator_structure_pname: 404 case op_enumerator_generic_pname: 405 case op_get_parent_scope: 406 case op_push_with_scope: 407 case op_create_lexical_environment: 408 case op_resolve_scope: 409 case op_resolve_scope_for_hoisting_func_decl_in_eval: 410 case op_strcat: 411 case op_to_primitive: 412 case op_create_this: 413 case op_new_array: 414 case op_new_array_with_spread: 415 case op_spread: 416 case op_new_array_buffer: 417 case op_new_array_with_size: 418 case op_new_regexp: 419 case op_new_func: 420 case op_new_func_exp: 421 case op_new_generator_func: 422 case op_new_generator_func_exp: 423 case op_new_async_generator_func: 424 case op_new_async_generator_func_exp: 425 case op_new_async_func: 426 case op_new_async_func_exp: 427 case op_call_varargs: 428 case op_tail_call_varargs: 429 case op_tail_call_forward_arguments: 430 case op_construct_varargs: 431 case op_get_from_scope: 432 case op_call: 433 case op_tail_call: 434 case op_call_eval: 435 case op_construct: 436 case op_try_get_by_id: 437 case op_get_by_id: 438 case op_get_by_id_proto_load: 439 case op_get_by_id_unset: 440 case op_get_by_id_direct: 441 case op_get_by_id_with_this: 442 case op_get_by_val_with_this: 443 case op_get_array_length: 444 case op_overrides_has_instance: 445 case op_instanceof: 446 case op_instanceof_custom: 447 case op_get_by_val: 448 case op_typeof: 449 case op_identity_with_profile: 450 case op_is_empty: 451 case op_is_undefined: 452 case op_is_boolean: 453 case op_is_number: 454 case op_is_object: 455 case op_is_object_or_null: 456 case op_is_cell_with_type: 457 case op_is_function: 458 case op_in_by_id: 459 case op_in_by_val: 460 case op_to_number: 461 case op_to_string: 462 case op_to_object: 463 case op_negate: 464 case op_add: 465 case op_mul: 466 case op_div: 467 case op_mod: 468 case op_sub: 469 case op_pow: 470 case op_lshift: 471 case op_rshift: 472 case op_urshift: 473 case op_bitand: 474 case op_bitxor: 475 case op_bitor: 476 case op_inc: 477 case op_dec: 478 case op_eq: 479 case op_neq: 480 case op_stricteq: 481 case op_nstricteq: 482 case op_less: 483 case op_lesseq: 484 case op_greater: 485 case op_greatereq: 486 case op_below: 487 case op_beloweq: 488 case op_neq_null: 489 case op_eq_null: 490 case op_not: 491 case op_mov: 492 case op_new_object: 493 case op_to_this: 494 case op_check_tdz: 495 case op_get_scope: 496 case op_create_direct_arguments: 497 case op_create_scoped_arguments: 498 case op_create_cloned_arguments: 499 case op_del_by_id: 500 case op_del_by_val: 501 case op_unsigned: 502 case op_get_from_arguments: 503 case op_get_argument: 504 case op_create_rest: 505 case op_get_rest_length: { 506 ASSERT(opcodeLengths[opcodeID] > 1); 507 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 508 return; 509 } 510 case op_catch: { 511 ASSERT(opcodeLengths[opcodeID] > 2); 512 functor(codeBlock, instruction, opcodeID, instruction[1].u.operand); 513 functor(codeBlock, instruction, opcodeID, instruction[2].u.operand); 514 return; 515 } 351 DEFS(OpArgumentCount, dst) 352 DEFS(OpToIndexString, dst) 353 DEFS(OpGetEnumerableLength, dst) 354 DEFS(OpHasIndexedProperty, dst) 355 DEFS(OpHasStructureProperty, dst) 356 DEFS(OpHasGenericProperty, dst) 357 DEFS(OpGetDirectPname, dst) 358 
DEFS(OpGetPropertyEnumerator, dst) 359 DEFS(OpEnumeratorStructurePname, dst) 360 DEFS(OpEnumeratorGenericPname, dst) 361 DEFS(OpGetParentScope, dst) 362 DEFS(OpPushWithScope, dst) 363 DEFS(OpCreateLexicalEnvironment, dst) 364 DEFS(OpResolveScope, dst) 365 DEFS(OpResolveScopeForHoistingFuncDeclInEval, dst) 366 DEFS(OpStrcat, dst) 367 DEFS(OpToPrimitive, dst) 368 DEFS(OpCreateThis, dst) 369 DEFS(OpNewArray, dst) 370 DEFS(OpNewArrayWithSpread, dst) 371 DEFS(OpSpread, dst) 372 DEFS(OpNewArrayBuffer, dst) 373 DEFS(OpNewArrayWithSize, dst) 374 DEFS(OpNewRegexp, dst) 375 DEFS(OpNewFunc, dst) 376 DEFS(OpNewFuncExp, dst) 377 DEFS(OpNewGeneratorFunc, dst) 378 DEFS(OpNewGeneratorFuncExp, dst) 379 DEFS(OpNewAsyncGeneratorFunc, dst) 380 DEFS(OpNewAsyncGeneratorFuncExp, dst) 381 DEFS(OpNewAsyncFunc, dst) 382 DEFS(OpNewAsyncFuncExp, dst) 383 DEFS(OpCallVarargs, dst) 384 DEFS(OpTailCallVarargs, dst) 385 DEFS(OpTailCallForwardArguments, dst) 386 DEFS(OpConstructVarargs, dst) 387 DEFS(OpGetFromScope, dst) 388 DEFS(OpCall, dst) 389 DEFS(OpTailCall, dst) 390 DEFS(OpCallEval, dst) 391 DEFS(OpConstruct, dst) 392 DEFS(OpTryGetById, dst) 393 DEFS(OpGetById, dst) 394 DEFS(OpGetByIdDirect, dst) 395 DEFS(OpGetByIdWithThis, dst) 396 DEFS(OpGetByValWithThis, dst) 397 DEFS(OpOverridesHasInstance, dst) 398 DEFS(OpInstanceof, dst) 399 DEFS(OpInstanceofCustom, dst) 400 DEFS(OpGetByVal, dst) 401 DEFS(OpTypeof, dst) 402 DEFS(OpIdentityWithProfile, srcDst) 403 DEFS(OpIsEmpty, dst) 404 DEFS(OpIsUndefined, dst) 405 DEFS(OpIsBoolean, dst) 406 DEFS(OpIsNumber, dst) 407 DEFS(OpIsObject, dst) 408 DEFS(OpIsObjectOrNull, dst) 409 DEFS(OpIsCellWithType, dst) 410 DEFS(OpIsFunction, dst) 411 DEFS(OpInById, dst) 412 DEFS(OpInByVal, dst) 413 DEFS(OpToNumber, dst) 414 DEFS(OpToString, dst) 415 DEFS(OpToObject, dst) 416 DEFS(OpNegate, dst) 417 DEFS(OpAdd, dst) 418 DEFS(OpMul, dst) 419 DEFS(OpDiv, dst) 420 DEFS(OpMod, dst) 421 DEFS(OpSub, dst) 422 DEFS(OpPow, dst) 423 DEFS(OpLshift, dst) 424 DEFS(OpRshift, dst) 425 DEFS(OpUrshift, dst) 426 DEFS(OpBitand, dst) 427 DEFS(OpBitxor, dst) 428 DEFS(OpBitor, dst) 429 DEFS(OpInc, srcDst) 430 DEFS(OpDec, srcDst) 431 DEFS(OpEq, dst) 432 DEFS(OpNeq, dst) 433 DEFS(OpStricteq, dst) 434 DEFS(OpNstricteq, dst) 435 DEFS(OpLess, dst) 436 DEFS(OpLesseq, dst) 437 DEFS(OpGreater, dst) 438 DEFS(OpGreatereq, dst) 439 DEFS(OpBelow, dst) 440 DEFS(OpBeloweq, dst) 441 DEFS(OpNeqNull, dst) 442 DEFS(OpEqNull, dst) 443 DEFS(OpNot, dst) 444 DEFS(OpMov, dst) 445 DEFS(OpNewObject, dst) 446 DEFS(OpToThis, srcDst) 447 DEFS(OpCheckTdz, target) 448 DEFS(OpGetScope, dst) 449 DEFS(OpCreateDirectArguments, dst) 450 DEFS(OpCreateScopedArguments, dst) 451 DEFS(OpCreateClonedArguments, dst) 452 DEFS(OpDelById, dst) 453 DEFS(OpDelByVal, dst) 454 DEFS(OpUnsigned, dst) 455 DEFS(OpGetFromArguments, dst) 456 DEFS(OpGetArgument, dst) 457 DEFS(OpCreateRest, dst) 458 DEFS(OpGetRestLength, dst) 459 460 DEFS(OpCatch, exception, thrownValue) 461 516 462 case op_enter: { 517 463 for (unsigned i = codeBlock->numVars(); i--;) 518 functor( codeBlock, instruction, opcodeID, virtualRegisterForLocal(i).offset());464 functor(virtualRegisterForLocal(i)); 519 465 return; 520 466 } … … 522 468 } 523 469 470 #undef CALL_FUNCTOR 471 #undef USES_OR_DEFS 472 #undef USES 473 #undef DEFS 524 474 } // namespace JSC -
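To make the USES/DEFS tables above easier to read: each entry is just a case label produced by the macros defined at the top of the file. For example, USES(OpAdd, lhs, rhs) expands (via USES_OR_DEFS, CALL_FUNCTOR, and WTF_LAZY_FOR_EACH_TERM) to roughly:

    case OpAdd::opcodeID: {
        auto __bytecode = instruction->as<OpAdd>();
        functor(__bytecode.lhs);
        functor(__bytecode.rhs);
        return;
    }

The functor therefore receives typed fields by name rather than positional instruction[n].u.operand slots, which is what lets the liveness callers take a VirtualRegister directly.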
trunk/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
r237486 r237547 27 27 #include "CallLinkStatus.h" 28 28 29 #include "BytecodeStructs.h" 29 30 #include "CallLinkInfo.h" 30 31 #include "CodeBlock.h" … … 67 68 #endif 68 69 69 Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; 70 OpcodeID op = Interpreter::getOpcodeID(instruction[0].u.opcode); 71 if (op != op_call && op != op_construct && op != op_tail_call) 70 auto instruction = profiledBlock->instructions().at(bytecodeIndex); 71 OpcodeID op = instruction->opcodeID(); 72 73 LLIntCallLinkInfo* callLinkInfo; 74 switch (op) { 75 case op_call: 76 callLinkInfo = &instruction->as<OpCall>().metadata(profiledBlock).callLinkInfo; 77 break; 78 case op_construct: 79 callLinkInfo = &instruction->as<OpConstruct>().metadata(profiledBlock).callLinkInfo; 80 break; 81 case op_tail_call: 82 callLinkInfo = &instruction->as<OpTailCall>().metadata(profiledBlock).callLinkInfo; 83 break; 84 default: 72 85 return CallLinkStatus(); 73 74 LLIntCallLinkInfo* callLinkInfo = instruction[5].u.callLinkInfo;86 } 87 75 88 76 89 return CallLinkStatus(callLinkInfo->lastSeenCallee.get()); -
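The switch above is needed because LLIntCallLinkInfo no longer sits in an operand slot of the instruction (the old instruction[5].u.callLinkInfo); each opcode has its own Metadata struct reached through the code block. A toy model of the idea, with a hypothetical layout rather than JSC's actual MetadataTable:

    #include <cstdio>
    #include <vector>

    // Hypothetical layout: mutable profiling state lives in arrays hanging
    // off the code block, one slot per call site, so the bytecode stream
    // itself can stay read-only and shareable.
    struct ToyCallLinkInfo {
        unsigned callCount = 0;
    };

    struct ToyCodeBlock {
        std::vector<ToyCallLinkInfo> callMetadata; // one entry per op_call

        ToyCallLinkInfo& metadataFor(unsigned metadataID) { return callMetadata[metadataID]; }
    };

    int main()
    {
        ToyCodeBlock block;
        block.callMetadata.resize(2); // two call sites in this toy block
        block.metadataFor(0).callCount++; // profiling writes mutate metadata only
        std::printf("call site 0 count: %u\n", block.metadataFor(0).callCount);
        return 0;
    }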
trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp
r237486 r237547 39 39 #include "BytecodeUseDef.h" 40 40 #include "CallLinkStatus.h" 41 #include "CodeBlockInlines.h" 41 42 #include "CodeBlockSet.h" 42 43 #include "DFGCapabilities.h" … … 52 53 #include "GetPutInfo.h" 53 54 #include "InlineCallFrame.h" 55 #include "Instruction.h" 56 #include "InstructionStream.h" 54 57 #include "InterpreterInlines.h" 55 58 #include "IsoCellSetInlines.h" … … 69 72 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" 70 73 #include "LowLevelInterpreter.h" 74 #include "MetadataTable.h" 71 75 #include "ModuleProgramCodeBlock.h" 72 76 #include "ObjectAllocationProfileInlines.h" 77 #include "OpcodeInlines.h" 73 78 #include "PCToCodeOriginMap.h" 74 79 #include "PolymorphicAccess.h" … … 82 87 #include "TypeLocationCache.h" 83 88 #include "TypeProfiler.h" 84 #include "UnlinkedInstructionStream.h"85 89 #include "VMInlines.h" 86 90 #include <wtf/BagToHashMap.h> 87 91 #include <wtf/CommaPrinter.h> 92 #include <wtf/Forward.h> 88 93 #include <wtf/SimpleStats.h> 89 94 #include <wtf/StringPrintStream.h> … … 244 249 } 245 250 246 void CodeBlock::dumpBytecode(PrintStream& out, const Instruction * begin, const Instruction*& it, const ICStatusMap& statusMap)247 { 248 BytecodeDumper<CodeBlock>::dumpBytecode(this, out, begin,it, statusMap);251 void CodeBlock::dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& statusMap) 252 { 253 BytecodeDumper<CodeBlock>::dumpBytecode(this, out, it, statusMap); 249 254 } 250 255 251 256 void CodeBlock::dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& statusMap) 252 257 { 253 const Instruction* it = &instructions()[bytecodeOffset]; 254 dumpBytecode(out, instructions().begin(), it, statusMap); 255 } 256 257 #define FOR_EACH_MEMBER_VECTOR(macro) \ 258 macro(instructions) \ 259 macro(callLinkInfos) \ 260 macro(linkedCallerList) \ 261 macro(identifiers) \ 262 macro(functionExpressions) \ 263 macro(constantRegisters) 264 265 template<typename T> 266 static size_t sizeInBytes(const Vector<T>& vector) 267 { 268 return vector.capacity() * sizeof(T); 258 const auto it = instructions().at(bytecodeOffset); 259 dumpBytecode(out, it, statusMap); 269 260 } 270 261 … … 313 304 , m_ownerExecutable(*other.vm(), this, other.m_ownerExecutable.get()) 314 305 , m_poisonedVM(other.m_poisonedVM) 306 , m_instructionCount(other.m_instructionCount) 315 307 , m_instructions(other.m_instructions) 316 308 , m_thisRegister(other.m_thisRegister) … … 327 319 , m_optimizationDelayCounter(0) 328 320 , m_reoptimizationRetryCounter(0) 321 , m_metadata(other.m_metadata) 329 322 , m_creationTime(MonotonicTime::now()) 330 323 { … … 376 369 , m_ownerExecutable(*vm, this, ownerExecutable) 377 370 , m_poisonedVM(vm) 371 , m_instructions(&unlinkedCodeBlock->instructions()) 378 372 , m_thisRegister(unlinkedCodeBlock->thisRegister()) 379 373 , m_scopeRegister(unlinkedCodeBlock->scopeRegister()) … … 384 378 , m_optimizationDelayCounter(0) 385 379 , m_reoptimizationRetryCounter(0) 380 , m_metadata(unlinkedCodeBlock->metadata().link()) 386 381 , m_creationTime(MonotonicTime::now()) 387 382 { … … 496 491 } 497 492 498 // Allocate metadata buffers for the bytecode499 if (size_t size = unlinkedCodeBlock->numberOfLLintCallLinkInfos())500 m_llintCallLinkInfos = RefCountedArray<LLIntCallLinkInfo>(size);501 if (size_t size = unlinkedCodeBlock->numberOfArrayProfiles())502 m_arrayProfiles.grow(size);503 if (size_t size = unlinkedCodeBlock->numberOfArrayAllocationProfiles())504 m_arrayAllocationProfiles = 
RefCountedArray<ArrayAllocationProfile>(size);505 if (size_t size = unlinkedCodeBlock->numberOfValueProfiles())506 m_valueProfiles = RefCountedArray<ValueProfile>(size);507 if (!vm.canUseJIT())508 RELEASE_ASSERT(!m_valueProfiles.size());509 if (size_t size = unlinkedCodeBlock->numberOfObjectAllocationProfiles())510 m_objectAllocationProfiles = RefCountedArray<ObjectAllocationProfile>(size);511 512 493 #if !ENABLE(C_LOOP) 513 494 setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); 514 495 #endif 515 496 516 // Copy and translate the UnlinkedInstructions517 unsigned instructionCount = unlinkedCodeBlock->instructions().count();518 UnlinkedInstructionStream::Reader instructionReader(unlinkedCodeBlock->instructions());519 520 497 // Bookkeep the strongly referenced module environments. 521 498 HashSet<JSModuleEnvironment*> stronglyReferencedModuleEnvironments; 522 499 523 RefCountedArray<Instruction> instructions(instructionCount); 524 525 unsigned valueProfileCount = 0; 526 auto linkValueProfile = [&](unsigned bytecodeOffset, unsigned opLength) { 527 if (!vm.canUseJIT()) { 528 ASSERT(vm.noJITValueProfileSingleton); 529 instructions[bytecodeOffset + opLength - 1] = vm.noJITValueProfileSingleton.get(); 530 return; 531 } 532 533 unsigned valueProfileIndex = valueProfileCount++; 534 ValueProfile* profile = &m_valueProfiles[valueProfileIndex]; 535 ASSERT(profile->m_bytecodeOffset == -1); 536 profile->m_bytecodeOffset = bytecodeOffset; 537 instructions[bytecodeOffset + opLength - 1] = profile; 500 auto link_profile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) { 501 m_numberOfNonArgumentValueProfiles++; 502 metadata.profile.m_bytecodeOffset = instruction.offset(); 538 503 }; 539 504 540 for (unsigned i = 0; !instructionReader.atEnd(); ) { 541 const UnlinkedInstruction* pc = instructionReader.next(); 542 543 unsigned opLength = opcodeLength(pc[0].u.opcode); 544 545 instructions[i] = Interpreter::getOpcode(pc[0].u.opcode); 546 for (size_t j = 1; j < opLength; ++j) { 547 if (sizeof(int32_t) != sizeof(intptr_t)) 548 instructions[i + j].u.pointer = 0; 549 instructions[i + j].u.operand = pc[j].u.operand; 550 } 551 switch (pc[0].u.opcode) { 552 case op_has_indexed_property: { 553 int arrayProfileIndex = pc[opLength - 1].u.operand; 554 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); 555 556 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; 557 break; 558 } 559 case op_call_varargs: 560 case op_tail_call_varargs: 561 case op_tail_call_forward_arguments: 562 case op_construct_varargs: 563 case op_get_by_val: { 564 int arrayProfileIndex = pc[opLength - 2].u.operand; 565 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); 566 567 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; 568 FALLTHROUGH; 569 } 570 case op_get_direct_pname: 571 case op_get_by_id: 572 case op_get_by_id_with_this: 573 case op_try_get_by_id: 574 case op_get_by_id_direct: 575 case op_get_by_val_with_this: 576 case op_get_from_arguments: 577 case op_to_number: 578 case op_to_object: 579 case op_get_argument: { 580 linkValueProfile(i, opLength); 581 break; 582 } 583 584 case op_bitand: 585 case op_bitor: 586 case op_to_this: { 587 linkValueProfile(i, opLength); 588 break; 589 } 590 591 case op_in_by_val: 592 case op_put_by_val: 593 case op_put_by_val_direct: { 594 int arrayProfileIndex = pc[opLength - 1].u.operand; 595 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); 596 instructions[i + opLength - 1] = &m_arrayProfiles[arrayProfileIndex]; 597 break; 
598 } 599 600 case op_new_array: 601 case op_new_array_with_size: 602 case op_new_array_buffer: { 603 unsigned arrayAllocationProfileIndex; 604 IndexingType recommendedIndexingType; 605 std::tie(arrayAllocationProfileIndex, recommendedIndexingType) = UnlinkedCodeBlock::decompressArrayAllocationProfile(pc[opLength - 1].u.operand); 606 607 ArrayAllocationProfile* profile = &m_arrayAllocationProfiles[arrayAllocationProfileIndex]; 608 if (pc[0].u.opcode == op_new_array_buffer) 609 profile->initializeIndexingMode(recommendedIndexingType); 610 instructions[i + opLength - 1] = profile; 611 break; 612 } 613 614 case op_new_object: { 615 int objectAllocationProfileIndex = pc[opLength - 1].u.operand; 616 ObjectAllocationProfile* objectAllocationProfile = &m_objectAllocationProfiles[objectAllocationProfileIndex]; 617 int inferredInlineCapacity = pc[opLength - 2].u.operand; 618 619 instructions[i + opLength - 1] = objectAllocationProfile; 620 objectAllocationProfile->initializeProfile(vm, 621 m_globalObject.get(), this, m_globalObject->objectPrototype(), inferredInlineCapacity); 622 break; 623 } 624 625 case op_call: 626 case op_tail_call: 627 case op_call_eval: { 628 linkValueProfile(i, opLength); 629 int arrayProfileIndex = pc[opLength - 2].u.operand; 630 m_arrayProfiles[arrayProfileIndex] = ArrayProfile(i); 631 instructions[i + opLength - 2] = &m_arrayProfiles[arrayProfileIndex]; 632 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; 633 break; 634 } 635 case op_construct: { 636 instructions[i + 5] = &m_llintCallLinkInfos[pc[5].u.operand]; 637 linkValueProfile(i, opLength); 638 break; 639 } 640 case op_get_array_length: 641 CRASH(); 505 auto link_arrayProfile = [&](const auto& instruction, auto /*bytecode*/, auto& metadata) { 506 metadata.arrayProfile.m_bytecodeOffset = instruction.offset(); 507 }; 508 509 auto link_objectAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) { 510 metadata.objectAllocationProfile.initializeProfile(vm, m_globalObject.get(), this, m_globalObject->objectPrototype(), bytecode.inlineCapacity); 511 }; 512 513 auto link_arrayAllocationProfile = [&](const auto& /*instruction*/, auto bytecode, auto& metadata) { 514 metadata.arrayAllocationProfile.initializeIndexingMode(bytecode.recommendedIndexingType); 515 }; 516 517 auto link_hitCountForLLIntCaching = [&](const auto& /*instruction*/, auto /*bytecode*/, auto& metadata) { 518 metadata.hitCountForLLIntCaching = Options::prototypeHitCountForLLIntCaching(); 519 }; 520 521 #define LINK_FIELD(__field) \ 522 WTF_LAZY_JOIN(link_, __field)(instruction, bytecode, metadata); 523 524 #define INITIALIZE_METADATA(__op) \ 525 auto bytecode = instruction->as<__op>(); \ 526 auto& metadata = bytecode.metadata(this); \ 527 new (&metadata) __op::Metadata { bytecode }; \ 528 529 #define CASE(__op) case __op::opcodeID 530 531 #define LINK(...) 
\ 532 CASE(WTF_LAZY_FIRST(__VA_ARGS__)): { \ 533 INITIALIZE_METADATA(WTF_LAZY_FIRST(__VA_ARGS__)) \ 534 WTF_LAZY_HAS_REST(__VA_ARGS__)({ \ 535 WTF_LAZY_FOR_EACH_TERM(LINK_FIELD, WTF_LAZY_REST_(__VA_ARGS__)) \ 536 }) \ 537 break; \ 538 } 539 540 for (const auto& instruction : *m_instructions) { 541 OpcodeID opcodeID = instruction->opcodeID(); 542 m_instructionCount += opcodeLengths[opcodeID]; 543 switch (opcodeID) { 544 LINK(OpHasIndexedProperty, arrayProfile) 545 546 LINK(OpCallVarargs, arrayProfile, profile) 547 LINK(OpTailCallVarargs, arrayProfile, profile) 548 LINK(OpTailCallForwardArguments, arrayProfile, profile) 549 LINK(OpConstructVarargs, arrayProfile, profile) 550 LINK(OpGetByVal, arrayProfile, profile) 551 552 LINK(OpGetDirectPname, profile) 553 LINK(OpGetByIdWithThis, profile) 554 LINK(OpTryGetById, profile) 555 LINK(OpGetByIdDirect, profile) 556 LINK(OpGetByValWithThis, profile) 557 LINK(OpGetFromArguments, profile) 558 LINK(OpToNumber, profile) 559 LINK(OpToObject, profile) 560 LINK(OpGetArgument, profile) 561 LINK(OpToThis, profile) 562 LINK(OpBitand, profile) 563 LINK(OpBitor, profile) 564 565 LINK(OpGetById, profile, hitCountForLLIntCaching) 566 567 LINK(OpCall, profile, arrayProfile) 568 LINK(OpTailCall, profile, arrayProfile) 569 LINK(OpCallEval, profile, arrayProfile) 570 LINK(OpConstruct, profile, arrayProfile) 571 572 LINK(OpInByVal, arrayProfile) 573 LINK(OpPutByVal, arrayProfile) 574 LINK(OpPutByValDirect, arrayProfile) 575 576 LINK(OpNewArray) 577 LINK(OpNewArrayWithSize) 578 LINK(OpNewArrayBuffer, arrayAllocationProfile) 579 580 LINK(OpNewObject, objectAllocationProfile) 581 582 LINK(OpPutById) 583 LINK(OpCreateThis) 584 585 LINK(OpAdd) 586 LINK(OpMul) 587 LINK(OpDiv) 588 LINK(OpSub) 589 LINK(OpBitxor) 590 591 LINK(OpNegate) 592 593 LINK(OpJneqPtr) 594 595 LINK(OpCatch) 596 LINK(OpProfileControlFlow) 642 597 643 598 case op_resolve_scope: { 644 const Identifier& ident = identifier(pc[3].u.operand);645 ResolveType type = static_cast<ResolveType>(pc[4].u.operand); 646 RELEASE_ASSERT(type != LocalClosureVar);647 int localScopeDepth = pc[5].u.operand;648 649 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);599 INITIALIZE_METADATA(OpResolveScope) 600 601 const Identifier& ident = identifier(bytecode.var); 602 RELEASE_ASSERT(bytecode.resolveType != LocalClosureVar); 603 604 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.localScopeDepth, scope, ident, Get, bytecode.resolveType, InitializationMode::NotInitialization); 650 605 RETURN_IF_EXCEPTION(throwScope, false); 651 606 652 instructions[i + 4].u.operand= op.type;653 instructions[i + 5].u.operand= op.depth;607 metadata.resolveType = op.type; 608 metadata.localScopeDepth = op.depth; 654 609 if (op.lexicalEnvironment) { 655 610 if (op.type == ModuleVar) { … … 657 612 if (stronglyReferencedModuleEnvironments.add(jsCast<JSModuleEnvironment*>(op.lexicalEnvironment)).isNewEntry) 658 613 addConstant(op.lexicalEnvironment); 659 instructions[i + 6].u.jsCell.set(vm, this, op.lexicalEnvironment);614 metadata.lexicalEnvironment.set(vm, this, op.lexicalEnvironment); 660 615 } else 661 instructions[i + 6].u.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable());616 metadata.symbolTable.set(vm, this, op.lexicalEnvironment->symbolTable()); 662 617 } else if (JSScope* constantScope = JSScope::constantScopeForCodeBlock(op.type, this)) 663 instructions[i + 6].u.jsCell.set(vm, this, 
constantScope);618 metadata.constantScope.set(vm, this, constantScope); 664 619 else 665 instructions[i + 6].u.pointer= nullptr;620 metadata.globalObject = nullptr; 666 621 break; 667 622 } 668 623 669 624 case op_get_from_scope: { 670 linkValueProfile(i, opLength); 671 672 // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand 673 674 int localScopeDepth = pc[5].u.operand; 675 instructions[i + 5].u.pointer = nullptr; 676 677 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); 678 ASSERT(!isInitialization(getPutInfo.initializationMode())); 679 if (getPutInfo.resolveType() == LocalClosureVar) { 680 instructions[i + 4] = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand(); 625 INITIALIZE_METADATA(OpGetFromScope) 626 627 link_profile(instruction, bytecode, metadata); 628 metadata.watchpointSet = nullptr; 629 630 ASSERT(!isInitialization(bytecode.getPutInfo.initializationMode())); 631 if (bytecode.getPutInfo.resolveType() == LocalClosureVar) { 632 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), ClosureVar, bytecode.getPutInfo.initializationMode()); 681 633 break; 682 634 } 683 635 684 const Identifier& ident = identifier( pc[3].u.operand);685 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get,getPutInfo.resolveType(), InitializationMode::NotInitialization);636 const Identifier& ident = identifier(bytecode.var); 637 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.localScopeDepth, scope, ident, Get, bytecode.getPutInfo.resolveType(), InitializationMode::NotInitialization); 686 638 RETURN_IF_EXCEPTION(throwScope, false); 687 639 688 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();640 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), op.type, bytecode.getPutInfo.initializationMode()); 689 641 if (op.type == ModuleVar) 690 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), ClosureVar, getPutInfo.initializationMode()).operand();642 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), ClosureVar, bytecode.getPutInfo.initializationMode()); 691 643 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) 692 instructions[i + 5].u.watchpointSet = op.watchpointSet;644 metadata.watchpointSet = op.watchpointSet; 693 645 else if (op.structure) 694 instructions[i + 5].u.structure.set(vm, this, op.structure);695 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand);646 metadata.structure.set(vm, this, op.structure); 647 metadata.operand = op.operand; 696 648 break; 697 649 } 698 650 699 651 case op_put_to_scope: { 700 // put_to_scope scope, id, value, GetPutInfo, Structure, Operand701 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); 702 if ( getPutInfo.resolveType() == LocalClosureVar) {652 INITIALIZE_METADATA(OpPutToScope) 653 654 if (bytecode.getPutInfo.resolveType() == LocalClosureVar) { 703 655 // Only do watching if the property we're putting to is not anonymous. 
704 if (static_cast<unsigned>(pc[2].u.operand) != UINT_MAX) { 705 int symbolTableIndex = pc[5].u.operand; 706 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); 707 const Identifier& ident = identifier(pc[2].u.operand); 656 if (bytecode.var != UINT_MAX) { 657 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(bytecode.symbolTableOrScopeDepth)); 658 const Identifier& ident = identifier(bytecode.var); 708 659 ConcurrentJSLocker locker(symbolTable->m_lock); 709 660 auto iter = symbolTable->find(locker, ident.impl()); 710 661 ASSERT(iter != symbolTable->end(locker)); 711 662 iter->value.prepareToWatch(); 712 instructions[i + 5].u.watchpointSet = iter->value.watchpointSet();663 metadata.watchpointSet = iter->value.watchpointSet(); 713 664 } else 714 instructions[i + 5].u.watchpointSet = nullptr;665 metadata.watchpointSet = nullptr; 715 666 break; 716 667 } 717 668 718 const Identifier& ident = identifier(pc[2].u.operand); 719 int localScopeDepth = pc[5].u.operand; 720 instructions[i + 5].u.pointer = nullptr; 721 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Put, getPutInfo.resolveType(), getPutInfo.initializationMode()); 669 const Identifier& ident = identifier(bytecode.var); 670 metadata.watchpointSet = nullptr; 671 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), bytecode.symbolTableOrScopeDepth, scope, ident, Put, bytecode.getPutInfo.resolveType(), bytecode.getPutInfo.initializationMode()); 722 672 RETURN_IF_EXCEPTION(throwScope, false); 723 673 724 instructions[i + 4].u.operand = GetPutInfo(getPutInfo.resolveMode(), op.type, getPutInfo.initializationMode()).operand();674 metadata.getPutInfo = GetPutInfo(bytecode.getPutInfo.resolveMode(), op.type, bytecode.getPutInfo.initializationMode()); 725 675 if (op.type == GlobalVar || op.type == GlobalVarWithVarInjectionChecks || op.type == GlobalLexicalVar || op.type == GlobalLexicalVarWithVarInjectionChecks) 726 instructions[i + 5].u.watchpointSet = op.watchpointSet;676 metadata.watchpointSet = op.watchpointSet; 727 677 else if (op.type == ClosureVar || op.type == ClosureVarWithVarInjectionChecks) { 728 678 if (op.watchpointSet) 729 679 op.watchpointSet->invalidate(vm, PutToScopeFireDetail(this, ident)); 730 680 } else if (op.structure) 731 instructions[i + 5].u.structure.set(vm, this, op.structure); 732 instructions[i + 6].u.pointer = reinterpret_cast<void*>(op.operand); 733 681 metadata.structure.set(vm, this, op.structure); 682 metadata.operand = op.operand; 734 683 break; 735 684 } … … 737 686 case op_profile_type: { 738 687 RELEASE_ASSERT(vm.typeProfiler()); 739 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? 
740 size_t instructionOffset = i + opLength - 1; 688 689 INITIALIZE_METADATA(OpProfileType) 690 691 size_t instructionOffset = instruction.offset() + instruction->size() - 1; 741 692 unsigned divotStart, divotEnd; 742 693 GlobalVariableID globalVariableID = 0; 743 694 RefPtr<TypeSet> globalTypeSet; 744 695 bool shouldAnalyze = m_unlinkedCode->typeProfilerExpressionInfoForBytecodeOffset(instructionOffset, divotStart, divotEnd); 745 VirtualRegister profileRegister(pc[1].u.operand);746 ProfileTypeBytecodeFlag flag = static_cast<ProfileTypeBytecodeFlag>(pc[3].u.operand);747 696 SymbolTable* symbolTable = nullptr; 748 697 749 switch ( flag) {698 switch (bytecode.flag) { 750 699 case ProfileTypeBytecodeClosureVar: { 751 const Identifier& ident = identifier(pc[4].u.operand); 752 int localScopeDepth = pc[2].u.operand; 753 ResolveType type = static_cast<ResolveType>(pc[5].u.operand); 700 const Identifier& ident = identifier(bytecode.identifier); 701 unsigned localScopeDepth = bytecode.symbolTableOrScopeDepth; 754 702 // Even though type profiling may be profiling either a Get or a Put, we can always claim a Get because 755 703 // we're abstractly "read"ing from a JSScope. 756 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, type, InitializationMode::NotInitialization);704 ResolveOp op = JSScope::abstractResolve(m_globalObject->globalExec(), localScopeDepth, scope, ident, Get, bytecode.resolveType, InitializationMode::NotInitialization); 757 705 RETURN_IF_EXCEPTION(throwScope, false); 758 706 … … 775 723 } 776 724 case ProfileTypeBytecodeLocallyResolved: { 777 int symbolTableIndex = pc[2].u.operand;725 int symbolTableIndex = bytecode.symbolTableOrScopeDepth; 778 726 SymbolTable* symbolTable = jsCast<SymbolTable*>(getConstant(symbolTableIndex)); 779 const Identifier& ident = identifier( pc[4].u.operand);727 const Identifier& ident = identifier(bytecode.identifier); 780 728 ConcurrentJSLocker locker(symbolTable->m_lock); 781 729 // If our parent scope was created while profiling was disabled, it will not have prepared for profiling yet. 
… … 812 760 bool isNewLocation = locationPair.second; 813 761 814 if ( flag == ProfileTypeBytecodeFunctionReturnStatement)762 if (bytecode.flag == ProfileTypeBytecodeFunctionReturnStatement) 815 763 location->m_divotForFunctionOffsetIfReturnStatement = ownerExecutable->typeProfilingStartOffset(); 816 764 … … 818 766 vm.typeProfiler()->insertNewLocation(location); 819 767 820 instructions[i + 2].u.location = location;768 metadata.typeLocation = location; 821 769 break; 822 770 } 823 771 824 772 case op_debug: { 825 if ( pc[1].u.unsignedValue == DidReachBreakpoint)773 if (instruction->as<OpDebug>().debugHookType == DidReachBreakpoint) 826 774 m_hasDebuggerStatement = true; 827 775 break; … … 829 777 830 778 case op_create_rest: { 831 int numberOfArgumentsToSkip = instruction s[i + 3].u.operand;779 int numberOfArgumentsToSkip = instruction->as<OpCreateRest>().numParametersToSkip; 832 780 ASSERT_UNUSED(numberOfArgumentsToSkip, numberOfArgumentsToSkip >= 0); 833 781 // This is used when rematerializing the rest parameter during OSR exit in the FTL JIT. … … 839 787 break; 840 788 } 841 842 i += opLength; 843 } 789 } 790 791 #undef CASE 792 #undef INITIALIZE_METADATA 793 #undef LINK_FIELD 794 #undef LINK 844 795 845 796 if (vm.controlFlowProfiler()) 846 insertBasicBlockBoundariesForControlFlowProfiler(instructions); 847 848 m_instructions = WTFMove(instructions); 797 insertBasicBlockBoundariesForControlFlowProfiler(); 849 798 850 799 // Set optimization thresholds only after m_instructions is initialized, since these … … 862 811 dumpBytecode(); 863 812 864 heap()->reportExtraMemoryAllocated(m_ instructions.size() * sizeof(Instruction));813 heap()->reportExtraMemoryAllocated(m_metadata->sizeInBytes()); 865 814 866 815 return true; … … 1003 952 { 1004 953 CodeBlock* thisObject = jsCast<CodeBlock*>(cell); 1005 size_t extraMemoryAllocated = thisObject->m_ instructions.size() * sizeof(Instruction);954 size_t extraMemoryAllocated = thisObject->m_metadata->sizeInBytes(); 1006 955 if (thisObject->m_jitCode) 1007 956 extraMemoryAllocated += thisObject->m_jitCode->size(); … … 1024 973 visitor.appendUnbarriered(otherBlock); 1025 974 975 size_t extraMemory = m_metadata->sizeInBytes(); 1026 976 if (m_jitCode) 1027 visitor.reportExtraMemoryVisited(m_jitCode->size()); 1028 if (m_instructions.size()) { 1029 unsigned refCount = m_instructions.refCount(); 1030 if (!refCount) { 1031 dataLog("CodeBlock: ", RawPointer(this), "\n"); 1032 dataLog("m_instructions.data(): ", RawPointer(m_instructions.data()), "\n"); 1033 dataLog("refCount: ", refCount, "\n"); 1034 RELEASE_ASSERT_NOT_REACHED(); 1035 } 1036 visitor.reportExtraMemoryVisited(m_instructions.size() * sizeof(Instruction) / refCount); 1037 } 977 extraMemory += m_jitCode->size(); 978 visitor.reportExtraMemoryVisited(extraMemory); 1038 979 1039 980 stronglyVisitStrongReferences(locker, visitor); … … 1134 1075 1135 1076 if (jitType() == JITCode::InterpreterThunk) { 1136 const Vector< unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions();1077 const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); 1137 1078 for (size_t i = 0; i < propertyAccessInstructions.size(); ++i) { 1138 Instruction* instruction = &instructions()[propertyAccessInstructions[i]];1139 switch (Interpreter::getOpcodeID(instruction[0])) {1140 case op_put_by_id: {1141 StructureID oldStructureID = instruction[4].u.structureID;1142 StructureID newStructureID = instruction[6].u.structureID;1079 auto instruction = 
m_instructions->at(propertyAccessInstructions[i]); 1080 if (instruction->is<OpPutById>()) { 1081 auto& metadata = instruction->as<OpPutById>().metadata(this); 1082 StructureID oldStructureID = metadata.oldStructure; 1083 StructureID newStructureID = metadata.newStructure; 1143 1084 if (!oldStructureID || !newStructureID) 1144 break;1085 continue; 1145 1086 Structure* oldStructure = 1146 1087 vm.heap.structureIDTable().get(oldStructureID); … … 1149 1090 if (Heap::isMarked(oldStructure)) 1150 1091 visitor.appendUnbarriered(newStructure); 1151 break; 1152 } 1153 default: 1154 break; 1092 continue; 1155 1093 } 1156 1094 } … … 1248 1186 } 1249 1187 1250 void CodeBlock::clearLLIntGetByIdCache(Instruction* instruction)1251 {1252 instruction[0].u.opcode = LLInt::getOpcode(op_get_by_id);1253 instruction[4].u.pointer = nullptr;1254 instruction[5].u.pointer = nullptr;1255 instruction[6].u.pointer = nullptr;1256 }1257 1258 1188 void CodeBlock::finalizeLLIntInlineCaches() 1259 1189 { 1260 1190 VM& vm = *m_poisonedVM; 1261 const Vector<unsigned>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); 1191 const Vector<InstructionStream::Offset>& propertyAccessInstructions = m_unlinkedCode->propertyAccessInstructions(); 1192 1193 auto handleGetPutFromScope = [](auto& metadata) { 1194 GetPutInfo getPutInfo = metadata.getPutInfo; 1195 if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 1196 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) 1197 return; 1198 WriteBarrierBase<Structure>& structure = metadata.structure; 1199 if (!structure || Heap::isMarked(structure.get())) 1200 return; 1201 if (Options::verboseOSR()) 1202 dataLogF("Clearing scope access with structure %p.\n", structure.get()); 1203 structure.clear(); 1204 }; 1205 1262 1206 for (size_t size = propertyAccessInstructions.size(), i = 0; i < size; ++i) { 1263 Instruction* curInstruction = &instructions()[propertyAccessInstructions[i]];1264 switch ( Interpreter::getOpcodeID(curInstruction[0])) {1207 const auto curInstruction = m_instructions->at(propertyAccessInstructions[i]); 1208 switch (curInstruction->opcodeID()) { 1265 1209 case op_get_by_id: { 1266 StructureID oldStructureID = curInstruction[4].u.structureID; 1210 auto& metadata = curInstruction->as<OpGetById>().metadata(this); 1211 if (metadata.mode != GetByIdMode::Default) 1212 break; 1213 StructureID oldStructureID = metadata.modeMetadata.defaultMode.structure; 1267 1214 if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) 1268 1215 break; 1269 1216 if (Options::verboseOSR()) 1270 1217 dataLogF("Clearing LLInt property access.\n"); 1271 clearLLIntGetByIdCache(curInstruction);1218 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(metadata); 1272 1219 break; 1273 1220 } 1274 1221 case op_get_by_id_direct: { 1275 StructureID oldStructureID = curInstruction[4].u.structureID; 1222 auto& metadata = curInstruction->as<OpGetByIdDirect>().metadata(this); 1223 StructureID oldStructureID = metadata.structure; 1276 1224 if (!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) 1277 1225 break; 1278 1226 if (Options::verboseOSR()) 1279 1227 dataLogF("Clearing LLInt property access.\n"); 1280 curInstruction[4].u.pointer = nullptr;1281 curInstruction[5].u.pointer = nullptr;1228 metadata.structure = 0; 1229 metadata.offset = 0; 1282 
1230 break; 1283 1231 } 1284 1232 case op_put_by_id: { 1285 StructureID oldStructureID = curInstruction[4].u.structureID; 1286 StructureID newStructureID = curInstruction[6].u.structureID; 1287 StructureChain* chain = curInstruction[7].u.structureChain.get(); 1233 auto& metadata = curInstruction->as<OpPutById>().metadata(this); 1234 StructureID oldStructureID = metadata.oldStructure; 1235 StructureID newStructureID = metadata.newStructure; 1236 StructureChain* chain = metadata.structureChain.get(); 1288 1237 if ((!oldStructureID || Heap::isMarked(vm.heap.structureIDTable().get(oldStructureID))) 1289 1238 && (!newStructureID || Heap::isMarked(vm.heap.structureIDTable().get(newStructureID))) … … 1292 1241 if (Options::verboseOSR()) 1293 1242 dataLogF("Clearing LLInt put transition.\n"); 1294 curInstruction[4].u.structureID= 0;1295 curInstruction[5].u.operand= 0;1296 curInstruction[6].u.structureID= 0;1297 curInstruction[7].u.structureChain.clear();1243 metadata.oldStructure = 0; 1244 metadata.offset = 0; 1245 metadata.newStructure = 0; 1246 metadata.structureChain.clear(); 1298 1247 break; 1299 1248 } … … 1302 1251 case op_resolve_scope_for_hoisting_func_decl_in_eval: 1303 1252 break; 1304 case op_get_by_id_proto_load: 1305 case op_get_by_id_unset: 1306 case op_get_array_length: 1307 break; 1308 case op_to_this: 1309 if (!curInstruction[2].u.structure || Heap::isMarked(curInstruction[2].u.structure.get())) 1253 case op_to_this: { 1254 auto& metadata = curInstruction->as<OpToThis>().metadata(this); 1255 if (!metadata.cachedStructure || Heap::isMarked(metadata.cachedStructure.get())) 1310 1256 break; 1311 1257 if (Options::verboseOSR()) 1312 dataLogF("Clearing LLInt to_this with structure %p.\n", curInstruction[2].u.structure.get()); 1313 curInstruction[2].u.structure.clear(); 1314 curInstruction[3].u.toThisStatus = merge( 1315 curInstruction[3].u.toThisStatus, ToThisClearedByGC); 1258 dataLogF("Clearing LLInt to_this with structure %p.\n", metadata.cachedStructure.get()); 1259 metadata.cachedStructure.clear(); 1260 metadata.toThisStatus = merge(metadata.toThisStatus, ToThisClearedByGC); 1316 1261 break; 1262 } 1317 1263 case op_create_this: { 1318 auto& cacheWriteBarrier = curInstruction[4].u.jsCell; 1264 auto& metadata = curInstruction->as<OpCreateThis>().metadata(this); 1265 auto& cacheWriteBarrier = metadata.cachedCallee; 1319 1266 if (!cacheWriteBarrier || cacheWriteBarrier.unvalidatedGet() == JSCell::seenMultipleCalleeObjects()) 1320 1267 break; … … 1331 1278 // are for outer functions, and we refer to those functions strongly, and they refer 1332 1279 // to the symbol table strongly. But it's nice to be on the safe side. 
1333 WriteBarrierBase<SymbolTable>& symbolTable = curInstruction[6].u.symbolTable; 1280 auto& metadata = curInstruction->as<OpResolveScope>().metadata(this); 1281 WriteBarrierBase<SymbolTable>& symbolTable = metadata.symbolTable; 1334 1282 if (!symbolTable || Heap::isMarked(symbolTable.get())) 1335 1283 break; … … 1340 1288 } 1341 1289 case op_get_from_scope: 1342 case op_put_to_scope: { 1343 GetPutInfo getPutInfo = GetPutInfo(curInstruction[4].u.operand); 1344 if (getPutInfo.resolveType() == GlobalVar || getPutInfo.resolveType() == GlobalVarWithVarInjectionChecks 1345 || getPutInfo.resolveType() == LocalClosureVar || getPutInfo.resolveType() == GlobalLexicalVar || getPutInfo.resolveType() == GlobalLexicalVarWithVarInjectionChecks) 1346 continue; 1347 WriteBarrierBase<Structure>& structure = curInstruction[5].u.structure; 1348 if (!structure || Heap::isMarked(structure.get())) 1349 break; 1350 if (Options::verboseOSR()) 1351 dataLogF("Clearing scope access with structure %p.\n", structure.get()); 1352 structure.clear(); 1290 handleGetPutFromScope(curInstruction->as<OpGetFromScope>().metadata(this)); 1353 1291 break; 1354 } 1292 case op_put_to_scope: 1293 handleGetPutFromScope(curInstruction->as<OpPutToScope>().metadata(this)); 1294 break; 1355 1295 default: 1356 OpcodeID opcodeID = Interpreter::getOpcodeID(curInstruction[0]);1296 OpcodeID opcodeID = curInstruction->opcodeID(); 1357 1297 ASSERT_WITH_MESSAGE_UNUSED(opcodeID, false, "Unhandled opcode in CodeBlock::finalizeUnconditionally, %s(%d) at bc %u", opcodeNames[opcodeID], opcodeID, propertyAccessInstructions[i]); 1358 1298 } … … 1363 1303 m_llintGetByIdWatchpointMap.removeIf([&] (const StructureWatchpointMap::KeyValuePairType& pair) -> bool { 1364 1304 auto clear = [&] () { 1365 Instruction* instruction = std::get<1>(pair.key);1366 OpcodeID opcode = Interpreter::getOpcodeID(*instruction);1367 if (opcode == op_get_by_id _proto_load || opcode == op_get_by_id_unset) {1305 const Instruction* instruction = std::get<1>(pair.key); 1306 OpcodeID opcode = instruction->opcodeID(); 1307 if (opcode == op_get_by_id) { 1368 1308 if (Options::verboseOSR()) 1369 1309 dataLogF("Clearing LLInt property access.\n"); 1370 clearLLIntGetByIdCache(instruction);1310 LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(instruction->as<OpGetById>().metadata(this)); 1371 1311 } 1372 1312 return true; … … 1384 1324 }); 1385 1325 1386 for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {1387 if ( m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {1326 forEachLLIntCallLinkInfo([&](LLIntCallLinkInfo& callLinkInfo) { 1327 if (callLinkInfo.isLinked() && !Heap::isMarked(callLinkInfo.callee.get())) { 1388 1328 if (Options::verboseOSR()) 1389 1329 dataLog("Clearing LLInt call from ", *this, "\n"); 1390 m_llintCallLinkInfos[i].unlink();1391 } 1392 if (!!
m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))1393 m_llintCallLinkInfos[i].lastSeenCallee.clear();1394 } 1330 callLinkInfo.unlink(); 1331 } 1332 if (!!callLinkInfo.lastSeenCallee && !Heap::isMarked(callLinkInfo.lastSeenCallee.get())) 1333 callLinkInfo.lastSeenCallee.clear(); 1334 }); 1395 1335 } 1396 1336 … … 1470 1410 } 1471 1411 1472 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, Instruction* instruction)1412 JITAddIC* CodeBlock::addJITAddIC(ArithProfile* arithProfile, const Instruction* instruction) 1473 1413 { 1474 1414 return m_addICs.add(arithProfile, instruction); 1475 1415 } 1476 1416 1477 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, Instruction* instruction)1417 JITMulIC* CodeBlock::addJITMulIC(ArithProfile* arithProfile, const Instruction* instruction) 1478 1418 { 1479 1419 return m_mulICs.add(arithProfile, instruction); 1480 1420 } 1481 1421 1482 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, Instruction* instruction)1422 JITSubIC* CodeBlock::addJITSubIC(ArithProfile* arithProfile, const Instruction* instruction) 1483 1423 { 1484 1424 return m_subICs.add(arithProfile, instruction); 1485 1425 } 1486 1426 1487 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, Instruction* instruction)1427 JITNegIC* CodeBlock::addJITNegIC(ArithProfile* arithProfile, const Instruction* instruction) 1488 1428 { 1489 1429 return m_negICs.add(arithProfile, instruction); … … 1573 1513 for (auto& functionDecl : m_functionDecls) 1574 1514 visitor.append(functionDecl); 1575 for (auto& objectAllocationProfile : m_objectAllocationProfiles)1515 forEachObjectAllocationProfile([&](ObjectAllocationProfile& objectAllocationProfile) { 1576 1516 objectAllocationProfile.visitAggregate(visitor); 1517 }); 1577 1518 1578 1519 #if ENABLE(JIT) … … 1700 1641 } 1701 1642 1702 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(unsigned bytecodeOffset) 1703 { 1704 ASSERT(Interpreter::getOpcodeID(m_instructions[bytecodeOffset]) == op_catch); 1643 1644 1645 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset) 1646 { 1647 auto instruction = m_instructions->at(bytecodeOffset); 1648 OpCatch op = instruction->as<OpCatch>(); 1649 auto& metadata = op.metadata(this); 1650 if (!!metadata.buffer) { 1651 #if !ASSERT_DISABLED 1652 ConcurrentJSLocker locker(m_lock); 1653 bool found = false; 1654 for (auto& profile : m_catchProfiles) { 1655 if (profile.get() == metadata.buffer) { 1656 found = true; 1657 break; 1658 } 1659 } 1660 ASSERT(found); 1661 #endif 1662 return; 1663 } 1664 1665 ensureCatchLivenessIsComputedForBytecodeOffsetSlow(op, bytecodeOffset); 1666 } 1667 1668 void CodeBlock::ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch& op, InstructionStream::Offset bytecodeOffset) 1669 { 1705 1670 BytecodeLivenessAnalysis& bytecodeLiveness = livenessAnalysis(); 1706 1671 … … 1709 1674 // we can avoid profiling them and extracting them when doing OSR entry 1710 1675 // into the DFG. 
1711 FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, bytecodeOffset + OPCODE_LENGTH(op_catch)); 1676 1677 auto nextOffset = m_instructions->at(bytecodeOffset).next().offset(); 1678 FastBitVector liveLocals = bytecodeLiveness.getLivenessInfoAtBytecodeOffset(this, nextOffset); 1712 1679 Vector<VirtualRegister> liveOperands; 1713 1680 liveOperands.reserveInitialCapacity(liveLocals.bitCount()); … … 1729 1696 WTF::storeStoreFence(); 1730 1697 1731 m_instructions[bytecodeOffset + 3].u.pointer = profiles.get();1698 op.metadata(this).buffer = profiles.get(); 1732 1699 1733 1700 { … … 1780 1747 bool CodeBlock::hasOpDebugForLineAndColumn(unsigned line, unsigned column) 1781 1748 { 1782 const Instruction* begin = instructions().begin(); 1783 const Instruction* end = instructions().end(); 1784 for (const Instruction* it = begin; it != end;) { 1785 OpcodeID opcodeID = Interpreter::getOpcodeID(*it); 1786 if (opcodeID == op_debug) { 1787 unsigned bytecodeOffset = it - begin; 1749 for (const auto& it : *m_instructions) { 1750 if (it->is<OpDebug>()) { 1788 1751 int unused; 1789 1752 unsigned opDebugLine; 1790 1753 unsigned opDebugColumn; 1791 expressionRangeForBytecodeOffset( bytecodeOffset, unused, unused, unused, opDebugLine, opDebugColumn);1754 expressionRangeForBytecodeOffset(it.offset(), unused, unused, unused, opDebugLine, opDebugColumn); 1792 1755 if (line == opDebugLine && (column == Breakpoint::unspecifiedColumn || column == opDebugColumn)) 1793 1756 return true; 1794 1757 } 1795 it += opcodeLengths[opcodeID];1796 1758 } 1797 1759 return false; … … 2508 2470 ArrayProfile* CodeBlock::getArrayProfile(const ConcurrentJSLocker&, unsigned bytecodeOffset) 2509 2471 { 2472 auto instruction = m_instructions->at(bytecodeOffset); 2473 switch (instruction->opcodeID()) { 2474 #define CASE(Op) \ 2475 case Op::opcodeID: \ 2476 return &instruction->as<Op>().metadata(this).arrayProfile; 2477 2478 FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(CASE) 2479 #undef CASE 2480 2481 case OpGetById::opcodeID: { 2482 auto bytecode = instruction->as<OpGetById>(); 2483 auto& metadata = bytecode.metadata(this); 2484 if (metadata.mode == GetByIdMode::ArrayLength) 2485 return &metadata.modeMetadata.arrayLengthMode.arrayProfile; 2486 break; 2487 } 2488 2489 default: 2490 break; 2491 } 2492 2510 2493 for (auto& m_arrayProfile : m_arrayProfiles) { 2511 2494 if (m_arrayProfile.bytecodeOffset() == bytecodeOffset) … … 2546 2529 return getOrAddArrayProfile(locker, bytecodeOffset); 2547 2530 } 2531 2548 2532 2549 2533 #if ENABLE(DFG_JIT) … … 2613 2597 ConcurrentJSLocker locker(m_lock); 2614 2598 2615 for (unsigned i = m_arrayProfiles.size(); i--;) 2616 m_arrayProfiles[i].computeUpdatedPrediction(locker, this); 2617 2618 // Don't count these either, for similar reasons. 2619 for (unsigned i = m_arrayAllocationProfiles.size(); i--;) 2620 m_arrayAllocationProfiles[i].updateProfile(); 2599 forEachArrayProfile([&](ArrayProfile& profile) { 2600 profile.computeUpdatedPrediction(locker, this); 2601 }); 2602 2603 forEachArrayAllocationProfile([&](ArrayAllocationProfile& profile) { 2604 profile.updateProfile(); 2605 }); 2621 2606 } 2622 2607 … … 2772 2757 return 0; 2773 2758 2774 double doubleResult = multiplier * m_instructions.size();2759 double doubleResult = multiplier * instructionCount(); 2775 2760 2776 2761 // Be even more paranoid: silently reject values that won't fit into a size_t. 
If … … 2811 2796 ValueProfile* CodeBlock::tryGetValueProfileForBytecodeOffset(int bytecodeOffset) 2812 2797 { 2813 return tryBinarySearch<ValueProfile, int>( 2814 m_valueProfiles, m_valueProfiles.size(), bytecodeOffset, 2815 getValueProfileBytecodeOffset<ValueProfile>); 2798 auto instruction = m_instructions->at(bytecodeOffset); 2799 switch (instruction->opcodeID()) { 2800 2801 #define CASE(Op) \ 2802 case Op::opcodeID: \ 2803 return &instruction->as<Op>().metadata(this).profile; 2804 2805 FOR_EACH_OPCODE_WITH_VALUE_PROFILE(CASE) 2806 2807 #undef CASE 2808 2809 default: 2810 return nullptr; 2811 2812 } 2813 } 2814 2815 SpeculatedType CodeBlock::valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) 2816 { 2817 if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset)) 2818 return valueProfile->computeUpdatedPrediction(locker); 2819 return SpecNone; 2816 2820 } 2817 2821 2818 2822 ValueProfile& CodeBlock::valueProfileForBytecodeOffset(int bytecodeOffset) 2819 2823 { 2820 OpcodeID opcodeID = Interpreter::getOpcodeID(instructions()[bytecodeOffset]); 2821 unsigned length = opcodeLength(opcodeID); 2822 ASSERT(!!tryGetValueProfileForBytecodeOffset(bytecodeOffset)); 2823 return *instructions()[bytecodeOffset + length - 1].u.profile; 2824 return *tryGetValueProfileForBytecodeOffset(bytecodeOffset); 2824 2825 } 2825 2826 … … 2848 2849 } 2849 2850 } 2850 2851 for (unsigned i = 0; i + 1 < numberOfValueProfiles(); ++i) {2852 if (valueProfile(i).m_bytecodeOffset > valueProfile(i + 1).m_bytecodeOffset) {2853 beginValidationDidFail();2854 dataLog(" Value profiles are not sorted.\n");2855 endValidationDidFail();2856 }2857 }2858 2851 2859 for ( unsigned bytecodeOffset = 0; bytecodeOffset < m_instructions.size();) {2860 OpcodeID opcode = Interpreter::getOpcodeID(m_instructions[bytecodeOffset]);2861 if (!!baselineAlternative()->handlerForBytecodeOffset( bytecodeOffset)) {2852 for (const auto& instruction : *m_instructions) { 2853 OpcodeID opcode = instruction->opcodeID(); 2854 if (!!baselineAlternative()->handlerForBytecodeOffset(instruction.offset())) { 2862 2855 if (opcode == op_catch || opcode == op_enter) { 2863 2856 // op_catch/op_enter logically represent an entrypoint. 
Entrypoints are not allowed to be … … 2871 2864 } 2872 2865 } 2873 bytecodeOffset += opcodeLength(opcode);2874 2866 } 2875 2867 } … … 2905 2897 } 2906 2898 2899 int CodeBlock::outOfLineJumpOffset(const Instruction* pc) 2900 { 2901 int offset = bytecodeOffset(pc); 2902 return m_unlinkedCode->outOfLineJumpOffset(offset); 2903 } 2904 2905 const Instruction* CodeBlock::outOfLineJumpTarget(const Instruction* pc) 2906 { 2907 int offset = bytecodeOffset(pc); 2908 int target = m_unlinkedCode->outOfLineJumpOffset(offset); 2909 return m_instructions->at(offset + target).ptr(); 2910 } 2911 2907 2912 RareCaseProfile* CodeBlock::addRareCaseProfile(int bytecodeOffset) 2908 2913 { … … 2926 2931 } 2927 2932 2928 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(int bytecodeOffset) 2929 { 2930 return arithProfileForPC(&instructions()[bytecodeOffset]); 2931 } 2932 2933 ArithProfile* CodeBlock::arithProfileForPC(Instruction* pc) 2934 { 2935 auto opcodeID = Interpreter::getOpcodeID(pc[0]); 2936 switch (opcodeID) { 2933 ArithProfile* CodeBlock::arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset) 2934 { 2935 return arithProfileForPC(m_instructions->at(bytecodeOffset).ptr()); 2936 } 2937 2938 ArithProfile* CodeBlock::arithProfileForPC(const Instruction* pc) 2939 { 2940 switch (pc->opcodeID()) { 2937 2941 case op_negate: 2938 return bitwise_cast<ArithProfile*>(&pc[3].u.operand);2942 return &pc->as<OpNegate>().metadata(this).arithProfile; 2939 2943 case op_bitxor: 2944 return &pc->as<OpBitxor>().metadata(this).arithProfile; 2940 2945 case op_add: 2946 return &pc->as<OpAdd>().metadata(this).arithProfile; 2941 2947 case op_mul: 2948 return &pc->as<OpMul>().metadata(this).arithProfile; 2942 2949 case op_sub: 2950 return &pc->as<OpSub>().metadata(this).arithProfile; 2943 2951 case op_div: 2944 return bitwise_cast<ArithProfile*>(&pc[4].u.operand);2952 return &pc->as<OpDiv>().metadata(this).arithProfile; 2945 2953 default: 2946 2954 break; … … 2950 2958 } 2951 2959 2952 bool CodeBlock::couldTakeSpecialFastCase( int bytecodeOffset)2960 bool CodeBlock::couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset) 2953 2961 { 2954 2962 if (!hasBaselineJITProfiling()) … … 2969 2977 #endif 2970 2978 2971 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler( RefCountedArray<Instruction>& instructions)2979 void CodeBlock::insertBasicBlockBoundariesForControlFlowProfiler() 2972 2980 { 2973 2981 if (!unlinkedCodeBlock()->hasOpProfileControlFlowBytecodeOffsets()) 2974 2982 return; 2975 const Vector< size_t>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets();2983 const Vector<InstructionStream::Offset>& bytecodeOffsets = unlinkedCodeBlock()->opProfileControlFlowBytecodeOffsets(); 2976 2984 for (size_t i = 0, offsetsLength = bytecodeOffsets.size(); i < offsetsLength; i++) { 2977 2985 // Because op_profile_control_flow is emitted at the beginning of every basic block, finding 2978 2986 // the next op_profile_control_flow will give us the text range of a single basic block. 
2979 2987 size_t startIdx = bytecodeOffsets[i]; 2980 RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[startIdx]) == op_profile_control_flow); 2981 int basicBlockStartOffset = instructions[startIdx + 1].u.operand; 2988 auto instruction = m_instructions->at(startIdx); 2989 RELEASE_ASSERT(instruction->opcodeID() == op_profile_control_flow); 2990 auto bytecode = instruction->as<OpProfileControlFlow>(); 2991 auto& metadata = bytecode.metadata(this); 2992 int basicBlockStartOffset = bytecode.textOffset; 2982 2993 int basicBlockEndOffset; 2983 2994 if (i + 1 < offsetsLength) { 2984 2995 size_t endIdx = bytecodeOffsets[i + 1]; 2985 RELEASE_ASSERT(Interpreter::getOpcodeID(instructions[endIdx]) == op_profile_control_flow); 2986 basicBlockEndOffset = instructions[endIdx + 1].u.operand - 1; 2996 auto endInstruction = m_instructions->at(endIdx); 2997 RELEASE_ASSERT(endInstruction->opcodeID() == op_profile_control_flow); 2998 basicBlockEndOffset = endInstruction->as<OpProfileControlFlow>().textOffset - 1; 2987 2999 } else { 2988 3000 basicBlockEndOffset = m_sourceOffset + ownerScriptExecutable()->source().length() - 1; // Offset before the closing brace. … … 3010 3022 if (basicBlockEndOffset < basicBlockStartOffset) { 3011 3023 RELEASE_ASSERT(i + 1 < offsetsLength); // We should never encounter dummy blocks at the end of a CodeBlock. 3012 instructions[startIdx + 1].u.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock();3024 metadata.basicBlockLocation = vm()->controlFlowProfiler()->dummyBasicBlock(); 3013 3025 continue; 3014 3026 } … … 3034 3046 insertFunctionGaps(executable); 3035 3047 3036 instructions[startIdx + 1].u.basicBlockLocation = basicBlockLocation;3048 metadata.basicBlockLocation = basicBlockLocation; 3037 3049 } 3038 3050 } -
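The thread running through all of the CodeBlock.cpp changes above: positional slot accesses such as pc[4].u.operand and instructions[i + 5].u.structure are replaced by decoding a typed bytecode struct (instruction->as<Op>()) and writing through its out-of-line metadata (bytecode.metadata(this)). A minimal sketch of the resulting read side, using only names that appear in this diff; the helper function itself is hypothetical:

    // Returns the StructureID the LLInt has cached for a default-mode
    // get_by_id at `offset`, or 0 if there is nothing usable.
    static StructureID cachedGetByIdStructure(CodeBlock* codeBlock, InstructionStream::Offset offset)
    {
        auto instruction = codeBlock->instructions().at(offset); // InstructionStream::Ref
        if (!instruction->is<OpGetById>())
            return 0;
        auto bytecode = instruction->as<OpGetById>();  // operands, decoded narrow or wide
        auto& metadata = bytecode.metadata(codeBlock); // mutable entry in m_metadata
        if (metadata.mode != GetByIdMode::Default)
            return 0;
        return metadata.modeMetadata.defaultMode.structure;
    }

The instruction stream itself becomes immutable and shareable, so everything the interpreter mutates at run time (profiles, caches, watchpoint pointers) has to live in the MetadataTable; that is why linking no longer copies and patches the instruction vector.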
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
r237486 r237547 48 48 #include "ICStatusMap.h" 49 49 #include "Instruction.h" 50 #include "InstructionStream.h" 50 51 #include "JITCode.h" 51 52 #include "JITCodeMap.h" … … 56 57 #include "JumpTable.h" 57 58 #include "LLIntCallLinkInfo.h" 58 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h"59 59 #include "LazyOperandValueProfile.h" 60 #include "MetadataTable.h" 60 61 #include "ModuleProgramExecutable.h" 61 62 #include "ObjectAllocationProfile.h" … … 86 87 class BytecodeLivenessAnalysis; 87 88 class CodeBlockSet; 88 class ExecState;89 89 class ExecutableToCodeBlockEdge; 90 90 class JSModuleEnvironment; 91 91 class LLIntOffsetsExtractor; 92 class LLIntPrototypeLoadAdaptiveStructureWatchpoint; 93 class MetadataTable; 92 94 class PCToCodeOriginMap; 93 95 class RegisterAtOffsetList; … … 97 99 98 100 struct ArithProfile; 101 struct OpCatch; 99 102 100 103 enum ReoptimizationMode { DontCountReoptimization, CountReoptimization }; … … 198 201 void dumpBytecode(); 199 202 void dumpBytecode(PrintStream&); 200 void dumpBytecode(PrintStream& out, const Instruction * begin, const Instruction*& it, const ICStatusMap& = ICStatusMap());203 void dumpBytecode(PrintStream& out, const InstructionStream::Ref& it, const ICStatusMap& = ICStatusMap()); 201 204 void dumpBytecode(PrintStream& out, unsigned bytecodeOffset, const ICStatusMap& = ICStatusMap()); 202 205 … … 243 246 244 247 #if ENABLE(JIT) 245 JITAddIC* addJITAddIC(ArithProfile*, Instruction*);246 JITMulIC* addJITMulIC(ArithProfile*, Instruction*);247 JITNegIC* addJITNegIC(ArithProfile*, Instruction*);248 JITSubIC* addJITSubIC(ArithProfile*, Instruction*);248 JITAddIC* addJITAddIC(ArithProfile*, const Instruction*); 249 JITMulIC* addJITMulIC(ArithProfile*, const Instruction*); 250 JITNegIC* addJITNegIC(ArithProfile*, const Instruction*); 251 JITSubIC* addJITSubIC(ArithProfile*, const Instruction*); 249 252 250 253 template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITAddGenerator>::value>::type> 251 JITAddIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITAddIC(profile, instruction); }254 JITAddIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITAddIC(profile, instruction); } 252 255 253 256 template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITMulGenerator>::value>::type> 254 JITMulIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITMulIC(profile, instruction); }257 JITMulIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITMulIC(profile, instruction); } 255 258 256 259 template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITNegGenerator>::value>::type> 257 JITNegIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITNegIC(profile, instruction); }260 JITNegIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITNegIC(profile, instruction); } 258 261 259 262 template <typename Generator, typename = typename std::enable_if<std::is_same<Generator, JITSubGenerator>::value>::type> 260 JITSubIC* addMathIC(ArithProfile* profile, Instruction* instruction) { return addJITSubIC(profile, instruction); }263 JITSubIC* addMathIC(ArithProfile* profile, const Instruction* instruction) { return addJITSubIC(profile, instruction); } 261 264 262 265 StructureStubInfo* addStubInfo(AccessType); … … 307 310 #endif 308 311 309 typedef JSC::Instruction Instruction; 310 typedef 
PoisonedRefCountedArray<CodeBlockPoison, Instruction>& UnpackedInstructions; 311 312 static void clearLLIntGetByIdCache(Instruction*); 313 314 unsigned bytecodeOffset(Instruction* returnAddress) 315 { 316 RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end()); 317 return static_cast<Instruction*>(returnAddress) - instructions().begin(); 318 } 319 320 unsigned numberOfInstructions() const { return m_instructions.size(); } 321 PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() { return m_instructions; } 322 const PoisonedRefCountedArray<CodeBlockPoison, Instruction>& instructions() const { return m_instructions; } 312 const Instruction* outOfLineJumpTarget(const Instruction* pc); 313 int outOfLineJumpOffset(const Instruction* pc); 314 int outOfLineJumpOffset(const InstructionStream::Ref& instruction) 315 { 316 return outOfLineJumpOffset(instruction.ptr()); 317 } 318 319 inline unsigned bytecodeOffset(const Instruction* returnAddress) 320 { 321 const auto* instructionsBegin = instructions().at(0).ptr(); 322 const auto* instructionsEnd = reinterpret_cast<const Instruction*>(reinterpret_cast<uintptr_t>(instructionsBegin) + instructions().size()); 323 RELEASE_ASSERT(returnAddress >= instructionsBegin && returnAddress < instructionsEnd); 324 return returnAddress - instructionsBegin; 325 } 326 327 const InstructionStream& instructions() const { return *m_instructions; } 323 328 324 329 size_t predictedMachineCodeSize(); 325 330 326 unsigned instructionCount() const { return m_instruction s.size(); }331 unsigned instructionCount() const { return m_instructionCount; } 327 332 328 333 // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind()) … … 426 431 427 432 ValueProfile& valueProfileForBytecodeOffset(int bytecodeOffset); 428 SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker& locker, int bytecodeOffset) 429 { 430 if (ValueProfile* valueProfile = tryGetValueProfileForBytecodeOffset(bytecodeOffset)) 431 return valueProfile->computeUpdatedPrediction(locker); 432 return SpecNone; 433 } 434 435 template<typename Functor> void forEachValueProfile(const Functor& func) 436 { 437 for (unsigned i = 0; i < numberOfArgumentValueProfiles(); ++i) 438 func(valueProfileForArgument(i)); 439 for (unsigned i = 0; i < numberOfValueProfiles(); ++i) 440 func(valueProfile(i)); 441 } 433 SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJSLocker&, int bytecodeOffset); 434 435 template<typename Functor> void forEachValueProfile(const Functor&); 436 template<typename Functor> void forEachArrayProfile(const Functor&); 437 template<typename Functor> void forEachArrayAllocationProfile(const Functor&); 438 template<typename Functor> void forEachObjectAllocationProfile(const Functor&); 439 template<typename Functor> void forEachLLIntCallLinkInfo(const Functor&); 442 440 443 441 RareCaseProfile* addRareCaseProfile(int bytecodeOffset); … … 462 460 } 463 461 464 ArithProfile* arithProfileForBytecodeOffset( int bytecodeOffset);465 ArithProfile* arithProfileForPC( Instruction*);466 467 bool couldTakeSpecialFastCase( int bytecodeOffset);462 ArithProfile* arithProfileForBytecodeOffset(InstructionStream::Offset bytecodeOffset); 463 ArithProfile* arithProfileForPC(const Instruction*); 464 465 bool couldTakeSpecialFastCase(InstructionStream::Offset bytecodeOffset); 468 466 469 467 unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); } … … 615 613 } 616 
614 617 typedef HashMap<std::tuple<Structure*, Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap;615 typedef HashMap<std::tuple<Structure*, const Instruction*>, Bag<LLIntPrototypeLoadAdaptiveStructureWatchpoint>> StructureWatchpointMap; 618 616 StructureWatchpointMap& llintGetByIdWatchpointMap() { return m_llintGetByIdWatchpointMap; } 619 617 … … 653 651 #else 654 652 static unsigned numberOfLLIntBaselineCalleeSaveRegisters() { return 0; } 655 static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 0; };653 static size_t llintBaselineCalleeSaveSpaceAsVirtualRegisters() { return 1; }; 656 654 size_t calleeSaveSpaceAsVirtualRegisters() { return 0; } 657 655 #endif … … 850 848 CallSiteIndex newExceptionHandlingCallSiteIndex(CallSiteIndex originalCallSite); 851 849 852 void ensureCatchLivenessIsComputedForBytecodeOffset(unsigned bytecodeOffset) 853 { 854 if (!!m_instructions[bytecodeOffset + 3].u.pointer) { 855 #if !ASSERT_DISABLED 856 ConcurrentJSLocker locker(m_lock); 857 bool found = false; 858 for (auto& profile : m_catchProfiles) { 859 if (profile.get() == m_instructions[bytecodeOffset + 3].u.pointer) { 860 found = true; 861 break; 862 } 863 } 864 ASSERT(found); 865 #endif 866 return; 867 } 868 869 ensureCatchLivenessIsComputedForBytecodeOffsetSlow(bytecodeOffset); 870 } 850 void ensureCatchLivenessIsComputedForBytecodeOffset(InstructionStream::Offset bytecodeOffset); 871 851 872 852 #if ENABLE(JIT) … … 876 856 877 857 bool hasTailCalls() const { return m_unlinkedCode->hasTailCalls(); } 858 859 template<typename Metadata> 860 Metadata& metadata(OpcodeID opcodeID, unsigned metadataID) 861 { 862 return reinterpret_cast<Metadata*>(m_metadata->get(opcodeID))[metadataID]; 863 } 864 865 size_t metadataSizeInBytes() 866 { 867 return m_unlinkedCode->metadataSizeInBytes(); 868 } 878 869 879 870 protected: … … 922 913 void visitOSRExitTargets(const ConcurrentJSLocker&, SlotVisitor&); 923 914 924 unsigned numberOfValueProfiles() { return m_valueProfiles.size(); } 925 unsigned numberOfNonArgumentValueProfiles() { return numberOfValueProfiles(); } 915 unsigned numberOfNonArgumentValueProfiles() { return m_numberOfNonArgumentValueProfiles; } 926 916 unsigned totalNumberOfValueProfiles() { return numberOfArgumentValueProfiles() + numberOfNonArgumentValueProfiles(); } 927 ValueProfile& valueProfile(int index) { return m_valueProfiles[index]; }928 917 ValueProfile* tryGetValueProfileForBytecodeOffset(int bytecodeOffset); 929 918 … … 939 928 } 940 929 941 void insertBasicBlockBoundariesForControlFlowProfiler( RefCountedArray<Instruction>&);942 void ensureCatchLivenessIsComputedForBytecodeOffsetSlow( unsigned);930 void insertBasicBlockBoundariesForControlFlowProfiler(); 931 void ensureCatchLivenessIsComputedForBytecodeOffsetSlow(const OpCatch&, InstructionStream::Offset); 943 932 944 933 int m_numCalleeLocals; … … 946 935 int m_numParameters; 947 936 int m_numberOfArgumentsToSkip { 0 }; 937 unsigned m_numberOfNonArgumentValueProfiles { 0 }; 948 938 union { 949 939 unsigned m_debuggerRequests; … … 959 949 Poisoned<CodeBlockPoison, VM*> m_poisonedVM; 960 950 961 PoisonedRefCountedArray<CodeBlockPoison, Instruction> m_instructions; 951 unsigned m_instructionCount { 0 }; 952 const InstructionStream* m_instructions; 962 953 VirtualRegister m_thisRegister; 963 954 VirtualRegister m_scopeRegister; … … 968 959 unsigned m_firstLineColumnOffset; 969 960 970 RefCountedArray<LLIntCallLinkInfo> m_llintCallLinkInfos;971 961 SentinelLinkedList<LLIntCallLinkInfo, 
BasicRawSentinelNode<LLIntCallLinkInfo>> m_incomingLLIntCalls; 972 962 StructureWatchpointMap m_llintGetByIdWatchpointMap; … … 994 984 #endif 995 985 RefCountedArray<ValueProfile> m_argumentValueProfiles; 996 RefCountedArray<ValueProfile> m_valueProfiles;997 986 Vector<std::unique_ptr<ValueProfileAndOperandBuffer>> m_catchProfiles; 998 987 SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles; 999 RefCountedArray<ArrayAllocationProfile> m_arrayAllocationProfiles;1000 988 ArrayProfileVector m_arrayProfiles; 1001 RefCountedArray<ObjectAllocationProfile> m_objectAllocationProfiles;1002 989 1003 990 // Constant Pool … … 1018 1005 uint16_t m_optimizationDelayCounter; 1019 1006 uint16_t m_reoptimizationRetryCounter; 1007 1008 RefPtr<MetadataTable> m_metadata; 1020 1009 1021 1010 MonotonicTime m_creationTime; -
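The forEach* templates declared above stand in for iteration over the RefCountedArray members this patch deletes (m_valueProfiles, m_arrayAllocationProfiles, m_objectAllocationProfiles, m_llintCallLinkInfos). Their definitions are out of line, so the body below is only a plausible sketch built from MetadataTable::forEach and the FOR_EACH_OPCODE_WITH_VALUE_PROFILE macro used in CodeBlock.cpp:

    template<typename Functor>
    void CodeBlock::forEachValueProfile(const Functor& func)
    {
        // Argument profiles keep their dedicated array.
        for (unsigned i = 0; i < numberOfArgumentValueProfiles(); ++i)
            func(valueProfileForArgument(i));

        // Non-argument profiles now live inside each opcode's metadata.
    #define VISIT(Op) \
        m_metadata->forEach<Op>([&] (auto& metadata) { func(metadata.profile); });
        FOR_EACH_OPCODE_WITH_VALUE_PROFILE(VISIT)
    #undef VISIT
    }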
trunk/Source/JavaScriptCore/bytecode/GetByIdMetadata.h
r237546 r237547 1 1 /* 2 * Copyright (C) 201 6Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 26 26 #pragma once 27 27 28 #include "Instruction.h"29 #include "ObjectPropertyCondition.h"30 #include "Watchpoint.h"31 32 28 namespace JSC { 33 29 34 class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint { 35 public: 36 LLIntPrototypeLoadAdaptiveStructureWatchpoint() = default; 37 LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*); 30 enum class GetByIdMode : uint8_t { 31 Default = 0, 32 Unset = 1, 33 ProtoLoad = 2, 34 ArrayLength = 3, 35 }; 38 36 39 void install(VM&); 37 union GetByIdModeMetadata { 38 GetByIdModeMetadata() 39 { } 40 40 41 const ObjectPropertyCondition& key() const { return m_key; } 41 struct Default { 42 StructureID structure; 43 PropertyOffset cachedOffset; 44 } defaultMode; 42 45 43 protected: 44 void fireInternal(VM&, const FireDetail&) override; 46 struct Unset { 47 StructureID structure; 48 } unsetMode; 45 49 46 private: 47 ObjectPropertyCondition m_key; 48 Instruction* m_getByIdInstruction { nullptr }; 50 struct ProtoLoad { 51 StructureID structure; 52 PropertyOffset cachedOffset; 53 JSObject* cachedSlot; 54 } protoLoadMode; 55 56 struct ArrayLength { 57 ArrayProfile arrayProfile; 58 } arrayLengthMode; 49 59 }; 50 60 -
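GetByIdModeMetadata is a plain tagged union; the discriminant is the GetByIdMode stored next to it in OpGetById::Metadata, which the rest of the patch reads as metadata.mode before touching metadata.modeMetadata. A sketch of the access discipline this implies (the helper is illustrative, not part of the patch; invalidOffset comes from PropertyOffset.h):

    // Only the Default and ProtoLoad arms carry a property offset.
    PropertyOffset cachedOffsetFor(GetByIdMode mode, const GetByIdModeMetadata& meta)
    {
        switch (mode) {
        case GetByIdMode::Default:
            return meta.defaultMode.cachedOffset;
        case GetByIdMode::ProtoLoad:
            return meta.protoLoadMode.cachedOffset;
        case GetByIdMode::Unset:
        case GetByIdMode::ArrayLength:
            return invalidOffset; // these arms hold a structure or an ArrayProfile
        }
        return invalidOffset;
    }

This union is also why the old op_get_by_id_proto_load, op_get_by_id_unset and op_get_array_length pseudo-opcodes disappear elsewhere in the patch: they collapse into modes of a single op_get_by_id.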
trunk/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
r237486 r237547 27 27 #include "GetByIdStatus.h" 28 28 29 #include "BytecodeStructs.h" 29 30 #include "CodeBlock.h" 30 31 #include "ComplexGetStatus.h" … … 56 57 VM& vm = *profiledBlock->vm(); 57 58 58 Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; 59 60 switch (Interpreter::getOpcodeID(instruction[0].u.opcode)) { 61 case op_get_by_id: 62 case op_get_by_id_direct: { 63 StructureID structureID = instruction[4].u.structureID; 64 if (!structureID) 59 auto instruction = profiledBlock->instructions().at(bytecodeIndex); 60 61 StructureID structureID; 62 switch (instruction->opcodeID()) { 63 case op_get_by_id: { 64 auto& metadata = instruction->as<OpGetById>().metadata(profiledBlock); 65 // FIXME: We should not just bail if we see a get_by_id_proto_load. 66 // https://bugs.webkit.org/show_bug.cgi?id=158039 67 if (metadata.mode != GetByIdMode::Default) 65 68 return GetByIdStatus(NoInformation, false); 66 67 Structure* structure = vm.heap.structureIDTable().get(structureID); 68 69 if (structure->takesSlowPathInDFGForImpureProperty()) 70 return GetByIdStatus(NoInformation, false); 71 72 unsigned attributes; 73 PropertyOffset offset = structure->getConcurrently(uid, attributes); 74 if (!isValidOffset(offset)) 75 return GetByIdStatus(NoInformation, false); 76 if (attributes & PropertyAttribute::CustomAccessor) 77 return GetByIdStatus(NoInformation, false); 78 79 return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); 80 } 81 82 case op_get_array_length: 83 case op_try_get_by_id: 84 case op_get_by_id_proto_load: 85 case op_get_by_id_unset: { 86 // FIXME: We should not just bail if we see a try_get_by_id or a get_by_id_proto_load. 69 structureID = metadata.modeMetadata.defaultMode.structure; 70 break; 71 } 72 case op_get_by_id_direct: 73 structureID = instruction->as<OpGetByIdDirect>().metadata(profiledBlock).structure; 74 break; 75 case op_try_get_by_id: { 76 // FIXME: We should not just bail if we see a try_get_by_id. 87 77 // https://bugs.webkit.org/show_bug.cgi?id=158039 88 78 return GetByIdStatus(NoInformation, false); … … 94 84 } 95 85 } 86 87 if (!structureID) 88 return GetByIdStatus(NoInformation, false); 89 90 Structure* structure = vm.heap.structureIDTable().get(structureID); 91 92 if (structure->takesSlowPathInDFGForImpureProperty()) 93 return GetByIdStatus(NoInformation, false); 94 95 unsigned attributes; 96 PropertyOffset offset = structure->getConcurrently(uid, attributes); 97 if (!isValidOffset(offset)) 98 return GetByIdStatus(NoInformation, false); 99 if (attributes & PropertyAttribute::CustomAccessor) 100 return GetByIdStatus(NoInformation, false); 101 102 return GetByIdStatus(Simple, false, GetByIdVariant(StructureSet(structure), offset)); 96 103 } 97 104 -
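The rewrite hoists the success path out of the switch: each opcode now only nominates a StructureID, and the shared tail validates it and builds the Simple status. Client code (the DFG bytecode parser, for example) consumes the result through the variant interface; roughly, with accessor names as commonly used by GetByIdStatus clients rather than taken from this diff:

    // status: a GetByIdStatus obtained via the public computeFor entry points.
    if (status.isSimple()) {
        for (unsigned i = 0; i < status.numVariants(); ++i) {
            const GetByIdVariant& variant = status[i];
            // Each variant pairs a StructureSet with the PropertyOffset to load.
        }
    }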
trunk/Source/JavaScriptCore/bytecode/Instruction.h
r237486 r237547 1 1 /* 2 * Copyright (C) 20 08, 2012-2015Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without 5 5 * modification, are permitted provided that the following conditions 6 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 7 12 * 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 3. Neither the name of Apple Inc. ("Apple") nor the names of 14 * its contributors may be used to endorse or promote products derived 15 * from this software without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY 18 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY 21 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' 14 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS 17 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 18 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 19 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 20 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 21 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 22 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 23 * THE POSSIBILITY OF SUCH DAMAGE. 
27 24 */ 28 25 29 26 #pragma once 30 27 31 #include "BasicBlockLocation.h" 32 #include "PutByIdFlags.h" 33 #include "SymbolTable.h" 34 #include "TypeLocation.h" 35 #include "PropertySlot.h" 36 #include "SpecialPointer.h" 37 #include "Structure.h" 38 #include "StructureChain.h" 39 #include "ToThisStatus.h" 40 #include <wtf/VectorTraits.h> 28 #include "Opcode.h" 29 #include "OpcodeSize.h" 41 30 42 31 namespace JSC { 43 32 44 class ArrayAllocationProfile; 45 class ArrayProfile; 46 class ObjectAllocationProfile; 47 class WatchpointSet; 48 struct LLIntCallLinkInfo; 49 struct ValueProfile; 33 struct Instruction { 50 34 51 #if ENABLE(COMPUTED_GOTO_OPCODES) 52 typedef void* Opcode; 53 #else 54 typedef OpcodeID Opcode; 55 #endif 35 struct Metadata { }; 56 36 57 struct Instruction { 58 constexpr Instruction() 59 : u({ nullptr }) 37 protected: 38 Instruction() 39 { } 40 41 private: 42 template<OpcodeSize Width> 43 class Impl { 44 public: 45 OpcodeID opcodeID() const { return static_cast<OpcodeID>(m_opcode); } 46 47 private: 48 typename TypeBySize<Width>::type m_opcode; 49 }; 50 51 public: 52 OpcodeID opcodeID() const 60 53 { 54 if (isWide()) 55 return wide()->opcodeID(); 56 return narrow()->opcodeID(); 61 57 } 62 58 63 Instruction(Opcode opcode)59 const char* name() const 64 60 { 65 #if !ENABLE(COMPUTED_GOTO_OPCODES) 66 // We have to initialize one of the pointer members to ensure that 67 // the entire struct is initialized, when opcode is not a pointer. 68 u.jsCell.clear(); 69 #endif 70 u.opcode = opcode; 61 return opcodeNames[opcodeID()]; 71 62 } 72 63 73 Instruction(int operand)64 bool isWide() const 74 65 { 75 // We have to initialize one of the pointer members to ensure that 76 // the entire struct is initialized in 64-bit. 77 u.jsCell.clear(); 78 u.operand = operand; 79 } 80 Instruction(unsigned unsignedValue) 81 { 82 // We have to initialize one of the pointer members to ensure that 83 // the entire struct is initialized in 64-bit. 84 u.jsCell.clear(); 85 u.unsignedValue = unsignedValue; 66 return narrow()->opcodeID() == op_wide; 86 67 } 87 68 88 Instruction(PutByIdFlags flags)69 size_t size() const 89 70 { 90 u.putByIdFlags = flags; 71 auto wide = isWide(); 72 auto padding = wide ? 1 : 0; 73 auto size = wide ? 
4 : 1; 74 return opcodeLengths[opcodeID()] * size + padding; 91 75 } 92 76 93 Instruction(VM& vm, JSCell* owner, Structure* structure) 77 template<class T> 78 bool is() const 94 79 { 95 u.structure.clear(); 96 u.structure.set(vm, owner, structure); 97 } 98 Instruction(VM& vm, JSCell* owner, StructureChain* structureChain) 99 { 100 u.structureChain.clear(); 101 u.structureChain.set(vm, owner, structureChain); 102 } 103 Instruction(VM& vm, JSCell* owner, JSCell* jsCell) 104 { 105 u.jsCell.clear(); 106 u.jsCell.set(vm, owner, jsCell); 80 return opcodeID() == T::opcodeID; 107 81 } 108 82 109 Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; } 110 111 Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; } 112 Instruction(ValueProfile* profile) { u.profile = profile; } 113 Instruction(ArrayProfile* profile) { u.arrayProfile = profile; } 114 Instruction(ArrayAllocationProfile* profile) { u.arrayAllocationProfile = profile; } 115 Instruction(ObjectAllocationProfile* profile) { u.objectAllocationProfile = profile; } 116 Instruction(WriteBarrier<Unknown>* variablePointer) { u.variablePointer = variablePointer; } 117 Instruction(Special::Pointer pointer) { u.specialPointer = pointer; } 118 Instruction(UniquedStringImpl* uid) { u.uid = uid; } 119 Instruction(bool* predicatePointer) { u.predicatePointer = predicatePointer; } 83 template<class T> 84 T as() const 85 { 86 ASSERT(is<T>()); 87 return T::decode(reinterpret_cast<const uint8_t*>(this)); 88 } 120 89 121 union { 122 void* pointer; 123 Opcode opcode; 124 int operand; 125 unsigned unsignedValue; 126 WriteBarrierBase<Structure> structure; 127 StructureID structureID; 128 WriteBarrierBase<SymbolTable> symbolTable; 129 WriteBarrierBase<StructureChain> structureChain; 130 WriteBarrierBase<JSCell> jsCell; 131 WriteBarrier<Unknown>* variablePointer; 132 Special::Pointer specialPointer; 133 PropertySlot::GetValueFunc getterFunc; 134 LLIntCallLinkInfo* callLinkInfo; 135 UniquedStringImpl* uid; 136 ValueProfile* profile; 137 ArrayProfile* arrayProfile; 138 ArrayAllocationProfile* arrayAllocationProfile; 139 ObjectAllocationProfile* objectAllocationProfile; 140 WatchpointSet* watchpointSet; 141 bool* predicatePointer; 142 ToThisStatus toThisStatus; 143 TypeLocation* location; 144 BasicBlockLocation* basicBlockLocation; 145 PutByIdFlags putByIdFlags; 146 } u; 147 148 private: 149 Instruction(StructureChain*); 150 Instruction(Structure*); 90 template<class T> 91 T* cast() 92 { 93 ASSERT(is<T>()); 94 return reinterpret_cast<T*>(this); 95 } 96 97 template<class T> 98 const T* cast() const 99 { 100 ASSERT(is<T>()); 101 return reinterpret_cast<const T*>(this); 102 } 103 104 const Impl<OpcodeSize::Narrow>* narrow() const 105 { 106 return reinterpret_cast<const Impl<OpcodeSize::Narrow>*>(this); 107 } 108 109 const Impl<OpcodeSize::Wide>* wide() const 110 { 111 112 ASSERT(isWide()); 113 return reinterpret_cast<const Impl<OpcodeSize::Wide>*>((uintptr_t)this + 1); 114 } 151 115 }; 152 static_assert(sizeof(Instruction) == sizeof(void*), "");153 116 154 117 } // namespace JSC 155 156 namespace WTF {157 158 template<> struct VectorTraits<JSC::Instruction> : VectorTraitsBase<true, JSC::Instruction> { };159 160 } // namespace WTF -
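The new Instruction is a zero-size view over the byte stream: a one-byte opcode followed by one-byte operands, or, when prefixed by op_wide, a four-byte opcode and four-byte operands starting one byte in (hence the (uintptr_t)this + 1 in wide()). For a hypothetical three-operand opcode with opcodeLengths[op] == 4, size() therefore yields 4 bytes narrow and 4 * 4 + 1 = 17 bytes wide. A sketch of walking such a buffer with this API, assuming only the layout just described:

    void dumpRawStream(const uint8_t* begin, const uint8_t* end)
    {
        const uint8_t* cursor = begin;
        while (cursor < end) {
            auto* instruction = reinterpret_cast<const JSC::Instruction*>(cursor);
            dataLogF("%s: %s, %zu bytes\n", instruction->name(),
                instruction->isWide() ? "wide" : "narrow", instruction->size());
            cursor += instruction->size(); // narrow: length bytes; wide: length * 4 + 1 prefix byte
        }
    }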
trunk/Source/JavaScriptCore/bytecode/InstructionStream.cpp
r237546 r237547 1 1 /* 2 * Copyright (C) 201 2Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 26 #include "config.h" 27 #include " SpecialPointer.h"27 #include "InstructionStream.h" 28 28 29 #include "CodeBlock.h" 30 #include "JSGlobalObject.h" 31 #include "JSCInlines.h" 29 #include "Instruction.h" 30 #include "Opcode.h" 32 31 33 32 namespace JSC { 34 33 35 void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer) 34 InstructionStream::InstructionStream(InstructionBuffer&& instructions) 35 : m_instructions(WTFMove(instructions)) 36 { } 37 38 size_t InstructionStream::sizeInBytes() const 36 39 { 37 return globalObject->actualPointerFor(pointer);40 return m_instructions.size(); 38 41 } 39 42 40 void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer)41 {42 return actualPointerFor(codeBlock->globalObject(), pointer);43 43 } 44 45 } // namespace JSC46 -
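InstructionStream is deliberately thin: it owns the byte buffer and reports its size, while everything else in the patch goes through its Ref type (offset(), ptr(), next(), operator->) and range-based iteration. A counting pass, for instance, would look like this (hypothetical helper; the iteration pattern matches the for (const auto& instruction : *m_instructions) loops in CodeBlock.cpp):

    unsigned countOccurrences(const InstructionStream& stream, OpcodeID id)
    {
        unsigned count = 0;
        for (const auto& instruction : stream) {
            if (instruction->opcodeID() == id) // instruction is an InstructionStream::Ref
                ++count;
        }
        return count;
    }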
trunk/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.cpp
r237486 r237547 33 33 namespace JSC { 34 34 35 LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, Instruction* getByIdInstruction)35 LLIntPrototypeLoadAdaptiveStructureWatchpoint::LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition& key, OpGetById::Metadata& getByIdMetadata) 36 36 : m_key(key) 37 , m_getById Instruction(getByIdInstruction)37 , m_getByIdMetadata(getByIdMetadata) 38 38 { 39 39 RELEASE_ASSERT(key.watchingRequiresStructureTransitionWatchpoint()); … … 55 55 } 56 56 57 CodeBlock::clearLLIntGetByIdCache(m_getByIdInstruction);57 clearLLIntGetByIdCache(m_getByIdMetadata); 58 58 } 59 59 60 void LLIntPrototypeLoadAdaptiveStructureWatchpoint::clearLLIntGetByIdCache(OpGetById::Metadata& metadata) 61 { 62 metadata.mode = GetByIdMode::Default; 63 metadata.modeMetadata.defaultMode.cachedOffset = 0; 64 metadata.modeMetadata.defaultMode.structure = 0; 65 } 66 67 60 68 } // namespace JSC -
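fireInternal now resets the cache through the OpGetById::Metadata reference captured at construction, instead of patching instruction slots. The install side lives in the LLInt slow paths rather than in this file, but from the types above it has to look roughly like this, pairing every ObjectPropertyCondition of the prototype chain with the same metadata entry:

    // Sketch: watch each condition a proto-load cache depends on.
    // `watchpoints` is the Bag kept in CodeBlock's llintGetByIdWatchpointMap().
    for (const ObjectPropertyCondition& condition : conditionSet) {
        auto* watchpoint = watchpoints.add(condition, metadata);
        watchpoint->install(vm);
    }

When any watched structure transitions, the watchpoint fires once, flips the metadata back to GetByIdMode::Default, and the next execution of that get_by_id re-caches from scratch.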
trunk/Source/JavaScriptCore/bytecode/LLIntPrototypeLoadAdaptiveStructureWatchpoint.h
r237486 r237547 26 26 #pragma once 27 27 28 #include " Instruction.h"28 #include "BytecodeStructs.h" 29 29 #include "ObjectPropertyCondition.h" 30 30 #include "Watchpoint.h" … … 35 35 public: 36 36 LLIntPrototypeLoadAdaptiveStructureWatchpoint() = default; 37 LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*);37 LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, OpGetById::Metadata&); 38 38 39 39 void install(VM&); 40 41 static void clearLLIntGetByIdCache(OpGetById::Metadata&); 40 42 41 43 const ObjectPropertyCondition& key() const { return m_key; } … … 46 48 private: 47 49 ObjectPropertyCondition m_key; 48 Instruction* m_getByIdInstruction { nullptr };50 OpGetById::Metadata& m_getByIdMetadata; 49 51 }; 50 52 -
trunk/Source/JavaScriptCore/bytecode/MetadataTable.cpp
r237546 r237547 1 1 /* 2 * Copyright (C) 201 2Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 26 #include "config.h" 27 #include " SpecialPointer.h"27 #include "MetadataTable.h" 28 28 29 29 #include "CodeBlock.h" 30 #include "JSGlobalObject.h" 31 #include "JSCInlines.h" 30 #include "OpcodeInlines.h" 31 #include "UnlinkedMetadataTableInlines.h" 32 #include <wtf/FastMalloc.h> 32 33 33 34 namespace JSC { 34 35 35 void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer) 36 MetadataTable::MetadataTable(UnlinkedMetadataTable& unlinkedMetadata, UnlinkedMetadataTable::Offset* buffer) 37 : m_buffer(buffer) 38 , m_unlinkedMetadata(unlinkedMetadata) 36 39 { 37 return globalObject->actualPointerFor(pointer);38 40 } 39 41 40 void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer) 42 struct DeallocTable { 43 template<typename Op> 44 static void withOpcodeType(MetadataTable* table) 45 { 46 table->forEach<Op>([](auto& entry) { 47 entry.~Metadata(); 48 }); 49 } 50 }; 51 52 MetadataTable::~MetadataTable() 41 53 { 42 return actualPointerFor(codeBlock->globalObject(), pointer); 54 for (unsigned i = 0; i < NUMBER_OF_BYTECODE_WITH_METADATA; i++) 55 getOpcodeType<DeallocTable>(static_cast<OpcodeID>(i), this); 56 m_unlinkedMetadata.unlink(*this); 57 } 58 59 size_t MetadataTable::sizeInBytes() 60 { 61 return m_unlinkedMetadata.sizeInBytes(*this); 43 62 } 44 63 45 64 } // namespace JSC 46 -
trunk/Source/JavaScriptCore/bytecode/MetadataTable.h
r237546 r237547 1 1 /* 2 * Copyright (C) 201 3-2017Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 26 #pragma once 27 27 28 #include <wtf/Lock.h>29 #include <wtf/Ref.h>30 #include <wtf/ThreadSafeRefCounted.h>31 #include <wtf/ WorkQueue.h>28 #include "Instruction.h" 29 #include "Opcode.h" 30 #include "UnlinkedMetadataTable.h" 31 #include <wtf/RefCounted.h> 32 32 33 33 namespace JSC { 34 34 35 class ExecState; 36 class VM; 35 class CodeBlock; 37 36 38 class Watchdog : public WTF::ThreadSafeRefCounted<Watchdog> { 39 WTF_MAKE_FAST_ALLOCATED; 37 class MetadataTable : public RefCounted<MetadataTable> { 38 friend class LLIntOffsetsExtractor; 39 friend class UnlinkedMetadataTable; 40 40 41 public: 41 class Scope;42 ~MetadataTable(); 42 43 43 Watchdog(VM*); 44 void willDestroyVM(VM*); 44 ALWAYS_INLINE Instruction::Metadata* get(OpcodeID opcodeID) 45 { 46 ASSERT(m_buffer && opcodeID < NUMBER_OF_BYTECODE_WITH_METADATA); 47 return reinterpret_cast<Instruction::Metadata*>(getImpl(opcodeID)); 48 } 45 49 46 typedef bool (*ShouldTerminateCallback)(ExecState*, void* data1, void* data2);47 void setTimeLimit(Seconds limit, ShouldTerminateCallback = 0, void* data1 = 0, void* data2 = 0);48 50 49 bool shouldTerminate(ExecState*); 51 template<typename Op, typename Functor> 52 ALWAYS_INLINE void forEach(const Functor& func) 53 { 54 if (!m_buffer) 55 return; 50 56 51 bool hasTimeLimit(); 52 void enteredVM(); 53 void exitedVM(); 57 auto* metadata = reinterpret_cast<typename Op::Metadata*>(get(Op::opcodeID)); 58 auto* end = reinterpret_cast<typename Op::Metadata*>(getImpl(Op::opcodeID + 1)); 59 for (; metadata != end; ++metadata) 60 func(*metadata); 61 } 54 62 55 s tatic const Seconds noTimeLimit;63 size_t sizeInBytes(); 56 64 57 65 private: 58 void startTimer(Seconds timeLimit); 59 void stopTimer(); 66 MetadataTable(UnlinkedMetadataTable&, UnlinkedMetadataTable::Offset*); 60 67 61 Lock m_lock; // Guards access to m_vm. 62 VM* m_vm; 68 ALWAYS_INLINE uint8_t* getImpl(unsigned i) 69 { 70 return reinterpret_cast<uint8_t*>(m_buffer) + m_buffer[i]; 71 } 63 72 64 Seconds m_timeLimit; 65 66 Seconds m_cpuDeadline; 67 MonotonicTime m_deadline; 68 69 bool m_hasEnteredVM { false }; 70 71 ShouldTerminateCallback m_callback; 72 void* m_callbackData1; 73 void* m_callbackData2; 74 75 Ref<WorkQueue> m_timerQueue; 76 77 friend class LLIntOffsetsExtractor; 73 UnlinkedMetadataTable::Offset* m_buffer; 74 UnlinkedMetadataTable& m_unlinkedMetadata; 78 75 }; 79 76 -
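The layout implied by getImpl() above: the linked table is a single allocation whose head is an offset table (one Offset per opcode with metadata, plus a trailing end marker), so get(op) is just base plus offsets[op], and all entries for one opcode sit in the contiguous range [offsets[op], offsets[op + 1]). A toy version of that accessor arithmetic, assuming byte offsets measured from the start of the buffer as in the patch:

    #include <cstdint>

    using Offset = uint32_t;

    // base points at the offset table; the entry bytes follow in the same buffer.
    uint8_t* entriesFor(Offset* base, unsigned opcodeID)
    {
        return reinterpret_cast<uint8_t*>(base) + base[opcodeID];
    }

    // Visit one opcode's entries, mirroring MetadataTable::forEach.
    template<typename Metadata, typename F>
    void forEachEntry(Offset* base, unsigned opcodeID, F&& func)
    {
        auto* cursor = reinterpret_cast<Metadata*>(entriesFor(base, opcodeID));
        auto* end = reinterpret_cast<Metadata*>(entriesFor(base, opcodeID + 1));
        for (; cursor != end; ++cursor)
            func(*cursor);
    }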
trunk/Source/JavaScriptCore/bytecode/Opcode.cpp
r237486 r237547 31 31 #include "Opcode.h" 32 32 33 #include "BytecodeStructs.h" 33 34 #include <wtf/PrintStream.h> 34 35 … … 185 186 #endif 186 187 188 static unsigned metadataSizes[] = { 189 190 #define METADATA_SIZE(size) size, 191 FOR_EACH_BYTECODE_METADATA_SIZE(METADATA_SIZE) 192 #undef METADATA_SIZE 193 194 }; 195 196 unsigned metadataSize(OpcodeID opcodeID) 197 { 198 return metadataSizes[opcodeID]; 199 } 200 201 187 202 } // namespace JSC 188 203 -
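The metadataSizes table is the usual X-macro expansion: the FOR_EACH list contributes one array element per opcode, so metadataSize() is a single indexed load. The same pattern in miniature (the three-entry list below is made up for illustration):

    // Hypothetical list standing in for FOR_EACH_BYTECODE_METADATA_SIZE.
    #define FOR_EACH_TOY_METADATA_SIZE(macro) macro(0) macro(16) macro(8)

    static unsigned toyMetadataSizes[] = {
    #define METADATA_SIZE(size) size,
        FOR_EACH_TOY_METADATA_SIZE(METADATA_SIZE)
    #undef METADATA_SIZE
    };

    unsigned toyMetadataSize(unsigned opcodeID)
    {
        return toyMetadataSizes[opcodeID]; // e.g. toyMetadataSize(1) == 16
    }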
trunk/Source/JavaScriptCore/bytecode/Opcode.h
r237486 r237547 54 54 55 55 56 #define OPCODE_ID_ENUM(opcode, length) opcode, 57 enum OpcodeID : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; 58 #undef OPCODE_ID_ENUM 59 60 const int maxOpcodeLength = 9; 56 const int maxOpcodeLength = 40; 61 57 #if ENABLE(C_LOOP) 62 58 const int numOpcodeIDs = NUMBER_OF_BYTECODE_IDS + NUMBER_OF_CLOOP_BYTECODE_HELPER_IDS + NUMBER_OF_BYTECODE_HELPER_IDS; … … 65 61 #endif 66 62 63 #define OPCODE_ID_ENUM(opcode, length) opcode, 64 enum OpcodeID : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; 65 #undef OPCODE_ID_ENUM 66 67 #if ENABLE(C_LOOP) && !HAVE(COMPUTED_GOTO) 68 69 #define OPCODE_ID_ENUM(opcode, length) opcode##_wide = numOpcodeIDs + opcode, 70 enum OpcodeIDWide : unsigned { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) }; 71 #undef OPCODE_ID_ENUM 72 #endif 73 74 #define OPCODE_LENGTH(opcode, length) length, 75 static unsigned opcodeLengths[] = { 76 FOR_EACH_OPCODE_ID(OPCODE_LENGTH) 77 }; 78 #undef OPCODE_LENGTH 79 67 80 #define OPCODE_ID_LENGTHS(id, length) const int id##_length = length; 68 81 FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS); 69 82 #undef OPCODE_ID_LENGTHS 70 83 71 #define OPCODE_LENGTH(opcode) opcode##_length 72 73 #define OPCODE_ID_LENGTH_MAP(opcode, length) length, 74 const int opcodeLengths[numOpcodeIDs] = { FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTH_MAP) }; 75 #undef OPCODE_ID_LENGTH_MAP 84 #define FOR_EACH_OPCODE_WITH_VALUE_PROFILE(macro) \ 85 macro(OpCallVarargs) \ 86 macro(OpTailCallVarargs) \ 87 macro(OpTailCallForwardArguments) \ 88 macro(OpConstructVarargs) \ 89 macro(OpGetByVal) \ 90 macro(OpGetDirectPname) \ 91 macro(OpGetById) \ 92 macro(OpGetByIdWithThis) \ 93 macro(OpTryGetById) \ 94 macro(OpGetByIdDirect) \ 95 macro(OpGetByValWithThis) \ 96 macro(OpGetFromArguments) \ 97 macro(OpToNumber) \ 98 macro(OpToObject) \ 99 macro(OpGetArgument) \ 100 macro(OpToThis) \ 101 macro(OpCall) \ 102 macro(OpTailCall) \ 103 macro(OpCallEval) \ 104 macro(OpConstruct) \ 105 macro(OpGetFromScope) \ 106 macro(OpBitand) \ 107 macro(OpBitor) \ 108 109 #define FOR_EACH_OPCODE_WITH_ARRAY_PROFILE(macro) \ 110 macro(OpHasIndexedProperty) \ 111 macro(OpCallVarargs) \ 112 macro(OpTailCallVarargs) \ 113 macro(OpTailCallForwardArguments) \ 114 macro(OpConstructVarargs) \ 115 macro(OpGetByVal) \ 116 macro(OpCall) \ 117 macro(OpTailCall) \ 118 macro(OpCallEval) \ 119 macro(OpConstruct) \ 120 macro(OpInByVal) \ 121 macro(OpPutByVal) \ 122 macro(OpPutByValDirect) \ 123 124 #define FOR_EACH_OPCODE_WITH_ARRAY_ALLOCATION_PROFILE(macro) \ 125 macro(OpNewArray) \ 126 macro(OpNewArrayWithSize) \ 127 macro(OpNewArrayBuffer) \ 128 129 #define FOR_EACH_OPCODE_WITH_OBJECT_ALLOCATION_PROFILE(macro) \ 130 macro(OpNewObject) \ 131 132 #define FOR_EACH_OPCODE_WITH_LLINT_CALL_LINK_INFO(macro) \ 133 macro(OpCall) \ 134 macro(OpTailCall) \ 135 macro(OpCallEval) \ 136 macro(OpConstruct) \ 76 137 77 138 IGNORE_WARNINGS_BEGIN("type-limits") … … 118 179 119 180 #endif 120 121 inline size_t opcodeLength(OpcodeID opcode)122 {123 switch (opcode) {124 #define OPCODE_ID_LENGTHS(id, length) case id: return OPCODE_LENGTH(id);125 FOR_EACH_OPCODE_ID(OPCODE_ID_LENGTHS)126 #undef OPCODE_ID_LENGTHS127 }128 RELEASE_ASSERT_NOT_REACHED();129 return 0;130 }131 181 132 182 inline bool isBranch(OpcodeID opcodeID) … … 195 245 } 196 246 247 unsigned metadataSize(OpcodeID); 248 197 249 } // namespace JSC 198 250 -
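One detail worth calling out from the Opcode.h hunk: when computed goto is unavailable, the C loop cannot share a single label between the narrow and wide forms of an opcode, so each wide variant gets its own ID in a second bank starting at numOpcodeIDs (`opcode##_wide = numOpcodeIDs + opcode`). Concretely, with toy IDs rather than the real table:

    // Toy bank layout: with N = 2 base opcodes, the wide IDs occupy [N, 2N).
    enum ToyOpcodeID : unsigned { toy_op_enter, toy_op_add, toy_numOpcodeIDs };

    enum ToyOpcodeIDWide : unsigned {
        toy_op_enter_wide = toy_numOpcodeIDs + toy_op_enter, // == 2
        toy_op_add_wide   = toy_numOpcodeIDs + toy_op_add,   // == 3
    };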
trunk/Source/JavaScriptCore/bytecode/OpcodeInlines.h
r237486 r237547 27 27 28 28 #include "ArrayProfile.h" 29 #include "BytecodeStructs.h" 29 30 #include "Instruction.h" 30 31 #include "InterpreterInlines.h" … … 36 37 AnyOpcodeShape, 37 38 OpCallShape, 38 OpHasIndexedPropertyShape,39 OpGetArrayLengthShape,40 OpGetByValShape,41 OpInByValShape,42 OpPutByValShape,43 39 }; 44 40 … … 47 43 { 48 44 if (shape == OpCallShape) { 49 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), "");50 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval), "");51 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), "");52 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), "");53 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), "");54 45 return opcodeID == op_call 55 46 || opcodeID == op_tail_call … … 60 51 } 61 52 62 if (shape == OpHasIndexedPropertyShape)63 return opcodeID == op_has_indexed_property;64 65 if (shape == OpGetArrayLengthShape)66 return opcodeID == op_get_array_length;67 68 if (shape == OpGetByValShape)69 return opcodeID == op_get_by_val;70 71 if (shape == OpInByValShape)72 return opcodeID == op_in_by_val;73 74 if (shape == OpPutByValShape) {75 static_assert(OPCODE_LENGTH(op_put_by_val) == OPCODE_LENGTH(op_put_by_val_direct), "");76 return opcodeID == op_put_by_val77 || opcodeID == op_put_by_val_direct;78 }79 80 53 RELEASE_ASSERT_NOT_REACHED(); 81 54 } … … 84 57 inline bool isOpcodeShape(const Instruction* instruction) 85 58 { 86 OpcodeID opcodeID = Interpreter::getOpcodeID(*instruction); 87 return isOpcodeShape<shape>(opcodeID); 59 return isOpcodeShape<shape>(instruction->opcodeID()); 88 60 } 89 61 90 template< OpcodeShape shape = AnyOpcodeShape>91 inline ArrayProfile* arrayProfileFor(const Instruction* instruction)62 template<typename T, typename... Args> 63 void getOpcodeType(OpcodeID opcodeID, Args&&... 
args) 92 64 { 93 ArrayProfile* arrayProfile = nullptr;94 OpcodeID opcodeID = Interpreter::getOpcodeID(*instruction);95 if (OpCallShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpCallShape>(opcodeID))) {96 ASSERT(isOpcodeShape<OpCallShape>(instruction));97 arrayProfile = instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile;98 65 99 } else if (OpHasIndexedPropertyShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpHasIndexedPropertyShape>(opcodeID))) { 100 ASSERT(isOpcodeShape<OpHasIndexedPropertyShape>(instruction)); 101 arrayProfile = instruction[4].u.arrayProfile; 66 #define CASE(__Op) \ 67 case __Op::opcodeID: \ 68 T::template withOpcodeType<__Op>(std::forward<Args>(args)...); \ 69 break; \ 102 70 103 } else if (OpGetArrayLengthShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpGetArrayLengthShape>(opcodeID))) { 104 ASSERT(isOpcodeShape<OpGetArrayLengthShape>(instruction)); 105 arrayProfile = instruction[4].u.arrayProfile; 71 switch (opcodeID) { 72 FOR_EACH_BYTECODE_STRUCT(CASE) 73 default: 74 ASSERT_NOT_REACHED(); 75 } 106 76 107 } else if (OpGetByValShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpGetByValShape>(opcodeID))) { 108 ASSERT(isOpcodeShape<OpGetByValShape>(instruction)); 109 arrayProfile = instruction[4].u.arrayProfile; 110 111 } else if (OpInByValShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpInByValShape>(opcodeID))) { 112 ASSERT(isOpcodeShape<OpInByValShape>(instruction)); 113 arrayProfile = instruction[OPCODE_LENGTH(op_in_by_val) - 1].u.arrayProfile; 114 115 } else if (OpPutByValShape == shape || (AnyOpcodeShape == shape && isOpcodeShape<OpPutByValShape>(opcodeID))) { 116 ASSERT(isOpcodeShape<OpPutByValShape>(instruction)); 117 arrayProfile = instruction[4].u.arrayProfile; 118 119 } else if (AnyOpcodeShape != shape) 120 RELEASE_ASSERT_NOT_REACHED(); 121 122 ASSERT(!arrayProfile || arrayProfile->isValid()); 123 return arrayProfile; 77 #undef CASE 124 78 } 125 79 -
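getOpcodeType is the bridge from a runtime OpcodeID back into template land: the FOR_EACH_BYTECODE_STRUCT switch instantiates the handler's withOpcodeType<Op> once per opcode struct, which is how ~MetadataTable above destructs the right Metadata type for every opcode. A self-contained sketch of the pattern with stand-in ops:

    #include <cstdio>
    #include <utility>

    struct OpFoo { static constexpr unsigned opcodeID = 0; };
    struct OpBar { static constexpr unsigned opcodeID = 1; };

    #define FOR_EACH_TOY_STRUCT(macro) macro(OpFoo) macro(OpBar)

    template<typename T, typename... Args>
    void toyGetOpcodeType(unsigned opcodeID, Args&&... args)
    {
    #define CASE(Op) \
        case Op::opcodeID: \
            T::template withOpcodeType<Op>(std::forward<Args>(args)...); \
            break;
        switch (opcodeID) {
        FOR_EACH_TOY_STRUCT(CASE)
        }
    #undef CASE
    }

    struct PrintOpcode {
        template<typename Op>
        static void withOpcodeType(const char* tag) { std::printf("%s: %u\n", tag, Op::opcodeID); }
    };

    // usage: toyGetOpcodeType<PrintOpcode>(1, "dispatched"); // prints "dispatched: 1"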
trunk/Source/JavaScriptCore/bytecode/OpcodeSize.h
r237546 r237547 1 1 /* 2 * Copyright (C) 201 2Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 #include "config.h" 27 #include "SpecialPointer.h" 28 29 #include "CodeBlock.h" 30 #include "JSGlobalObject.h" 31 #include "JSCInlines.h" 26 #pragma once 32 27 33 28 namespace JSC { 34 29 35 void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer) 36 { 37 return globalObject->actualPointerFor(pointer);38 } 30 enum OpcodeSize { 31 Narrow = 1, 32 Wide = 4, 33 }; 39 34 40 void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer) 41 { 42 return actualPointerFor(codeBlock->globalObject(), pointer); 43 } 35 template<OpcodeSize> 36 struct TypeBySize; 37 38 template<> 39 struct TypeBySize<OpcodeSize::Narrow> { 40 using type = uint8_t; 41 }; 42 43 template<> 44 struct TypeBySize<OpcodeSize::Wide> { 45 using type = uint32_t; 46 }; 47 48 template<OpcodeSize> 49 struct PaddingBySize; 50 51 template<> 52 struct PaddingBySize<OpcodeSize::Narrow> { 53 static constexpr uint8_t value = 0; 54 }; 55 56 template<> 57 struct PaddingBySize<OpcodeSize::Wide> { 58 static constexpr uint8_t value = 1; 59 }; 44 60 45 61 } // namespace JSC 46 -
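OpcodeSize is what ties the two encodings together: TypeBySize picks uint8_t or uint32_t for operand storage, and PaddingBySize accounts for the one-byte wide prefix. That is exactly the size() arithmetic visible in Instruction.h. A compile-time sketch, assuming opcodeLengths still counts the opcode slot plus its operands as in the old format:

    #include <cstddef>

    enum ToyOpcodeSize { ToyNarrow = 1, ToyWide = 4 };

    constexpr size_t instructionBytes(size_t opcodeLength, bool isWide)
    {
        size_t size = isWide ? ToyWide : ToyNarrow;
        size_t padding = isWide ? 1 : 0; // the wide prefix byte
        return opcodeLength * size + padding;
    }

    static_assert(instructionBytes(4, false) == 4, "narrow: 4 one-byte slots");
    static_assert(instructionBytes(4, true) == 17, "wide: 4 four-byte slots plus prefix");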
trunk/Source/JavaScriptCore/bytecode/PreciseJumpTargets.cpp
r237486 r237547 33 33 namespace JSC { 34 34 35 template <size_t vectorSize, typename Block , typename Instruction>36 static void getJumpTargetsFor BytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, vectorSize>& out)35 template <size_t vectorSize, typename Block> 36 static void getJumpTargetsForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, vectorSize>& out) 37 37 { 38 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); 39 extractStoredJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { 40 out.append(bytecodeOffset + relativeOffset); 38 extractStoredJumpTargetsForInstruction(codeBlock, instruction, [&](int32_t relativeOffset) { 39 out.append(instruction.offset() + relativeOffset); 41 40 }); 41 OpcodeID opcodeID = instruction->opcodeID(); 42 42 // op_loop_hint does not have jump target stored in bytecode instructions. 43 43 if (opcodeID == op_loop_hint) 44 out.append( bytecodeOffset);44 out.append(instruction.offset()); 45 45 else if (opcodeID == op_enter && codeBlock->hasTailCalls() && Options::optimizeRecursiveTailCalls()) { 46 46 // We need to insert a jump after op_enter, so recursive tail calls have somewhere to jump to. 47 47 // But we only want to pay that price for functions that have at least one tail call. 48 out.append( bytecodeOffset + opcodeLengths[op_enter]);48 out.append(instruction.next().offset()); 49 49 } 50 50 } … … 55 55 }; 56 56 57 template<ComputePreciseJumpTargetsMode Mode, typename Block, typename Instruction,size_t vectorSize>58 void computePreciseJumpTargetsInternal(Block* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, vectorSize>& out)57 template<ComputePreciseJumpTargetsMode Mode, typename Block, size_t vectorSize> 58 void computePreciseJumpTargetsInternal(Block* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, vectorSize>& out) 59 59 { 60 60 ASSERT(out.isEmpty()); … … 70 70 } 71 71 72 for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) { 73 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); 74 getJumpTargetsForBytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out); 75 bytecodeOffset += opcodeLengths[opcodeID]; 72 for (const auto& instruction : instructions) { 73 getJumpTargetsForInstruction(codeBlock, instruction, out); 76 74 } 77 75 … … 92 90 } 93 91 94 void computePreciseJumpTargets(CodeBlock* codeBlock, Vector< unsigned, 32>& out)92 void computePreciseJumpTargets(CodeBlock* codeBlock, Vector<InstructionStream::Offset, 32>& out) 95 93 { 96 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions() .begin(), codeBlock->instructions().size(), out);94 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, codeBlock->instructions(), out); 97 95 } 98 96 99 void computePreciseJumpTargets(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)97 void computePreciseJumpTargets(CodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) 100 98 { 101 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions Begin, instructionCount, out);99 
computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); 102 100 } 103 101 104 void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out)102 void computePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out) 105 103 { 106 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions Begin, instructionCount, out);104 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::FollowCodeBlockClaim>(codeBlock, instructions, out); 107 105 } 108 106 109 void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out)107 void recomputePreciseJumpTargets(UnlinkedCodeBlock* codeBlock, const InstructionStream& instructions, Vector<InstructionStream::Offset>& out) 110 108 { 111 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructions Begin, instructionCount, out);109 computePreciseJumpTargetsInternal<ComputePreciseJumpTargetsMode::ForceCompute>(codeBlock, instructions, out); 112 110 } 113 111 114 void findJumpTargetsFor BytecodeOffset(CodeBlock* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)112 void findJumpTargetsForInstruction(CodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) 115 113 { 116 getJumpTargetsFor BytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out);114 getJumpTargetsForInstruction(codeBlock, instruction, out); 117 115 } 118 116 119 void findJumpTargetsFor BytecodeOffset(UnlinkedCodeBlock* codeBlock, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out)117 void findJumpTargetsForInstruction(UnlinkedCodeBlock* codeBlock, const InstructionStream::Ref& instruction, Vector<InstructionStream::Offset, 1>& out) 120 118 { 121 getJumpTargetsFor BytecodeOffset(codeBlock, instructionsBegin, bytecodeOffset, out);119 getJumpTargetsForInstruction(codeBlock, instruction, out); 122 120 } 123 121 -
trunk/Source/JavaScriptCore/bytecode/PreciseJumpTargets.h
r237486 r237547 31 31 32 32 class UnlinkedCodeBlock; 33 struct UnlinkedInstruction;34 33 35 34 // Return a sorted list of bytecode index that are the destination of a jump. 36 void computePreciseJumpTargets(CodeBlock*, Vector< unsigned, 32>& out);37 void computePreciseJumpTargets(CodeBlock*, Instruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);38 void computePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned, 32>& out);35 void computePreciseJumpTargets(CodeBlock*, Vector<InstructionStream::Offset, 32>& out); 36 void computePreciseJumpTargets(CodeBlock*, const InstructionStream& instructions, Vector<InstructionStream::Offset, 32>& out); 37 void computePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream&, Vector<InstructionStream::Offset, 32>& out); 39 38 40 void recomputePreciseJumpTargets(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned instructionCount, Vector<unsigned>& out);39 void recomputePreciseJumpTargets(UnlinkedCodeBlock*, const InstructionStream&, Vector<InstructionStream::Offset>& out); 41 40 42 void findJumpTargetsFor BytecodeOffset(CodeBlock*, Instruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);43 void findJumpTargetsFor BytecodeOffset(UnlinkedCodeBlock*, UnlinkedInstruction* instructionsBegin, unsigned bytecodeOffset, Vector<unsigned, 1>& out);41 void findJumpTargetsForInstruction(CodeBlock*, const InstructionStream::Ref&, Vector<InstructionStream::Offset, 1>& out); 42 void findJumpTargetsForInstruction(UnlinkedCodeBlock*, const InstructionStream::Ref&, Vector<InstructionStream::Offset, 1>& out); 44 43 45 44 } // namespace JSC -
trunk/Source/JavaScriptCore/bytecode/PreciseJumpTargetsInlines.h
r237486 r237547 26 26 #pragma once 27 27 28 #include "BytecodeStructs.h" 28 29 #include "InterpreterInlines.h" 29 30 #include "Opcode.h" … … 32 33 namespace JSC { 33 34 34 template<typename Block, typename Instruction, typename Function> 35 inline void extractStoredJumpTargetsForBytecodeOffset(Block* codeBlock, Instruction* instructionsBegin, unsigned bytecodeOffset, Function function) 35 #define SWITCH_JMP(CASE_OP, SWITCH_CASE, SWITCH_DEFAULT_OFFSET) \ 36 switch (instruction->opcodeID()) { \ 37 CASE_OP(OpJmp) \ 38 \ 39 CASE_OP(OpJtrue) \ 40 CASE_OP(OpJfalse) \ 41 CASE_OP(OpJeqNull) \ 42 CASE_OP(OpJneqNull) \ 43 CASE_OP(OpJneqPtr) \ 44 \ 45 CASE_OP(OpJless) \ 46 CASE_OP(OpJlesseq) \ 47 CASE_OP(OpJgreater) \ 48 CASE_OP(OpJgreatereq) \ 49 CASE_OP(OpJnless) \ 50 CASE_OP(OpJnlesseq) \ 51 CASE_OP(OpJngreater) \ 52 CASE_OP(OpJngreatereq) \ 53 CASE_OP(OpJeq) \ 54 CASE_OP(OpJneq) \ 55 CASE_OP(OpJstricteq) \ 56 CASE_OP(OpJnstricteq) \ 57 CASE_OP(OpJbelow) \ 58 CASE_OP(OpJbeloweq) \ 59 case op_switch_imm: { \ 60 auto bytecode = instruction->as<OpSwitchImm>(); \ 61 auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \ 62 for (unsigned i = table.branchOffsets.size(); i--;) \ 63 SWITCH_CASE(table.branchOffsets[i]); \ 64 SWITCH_DEFAULT_OFFSET(OpSwitchImm); \ 65 break; \ 66 } \ 67 case op_switch_char: { \ 68 auto bytecode = instruction->as<OpSwitchChar>(); \ 69 auto& table = codeBlock->switchJumpTable(bytecode.tableIndex); \ 70 for (unsigned i = table.branchOffsets.size(); i--;) \ 71 SWITCH_CASE(table.branchOffsets[i]); \ 72 SWITCH_DEFAULT_OFFSET(OpSwitchChar); \ 73 break; \ 74 } \ 75 case op_switch_string: { \ 76 auto bytecode = instruction->as<OpSwitchString>(); \ 77 auto& table = codeBlock->stringSwitchJumpTable(bytecode.tableIndex); \ 78 auto iter = table.offsetTable.begin(); \ 79 auto end = table.offsetTable.end(); \ 80 for (; iter != end; ++iter) \ 81 SWITCH_CASE(iter->value.branchOffset); \ 82 SWITCH_DEFAULT_OFFSET(OpSwitchString); \ 83 break; \ 84 } \ 85 default: \ 86 break; \ 87 } \ 88 89 90 template<typename Block> 91 inline int jumpTargetForInstruction(Block* codeBlock, const InstructionStream::Ref& instruction, unsigned target) 36 92 { 37 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset]); 38 Instruction* current = instructionsBegin + bytecodeOffset; 39 switch (opcodeID) { 40 case op_jmp: 41 function(current[1].u.operand); 93 if (target) 94 return target; 95 return codeBlock->outOfLineJumpOffset(instruction); 96 } 97 98 template<typename HashMap> 99 inline int jumpTargetForInstruction(HashMap& outOfLineJumpTargets, const InstructionStream::Ref& instruction, unsigned target) 100 { 101 if (target) 102 return target; 103 ASSERT(outOfLineJumpTargets.contains(instruction.offset())); 104 return outOfLineJumpTargets.get(instruction.offset()); 105 } 106 107 template<typename Op, typename Block> 108 inline int jumpTargetForInstruction(Block&& codeBlock, const InstructionStream::Ref& instruction) 109 { 110 auto bytecode = instruction->as<Op>(); 111 return jumpTargetForInstruction(codeBlock, instruction, bytecode.target); 112 } 113 114 template<typename Block, typename Function> 115 inline void extractStoredJumpTargetsForInstruction(Block&& codeBlock, const InstructionStream::Ref& instruction, Function function) 116 { 117 #define CASE_OP(__op) \ 118 case __op::opcodeID: \ 119 function(jumpTargetForInstruction<__op>(codeBlock, instruction)); \ 42 120 break; 43 case op_jtrue: 44 case op_jfalse: 45 case op_jeq_null: 46 case op_jneq_null: 47 
function(current[2].u.operand); 48 break; 49 case op_jneq_ptr: 50 case op_jless: 51 case op_jlesseq: 52 case op_jgreater: 53 case op_jgreatereq: 54 case op_jnless: 55 case op_jnlesseq: 56 case op_jngreater: 57 case op_jngreatereq: 58 case op_jeq: 59 case op_jneq: 60 case op_jstricteq: 61 case op_jnstricteq: 62 case op_jbelow: 63 case op_jbeloweq: 64 function(current[3].u.operand); 65 break; 66 case op_switch_imm: 67 case op_switch_char: { 68 auto& table = codeBlock->switchJumpTable(current[1].u.operand); 69 for (unsigned i = table.branchOffsets.size(); i--;) 70 function(table.branchOffsets[i]); 71 function(current[2].u.operand); 72 break; 121 122 #define SWITCH_CASE(__target) \ 123 function(__target) 124 125 #define SWITCH_DEFAULT_OFFSET(__op) \ 126 function(jumpTargetForInstruction(codeBlock, instruction, bytecode.defaultOffset)) \ 127 128 SWITCH_JMP(CASE_OP, SWITCH_CASE, SWITCH_DEFAULT_OFFSET) 129 130 #undef CASE_OP 131 #undef SWITCH_CASE 132 #undef SWITCH_DEFAULT_OFFSET 133 } 134 135 template<typename Block, typename Function, typename CodeBlockOrHashMap> 136 inline void updateStoredJumpTargetsForInstruction(Block&& codeBlock, unsigned finalOffset, InstructionStream::MutableRef instruction, Function function, CodeBlockOrHashMap codeBlockOrHashMap) 137 { 138 #define CASE_OP(__op) \ 139 case __op::opcodeID: { \ 140 int32_t target = jumpTargetForInstruction<__op>(codeBlockOrHashMap, instruction); \ 141 int32_t newTarget = function(target); \ 142 if (newTarget != target || finalOffset) { \ 143 instruction->cast<__op>()->setTarget(newTarget, [&]() { \ 144 codeBlock->addOutOfLineJumpTarget(finalOffset + instruction.offset(), newTarget); \ 145 return 0; \ 146 }); \ 147 } \ 148 break; \ 73 149 } 74 case op_switch_string: { 75 auto& table = codeBlock->stringSwitchJumpTable(current[1].u.operand); 76 auto iter = table.offsetTable.begin(); 77 auto end = table.offsetTable.end(); 78 for (; iter != end; ++iter) 79 function(iter->value.branchOffset); 80 function(current[2].u.operand); 81 break; 82 } 83 default: 84 break; 85 } 150 151 #define SWITCH_CASE(__target) \ 152 do { \ 153 int32_t target = __target; \ 154 int32_t newTarget = function(target); \ 155 if (newTarget != target) \ 156 __target = newTarget; \ 157 } while (false) 158 159 #define SWITCH_DEFAULT_OFFSET(__op) \ 160 do { \ 161 int32_t target = jumpTargetForInstruction(codeBlockOrHashMap, instruction, bytecode.defaultOffset); \ 162 int32_t newTarget = function(target); \ 163 if (newTarget != target || finalOffset) { \ 164 instruction->cast<__op>()->setDefaultOffset(newTarget, [&]() { \ 165 codeBlock->addOutOfLineJumpTarget(finalOffset + instruction.offset(), newTarget); \ 166 return 0; \ 167 }); \ 168 } \ 169 } while (false) 170 171 SWITCH_JMP(CASE_OP, SWITCH_CASE, SWITCH_DEFAULT_OFFSET) 172 173 #undef CASE_OP 174 #undef JMP_TARGET 175 } 176 177 template<typename Block, typename Function> 178 inline void updateStoredJumpTargetsForInstruction(Block* codeBlock, unsigned finalOffset, InstructionStream::MutableRef instruction, Function function) 179 { 180 updateStoredJumpTargetsForInstruction(codeBlock, finalOffset, instruction, function, codeBlock); 86 181 } 87 182 -
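The recurring jumpTargetForInstruction/setTarget pairing in this file encodes one convention: a stored branch operand of 0 is a sentinel meaning the real target did not fit inline, and the true offset lives in a side table keyed by the instruction's stream offset. A standalone sketch of both sides, with std::unordered_map standing in for the code block's out-of-line map (the narrow signed-byte operand is an assumption for the sketch):

    #include <cstdint>
    #include <limits>
    #include <unordered_map>

    using Offset = unsigned;
    using OutOfLineJumpTargets = std::unordered_map<Offset, int>;

    // Write side: store inline when the target fits the narrow operand,
    // otherwise store the 0 sentinel and register the target out of line.
    // (A relative target of 0 never occurs; it would jump to itself.)
    void setNarrowTarget(int8_t& operand, Offset instructionOffset, int target, OutOfLineJumpTargets& table)
    {
        if (target >= std::numeric_limits<int8_t>::min() && target <= std::numeric_limits<int8_t>::max() && target != 0) {
            operand = static_cast<int8_t>(target);
            return;
        }
        table[instructionOffset] = target;
        operand = 0;
    }

    // Read side: mirrors jumpTargetForInstruction above.
    int jumpTarget(const OutOfLineJumpTargets& table, Offset instructionOffset, int storedTarget)
    {
        return storedTarget ? storedTarget : table.at(instructionOffset);
    }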
trunk/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
r237486 r237547 27 27 #include "PutByIdStatus.h" 28 28 29 #include "BytecodeStructs.h" 29 30 #include "CodeBlock.h" 30 31 #include "ComplexGetStatus.h" … … 56 57 PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, UniquedStringImpl* uid) 57 58 { 58 UNUSED_PARAM(profiledBlock);59 UNUSED_PARAM(bytecodeIndex);60 UNUSED_PARAM(uid);61 62 59 VM& vm = *profiledBlock->vm(); 63 60 64 Instruction* instruction = &profiledBlock->instructions()[bytecodeIndex]; 65 66 StructureID structureID = instruction[4].u.structureID; 61 auto instruction = profiledBlock->instructions().at(bytecodeIndex); 62 auto& metadata = instruction->as<OpPutById>().metadata(profiledBlock); 63 64 StructureID structureID = metadata.oldStructure; 67 65 if (!structureID) 68 66 return PutByIdStatus(NoInformation); … … 70 68 Structure* structure = vm.heap.structureIDTable().get(structureID); 71 69 72 StructureID newStructureID = instruction[6].u.structureID;70 StructureID newStructureID = metadata.newStructure; 73 71 if (!newStructureID) { 74 72 PropertyOffset offset = structure->getConcurrently(uid); … … 88 86 89 87 ObjectPropertyConditionSet conditionSet; 90 if (!( instruction[8].u.putByIdFlags & PutByIdIsDirect)) {88 if (!(metadata.flags & PutByIdIsDirect)) { 91 89 conditionSet = 92 90 generateConditionsForPropertySetterMissConcurrently( -
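computeFromLLInt now reads the LLInt's put_by_id cache straight out of the instruction's metadata entry rather than peeking at instruction slots 4, 6, and 8. The decision it makes from those fields, in rough standalone form; the field names follow the patch, but the struct itself is a simplified stand-in for the generated Metadata:

    struct ToyPutByIdMetadata {
        unsigned oldStructure { 0 }; // StructureID; 0 means no cached information
        unsigned newStructure { 0 }; // 0 means a replace, nonzero means a transition
        unsigned flags { 0 };        // e.g. PutByIdIsDirect
    };

    enum class ToyPutKind { NoInformation, Replace, Transition };

    ToyPutKind classify(const ToyPutByIdMetadata& metadata)
    {
        if (!metadata.oldStructure)
            return ToyPutKind::NoInformation;
        if (!metadata.newStructure)
            return ToyPutKind::Replace;
        return ToyPutKind::Transition;
    }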
trunk/Source/JavaScriptCore/bytecode/SpecialPointer.cpp
r237486 r237547 45 45 } // namespace JSC 46 46 47 namespace WTF { 48 49 void printInternal(PrintStream& out, JSC::Special::Pointer pointer) 50 { 51 switch (pointer) { 52 case JSC::Special::CallFunction: 53 out.print("CallFunction"); 54 return; 55 case JSC::Special::ApplyFunction: 56 out.print("ApplyFunction"); 57 return; 58 case JSC::Special::ObjectConstructor: 59 out.print("ObjectConstructor"); 60 return; 61 case JSC::Special::ArrayConstructor: 62 out.print("ArrayConstructor"); 63 return; 64 case JSC::Special::TableSize: 65 out.print("TableSize"); 66 return; 67 } 68 } 69 70 } // namespace WTF -
trunk/Source/JavaScriptCore/bytecode/SpecialPointer.h
r237486 r237547 62 62 63 63 } // namespace JSC 64 65 namespace WTF { 66 67 class PrintStream; 68 69 void printInternal(PrintStream&, JSC::Special::Pointer); 70 71 } // namespace WTF -
trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.cpp
r237486 r237547 35 35 #include "ExecutableInfo.h" 36 36 #include "FunctionOverrides.h" 37 #include "InstructionStream.h" 37 38 #include "JSCInlines.h" 38 39 #include "JSString.h" 40 #include "Opcode.h" 39 41 #include "Parser.h" 40 42 #include "PreciseJumpTargetsInlines.h" … … 44 46 #include "UnlinkedEvalCodeBlock.h" 45 47 #include "UnlinkedFunctionCodeBlock.h" 46 #include "Unlinked InstructionStream.h"48 #include "UnlinkedMetadataTableInlines.h" 47 49 #include "UnlinkedModuleProgramCodeBlock.h" 48 50 #include "UnlinkedProgramCodeBlock.h" … … 74 76 , m_parseMode(info.parseMode()) 75 77 , m_codeType(codeType) 76 , m_arrayProfileCount(0)77 , m_arrayAllocationProfileCount(0)78 , m_objectAllocationProfileCount(0)79 , m_valueProfileCount(0)80 , m_llintCallLinkInfoCount(0)81 78 { 82 79 for (auto& constantRegisterIndex : m_linkTimeConstants) … … 96 93 visitor.append(*ptr); 97 94 visitor.appendValues(thisObject->m_constantRegisters.data(), thisObject->m_constantRegisters.size()); 98 if (thisObject->m_unlinkedInstructions) 99 visitor.reportExtraMemoryVisited(thisObject->m_unlinkedInstructions->sizeInBytes()); 95 size_t extraMemory = thisObject->m_metadata.sizeInBytes(); 96 if (thisObject->m_instructions) 97 extraMemory += thisObject->m_instructions->sizeInBytes(); 98 visitor.reportExtraMemoryVisited(extraMemory); 100 99 } 101 100 … … 103 102 { 104 103 UnlinkedCodeBlock* thisObject = jsCast<UnlinkedCodeBlock*>(cell); 105 size_t extraSize = thisObject->m_unlinkedInstructions ? thisObject->m_unlinkedInstructions->sizeInBytes() : 0; 104 size_t extraSize = thisObject->m_metadata.sizeInBytes(); 105 if (thisObject->m_instructions) 106 extraSize += thisObject->m_instructions->sizeInBytes(); 106 107 return Base::estimatedSize(cell, vm) + extraSize; 107 108 } … … 109 110 int UnlinkedCodeBlock::lineNumberForBytecodeOffset(unsigned bytecodeOffset) 110 111 { 111 ASSERT(bytecodeOffset < instructions(). count());112 ASSERT(bytecodeOffset < instructions().size()); 112 113 int divot { 0 }; 113 114 int startOffset { 0 }; … … 140 141 141 142 #ifndef NDEBUG 142 static void dumpLineColumnEntry(size_t index, const UnlinkedInstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) 143 { 144 const auto& instructions = instructionStream.unpackForDebugging(); 145 OpcodeID opcode = instructions[instructionOffset].u.opcode; 143 static void dumpLineColumnEntry(size_t index, const InstructionStream& instructionStream, unsigned instructionOffset, unsigned line, unsigned column) 144 { 145 const auto instruction = instructionStream.at(instructionOffset); 146 146 const char* event = ""; 147 if ( opcode == op_debug) {148 switch (instruction s[instructionOffset + 1].u.operand) {147 if (instruction->is<OpDebug>()) { 148 switch (instruction->as<OpDebug>().debugHookType) { 149 149 case WillExecuteProgram: event = " WillExecuteProgram"; break; 150 150 case DidExecuteProgram: event = " DidExecuteProgram"; break; … … 156 156 } 157 157 } 158 dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, opcodeNames[opcode], event);158 dataLogF(" [%zu] pc %u @ line %u col %u : %s%s\n", index, instructionOffset, line, column, instruction->name(), event); 159 159 } 160 160 … … 179 179 int& divot, int& startOffset, int& endOffset, unsigned& line, unsigned& column) const 180 180 { 181 ASSERT(bytecodeOffset < instructions(). 
count());181 ASSERT(bytecodeOffset < instructions().size()); 182 182 183 183 if (!m_expressionInfo.size()) { … … 305 305 } 306 306 307 void UnlinkedCodeBlock::setInstructions(std::unique_ptr< UnlinkedInstructionStream> instructions)307 void UnlinkedCodeBlock::setInstructions(std::unique_ptr<InstructionStream> instructions) 308 308 { 309 309 ASSERT(instructions); 310 310 { 311 311 auto locker = holdLock(cellLock()); 312 m_unlinkedInstructions = WTFMove(instructions); 313 } 314 Heap::heap(this)->reportExtraMemoryAllocated(m_unlinkedInstructions->sizeInBytes()); 315 } 316 317 const UnlinkedInstructionStream& UnlinkedCodeBlock::instructions() const 318 { 319 ASSERT(m_unlinkedInstructions.get()); 320 return *m_unlinkedInstructions; 312 m_instructions = WTFMove(instructions); 313 m_metadata.finalize(); 314 } 315 Heap::heap(this)->reportExtraMemoryAllocated(m_instructions->sizeInBytes() + m_metadata.sizeInBytes()); 316 } 317 318 const InstructionStream& UnlinkedCodeBlock::instructions() const 319 { 320 ASSERT(m_instructions.get()); 321 return *m_instructions; 321 322 } 322 323 … … 333 334 } 334 335 335 void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, UnpackedInstructions& instructions)336 void UnlinkedCodeBlock::applyModification(BytecodeRewriter& rewriter, InstructionStreamWriter& instructions) 336 337 { 337 338 // Before applying the changes, we adjust the jumps based on the original bytecode offset, the offset to the jump target, and 338 339 // the insertion information. 339 340 340 UnlinkedInstruction* instructionsBegin = instructions.begin(); // OOPS: make this an accessor on rewriter. 341 342 for (int bytecodeOffset = 0, instructionCount = instructions.size(); bytecodeOffset < instructionCount;) { 343 UnlinkedInstruction* current = instructionsBegin + bytecodeOffset; 344 OpcodeID opcodeID = current[0].u.opcode; 345 extractStoredJumpTargetsForBytecodeOffset(this, instructionsBegin, bytecodeOffset, [&](int32_t& relativeOffset) { 346 relativeOffset = rewriter.adjustJumpTarget(bytecodeOffset, bytecodeOffset + relativeOffset); 347 }); 348 bytecodeOffset += opcodeLength(opcodeID); 349 } 341 rewriter.adjustJumpTargets(); 350 342 351 343 // Then, exception handlers should be adjusted. … … 379 371 // And recompute the jump target based on the modified unlinked instructions. 380 372 m_jumpTargets.clear(); 381 recomputePreciseJumpTargets(this, instructions .begin(), instructions.size(), m_jumpTargets);373 recomputePreciseJumpTargets(this, instructions, m_jumpTargets); 382 374 } 383 375 … … 426 418 } 427 419 420 void UnlinkedCodeBlock::addOutOfLineJumpTarget(InstructionStream::Offset bytecodeOffset, int target) 421 { 422 RELEASE_ASSERT(target); 423 m_outOfLineJumpTargets.set(bytecodeOffset, target); 424 } 425 426 int UnlinkedCodeBlock::outOfLineJumpOffset(InstructionStream::Offset bytecodeOffset) 427 { 428 ASSERT(m_outOfLineJumpTargets.contains(bytecodeOffset)); 429 return m_outOfLineJumpTargets.get(bytecodeOffset); 430 } 431 428 432 } // namespace JSC -
trunk/Source/JavaScriptCore/bytecode/UnlinkedCodeBlock.h
r237486 r237547 32 32 #include "HandlerInfo.h" 33 33 #include "Identifier.h" 34 #include "InstructionStream.h" 34 35 #include "JSCast.h" 35 36 #include "LockDuringMarking.h" 37 #include "Opcode.h" 36 38 #include "ParserModes.h" 37 39 #include "RegExp.h" 38 40 #include "SpecialPointer.h" 39 41 #include "UnlinkedFunctionExecutable.h" 42 #include "UnlinkedMetadataTable.h" 40 43 #include "VirtualRegister.h" 41 44 #include <algorithm> … … 61 64 class UnlinkedFunctionCodeBlock; 62 65 class UnlinkedFunctionExecutable; 63 class UnlinkedInstructionStream;64 66 struct ExecutableInfo; 65 67 … … 102 104 }; 103 105 104 struct UnlinkedInstruction {105 UnlinkedInstruction() { u.operand = 0; }106 UnlinkedInstruction(OpcodeID opcode) { u.opcode = opcode; }107 UnlinkedInstruction(int operand) { u.operand = operand; }108 union {109 OpcodeID opcode;110 int32_t operand;111 unsigned unsignedValue;112 } u;113 };114 115 106 class UnlinkedCodeBlock : public JSCell { 116 107 public: … … 121 112 122 113 enum { CallFunction, ApplyFunction }; 123 124 typedef UnlinkedInstruction Instruction;125 typedef Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> UnpackedInstructions;126 114 127 115 bool isConstructor() const { return m_isConstructor; } … … 238 226 void shrinkToFit(); 239 227 240 void setInstructions(std::unique_ptr< UnlinkedInstructionStream>);241 const UnlinkedInstructionStream& instructions() const;228 void setInstructions(std::unique_ptr<InstructionStream>); 229 const InstructionStream& instructions() const; 242 230 243 231 int numCalleeLocals() const { return m_numCalleeLocals; } … … 282 270 UnlinkedHandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; } 283 271 284 UnlinkedArrayProfile addArrayProfile() { return m_arrayProfileCount++; }285 unsigned numberOfArrayProfiles() { return m_arrayProfileCount; }286 UnlinkedArrayAllocationProfile addArrayAllocationProfile(IndexingType recommendedIndexingType) { return (m_arrayAllocationProfileCount++) | recommendedIndexingType << 24; }287 unsigned numberOfArrayAllocationProfiles() { return m_arrayAllocationProfileCount; }288 UnlinkedObjectAllocationProfile addObjectAllocationProfile() { return m_objectAllocationProfileCount++; }289 static std::tuple<unsigned, IndexingType> decompressArrayAllocationProfile(UnlinkedArrayAllocationProfile compressedProfile)290 {291 unsigned profile = (compressedProfile << 8) >> 8;292 IndexingType recommendedIndexingType = compressedProfile >> 24;293 return std::make_tuple<unsigned, IndexingType>(WTFMove(profile), WTFMove(recommendedIndexingType));294 295 }296 unsigned numberOfObjectAllocationProfiles() { return m_objectAllocationProfileCount; }297 UnlinkedValueProfile addValueProfile() { return m_valueProfileCount++; }298 unsigned numberOfValueProfiles() { return m_valueProfileCount; }299 300 UnlinkedLLIntCallLinkInfo addLLIntCallLinkInfo() { return m_llintCallLinkInfoCount++; }301 unsigned numberOfLLintCallLinkInfos() { return m_llintCallLinkInfoCount; }302 303 272 CodeType codeType() const { return m_codeType; } 304 273 … … 306 275 VirtualRegister scopeRegister() const { return m_scopeRegister; } 307 276 308 void addPropertyAccessInstruction( unsignedpropertyAccessInstruction)277 void addPropertyAccessInstruction(InstructionStream::Offset propertyAccessInstruction) 309 278 { 310 279 m_propertyAccessInstructions.append(propertyAccessInstruction); … … 312 281 313 282 size_t numberOfPropertyAccessInstructions() const { return m_propertyAccessInstructions.size(); } 314 const 
Vector< unsigned>& propertyAccessInstructions() const { return m_propertyAccessInstructions; }283 const Vector<InstructionStream::Offset>& propertyAccessInstructions() const { return m_propertyAccessInstructions; } 315 284 316 285 bool hasRareData() const { return m_rareData.get(); } … … 343 312 unsigned endColumn() const { return m_endColumn; } 344 313 345 void addOpProfileControlFlowBytecodeOffset( size_t offset)314 void addOpProfileControlFlowBytecodeOffset(InstructionStream::Offset offset) 346 315 { 347 316 createRareDataIfNecessary(); 348 317 m_rareData->m_opProfileControlFlowBytecodeOffsets.append(offset); 349 318 } 350 const Vector< size_t>& opProfileControlFlowBytecodeOffsets() const319 const Vector<InstructionStream::Offset>& opProfileControlFlowBytecodeOffsets() const 351 320 { 352 321 ASSERT(m_rareData); … … 389 358 #endif 390 359 360 UnlinkedMetadataTable& metadata() { return m_metadata; } 361 362 size_t metadataSizeInBytes() 363 { 364 return m_metadata.sizeInBytes(); 365 } 366 367 391 368 protected: 392 369 UnlinkedCodeBlock(VM*, Structure*, CodeType, const ExecutableInfo&, DebuggerMode); … … 402 379 friend class BytecodeGenerator; 403 380 404 void applyModification(BytecodeRewriter&, UnpackedInstructions&);381 void applyModification(BytecodeRewriter&, InstructionStreamWriter&); 405 382 406 383 void createRareDataIfNecessary() … … 415 392 BytecodeLivenessAnalysis& livenessAnalysisSlow(CodeBlock*); 416 393 417 std::unique_ptr< UnlinkedInstructionStream> m_unlinkedInstructions;394 std::unique_ptr<InstructionStream> m_instructions; 418 395 std::unique_ptr<BytecodeLivenessAnalysis> m_liveness; 419 396 … … 424 401 String m_sourceURLDirective; 425 402 String m_sourceMappingURLDirective; 403 UnlinkedMetadataTable m_metadata; 426 404 427 405 #if ENABLE(DFG_JIT) … … 459 437 CodeType m_codeType; 460 438 461 Vector< unsigned> m_jumpTargets;462 463 Vector< unsigned> m_propertyAccessInstructions;439 Vector<InstructionStream::Offset> m_jumpTargets; 440 441 Vector<InstructionStream::Offset> m_propertyAccessInstructions; 464 442 465 443 // Constant Pools … … 474 452 std::array<unsigned, LinkTimeConstantCount> m_linkTimeConstants; 475 453 476 unsigned m_arrayProfileCount { 0 };477 unsigned m_arrayAllocationProfileCount { 0 };478 unsigned m_objectAllocationProfileCount { 0 };479 unsigned m_valueProfileCount { 0 };480 unsigned m_llintCallLinkInfoCount { 0 };481 482 454 public: 483 455 struct RareData { … … 497 469 }; 498 470 HashMap<unsigned, TypeProfilerExpressionRange> m_typeProfilerInfoMap; 499 Vector< size_t> m_opProfileControlFlowBytecodeOffsets;471 Vector<InstructionStream::Offset> m_opProfileControlFlowBytecodeOffsets; 500 472 }; 501 473 474 void addOutOfLineJumpTarget(InstructionStream::Offset, int target); 475 int outOfLineJumpOffset(InstructionStream::Offset); 476 int outOfLineJumpOffset(const InstructionStream::Ref& instruction) 477 { 478 return outOfLineJumpOffset(instruction.offset()); 479 } 480 502 481 private: 482 using OutOfLineJumpTargets = HashMap<InstructionStream::Offset, int>; 483 484 OutOfLineJumpTargets replaceOutOfLineJumpTargets() 485 { 486 OutOfLineJumpTargets newJumpTargets; 487 std::swap(m_outOfLineJumpTargets, newJumpTargets); 488 return newJumpTargets; 489 } 490 491 OutOfLineJumpTargets m_outOfLineJumpTargets; 503 492 std::unique_ptr<RareData> m_rareData; 504 493 Vector<ExpressionRangeInfo> m_expressionInfo; -
trunk/Source/JavaScriptCore/bytecode/UnlinkedMetadataTable.h
r237546 r237547 1 1 /* 2 * Copyright (C) 201 6Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 26 26 #pragma once 27 27 28 #include "Instruction.h" 29 #include "ObjectPropertyCondition.h" 30 #include "Watchpoint.h" 28 #include "Opcode.h" 29 #include <wtf/Ref.h> 31 30 32 31 namespace JSC { 33 32 34 class LLIntPrototypeLoadAdaptiveStructureWatchpoint : public Watchpoint { 33 class MetadataTable; 34 35 class UnlinkedMetadataTable { 36 friend class LLIntOffsetsExtractor; 37 friend class MetadataTable; 38 35 39 public: 36 LLIntPrototypeLoadAdaptiveStructureWatchpoint() = default;37 LLIntPrototypeLoadAdaptiveStructureWatchpoint(const ObjectPropertyCondition&, Instruction*);40 UnlinkedMetadataTable(); 41 ~UnlinkedMetadataTable(); 38 42 39 void install(VM&);43 unsigned addEntry(OpcodeID); 40 44 41 const ObjectPropertyCondition& key() const { return m_key; }45 size_t sizeInBytes(); 42 46 43 protected: 44 void fireInternal(VM&, const FireDetail&) override; 47 void finalize(); 48 49 Ref<MetadataTable> link(); 45 50 46 51 private: 47 ObjectPropertyCondition m_key; 48 Instruction* m_getByIdInstruction { nullptr }; 52 void unlink(MetadataTable&); 53 54 size_t sizeInBytes(MetadataTable&); 55 56 using Offset = unsigned; 57 58 static constexpr unsigned s_offsetTableEntries = NUMBER_OF_BYTECODE_WITH_METADATA + 1; // one extra entry for the "end" offset; 59 static constexpr unsigned s_offsetTableSize = s_offsetTableEntries * sizeof(UnlinkedMetadataTable::Offset); 60 61 bool m_hasMetadata : 1; 62 bool m_isFinalized : 1; 63 bool m_isLinked : 1; 64 Offset* m_buffer; 49 65 }; 50 66 -
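UnlinkedMetadataTable's job during generation is to accumulate per-opcode entry counts and, at finalize() time, turn them into the cumulative offset table that MetadataTable indexes into, hence the extra end entry noted above. The body of finalize() is not shown in this hunk, so the following is only a toy reconstruction under that assumption, producing byte offsets measured from the start of the offset table:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Offset = uint32_t;

    std::vector<Offset> finalizeOffsets(const std::vector<Offset>& entryBytesPerOpcode)
    {
        size_t numOpcodes = entryBytesPerOpcode.size();
        std::vector<Offset> offsets(numOpcodes + 1); // one extra entry for the "end" offset
        Offset cursor = static_cast<Offset>((numOpcodes + 1) * sizeof(Offset));
        for (size_t i = 0; i < numOpcodes; ++i) {
            offsets[i] = cursor;             // entries for opcode i start here
            cursor += entryBytesPerOpcode[i];
        }
        offsets[numOpcodes] = cursor;        // one past the last entry
        return offsets;
    }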
trunk/Source/JavaScriptCore/bytecode/VirtualRegister.cpp
r237486 r237547 26 26 #include "config.h" 27 27 #include "VirtualRegister.h" 28 29 #include "RegisterID.h" 28 30 29 31 namespace JSC { … … 62 64 } 63 65 66 67 VirtualRegister::VirtualRegister(RegisterID* reg) 68 : VirtualRegister(reg->m_virtualRegister.m_virtualRegister) 69 { 70 } 71 64 72 } // namespace JSC 65 -
trunk/Source/JavaScriptCore/bytecode/VirtualRegister.h
r237486 r237547 43 43 44 44 45 class RegisterID; 46 45 47 class VirtualRegister { 46 48 public: 47 49 friend VirtualRegister virtualRegisterForLocal(int); 48 50 friend VirtualRegister virtualRegisterForArgument(int, int); 51 52 VirtualRegister(RegisterID*); 49 53 50 54 VirtualRegister() -
trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
r237486 r237547 37 37 #include "BytecodeGeneratorification.h" 38 38 #include "BytecodeLivenessAnalysis.h" 39 #include "BytecodeStructs.h" 39 40 #include "BytecodeUseDef.h" 40 41 #include "CatchScope.h" … … 52 53 #include "LowLevelInterpreter.h" 53 54 #include "Options.h" 55 #include "PreciseJumpTargetsInlines.h" 54 56 #include "StackAlignment.h" 55 57 #include "StrongInlines.h" … … 58 60 #include "UnlinkedEvalCodeBlock.h" 59 61 #include "UnlinkedFunctionCodeBlock.h" 60 #include "Unlinked InstructionStream.h"62 #include "UnlinkedMetadataTableInlines.h" 61 63 #include "UnlinkedModuleProgramCodeBlock.h" 62 64 #include "UnlinkedProgramCodeBlock.h" … … 69 71 namespace JSC { 70 72 73 template<typename CallOp, typename = std::true_type> 74 struct VarArgsOp; 75 76 template<typename CallOp> 77 struct VarArgsOp<CallOp, std::enable_if_t<std::is_same<CallOp, OpTailCall>::value, std::true_type>> { 78 using type = OpTailCallVarargs; 79 }; 80 81 82 template<typename CallOp> 83 struct VarArgsOp<CallOp, std::enable_if_t<!std::is_same<CallOp, OpTailCall>::value, std::true_type>> { 84 using type = OpCallVarargs; 85 }; 86 87 71 88 template<typename T> 72 89 static inline void shrinkToFit(T& segmentedVector) … … 80 97 m_location = location; 81 98 82 unsigned size = m_unresolvedJumps.size(); 83 for (unsigned i = 0; i < size; ++i) 84 generator.instructions()[m_unresolvedJumps[i].second].u.operand = m_location - m_unresolvedJumps[i].first; 99 for (auto offset : m_unresolvedJumps) { 100 auto instruction = generator.m_writer.ref(offset); 101 int target = m_location - offset; 102 103 #define CASE(__op) \ 104 case __op::opcodeID: \ 105 instruction->cast<__op>()->setTarget(target, [&]() { \ 106 generator.m_codeBlock->addOutOfLineJumpTarget(instruction.offset(), target); \ 107 return 0; \ 108 }); \ 109 break; 110 111 switch (instruction->opcodeID()) { 112 CASE(OpJmp) 113 CASE(OpJtrue) 114 CASE(OpJfalse) 115 CASE(OpJeqNull) 116 CASE(OpJneqNull) 117 CASE(OpJeq) 118 CASE(OpJstricteq) 119 CASE(OpJneq) 120 CASE(OpJneqPtr) 121 CASE(OpJnstricteq) 122 CASE(OpJless) 123 CASE(OpJlesseq) 124 CASE(OpJgreater) 125 CASE(OpJgreatereq) 126 CASE(OpJnless) 127 CASE(OpJnlesseq) 128 CASE(OpJngreater) 129 CASE(OpJngreatereq) 130 CASE(OpJbelow) 131 CASE(OpJbeloweq) 132 default: 133 ASSERT_NOT_REACHED(); 134 } 135 #undef CASE 136 } 137 } 138 139 int Label::bind(BytecodeGenerator* generator) 140 { 141 return bind(generator->instructions().size()); 85 142 } 86 143 … … 161 218 for (auto& tuple : m_catchesToEmit) { 162 219 Ref<Label> realCatchTarget = newEmittedLabel(); 163 emitOpcode(op_catch); 164 instructions().append(std::get<1>(tuple)); 165 instructions().append(std::get<2>(tuple)); 166 instructions().append(0); 220 OpCatch::emit(this, std::get<1>(tuple), std::get<2>(tuple)); 167 221 168 222 TryData* tryData = std::get<0>(tuple); … … 209 263 210 264 if (isGeneratorOrAsyncFunctionBodyParseMode(m_codeBlock->parseMode())) 211 performGeneratorification( m_codeBlock.get(), m_instructions, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex);265 performGeneratorification(*this, m_codeBlock.get(), m_writer, m_generatorFrameSymbolTable.get(), m_generatorFrameSymbolTableIndex); 212 266 213 267 RELEASE_ASSERT(static_cast<unsigned>(m_codeBlock->numCalleeLocals()) < static_cast<unsigned>(FirstConstantRegisterIndex)); 214 m_codeBlock->setInstructions( std::make_unique<UnlinkedInstructionStream>(m_instructions));268 m_codeBlock->setInstructions(m_writer.finalize()); 215 269 216 270 m_codeBlock->shrinkToFit(); … … 450 504 
functionSymbolTable->set(NoLockingNecessary, name, entry); 451 505 } 452 emitOpcode(op_put_to_scope); 453 instructions().append(m_lexicalEnvironmentRegister->index()); 454 instructions().append(UINT_MAX); 455 instructions().append(virtualRegisterForArgument(1 + i).offset()); 456 instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); 457 instructions().append(symbolTableConstantIndex); 458 instructions().append(offset.offset()); 506 OpPutToScope::emit(this, m_lexicalEnvironmentRegister, UINT_MAX, virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex, offset.offset()); 459 507 } 460 508 461 509 // This creates a scoped arguments object and copies the overflow arguments into the 462 510 // scope. It's the equivalent of calling ScopedArguments::createByCopying(). 463 emitOpcode(op_create_scoped_arguments); 464 instructions().append(m_argumentsRegister->index()); 465 instructions().append(m_lexicalEnvironmentRegister->index()); 511 OpCreateScopedArguments::emit(this, m_argumentsRegister, m_lexicalEnvironmentRegister); 466 512 } else { 467 513 // We're going to put all parameters into the DirectArguments object. First ensure … … 472 518 } 473 519 474 emitOpcode(op_create_direct_arguments); 475 instructions().append(m_argumentsRegister->index()); 520 OpCreateDirectArguments::emit(this, m_argumentsRegister); 476 521 } 477 522 } else if (isSimpleParameterList) { … … 497 542 functionSymbolTable->set(NoLockingNecessary, name, SymbolTableEntry(VarOffset(offset))); 498 543 499 emitOpcode(op_put_to_scope); 500 instructions().append(m_lexicalEnvironmentRegister->index()); 501 instructions().append(addConstant(ident)); 502 instructions().append(virtualRegisterForArgument(1 + i).offset()); 503 instructions().append(GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization).operand()); 504 instructions().append(symbolTableConstantIndex); 505 instructions().append(offset.offset()); 544 OpPutToScope::emit(this, m_lexicalEnvironmentRegister, addConstant(ident), virtualRegisterForArgument(1 + i), GetPutInfo(ThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), symbolTableConstantIndex, offset.offset()); 506 545 } 507 546 } … … 509 548 if (needsArguments && (codeBlock->isStrictMode() || !isSimpleParameterList)) { 510 549 // Allocate a cloned arguments object. 
511 emitOpcode(op_create_cloned_arguments); 512 instructions().append(m_argumentsRegister->index()); 550 OpCreateClonedArguments::emit(this, m_argumentsRegister); 513 551 } 514 552 … … 1167 1205 if (hasCapturedVariables) { 1168 1206 RELEASE_ASSERT(m_lexicalEnvironmentRegister); 1169 emitOpcode(op_create_lexical_environment); 1170 instructions().append(m_lexicalEnvironmentRegister->index()); 1171 instructions().append(scopeRegister()->index()); 1172 instructions().append(symbolTableConstantIndex); 1173 instructions().append(addConstantValue(jsUndefined())->index()); 1174 1175 emitOpcode(op_mov); 1176 instructions().append(scopeRegister()->index()); 1177 instructions().append(m_lexicalEnvironmentRegister->index()); 1207 OpCreateLexicalEnvironment::emit(this, m_lexicalEnvironmentRegister, scopeRegister(), VirtualRegister { symbolTableConstantIndex }, addConstantValue(jsUndefined())); 1208 1209 OpMov::emit(this, scopeRegister(), m_lexicalEnvironmentRegister); 1178 1210 1179 1211 pushLocalControlFlowScope(); … … 1269 1301 } 1270 1302 1271 void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)1272 {1273 #ifndef NDEBUG1274 size_t opcodePosition = instructions().size();1275 ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end);1276 m_lastOpcodePosition = opcodePosition;1277 #endif1278 instructions().append(opcodeID);1279 m_lastOpcodeID = opcodeID;1280 }1281 1282 UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()1283 {1284 return m_codeBlock->addArrayProfile();1285 }1286 1287 UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile(IndexingType recommendedIndexingType)1288 {1289 return m_codeBlock->addArrayAllocationProfile(recommendedIndexingType);1290 }1291 1292 UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()1293 {1294 return m_codeBlock->addObjectAllocationProfile();1295 }1296 1297 UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)1298 {1299 emitOpcode(opcodeID);1300 if (!m_vm->canUseJIT())1301 return static_cast<UnlinkedValueProfile>(-1);1302 UnlinkedValueProfile result = m_codeBlock->addValueProfile();1303 return result;1304 }1305 1306 1303 void BytecodeGenerator::emitEnter() 1307 1304 { 1308 emitOpcode(op_enter);1305 OpEnter::emit(this); 1309 1306 1310 1307 if (LIKELY(Options::optimizeRecursiveTailCalls())) { … … 1319 1316 void BytecodeGenerator::emitLoopHint() 1320 1317 { 1321 emitOpcode(op_loop_hint);1318 OpLoopHint::emit(this); 1322 1319 emitCheckTraps(); 1323 1320 } 1324 1321 1322 void BytecodeGenerator::emitJump(Label& target) 1323 { 1324 OpJmp::emit(this, target.bind(this)); 1325 } 1326 1325 1327 void BytecodeGenerator::emitCheckTraps() 1326 1328 { 1327 emitOpcode(op_check_traps); 1328 } 1329 1330 void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index) 1331 { 1332 ASSERT(instructions().size() >= 4); 1333 size_t size = instructions().size(); 1334 dstIndex = instructions().at(size - 3).u.operand; 1335 src1Index = instructions().at(size - 2).u.operand; 1336 src2Index = instructions().at(size - 1).u.operand; 1337 } 1338 1339 void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex) 1340 { 1341 ASSERT(instructions().size() >= 3); 1342 size_t size = instructions().size(); 1343 dstIndex = instructions().at(size - 2).u.operand; 1344 srcIndex = instructions().at(size - 1).u.operand; 1345 } 1346 1347 void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp() 1348 { 1349 ASSERT(instructions().size() >= 4); 1350 
instructions().shrink(instructions().size() - 4); 1329 OpCheckTraps::emit(this); 1330 } 1331 1332 void ALWAYS_INLINE BytecodeGenerator::rewind() 1333 { 1334 ASSERT(m_lastInstruction.isValid()); 1351 1335 m_lastOpcodeID = op_end; 1352 } 1353 1354 void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp() 1355 { 1356 ASSERT(instructions().size() >= 3); 1357 instructions().shrink(instructions().size() - 3); 1358 m_lastOpcodeID = op_end; 1359 } 1360 1361 void BytecodeGenerator::emitJump(Label& target) 1362 { 1363 size_t begin = instructions().size(); 1364 emitOpcode(op_jmp); 1365 instructions().append(target.bind(begin, instructions().size())); 1336 m_writer.rewind(m_lastInstruction); 1337 } 1338 1339 template<typename BinOp, typename JmpOp> 1340 bool BytecodeGenerator::fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands) 1341 { 1342 auto binop = m_lastInstruction->as<BinOp>(); 1343 if (cond->index() == binop.dst.offset() && cond->isTemporary() && !cond->refCount()) { 1344 rewind(); 1345 1346 if (swapOperands) 1347 std::swap(binop.lhs, binop.rhs); 1348 1349 JmpOp::emit(this, binop.lhs, binop.rhs, target.bind(this)); 1350 return true; 1351 } 1352 return false; 1353 } 1354 1355 template<typename UnaryOp, typename JmpOp> 1356 bool BytecodeGenerator::fuseTestAndJmp(RegisterID* cond, Label& target) 1357 { 1358 auto unop = m_lastInstruction->as<UnaryOp>(); 1359 if (cond->index() == unop.dst.offset() && cond->isTemporary() && !cond->refCount()) { 1360 rewind(); 1361 1362 JmpOp::emit(this, unop.operand, target.bind(this)); 1363 return true; 1364 } 1365 return false; 1366 1366 } 1367 1367 1368 1368 void BytecodeGenerator::emitJumpIfTrue(RegisterID* cond, Label& target) 1369 1369 { 1370 auto fuseCompareAndJump = [&] (OpcodeID jumpID) {1371 int dstIndex;1372 int src1Index;1373 int src2Index;1374 1375 retrieveLastBinaryOp(dstIndex, src1Index, src2Index);1376 1377 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {1378 rewindBinaryOp();1379 1380 size_t begin = instructions().size();1381 emitOpcode(jumpID);1382 instructions().append(src1Index);1383 instructions().append(src2Index);1384 instructions().append(target.bind(begin, instructions().size()));1385 return true;1386 }1387 return false;1388 };1389 1370 1390 1371 if (m_lastOpcodeID == op_less) { 1391 if (fuseCompareAndJump (op_jless))1372 if (fuseCompareAndJump<OpLess, OpJless>(cond, target)) 1392 1373 return; 1393 1374 } else if (m_lastOpcodeID == op_lesseq) { 1394 if (fuseCompareAndJump (op_jlesseq))1375 if (fuseCompareAndJump<OpLesseq, OpJlesseq>(cond, target)) 1395 1376 return; 1396 1377 } else if (m_lastOpcodeID == op_greater) { 1397 if (fuseCompareAndJump (op_jgreater))1378 if (fuseCompareAndJump<OpGreater, OpJgreater>(cond, target)) 1398 1379 return; 1399 1380 } else if (m_lastOpcodeID == op_greatereq) { 1400 if (fuseCompareAndJump (op_jgreatereq))1381 if (fuseCompareAndJump<OpGreatereq, OpJgreatereq>(cond, target)) 1401 1382 return; 1402 1383 } else if (m_lastOpcodeID == op_eq) { 1403 if (fuseCompareAndJump (op_jeq))1384 if (fuseCompareAndJump<OpEq, OpJeq>(cond, target)) 1404 1385 return; 1405 1386 } else if (m_lastOpcodeID == op_stricteq) { 1406 if (fuseCompareAndJump (op_jstricteq))1387 if (fuseCompareAndJump<OpStricteq, OpJstricteq>(cond, target)) 1407 1388 return; 1408 1389 } else if (m_lastOpcodeID == op_neq) { 1409 if (fuseCompareAndJump (op_jneq))1390 if (fuseCompareAndJump<OpNeq, OpJneq>(cond, target)) 1410 1391 return; 1411 1392 } else if (m_lastOpcodeID == op_nstricteq) { 1412 if (fuseCompareAndJump 
(op_jnstricteq))1393 if (fuseCompareAndJump<OpNstricteq, OpJnstricteq>(cond, target)) 1413 1394 return; 1414 1395 } else if (m_lastOpcodeID == op_below) { 1415 if (fuseCompareAndJump (op_jbelow))1396 if (fuseCompareAndJump<OpBelow, OpJbelow>(cond, target)) 1416 1397 return; 1417 1398 } else if (m_lastOpcodeID == op_beloweq) { 1418 if (fuseCompareAndJump (op_jbeloweq))1399 if (fuseCompareAndJump<OpBeloweq, OpJbeloweq>(cond, target)) 1419 1400 return; 1420 1401 } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { 1421 int dstIndex; 1422 int srcIndex; 1423 1424 retrieveLastUnaryOp(dstIndex, srcIndex); 1425 1426 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { 1427 rewindUnaryOp(); 1428 1429 size_t begin = instructions().size(); 1430 emitOpcode(op_jeq_null); 1431 instructions().append(srcIndex); 1432 instructions().append(target.bind(begin, instructions().size())); 1402 if (fuseTestAndJmp<OpEqNull, OpJeqNull>(cond, target)) 1433 1403 return; 1434 }1435 1404 } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { 1436 int dstIndex; 1437 int srcIndex; 1438 1439 retrieveLastUnaryOp(dstIndex, srcIndex); 1440 1441 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { 1442 rewindUnaryOp(); 1443 1444 size_t begin = instructions().size(); 1445 emitOpcode(op_jneq_null); 1446 instructions().append(srcIndex); 1447 instructions().append(target.bind(begin, instructions().size())); 1405 if (fuseTestAndJmp<OpNeqNull, OpJneqNull>(cond, target)) 1448 1406 return; 1449 } 1450 } 1451 1452 size_t begin = instructions().size(); 1453 1454 emitOpcode(op_jtrue); 1455 instructions().append(cond->index()); 1456 instructions().append(target.bind(begin, instructions().size())); 1407 } 1408 1409 OpJtrue::emit(this, cond, target.bind(this)); 1457 1410 } 1458 1411 1459 1412 void BytecodeGenerator::emitJumpIfFalse(RegisterID* cond, Label& target) 1460 1413 { 1461 auto fuseCompareAndJump = [&] (OpcodeID jumpID, bool replaceOperands) {1462 int dstIndex;1463 int src1Index;1464 int src2Index;1465 1466 retrieveLastBinaryOp(dstIndex, src1Index, src2Index);1467 1468 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) {1469 rewindBinaryOp();1470 1471 size_t begin = instructions().size();1472 emitOpcode(jumpID);1473 // Since op_below and op_beloweq only accepts Int32, replacing operands is not observable to users.1474 if (replaceOperands)1475 std::swap(src1Index, src2Index);1476 instructions().append(src1Index);1477 instructions().append(src2Index);1478 instructions().append(target.bind(begin, instructions().size()));1479 return true;1480 }1481 return false;1482 };1483 1484 1414 if (m_lastOpcodeID == op_less && target.isForward()) { 1485 if (fuseCompareAndJump (op_jnless, false))1415 if (fuseCompareAndJump<OpLess, OpJnless>(cond, target)) 1486 1416 return; 1487 1417 } else if (m_lastOpcodeID == op_lesseq && target.isForward()) { 1488 if (fuseCompareAndJump (op_jnlesseq, false))1418 if (fuseCompareAndJump<OpLesseq, OpJnlesseq>(cond, target)) 1489 1419 return; 1490 1420 } else if (m_lastOpcodeID == op_greater && target.isForward()) { 1491 if (fuseCompareAndJump (op_jngreater, false))1421 if (fuseCompareAndJump<OpGreater, OpJngreater>(cond, target)) 1492 1422 return; 1493 1423 } else if (m_lastOpcodeID == op_greatereq && target.isForward()) { 1494 if (fuseCompareAndJump (op_jngreatereq, false))1424 if (fuseCompareAndJump<OpGreatereq, OpJngreatereq>(cond, target)) 1495 1425 return; 1496 1426 } else if (m_lastOpcodeID == op_eq && 
target.isForward()) { 1497 if (fuseCompareAndJump (op_jneq, false))1427 if (fuseCompareAndJump<OpEq, OpJneq>(cond, target)) 1498 1428 return; 1499 1429 } else if (m_lastOpcodeID == op_stricteq && target.isForward()) { 1500 if (fuseCompareAndJump (op_jnstricteq, false))1430 if (fuseCompareAndJump<OpStricteq, OpJnstricteq>(cond, target)) 1501 1431 return; 1502 1432 } else if (m_lastOpcodeID == op_neq && target.isForward()) { 1503 if (fuseCompareAndJump (op_jeq, false))1433 if (fuseCompareAndJump<OpNeq, OpJeq>(cond, target)) 1504 1434 return; 1505 1435 } else if (m_lastOpcodeID == op_nstricteq && target.isForward()) { 1506 if (fuseCompareAndJump (op_jstricteq, false))1436 if (fuseCompareAndJump<OpNstricteq, OpJstricteq>(cond, target)) 1507 1437 return; 1508 1438 } else if (m_lastOpcodeID == op_below && target.isForward()) { 1509 if (fuseCompareAndJump (op_jbeloweq, true))1439 if (fuseCompareAndJump<OpBelow, OpJbeloweq>(cond, target, true)) 1510 1440 return; 1511 1441 } else if (m_lastOpcodeID == op_beloweq && target.isForward()) { 1512 if (fuseCompareAndJump (op_jbelow, true))1442 if (fuseCompareAndJump<OpBeloweq, OpJbelow>(cond, target, true)) 1513 1443 return; 1514 1444 } else if (m_lastOpcodeID == op_not) { 1515 int dstIndex; 1516 int srcIndex; 1517 1518 retrieveLastUnaryOp(dstIndex, srcIndex); 1519 1520 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { 1521 rewindUnaryOp(); 1522 1523 size_t begin = instructions().size(); 1524 emitOpcode(op_jtrue); 1525 instructions().append(srcIndex); 1526 instructions().append(target.bind(begin, instructions().size())); 1445 if (fuseTestAndJmp<OpNot, OpJtrue>(cond, target)) 1527 1446 return; 1528 }1529 1447 } else if (m_lastOpcodeID == op_eq_null && target.isForward()) { 1530 int dstIndex; 1531 int srcIndex; 1532 1533 retrieveLastUnaryOp(dstIndex, srcIndex); 1534 1535 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { 1536 rewindUnaryOp(); 1537 1538 size_t begin = instructions().size(); 1539 emitOpcode(op_jneq_null); 1540 instructions().append(srcIndex); 1541 instructions().append(target.bind(begin, instructions().size())); 1448 if (fuseTestAndJmp<OpEqNull, OpJneqNull>(cond, target)) 1542 1449 return; 1543 }1544 1450 } else if (m_lastOpcodeID == op_neq_null && target.isForward()) { 1545 int dstIndex; 1546 int srcIndex; 1547 1548 retrieveLastUnaryOp(dstIndex, srcIndex); 1549 1550 if (cond->index() == dstIndex && cond->isTemporary() && !cond->refCount()) { 1551 rewindUnaryOp(); 1552 1553 size_t begin = instructions().size(); 1554 emitOpcode(op_jeq_null); 1555 instructions().append(srcIndex); 1556 instructions().append(target.bind(begin, instructions().size())); 1451 if (fuseTestAndJmp<OpNeqNull, OpJeqNull>(cond, target)) 1557 1452 return; 1558 } 1559 } 1560 1561 size_t begin = instructions().size(); 1562 emitOpcode(op_jfalse); 1563 instructions().append(cond->index()); 1564 instructions().append(target.bind(begin, instructions().size())); 1453 } 1454 1455 OpJfalse::emit(this, cond, target.bind(this)); 1565 1456 } 1566 1457 1567 1458 void BytecodeGenerator::emitJumpIfNotFunctionCall(RegisterID* cond, Label& target) 1568 1459 { 1569 size_t begin = instructions().size(); 1570 1571 emitOpcode(op_jneq_ptr); 1572 instructions().append(cond->index()); 1573 instructions().append(Special::CallFunction); 1574 instructions().append(target.bind(begin, instructions().size())); 1575 instructions().append(0); 1460 OpJneqPtr::emit(this, cond, Special::CallFunction, target.bind(this)); 1576 1461 } 1577 1462 1578 1463 
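[Note on the fusion rework above: the old code had to know how many raw operand words each opcode occupied in order to undo it (retrieveLastBinaryOp/rewindBinaryOp shrink the stream by 4 words, the unary variants by 3). The templated fuseCompareAndJump/fuseTestAndJmp instead reparse m_lastInstruction as a typed struct and let the writer rewind it wholesale. The sketch below is a minimal, self-contained model of that peephole using simplified stand-in types, not JSC's real classes: if the last emitted instruction is a comparison whose destination is a dead temporary, drop it and emit a fused compare-and-jump.

#include <cassert>
#include <cstdio>
#include <optional>
#include <vector>

enum class Op { Less, Jtrue, Jless };

struct Inst { Op op; int a = 0; int b = 0; int c = 0; };

struct Writer {
    std::vector<Inst> insts;
    void emit(Inst inst) { insts.push_back(inst); }
    void rewind() { insts.pop_back(); } // drop the last instruction as a unit
    std::optional<Inst> last() const {
        if (insts.empty())
            return std::nullopt;
        return insts.back();
    }
};

// cond is the register holding the comparison result; condRefCount == 0 stands
// in for the cond->isTemporary() && !cond->refCount() guard above, i.e. nobody
// else can observe the comparison's result, so it is safe to fold it away.
void emitJumpIfTrue(Writer& w, int cond, int condRefCount, int target)
{
    auto lastInst = w.last();
    if (lastInst && lastInst->op == Op::Less && lastInst->a == cond && !condRefCount) {
        Inst cmp = *lastInst; // fields: a = dst, b = lhs, c = rhs
        w.rewind();
        w.emit({ Op::Jless, cmp.b, cmp.c, target }); // fused: jump if lhs < rhs
        return;
    }
    w.emit({ Op::Jtrue, cond, target, 0 });
}

int main()
{
    Writer w;
    w.emit({ Op::Less, /*dst*/ 10, /*lhs*/ 1, /*rhs*/ 2 });
    emitJumpIfTrue(w, /*cond*/ 10, /*refCount*/ 0, /*target*/ 42);
    assert(w.insts.size() == 1 && w.insts[0].op == Op::Jless);
    std::printf("fused: jless r%d, r%d -> %d\n", w.insts[0].a, w.insts[0].b, w.insts[0].c);
    return 0;
}

Run as written this prints "fused: jless r1, r2 -> 42": the op_less and op_jtrue pair collapses into a single op_jless, which is exactly what the emitJumpIfTrue/emitJumpIfFalse chains above do for each comparison opcode.]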
void BytecodeGenerator::emitJumpIfNotFunctionApply(RegisterID* cond, Label& target) 1579 1464 { 1580 size_t begin = instructions().size(); 1581 1582 emitOpcode(op_jneq_ptr); 1583 instructions().append(cond->index()); 1584 instructions().append(Special::ApplyFunction); 1585 instructions().append(target.bind(begin, instructions().size())); 1586 instructions().append(0); 1465 OpJneqPtr::emit(this, cond, Special::ApplyFunction, target.bind(this)); 1587 1466 } 1588 1467 … … 1646 1525 return m_linkTimeConstantRegisters[constantIndex]; 1647 1526 1648 emitOpcode(op_mov); 1649 instructions().append(dst->index()); 1650 instructions().append(m_linkTimeConstantRegisters[constantIndex]->index()); 1527 OpMov::emit(this, dst, m_linkTimeConstantRegisters[constantIndex]); 1651 1528 1652 1529 return dst; … … 1657 1534 RefPtr<RegisterID> emptyValue = addConstantEmptyValue(); 1658 1535 1659 emitOpcode(op_mov); 1660 instructions().append(dst->index()); 1661 instructions().append(emptyValue->index()); 1536 OpMov::emit(this, dst, emptyValue.get()); 1537 1662 1538 return dst; 1663 1539 } … … 1667 1543 ASSERT(src != m_emptyValueRegister); 1668 1544 1669 m_staticPropertyAnalyzer.mov(dst->index(), src->index()); 1670 emitOpcode(op_mov); 1671 instructions().append(dst->index()); 1672 instructions().append(src->index()); 1673 1674 return dst; 1675 } 1676 1677 RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) 1678 { 1679 ASSERT_WITH_MESSAGE(op_to_number != opcodeID, "op_to_number has a Value Profile."); 1680 ASSERT_WITH_MESSAGE(op_negate != opcodeID, "op_negate has an Arith Profile."); 1681 emitOpcode(opcodeID); 1682 instructions().append(dst->index()); 1683 instructions().append(src->index()); 1545 m_staticPropertyAnalyzer.mov(dst, src); 1546 OpMov::emit(this, dst, src); 1684 1547 1685 1548 return dst; … … 1688 1551 RegisterID* BytecodeGenerator::emitUnaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src, OperandTypes types) 1689 1552 { 1690 ASSERT_WITH_MESSAGE(op_to_number != opcodeID, "op_to_number has a Value Profile."); 1691 emitOpcode(opcodeID); 1692 instructions().append(dst->index()); 1693 instructions().append(src->index()); 1694 1695 if (opcodeID == op_negate) 1696 instructions().append(ArithProfile(types.first()).bits()); 1697 return dst; 1698 } 1699 1700 RegisterID* BytecodeGenerator::emitUnaryOpProfiled(OpcodeID opcodeID, RegisterID* dst, RegisterID* src) 1701 { 1702 UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); 1703 instructions().append(dst->index()); 1704 instructions().append(src->index()); 1705 instructions().append(profile); 1706 return dst; 1553 switch (opcodeID) { 1554 case op_not: 1555 emitUnaryOp<OpNot>(dst, src); 1556 break; 1557 case op_negate: 1558 OpNegate::emit(this, dst, src, types); 1559 break; 1560 case op_to_number: 1561 emitUnaryOp<OpToNumber>(dst, src); 1562 break; 1563 default: 1564 ASSERT_NOT_REACHED(); 1565 } 1566 return dst; 1567 } 1568 1569 RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) 1570 { 1571 switch (opcodeID) { 1572 case op_eq: 1573 return emitBinaryOp<OpEq>(dst, src1, src2, types); 1574 case op_neq: 1575 return emitBinaryOp<OpNeq>(dst, src1, src2, types); 1576 case op_stricteq: 1577 return emitBinaryOp<OpStricteq>(dst, src1, src2, types); 1578 case op_nstricteq: 1579 return emitBinaryOp<OpNstricteq>(dst, src1, src2, types); 1580 case op_less: 1581 return emitBinaryOp<OpLess>(dst, src1, src2, types); 1582 case op_lesseq: 
1583 return emitBinaryOp<OpLesseq>(dst, src1, src2, types); 1584 case op_greater: 1585 return emitBinaryOp<OpGreater>(dst, src1, src2, types); 1586 case op_greatereq: 1587 return emitBinaryOp<OpGreatereq>(dst, src1, src2, types); 1588 case op_below: 1589 return emitBinaryOp<OpBelow>(dst, src1, src2, types); 1590 case op_beloweq: 1591 return emitBinaryOp<OpBeloweq>(dst, src1, src2, types); 1592 case op_mod: 1593 return emitBinaryOp<OpMod>(dst, src1, src2, types); 1594 case op_pow: 1595 return emitBinaryOp<OpPow>(dst, src1, src2, types); 1596 case op_lshift: 1597 return emitBinaryOp<OpLshift>(dst, src1, src2, types); 1598 case op_rshift: 1599 return emitBinaryOp<OpRshift>(dst, src1, src2, types); 1600 case op_urshift: 1601 return emitBinaryOp<OpUrshift>(dst, src1, src2, types); 1602 case op_add: 1603 return emitBinaryOp<OpAdd>(dst, src1, src2, types); 1604 case op_mul: 1605 return emitBinaryOp<OpMul>(dst, src1, src2, types); 1606 case op_div: 1607 return emitBinaryOp<OpDiv>(dst, src1, src2, types); 1608 case op_sub: 1609 return emitBinaryOp<OpSub>(dst, src1, src2, types); 1610 case op_bitand: 1611 return emitBinaryOp<OpBitand>(dst, src1, src2, types); 1612 case op_bitxor: 1613 return emitBinaryOp<OpBitxor>(dst, src1, src2, types); 1614 case op_bitor: 1615 return emitBinaryOp<OpBitor>(dst, src1, src2, types); 1616 default: 1617 ASSERT_NOT_REACHED(); 1618 return nullptr; 1619 } 1707 1620 } 1708 1621 1709 1622 RegisterID* BytecodeGenerator::emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message) 1710 1623 { 1711 UnlinkedValueProfile profile = emitProfiledOpcode(op_to_object); 1712 instructions().append(dst->index()); 1713 instructions().append(src->index()); 1714 instructions().append(addConstant(message)); 1715 instructions().append(profile); 1716 return dst; 1624 OpToObject::emit(this, dst, src, addConstant(message)); 1625 return dst; 1626 } 1627 1628 RegisterID* BytecodeGenerator::emitToNumber(RegisterID* dst, RegisterID* src) 1629 { 1630 return emitUnaryOp<OpToNumber>(dst, src); 1631 } 1632 1633 RegisterID* BytecodeGenerator::emitToString(RegisterID* dst, RegisterID* src) 1634 { 1635 return emitUnaryOp<OpToString>(dst, src); 1636 } 1637 1638 RegisterID* BytecodeGenerator::emitTypeOf(RegisterID* dst, RegisterID* src) 1639 { 1640 return emitUnaryOp<OpTypeof>(dst, src); 1717 1641 } 1718 1642 1719 1643 RegisterID* BytecodeGenerator::emitInc(RegisterID* srcDst) 1720 1644 { 1721 emitOpcode(op_inc); 1722 instructions().append(srcDst->index()); 1645 OpInc::emit(this, srcDst); 1723 1646 return srcDst; 1724 1647 } … … 1726 1649 RegisterID* BytecodeGenerator::emitDec(RegisterID* srcDst) 1727 1650 { 1728 emitOpcode(op_dec); 1729 instructions().append(srcDst->index()); 1651 OpDec::emit(this, srcDst); 1730 1652 return srcDst; 1731 1653 } 1732 1654 1733 RegisterID* BytecodeGenerator::emitBinaryOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) 1734 { 1735 1736 if (opcodeID == op_bitand || opcodeID == op_bitor) { 1737 UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID); 1738 instructions().append(dst->index()); 1739 instructions().append(src1->index()); 1740 instructions().append(src2->index()); 1741 instructions().append(profile); 1742 return dst; 1743 } 1744 1745 emitOpcode(opcodeID); 1746 instructions().append(dst->index()); 1747 instructions().append(src1->index()); 1748 instructions().append(src2->index()); 1749 1750 if (opcodeID == op_bitxor || opcodeID == op_add || 1751 opcodeID == op_mul || opcodeID == op_sub || opcodeID == 
op_div) 1752 instructions().append(ArithProfile(types.first(), types.second()).bits()); 1753 1754 return dst; 1755 } 1756 1757 RegisterID* BytecodeGenerator::emitEqualityOp(OpcodeID opcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2) 1758 { 1759 if (m_lastOpcodeID == op_typeof) { 1760 int dstIndex; 1761 int srcIndex; 1762 1763 retrieveLastUnaryOp(dstIndex, srcIndex); 1764 1765 if (src1->index() == dstIndex 1655 template<typename EqOp> 1656 RegisterID* BytecodeGenerator::emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2) 1657 { 1658 if (m_lastInstruction->is<OpTypeof>()) { 1659 auto op = m_lastInstruction->as<OpTypeof>(); 1660 if (src1->index() == op.dst.offset() 1766 1661 && src1->isTemporary() 1767 1662 && m_codeBlock->isConstantRegisterIndex(src2->index()) … … 1769 1664 const String& value = asString(m_codeBlock->constantRegister(src2->index()).get())->tryGetValue(); 1770 1665 if (value == "undefined") { 1771 rewindUnaryOp(); 1772 emitOpcode(op_is_undefined); 1773 instructions().append(dst->index()); 1774 instructions().append(srcIndex); 1666 rewind(); 1667 OpIsUndefined::emit(this, dst, op.value); 1775 1668 return dst; 1776 1669 } 1777 1670 if (value == "boolean") { 1778 rewindUnaryOp(); 1779 emitOpcode(op_is_boolean); 1780 instructions().append(dst->index()); 1781 instructions().append(srcIndex); 1671 rewind(); 1672 OpIsBoolean::emit(this, dst, op.value); 1782 1673 return dst; 1783 1674 } 1784 1675 if (value == "number") { 1785 rewindUnaryOp(); 1786 emitOpcode(op_is_number); 1787 instructions().append(dst->index()); 1788 instructions().append(srcIndex); 1676 rewind(); 1677 OpIsNumber::emit(this, dst, op.value); 1789 1678 return dst; 1790 1679 } 1791 1680 if (value == "string") { 1792 rewindUnaryOp(); 1793 emitOpcode(op_is_cell_with_type); 1794 instructions().append(dst->index()); 1795 instructions().append(srcIndex); 1796 instructions().append(StringType); 1681 rewind(); 1682 OpIsCellWithType::emit(this, dst, op.value, StringType); 1797 1683 return dst; 1798 1684 } 1799 1685 if (value == "symbol") { 1800 rewindUnaryOp(); 1801 emitOpcode(op_is_cell_with_type); 1802 instructions().append(dst->index()); 1803 instructions().append(srcIndex); 1804 instructions().append(SymbolType); 1686 rewind(); 1687 OpIsCellWithType::emit(this, dst, op.value, SymbolType); 1805 1688 return dst; 1806 1689 } 1807 1690 if (Options::useBigInt() && value == "bigint") { 1808 rewindUnaryOp(); 1809 emitOpcode(op_is_cell_with_type); 1810 instructions().append(dst->index()); 1811 instructions().append(srcIndex); 1812 instructions().append(BigIntType); 1691 rewind(); 1692 OpIsCellWithType::emit(this, dst, op.value, BigIntType); 1813 1693 return dst; 1814 1694 } 1815 1695 if (value == "object") { 1816 rewindUnaryOp(); 1817 emitOpcode(op_is_object_or_null); 1818 instructions().append(dst->index()); 1819 instructions().append(srcIndex); 1696 rewind(); 1697 OpIsObjectOrNull::emit(this, dst, op.value); 1820 1698 return dst; 1821 1699 } 1822 1700 if (value == "function") { 1823 rewindUnaryOp(); 1824 emitOpcode(op_is_function); 1825 instructions().append(dst->index()); 1826 instructions().append(srcIndex); 1701 rewind(); 1702 OpIsFunction::emit(this, dst, op.value); 1827 1703 return dst; 1828 1704 } … … 1830 1706 } 1831 1707 1832 emitOpcode(opcodeID); 1833 instructions().append(dst->index()); 1834 instructions().append(src1->index()); 1835 instructions().append(src2->index()); 1708 EqOp::emit(this, dst, src1, src2); 1836 1709 return dst; 1837 1710 } … … 1855 1728 return; 1856 1729 1857 
emitOpcode(op_profile_type); 1858 instructions().append(registerToProfile->index()); 1859 instructions().append(0); 1860 instructions().append(flag); 1861 instructions().append(0); 1862 instructions().append(resolveType()); 1730 OpProfileType::emit(this, registerToProfile, 0, flag, { }, resolveType()); 1863 1731 1864 1732 // Don't emit expression info for this version of profile type. This generally means … … 1880 1748 return; 1881 1749 1882 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? 1883 emitOpcode(op_profile_type); 1884 instructions().append(registerToProfile->index()); 1885 instructions().append(0); 1886 instructions().append(flag); 1887 instructions().append(0); 1888 instructions().append(resolveType()); 1889 1750 OpProfileType::emit(this, registerToProfile, 0, flag, { }, resolveType()); 1890 1751 emitTypeProfilerExpressionInfo(startDivot, endDivot); 1891 1752 } … … 1910 1771 } 1911 1772 1912 // The format of this instruction is: op_profile_type regToProfile, TypeLocation*, flag, identifier?, resolveType? 1913 emitOpcode(op_profile_type); 1914 instructions().append(registerToProfile->index()); 1915 instructions().append(symbolTableOrScopeDepth); 1916 instructions().append(flag); 1917 instructions().append(addConstant(var.ident())); 1918 instructions().append(resolveType()); 1919 1773 OpProfileType::emit(this, registerToProfile, symbolTableOrScopeDepth, flag, addConstant(var.ident()), resolveType()); 1920 1774 emitTypeProfilerExpressionInfo(startDivot, endDivot); 1921 1775 } … … 1928 1782 m_codeBlock->addOpProfileControlFlowBytecodeOffset(bytecodeOffset); 1929 1783 1930 emitOpcode(op_profile_control_flow); 1931 instructions().append(textOffset); 1784 OpProfileControlFlow::emit(this, textOffset); 1932 1785 } 1933 1786 } … … 2128 1981 *constantSymbolTableResult = constantSymbolTable; 2129 1982 2130 emitOpcode(op_create_lexical_environment); 2131 instructions().append(newScope->index()); 2132 instructions().append(scopeRegister()->index()); 2133 instructions().append(constantSymbolTable->index()); 2134 instructions().append(addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? jsTDZValue() : jsUndefined())->index()); 1983 OpCreateLexicalEnvironment::emit(this, newScope, scopeRegister(), VirtualRegister { symbolTableConstantIndex }, addConstantValue(tdzRequirement == TDZRequirement::UnderTDZ ? 
jsTDZValue() : jsUndefined())); 2135 1984 2136 1985 move(scopeRegister(), newScope); … … 2263 2112 2264 2113 dst = finalDestination(dst); 2265 emitOpcode(op_resolve_scope_for_hoisting_func_decl_in_eval); 2266 instructions().append(kill(dst)); 2267 instructions().append(m_topMostScope->index()); 2268 instructions().append(addConstant(property)); 2114 OpResolveScopeForHoistingFuncDeclInEval::emit(this, kill(dst), m_topMostScope, addConstant(property)); 2269 2115 return dst; 2270 2116 } … … 2364 2210 move(scopeRegister(), parentScope.get()); 2365 2211 2366 emitOpcode(op_create_lexical_environment); 2367 instructions().append(loopScope->index()); 2368 instructions().append(scopeRegister()->index()); 2369 instructions().append(loopSymbolTable->index()); 2370 instructions().append(addConstantValue(jsTDZValue())->index()); 2212 OpCreateLexicalEnvironment::emit(this, loopScope, scopeRegister(), loopSymbolTable, addConstantValue(jsTDZValue())); 2371 2213 2372 2214 move(scopeRegister(), loopScope); … … 2493 2335 RegisterID* BytecodeGenerator::emitOverridesHasInstance(RegisterID* dst, RegisterID* constructor, RegisterID* hasInstanceValue) 2494 2336 { 2495 emitOpcode(op_overrides_has_instance); 2496 instructions().append(dst->index()); 2497 instructions().append(constructor->index()); 2498 instructions().append(hasInstanceValue->index()); 2337 OpOverridesHasInstance::emit(this, dst, constructor, hasInstanceValue); 2499 2338 return dst; 2500 2339 } … … 2561 2400 // resolve_scope dst, id, ResolveType, depth 2562 2401 dst = tempDestination(dst); 2563 emitOpcode(op_resolve_scope); 2564 instructions().append(kill(dst)); 2565 instructions().append(scopeRegister()->index()); 2566 instructions().append(addConstant(variable.ident())); 2567 instructions().append(resolveType()); 2568 instructions().append(localScopeDepth()); 2569 instructions().append(0); 2402 OpResolveScope::emit(this, kill(dst), scopeRegister(), addConstant(variable.ident()), resolveType(), localScopeDepth()); 2570 2403 return dst; 2571 2404 } … … 2582 2415 2583 2416 case VarKind::DirectArgument: { 2584 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_arguments); 2585 instructions().append(kill(dst)); 2586 instructions().append(scope->index()); 2587 instructions().append(variable.offset().capturedArgumentsOffset().offset()); 2588 instructions().append(profile); 2417 OpGetFromArguments::emit(this, kill(dst), scope, variable.offset().capturedArgumentsOffset().offset()); 2589 2418 return dst; 2590 2419 } … … 2595 2424 2596 2425 // get_from_scope dst, scope, id, GetPutInfo, Structure, Operand 2597 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_from_scope);2598 instructions().append(kill(dst));2599 instructions().append(scope->index());2600 instructions().append(addConstant(variable.ident()));2601 instructions().append(GetPutInfo(resolveMode, variable.offset().isScope() ? LocalClosureVar : resolveType(), InitializationMode::NotInitialization).operand());2602 instructions().append(localScopeDepth());2603 instructions().append(variable.offset().isScope() ? variable.offset().scopeOffset().offset() : 0);2604 instructions().append(profile);2426 OpGetFromScope::emit( 2427 this, 2428 kill(dst), 2429 scope, 2430 addConstant(variable.ident()), 2431 GetPutInfo(resolveMode, variable.offset().isScope() ? LocalClosureVar : resolveType(), InitializationMode::NotInitialization), 2432 localScopeDepth(), 2433 variable.offset().isScope() ? 
variable.offset().scopeOffset().offset() : 0); 2605 2434 return dst; 2606 2435 } } … … 2617 2446 2618 2447 case VarKind::DirectArgument: 2619 emitOpcode(op_put_to_arguments); 2620 instructions().append(scope->index()); 2621 instructions().append(variable.offset().capturedArgumentsOffset().offset()); 2622 instructions().append(value->index()); 2448 OpPutToArguments::emit(this, scope, variable.offset().capturedArgumentsOffset().offset(), value); 2623 2449 return value; 2624 2450 … … 2628 2454 2629 2455 // put_to_scope scope, id, value, GetPutInfo, Structure, Operand 2630 emitOpcode(op_put_to_scope); 2631 instructions().append(scope->index()); 2632 instructions().append(addConstant(variable.ident())); 2633 instructions().append(value->index()); 2456 GetPutInfo getPutInfo(0); 2457 int scopeDepth; 2634 2458 ScopeOffset offset; 2635 2459 if (variable.offset().isScope()) { 2636 2460 offset = variable.offset().scopeOffset(); 2637 instructions().append(GetPutInfo(resolveMode, LocalClosureVar, initializationMode).operand());2638 instructions().append(variable.symbolTableConstantIndex());2461 getPutInfo = GetPutInfo(resolveMode, LocalClosureVar, initializationMode); 2462 scopeDepth = variable.symbolTableConstantIndex(); 2639 2463 } else { 2640 2464 ASSERT(resolveType() != LocalClosureVar); 2641 instructions().append(GetPutInfo(resolveMode, resolveType(), initializationMode).operand());2642 instructions().append(localScopeDepth());2643 } 2644 instructions().append(!!offset ? offset.offset() : 0);2465 getPutInfo = GetPutInfo(resolveMode, resolveType(), initializationMode); 2466 scopeDepth = localScopeDepth(); 2467 } 2468 OpPutToScope::emit(this, scope, addConstant(variable.ident()), value, getPutInfo, scopeDepth, !!offset ? offset.offset() : 0); 2645 2469 return value; 2646 2470 } } … … 2658 2482 RegisterID* BytecodeGenerator::emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype) 2659 2483 { 2660 emitOpcode(op_instanceof); 2661 instructions().append(dst->index()); 2662 instructions().append(value->index()); 2663 instructions().append(basePrototype->index()); 2484 OpInstanceof::emit(this, dst, value, basePrototype); 2664 2485 return dst; 2665 2486 } … … 2667 2488 RegisterID* BytecodeGenerator::emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue) 2668 2489 { 2669 emitOpcode(op_instanceof_custom); 2670 instructions().append(dst->index()); 2671 instructions().append(value->index()); 2672 instructions().append(constructor->index()); 2673 instructions().append(hasInstanceValue->index()); 2490 OpInstanceofCustom::emit(this, dst, value, constructor, hasInstanceValue); 2674 2491 return dst; 2675 2492 } … … 2677 2494 RegisterID* BytecodeGenerator::emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base) 2678 2495 { 2679 UnlinkedArrayProfile arrayProfile = newArrayProfile(); 2680 emitOpcode(op_in_by_val); 2681 instructions().append(dst->index()); 2682 instructions().append(base->index()); 2683 instructions().append(property->index()); 2684 instructions().append(arrayProfile); 2496 OpInByVal::emit(this, dst, base, property); 2685 2497 return dst; 2686 2498 } … … 2688 2500 RegisterID* BytecodeGenerator::emitInById(RegisterID* dst, RegisterID* base, const Identifier& property) 2689 2501 { 2690 emitOpcode(op_in_by_id); 2691 instructions().append(dst->index()); 2692 instructions().append(base->index()); 2693 instructions().append(addConstant(property)); 2502 OpInById::emit(this, dst, base, addConstant(property)); 2694 2503 
return dst; 2695 2504 } … … 2699 2508 ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties are not supported with tryGetById."); 2700 2509 2701 UnlinkedValueProfile profile = emitProfiledOpcode(op_try_get_by_id); 2702 instructions().append(kill(dst)); 2703 instructions().append(base->index()); 2704 instructions().append(addConstant(property)); 2705 instructions().append(profile); 2510 OpTryGetById::emit(this, kill(dst), base, addConstant(property)); 2706 2511 return dst; 2707 2512 } … … 2713 2518 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 2714 2519 2715 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id); 2716 instructions().append(kill(dst)); 2717 instructions().append(base->index()); 2718 instructions().append(addConstant(property)); 2719 instructions().append(0); 2720 instructions().append(0); 2721 instructions().append(0); 2722 instructions().append(Options::prototypeHitCountForLLIntCaching()); 2723 instructions().append(profile); 2520 OpGetById::emit(this, kill(dst), base, addConstant(property)); 2724 2521 return dst; 2725 2522 } … … 2729 2526 ASSERT_WITH_MESSAGE(!parseIndex(property), "Indexed properties should be handled with get_by_val."); 2730 2527 2731 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_with_this); 2732 instructions().append(kill(dst)); 2733 instructions().append(base->index()); 2734 instructions().append(thisVal->index()); 2735 instructions().append(addConstant(property)); 2736 instructions().append(profile); 2528 OpGetByIdWithThis::emit(this, kill(dst), base, thisVal, addConstant(property)); 2737 2529 return dst; 2738 2530 } … … 2744 2536 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 2745 2537 2746 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_id_direct); 2747 instructions().append(kill(dst)); 2748 instructions().append(base->index()); 2749 instructions().append(addConstant(property)); 2750 instructions().append(0); 2751 instructions().append(0); 2752 instructions().append(profile); 2538 OpGetByIdDirect::emit(this, kill(dst), base, addConstant(property)); 2753 2539 return dst; 2754 2540 } … … 2760 2546 unsigned propertyIndex = addConstant(property); 2761 2547 2762 m_staticPropertyAnalyzer.putById(base ->index(), propertyIndex);2548 m_staticPropertyAnalyzer.putById(base, propertyIndex); 2763 2549 2764 2550 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 2765 2551 2766 emitOpcode(op_put_by_id); 2767 instructions().append(base->index()); 2768 instructions().append(propertyIndex); 2769 instructions().append(value->index()); 2770 instructions().append(0); // old structure 2771 instructions().append(0); // offset 2772 instructions().append(0); // new structure 2773 instructions().append(0); // structure chain 2774 instructions().append(static_cast<int>(PutByIdNone)); // is not direct 2552 OpPutById::emit(this, base, propertyIndex, value, PutByIdNone); // is not direct 2775 2553 2776 2554 return value; … … 2783 2561 unsigned propertyIndex = addConstant(property); 2784 2562 2785 emitOpcode(op_put_by_id_with_this); 2786 instructions().append(base->index()); 2787 instructions().append(thisValue->index()); 2788 instructions().append(propertyIndex); 2789 instructions().append(value->index()); 2563 OpPutByIdWithThis::emit(this, base, thisValue, propertyIndex, value); 2790 2564 2791 2565 return value; … … 2798 2572 unsigned propertyIndex = addConstant(property); 2799 2573 2800 m_staticPropertyAnalyzer.putById(base ->index(), propertyIndex);2574 
m_staticPropertyAnalyzer.putById(base, propertyIndex); 2801 2575 2802 2576 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 2803 2577 2804 emitOpcode(op_put_by_id); 2805 instructions().append(base->index()); 2806 instructions().append(propertyIndex); 2807 instructions().append(value->index()); 2808 instructions().append(0); // old structure 2809 instructions().append(0); // offset 2810 instructions().append(0); // new structure 2811 instructions().append(0); // structure chain (unused if direct) 2812 instructions().append(static_cast<int>((putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? PutByIdIsDirect : PutByIdNone)); 2578 PutByIdFlags type = (putType == PropertyNode::KnownDirect || property != m_vm->propertyNames->underscoreProto) ? PutByIdIsDirect : PutByIdNone; 2579 OpPutById::emit(this, base, propertyIndex, value, type); 2813 2580 return value; 2814 2581 } … … 2817 2584 { 2818 2585 unsigned propertyIndex = addConstant(property); 2819 m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); 2820 2821 emitOpcode(op_put_getter_by_id); 2822 instructions().append(base->index()); 2823 instructions().append(propertyIndex); 2824 instructions().append(attributes); 2825 instructions().append(getter->index()); 2586 m_staticPropertyAnalyzer.putById(base, propertyIndex); 2587 2588 OpPutGetterById::emit(this, base, propertyIndex, attributes, getter); 2826 2589 } 2827 2590 … … 2829 2592 { 2830 2593 unsigned propertyIndex = addConstant(property); 2831 m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); 2832 2833 emitOpcode(op_put_setter_by_id); 2834 instructions().append(base->index()); 2835 instructions().append(propertyIndex); 2836 instructions().append(attributes); 2837 instructions().append(setter->index()); 2594 m_staticPropertyAnalyzer.putById(base, propertyIndex); 2595 2596 OpPutSetterById::emit(this, base, propertyIndex, attributes, setter); 2838 2597 } 2839 2598 … … 2842 2601 unsigned propertyIndex = addConstant(property); 2843 2602 2844 m_staticPropertyAnalyzer.putById(base->index(), propertyIndex); 2845 2846 emitOpcode(op_put_getter_setter_by_id); 2847 instructions().append(base->index()); 2848 instructions().append(propertyIndex); 2849 instructions().append(attributes); 2850 instructions().append(getter->index()); 2851 instructions().append(setter->index()); 2603 m_staticPropertyAnalyzer.putById(base, propertyIndex); 2604 2605 OpPutGetterSetterById::emit(this, base, propertyIndex, attributes, getter, setter); 2852 2606 } 2853 2607 2854 2608 void BytecodeGenerator::emitPutGetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* getter) 2855 2609 { 2856 emitOpcode(op_put_getter_by_val); 2857 instructions().append(base->index()); 2858 instructions().append(property->index()); 2859 instructions().append(attributes); 2860 instructions().append(getter->index()); 2610 OpPutGetterByVal::emit(this, base, property, attributes, getter); 2861 2611 } 2862 2612 2863 2613 void BytecodeGenerator::emitPutSetterByVal(RegisterID* base, RegisterID* property, unsigned attributes, RegisterID* setter) 2864 2614 { 2865 emitOpcode(op_put_setter_by_val); 2866 instructions().append(base->index()); 2867 instructions().append(property->index()); 2868 instructions().append(attributes); 2869 instructions().append(setter->index()); 2615 OpPutSetterByVal::emit(this, base, property, attributes, setter); 2870 2616 } 2871 2617 … … 2908 2654 RegisterID* BytecodeGenerator::emitDeleteById(RegisterID* dst, RegisterID* 
base, const Identifier& property) 2909 2655 { 2910 emitOpcode(op_del_by_id); 2911 instructions().append(dst->index()); 2912 instructions().append(base->index()); 2913 instructions().append(addConstant(property)); 2656 OpDelById::emit(this, dst, base, addConstant(property)); 2914 2657 return dst; 2915 2658 } … … 2917 2660 RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* property) 2918 2661 { 2662 bool forceWide = false; 2919 2663 for (size_t i = m_forInContextStack.size(); i--; ) { 2920 2664 ForInContext& context = m_forInContextStack[i].get(); … … 2928 2672 indexedContext.addGetInst(instIndex, property->index()); 2929 2673 property = indexedContext.index(); 2674 forceWide = true; 2930 2675 break; 2931 2676 } 2932 2677 2933 2678 StructureForInContext& structureContext = context.asStructureForInContext(); 2934 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_direct_pname); 2935 instructions().append(kill(dst)); 2936 instructions().append(base->index()); 2937 instructions().append(property->index()); 2938 instructions().append(structureContext.index()->index()); 2939 instructions().append(structureContext.enumerator()->index()); 2940 instructions().append(profile); 2941 2942 structureContext.addGetInst(instIndex, property->index(), profile); 2679 OpGetDirectPname::emit<OpcodeSize::Wide>(this, kill(dst), base, property, structureContext.index(), structureContext.enumerator()); 2680 2681 structureContext.addGetInst(instIndex, property->index()); 2943 2682 return dst; 2944 2683 } 2945 2684 2946 UnlinkedArrayProfile arrayProfile = newArrayProfile(); 2947 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val); 2948 instructions().append(kill(dst)); 2949 instructions().append(base->index()); 2950 instructions().append(property->index()); 2951 instructions().append(arrayProfile); 2952 instructions().append(profile); 2685 if (forceWide) 2686 OpGetByVal::emit<OpcodeSize::Wide>(this, kill(dst), base, property); 2687 else 2688 OpGetByVal::emit(this, kill(dst), base, property); 2953 2689 return dst; 2954 2690 } … … 2956 2692 RegisterID* BytecodeGenerator::emitGetByVal(RegisterID* dst, RegisterID* base, RegisterID* thisValue, RegisterID* property) 2957 2693 { 2958 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_by_val_with_this); 2959 instructions().append(kill(dst)); 2960 instructions().append(base->index()); 2961 instructions().append(thisValue->index()); 2962 instructions().append(property->index()); 2963 instructions().append(profile); 2694 OpGetByValWithThis::emit(this, kill(dst), base, thisValue, property); 2964 2695 return dst; 2965 2696 } … … 2967 2698 RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) 2968 2699 { 2969 UnlinkedArrayProfile arrayProfile = newArrayProfile(); 2970 emitOpcode(op_put_by_val); 2971 instructions().append(base->index()); 2972 instructions().append(property->index()); 2973 instructions().append(value->index()); 2974 instructions().append(arrayProfile); 2975 2700 OpPutByVal::emit(this, base, property, value); 2976 2701 return value; 2977 2702 } … … 2979 2704 RegisterID* BytecodeGenerator::emitPutByVal(RegisterID* base, RegisterID* thisValue, RegisterID* property, RegisterID* value) 2980 2705 { 2981 emitOpcode(op_put_by_val_with_this); 2982 instructions().append(base->index()); 2983 instructions().append(thisValue->index()); 2984 instructions().append(property->index()); 2985 instructions().append(value->index()); 2986 2706 OpPutByValWithThis::emit(this, 
base, thisValue, property, value); 2987 2707 return value; 2988 2708 } … … 2990 2710 RegisterID* BytecodeGenerator::emitDirectPutByVal(RegisterID* base, RegisterID* property, RegisterID* value) 2991 2711 { 2992 UnlinkedArrayProfile arrayProfile = newArrayProfile(); 2993 emitOpcode(op_put_by_val_direct); 2994 instructions().append(base->index()); 2995 instructions().append(property->index()); 2996 instructions().append(value->index()); 2997 instructions().append(arrayProfile); 2712 OpPutByValDirect::emit(this, base, property, value); 2998 2713 return value; 2999 2714 } … … 3001 2716 RegisterID* BytecodeGenerator::emitDeleteByVal(RegisterID* dst, RegisterID* base, RegisterID* property) 3002 2717 { 3003 emitOpcode(op_del_by_val); 3004 instructions().append(dst->index()); 3005 instructions().append(base->index()); 3006 instructions().append(property->index()); 2718 OpDelByVal::emit(this, dst, base, property); 3007 2719 return dst; 3008 2720 } … … 3010 2722 void BytecodeGenerator::emitSuperSamplerBegin() 3011 2723 { 3012 emitOpcode(op_super_sampler_begin);2724 OpSuperSamplerBegin::emit(this); 3013 2725 } 3014 2726 3015 2727 void BytecodeGenerator::emitSuperSamplerEnd() 3016 2728 { 3017 emitOpcode(op_super_sampler_end);2729 OpSuperSamplerEnd::emit(this); 3018 2730 } 3019 2731 3020 2732 RegisterID* BytecodeGenerator::emitIdWithProfile(RegisterID* src, SpeculatedType profile) 3021 2733 { 3022 emitOpcode(op_identity_with_profile); 3023 instructions().append(src->index()); 3024 instructions().append(static_cast<uint32_t>(profile >> 32)); 3025 instructions().append(static_cast<uint32_t>(profile)); 2734 OpIdentityWithProfile::emit(this, src, static_cast<uint32_t>(profile >> 32), static_cast<uint32_t>(profile)); 3026 2735 return src; 3027 2736 } … … 3029 2738 void BytecodeGenerator::emitUnreachable() 3030 2739 { 3031 emitOpcode(op_unreachable);2740 OpUnreachable::emit(this); 3032 2741 } 3033 2742 3034 2743 RegisterID* BytecodeGenerator::emitGetArgument(RegisterID* dst, int32_t index) 3035 2744 { 3036 UnlinkedValueProfile profile = emitProfiledOpcode(op_get_argument); 3037 instructions().append(dst->index()); 3038 instructions().append(index + 1); // Including |this|. 
3039 instructions().append(profile); 2745 OpGetArgument::emit(this, dst, index + 1 /* Including |this| */); 3040 2746 return dst; 3041 2747 } … … 3043 2749 RegisterID* BytecodeGenerator::emitCreateThis(RegisterID* dst) 3044 2750 { 3045 size_t begin = instructions().size(); 3046 m_staticPropertyAnalyzer.createThis(dst->index(), begin + 3); 2751 m_staticPropertyAnalyzer.createThis(dst, m_writer.ref()); 3047 2752 3048 2753 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 3049 emitOpcode(op_create_this); 3050 instructions().append(dst->index()); 3051 instructions().append(dst->index()); 3052 instructions().append(0); 3053 instructions().append(0); 2754 OpCreateThis::emit(this, dst, dst, 0); 3054 2755 return dst; 3055 2756 } … … 3057 2758 void BytecodeGenerator::emitTDZCheck(RegisterID* target) 3058 2759 { 3059 emitOpcode(op_check_tdz); 3060 instructions().append(target->index()); 2760 OpCheckTdz::emit(this, target); 3061 2761 } 3062 2762 … … 3158 2858 RegisterID* BytecodeGenerator::emitNewObject(RegisterID* dst) 3159 2859 { 3160 size_t begin = instructions().size(); 3161 m_staticPropertyAnalyzer.newObject(dst->index(), begin + 2); 3162 3163 emitOpcode(op_new_object); 3164 instructions().append(dst->index()); 3165 instructions().append(0); 3166 instructions().append(newObjectAllocationProfile()); 2860 m_staticPropertyAnalyzer.newObject(dst, m_writer.ref()); 2861 2862 OpNewObject::emit(this, dst, 0); 3167 2863 return dst; 3168 2864 } … … 3207 2903 RegisterID* BytecodeGenerator::emitNewArrayBuffer(RegisterID* dst, JSImmutableButterfly* array, IndexingType recommendedIndexingType) 3208 2904 { 3209 emitOpcode(op_new_array_buffer); 3210 instructions().append(dst->index()); 3211 instructions().append(addConstantValue(array)->index()); 3212 instructions().append(newArrayAllocationProfile(recommendedIndexingType)); 2905 OpNewArrayBuffer::emit(this, dst, addConstantValue(array), recommendedIndexingType); 3213 2906 return dst; 3214 2907 } … … 3228 2921 } 3229 2922 ASSERT(!length); 3230 emitOpcode(op_new_array); 3231 instructions().append(dst->index()); 3232 instructions().append(argv.size() ? argv[0]->index() : 0); // argv 3233 instructions().append(argv.size()); // argc 3234 instructions().append(newArrayAllocationProfile(recommendedIndexingType)); 2923 OpNewArray::emit(this, dst, argv.size() ? 
argv[0].get() : VirtualRegister { 0 }, argv.size(), recommendedIndexingType); 3235 2924 return dst; 3236 2925 } … … 3258 2947 emitNode(tmp.get(), expression); 3259 2948 3260 emitOpcode(op_spread); 3261 instructions().append(argv[i].get()->index()); 3262 instructions().append(tmp.get()->index()); 2949 OpSpread::emit(this, argv[i].get(), tmp.get()); 3263 2950 } else { 3264 2951 ExpressionNode* expression = node->value(); … … 3270 2957 3271 2958 unsigned bitVectorIndex = m_codeBlock->addBitVector(WTFMove(bitVector)); 3272 emitOpcode(op_new_array_with_spread); 3273 instructions().append(dst->index()); 3274 instructions().append(argv[0]->index()); // argv 3275 instructions().append(argv.size()); // argc 3276 instructions().append(bitVectorIndex); 3277 2959 OpNewArrayWithSpread::emit(this, dst, argv[0].get(), argv.size(), bitVectorIndex); 3278 2960 return dst; 3279 2961 } … … 3281 2963 RegisterID* BytecodeGenerator::emitNewArrayWithSize(RegisterID* dst, RegisterID* length) 3282 2964 { 3283 emitOpcode(op_new_array_with_size); 3284 instructions().append(dst->index()); 3285 instructions().append(length->index()); 3286 instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); 3287 2965 OpNewArrayWithSize::emit(this, dst, length); 3288 2966 return dst; 3289 2967 } … … 3291 2969 RegisterID* BytecodeGenerator::emitNewRegExp(RegisterID* dst, RegExp* regExp) 3292 2970 { 3293 emitOpcode(op_new_regexp); 3294 instructions().append(dst->index()); 3295 instructions().append(addConstantValue(regExp)->index()); 2971 OpNewRegexp::emit(this, dst, addConstantValue(regExp)); 3296 2972 return dst; 3297 2973 } … … 3301 2977 unsigned index = m_codeBlock->addFunctionExpr(makeFunction(function)); 3302 2978 3303 OpcodeID opcodeID = op_new_func_exp;3304 2979 switch (function->parseMode()) { 3305 2980 case SourceParseMode::GeneratorWrapperFunctionMode: 3306 2981 case SourceParseMode::GeneratorWrapperMethodMode: 3307 opcodeID = op_new_generator_func_exp;2982 OpNewGeneratorFuncExp::emit(this, dst, scopeRegister(), index); 3308 2983 break; 3309 2984 case SourceParseMode::AsyncFunctionMode: 3310 2985 case SourceParseMode::AsyncMethodMode: 3311 2986 case SourceParseMode::AsyncArrowFunctionMode: 3312 opcodeID = op_new_async_func_exp;2987 OpNewAsyncFuncExp::emit(this, dst, scopeRegister(), index); 3313 2988 break; 3314 2989 case SourceParseMode::AsyncGeneratorWrapperFunctionMode: 3315 2990 case SourceParseMode::AsyncGeneratorWrapperMethodMode: 3316 opcodeID = op_new_async_generator_func_exp;2991 OpNewAsyncGeneratorFuncExp::emit(this, dst, scopeRegister(), index); 3317 2992 break; 3318 2993 default: 2994 OpNewFuncExp::emit(this, dst, scopeRegister(), index); 3319 2995 break; 3320 2996 } 3321 3322 emitOpcode(opcodeID);3323 instructions().append(dst->index());3324 instructions().append(scopeRegister()->index());3325 instructions().append(index);3326 2997 } 3327 2998 … … 3356 3027 unsigned index = m_codeBlock->addFunctionExpr(executable); 3357 3028 3358 emitOpcode(op_new_func_exp); 3359 instructions().append(dst->index()); 3360 instructions().append(scopeRegister()->index()); 3361 instructions().append(index); 3029 OpNewFuncExp::emit(this, dst, scopeRegister(), index); 3362 3030 return dst; 3363 3031 } … … 3367 3035 unsigned index = m_codeBlock->addFunctionDecl(makeFunction(function)); 3368 3036 if (isGeneratorWrapperParseMode(function->parseMode())) 3369 emitOpcode(op_new_generator_func);3037 OpNewGeneratorFunc::emit(this, dst, scopeRegister(), index); 3370 3038 else if (function->parseMode() == 
SourceParseMode::AsyncFunctionMode) 3371 emitOpcode(op_new_async_func);3039 OpNewAsyncFunc::emit(this, dst, scopeRegister(), index); 3372 3040 else if (isAsyncGeneratorWrapperParseMode(function->parseMode())) 3373 emitOpcode(op_new_async_generator_func);3041 OpNewAsyncGeneratorFunc::emit(this, dst, scopeRegister(), index); 3374 3042 else 3375 emitOpcode(op_new_func); 3376 instructions().append(dst->index()); 3377 instructions().append(scopeRegister()->index()); 3378 instructions().append(index); 3043 OpNewFunc::emit(this, dst, scopeRegister(), index); 3379 3044 return dst; 3380 3045 } … … 3397 3062 // FIXME: We should use an op_call to an internal function here instead. 3398 3063 // https://bugs.webkit.org/show_bug.cgi?id=155547 3399 emitOpcode(op_set_function_name); 3400 instructions().append(value->index()); 3401 instructions().append(name->index()); 3064 OpSetFunctionName::emit(this, value, name); 3402 3065 } 3403 3066 3404 3067 RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3405 3068 { 3406 return emitCall (op_call,dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);3069 return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); 3407 3070 } 3408 3071 … … 3411 3074 if (m_inTailPosition) { 3412 3075 m_codeBlock->setHasTailCalls(); 3413 return emitCall (op_tail_call,dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);3414 } 3415 return emitCall (op_call,dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);3076 return emitCall<OpTailCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); 3077 } 3078 return emitCall<OpCall>(dst, func, expectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); 3416 3079 } 3417 3080 3418 3081 RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3419 3082 { 3420 return emitCall (op_call_eval,dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall);3083 return emitCall<OpCallEval>(dst, func, NoExpectedFunction, callArguments, divot, divotStart, divotEnd, debuggableCall); 3421 3084 } 3422 3085 … … 3439 3102 return NoExpectedFunction; 3440 3103 3441 size_t begin = instructions().size(); 3442 emitOpcode(op_jneq_ptr); 3443 instructions().append(func->index()); 3444 instructions().append(Special::ObjectConstructor); 3445 instructions().append(realCall->bind(begin, instructions().size())); 3446 instructions().append(0); 3104 OpJneqPtr::emit(this, func, Special::ObjectConstructor, realCall->bind(this)); 3447 3105 3448 3106 if (dst != ignoredResult()) … … 3460 3118 return NoExpectedFunction; 3461 3119 3462 size_t begin = instructions().size(); 3463 emitOpcode(op_jneq_ptr); 3464 instructions().append(func->index()); 3465 instructions().append(Special::ArrayConstructor); 3466 instructions().append(realCall->bind(begin, instructions().size())); 3467 instructions().append(0); 3120 OpJneqPtr::emit(this, func, Special::ArrayConstructor, realCall->bind(this)); 3468 3121 3469 3122 if (dst != ignoredResult()) { … … 3472 3125 else 
{ 3473 3126 ASSERT(callArguments.argumentCountIncludingThis() == 1); 3474 emitOpcode(op_new_array); 3475 instructions().append(dst->index()); 3476 instructions().append(0); 3477 instructions().append(0); 3478 instructions().append(newArrayAllocationProfile(ArrayWithUndecided)); 3127 OpNewArray::emit(this, dst, VirtualRegister { 0 }, 0, ArrayWithUndecided); 3479 3128 } 3480 3129 } … … 3487 3136 } 3488 3137 3489 size_t begin = instructions().size(); 3490 emitOpcode(op_jmp); 3491 instructions().append(done.bind(begin, instructions().size())); 3138 OpJmp::emit(this, done.bind(this)); 3492 3139 emitLabel(realCall.get()); 3493 3140 … … 3495 3142 } 3496 3143 3497 RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3498 { 3144 template<typename CallOp> 3145 RegisterID* BytecodeGenerator::emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3146 { 3147 constexpr auto opcodeID = CallOp::opcodeID; 3499 3148 ASSERT(opcodeID == op_call || opcodeID == op_call_eval || opcodeID == op_tail_call); 3500 3149 ASSERT(func->refCount()); … … 3512 3161 ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); 3513 3162 RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); 3514 emitOpcode(op_spread); 3515 instructions().append(argumentRegister.get()->index()); 3516 instructions().append(argumentRegister.get()->index()); 3517 3518 RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); 3519 return emitCallVarargs(opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs, dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); 3163 OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); 3164 3165 return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); 3520 3166 } 3521 3167 } … … 3523 3169 argumentRegister = expression->emitBytecode(*this, callArguments.argumentRegister(0)); 3524 3170 RefPtr<RegisterID> thisRegister = move(newTemporary(), callArguments.thisRegister()); 3525 return emitCallVarargs (opcodeID == op_tail_call ? op_tail_call_varargs : op_call_varargs,dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall);3171 return emitCallVarargs<typename VarArgsOp<CallOp>::type>(dst, func, callArguments.thisRegister(), argumentRegister.get(), newTemporary(), 0, divot, divotStart, divotEnd, debuggableCall); 3526 3172 } 3527 3173 for (; n; n = n->m_next) … … 3546 3192 3547 3193 // Emit call. 
3548 UnlinkedArrayProfile arrayProfile = newArrayProfile();3549 UnlinkedValueProfile profile = emitProfiledOpcode(opcodeID);3550 3194 ASSERT(dst); 3551 3195 ASSERT(dst != ignoredResult()); 3552 instructions().append(dst->index()); 3553 instructions().append(func->index()); 3554 instructions().append(callArguments.argumentCountIncludingThis()); 3555 instructions().append(callArguments.stackOffset()); 3556 instructions().append(m_codeBlock->addLLIntCallLinkInfo()); 3557 instructions().append(0); 3558 instructions().append(arrayProfile); 3559 instructions().append(profile); 3196 CallOp::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); 3560 3197 3561 3198 if (expectedFunction != NoExpectedFunction) … … 3567 3204 RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3568 3205 { 3569 return emitCallVarargs (op_call_varargs,dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall);3206 return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3570 3207 } 3571 3208 3572 3209 RegisterID* BytecodeGenerator::emitCallVarargsInTailPosition(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3573 3210 { 3574 return emitCallVarargs(m_inTailPosition ? 
op_tail_call_varargs : op_call_varargs, dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3211 if (m_inTailPosition) 3212 return emitCallVarargs<OpTailCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3213 return emitCallVarargs<OpCallVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3575 3214 } 3576 3215 3577 3216 RegisterID* BytecodeGenerator::emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3578 3217 { 3579 return emitCallVarargs (op_construct_varargs,dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall);3218 return emitCallVarargs<OpConstructVarargs>(dst, func, thisRegister, arguments, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3580 3219 } 3581 3220 … … 3583 3222 { 3584 3223 ASSERT(m_inTailPosition); 3585 return emitCallVarargs(op_tail_call_forward_arguments, dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3586 } 3587 3588 RegisterID* BytecodeGenerator::emitCallVarargs(OpcodeID opcode, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3224 return emitCallVarargs<OpTailCallForwardArguments>(dst, func, thisRegister, nullptr, firstFreeRegister, firstVarArgOffset, divot, divotStart, divotEnd, debuggableCall); 3225 } 3226 3227 template<typename VarargsOp> 3228 RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall debuggableCall) 3589 3229 { 3590 3230 if (m_shouldEmitDebugHooks && debuggableCall == DebuggableCall::Yes) … … 3593 3233 emitExpressionInfo(divot, divotStart, divotEnd); 3594 3234 3595 if ( opcode== op_tail_call_varargs)3235 if (VarargsOp::opcodeID == op_tail_call_varargs) 3596 3236 emitLogShadowChickenTailIfNecessary(); 3597 3237 3598 3238 // Emit call. 3599 UnlinkedArrayProfile arrayProfile = newArrayProfile();3600 UnlinkedValueProfile profile = emitProfiledOpcode(opcode);3601 3239 ASSERT(dst != ignoredResult()); 3602 instructions().append(dst->index()); 3603 instructions().append(func->index()); 3604 instructions().append(thisRegister ? thisRegister->index() : 0); 3605 instructions().append(arguments ? arguments->index() : 0); 3606 instructions().append(firstFreeRegister->index()); 3607 instructions().append(firstVarArgOffset); 3608 instructions().append(arrayProfile); 3609 instructions().append(profile); 3240 VarargsOp::emit(this, dst, func, thisRegister, arguments ? 
arguments : VirtualRegister(0), firstFreeRegister, firstVarArgOffset); 3610 3241 return dst; 3611 3242 } … … 3615 3246 if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) 3616 3247 return; 3617 emitOpcode(op_log_shadow_chicken_prologue); 3618 instructions().append(scopeRegister()->index()); 3248 OpLogShadowChickenPrologue::emit(this, scopeRegister()); 3619 3249 } 3620 3250 … … 3623 3253 if (!m_shouldEmitDebugHooks && !Options::alwaysUseShadowChicken()) 3624 3254 return; 3625 emitOpcode(op_log_shadow_chicken_tail); 3626 instructions().append(thisRegister()->index()); 3627 instructions().append(scopeRegister()->index()); 3255 OpLogShadowChickenTail::emit(this, thisRegister(), scopeRegister()); 3628 3256 } 3629 3257 … … 3671 3299 setter = throwTypeErrorFunction; 3672 3300 3673 emitOpcode(op_define_accessor_property); 3674 instructions().append(newObj->index()); 3675 instructions().append(propertyNameRegister->index()); 3676 instructions().append(getter->index()); 3677 instructions().append(setter->index()); 3678 instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); 3301 OpDefineAccessorProperty::emit(this, newObj, propertyNameRegister, getter.get(), setter.get(), emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); 3679 3302 } else { 3680 emitOpcode(op_define_data_property); 3681 instructions().append(newObj->index()); 3682 instructions().append(propertyNameRegister->index()); 3683 instructions().append(valueRegister->index()); 3684 instructions().append(emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))->index()); 3303 OpDefineDataProperty::emit(this, newObj, propertyNameRegister, valueRegister, emitLoad(nullptr, jsNumber(attributes.rawRepresentation()))); 3685 3304 } 3686 3305 } … … 3706 3325 emitTDZCheck(&m_thisRegister); 3707 3326 } 3708 emitUnaryNoDstOp(op_ret, &m_thisRegister);3327 OpRet::emit(this, &m_thisRegister); 3709 3328 emitLabel(isObjectLabel.get()); 3710 3329 } 3711 3330 } 3712 3331 3713 return emitUnaryNoDstOp(op_ret, src); 3714 } 3715 3716 RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* src) 3717 { 3718 emitOpcode(opcodeID); 3719 instructions().append(src->index()); 3332 OpRet::emit(this, src); 3720 3333 return src; 3721 3334 } 3335 3336 RegisterID* BytecodeGenerator::emitEnd(RegisterID* src) 3337 { 3338 OpEnd::emit(this, src); 3339 return src; 3340 } 3341 3722 3342 3723 3343 RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, RegisterID* lazyThis, ExpectedFunction expectedFunction, CallArguments& callArguments, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd) … … 3738 3358 ExpressionNode* expression = static_cast<SpreadExpressionNode*>(elements->value())->expression(); 3739 3359 RefPtr<RegisterID> argumentRegister = emitNode(callArguments.argumentRegister(0), expression); 3740 emitOpcode(op_spread); 3741 instructions().append(argumentRegister.get()->index()); 3742 instructions().append(argumentRegister.get()->index()); 3360 OpSpread::emit(this, argumentRegister.get(), argumentRegister.get()); 3743 3361 3744 3362 move(callArguments.thisRegister(), lazyThis); … … 3769 3387 expectedFunction = emitExpectedFunctionSnippet(dst, func, expectedFunction, callArguments, done.get()); 3770 3388 3771 UnlinkedValueProfile profile = emitProfiledOpcode(op_construct); 3772 ASSERT(dst != ignoredResult()); 3773 instructions().append(dst->index()); 3774 instructions().append(func->index()); 3775 
instructions().append(callArguments.argumentCountIncludingThis()); 3776 instructions().append(callArguments.stackOffset()); 3777 instructions().append(m_codeBlock->addLLIntCallLinkInfo()); 3778 instructions().append(0); 3779 instructions().append(0); 3780 instructions().append(profile); 3389 OpConstruct::emit(this, dst, func, callArguments.argumentCountIncludingThis(), callArguments.stackOffset()); 3781 3390 3782 3391 if (expectedFunction != NoExpectedFunction) … … 3788 3397 RegisterID* BytecodeGenerator::emitStrcat(RegisterID* dst, RegisterID* src, int count) 3789 3398 { 3790 emitOpcode(op_strcat); 3791 instructions().append(dst->index()); 3792 instructions().append(src->index()); 3793 instructions().append(count); 3794 3399 OpStrcat::emit(this, dst, src, count); 3795 3400 return dst; 3796 3401 } … … 3798 3403 void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src) 3799 3404 { 3800 emitOpcode(op_to_primitive); 3801 instructions().append(dst->index()); 3802 instructions().append(src->index()); 3405 OpToPrimitive::emit(this, dst, src); 3803 3406 } 3804 3407 3805 3408 void BytecodeGenerator::emitGetScope() 3806 3409 { 3807 emitOpcode(op_get_scope); 3808 instructions().append(scopeRegister()->index()); 3410 OpGetScope::emit(this, scopeRegister()); 3809 3411 } 3810 3412 … … 3815 3417 newScope->ref(); 3816 3418 3817 emitOpcode(op_push_with_scope); 3818 instructions().append(newScope->index()); 3819 instructions().append(scopeRegister()->index()); 3820 instructions().append(objectScope->index()); 3419 OpPushWithScope::emit(this, newScope, scopeRegister(), objectScope); 3821 3420 3822 3421 move(scopeRegister(), newScope); … … 3828 3427 RegisterID* BytecodeGenerator::emitGetParentScope(RegisterID* dst, RegisterID* scope) 3829 3428 { 3830 emitOpcode(op_get_parent_scope); 3831 instructions().append(dst->index()); 3832 instructions().append(scope->index()); 3429 OpGetParentScope::emit(this, dst, scope); 3833 3430 return dst; 3834 3431 } … … 3855 3452 3856 3453 emitExpressionInfo(divot, divot, divot); 3857 emitOpcode(op_debug); 3858 instructions().append(debugHookType); 3859 instructions().append(false); 3454 OpDebug::emit(this, debugHookType, false); 3860 3455 } 3861 3456 … … 4016 3611 void BytecodeGenerator::emitCatch(RegisterID* exceptionRegister, RegisterID* thrownValueRegister, TryData* data) 4017 3612 { 4018 m_catchesToEmit.append(CatchEntry { data, exceptionRegister ->index(), thrownValueRegister->index()});3613 m_catchesToEmit.append(CatchEntry { data, exceptionRegister, thrownValueRegister }); 4019 3614 } 4020 3615 … … 4056 3651 } 4057 3652 3653 void BytecodeGenerator::emitThrow(RegisterID* exc) 3654 { 3655 m_usesExceptions = true; 3656 OpThrow::emit(this, exc); 3657 } 3658 3659 RegisterID* BytecodeGenerator::emitArgumentCount(RegisterID* dst) 3660 { 3661 OpArgumentCount::emit(this, dst); 3662 return dst; 3663 } 3664 4058 3665 int BytecodeGenerator::localScopeDepth() const 4059 3666 { … … 4072 3679 RefPtr<RegisterID> message = newTemporary(); 4073 3680 emitToString(message.get(), raw); 4074 emitOpcode(op_throw_static_error); 4075 instructions().append(message->index()); 4076 instructions().append(static_cast<unsigned>(errorType)); 3681 OpThrowStaticError::emit(this, message.get(), errorType); 4077 3682 } 4078 3683 4079 3684 void BytecodeGenerator::emitThrowStaticError(ErrorType errorType, const Identifier& message) 4080 3685 { 4081 emitOpcode(op_throw_static_error); 4082 instructions().append(addConstantValue(addStringConstant(message))->index()); 4083 
instructions().append(static_cast<unsigned>(errorType)); 3686 OpThrowStaticError::emit(this, addConstantValue(addStringConstant(message)), errorType); 4084 3687 } 4085 3688 … … 4162 3765 SwitchInfo info = { static_cast<uint32_t>(instructions().size()), type }; 4163 3766 switch (type) { 4164 case SwitchInfo::SwitchImmediate: 4165 emitOpcode(op_switch_imm); 4166 break; 4167 case SwitchInfo::SwitchCharacter: 4168 emitOpcode(op_switch_char); 4169 break; 4170 case SwitchInfo::SwitchString: 4171 emitOpcode(op_switch_string); 4172 break; 4173 default: 4174 RELEASE_ASSERT_NOT_REACHED(); 4175 } 4176 4177 instructions().append(0); // place holder for table index 4178 instructions().append(0); // place holder for default target 4179 instructions().append(scrutineeRegister->index()); 3767 case SwitchInfo::SwitchImmediate: { 3768 size_t tableIndex = m_codeBlock->numberOfSwitchJumpTables(); 3769 m_codeBlock->addSwitchJumpTable(); 3770 OpSwitchImm::emit(this, tableIndex, 0, scrutineeRegister); 3771 break; 3772 } 3773 case SwitchInfo::SwitchCharacter: { 3774 size_t tableIndex = m_codeBlock->numberOfSwitchJumpTables(); 3775 m_codeBlock->addSwitchJumpTable(); 3776 OpSwitchChar::emit(this, tableIndex, 0, scrutineeRegister); 3777 break; 3778 } 3779 case SwitchInfo::SwitchString: { 3780 size_t tableIndex = m_codeBlock->numberOfStringSwitchJumpTables(); 3781 m_codeBlock->addStringSwitchJumpTable(); 3782 OpSwitchString::emit(this, tableIndex, 0, scrutineeRegister); 3783 break; 3784 } 3785 default: 3786 RELEASE_ASSERT_NOT_REACHED(); 3787 } 3788 4180 3789 m_switchContextStack.append(info); 4181 3790 } … … 4218 3827 // the labels should not be "forward" references 4219 3828 ASSERT(!labels[i]->isForward()); 4220 jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress , switchAddress + 3));3829 jumpTable.add(keyGetter(nodes[i], min, max), labels[i]->bind(switchAddress)); 4221 3830 } 4222 3831 } … … 4231 3840 ASSERT(nodes[i]->isString()); 4232 3841 StringImpl* clause = static_cast<StringNode*>(nodes[i])->value().impl(); 4233 jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress , switchAddress + 3) });3842 jumpTable.offsetTable.add(clause, UnlinkedStringJumpTable::OffsetLocation { labels[i]->bind(switchAddress) }); 4234 3843 } 4235 3844 } … … 4239 3848 SwitchInfo switchInfo = m_switchContextStack.last(); 4240 3849 m_switchContextStack.removeLast(); 4241 4242 switch (switchInfo.switchType) { 4243 case SwitchInfo::SwitchImmediate: 4244 case SwitchInfo::SwitchCharacter: { 4245 instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfSwitchJumpTables(); 4246 instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); 4247 4248 UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->addSwitchJumpTable(); 3850 3851 int defaultTarget = defaultLabel.bind(switchInfo.bytecodeOffset); 3852 auto handleSwitch = [&](auto* op, auto bytecode) { 3853 op->setDefaultOffset(defaultTarget, [&]() { 3854 m_codeBlock->addOutOfLineJumpTarget(switchInfo.bytecodeOffset, defaultTarget); 3855 return 0; 3856 }); 3857 3858 UnlinkedSimpleJumpTable& jumpTable = m_codeBlock->switchJumpTable(bytecode.tableIndex); 4249 3859 prepareJumpTableForSwitch( 4250 3860 jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max, … … 4252 3862 ? 
keyForImmediateSwitch 4253 3863 : keyForCharacterSwitch); 3864 }; 3865 3866 auto ref = m_writer.ref(switchInfo.bytecodeOffset); 3867 switch (switchInfo.switchType) { 3868 case SwitchInfo::SwitchImmediate: { 3869 handleSwitch(ref->cast<OpSwitchImm>(), ref->as<OpSwitchImm>()); 3870 break; 3871 } 3872 case SwitchInfo::SwitchCharacter: { 3873 handleSwitch(ref->cast<OpSwitchChar>(), ref->as<OpSwitchChar>()); 4254 3874 break; 4255 3875 } 4256 3876 4257 3877 case SwitchInfo::SwitchString: { 4258 instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables(); 4259 instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel.bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3); 4260 4261 UnlinkedStringJumpTable& jumpTable = m_codeBlock->addStringSwitchJumpTable(); 3878 ref->cast<OpSwitchString>()->setDefaultOffset(defaultTarget, [&]() { 3879 m_codeBlock->addOutOfLineJumpTarget(switchInfo.bytecodeOffset, defaultTarget); 3880 return 0; 3881 }); 3882 3883 UnlinkedStringJumpTable& jumpTable = m_codeBlock->stringSwitchJumpTable(ref->as<OpSwitchString>().tableIndex); 4262 3884 prepareJumpTableForStringSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes); 4263 3885 break; … … 4469 4091 RegisterID* BytecodeGenerator::emitGetEnumerableLength(RegisterID* dst, RegisterID* base) 4470 4092 { 4471 emitOpcode(op_get_enumerable_length); 4472 instructions().append(dst->index()); 4473 instructions().append(base->index()); 4093 OpGetEnumerableLength::emit(this, dst, base); 4474 4094 return dst; 4475 4095 } … … 4477 4097 RegisterID* BytecodeGenerator::emitHasGenericProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) 4478 4098 { 4479 emitOpcode(op_has_generic_property); 4480 instructions().append(dst->index()); 4481 instructions().append(base->index()); 4482 instructions().append(propertyName->index()); 4099 OpHasGenericProperty::emit(this, dst, base, propertyName); 4483 4100 return dst; 4484 4101 } … … 4486 4103 RegisterID* BytecodeGenerator::emitHasIndexedProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName) 4487 4104 { 4488 UnlinkedArrayProfile arrayProfile = newArrayProfile(); 4489 emitOpcode(op_has_indexed_property); 4490 instructions().append(dst->index()); 4491 instructions().append(base->index()); 4492 instructions().append(propertyName->index()); 4493 instructions().append(arrayProfile); 4105 OpHasIndexedProperty::emit(this, dst, base, propertyName); 4494 4106 return dst; 4495 4107 } … … 4497 4109 RegisterID* BytecodeGenerator::emitHasStructureProperty(RegisterID* dst, RegisterID* base, RegisterID* propertyName, RegisterID* enumerator) 4498 4110 { 4499 emitOpcode(op_has_structure_property); 4500 instructions().append(dst->index()); 4501 instructions().append(base->index()); 4502 instructions().append(propertyName->index()); 4503 instructions().append(enumerator->index()); 4111 OpHasStructureProperty::emit(this, dst, base, propertyName, enumerator); 4504 4112 return dst; 4505 4113 } … … 4507 4115 RegisterID* BytecodeGenerator::emitGetPropertyEnumerator(RegisterID* dst, RegisterID* base) 4508 4116 { 4509 emitOpcode(op_get_property_enumerator); 4510 instructions().append(dst->index()); 4511 instructions().append(base->index()); 4117 OpGetPropertyEnumerator::emit(this, dst, base); 4512 4118 return dst; 4513 4119 } … … 4515 4121 RegisterID* BytecodeGenerator::emitEnumeratorStructurePropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) 4516 4122 { 4517 emitOpcode(op_enumerator_structure_pname); 4518 
instructions().append(dst->index()); 4519 instructions().append(enumerator->index()); 4520 instructions().append(index->index()); 4123 OpEnumeratorStructurePname::emit(this, dst, enumerator, index); 4521 4124 return dst; 4522 4125 } … … 4524 4127 RegisterID* BytecodeGenerator::emitEnumeratorGenericPropertyName(RegisterID* dst, RegisterID* enumerator, RegisterID* index) 4525 4128 { 4526 emitOpcode(op_enumerator_generic_pname); 4527 instructions().append(dst->index()); 4528 instructions().append(enumerator->index()); 4529 instructions().append(index->index()); 4129 OpEnumeratorGenericPname::emit(this, dst, enumerator, index); 4530 4130 return dst; 4531 4131 } … … 4533 4133 RegisterID* BytecodeGenerator::emitToIndexString(RegisterID* dst, RegisterID* index) 4534 4134 { 4535 emitOpcode(op_to_index_string); 4536 instructions().append(dst->index()); 4537 instructions().append(index->index()); 4135 OpToIndexString::emit(this, dst, index); 4538 4136 return dst; 4539 4137 } … … 4541 4139 RegisterID* BytecodeGenerator::emitIsCellWithType(RegisterID* dst, RegisterID* src, JSType type) 4542 4140 { 4543 emitOpcode(op_is_cell_with_type); 4544 instructions().append(dst->index()); 4545 instructions().append(src->index()); 4546 instructions().append(type); 4141 OpIsCellWithType::emit(this, dst, src, type); 4547 4142 return dst; 4548 4143 } … … 4550 4145 RegisterID* BytecodeGenerator::emitIsObject(RegisterID* dst, RegisterID* src) 4551 4146 { 4552 emitOpcode(op_is_object); 4553 instructions().append(dst->index()); 4554 instructions().append(src->index()); 4147 OpIsObject::emit(this, dst, src); 4555 4148 return dst; 4556 4149 } … … 4558 4151 RegisterID* BytecodeGenerator::emitIsNumber(RegisterID* dst, RegisterID* src) 4559 4152 { 4560 emitOpcode(op_is_number); 4561 instructions().append(dst->index()); 4562 instructions().append(src->index()); 4153 OpIsNumber::emit(this, dst, src); 4563 4154 return dst; 4564 4155 } … … 4566 4157 RegisterID* BytecodeGenerator::emitIsUndefined(RegisterID* dst, RegisterID* src) 4567 4158 { 4568 emitOpcode(op_is_undefined); 4569 instructions().append(dst->index()); 4570 instructions().append(src->index()); 4159 OpIsUndefined::emit(this, dst, src); 4571 4160 return dst; 4572 4161 } … … 4574 4163 RegisterID* BytecodeGenerator::emitIsEmpty(RegisterID* dst, RegisterID* src) 4575 4164 { 4576 emitOpcode(op_is_empty); 4577 instructions().append(dst->index()); 4578 instructions().append(src->index()); 4165 OpIsEmpty::emit(this, dst, src); 4579 4166 return dst; 4580 4167 } … … 4762 4349 { 4763 4350 RefPtr<RegisterID> restArrayLength = newTemporary(); 4764 emitOpcode(op_get_rest_length); 4765 instructions().append(restArrayLength->index()); 4766 instructions().append(numParametersToSkip); 4767 4768 emitOpcode(op_create_rest); 4769 instructions().append(result->index()); 4770 instructions().append(restArrayLength->index()); 4771 instructions().append(numParametersToSkip); 4351 OpGetRestLength::emit(this, restArrayLength.get(), numParametersToSkip); 4352 4353 OpCreateRest::emit(this, result, restArrayLength.get(), numParametersToSkip); 4772 4354 4773 4355 return result; … … 4780 4362 Ref<Label> target = newLabel(); 4781 4363 size_t begin = instructions().size(); 4782 emitOpcode(op_jneq_null); 4783 instructions().append(value->index()); 4784 instructions().append(target->bind(begin, instructions().size())); 4364 OpJneqNull::emit(this, value, target->bind(begin)); 4785 4365 emitThrowTypeError(error); 4786 4366 emitLabel(target.get()); … … 4813 4393 m_tryContextStack.swap(savedTryContextStack); 
4814 4394 4815 emitOpcode(op_yield); 4816 instructions().append(generatorFrameRegister()->index()); 4817 instructions().append(yieldPointIndex); 4818 instructions().append(argument->index()); 4395 OpYield::emit(this, generatorFrameRegister(), yieldPointIndex, argument); 4819 4396 4820 4397 // Restore the try contexts, which start offset is updated to the merge point. … … 4829 4406 Ref<Label> normalLabel = newLabel(); 4830 4407 RefPtr<RegisterID> condition = newTemporary(); 4831 emitEqualityOp (op_stricteq,condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode))));4408 emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); 4832 4409 emitJumpIfTrue(condition.get(), normalLabel.get()); 4833 4410 4834 4411 Ref<Label> throwLabel = newLabel(); 4835 emitEqualityOp (op_stricteq,condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode))));4412 emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); 4836 4413 emitJumpIfTrue(condition.get(), throwLabel.get()); 4837 4414 // Return. … … 4882 4459 Ref<Label> iteratorReceived = newLabel(); 4883 4460 4884 emitJumpIfTrue(emitUnaryOp (op_eq_null,newTemporary(), iterator.get()), asyncIteratorNotFound.get());4461 emitJumpIfTrue(emitUnaryOp<OpEqNull>(newTemporary(), iterator.get()), asyncIteratorNotFound.get()); 4885 4462 4886 4463 emitJump(asyncIteratorFound.get()); … … 4941 4518 { 4942 4519 RefPtr<RegisterID> condition = newTemporary(); 4943 emitEqualityOp (op_stricteq,condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode))));4520 emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); 4944 4521 emitJumpIfTrue(condition.get(), normalLabel.get()); 4945 4522 4946 emitEqualityOp (op_stricteq,condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode))));4523 emitEqualityOp<OpStricteq>(condition.get(), generatorResumeModeRegister(), emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ReturnMode)))); 4947 4524 emitJumpIfTrue(condition.get(), returnLabel.get()); 4948 4525 … … 5111 4688 { 5112 4689 if (context.numberOfBreaksOrContinues() || context.handlesReturns()) { 5113 emitJumpIf (op_stricteq,completionTypeRegister, CompletionType::Normal, normalCompletionLabel);4690 emitJumpIf<OpStricteq>(completionTypeRegister, CompletionType::Normal, normalCompletionLabel); 5114 4691 5115 4692 FinallyContext* outerContext = context.outerContext(); … … 5121 4698 Ref<Label> nextLabel = newLabel(); 5122 4699 auto& jump = context.jumps(i); 5123 emitJumpIf (op_nstricteq,completionTypeRegister, jump.jumpID, nextLabel.get());4700 emitJumpIf<OpNstricteq>(completionTypeRegister, jump.jumpID, nextLabel.get()); 5124 4701 5125 4702 restoreScopeRegister(jump.targetLexicalScopeIndex); … … 5134 4711 bool hasBreaksOrContinuesNotCoveredByJumps = context.numberOfBreaksOrContinues() > numberOfJumps; 5135 4712 if 
(hasBreaksOrContinuesNotCoveredByJumps || context.handlesReturns()) 5136 emitJumpIf (op_nstricteq,completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel());4713 emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, *outerContext->finallyLabel()); 5137 4714 5138 4715 } else { … … 5140 4717 if (context.handlesReturns()) { 5141 4718 Ref<Label> notReturnLabel = newLabel(); 5142 emitJumpIf (op_nstricteq,completionTypeRegister, CompletionType::Return, notReturnLabel.get());4719 emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Return, notReturnLabel.get()); 5143 4720 5144 4721 emitWillLeaveCallFrameDebugHook(); … … 5149 4726 } 5150 4727 } 5151 emitJumpIf (op_nstricteq,completionTypeRegister, CompletionType::Throw, normalCompletionLabel);4728 emitJumpIf<OpNstricteq>(completionTypeRegister, CompletionType::Throw, normalCompletionLabel); 5152 4729 emitThrow(completionValueRegister()); 5153 4730 } … … 5174 4751 } 5175 4752 5176 void BytecodeGenerator::emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) 4753 template<typename CompareOp> 4754 void BytecodeGenerator::emitJumpIf(RegisterID* completionTypeRegister, CompletionType type, Label& jumpTarget) 5177 4755 { 5178 4756 RefPtr<RegisterID> tempRegister = newTemporary(); … … 5180 4758 OperandTypes operandTypes = OperandTypes(ResultType::numberTypeIsInt32(), ResultType::unknownType()); 5181 4759 5182 auto equivalenceResult = emitBinaryOp (compareOpcode,tempRegister.get(), valueConstant, completionTypeRegister, operandTypes);4760 auto equivalenceResult = emitBinaryOp<CompareOp>(tempRegister.get(), valueConstant, completionTypeRegister, operandTypes); 5183 4761 emitJumpIfTrue(equivalenceResult, jumpTarget); 5184 4762 } … … 5199 4777 5200 4778 for (unsigned offset = bodyBytecodeStartOffset(); isValid() && offset < bodyBytecodeEndOffset;) { 5201 UnlinkedInstruction* instruction = &generator.instructions()[offset]; 5202 OpcodeID opcodeID = instruction->u.opcode; 5203 unsigned opcodeLength = opcodeLengths[opcodeID]; 4779 auto instruction = generator.instructions().at(offset); 4780 OpcodeID opcodeID = instruction->opcodeID(); 5204 4781 5205 4782 ASSERT(opcodeID != op_enter); 5206 computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction , [&] (UnlinkedCodeBlock*, UnlinkedInstruction*, OpcodeID, intoperand) {5207 if (local()-> index() == operand)4783 computeDefsForBytecodeOffset(codeBlock, opcodeID, instruction.ptr(), [&] (VirtualRegister operand) { 4784 if (local()->virtualRegister() == operand) 5208 4785 invalidate(); 5209 4786 }); 5210 offset += opcodeLength;4787 offset += instruction->size(); 5211 4788 } 5212 4789 } … … 5221 4798 unsigned instIndex = std::get<0>(instTuple); 5222 4799 int propertyRegIndex = std::get<1>(instTuple); 5223 UnlinkedValueProfile valueProfile = std::get<2>(instTuple); 5224 OpcodeID op = generator.instructions()[instIndex].u.opcode; 5225 RELEASE_ASSERT(op == op_get_direct_pname); 5226 ASSERT(opcodeLength(op_get_direct_pname) == 7); 5227 ASSERT(opcodeLength(op_get_by_val) == 6); 5228 5229 // 0. Change the opcode to get_by_val. 
5230 generator.instructions()[instIndex].u.opcode = op_get_by_val; 4800 auto instruction = generator.m_writer.ref(instIndex); 4801 auto end = instIndex + instruction->size(); 4802 ASSERT(instruction->isWide()); 4803 4804 generator.m_writer.seek(instIndex); 4805 4806 auto bytecode = instruction->as<OpGetDirectPname>(); 4807 4808 // disable peephole optimizations 4809 OpcodeID lastOpcodeID = generator.m_lastOpcodeID; 4810 generator.m_lastOpcodeID = op_end; 4811 4812 // Change the opcode to get_by_val. 5231 4813 // 1. dst stays the same. 5232 4814 // 2. base stays the same. 5233 4815 // 3. property gets switched to the original property. 5234 generator.instructions()[instIndex + 3].u.operand = propertyRegIndex;5235 // 4. add an array profile. 5236 generator.instructions()[instIndex + 4].u.unsignedValue = generator.newArrayProfile();5237 // 5. set the result value profile.5238 generator.instructions()[instIndex + 5].u.unsignedValue = valueProfile;5239 // 6. nop out the last instruction word.5240 generator. instructions()[instIndex + 6].u.opcode = op_nop;4816 OpGetByVal::emit<OpcodeSize::Wide>(&generator, bytecode.dst, bytecode.base, VirtualRegister(propertyRegIndex)); 4817 4818 // 4. nop out the remaining bytes 4819 while (generator.m_writer.position() < end) 4820 OpNop::emit<OpcodeSize::Narrow>(&generator); 4821 generator.m_writer.seek(generator.m_writer.size()); 4822 generator.m_lastOpcodeID = lastOpcodeID; 5241 4823 } 5242 4824 } … … 5251 4833 unsigned instIndex = instPair.first; 5252 4834 int propertyRegIndex = instPair.second; 5253 OpcodeID op = generator.instructions()[instIndex].u.opcode; 5254 RELEASE_ASSERT(op == op_get_by_val); 5255 // We just need to perform the get_by_val with the original property here, 5256 // not the indexed one. 5257 generator.instructions()[instIndex + 3].u.operand = propertyRegIndex; 4835 // FIXME: we should not have to force this get_by_val to be wide, just guarantee that propertyRegIndex fits 4836 // https://bugs.webkit.org/show_bug.cgi?id=190929 4837 generator.m_writer.ref(instIndex)->cast<OpGetByVal>()->setProperty(VirtualRegister(propertyRegIndex), []() { 4838 ASSERT_NOT_REACHED(); 4839 return VirtualRegister(); 4840 }); 4841 } 4842 } 4843 4844 void StaticPropertyAnalysis::record() 4845 { 4846 auto* instruction = m_instructionRef.ptr(); 4847 auto size = m_propertyIndexes.size(); 4848 switch (instruction->opcodeID()) { 4849 case OpNewObject::opcodeID: 4850 instruction->cast<OpNewObject>()->setInlineCapacity(size, []() { 4851 return 255; 4852 }); 4853 return; 4854 case OpCreateThis::opcodeID: 4855 instruction->cast<OpCreateThis>()->setInlineCapacity(size, []() { 4856 return 255; 4857 }); 4858 return; 4859 default: 4860 ASSERT_NOT_REACHED(); 5258 4861 } 5259 4862 } … … 5262 4865 { 5263 4866 m_codeBlock->addPropertyAccessInstruction(instructions().size()); 5264 UnlinkedValueProfile profile = emitProfiledOpcode(op_to_this); 5265 instructions().append(kill(&m_thisRegister)); 5266 instructions().append(0); 5267 instructions().append(0); 5268 instructions().append(profile); 4867 OpToThis::emit(this, kill(&m_thisRegister)); 5269 4868 } 5270 4869 -
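Taken together, the BytecodeGenerator.cpp hunks above replace the untyped emitOpcode()/instructions().append() pairs with generated opcode structs whose emit() writes one complete, type-checked instruction, choosing a narrow encoding when the operands fit and a wide one otherwise (note the explicit OpGetByVal::emit<OpcodeSize::Wide> and OpNop::emit<OpcodeSize::Narrow> calls above). The following is a minimal self-contained model of that pattern, not JSC code; all names in it are illustrative:

    #include <cstdint>
    #include <vector>

    // Model of the instruction stream writer: a growable byte buffer.
    struct Writer {
        std::vector<uint8_t> bytes;
        void write(uint8_t b) { bytes.push_back(b); }
        void write(uint32_t v) { for (int i = 0; i < 4; i++) write(uint8_t(v >> (8 * i))); }
    };

    enum OpcodeID : uint8_t { op_wide = 0, op_to_primitive = 1 };

    // Model of one generated opcode struct: emit() encodes the opcode plus its
    // operands, one byte each when they fit (narrow), else a wide-prefixed form
    // with full-width operands.
    struct OpToPrimitive {
        static constexpr OpcodeID opcodeID = op_to_primitive;
        static void emit(Writer& w, int dst, int src)
        {
            auto fits = [](int v) { return v >= -128 && v <= 127; };
            if (fits(dst) && fits(src)) {
                w.write(uint8_t(opcodeID)); // narrow: 1-byte opcode...
                w.write(uint8_t(dst));      // ...and 1-byte operands
                w.write(uint8_t(src));
                return;
            }
            w.write(uint8_t(op_wide));      // wide prefix
            w.write(uint8_t(opcodeID));
            w.write(uint32_t(dst));         // full-width operands
            w.write(uint32_t(src));
        }
    };

    int main()
    {
        Writer w;
        OpToPrimitive::emit(w, 1, 2);   // both fit: 3 narrow bytes
        OpToPrimitive::emit(w, 1, 300); // 300 does not fit: wide fallback
        return 0;
    }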
trunk/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
r237486 r237547 42 42 #include "Nodes.h" 43 43 #include "ParserError.h" 44 #include "ProfileTypeBytecodeFlag.h" 44 45 #include "RegisterID.h" 45 46 #include "StaticPropertyAnalyzer.h" … … 232 233 using Base = ForInContext; 233 234 public: 234 using GetInst = std::tuple<unsigned, int , UnlinkedValueProfile>;235 using GetInst = std::tuple<unsigned, int>; 235 236 236 237 StructureForInContext(RegisterID* localRegister, RegisterID* indexRegister, RegisterID* propertyRegister, RegisterID* enumeratorRegister, unsigned bodyBytecodeStartOffset) … … 246 247 RegisterID* enumerator() const { return m_enumeratorRegister.get(); } 247 248 248 void addGetInst(unsigned instIndex, int propertyRegIndex , UnlinkedValueProfile valueProfile)249 { 250 m_getInsts.append(GetInst { instIndex, propertyRegIndex , valueProfile});249 void addGetInst(unsigned instIndex, int propertyRegIndex) 250 { 251 m_getInsts.append(GetInst { instIndex, propertyRegIndex }); 251 252 } 252 253 … … 359 360 }; 360 361 361 enum ProfileTypeBytecodeFlag {362 ProfileTypeBytecodeClosureVar,363 ProfileTypeBytecodeLocallyResolved,364 ProfileTypeBytecodeDoesNotHaveGlobalID,365 ProfileTypeBytecodeFunctionArgument,366 ProfileTypeBytecodeFunctionReturnStatement367 };368 369 362 class BytecodeGenerator { 370 363 WTF_MAKE_FAST_ALLOCATED; 371 364 WTF_MAKE_NONCOPYABLE(BytecodeGenerator); 365 366 friend class Label; 367 friend class IndexedForInContext; 368 friend class StructureForInContext; 372 369 public: 373 370 typedef DeclarationStacks::FunctionStack FunctionStack; … … 510 507 } 511 508 509 void recordOpcode(OpcodeID opcodeID) 510 { 511 ASSERT(m_lastOpcodeID == op_end || m_writer.size() == m_lastInstruction.offset() + m_lastInstruction->size()); 512 m_lastInstruction = m_writer.ref(); 513 m_lastOpcodeID = opcodeID; 514 } 515 516 ALWAYS_INLINE unsigned addMetadataFor(OpcodeID opcodeID) 517 { 518 return m_codeBlock->metadata().addEntry(opcodeID); 519 } 520 512 521 void emitNode(StatementNode* n) 513 522 { … … 669 678 RegisterID* emitLoadGlobalObject(RegisterID* dst); 670 679 671 RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src); 680 template<typename UnaryOp, typename = std::enable_if_t<UnaryOp::opcodeID != op_negate>> 681 RegisterID* emitUnaryOp(RegisterID* dst, RegisterID* src) 682 { 683 UnaryOp::emit(this, dst, src); 684 return dst; 685 } 686 672 687 RegisterID* emitUnaryOp(OpcodeID, RegisterID* dst, RegisterID* src, OperandTypes); 673 RegisterID* emitUnaryOpProfiled(OpcodeID, RegisterID* dst, RegisterID* src); 688 689 template<typename BinaryOp> 690 std::enable_if_t< 691 BinaryOp::opcodeID != op_bitxor && BinaryOp::opcodeID != op_add 692 && BinaryOp::opcodeID != op_mul && BinaryOp::opcodeID != op_sub 693 && BinaryOp::opcodeID != op_div, 694 RegisterID*> 695 emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes) 696 { 697 BinaryOp::emit(this, dst, src1, src2); 698 return dst; 699 } 700 701 template<typename BinaryOp> 702 std::enable_if_t< 703 BinaryOp::opcodeID == op_bitxor || BinaryOp::opcodeID == op_add 704 || BinaryOp::opcodeID == op_mul || BinaryOp::opcodeID == op_sub 705 || BinaryOp::opcodeID == op_div, 706 RegisterID*> 707 emitBinaryOp(RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes types) 708 { 709 BinaryOp::emit(this, dst, src1, src2, types); 710 return dst; 711 } 712 674 713 RegisterID* emitBinaryOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2, OperandTypes); 675 RegisterID* emitEqualityOp(OpcodeID, RegisterID* dst, RegisterID* src1, RegisterID* src2); 676 
RegisterID* emitUnaryNoDstOp(OpcodeID, RegisterID* src);677 714 715 template<typename EqOp> 716 RegisterID* emitEqualityOp(RegisterID* dst, RegisterID* src1, RegisterID* src2); 678 717 RegisterID* emitCreateThis(RegisterID* dst); 679 718 void emitTDZCheck(RegisterID* target); … … 700 739 RegisterID* moveEmptyValue(RegisterID* dst); 701 740 702 RegisterID* emitToNumber(RegisterID* dst, RegisterID* src) { return emitUnaryOpProfiled(op_to_number, dst, src); }703 RegisterID* emitToString(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_to_string, dst, src); }741 RegisterID* emitToNumber(RegisterID* dst, RegisterID* src); 742 RegisterID* emitToString(RegisterID* dst, RegisterID* src); 704 743 RegisterID* emitToObject(RegisterID* dst, RegisterID* src, const Identifier& message); 705 744 RegisterID* emitInc(RegisterID* srcDst); … … 709 748 RegisterID* emitInstanceOf(RegisterID* dst, RegisterID* value, RegisterID* basePrototype); 710 749 RegisterID* emitInstanceOfCustom(RegisterID* dst, RegisterID* value, RegisterID* constructor, RegisterID* hasInstanceValue); 711 RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src) { return emitUnaryOp(op_typeof, dst, src); }750 RegisterID* emitTypeOf(RegisterID* dst, RegisterID* src); 712 751 RegisterID* emitInByVal(RegisterID* dst, RegisterID* property, RegisterID* base); 713 752 RegisterID* emitInById(RegisterID* dst, RegisterID* base, const Identifier& property); … … 770 809 enum class ReturnFrom { Normal, Finally }; 771 810 RegisterID* emitReturn(RegisterID* src, ReturnFrom = ReturnFrom::Normal); 772 RegisterID* emitEnd(RegisterID* src) { return emitUnaryNoDstOp(op_end, src); }811 RegisterID* emitEnd(RegisterID* src); 773 812 774 813 RegisterID* emitConstruct(RegisterID* dst, RegisterID* func, RegisterID* lazyThis, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd); … … 793 832 void emitJumpIfNotFunctionCall(RegisterID* cond, Label& target); 794 833 void emitJumpIfNotFunctionApply(RegisterID* cond, Label& target); 834 835 template<typename BinOp, typename JmpOp> 836 bool fuseCompareAndJump(RegisterID* cond, Label& target, bool swapOperands = false); 837 838 template<typename UnaryOp, typename JmpOp> 839 bool fuseTestAndJmp(RegisterID* cond, Label& target); 795 840 796 841 void emitEnter(); … … 855 900 int labelScopeDepthToLexicalScopeIndex(int labelScopeDepth); 856 901 857 void emitThrow(RegisterID* exc) 858 { 859 m_usesExceptions = true; 860 emitUnaryNoDstOp(op_throw, exc); 861 } 902 void emitThrow(RegisterID*); 903 RegisterID* emitArgumentCount(RegisterID*); 862 904 863 905 void emitThrowStaticError(ErrorType, RegisterID*); … … 929 971 } 930 972 931 void emitJumpIf(OpcodeID compareOpcode, RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); 973 template<typename CompareOp> 974 void emitJumpIf(RegisterID* completionTypeRegister, CompletionType, Label& jumpTarget); 932 975 933 976 bool emitJumpViaFinallyIfNeeded(int targetLabelScopeDepth, Label& jumpTarget); … … 1022 1065 Variable variableForLocalEntry(const Identifier&, const SymbolTableEntry&, int symbolTableConstantIndex, bool isLexicallyScoped); 1023 1066 1024 void emitOpcode(OpcodeID); 1025 UnlinkedArrayAllocationProfile newArrayAllocationProfile(IndexingType); 1026 UnlinkedObjectAllocationProfile newObjectAllocationProfile(); 1027 UnlinkedValueProfile emitProfiledOpcode(OpcodeID); 1028 int kill(RegisterID* dst) 1029 { 1030 int index = dst->index(); 1031 m_staticPropertyAnalyzer.kill(index); 
1032 return index; 1033 } 1034 1035 void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); 1067 RegisterID* kill(RegisterID* dst) 1068 { 1069 m_staticPropertyAnalyzer.kill(dst); 1070 return dst; 1071 } 1072 1036 1073 void retrieveLastUnaryOp(int& dstIndex, int& srcIndex); 1037 ALWAYS_INLINE void rewindBinaryOp(); 1038 ALWAYS_INLINE void rewindUnaryOp(); 1074 ALWAYS_INLINE void rewind(); 1039 1075 1040 1076 void allocateCalleeSaveSpace(); 1041 1077 void allocateAndEmitScope(); 1078 1079 template<typename JumpOp> 1080 void setTargetForJumpInstruction(InstructionStream::MutableRef&, int target); 1042 1081 1043 1082 using BigIntMapEntry = std::tuple<UniquedStringImpl*, uint8_t, bool>; … … 1053 1092 ExpectedFunction emitExpectedFunctionSnippet(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, Label& done); 1054 1093 1055 RegisterID* emitCall(OpcodeID, RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); 1094 template<typename CallOp> 1095 RegisterID* emitCall(RegisterID* dst, RegisterID* func, ExpectedFunction, CallArguments&, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); 1056 1096 1057 1097 RegisterID* emitCallIterator(RegisterID* iterator, RegisterID* argument, ThrowableExpressionData*); … … 1116 1156 1117 1157 RegisterID* emitConstructVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); 1118 RegisterID* emitCallVarargs(OpcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); 1158 template<typename CallOp> 1159 RegisterID* emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* arguments, RegisterID* firstFreeRegister, int32_t firstVarArgOffset, const JSTextPosition& divot, const JSTextPosition& divotStart, const JSTextPosition& divotEnd, DebuggableCall); 1119 1160 1120 1161 void emitLogShadowChickenPrologueIfNecessary(); … … 1139 1180 RegisterID* addTemplateObjectConstant(Ref<TemplateObjectDescriptor>&&); 1140 1181 1141 Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>& instructions() { return m_instructions; }1182 const InstructionStream& instructions() const { return m_writer; } 1142 1183 1143 1184 RegisterID* emitThrowExpressionTooDeepException(); 1185 1186 void write(uint8_t byte) { m_writer.write(byte); } 1187 void write(uint32_t i) { m_writer.write(i); } 1144 1188 1145 1189 class PreservedTDZStack { … … 1152 1196 void restoreTDZStack(const PreservedTDZStack&); 1153 1197 1198 template<typename Func> 1199 void withWriter(InstructionStreamWriter& writer, const Func& fn) 1200 { 1201 auto prevLastOpcodeID = m_lastOpcodeID; 1202 auto prevLastInstruction = m_lastInstruction; 1203 m_writer.swap(writer); 1204 m_lastOpcodeID = op_end; 1205 m_lastInstruction = m_writer.ref(); 1206 fn(); 1207 m_writer.swap(writer); 1208 m_lastOpcodeID = prevLastOpcodeID; 1209 m_lastInstruction = prevLastInstruction; 1210 } 1211 1154 1212 private: 1155 Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow> m_instructions;1213 
InstructionStreamWriter m_writer; 1156 1214 1157 1215 bool m_shouldEmitDebugHooks; … … 1243 1301 TemplateObjectDescriptorMap m_templateObjectDescriptorMap; 1244 1302 1245 StaticPropertyAnalyzer m_staticPropertyAnalyzer { &m_instructions };1303 StaticPropertyAnalyzer m_staticPropertyAnalyzer; 1246 1304 1247 1305 VM* m_vm; 1248 1306 1249 1307 OpcodeID m_lastOpcodeID = op_end; 1250 #ifndef NDEBUG 1251 size_t m_lastOpcodePosition { 0 }; 1252 #endif 1308 InstructionStream::MutableRef m_lastInstruction { m_writer.ref() }; 1253 1309 1254 1310 bool m_usesExceptions { false }; … … 1260 1316 DerivedContextType m_derivedContextType { DerivedContextType::None }; 1261 1317 1262 using CatchEntry = std::tuple<TryData*, int, int>;1318 using CatchEntry = std::tuple<TryData*, VirtualRegister, VirtualRegister>; 1263 1319 Vector<CatchEntry> m_catchesToEmit; 1264 1320 }; 1265 1321 1322 1266 1323 } // namespace JSC 1267 1324 -
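One note on the emitBinaryOp templates above: the two overloads are split by enable_if on the opcode ID because the generated emit() for op_add, op_sub, op_mul, op_div and op_bitxor takes an extra OperandTypes argument (it seeds the arithmetic profile), while every other binary opcode does not. Call sites stay uniform either way, as the NodesCodegen.cpp hunks below show; for example:

    // Both calls name the same member template; SFINAE picks the overload.
    generator.emitBinaryOp<OpBitxor>(dst, lhs, rhs, types); // forwards `types` to OpBitxor::emit
    generator.emitBinaryOp<OpEq>(dst, lhs, rhs, types);     // accepts `types`, then drops it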
trunk/Source/JavaScriptCore/bytecompiler/Label.h
r237486 r237547 35 35 36 36 namespace JSC { 37 38 37 class BytecodeGenerator; 39 38 … … 45 44 void setLocation(BytecodeGenerator&, unsigned); 46 45 47 int bind(int opcode, int offset) const 46 int bind(BytecodeGenerator*); 47 48 int bind(unsigned offset) 48 49 { 49 50 m_bound = true; 50 if (m_location == invalidLocation) { 51 m_unresolvedJumps.append(std::make_pair(opcode, offset)); 52 return 0; 53 } 54 return m_location - opcode; 51 if (!isForward()) 52 return m_location - offset; 53 m_unresolvedJumps.append(offset); 54 return 0; 55 } 56 57 int bind() 58 { 59 ASSERT(!isForward()); 60 return bind(0u); 55 61 } 56 62 … … 66 72 bool isForward() const { return m_location == invalidLocation; } 67 73 68 int bind()69 {70 ASSERT(!isForward());71 return bind(0, 0);72 }73 74 74 bool isBound() const { return m_bound; } 75 75 76 76 private: 77 typedef Vector< std::pair<int, int>, 8> JumpVector;77 typedef Vector<int, 8> JumpVector; 78 78 79 79 static const unsigned invalidLocation = UINT_MAX; -
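The reworked Label::bind above resolves backward jumps immediately and defers forward ones behind a placeholder of 0 that is patched once the label is placed. Concretely (offsets here are illustrative):

    // Label already placed at bytecode offset 10, jump emitted at offset 25:
    //   bind(25) returns 10 - 25 = -15, the relative target, encoded directly.
    // Label not placed yet, jump emitted at offset 25:
    //   bind(25) appends 25 to m_unresolvedJumps and returns 0 as a placeholder.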
trunk/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
r237486 r237547 465 465 for (; n; n = n->next()) { 466 466 if (n->elision()) 467 generator.emitBinaryOp (op_add,index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32()));467 generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, jsNumber(n->elision())), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); 468 468 if (n->value()->isSpreadExpression()) { 469 469 SpreadExpressionNode* spread = static_cast<SpreadExpressionNode*>(n->value()); … … 476 476 477 477 if (m_elision) { 478 generator.emitBinaryOp (op_add,index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32()));478 generator.emitBinaryOp<OpAdd>(index.get(), index.get(), generator.emitLoad(0, jsNumber(m_elision)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); 479 479 generator.emitPutById(array.get(), generator.propertyNames().length, index.get()); 480 480 } … … 997 997 ASSERT(!m_args->m_listNode); 998 998 999 return generator.emit UnaryNoDstOp(op_argument_count,generator.finalDestination(dst));999 return generator.emitArgumentCount(generator.finalDestination(dst)); 1000 1000 } 1001 1001 … … 1462 1462 Ref<Label> end = generator.newLabel(); 1463 1463 RefPtr<RegisterID> compareResult = generator.newTemporary(); 1464 RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp (op_eq,compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32()));1464 RefPtr<RegisterID> indexZeroCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(0)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); 1465 1465 generator.emitJumpIfFalse(indexZeroCompareResult.get(), haveThis.get()); 1466 1466 generator.move(thisRegister.get(), value); … … 1468 1468 generator.emitJump(end.get()); 1469 1469 generator.emitLabel(haveThis.get()); 1470 RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp (op_eq,compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32()));1470 RefPtr<RegisterID> indexOneCompareResult = generator.emitBinaryOp<OpEq>(compareResult.get(), index.get(), generator.emitLoad(0, jsNumber(1)), OperandTypes(ResultType::numberTypeIsInt32(), ResultType::numberTypeIsInt32())); 1471 1471 generator.emitJumpIfFalse(indexOneCompareResult.get(), end.get()); 1472 1472 generator.move(argumentsRegister.get(), value); … … 1897 1897 RefPtr<RegisterID> src2 = generator.emitLoad(nullptr, jsNumber(-1)); 1898 1898 RefPtr<RegisterID> src1 = generator.emitNode(m_expr); 1899 return generator.emitBinaryOp (op_bitxor,generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32()));1899 return generator.emitBinaryOp<OpBitxor>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(m_expr->resultDescriptor(), ResultType::numberTypeIsInt32())); 1900 1900 } 1901 1901 … … 2165 2165 RefPtr<RegisterID> src = generator.tempDestination(dst); 2166 2166 generator.emitNode(src.get(), m_expr1->isNull() ? 
m_expr2 : m_expr1); 2167 return generator.emitUnaryOp (op_neq_null,generator.finalDestination(dst, src.get()), src.get());2167 return generator.emitUnaryOp<OpNeqNull>(generator.finalDestination(dst, src.get()), src.get()); 2168 2168 } 2169 2169 } … … 2183 2183 RefPtr<RegisterID> tmp = generator.tempDestination(dst); 2184 2184 if (opcodeID == op_neq) 2185 generator.emitEqualityOp (op_eq,generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get());2185 generator.emitEqualityOp<OpEq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); 2186 2186 else if (opcodeID == op_nstricteq) 2187 generator.emitEqualityOp (op_stricteq,generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get());2187 generator.emitEqualityOp<OpStricteq>(generator.finalDestination(tmp.get(), src1.get()), src1.get(), src2.get()); 2188 2188 else 2189 2189 RELEASE_ASSERT_NOT_REACHED(); 2190 return generator.emitUnaryOp (op_not,generator.finalDestination(dst, tmp.get()), tmp.get());2190 return generator.emitUnaryOp<OpNot>(generator.finalDestination(dst, tmp.get()), tmp.get()); 2191 2191 } 2192 2192 RegisterID* result = generator.emitBinaryOp(opcodeID, generator.finalDestination(dst, src1.get()), src1.get(), src2.get(), OperandTypes(left->resultDescriptor(), right->resultDescriptor())); 2193 2193 if (m_shouldToUnsignedResult) { 2194 2194 if (opcodeID == op_urshift && dst != generator.ignoredResult()) 2195 return generator.emitUnaryOp (op_unsigned,result, result);2195 return generator.emitUnaryOp<OpUnsigned>(result, result); 2196 2196 } 2197 2197 return result; … … 2203 2203 RefPtr<RegisterID> src = generator.tempDestination(dst); 2204 2204 generator.emitNode(src.get(), m_expr1->isNull() ? m_expr2 : m_expr1); 2205 return generator.emitUnaryOp (op_eq_null,generator.finalDestination(dst, src.get()), src.get());2205 return generator.emitUnaryOp<OpEqNull>(generator.finalDestination(dst, src.get()), src.get()); 2206 2206 } 2207 2207 … … 2213 2213 RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); 2214 2214 RefPtr<RegisterID> src2 = generator.emitNode(right); 2215 return generator.emitEqualityOp (op_eq,generator.finalDestination(dst, src1.get()), src1.get(), src2.get());2215 return generator.emitEqualityOp<OpEq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); 2216 2216 } 2217 2217 … … 2225 2225 RefPtr<RegisterID> src1 = generator.emitNodeForLeftHandSide(left, m_rightHasAssignments, m_expr2->isPure(generator)); 2226 2226 RefPtr<RegisterID> src2 = generator.emitNode(right); 2227 return generator.emitEqualityOp (op_stricteq,generator.finalDestination(dst, src1.get()), src1.get(), src2.get());2227 return generator.emitEqualityOp<OpStricteq>(generator.finalDestination(dst, src1.get()), src1.get(), src2.get()); 2228 2228 } 2229 2229 … … 2418 2418 RegisterID* result = generator.emitBinaryOp(opcodeID, dst, src1, src2, types); 2419 2419 if (oper == OpURShift) 2420 return generator.emitUnaryOp (op_unsigned,result, result);2420 return generator.emitUnaryOp<OpUnsigned>(result, result); 2421 2421 return result; 2422 2422 } … … 3084 3084 generator.emitLoopHint(); 3085 3085 3086 RefPtr<RegisterID> result = generator.emitEqualityOp (op_less,generator.newTemporary(), i.get(), length.get());3086 RefPtr<RegisterID> result = generator.emitEqualityOp<OpLess>(generator.newTemporary(), i.get(), length.get()); 3087 3087 generator.emitJumpIfFalse(result.get(), loopEnd.get()); 3088 3088 
generator.emitHasIndexedProperty(result.get(), base.get(), i.get()); … … 3125 3125 generator.emitLoopHint(); 3126 3126 3127 RefPtr<RegisterID> result = generator.emitUnaryOp (op_eq_null,generator.newTemporary(), propertyName.get());3127 RefPtr<RegisterID> result = generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); 3128 3128 generator.emitJumpIfTrue(result.get(), loopEnd.get()); 3129 3129 generator.emitHasStructureProperty(result.get(), base.get(), propertyName.get(), enumerator.get()); … … 3166 3166 generator.emitLoopHint(); 3167 3167 3168 RefPtr<RegisterID> result = generator.emitUnaryOp (op_eq_null,generator.newTemporary(), propertyName.get());3168 RefPtr<RegisterID> result = generator.emitUnaryOp<OpEqNull>(generator.newTemporary(), propertyName.get()); 3169 3169 generator.emitJumpIfTrue(result.get(), loopEnd.get()); 3170 3170 … … 3490 3490 RefPtr<RegisterID> clauseVal = generator.newTemporary(); 3491 3491 generator.emitNode(clauseVal.get(), list->getClause()->expr()); 3492 generator.emitBinaryOp (op_stricteq,clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes());3492 generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); 3493 3493 labelVector.append(generator.newLabel()); 3494 3494 generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); … … 3498 3498 RefPtr<RegisterID> clauseVal = generator.newTemporary(); 3499 3499 generator.emitNode(clauseVal.get(), list->getClause()->expr()); 3500 generator.emitBinaryOp (op_stricteq,clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes());3500 generator.emitBinaryOp<OpStricteq>(clauseVal.get(), clauseVal.get(), switchExpression, OperandTypes()); 3501 3501 labelVector.append(generator.newLabel()); 3502 3502 generator.emitJumpIfTrue(clauseVal.get(), labelVector[labelVector.size() - 1].get()); … … 3846 3846 { 3847 3847 RefPtr<RegisterID> condition = generator.newTemporary(); 3848 generator.emitEqualityOp (op_stricteq,condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode))));3848 generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::NormalMode)))); 3849 3849 generator.emitJumpIfTrue(condition.get(), generatorBodyLabel.get()); 3850 3850 3851 3851 Ref<Label> throwLabel = generator.newLabel(); 3852 generator.emitEqualityOp (op_stricteq,condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode))));3852 generator.emitEqualityOp<OpStricteq>(condition.get(), generator.generatorResumeModeRegister(), generator.emitLoad(nullptr, jsNumber(static_cast<int32_t>(JSGeneratorFunction::GeneratorResumeMode::ThrowMode)))); 3853 3853 generator.emitJumpIfTrue(condition.get(), throwLabel.get()); 3854 3854 … … 4010 4010 4011 4011 Ref<Label> superclassIsNullLabel = generator.newLabel(); 4012 generator.emitJumpIfTrue(generator.emitUnaryOp (op_eq_null,tempRegister.get(), superclass.get()), superclassIsNullLabel.get());4012 generator.emitJumpIfTrue(generator.emitUnaryOp<OpEqNull>(tempRegister.get(), superclass.get()), superclassIsNullLabel.get()); 4013 4013 4014 4014 Ref<Label> superclassIsObjectLabel = generator.newLabel(); … … 4020 4020 4021 4021 Ref<Label> protoParentIsObjectOrNullLabel = 
generator.newLabel(); 4022 generator.emitJumpIfTrue(generator.emitUnaryOp (op_is_object_or_null,tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get());4023 generator.emitJumpIfTrue(generator.emitUnaryOp (op_is_function,tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get());4022 generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsObjectOrNull>(tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); 4023 generator.emitJumpIfTrue(generator.emitUnaryOp<OpIsFunction>(tempRegister.get(), protoParent.get()), protoParentIsObjectOrNullLabel.get()); 4024 4024 generator.emitThrowTypeError("The value of the superclass's prototype property is not an object."_s); 4025 4025 generator.emitLabel(protoParentIsObjectOrNullLabel.get()); -
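The NodesCodegen.cpp churn above is mechanical: the opcode moves from a runtime OpcodeID argument to a template argument, so every call site changes shape but not behavior. For instance:

    // before: generator.emitUnaryOp(op_eq_null, dst.get(), src.get());
    // after:  generator.emitUnaryOp<OpEqNull>(dst.get(), src.get());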
trunk/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.cpp
r237546 r237547 1 1 /* 2 * Copyright (C) 201 1, 2015Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 26 27 #include "config.h" 27 #include " VirtualRegister.h"28 #include "ProfileTypeBytecodeFlag.h" 28 29 29 namespace JSC { 30 #include <wtf/PrintStream.h> 30 31 31 void VirtualRegister::dump(PrintStream& out) const 32 namespace WTF { 33 34 void printInternal(PrintStream& out, JSC::ProfileTypeBytecodeFlag flag) 32 35 { 33 if (!isValid()) { 34 out.print("<invalid>"); 36 switch (flag) { 37 case JSC::ProfileTypeBytecodeClosureVar: 38 out.print("ProfileTypeBytecodeClosureVar"); 39 return; 40 case JSC::ProfileTypeBytecodeLocallyResolved: 41 out.print("ProfileTypeBytecodeLocallyResolved"); 42 return; 43 case JSC::ProfileTypeBytecodeDoesNotHaveGlobalID: 44 out.print("ProfileTypeBytecodeDoesNotHaveGlobalID"); 45 return; 46 case JSC::ProfileTypeBytecodeFunctionArgument: 47 out.print("ProfileTypeBytecodeFunctionArgument"); 48 return; 49 case JSC::ProfileTypeBytecodeFunctionReturnStatement: 50 out.print("ProfileTypeBytecodeFunctionReturnStatement"); 35 51 return; 36 52 } 37 38 if (isHeader()) {39 out.print("head", m_virtualRegister);40 return;41 }42 43 if (isConstant()) {44 out.print("const", toConstantIndex());45 return;46 }47 48 if (isArgument()) {49 if (!toArgument())50 out.print("this");51 else52 out.print("arg", toArgument());53 return;54 }55 56 if (isLocal()) {57 out.print("loc", toLocal());58 return;59 }60 61 RELEASE_ASSERT_NOT_REACHED();62 53 } 63 54 64 } // namespace JSC 65 55 } // namespace WTF -
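Defining WTF::printInternal for the enum hooks it into the generic PrintStream machinery, so, assuming the usual WTF logging helpers, a flag prints by name:

    #include <wtf/DataLog.h>

    JSC::ProfileTypeBytecodeFlag flag = JSC::ProfileTypeBytecodeClosureVar;
    dataLog("flag = ", flag, "\n"); // prints "flag = ProfileTypeBytecodeClosureVar"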
trunk/Source/JavaScriptCore/bytecompiler/ProfileTypeBytecodeFlag.h
r237546 r237547 1 1 /* 2 * Copyright (C) 201 2Apple Inc. All rights reserved.2 * Copyright (C) 2018 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 26 #include "config.h" 27 #include "SpecialPointer.h" 28 29 #include "CodeBlock.h" 30 #include "JSGlobalObject.h" 31 #include "JSCInlines.h" 26 #pragma once 32 27 33 28 namespace JSC { 34 29 35 void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer) 36 { 37 return globalObject->actualPointerFor(pointer); 38 } 39 40 void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer) 41 { 42 return actualPointerFor(codeBlock->globalObject(), pointer); 43 } 30 enum ProfileTypeBytecodeFlag { 31 ProfileTypeBytecodeClosureVar, 32 ProfileTypeBytecodeLocallyResolved, 33 ProfileTypeBytecodeDoesNotHaveGlobalID, 34 ProfileTypeBytecodeFunctionArgument, 35 ProfileTypeBytecodeFunctionReturnStatement 36 }; 44 37 45 38 } // namespace JSC 46 39 40 namespace WTF { 41 42 class PrintStream; 43 44 void printInternal(PrintStream&, JSC::ProfileTypeBytecodeFlag); 45 46 } // namespace WTF -
trunk/Source/JavaScriptCore/bytecompiler/RegisterID.h
r237486 r237547 38 38 class RegisterID { 39 39 WTF_MAKE_NONCOPYABLE(RegisterID); 40 41 friend class VirtualRegister; 40 42 public: 41 43 RegisterID() … … 123 125 #endif 124 126 }; 125 126 127 } // namespace JSC 127 128 -
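The only functional change to RegisterID is the friend class VirtualRegister declaration, presumably so that a VirtualRegister can be built directly from a RegisterID's private index in the new bytecode plumbing. A rough, simplified illustration of that relationship; the real classes also carry reference counting and debug-only state:

    class VirtualRegister;

    class RegisterID {
        friend class VirtualRegister; // grants access to m_virtualRegister
    public:
        explicit RegisterID(int index) : m_virtualRegister(index) {}
        int index() const { return m_virtualRegister; }
    private:
        int m_virtualRegister;
    };

    class VirtualRegister {
    public:
        // Construct straight from a RegisterID, reading the private field.
        explicit VirtualRegister(const RegisterID& reg) : m_offset(reg.m_virtualRegister) {}
        int offset() const { return m_offset; }
    private:
        int m_offset;
    };

    int main()
    {
        RegisterID reg(5);
        return VirtualRegister(reg).offset() == 5 ? 0 : 1;
    }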
trunk/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalysis.h
r237486 r237547 26 26 #pragma once 27 27 28 #include "InstructionStream.h" 28 29 #include <wtf/HashSet.h> 29 30 … … 33 34 class StaticPropertyAnalysis : public RefCounted<StaticPropertyAnalysis> { 34 35 public: 35 static Ref<StaticPropertyAnalysis> create( Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target)36 static Ref<StaticPropertyAnalysis> create(InstructionStream::MutableRef&& instructionRef) 36 37 { 37 return adoptRef(*new StaticPropertyAnalysis( instructions, target));38 return adoptRef(*new StaticPropertyAnalysis(WTFMove(instructionRef))); 38 39 } 39 40 40 41 void addPropertyIndex(unsigned propertyIndex) { m_propertyIndexes.add(propertyIndex); } 41 42 42 void record() 43 { 44 (*m_instructions)[m_target] = m_propertyIndexes.size(); 45 } 43 void record(); 46 44 47 45 int propertyIndexCount() { return m_propertyIndexes.size(); } 48 46 49 47 private: 50 StaticPropertyAnalysis(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions, unsigned target) 51 : m_instructions(instructions) 52 , m_target(target) 48 StaticPropertyAnalysis(InstructionStream::MutableRef&& instructionRef) 49 : m_instructionRef(WTFMove(instructionRef)) 53 50 { 54 51 } 55 52 56 Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions; 57 unsigned m_target; 53 InstructionStream::MutableRef m_instructionRef; 58 54 typedef HashSet<unsigned, WTF::IntHash<unsigned>, WTF::UnsignedWithZeroKeyHashTraits<unsigned>> PropertyIndexSet; 59 55 PropertyIndexSet m_propertyIndexes; -
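StaticPropertyAnalysis used to patch the inline-capacity operand by raw index into a flat UnlinkedInstruction vector; with the new variable-width instruction stream that addressing no longer works, so the analysis now holds an InstructionStream::MutableRef and record() moves out of line to write through it. A sketch of the deferred-patch idea; MutableInstructionRef and its writeInlineCapacity method are invented stand-ins for the real MutableRef API:

    #include <cstddef>
    #include <cstdint>
    #include <iostream>
    #include <unordered_set>
    #include <vector>

    // Illustrative stand-in for InstructionStream::MutableRef: it remembers
    // where an instruction's operand lives so it can be rewritten later.
    struct MutableInstructionRef {
        std::vector<uint8_t>* stream = nullptr;
        std::size_t capacityOperandOffset = 0;

        void writeInlineCapacity(uint8_t capacity) { (*stream)[capacityOperandOffset] = capacity; }
    };

    class StaticPropertyAnalysis {
    public:
        explicit StaticPropertyAnalysis(MutableInstructionRef ref) : m_ref(ref) {}

        void addPropertyIndex(unsigned propertyIndex) { m_propertyIndexes.insert(propertyIndex); }

        // Patch the recorded op_new_object/op_create_this with the number of
        // distinct properties seen, so the object gets room for them up front.
        void record() { m_ref.writeInlineCapacity(static_cast<uint8_t>(m_propertyIndexes.size())); }

    private:
        MutableInstructionRef m_ref;
        std::unordered_set<unsigned> m_propertyIndexes;
    };

    int main()
    {
        std::vector<uint8_t> stream { /* op_new_object */ 42, /* dst */ 0, /* inlineCapacity */ 0 };
        StaticPropertyAnalysis analysis({ &stream, 2 });
        analysis.addPropertyIndex(7);
        analysis.addPropertyIndex(9);
        analysis.record();
        std::cout << int(stream[2]) << "\n"; // prints 2
    }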
trunk/Source/JavaScriptCore/bytecompiler/StaticPropertyAnalyzer.h
r237486 r237547 36 36 class StaticPropertyAnalyzer { 37 37 public: 38 StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>*); 39 40 void createThis(int dst, unsigned offsetOfInlineCapacityOperand); 41 void newObject(int dst, unsigned offsetOfInlineCapacityOperand); 42 void putById(int dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. 43 void mov(int dst, int src); 38 void createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); 39 void newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef); 40 void putById(RegisterID* dst, unsigned propertyIndex); // propertyIndex is an index into a uniqued set of strings. 41 void mov(RegisterID* dst, RegisterID* src); 44 42 45 43 void kill(); 46 void kill( intdst);44 void kill(RegisterID* dst); 47 45 48 46 private: 49 47 void kill(StaticPropertyAnalysis*); 50 48 51 Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* m_instructions;52 49 typedef HashMap<int, RefPtr<StaticPropertyAnalysis>, WTF::IntHash<int>, WTF::UnsignedWithZeroKeyHashTraits<int>> AnalysisMap; 53 50 AnalysisMap m_analyses; 54 51 }; 55 52 56 inline StaticPropertyAnalyzer::StaticPropertyAnalyzer(Vector<UnlinkedInstruction, 0, UnsafeVectorOverflow>* instructions) 57 : m_instructions(instructions) 58 { 59 } 60 61 inline void StaticPropertyAnalyzer::createThis(int dst, unsigned offsetOfInlineCapacityOperand) 53 inline void StaticPropertyAnalyzer::createThis(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) 62 54 { 63 55 AnalysisMap::AddResult addResult = m_analyses.add( 64 dst , StaticPropertyAnalysis::create(m_instructions, offsetOfInlineCapacityOperand));56 dst->index(), StaticPropertyAnalysis::create(WTFMove(instructionRef))); 65 57 ASSERT_UNUSED(addResult, addResult.isNewEntry); // Can't have two 'this' in the same constructor. 
66 58 } 67 59 68 inline void StaticPropertyAnalyzer::newObject( int dst, unsigned offsetOfInlineCapacityOperand)60 inline void StaticPropertyAnalyzer::newObject(RegisterID* dst, InstructionStream::MutableRef&& instructionRef) 69 61 { 70 RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create( m_instructions, offsetOfInlineCapacityOperand);71 AnalysisMap::AddResult addResult = m_analyses.add(dst , analysis);62 RefPtr<StaticPropertyAnalysis> analysis = StaticPropertyAnalysis::create(WTFMove(instructionRef)); 63 AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); 72 64 if (!addResult.isNewEntry) { 73 65 kill(addResult.iterator->value.get()); … … 76 68 } 77 69 78 inline void StaticPropertyAnalyzer::putById( intdst, unsigned propertyIndex)70 inline void StaticPropertyAnalyzer::putById(RegisterID* dst, unsigned propertyIndex) 79 71 { 80 StaticPropertyAnalysis* analysis = m_analyses.get(dst );72 StaticPropertyAnalysis* analysis = m_analyses.get(dst->index()); 81 73 if (!analysis) 82 74 return; … … 84 76 } 85 77 86 inline void StaticPropertyAnalyzer::mov( int dst, intsrc)78 inline void StaticPropertyAnalyzer::mov(RegisterID* dst, RegisterID* src) 87 79 { 88 RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src );80 RefPtr<StaticPropertyAnalysis> analysis = m_analyses.get(src->index()); 89 81 if (!analysis) { 90 82 kill(dst); … … 92 84 } 93 85 94 AnalysisMap::AddResult addResult = m_analyses.add(dst , analysis);86 AnalysisMap::AddResult addResult = m_analyses.add(dst->index(), analysis); 95 87 if (!addResult.isNewEntry) { 96 88 kill(addResult.iterator->value.get()); … … 108 100 } 109 101 110 inline void StaticPropertyAnalyzer::kill( intdst)102 inline void StaticPropertyAnalyzer::kill(RegisterID* dst) 111 103 { 112 104 // We observe kills in order to avoid piling on properties to an object after … … 149 141 // properties yet. 150 142 151 AnalysisMap::iterator it = m_analyses.find(dst );143 AnalysisMap::iterator it = m_analyses.find(dst->index()); 152 144 if (it == m_analyses.end()) 153 145 return; -
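Taken together, the analyzer keys pending analyses by the destination register's index (hence the signature change from int to RegisterID*), shares them across mov, and flushes them on kill, at which point the instruction that created the object is patched with the property count. A self-contained sketch of that lifecycle, with a raw byte slot standing in for the real instruction patching:

    #include <cstdint>
    #include <iostream>
    #include <memory>
    #include <set>
    #include <unordered_map>

    struct Analysis {
        std::set<unsigned> properties;
        uint8_t* inlineCapacitySlot = nullptr; // stand-in for the MutableRef

        // Flushing on destruction writes the inferred inline capacity back
        // into the creating instruction.
        ~Analysis()
        {
            if (inlineCapacitySlot)
                *inlineCapacitySlot = static_cast<uint8_t>(properties.size());
        }
    };

    struct Analyzer {
        std::unordered_map<int, std::shared_ptr<Analysis>> analyses;

        void newObject(int dst, uint8_t* slot)
        {
            auto analysis = std::make_shared<Analysis>();
            analysis->inlineCapacitySlot = slot;
            analyses[dst] = analysis; // overwriting releases (flushes) any old analysis
        }
        void putById(int dst, unsigned propertyIndex)
        {
            auto it = analyses.find(dst);
            if (it != analyses.end())
                it->second->properties.insert(propertyIndex);
        }
        void mov(int dst, int src)
        {
            auto it = analyses.find(src);
            if (it == analyses.end()) {
                kill(dst);
                return;
            }
            auto analysis = it->second;
            analyses[dst] = analysis; // aliased registers share one pending analysis
        }
        void kill(int dst) { analyses.erase(dst); } // flush happens in ~Analysis
    };

    int main()
    {
        uint8_t inlineCapacityOperand = 0;
        Analyzer analyzer;
        analyzer.newObject(/* dst */ 0, &inlineCapacityOperand);
        analyzer.putById(0, /* property "x" */ 11);
        analyzer.putById(0, /* property "y" */ 12);
        analyzer.kill(0);
        std::cout << int(inlineCapacityOperand) << "\n"; // prints 2
    }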
trunk/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
r237486 r237547 136 136 // Helper for min and max. 137 137 template<typename ChecksFunctor> 138 bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); 138 bool handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks); 139 139 140 140 void refineStatically(CallLinkStatus&, Node* callTarget); … 153 153 enum Terminality { Terminal, NonTerminal }; 154 154 Terminality handleCall( 155 int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, 155 VirtualRegister result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize, 156 156 Node* callTarget, int argumentCountIncludingThis, int registerOffset, CallLinkStatus, 157 157 SpeculatedType prediction); 158 Terminality handleCall(Instruction* pc, NodeType op, CallMode); 159 Terminality handleVarargsCall(Instruction* pc, NodeType op, CallMode); 158 template<typename CallOp> 159 Terminality handleCall(const Instruction* pc, NodeType op, CallMode); 160 template<typename CallOp> 161 Terminality handleVarargsCall(const Instruction* pc, NodeType op, CallMode); 160 162 void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument); 161 163 void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis); … 165 167 unsigned inliningCost(CallVariant, int argumentCountIncludingThis, InlineCallFrame::Kind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1. 166 168 // Handle inlining. Return true if it succeeded, false if we need to plant a call. 167 bool handleVarargsInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); 169 bool handleVarargsInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, NodeType callOp, InlineCallFrame::Kind); 168 170 unsigned getInliningBalance(const CallLinkStatus&, CodeSpecializationKind); 169 171 enum class CallOptimizationResult { OptimizedToJump, Inlined, DidNothing }; 170 CallOptimizationResult handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); 171 CallOptimizationResult handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction); 172 CallOptimizationResult handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee); 173 CallOptimizationResult handleInlining(Node* callTargetNode, VirtualRegister result, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind,
SpeculatedType prediction); 172 174 template<typename ChecksFunctor> 173 void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks);175 void inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks); 174 176 // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. 175 177 template<typename ChecksFunctor> 176 bool handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);178 bool handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); 177 179 template<typename ChecksFunctor> 178 bool handleDOMJITCall(Node* callee, int resultOperand, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);180 bool handleDOMJITCall(Node* callee, VirtualRegister result, const DOMJIT::Signature*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks); 179 181 template<typename ChecksFunctor> 180 bool handleIntrinsicGetter( int resultOperand, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks);182 bool handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& intrinsicVariant, Node* thisNode, const ChecksFunctor& insertChecks); 181 183 template<typename ChecksFunctor> 182 bool handleTypedArrayConstructor( int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);184 bool handleTypedArrayConstructor(VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks); 183 185 template<typename ChecksFunctor> 184 bool handleConstantInternalFunction(Node* callTargetNode, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks);186 bool handleConstantInternalFunction(Node* callTargetNode, VirtualRegister result, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, SpeculatedType, const ChecksFunctor& insertChecks); 185 187 Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, const InferredType::Descriptor&, Node* value); 186 188 Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset, const InferredType::Descriptor&, NodeType = GetByOffset); 187 bool handleDOMJITGetter(int resultOperand, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); 188 bool handleModuleNamespaceLoad(int resultOperand, SpeculatedType, Node* base, GetByIdStatus); 189 bool handleDOMJITGetter(VirtualRegister result, const GetByIdVariant&, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction); 190 bool handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType, Node* base, GetByIdStatus); 191 192 
template<typename Bytecode> 193 void handlePutByVal(Bytecode, unsigned instructionSize); 194 template <typename Bytecode> 195 void handlePutAccessorById(NodeType, Bytecode); 196 template <typename Bytecode> 197 void handlePutAccessorByVal(NodeType, Bytecode); 198 template <typename Bytecode> 199 void handleNewFunc(NodeType, Bytecode); 200 template <typename Bytecode> 201 void handleNewFuncExp(NodeType, Bytecode); 189 202 190 203 // Create a presence ObjectPropertyCondition based on some known offset and structure set. Does not … 205 218 Node* store(Node* base, unsigned identifier, const PutByIdVariant&, Node* value); 206 219 220 template<typename Op> 221 void parseGetById(const Instruction*); 207 222 void handleGetById( 208 int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); 223 VirtualRegister destination, SpeculatedType, Node* base, unsigned identifierNumber, GetByIdStatus, AccessType, unsigned instructionSize); 209 224 void emitPutById( 210 225 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); 211 226 void handlePutById( 212 227 Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, 213 bool isDirect); 228 bool isDirect, unsigned instructionSize); 214 229 215 230 // Either register a watchpoint or emit a check for this condition. Returns false if the … 788 803 789 804 Node* addCall( 790 int result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, 805 VirtualRegister result, NodeType op, const DOMJIT::Signature* signature, Node* callee, int argCount, int registerOffset, 791 806 SpeculatedType prediction) 792 807 { … 800 815 Node* call = addCallWithoutSettingResult( 801 816 op, OpInfo(signature), callee, argCount, registerOffset, OpInfo(prediction)); 802 VirtualRegister resultReg(result); 803 if (resultReg.isValid()) 804 set(resultReg, call); 817 if (result.isValid()) 818 set(result, call); 805 819 return call; 806 820 } … 834 848 // inlined tail call frames, we use SpecFullTop 835 849 // to avoid a spurious OSR exit.
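The hunks above show the parser's new shape: call handlers are templated on the op struct and take const Instruction*, and raw operand indexing like pc[1].u.operand gives way to typed views obtained with pc->as<CallOp>(), whose named fields (bytecode.dst, bytecode.callee, bytecode.argc, bytecode.argv) appear throughout the hunks that follow. A standalone sketch of that access pattern under an invented fixed layout; real JSC ops are generated and may be narrow or wide:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Illustrative fixed layout; not the generated JSC op structs.
    struct OpCall {
        static constexpr uint8_t opcodeID = 7;
        uint8_t dst, callee, argc, argv;
    };

    struct Instruction {
        const uint8_t* bits;

        uint8_t opcodeID() const { return bits[0]; }

        // Typed view: decode this instruction as a specific op struct.
        template<typename Op>
        Op as() const
        {
            assert(opcodeID() == Op::opcodeID);
            Op op;
            std::memcpy(&op, bits + 1, sizeof(Op));
            return op;
        }
    };

    // A handler templated on the op type, mirroring handleCall<CallOp>(pc, ...).
    template<typename CallOp>
    int calleeOf(const Instruction* pc)
    {
        auto bytecode = pc->template as<CallOp>();
        return bytecode.callee; // named field instead of pc[2].u.operand
    }

    int main()
    {
        const uint8_t stream[] = { OpCall::opcodeID, /*dst*/ 0, /*callee*/ 3, /*argc*/ 2, /*argv*/ 8 };
        Instruction inst { stream };
        return calleeOf<OpCall>(&inst) == 3 ? 0 : 1;
    }

One templated handler can then serve op_call, op_tail_call and op_construct without the old static_asserts that pinned all three to the same length.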
836 Instruction* instruction = &m_inlineStackTop->m_profiledBlock->instructions()[bytecodeIndex];837 OpcodeID opcodeID = Interpreter::getOpcodeID(instruction->u.opcode);850 auto instruction = m_inlineStackTop->m_profiledBlock->instructions().at(bytecodeIndex); 851 OpcodeID opcodeID = instruction->opcodeID(); 838 852 839 853 switch (opcodeID) { … … 894 908 } 895 909 896 ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action) 910 ArrayMode getArrayMode(Array::Action action) 911 { 912 CodeBlock* codeBlock = m_inlineStackTop->m_profiledBlock; 913 ArrayProfile* profile = codeBlock->getArrayProfile(codeBlock->bytecodeOffset(m_currentInstruction)); 914 return getArrayMode(*profile, action); 915 } 916 917 ArrayMode getArrayMode(ArrayProfile& profile, Array::Action action) 897 918 { 898 919 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 899 profile ->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);900 bool makeSafe = profile ->outOfBounds(locker);901 return ArrayMode::fromObserved(locker, profile, action, makeSafe);920 profile.computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); 921 bool makeSafe = profile.outOfBounds(locker); 922 return ArrayMode::fromObserved(locker, &profile, action, makeSafe); 902 923 } 903 924 … … 1147 1168 Vector<DelayedSetLocal, 2> m_setLocalQueue; 1148 1169 1149 Instruction* m_currentInstruction;1170 const Instruction* m_currentInstruction; 1150 1171 bool m_hasDebuggerEnabled; 1151 1172 bool m_hasAnyForceOSRExits { false }; … … 1198 1219 } 1199 1220 1200 ByteCodeParser::Terminality ByteCodeParser::handleCall(Instruction* pc, NodeType op, CallMode callMode) 1221 template<typename CallOp> 1222 ByteCodeParser::Terminality ByteCodeParser::handleCall(const Instruction* pc, NodeType op, CallMode callMode) 1201 1223 { 1202 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), 1203 "op_call, op_tail_call and op_construct should always have the same length"); 1204 static_assert(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), 1205 "op_call, op_tail_call and op_construct should always have the same length"); 1206 1207 int result = pc[1].u.operand; 1208 Node* callTarget = get(VirtualRegister(pc[2].u.operand)); 1209 int argumentCountIncludingThis = pc[3].u.operand; 1210 int registerOffset = -pc[4].u.operand; 1224 auto bytecode = pc->as<CallOp>(); 1225 Node* callTarget = get(bytecode.callee); 1226 int registerOffset = -static_cast<int>(bytecode.argv); 1211 1227 1212 1228 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( … … 1216 1232 InlineCallFrame::Kind kind = InlineCallFrame::kindFor(callMode); 1217 1233 1218 return handleCall( result, op, kind, OPCODE_LENGTH(op_call), callTarget,1219 argumentCountIncludingThis, registerOffset, callLinkStatus, getPrediction());1234 return handleCall(bytecode.dst, op, kind, pc->size(), callTarget, 1235 bytecode.argc, registerOffset, callLinkStatus, getPrediction()); 1220 1236 } 1221 1237 … … 1227 1243 1228 1244 ByteCodeParser::Terminality ByteCodeParser::handleCall( 1229 intresult, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,1245 VirtualRegister result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize, 1230 1246 Node* callTarget, int argumentCountIncludingThis, int registerOffset, 1231 1247 CallLinkStatus callLinkStatus, SpeculatedType prediction) … … 1259 1275 } 1260 1276 1261 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CallMode callMode) 1277 template<typename 
CallOp> 1278 ByteCodeParser::Terminality ByteCodeParser::handleVarargsCall(const Instruction* pc, NodeType op, CallMode callMode) 1262 1279 { 1263 static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs), 1264 "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); 1265 static_assert(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_tail_call_varargs), 1266 "op_call_varargs, op_tail_call_varargs and op_construct_varargs should always have the same length"); 1267 1268 int result = pc[1].u.operand; 1269 int callee = pc[2].u.operand; 1270 int thisReg = pc[3].u.operand; 1271 int arguments = pc[4].u.operand; 1272 int firstFreeReg = pc[5].u.operand; 1273 int firstVarArgOffset = pc[6].u.operand; 1280 auto bytecode = pc->as<CallOp>(); 1281 int firstFreeReg = bytecode.firstFree.offset(); 1282 int firstVarArgOffset = bytecode.firstVarArg; 1274 1283 1275 1284 SpeculatedType prediction = getPrediction(); 1276 1285 1277 Node* callTarget = get( VirtualRegister(callee));1286 Node* callTarget = get(bytecode.callee); 1278 1287 1279 1288 CallLinkStatus callLinkStatus = CallLinkStatus::computeFor( … … 1287 1296 addToGraph(FilterCallLinkStatus, OpInfo(m_graph.m_plan.recordedStatuses().addCallLinkStatus(currentCodeOrigin(), callLinkStatus)), callTarget); 1288 1297 1289 if (handleVarargsInlining(callTarget, result,1290 callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments),1298 if (handleVarargsInlining(callTarget, bytecode.dst, 1299 callLinkStatus, firstFreeReg, bytecode.thisValue, bytecode.arguments, 1291 1300 firstVarArgOffset, op, 1292 1301 InlineCallFrame::varargsKindFor(callMode))) { … … 1300 1309 data->firstVarArgOffset = firstVarArgOffset; 1301 1310 1302 Node* thisChild = get( VirtualRegister(thisReg));1311 Node* thisChild = get(bytecode.thisValue); 1303 1312 Node* argumentsChild = nullptr; 1304 1313 if (op != TailCallForwardVarargs) 1305 argumentsChild = get( VirtualRegister(arguments));1314 argumentsChild = get(bytecode.arguments); 1306 1315 1307 1316 if (op == TailCallVarargs || op == TailCallForwardVarargs) { … … 1314 1323 1315 1324 Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, thisChild, argumentsChild); 1316 VirtualRegister resultReg(result); 1317 if (resultReg.isValid()) 1318 set(resultReg, call); 1325 if (bytecode.dst.isValid()) 1326 set(bytecode.dst, call); 1319 1327 return NonTerminal; 1320 1328 } … … 1425 1433 auto oldStackTop = m_inlineStackTop; 1426 1434 m_inlineStackTop = stackEntry; 1427 m_currentIndex = OPCODE_LENGTH(op_enter);1435 m_currentIndex = opcodeLengths[op_enter]; 1428 1436 m_exitOK = true; 1429 1437 processSetLocalQueue(); … … 1432 1440 m_exitOK = false; 1433 1441 1434 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), OPCODE_LENGTH(op_enter), getBytecodeBeginForBlock);1442 BasicBlock** entryBlockPtr = tryBinarySearch<BasicBlock*, unsigned>(stackEntry->m_blockLinkingTargets, stackEntry->m_blockLinkingTargets.size(), opcodeLengths[op_enter], getBytecodeBeginForBlock); 1435 1443 RELEASE_ASSERT(entryBlockPtr); 1436 1444 addJumpTo(*entryBlockPtr); … … 1542 1550 1543 1551 template<typename ChecksFunctor> 1544 void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks)1552 void 
ByteCodeParser::inlineCall(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, int argumentCountIncludingThis, InlineCallFrame::Kind kind, BasicBlock* continuationBlock, const ChecksFunctor& insertChecks) 1545 1553 { 1546 Instruction* savedCurrentInstruction = m_currentInstruction;1554 const Instruction* savedCurrentInstruction = m_currentInstruction; 1547 1555 CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind); 1548 1556 … … 1575 1583 size_t argumentPositionStart = m_graph.m_argumentPositions.size(); 1576 1584 1577 VirtualRegister resultReg(resultOperand); 1578 if (resultReg.isValid()) 1579 resultReg = m_inlineStackTop->remapOperand(resultReg); 1585 if (result.isValid()) 1586 result = m_inlineStackTop->remapOperand(result); 1580 1587 1581 1588 VariableAccessData* calleeVariable = nullptr; … … 1638 1645 } 1639 1646 1640 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result Reg,1647 InlineStackEntry inlineStackEntry(this, codeBlock, codeBlock, callee.function(), result, 1641 1648 (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind, continuationBlock); 1642 1649 … … 1686 1693 } 1687 1694 1688 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee)1695 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleCallVariant(Node* callTargetNode, VirtualRegister result, CallVariant callee, int registerOffset, VirtualRegister thisArgument, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, SpeculatedType prediction, unsigned& inliningBalance, BasicBlock* continuationBlock, bool needsToCheckCallee) 1689 1696 { 1690 1697 VERBOSE_LOG(" Considering callee ", callee, "\n"); … … 1722 1729 1723 1730 if (InternalFunction* function = callee.internalFunction()) { 1724 if (handleConstantInternalFunction(callTargetNode, result Operand, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) {1731 if (handleConstantInternalFunction(callTargetNode, result, function, registerOffset, argumentCountIncludingThis, specializationKind, prediction, insertChecksWithAccounting)) { 1725 1732 endSpecialCase(); 1726 1733 return CallOptimizationResult::Inlined; … … 1732 1739 Intrinsic intrinsic = callee.intrinsicFor(specializationKind); 1733 1740 if (intrinsic != NoIntrinsic) { 1734 if (handleIntrinsicCall(callTargetNode, result Operand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {1741 if (handleIntrinsicCall(callTargetNode, result, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { 1735 1742 endSpecialCase(); 1736 1743 return CallOptimizationResult::Inlined; … … 1742 1749 if (Options::useDOMJIT()) { 1743 1750 if (const DOMJIT::Signature* signature = callee.signatureFor(specializationKind)) { 1744 if (handleDOMJITCall(callTargetNode, result Operand, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {1751 if (handleDOMJITCall(callTargetNode, result, signature, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) { 1745 1752 
endSpecialCase(); 1746 1753 return CallOptimizationResult::Inlined; … … 1758 1765 emitFunctionChecks(callee, callTargetNode, thisArgument); 1759 1766 }; 1760 inlineCall(callTargetNode, result Operand, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck);1767 inlineCall(callTargetNode, result, callee, registerOffset, argumentCountIncludingThis, kind, continuationBlock, insertCheck); 1761 1768 inliningBalance -= myInliningCost; 1762 1769 return CallOptimizationResult::Inlined; 1763 1770 } 1764 1771 1765 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, int resultOperand,1772 bool ByteCodeParser::handleVarargsInlining(Node* callTargetNode, VirtualRegister result, 1766 1773 const CallLinkStatus& callLinkStatus, int firstFreeReg, VirtualRegister thisArgument, 1767 1774 VirtualRegister argumentsArgument, unsigned argumentsOffset, … … 1875 1882 // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without 1876 1883 // calling LoadVarargs twice. 1877 inlineCall(callTargetNode, result Operand, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks);1884 inlineCall(callTargetNode, result, callVariant, registerOffset, maxNumArguments, kind, nullptr, insertChecks); 1878 1885 1879 1886 VERBOSE_LOG("Successful inlining (varargs, monomorphic).\nStack: ", currentCodeOrigin(), "\n"); … … 1892 1899 1893 1900 ByteCodeParser::CallOptimizationResult ByteCodeParser::handleInlining( 1894 Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus,1901 Node* callTargetNode, VirtualRegister result, const CallLinkStatus& callLinkStatus, 1895 1902 int registerOffset, VirtualRegister thisArgument, 1896 1903 int argumentCountIncludingThis, … … 1907 1914 if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) { 1908 1915 return handleCallVariant( 1909 callTargetNode, result Operand, callLinkStatus[0], registerOffset, thisArgument,1916 callTargetNode, result, callLinkStatus[0], registerOffset, thisArgument, 1910 1917 argumentCountIncludingThis, nextOffset, kind, prediction, inliningBalance, nullptr, true); 1911 1918 } … … 1997 2004 1998 2005 auto inliningResult = handleCallVariant( 1999 myCallTargetNode, result Operand, callLinkStatus[i], registerOffset,2006 myCallTargetNode, result, callLinkStatus[i], registerOffset, 2000 2007 thisArgument, argumentCountIncludingThis, nextOffset, kind, prediction, 2001 2008 inliningBalance, continuationBlock, false); … … 2034 2041 if (couldTakeSlowPath) { 2035 2042 addCall( 2036 result Operand, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis,2043 result, callOp, nullptr, myCallTargetNode, argumentCountIncludingThis, 2037 2044 registerOffset, prediction); 2038 2045 VERBOSE_LOG("We added a call in the slow path\n"); … … 2042 2049 emitArgumentPhantoms(registerOffset, argumentCountIncludingThis); 2043 2050 2044 set( VirtualRegister(resultOperand), addToGraph(BottomValue));2051 set(result, addToGraph(BottomValue)); 2045 2052 VERBOSE_LOG("couldTakeSlowPath was false\n"); 2046 2053 } … … 2067 2074 2068 2075 template<typename ChecksFunctor> 2069 bool ByteCodeParser::handleMinMax( int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)2076 bool ByteCodeParser::handleMinMax(VirtualRegister result, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks) 2070 2077 { 2071 2078 ASSERT(op == ArithMin || op == ArithMax); … … 2073 2080 if 
(argumentCountIncludingThis == 1) { 2074 2081 insertChecks(); 2075 double result = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity();2076 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(result)))));2082 double limit = op == ArithMax ? -std::numeric_limits<double>::infinity() : +std::numeric_limits<double>::infinity(); 2083 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsDoubleNumber(limit))))); 2077 2084 return true; 2078 2085 } … … 2080 2087 if (argumentCountIncludingThis == 2) { 2081 2088 insertChecks(); 2082 Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));2083 addToGraph(Phantom, Edge(result , NumberUse));2084 set( VirtualRegister(resultOperand), result);2089 Node* resultNode = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); 2090 addToGraph(Phantom, Edge(resultNode, NumberUse)); 2091 set(result, resultNode); 2085 2092 return true; 2086 2093 } … … 2088 2095 if (argumentCountIncludingThis == 3) { 2089 2096 insertChecks(); 2090 set( VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));2097 set(result, addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); 2091 2098 return true; 2092 2099 } … … 2097 2104 2098 2105 template<typename ChecksFunctor> 2099 bool ByteCodeParser::handleIntrinsicCall(Node* callee, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)2106 bool ByteCodeParser::handleIntrinsicCall(Node* callee, VirtualRegister result, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) 2100 2107 { 2101 2108 VERBOSE_LOG(" The intrinsic is ", intrinsic, "\n"); … … 2110 2117 // 2111 2118 // Which is extremely amusing, but probably not worth optimizing. 2112 if (! 
VirtualRegister(resultOperand).isValid())2119 if (!result.isValid()) 2113 2120 return false; 2114 2121 … … 2120 2127 if (argumentCountIncludingThis == 1) { // Math.abs() 2121 2128 insertChecks(); 2122 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));2129 set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); 2123 2130 return true; 2124 2131 } … … 2131 2138 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) 2132 2139 node->mergeFlags(NodeMayOverflowInt32InDFG); 2133 set( VirtualRegister(resultOperand), node);2140 set(result, node); 2134 2141 return true; 2135 2142 } 2136 2143 2137 2144 case MinIntrinsic: 2138 return handleMinMax(result Operand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);2145 return handleMinMax(result, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks); 2139 2146 2140 2147 case MaxIntrinsic: 2141 return handleMinMax(result Operand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);2148 return handleMinMax(result, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks); 2142 2149 2143 2150 #define DFG_ARITH_UNARY(capitalizedName, lowerName) \ … … 2148 2155 if (argumentCountIncludingThis == 1) { 2149 2156 insertChecks(); 2150 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));2157 set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); 2151 2158 return true; 2152 2159 } … … 2163 2170 } 2164 2171 insertChecks(); 2165 set( VirtualRegister(resultOperand), addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset))));2172 set(result, addToGraph(ArithUnary, OpInfo(static_cast<std::underlying_type<Arith::UnaryType>::type>(type)), get(virtualRegisterForArgument(1, registerOffset)))); 2166 2173 return true; 2167 2174 } … … 2171 2178 if (argumentCountIncludingThis == 1) { 2172 2179 insertChecks(); 2173 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));2180 set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); 2174 2181 return true; 2175 2182 } … … 2187 2194 } 2188 2195 insertChecks(); 2189 set( VirtualRegister(resultOperand), addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset))));2196 set(result, addToGraph(nodeType, get(virtualRegisterForArgument(1, registerOffset)))); 2190 2197 return true; 2191 2198 } … … 2195 2202 // Math.pow() and Math.pow(x) return NaN. 
2196 2203 insertChecks(); 2197 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));2204 set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); 2198 2205 return true; 2199 2206 } … … 2201 2208 VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset); 2202 2209 VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset); 2203 set( VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));2210 set(result, addToGraph(ArithPow, get(xOperand), get(yOperand))); 2204 2211 return true; 2205 2212 } … … 2215 2222 if (static_cast<unsigned>(argumentCountIncludingThis) >= MIN_SPARSE_ARRAY_INDEX) 2216 2223 return false; 2217 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Write); 2224 2225 ArrayMode arrayMode = getArrayMode(Array::Write); 2218 2226 if (!arrayMode.isJSArray()) 2219 2227 return false; … … 2229 2237 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); 2230 2238 Node* arrayPush = addToGraph(Node::VarArg, ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction)); 2231 set( VirtualRegister(resultOperand), arrayPush);2239 set(result, arrayPush); 2232 2240 2233 2241 return true; … … 2252 2260 || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) 2253 2261 return false; 2254 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Read); 2262 2263 ArrayMode arrayMode = getArrayMode(Array::Read); 2255 2264 if (!arrayMode.isJSArray()) 2256 2265 return false; … … 2317 2326 2318 2327 Node* arraySlice = addToGraph(Node::VarArg, ArraySlice, OpInfo(), OpInfo()); 2319 set( VirtualRegister(resultOperand), arraySlice);2328 set(result, arraySlice); 2320 2329 return true; 2321 2330 } … … 2341 2350 return false; 2342 2351 2343 ArrayMode arrayMode = getArrayMode( arrayProfileFor<OpCallShape>(m_currentInstruction),Array::Read);2352 ArrayMode arrayMode = getArrayMode(Array::Read); 2344 2353 if (!arrayMode.isJSArray()) 2345 2354 return false; … … 2380 2389 2381 2390 Node* node = addToGraph(Node::VarArg, ArrayIndexOf, OpInfo(arrayMode.asWord()), OpInfo()); 2382 set( VirtualRegister(resultOperand), node);2391 set(result, node); 2383 2392 return true; 2384 2393 } … … 2398 2407 if (argumentCountIncludingThis != 1) 2399 2408 return false; 2400 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpCallShape>(m_currentInstruction), Array::Write); 2409 2410 ArrayMode arrayMode = getArrayMode(Array::Write); 2401 2411 if (!arrayMode.isJSArray()) 2402 2412 return false; … … 2408 2418 insertChecks(); 2409 2419 Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); 2410 set( VirtualRegister(resultOperand), arrayPop);2420 set(result, arrayPop); 2411 2421 return true; 2412 2422 } … … 2491 2501 args.append(get(virtualRegisterForArgument(1 + i, registerOffset))); 2492 2502 2493 Node* result ;2503 Node* resultNode; 2494 2504 if (numArgs + 1 <= 3) { 2495 2505 while (args.size() < 3) 2496 2506 args.append(nullptr); 2497 result = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]);2507 resultNode = addToGraph(op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction), args[0], args[1], args[2]); 2498 2508 } else { 2499 2509 for (Node* node : args) 2500 2510 addVarArgChild(node); 2501 2511 addVarArgChild(nullptr); 2502 result = 
addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction));2503 } 2504 2505 set( VirtualRegister(resultOperand), result);2512 resultNode = addToGraph(Node::VarArg, op, OpInfo(ArrayMode(Array::SelectUsingPredictions, action).asWord()), OpInfo(prediction)); 2513 } 2514 2515 set(result, resultNode); 2506 2516 return true; 2507 2517 } … … 2524 2534 parseInt = addToGraph(ParseInt, OpInfo(), OpInfo(prediction), get(valueOperand), get(radixOperand)); 2525 2535 } 2526 set( VirtualRegister(resultOperand), parseInt);2536 set(result, parseInt); 2527 2537 return true; 2528 2538 } … … 2537 2547 Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); 2538 2548 2539 set( VirtualRegister(resultOperand), charCode);2549 set(result, charCode); 2540 2550 return true; 2541 2551 } … … 2550 2560 Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String, Array::Read).asWord()), get(thisOperand), get(indexOperand)); 2551 2561 2552 set( VirtualRegister(resultOperand), charCode);2562 set(result, charCode); 2553 2563 return true; 2554 2564 } … … 2556 2566 insertChecks(); 2557 2567 if (argumentCountIncludingThis == 1) 2558 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));2568 set(result, addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32))))); 2559 2569 else { 2560 2570 Node* operand = get(virtualRegisterForArgument(1, registerOffset)); 2561 set( VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));2571 set(result, addToGraph(ArithClz32, operand)); 2562 2572 } 2563 2573 return true; … … 2571 2581 Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); 2572 2582 2573 set( VirtualRegister(resultOperand), charCode);2583 set(result, charCode); 2574 2584 2575 2585 return true; … … 2582 2592 insertChecks(); 2583 2593 Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); 2584 set( VirtualRegister(resultOperand), regExpExec);2594 set(result, regExpExec); 2585 2595 2586 2596 return true; … … 2633 2643 Node* regExpObject = get(virtualRegisterForArgument(0, registerOffset)); 2634 2644 Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), regExpObject, get(virtualRegisterForArgument(1, registerOffset))); 2635 set( VirtualRegister(resultOperand), regExpExec);2645 set(result, regExpExec); 2636 2646 2637 2647 return true; … … 2643 2653 insertChecks(); 2644 2654 Node* regExpMatch = addToGraph(RegExpMatchFast, OpInfo(0), OpInfo(prediction), addToGraph(GetGlobalObject, callee), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); 2645 set( VirtualRegister(resultOperand), regExpMatch);2655 set(result, regExpMatch); 2646 2656 return true; 2647 2657 } … … 2652 2662 2653 2663 insertChecks(); 2654 set( VirtualRegister(resultOperand), addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset))));2664 set(result, addToGraph(ObjectCreate, get(virtualRegisterForArgument(1, registerOffset)))); 2655 2665 return true; 2656 2666 } … … 2661 2671 2662 2672 insertChecks(); 2663 set( VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));2673 set(result, 
addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); 2664 2674 return true; 2665 2675 } … … 2670 2680 2671 2681 insertChecks(); 2672 set( VirtualRegister(resultOperand), addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));2682 set(result, addToGraph(SameValue, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); 2673 2683 return true; 2674 2684 } … … 2679 2689 2680 2690 insertChecks(); 2681 set( VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse)));2691 set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), Edge(get(virtualRegisterForArgument(1, registerOffset)), ObjectUse))); 2682 2692 return true; 2683 2693 } … … 2687 2697 2688 2698 insertChecks(); 2689 set( VirtualRegister(resultOperand), addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));2699 set(result, addToGraph(IsTypedArrayView, OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); 2690 2700 return true; 2691 2701 } … … 2694 2704 insertChecks(); 2695 2705 Node* value = get(virtualRegisterForArgument(0, registerOffset)); 2696 set( VirtualRegister(resultOperand), addToGraph(StringValueOf, value));2706 set(result, addToGraph(StringValueOf, value)); 2697 2707 return true; 2698 2708 } … … 2745 2755 insertChecks(); 2746 2756 2747 Node* result = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));2748 set( VirtualRegister(resultOperand), result);2757 Node* resultNode = addToGraph(StringReplace, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); 2758 set(result, resultNode); 2749 2759 return true; 2750 2760 } … … 2755 2765 2756 2766 insertChecks(); 2757 Node* result = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)));2758 set( VirtualRegister(resultOperand), result);2767 Node* resultNode = addToGraph(StringReplaceRegExp, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))); 2768 set(result, resultNode); 2759 2769 return true; 2760 2770 } … … 2766 2776 if (argumentCountIncludingThis == 1) { 2767 2777 insertChecks(); 2768 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));2778 set(result, addToGraph(JSConstant, OpInfo(m_constantNaN))); 2769 2779 return true; 2770 2780 } … … 2783 2793 } 2784 2794 Node* roundNode = addToGraph(op, OpInfo(0), OpInfo(prediction), operand); 2785 set( VirtualRegister(resultOperand), roundNode);2795 set(result, roundNode); 2786 2796 return true; 2787 2797 } … … 2794 2804 Node* left = get(leftOperand); 2795 2805 Node* right = get(rightOperand); 2796 set( VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));2806 set(result, addToGraph(ArithIMul, left, right)); 2797 2807 return true; 2798 2808 } … … 2802 2812 
return false; 2803 2813 insertChecks(); 2804 set( VirtualRegister(resultOperand), addToGraph(ArithRandom));2814 set(result, addToGraph(ArithRandom)); 2805 2815 return true; 2806 2816 } … … 2808 2818 case DFGTrueIntrinsic: { 2809 2819 insertChecks(); 2810 set( VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));2820 set(result, jsConstant(jsBoolean(true))); 2811 2821 return true; 2812 2822 } … … 2814 2824 case FTLTrueIntrinsic: { 2815 2825 insertChecks(); 2816 set( VirtualRegister(resultOperand), jsConstant(jsBoolean(m_graph.m_plan.isFTL())));2826 set(result, jsConstant(jsBoolean(m_graph.m_plan.isFTL()))); 2817 2827 return true; 2818 2828 } … … 2821 2831 insertChecks(); 2822 2832 addToGraph(ForceOSRExit); 2823 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));2833 set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); 2824 2834 return true; 2825 2835 } … … 2827 2837 case IsFinalTierIntrinsic: { 2828 2838 insertChecks(); 2829 set( VirtualRegister(resultOperand),2839 set(result, 2830 2840 jsConstant(jsBoolean(Options::useFTLJIT() ? m_graph.m_plan.isFTL() : true))); 2831 2841 return true; … … 2839 2849 node->setHeapPrediction(SpecInt32Only); 2840 2850 } 2841 set( VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));2851 set(result, addToGraph(JSConstant, OpInfo(m_constantUndefined))); 2842 2852 return true; 2843 2853 } … … 2849 2859 addToGraph(Phantom, Edge(node, Int32Use)); 2850 2860 } 2851 set( VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));2861 set(result, jsConstant(jsBoolean(true))); 2852 2862 return true; 2853 2863 } … … 2859 2869 VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); 2860 2870 if (enableInt52()) 2861 set( VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));2871 set(result, addToGraph(FiatInt52, get(operand))); 2862 2872 else 2863 set( VirtualRegister(resultOperand), get(operand));2873 set(result, get(operand)); 2864 2874 return true; 2865 2875 } … … 2875 2885 Node* hash = addToGraph(MapHash, normalizedKey); 2876 2886 Node* bucket = addToGraph(GetMapBucket, Edge(map, MapObjectUse), Edge(normalizedKey), Edge(hash)); 2877 Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);2878 set( VirtualRegister(resultOperand), result);2887 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); 2888 set(result, resultNode); 2879 2889 return true; 2880 2890 } … … 2900 2910 FrozenValue* frozenPointer = m_graph.freeze(sentinel); 2901 2911 Node* invertedResult = addToGraph(CompareEqPtr, OpInfo(frozenPointer), bucket); 2902 Node* result = addToGraph(LogicalNot, invertedResult);2903 set( VirtualRegister(resultOperand), result);2912 Node* resultNode = addToGraph(LogicalNot, invertedResult); 2913 set(result, resultNode); 2904 2914 return true; 2905 2915 } … … 2915 2925 Node* hash = addToGraph(MapHash, normalizedKey); 2916 2926 addToGraph(SetAdd, base, normalizedKey, hash); 2917 set( VirtualRegister(resultOperand), base);2927 set(result, base); 2918 2928 return true; 2919 2929 } … … 2936 2946 addVarArgChild(hash); 2937 2947 addToGraph(Node::VarArg, MapSet, OpInfo(0), OpInfo(0)); 2938 set( VirtualRegister(resultOperand), base);2948 set(result, base); 2939 2949 return true; 2940 2950 } … … 2947 2957 Node* map = get(virtualRegisterForArgument(1, registerOffset)); 2948 2958 UseKind useKind = intrinsic == JSSetBucketHeadIntrinsic ? 
SetObjectUse : MapObjectUse; 2949 Node* result = addToGraph(GetMapBucketHead, Edge(map, useKind));2950 set( VirtualRegister(resultOperand), result);2959 Node* resultNode = addToGraph(GetMapBucketHead, Edge(map, useKind)); 2960 set(result, resultNode); 2951 2961 return true; 2952 2962 } … … 2959 2969 Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); 2960 2970 BucketOwnerType type = intrinsic == JSSetBucketNextIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; 2961 Node* result = addToGraph(GetMapBucketNext, OpInfo(type), bucket);2962 set( VirtualRegister(resultOperand), result);2971 Node* resultNode = addToGraph(GetMapBucketNext, OpInfo(type), bucket); 2972 set(result, resultNode); 2963 2973 return true; 2964 2974 } … … 2971 2981 Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); 2972 2982 BucketOwnerType type = intrinsic == JSSetBucketKeyIntrinsic ? BucketOwnerType::Set : BucketOwnerType::Map; 2973 Node* result = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket);2974 set( VirtualRegister(resultOperand), result);2983 Node* resultNode = addToGraph(LoadKeyFromMapBucket, OpInfo(type), OpInfo(prediction), bucket); 2984 set(result, resultNode); 2975 2985 return true; 2976 2986 } … … 2981 2991 insertChecks(); 2982 2992 Node* bucket = get(virtualRegisterForArgument(1, registerOffset)); 2983 Node* result = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket);2984 set( VirtualRegister(resultOperand), result);2993 Node* resultNode = addToGraph(LoadValueFromMapBucket, OpInfo(BucketOwnerType::Map), OpInfo(prediction), bucket); 2994 set(result, resultNode); 2985 2995 return true; 2986 2996 } … … 2999 3009 Node* hash = addToGraph(MapHash, key); 3000 3010 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); 3001 Node* result = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder);3002 3003 set( VirtualRegister(resultOperand), result);3011 Node* resultNode = addToGraph(ExtractValueFromWeakMapGet, OpInfo(), OpInfo(prediction), holder); 3012 3013 set(result, resultNode); 3004 3014 return true; 3005 3015 } … … 3019 3029 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakMapObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); 3020 3030 Node* invertedResult = addToGraph(IsEmpty, holder); 3021 Node* result = addToGraph(LogicalNot, invertedResult);3022 3023 set( VirtualRegister(resultOperand), result);3031 Node* resultNode = addToGraph(LogicalNot, invertedResult); 3032 3033 set(result, resultNode); 3024 3034 return true; 3025 3035 } … … 3039 3049 Node* holder = addToGraph(WeakMapGet, Edge(map, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); 3040 3050 Node* invertedResult = addToGraph(IsEmpty, holder); 3041 Node* result = addToGraph(LogicalNot, invertedResult);3042 3043 set( VirtualRegister(resultOperand), result);3051 Node* resultNode = addToGraph(LogicalNot, invertedResult); 3052 3053 set(result, resultNode); 3044 3054 return true; 3045 3055 } … … 3058 3068 Node* hash = addToGraph(MapHash, key); 3059 3069 addToGraph(WeakSetAdd, Edge(base, WeakSetObjectUse), Edge(key, ObjectUse), Edge(hash, Int32Use)); 3060 set( VirtualRegister(resultOperand), base);3070 set(result, base); 3061 3071 return true; 3062 3072 } … … 3082 3092 addVarArgChild(Edge(hash, Int32Use)); 3083 3093 addToGraph(Node::VarArg, WeakMapSet, OpInfo(0), OpInfo(0)); 3084 set( VirtualRegister(resultOperand), base);3094 set(result, base); 3085 3095 
return true; 3086 3096 } … … 3167 3177 data.byteSize = byteSize; 3168 3178 3169 set(VirtualRegister(result Operand),3179 set(VirtualRegister(result), 3170 3180 addToGraph(op, OpInfo(data.asQuadWord), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)), littleEndianChild)); 3171 3181 return true; … … 3275 3285 Node* object = get(virtualRegisterForArgument(0, registerOffset)); 3276 3286 Node* key = get(virtualRegisterForArgument(1, registerOffset)); 3277 Node* result = addToGraph(HasOwnProperty, object, key);3278 set( VirtualRegister(resultOperand), result);3287 Node* resultNode = addToGraph(HasOwnProperty, object, key); 3288 set(result, resultNode); 3279 3289 return true; 3280 3290 } … … 3293 3303 if (argumentCountIncludingThis > 2) 3294 3304 end = get(virtualRegisterForArgument(2, registerOffset)); 3295 Node* result = addToGraph(StringSlice, thisString, start, end);3296 set( VirtualRegister(resultOperand), result);3305 Node* resultNode = addToGraph(StringSlice, thisString, start, end); 3306 set(result, resultNode); 3297 3307 return true; 3298 3308 } … … 3307 3317 insertChecks(); 3308 3318 Node* thisString = get(virtualRegisterForArgument(0, registerOffset)); 3309 Node* result = addToGraph(ToLowerCase, thisString);3310 set( VirtualRegister(resultOperand), result);3319 Node* resultNode = addToGraph(ToLowerCase, thisString); 3320 set(result, resultNode); 3311 3321 return true; 3312 3322 } … … 3322 3332 Node* thisNumber = get(virtualRegisterForArgument(0, registerOffset)); 3323 3333 if (argumentCountIncludingThis == 1) { 3324 Node* result = addToGraph(ToString, thisNumber);3325 set( VirtualRegister(resultOperand), result);3334 Node* resultNode = addToGraph(ToString, thisNumber); 3335 set(result, resultNode); 3326 3336 } else { 3327 3337 Node* radix = get(virtualRegisterForArgument(1, registerOffset)); 3328 Node* result = addToGraph(NumberToStringWithRadix, thisNumber, radix);3329 set( VirtualRegister(resultOperand), result);3338 Node* resultNode = addToGraph(NumberToStringWithRadix, thisNumber, radix); 3339 set(result, resultNode); 3330 3340 } 3331 3341 return true; … … 3338 3348 insertChecks(); 3339 3349 Node* input = get(virtualRegisterForArgument(1, registerOffset)); 3340 Node* result = addToGraph(NumberIsInteger, input);3341 set( VirtualRegister(resultOperand), result);3350 Node* resultNode = addToGraph(NumberIsInteger, input); 3351 set(result, resultNode); 3342 3352 return true; 3343 3353 } … … 3351 3361 return false; 3352 3362 insertChecks(); 3353 set( VirtualRegister(resultOperand),3363 set(result, 3354 3364 addToGraph(CPUIntrinsic, OpInfo(intrinsic), OpInfo())); 3355 3365 return true; … … 3366 3376 3367 3377 template<typename ChecksFunctor> 3368 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, int resultOperand, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)3378 bool ByteCodeParser::handleDOMJITCall(Node* callTarget, VirtualRegister result, const DOMJIT::Signature* signature, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks) 3369 3379 { 3370 3380 if (argumentCountIncludingThis != static_cast<int>(1 + signature->argumentCount)) … … 3379 3389 3380 3390 insertChecks(); 3381 addCall(result Operand, Call, signature, callTarget, argumentCountIncludingThis, registerOffset, prediction);3391 addCall(result, Call, signature, callTarget, 
argumentCountIncludingThis, registerOffset, prediction); 3382 3392 return true; 3383 3393 } … … 3385 3395 3386 3396 template<typename ChecksFunctor> 3387 bool ByteCodeParser::handleIntrinsicGetter( int resultOperand, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks)3397 bool ByteCodeParser::handleIntrinsicGetter(VirtualRegister result, SpeculatedType prediction, const GetByIdVariant& variant, Node* thisNode, const ChecksFunctor& insertChecks) 3388 3398 { 3389 3399 switch (variant.intrinsic()) { … … 3405 3415 3406 3416 if (!logSize) { 3407 set( VirtualRegister(resultOperand), lengthNode);3417 set(result, lengthNode); 3408 3418 return true; 3409 3419 } … … 3412 3422 // that overflows int32. 3413 3423 Node* shiftNode = jsConstant(jsNumber(logSize)); 3414 set( VirtualRegister(resultOperand), addToGraph(BitLShift, lengthNode, shiftNode));3424 set(result, addToGraph(BitLShift, lengthNode, shiftNode)); 3415 3425 3416 3426 return true; … … 3429 3439 }); 3430 3440 3431 set( VirtualRegister(resultOperand), addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));3441 set(result, addToGraph(GetArrayLength, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); 3432 3442 3433 3443 return true; … … 3447 3457 }); 3448 3458 3449 set( VirtualRegister(resultOperand), addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode));3459 set(result, addToGraph(GetTypedArrayByteOffset, OpInfo(ArrayMode(arrayType, Array::Read).asWord()), thisNode)); 3450 3460 3451 3461 return true; … … 3478 3488 // This information is important for super's constructor call to get new.target constant. 3479 3489 if (prototype && canFold) { 3480 set( VirtualRegister(resultOperand), weakJSConstant(prototype));3490 set(result, weakJSConstant(prototype)); 3481 3491 return true; 3482 3492 } 3483 3493 3484 set( VirtualRegister(resultOperand), addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode));3494 set(result, addToGraph(GetPrototypeOf, OpInfo(0), OpInfo(prediction), thisNode)); 3485 3495 return true; 3486 3496 } … … 3499 3509 } 3500 3510 3501 bool ByteCodeParser::handleDOMJITGetter( int resultOperand, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction)3511 bool ByteCodeParser::handleDOMJITGetter(VirtualRegister result, const GetByIdVariant& variant, Node* thisNode, unsigned identifierNumber, SpeculatedType prediction) 3502 3512 { 3503 3513 if (!variant.domAttribute()) … … 3539 3549 callDOMGetterNode = addToGraph(CallDOMGetter, OpInfo(callDOMGetterData), OpInfo(prediction), thisNode); 3540 3550 blessCallDOMGetter(callDOMGetterNode); 3541 set( VirtualRegister(resultOperand), callDOMGetterNode);3551 set(result, callDOMGetterNode); 3542 3552 return true; 3543 3553 } 3544 3554 3545 bool ByteCodeParser::handleModuleNamespaceLoad( int resultOperand, SpeculatedType prediction, Node* base, GetByIdStatus getById)3555 bool ByteCodeParser::handleModuleNamespaceLoad(VirtualRegister result, SpeculatedType prediction, Node* base, GetByIdStatus getById) 3546 3556 { 3547 3557 if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) … … 3566 3576 m_graph.freeze(getById.moduleEnvironment()); 3567 3577 if (JSValue value = m_graph.tryGetConstantClosureVar(getById.moduleEnvironment(), getById.scopeOffset())) { 3568 set( VirtualRegister(resultOperand), weakJSConstant(value));3578 set(result, weakJSConstant(value)); 3569 3579 return true; 3570 
3580 } 3571 set( VirtualRegister(resultOperand), addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment())));3581 set(result, addToGraph(GetClosureVar, OpInfo(getById.scopeOffset().offset()), OpInfo(prediction), weakJSConstant(getById.moduleEnvironment()))); 3572 3582 return true; 3573 3583 } … … 3575 3585 template<typename ChecksFunctor> 3576 3586 bool ByteCodeParser::handleTypedArrayConstructor( 3577 int resultOperand, InternalFunction* function, int registerOffset,3587 VirtualRegister result, InternalFunction* function, int registerOffset, 3578 3588 int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks) 3579 3589 { … … 3625 3635 3626 3636 insertChecks(); 3627 set( VirtualRegister(resultOperand),3637 set(result, 3628 3638 addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); 3629 3639 return true; … … 3632 3642 template<typename ChecksFunctor> 3633 3643 bool ByteCodeParser::handleConstantInternalFunction( 3634 Node* callTargetNode, int resultOperand, InternalFunction* function, int registerOffset,3644 Node* callTargetNode, VirtualRegister result, InternalFunction* function, int registerOffset, 3635 3645 int argumentCountIncludingThis, CodeSpecializationKind kind, SpeculatedType prediction, const ChecksFunctor& insertChecks) 3636 3646 { … … 3639 3649 // It so happens that the code below assumes that the result operand is valid. It's extremely 3640 3650 // unlikely that the result operand would be invalid - you'd have to call this via a setter call. 3641 if (! VirtualRegister(resultOperand).isValid())3651 if (!result.isValid()) 3642 3652 return false; 3643 3653 … … 3657 3667 insertChecks(); 3658 3668 if (argumentCountIncludingThis == 2) { 3659 set( VirtualRegister(resultOperand),3669 set(result, 3660 3670 addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); 3661 3671 return true; … … 3664 3674 for (int i = 1; i < argumentCountIncludingThis; ++i) 3665 3675 addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); 3666 set( VirtualRegister(resultOperand),3676 set(result, 3667 3677 addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(argumentCountIncludingThis - 1))); 3668 3678 return true; … … 3675 3685 insertChecks(); 3676 3686 if (argumentCountIncludingThis <= 1) 3677 set( VirtualRegister(resultOperand), jsConstant(jsNumber(0)));3687 set(result, jsConstant(jsNumber(0))); 3678 3688 else 3679 set( VirtualRegister(resultOperand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))));3689 set(result, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)))); 3680 3690 3681 3691 return true; … … 3685 3695 insertChecks(); 3686 3696 3687 Node* result ;3697 Node* resultNode; 3688 3698 3689 3699 if (argumentCountIncludingThis <= 1) 3690 result = jsConstant(m_vm->smallStrings.emptyString());3700 resultNode = jsConstant(m_vm->smallStrings.emptyString()); 3691 3701 else 3692 result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));3702 resultNode = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset))); 3693 3703 3694 3704 if (kind == CodeForConstruct) 3695 result = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), result);3696 3697 set( 
VirtualRegister(resultOperand), result);3705 resultNode = addToGraph(NewStringObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->stringObjectStructure())), resultNode); 3706 3707 set(result, resultNode); 3698 3708 return true; 3699 3709 } … … 3703 3713 insertChecks(); 3704 3714 3705 Node* result ;3715 Node* resultNode; 3706 3716 if (argumentCountIncludingThis <= 1) 3707 result = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor())));3717 resultNode = addToGraph(NewObject, OpInfo(m_graph.registerStructure(function->globalObject(*m_vm)->objectStructureForObjectConstructor()))); 3708 3718 else 3709 result = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset)));3710 set( VirtualRegister(resultOperand), result);3719 resultNode = addToGraph(CallObjectConstructor, OpInfo(m_graph.freeze(function->globalObject(*m_vm))), OpInfo(prediction), get(virtualRegisterForArgument(1, registerOffset))); 3720 set(result, resultNode); 3711 3721 return true; 3712 3722 } 3713 3723 3714 3724 for (unsigned typeIndex = 0; typeIndex < NumberOfTypedArrayTypes; ++typeIndex) { 3715 bool result= handleTypedArrayConstructor(3716 result Operand, function, registerOffset, argumentCountIncludingThis,3725 bool handled = handleTypedArrayConstructor( 3726 result, function, registerOffset, argumentCountIncludingThis, 3717 3727 indexToTypedArrayType(typeIndex), insertChecks); 3718 if ( result)3728 if (handled) 3719 3729 return true; 3720 3730 } … … 4149 4159 4150 4160 void ByteCodeParser::handleGetById( 4151 int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,4161 VirtualRegister destination, SpeculatedType prediction, Node* base, unsigned identifierNumber, 4152 4162 GetByIdStatus getByIdStatus, AccessType type, unsigned instructionSize) 4153 4163 { … … 4177 4187 4178 4188 if (getById != TryGetById && getByIdStatus.isModuleNamespace()) { 4179 if (handleModuleNamespaceLoad(destination Operand, prediction, base, getByIdStatus)) {4189 if (handleModuleNamespaceLoad(destination, prediction, base, getByIdStatus)) { 4180 4190 if (UNLIKELY(m_graph.compilation())) 4181 4191 m_graph.compilation()->noticeInlinedGetById(); … … 4191 4201 GetByIdVariant variant = getByIdStatus[0]; 4192 4202 ASSERT(variant.domAttribute()); 4193 if (handleDOMJITGetter(destination Operand, variant, base, identifierNumber, prediction)) {4203 if (handleDOMJITGetter(destination, variant, base, identifierNumber, prediction)) { 4194 4204 if (UNLIKELY(m_graph.compilation())) 4195 4205 m_graph.compilation()->noticeInlinedGetById(); … … 4200 4210 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !getByIdStatus.makesCalls()); 4201 4211 if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::useAccessInlining()) { 4202 set( VirtualRegister(destinationOperand),4212 set(destination, 4203 4213 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); 4204 4214 return; … … 4213 4223 || !Options::usePolymorphicAccessInlining() 4214 4224 || getByIdStatus.numVariants() > Options::maxPolymorphicAccessInliningListSize()) { 4215 set( VirtualRegister(destinationOperand),4225 set(destination, 4216 4226 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); 4217 4227 return; … … 4227 4237 for (const GetByIdVariant& variant : getByIdStatus.variants()) { 4228 4238 if 
(variant.intrinsic() != NoIntrinsic) { 4229 set( VirtualRegister(destinationOperand),4239 set(destination, 4230 4240 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); 4231 4241 return; … … 4242 4252 GetByOffsetMethod method = planLoad(variant.conditionSet()); 4243 4253 if (!method) { 4244 set( VirtualRegister(destinationOperand),4254 set(destination, 4245 4255 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); 4246 4256 return; … … 4257 4267 data->cases = cases; 4258 4268 data->identifierNumber = identifierNumber; 4259 set( VirtualRegister(destinationOperand),4269 set(destination, 4260 4270 addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); 4261 4271 return; … … 4269 4279 Node* loadedValue = load(prediction, base, identifierNumber, variant); 4270 4280 if (!loadedValue) { 4271 set( VirtualRegister(destinationOperand),4281 set(destination, 4272 4282 addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base)); 4273 4283 return; … … 4279 4289 ASSERT(type == AccessType::Get || type == AccessType::GetDirect || !variant.callLinkStatus()); 4280 4290 if (!variant.callLinkStatus() && variant.intrinsic() == NoIntrinsic) { 4281 set( VirtualRegister(destinationOperand), loadedValue);4291 set(destination, loadedValue); 4282 4292 return; 4283 4293 } … … 4285 4295 Node* getter = addToGraph(GetGetter, loadedValue); 4286 4296 4287 if (handleIntrinsicGetter(destination Operand, prediction, variant, base,4297 if (handleIntrinsicGetter(destination, prediction, variant, base, 4288 4298 [&] () { 4289 4299 addToGraph(CheckCell, OpInfo(m_graph.freeze(variant.intrinsicFunction())), getter); … … 4331 4341 4332 4342 handleCall( 4333 destination Operand, Call, InlineCallFrame::GetterCall, instructionSize,4343 destination, Call, InlineCallFrame::GetterCall, instructionSize, 4334 4344 getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction); 4335 4345 } … … 4346 4356 void ByteCodeParser::handlePutById( 4347 4357 Node* base, unsigned identifierNumber, Node* value, 4348 const PutByIdStatus& putByIdStatus, bool isDirect )4358 const PutByIdStatus& putByIdStatus, bool isDirect, unsigned instructionSize) 4349 4359 { 4350 4360 if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::useAccessInlining()) { … … 4519 4529 4520 4530 handleCall( 4521 VirtualRegister() .offset(), Call, InlineCallFrame::SetterCall,4522 OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,4531 VirtualRegister(), Call, InlineCallFrame::SetterCall, 4532 instructionSize, setter, numberOfParameters - 1, registerOffset, 4523 4533 *variant.callLinkStatus(), SpecOther); 4524 4534 return; … … 4540 4550 { 4541 4551 m_constants.shrink(0); 4552 } 4553 4554 template<typename Op> 4555 void ByteCodeParser::parseGetById(const Instruction* currentInstruction) 4556 { 4557 auto bytecode = currentInstruction->as<Op>(); 4558 SpeculatedType prediction = getPrediction(); 4559 4560 Node* base = get(bytecode.base); 4561 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 4562 4563 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 4564 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( 4565 m_inlineStackTop->m_profiledBlock, 4566 m_inlineStackTop->m_baselineMap, m_icContextStack, 4567 currentCodeOrigin(), uid); 4568 4569 AccessType type = AccessType::Get; 4570 unsigned opcodeLength = currentInstruction->size(); 4571 if (Op::opcodeID == op_try_get_by_id) 4572 type = 
AccessType::TryGet; 4573 else if (Op::opcodeID == op_get_by_id_direct) 4574 type = AccessType::GetDirect; 4575 4576 handleGetById( 4577 bytecode.dst, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); 4578 4542 4579 } 4543 4580 … … 4557 4594 #define NEXT_OPCODE(name) \ 4558 4595 if (true) { \ 4559 m_currentIndex += OPCODE_LENGTH(name); \4596 m_currentIndex += currentInstruction->size(); \ 4560 4597 goto WTF_CONCAT(NEXT_OPCODE_, __LINE__); /* Need a unique label: usable more than once per function. */ \ 4561 4598 } else \ … … 4564 4601 4565 4602 #define LAST_OPCODE_LINKED(name) do { \ 4566 m_currentIndex += OPCODE_LENGTH(name); \4603 m_currentIndex += currentInstruction->size(); \ 4567 4604 m_exitOK = false; \ 4568 4605 return; \ … … 4587 4624 void ByteCodeParser::parseBlock(unsigned limit) 4588 4625 { 4589 Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();4626 auto& instructions = m_inlineStackTop->m_codeBlock->instructions(); 4590 4627 unsigned blockBegin = m_currentIndex; 4591 4628 … … 4616 4653 } 4617 4654 4655 CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; 4656 4657 auto jumpTarget = [&](int target) { 4658 if (target) 4659 return target; 4660 return codeBlock->outOfLineJumpOffset(m_currentInstruction); 4661 }; 4662 4618 4663 while (true) { 4619 4664 // We're staring at a new bytecode instruction. So we once again have a place that we can exit … … 4639 4684 4640 4685 // Switch on the current bytecode opcode. 4641 Instruction* currentInstruction = instructionsBegin + m_currentIndex;4686 const Instruction* currentInstruction = instructions.at(m_currentIndex).ptr(); 4642 4687 m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. 4643 OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);4688 OpcodeID opcodeID = currentInstruction->opcodeID(); 4644 4689 4645 4690 VERBOSE_LOG(" parsing ", currentCodeOrigin(), ": ", opcodeID, "\n"); … … 4666 4711 Node* op1 = getThis(); 4667 4712 if (op1->op() != ToThis) { 4668 Structure* cachedStructure = currentInstruction[2].u.structure.get(); 4669 if (currentInstruction[3].u.toThisStatus != ToThisOK 4713 auto metadata = currentInstruction->as<OpToThis>().metadata(codeBlock); 4714 Structure* cachedStructure = metadata.cachedStructure.get(); 4715 if (metadata.toThisStatus != ToThisOK 4670 4716 || !cachedStructure 4671 4717 || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis … … 4685 4731 4686 4732 case op_create_this: { 4687 auto & bytecode = *reinterpret_cast<OpCreateThis*>(currentInstruction);4688 Node* callee = get(VirtualRegister(bytecode.callee ()));4733 auto bytecode = currentInstruction->as<OpCreateThis>(); 4734 Node* callee = get(VirtualRegister(bytecode.callee)); 4689 4735 4690 4736 JSFunction* function = callee->dynamicCastConstant<JSFunction*>(*m_vm); 4691 4737 if (!function) { 4692 JSCell* cachedFunction = bytecode. 
cachedCallee().unvalidatedGet();4738 JSCell* cachedFunction = bytecode.metadata(codeBlock).cachedCallee.unvalidatedGet(); 4693 4739 if (cachedFunction 4694 4740 && cachedFunction != JSCell::seenMultipleCalleeObjects() … … 4729 4775 addToGraph(PutByOffset, OpInfo(data), object, object, weakJSConstant(prototype)); 4730 4776 } 4731 set(VirtualRegister(bytecode.dst ()), object);4777 set(VirtualRegister(bytecode.dst), object); 4732 4778 alreadyEmitted = true; 4733 4779 } … … 4736 4782 } 4737 4783 if (!alreadyEmitted) { 4738 set(VirtualRegister(bytecode.dst ()),4739 addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity ()), callee));4784 set(VirtualRegister(bytecode.dst), 4785 addToGraph(CreateThis, OpInfo(bytecode.inlineCapacity), callee)); 4740 4786 } 4741 4787 NEXT_OPCODE(op_create_this); … … 4743 4789 4744 4790 case op_new_object: { 4745 set(VirtualRegister(currentInstruction[1].u.operand), 4791 auto bytecode = currentInstruction->as<OpNewObject>(); 4792 set(bytecode.dst, 4746 4793 addToGraph(NewObject, 4747 OpInfo(m_graph.registerStructure( currentInstruction[3].u.objectAllocationProfile->structure()))));4794 OpInfo(m_graph.registerStructure(bytecode.metadata(codeBlock).objectAllocationProfile.structure())))); 4748 4795 NEXT_OPCODE(op_new_object); 4749 4796 } 4750 4797 4751 4798 case op_new_array: { 4752 int startOperand = currentInstruction[2].u.operand; 4753 int numOperands = currentInstruction[3].u.operand; 4754 ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; 4799 auto bytecode = currentInstruction->as<OpNewArray>(); 4800 int startOperand = bytecode.argv.offset(); 4801 int numOperands = bytecode.argc; 4802 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).arrayAllocationProfile; 4755 4803 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) 4756 4804 addVarArgChild(get(VirtualRegister(operandIdx))); 4757 unsigned vectorLengthHint = std::max<unsigned>(profile ->vectorLengthHint(), numOperands);4758 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(vectorLengthHint)));4805 unsigned vectorLengthHint = std::max<unsigned>(profile.vectorLengthHint(), numOperands); 4806 set(bytecode.dst, addToGraph(Node::VarArg, NewArray, OpInfo(profile.selectIndexingType()), OpInfo(vectorLengthHint))); 4759 4807 NEXT_OPCODE(op_new_array); 4760 4808 } 4761 4809 4762 4810 case op_new_array_with_spread: { 4763 int startOperand = currentInstruction[2].u.operand; 4764 int numOperands = currentInstruction[3].u.operand; 4765 const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(currentInstruction[4].u.unsignedValue); 4811 auto bytecode = currentInstruction->as<OpNewArrayWithSpread>(); 4812 int startOperand = bytecode.argv.offset(); 4813 int numOperands = bytecode.argc; 4814 const BitVector& bitVector = m_inlineStackTop->m_profiledBlock->unlinkedCodeBlock()->bitVector(bytecode.bitVector); 4766 4815 for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) 4767 4816 addVarArgChild(get(VirtualRegister(operandIdx))); … … 4770 4819 ASSERT(*copy == bitVector); 4771 4820 4772 set( VirtualRegister(currentInstruction[1].u.operand),4821 set(bytecode.dst, 4773 4822 addToGraph(Node::VarArg, NewArrayWithSpread, OpInfo(copy))); 4774 4823 NEXT_OPCODE(op_new_array_with_spread); … … 4776 4825 4777 4826 case op_spread: { 4778 set(VirtualRegister(currentInstruction[1].u.operand), 4779 
addToGraph(Spread, get(VirtualRegister(currentInstruction[2].u.operand)))); 4827 auto bytecode = currentInstruction->as<OpSpread>(); 4828 set(bytecode.dst, 4829 addToGraph(Spread, get(bytecode.argument))); 4780 4830 NEXT_OPCODE(op_spread); 4781 4831 } 4782 4832 4783 4833 case op_new_array_with_size: { 4784 int lengthOperand = currentInstruction[2].u.operand;4785 ArrayAllocationProfile * profile = currentInstruction[3].u.arrayAllocationProfile;4786 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));4834 auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); 4835 ArrayAllocationProfile& profile = bytecode.metadata(codeBlock).arrayAllocationProfile; 4836 set(bytecode.dst, addToGraph(NewArrayWithSize, OpInfo(profile.selectIndexingType()), get(bytecode.length))); 4787 4837 NEXT_OPCODE(op_new_array_with_size); 4788 4838 } 4789 4839 4790 4840 case op_new_array_buffer: { 4791 auto & bytecode = *reinterpret_cast<OpNewArrayBuffer*>(currentInstruction);4841 auto bytecode = currentInstruction->as<OpNewArrayBuffer>(); 4792 4842 // Unfortunately, we can't allocate a new JSImmutableButterfly if the profile tells us new information because we 4793 4843 // cannot allocate from compilation threads. 4794 4844 WTF::loadLoadFence(); 4795 FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly ()))->constant();4845 FrozenValue* frozen = get(VirtualRegister(bytecode.immutableButterfly))->constant(); 4796 4846 WTF::loadLoadFence(); 4797 4847 JSImmutableButterfly* immutableButterfly = frozen->cast<JSImmutableButterfly*>(); … … 4800 4850 data.vectorLengthHint = immutableButterfly->toButterfly()->vectorLength(); 4801 4851 4802 set(VirtualRegister(bytecode.dst ()), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord)));4852 set(VirtualRegister(bytecode.dst), addToGraph(NewArrayBuffer, OpInfo(frozen), OpInfo(data.asQuadWord))); 4803 4853 NEXT_OPCODE(op_new_array_buffer); 4804 4854 } 4805 4855 4806 4856 case op_new_regexp: { 4807 VirtualRegister regExpRegister(currentInstruction[2].u.operand);4808 ASSERT( regExpRegister.isConstant());4809 FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant( regExpRegister.offset()));4810 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0))));4857 auto bytecode = currentInstruction->as<OpNewRegexp>(); 4858 ASSERT(bytecode.regexp.isConstant()); 4859 FrozenValue* frozenRegExp = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.regexp.offset())); 4860 set(bytecode.dst, addToGraph(NewRegexp, OpInfo(frozenRegExp), jsConstant(jsNumber(0)))); 4811 4861 NEXT_OPCODE(op_new_regexp); 4812 4862 } 4813 4863 4814 4864 case op_get_rest_length: { 4865 auto bytecode = currentInstruction->as<OpGetRestLength>(); 4815 4866 InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); 4816 4867 Node* length; 4817 4868 if (inlineCallFrame && !inlineCallFrame->isVarargs()) { 4818 4869 unsigned argumentsLength = inlineCallFrame->argumentCountIncludingThis - 1; 4819 unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;4820 4870 JSValue restLength; 4821 if (argumentsLength <= numParamsToSkip)4871 if (argumentsLength <= bytecode.numParametersToSkip) 4822 4872 restLength = jsNumber(0); 4823 4873 else 4824 restLength = jsNumber(argumentsLength - numParamsToSkip);4874 restLength = jsNumber(argumentsLength - 
bytecode.numParametersToSkip); 4825 4875 4826 4876 length = jsConstant(restLength); 4827 4877 } else 4828 length = addToGraph(GetRestLength, OpInfo( currentInstruction[2].u.unsignedValue));4829 set( VirtualRegister(currentInstruction[1].u.operand), length);4878 length = addToGraph(GetRestLength, OpInfo(bytecode.numParametersToSkip)); 4879 set(bytecode.dst, length); 4830 4880 NEXT_OPCODE(op_get_rest_length); 4831 4881 } 4832 4882 4833 4883 case op_create_rest: { 4884 auto bytecode = currentInstruction->as<OpCreateRest>(); 4834 4885 noticeArgumentsUse(); 4835 Node* arrayLength = get( VirtualRegister(currentInstruction[2].u.operand));4836 set( VirtualRegister(currentInstruction[1].u.operand),4837 addToGraph(CreateRest, OpInfo( currentInstruction[3].u.unsignedValue), arrayLength));4886 Node* arrayLength = get(bytecode.arraySize); 4887 set(bytecode.dst, 4888 addToGraph(CreateRest, OpInfo(bytecode.numParametersToSkip), arrayLength)); 4838 4889 NEXT_OPCODE(op_create_rest); 4839 4890 } … … 4842 4893 4843 4894 case op_bitand: { 4844 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4845 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4895 auto bytecode = currentInstruction->as<OpBitand>(); 4896 Node* op1 = get(bytecode.lhs); 4897 Node* op2 = get(bytecode.rhs); 4846 4898 if (isInt32Speculation(getPrediction())) 4847 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithBitAnd, op1, op2));4899 set(bytecode.dst, addToGraph(ArithBitAnd, op1, op2)); 4848 4900 else 4849 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ValueBitAnd, op1, op2)); 4850 4901 set(bytecode.dst, addToGraph(ValueBitAnd, op1, op2)); 4851 4902 NEXT_OPCODE(op_bitand); 4852 4903 } 4853 4904 4854 4905 case op_bitor: { 4855 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4856 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4906 auto bytecode = currentInstruction->as<OpBitor>(); 4907 Node* op1 = get(bytecode.lhs); 4908 Node* op2 = get(bytecode.rhs); 4857 4909 if (isInt32Speculation(getPrediction())) 4858 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithBitOr, op1, op2));4910 set(bytecode.dst, addToGraph(ArithBitOr, op1, op2)); 4859 4911 else 4860 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(ValueBitOr, op1, op2));4912 set(bytecode.dst, addToGraph(ValueBitOr, op1, op2)); 4861 4913 NEXT_OPCODE(op_bitor); 4862 4914 } 4863 4915 4864 4916 case op_bitxor: { 4865 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4866 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4867 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2)); 4917 auto bytecode = currentInstruction->as<OpBitxor>(); 4918 Node* op1 = get(bytecode.lhs); 4919 Node* op2 = get(bytecode.rhs); 4920 set(bytecode.dst, addToGraph(BitXor, op1, op2)); 4868 4921 NEXT_OPCODE(op_bitxor); 4869 4922 } 4870 4923 4871 4924 case op_rshift: { 4872 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));4873 Node* op 2 = get(VirtualRegister(currentInstruction[3].u.operand));4874 set(VirtualRegister(currentInstruction[1].u.operand),4875 4925 auto bytecode = currentInstruction->as<OpRshift>(); 4926 Node* op1 = get(bytecode.lhs); 4927 Node* op2 = get(bytecode.rhs); 4928 set(bytecode.dst, addToGraph(BitRShift, op1, op2)); 4876 4929 NEXT_OPCODE(op_rshift); 4877 4930 } 4878 4931 4879 4932 case op_lshift: { 4880 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));4881 Node* 
op 2 = get(VirtualRegister(currentInstruction[3].u.operand));4882 set(VirtualRegister(currentInstruction[1].u.operand),4883 4933 auto bytecode = currentInstruction->as<OpLshift>(); 4934 Node* op1 = get(bytecode.lhs); 4935 Node* op2 = get(bytecode.rhs); 4936 set(bytecode.dst, addToGraph(BitLShift, op1, op2)); 4884 4937 NEXT_OPCODE(op_lshift); 4885 4938 } 4886 4939 4887 4940 case op_urshift: { 4888 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));4889 Node* op 2 = get(VirtualRegister(currentInstruction[3].u.operand));4890 set(VirtualRegister(currentInstruction[1].u.operand),4891 4941 auto bytecode = currentInstruction->as<OpUrshift>(); 4942 Node* op1 = get(bytecode.lhs); 4943 Node* op2 = get(bytecode.rhs); 4944 set(bytecode.dst, addToGraph(BitURShift, op1, op2)); 4892 4945 NEXT_OPCODE(op_urshift); 4893 4946 } 4894 4947 4895 4948 case op_unsigned: { 4896 set(VirtualRegister(currentInstruction[1].u.operand),4897 makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));4949 auto bytecode = currentInstruction->as<OpUnsigned>(); 4950 set(bytecode.dst, makeSafe(addToGraph(UInt32ToNumber, get(bytecode.operand)))); 4898 4951 NEXT_OPCODE(op_unsigned); 4899 4952 } … … 4902 4955 4903 4956 case op_inc: { 4904 int srcDst = currentInstruction[1].u.operand; 4905 VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); 4906 Node* op = get(srcDstVirtualRegister); 4907 set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); 4957 auto bytecode = currentInstruction->as<OpInc>(); 4958 Node* op = get(bytecode.srcDst); 4959 set(bytecode.srcDst, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); 4908 4960 NEXT_OPCODE(op_inc); 4909 4961 } 4910 4962 4911 4963 case op_dec: { 4912 int srcDst = currentInstruction[1].u.operand; 4913 VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); 4914 Node* op = get(srcDstVirtualRegister); 4915 set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); 4964 auto bytecode = currentInstruction->as<OpDec>(); 4965 Node* op = get(bytecode.srcDst); 4966 set(bytecode.srcDst, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne))))); 4916 4967 NEXT_OPCODE(op_dec); 4917 4968 } … … 4920 4971 4921 4972 case op_add: { 4922 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4923 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4973 auto bytecode = currentInstruction->as<OpAdd>(); 4974 Node* op1 = get(bytecode.lhs); 4975 Node* op2 = get(bytecode.rhs); 4924 4976 if (op1->hasNumberResult() && op2->hasNumberResult()) 4925 set( VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));4977 set(bytecode.dst, makeSafe(addToGraph(ArithAdd, op1, op2))); 4926 4978 else 4927 set( VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));4979 set(bytecode.dst, makeSafe(addToGraph(ValueAdd, op1, op2))); 4928 4980 NEXT_OPCODE(op_add); 4929 4981 } 4930 4982 4931 4983 case op_sub: { 4932 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4933 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4984 auto bytecode = currentInstruction->as<OpSub>(); 4985 Node* op1 = get(bytecode.lhs); 4986 Node* op2 = get(bytecode.rhs); 4934 4987 if (op1->hasNumberResult() && op2->hasNumberResult()) 4935 set( VirtualRegister(currentInstruction[1].u.operand), 
makeSafe(addToGraph(ArithSub, op1, op2)));4988 set(bytecode.dst, makeSafe(addToGraph(ArithSub, op1, op2))); 4936 4989 else 4937 set( VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueSub, op1, op2)));4990 set(bytecode.dst, makeSafe(addToGraph(ValueSub, op1, op2))); 4938 4991 NEXT_OPCODE(op_sub); 4939 4992 } 4940 4993 4941 4994 case op_negate: { 4942 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4995 auto bytecode = currentInstruction->as<OpNegate>(); 4996 Node* op1 = get(bytecode.operand); 4943 4997 if (op1->hasNumberResult()) 4944 set( VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));4998 set(bytecode.dst, makeSafe(addToGraph(ArithNegate, op1))); 4945 4999 else 4946 set( VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueNegate, op1)));5000 set(bytecode.dst, makeSafe(addToGraph(ValueNegate, op1))); 4947 5001 NEXT_OPCODE(op_negate); 4948 5002 } … … 4950 5004 case op_mul: { 4951 5005 // Multiply requires that the inputs are not truncated, unfortunately. 4952 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4953 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4954 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2))); 5006 auto bytecode = currentInstruction->as<OpMul>(); 5007 Node* op1 = get(bytecode.lhs); 5008 Node* op2 = get(bytecode.rhs); 5009 set(bytecode.dst, makeSafe(addToGraph(ArithMul, op1, op2))); 4955 5010 NEXT_OPCODE(op_mul); 4956 5011 } 4957 5012 4958 5013 case op_mod: { 4959 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4960 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4961 set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2))); 5014 auto bytecode = currentInstruction->as<OpMod>(); 5015 Node* op1 = get(bytecode.lhs); 5016 Node* op2 = get(bytecode.rhs); 5017 set(bytecode.dst, makeSafe(addToGraph(ArithMod, op1, op2))); 4962 5018 NEXT_OPCODE(op_mod); 4963 5019 } … … 4966 5022 // FIXME: ArithPow(Untyped, Untyped) should be supported as the same to ArithMul, ArithSub etc. 
4967 5023 // https://bugs.webkit.org/show_bug.cgi?id=160012 4968 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4969 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4970 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ArithPow, op1, op2)); 5024 auto bytecode = currentInstruction->as<OpPow>(); 5025 Node* op1 = get(bytecode.lhs); 5026 Node* op2 = get(bytecode.rhs); 5027 set(bytecode.dst, addToGraph(ArithPow, op1, op2)); 4971 5028 NEXT_OPCODE(op_pow); 4972 5029 } 4973 5030 4974 5031 case op_div: { 4975 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 4976 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 4977 set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2))); 5032 auto bytecode = currentInstruction->as<OpDiv>(); 5033 Node* op1 = get(bytecode.lhs); 5034 Node* op2 = get(bytecode.rhs); 5035 set(bytecode.dst, makeDivSafe(addToGraph(ArithDiv, op1, op2))); 4978 5036 NEXT_OPCODE(op_div); 4979 5037 } … … 4989 5047 4990 5048 case op_mov: { 4991 Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); 4992 set(VirtualRegister(currentInstruction[1].u.operand), op); 5049 auto bytecode = currentInstruction->as<OpMov>(); 5050 Node* op = get(bytecode.src); 5051 set(bytecode.dst, op); 4993 5052 NEXT_OPCODE(op_mov); 4994 5053 } 4995 5054 4996 5055 case op_check_tdz: { 4997 addToGraph(CheckNotEmpty, get(VirtualRegister(currentInstruction[1].u.operand))); 5056 auto bytecode = currentInstruction->as<OpCheckTdz>(); 5057 addToGraph(CheckNotEmpty, get(bytecode.target)); 4998 5058 NEXT_OPCODE(op_check_tdz); 4999 5059 } 5000 5060 5001 5061 case op_overrides_has_instance: { 5002 auto & bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction);5062 auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); 5003 5063 JSFunction* defaultHasInstanceSymbolFunction = m_inlineStackTop->m_codeBlock->globalObjectFor(currentCodeOrigin())->functionProtoHasInstanceSymbolFunction(); 5004 5064 5005 Node* constructor = get(VirtualRegister(bytecode.constructor ()));5006 Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue ()));5007 5008 set(VirtualRegister(bytecode.dst ()), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue));5065 Node* constructor = get(VirtualRegister(bytecode.constructor)); 5066 Node* hasInstanceValue = get(VirtualRegister(bytecode.hasInstanceValue)); 5067 5068 set(VirtualRegister(bytecode.dst), addToGraph(OverridesHasInstance, OpInfo(m_graph.freeze(defaultHasInstanceSymbolFunction)), constructor, hasInstanceValue)); 5009 5069 NEXT_OPCODE(op_overrides_has_instance); 5010 5070 } 5011 5071 5012 5072 case op_identity_with_profile: { 5013 Node* src = get(VirtualRegister(currentInstruction[1].u.operand)); 5014 SpeculatedType speculation = static_cast<SpeculatedType>(currentInstruction[2].u.operand) << 32 | static_cast<SpeculatedType>(currentInstruction[3].u.operand); 5015 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IdentityWithProfile, OpInfo(speculation), src)); 5073 auto bytecode = currentInstruction->as<OpIdentityWithProfile>(); 5074 Node* srcDst = get(bytecode.srcDst); 5075 SpeculatedType speculation = static_cast<SpeculatedType>(bytecode.topProfile) << 32 | static_cast<SpeculatedType>(bytecode.bottomProfile); 5076 set(bytecode.srcDst, addToGraph(IdentityWithProfile, OpInfo(speculation), srcDst)); 5016 5077 
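The op_identity_with_profile hunk just above rebuilds a 64-bit SpeculatedType from the two 32-bit operands topProfile and bottomProfile. A generic sketch of that split-and-rejoin, with invented helper names:

    #include <cassert>
    #include <cstdint>

    // Split a 64-bit word into two 32-bit bytecode operands and rejoin them,
    // mirroring "top << 32 | bottom" in the hunk above.
    static void split(uint64_t word, uint32_t& top, uint32_t& bottom)
    {
        top = static_cast<uint32_t>(word >> 32);
        bottom = static_cast<uint32_t>(word);
    }

    static uint64_t join(uint32_t top, uint32_t bottom)
    {
        return static_cast<uint64_t>(top) << 32 | static_cast<uint64_t>(bottom);
    }

    int main()
    {
        uint32_t top, bottom;
        split(0x123456789abcdef0ull, top, bottom);
        assert(join(top, bottom) == 0x123456789abcdef0ull);
        return 0;
    }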
NEXT_OPCODE(op_identity_with_profile); 5017 5078 } 5018 5079 5019 5080 case op_instanceof: { 5020 auto & bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction);5081 auto bytecode = currentInstruction->as<OpInstanceof>(); 5021 5082 5022 5083 InstanceOfStatus status = InstanceOfStatus::computeFor( … … 5024 5085 m_currentIndex); 5025 5086 5026 Node* value = get( VirtualRegister(bytecode.value()));5027 Node* prototype = get( VirtualRegister(bytecode.prototype()));5087 Node* value = get(bytecode.value); 5088 Node* prototype = get(bytecode.prototype); 5028 5089 5029 5090 // Only inline it if it's Simple with a commonPrototype; bottom/top or variable … … 5053 5114 if (allOK) { 5054 5115 Node* match = addToGraph(MatchStructure, OpInfo(data), value); 5055 set( VirtualRegister(bytecode.dst()), match);5116 set(bytecode.dst, match); 5056 5117 NEXT_OPCODE(op_instanceof); 5057 5118 } 5058 5119 } 5059 5120 5060 set( VirtualRegister(bytecode.dst()), addToGraph(InstanceOf, value, prototype));5121 set(bytecode.dst, addToGraph(InstanceOf, value, prototype)); 5061 5122 NEXT_OPCODE(op_instanceof); 5062 5123 } 5063 5124 5064 5125 case op_instanceof_custom: { 5065 auto & bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction);5066 Node* value = get( VirtualRegister(bytecode.value()));5067 Node* constructor = get( VirtualRegister(bytecode.constructor()));5068 Node* hasInstanceValue = get( VirtualRegister(bytecode.hasInstanceValue()));5069 set( VirtualRegister(bytecode.dst()), addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue));5126 auto bytecode = currentInstruction->as<OpInstanceofCustom>(); 5127 Node* value = get(bytecode.value); 5128 Node* constructor = get(bytecode.constructor); 5129 Node* hasInstanceValue = get(bytecode.hasInstanceValue); 5130 set(bytecode.dst, addToGraph(InstanceOfCustom, value, constructor, hasInstanceValue)); 5070 5131 NEXT_OPCODE(op_instanceof_custom); 5071 5132 } 5072 5133 case op_is_empty: { 5073 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5074 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsEmpty, value)); 5134 auto bytecode = currentInstruction->as<OpIsEmpty>(); 5135 Node* value = get(bytecode.operand); 5136 set(bytecode.dst, addToGraph(IsEmpty, value)); 5075 5137 NEXT_OPCODE(op_is_empty); 5076 5138 } 5077 5139 case op_is_undefined: { 5078 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5079 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); 5140 auto bytecode = currentInstruction->as<OpIsUndefined>(); 5141 Node* value = get(bytecode.operand); 5142 set(bytecode.dst, addToGraph(IsUndefined, value)); 5080 5143 NEXT_OPCODE(op_is_undefined); 5081 5144 } 5082 5145 5083 5146 case op_is_boolean: { 5084 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5085 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value)); 5147 auto bytecode = currentInstruction->as<OpIsBoolean>(); 5148 Node* value = get(bytecode.operand); 5149 set(bytecode.dst, addToGraph(IsBoolean, value)); 5086 5150 NEXT_OPCODE(op_is_boolean); 5087 5151 } 5088 5152 5089 5153 case op_is_number: { 5090 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5091 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value)); 5154 auto bytecode = currentInstruction->as<OpIsNumber>(); 5155 Node* value = get(bytecode.operand); 5156 set(bytecode.dst, addToGraph(IsNumber, value)); 5092 5157 NEXT_OPCODE(op_is_number); 5093 
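Nearly every case in this stretch follows the same two-part pattern: currentInstruction->as<Op>() decodes the instruction into a struct with named operands (bytecode.dst, bytecode.lhs, ...), and mutable profiling state moves out of the instruction stream into bytecode.metadata(codeBlock). A toy model of both halves, with invented field names and a plain vector standing in for the real per-opcode metadata table:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct ArrayProfile { unsigned vectorLengthHint { 0 }; }; // stand-in profile record

    struct CodeBlockModel {
        // One metadata slot per emitted instruction of this opcode; the real
        // table is laid out per opcode kind, which this sketch glosses over.
        std::vector<ArrayProfile> newArrayProfiles;
    };

    struct OpNewArrayModel {
        uint8_t dst, argv, argc;  // named operands instead of instruction[n].u.operand
        uint8_t metadataID;       // index into the side table; the stream stays read-only

        static OpNewArrayModel decode(const uint8_t* pc) { return { pc[1], pc[2], pc[3], pc[4] }; }

        ArrayProfile& metadata(CodeBlockModel& codeBlock) const
        {
            return codeBlock.newArrayProfiles[metadataID];
        }
    };

    int main()
    {
        const uint8_t stream[] = { /* opcode */ 0x20, /* dst */ 5, /* argv */ 9, /* argc */ 2, /* metadata */ 0 };
        CodeBlockModel codeBlock;
        codeBlock.newArrayProfiles.resize(1);

        OpNewArrayModel bytecode = OpNewArrayModel::decode(stream);
        assert(bytecode.dst == 5 && bytecode.argc == 2);

        bytecode.metadata(codeBlock).vectorLengthHint = 8; // profiling writes hit the side table
        assert(codeBlock.newArrayProfiles[0].vectorLengthHint == 8);
        return 0;
    }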
5158 } 5094 5159 5095 5160 case op_is_cell_with_type: { 5096 JSType type = static_cast<JSType>(currentInstruction[3].u.operand);5097 Node* value = get( VirtualRegister(currentInstruction[2].u.operand));5098 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsCellWithType, OpInfo(type), value));5161 auto bytecode = currentInstruction->as<OpIsCellWithType>(); 5162 Node* value = get(bytecode.operand); 5163 set(bytecode.dst, addToGraph(IsCellWithType, OpInfo(bytecode.type), value)); 5099 5164 NEXT_OPCODE(op_is_cell_with_type); 5100 5165 } 5101 5166 5102 5167 case op_is_object: { 5103 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5104 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value)); 5168 auto bytecode = currentInstruction->as<OpIsObject>(); 5169 Node* value = get(bytecode.operand); 5170 set(bytecode.dst, addToGraph(IsObject, value)); 5105 5171 NEXT_OPCODE(op_is_object); 5106 5172 } 5107 5173 5108 5174 case op_is_object_or_null: { 5109 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5110 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value)); 5175 auto bytecode = currentInstruction->as<OpIsObjectOrNull>(); 5176 Node* value = get(bytecode.operand); 5177 set(bytecode.dst, addToGraph(IsObjectOrNull, value)); 5111 5178 NEXT_OPCODE(op_is_object_or_null); 5112 5179 } 5113 5180 5114 5181 case op_is_function: { 5115 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5116 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); 5182 auto bytecode = currentInstruction->as<OpIsFunction>(); 5183 Node* value = get(bytecode.operand); 5184 set(bytecode.dst, addToGraph(IsFunction, value)); 5117 5185 NEXT_OPCODE(op_is_function); 5118 5186 } 5119 5187 5120 5188 case op_not: { 5121 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5122 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value)); 5189 auto bytecode = currentInstruction->as<OpNot>(); 5190 Node* value = get(bytecode.operand); 5191 set(bytecode.dst, addToGraph(LogicalNot, value)); 5123 5192 NEXT_OPCODE(op_not); 5124 5193 } 5125 5194 5126 5195 case op_to_primitive: { 5127 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5128 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value)); 5196 auto bytecode = currentInstruction->as<OpToPrimitive>(); 5197 Node* value = get(bytecode.src); 5198 set(bytecode.dst, addToGraph(ToPrimitive, value)); 5129 5199 NEXT_OPCODE(op_to_primitive); 5130 5200 } 5131 5201 5132 5202 case op_strcat: { 5133 int startOperand = currentInstruction[2].u.operand; 5134 int numOperands = currentInstruction[3].u.operand; 5203 auto bytecode = currentInstruction->as<OpStrcat>(); 5204 int startOperand = bytecode.src.offset(); 5205 int numOperands = bytecode.count; 5135 5206 #if CPU(X86) 5136 5207 // X86 doesn't have enough registers to compile MakeRope with three arguments. 
The … … 5157 5228 operands[indexInOperands++] = get(VirtualRegister(startOperand - operandIdx)); 5158 5229 } 5159 set(VirtualRegister(currentInstruction[1].u.operand), 5160 addToGraph(StrCat, operands[0], operands[1], operands[2])); 5230 set(bytecode.dst, addToGraph(StrCat, operands[0], operands[1], operands[2])); 5161 5231 NEXT_OPCODE(op_strcat); 5162 5232 } 5163 5233 5164 5234 case op_less: { 5165 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5166 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5167 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); 5235 auto bytecode = currentInstruction->as<OpLess>(); 5236 Node* op1 = get(bytecode.lhs); 5237 Node* op2 = get(bytecode.rhs); 5238 set(bytecode.dst, addToGraph(CompareLess, op1, op2)); 5168 5239 NEXT_OPCODE(op_less); 5169 5240 } 5170 5241 5171 5242 case op_lesseq: { 5172 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5173 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5174 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); 5243 auto bytecode = currentInstruction->as<OpLesseq>(); 5244 Node* op1 = get(bytecode.lhs); 5245 Node* op2 = get(bytecode.rhs); 5246 set(bytecode.dst, addToGraph(CompareLessEq, op1, op2)); 5175 5247 NEXT_OPCODE(op_lesseq); 5176 5248 } 5177 5249 5178 5250 case op_greater: { 5179 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5180 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5181 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); 5251 auto bytecode = currentInstruction->as<OpGreater>(); 5252 Node* op1 = get(bytecode.lhs); 5253 Node* op2 = get(bytecode.rhs); 5254 set(bytecode.dst, addToGraph(CompareGreater, op1, op2)); 5182 5255 NEXT_OPCODE(op_greater); 5183 5256 } 5184 5257 5185 5258 case op_greatereq: { 5186 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5187 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5188 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); 5259 auto bytecode = currentInstruction->as<OpGreatereq>(); 5260 Node* op1 = get(bytecode.lhs); 5261 Node* op2 = get(bytecode.rhs); 5262 set(bytecode.dst, addToGraph(CompareGreaterEq, op1, op2)); 5189 5263 NEXT_OPCODE(op_greatereq); 5190 5264 } 5191 5265 5192 5266 case op_below: { 5193 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5194 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5195 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelow, op1, op2)); 5267 auto bytecode = currentInstruction->as<OpBelow>(); 5268 Node* op1 = get(bytecode.lhs); 5269 Node* op2 = get(bytecode.rhs); 5270 set(bytecode.dst, addToGraph(CompareBelow, op1, op2)); 5196 5271 NEXT_OPCODE(op_below); 5197 5272 } 5198 5273 5199 5274 case op_beloweq: { 5200 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5201 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5202 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareBelowEq, op1, op2)); 5275 auto bytecode = currentInstruction->as<OpBeloweq>(); 5276 Node* op1 = get(bytecode.lhs); 5277 Node* op2 = get(bytecode.rhs); 5278 set(bytecode.dst, addToGraph(CompareBelowEq, op1, op2)); 5203 5279 NEXT_OPCODE(op_beloweq); 5204 5280 } 5205 5281 5206 5282 case op_eq: { 5207 Node* op1 = 
get(VirtualRegister(currentInstruction[2].u.operand)); 5208 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5209 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); 5283 auto bytecode = currentInstruction->as<OpEq>(); 5284 Node* op1 = get(bytecode.lhs); 5285 Node* op2 = get(bytecode.rhs); 5286 set(bytecode.dst, addToGraph(CompareEq, op1, op2)); 5210 5287 NEXT_OPCODE(op_eq); 5211 5288 } 5212 5289 5213 5290 case op_eq_null: { 5214 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5291 auto bytecode = currentInstruction->as<OpEqNull>(); 5292 Node* value = get(bytecode.operand); 5215 5293 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); 5216 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, value, nullConstant));5294 set(bytecode.dst, addToGraph(CompareEq, value, nullConstant)); 5217 5295 NEXT_OPCODE(op_eq_null); 5218 5296 } 5219 5297 5220 5298 case op_stricteq: { 5221 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5222 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5223 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); 5299 auto bytecode = currentInstruction->as<OpStricteq>(); 5300 Node* op1 = get(bytecode.lhs); 5301 Node* op2 = get(bytecode.rhs); 5302 set(bytecode.dst, addToGraph(CompareStrictEq, op1, op2)); 5224 5303 NEXT_OPCODE(op_stricteq); 5225 5304 } 5226 5305 5227 5306 case op_neq: { 5228 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5229 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5230 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); 5307 auto bytecode = currentInstruction->as<OpNeq>(); 5308 Node* op1 = get(bytecode.lhs); 5309 Node* op2 = get(bytecode.rhs); 5310 set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); 5231 5311 NEXT_OPCODE(op_neq); 5232 5312 } 5233 5313 5234 5314 case op_neq_null: { 5235 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 5315 auto bytecode = currentInstruction->as<OpNeqNull>(); 5316 Node* value = get(bytecode.operand); 5236 5317 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); 5237 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant)));5318 set(bytecode.dst, addToGraph(LogicalNot, addToGraph(CompareEq, value, nullConstant))); 5238 5319 NEXT_OPCODE(op_neq_null); 5239 5320 } 5240 5321 5241 5322 case op_nstricteq: { 5242 Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); 5243 Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); 5323 auto bytecode = currentInstruction->as<OpNstricteq>(); 5324 Node* op1 = get(bytecode.lhs); 5325 Node* op2 = get(bytecode.rhs); 5244 5326 Node* invertedResult; 5245 5327 invertedResult = addToGraph(CompareStrictEq, op1, op2); 5246 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));5328 set(bytecode.dst, addToGraph(LogicalNot, invertedResult)); 5247 5329 NEXT_OPCODE(op_nstricteq); 5248 5330 } … … 5251 5333 5252 5334 case op_get_by_val: { 5335 auto bytecode = currentInstruction->as<OpGetByVal>(); 5253 5336 SpeculatedType prediction = getPredictionWithoutOSRExit(); 5254 5337 5255 Node* base = get( VirtualRegister(currentInstruction[2].u.operand));5256 Node* property = get( VirtualRegister(currentInstruction[3].u.operand));5338 
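The NEXT_OPCODE/LAST_OPCODE macros earlier in this file now advance by currentInstruction->size() rather than the fixed OPCODE_LENGTH(name), and the handleGetById/handlePutById calls below pass the same dynamic size, because an instruction can now be encoded narrow or wide. A toy dispatch loop over a self-sizing stream (the explicit length byte is this sketch's assumption, not the real encoding):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        // Each instruction: [opcode][size][operands...]; size counts all bytes.
        std::vector<uint8_t> stream = { 0x01, 3, 7,   0x02, 4, 7, 7 };
        size_t pc = 0;
        unsigned executed = 0;
        while (pc < stream.size()) {
            uint8_t size = stream[pc + 1]; // currentInstruction->size() in the patch
            ++executed;
            pc += size;                    // NEXT_OPCODE: no compile-time OPCODE_LENGTH
        }
        assert(executed == 2 && pc == stream.size());
        return 0;
    }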
Node* base = get(bytecode.base); 5339 Node* property = get(bytecode.property); 5257 5340 bool compiledAsGetById = false; 5258 5341 GetByIdStatus getByIdStatus; … … 5288 5371 5289 5372 if (compiledAsGetById) 5290 handleGetById( currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, OPCODE_LENGTH(op_get_by_val));5373 handleGetById(bytecode.dst, prediction, base, identifierNumber, getByIdStatus, AccessType::Get, currentInstruction->size()); 5291 5374 else { 5292 ArrayMode arrayMode = getArrayMode( arrayProfileFor<OpGetByValShape>(currentInstruction), Array::Read);5375 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read); 5293 5376 // FIXME: We could consider making this not vararg, since it only uses three child 5294 5377 // slots. … … 5299 5382 Node* getByVal = addToGraph(Node::VarArg, GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction)); 5300 5383 m_exitOK = false; // GetByVal must be treated as if it clobbers exit state, since FixupPhase may make it generic. 5301 set( VirtualRegister(currentInstruction[1].u.operand), getByVal);5384 set(bytecode.dst, getByVal); 5302 5385 } 5303 5386 … … 5306 5389 5307 5390 case op_get_by_val_with_this: { 5391 auto bytecode = currentInstruction->as<OpGetByValWithThis>(); 5308 5392 SpeculatedType prediction = getPrediction(); 5309 5393 5310 Node* base = get( VirtualRegister(currentInstruction[2].u.operand));5311 Node* thisValue = get( VirtualRegister(currentInstruction[3].u.operand));5312 Node* property = get( VirtualRegister(currentInstruction[4].u.operand));5394 Node* base = get(bytecode.base); 5395 Node* thisValue = get(bytecode.thisValue); 5396 Node* property = get(bytecode.property); 5313 5397 Node* getByValWithThis = addToGraph(GetByValWithThis, OpInfo(), OpInfo(prediction), base, thisValue, property); 5314 set( VirtualRegister(currentInstruction[1].u.operand), getByValWithThis);5398 set(bytecode.dst, getByValWithThis); 5315 5399 5316 5400 NEXT_OPCODE(op_get_by_val_with_this); … … 5318 5402 5319 5403 case op_put_by_val_direct: 5404 handlePutByVal(currentInstruction->as<OpPutByValDirect>(), currentInstruction->size()); 5405 NEXT_OPCODE(op_put_by_val_direct); 5406 5320 5407 case op_put_by_val: { 5321 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5322 Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); 5323 Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); 5324 bool isDirect = opcodeID == op_put_by_val_direct; 5325 bool compiledAsPutById = false; 5326 { 5327 unsigned identifierNumber = std::numeric_limits<unsigned>::max(); 5328 PutByIdStatus putByIdStatus; 5329 { 5330 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 5331 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; 5332 // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. 5333 // At that time, there is no information. 
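parseGetById<Op> above and handlePutByVal in this hunk replace merged case labels, which re-tested opcodeID at runtime, with one handler templated on the decoded bytecode type. A hypothetical miniature of that pattern:

    #include <cassert>
    #include <cstring>

    struct OpPutByValModel { static constexpr bool isDirect = false; };
    struct OpPutByValDirectModel { static constexpr bool isDirect = true; };

    // One body serves both opcodes; the divergence is a compile-time property
    // of Op, so there is no runtime opcodeID re-check as in the old merged cases.
    template<typename Op>
    const char* lower(const Op&)
    {
        return Op::isDirect ? "PutByValDirect" : "PutByVal";
    }

    int main()
    {
        assert(!strcmp(lower(OpPutByValModel()), "PutByVal"));
        assert(!strcmp(lower(OpPutByValDirectModel()), "PutByValDirect"));
        return 0;
    }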
5334 if (byValInfo 5335 && byValInfo->stubInfo 5336 && !byValInfo->tookSlowPath 5337 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) 5338 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) 5339 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { 5340 compiledAsPutById = true; 5341 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); 5342 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 5343 5344 if (Symbol* symbol = byValInfo->cachedSymbol.get()) { 5345 FrozenValue* frozen = m_graph.freezeStrong(symbol); 5346 addToGraph(CheckCell, OpInfo(frozen), property); 5347 } else { 5348 ASSERT(!uid->isSymbol()); 5349 addToGraph(CheckStringIdent, OpInfo(uid), property); 5350 } 5351 5352 putByIdStatus = PutByIdStatus::computeForStubInfo( 5353 locker, m_inlineStackTop->m_profiledBlock, 5354 byValInfo->stubInfo, currentCodeOrigin(), uid); 5355 5356 } 5357 } 5358 5359 if (compiledAsPutById) 5360 handlePutById(base, identifierNumber, value, putByIdStatus, isDirect); 5361 } 5362 5363 if (!compiledAsPutById) { 5364 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpPutByValShape>(currentInstruction), Array::Write); 5365 5366 addVarArgChild(base); 5367 addVarArgChild(property); 5368 addVarArgChild(value); 5369 addVarArgChild(0); // Leave room for property storage. 5370 addVarArgChild(0); // Leave room for length. 5371 addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); 5372 } 5373 5408 handlePutByVal(currentInstruction->as<OpPutByVal>(), currentInstruction->size()); 5374 5409 NEXT_OPCODE(op_put_by_val); 5375 5410 } 5376 5411 5377 5412 case op_put_by_val_with_this: { 5378 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5379 Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); 5380 Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); 5381 Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); 5413 auto bytecode = currentInstruction->as<OpPutByValWithThis>(); 5414 Node* base = get(bytecode.base); 5415 Node* thisValue = get(bytecode.thisValue); 5416 Node* property = get(bytecode.property); 5417 Node* value = get(bytecode.value); 5382 5418 5383 5419 addVarArgChild(base); … … 5391 5427 5392 5428 case op_define_data_property: { 5393 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5394 Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); 5395 Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); 5396 Node* attributes = get(VirtualRegister(currentInstruction[4].u.operand)); 5429 auto bytecode = currentInstruction->as<OpDefineDataProperty>(); 5430 Node* base = get(bytecode.base); 5431 Node* property = get(bytecode.property); 5432 Node* value = get(bytecode.value); 5433 Node* attributes = get(bytecode.attributes); 5397 5434 5398 5435 addVarArgChild(base); … … 5406 5443 5407 5444 case op_define_accessor_property: { 5408 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5409 Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); 5410 Node* getter = get(VirtualRegister(currentInstruction[3].u.operand)); 5411 Node* setter = get(VirtualRegister(currentInstruction[4].u.operand)); 5412 Node* attributes = get(VirtualRegister(currentInstruction[5].u.operand)); 5445 auto bytecode = currentInstruction->as<OpDefineAccessorProperty>(); 5446 Node* base = get(bytecode.base); 5447 Node* property = 
get(bytecode.property); 5448 Node* getter = get(bytecode.getter); 5449 Node* setter = get(bytecode.setter); 5450 Node* attributes = get(bytecode.attributes); 5413 5451 5414 5452 addVarArgChild(base); … … 5422 5460 } 5423 5461 5424 case op_get_by_id_direct: 5425 case op_try_get_by_id: 5426 case op_get_by_id: 5427 case op_get_by_id_proto_load: 5428 case op_get_by_id_unset: 5429 case op_get_array_length: { 5430 SpeculatedType prediction = getPrediction(); 5431 5432 Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); 5433 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; 5434 5435 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 5436 GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( 5437 m_inlineStackTop->m_profiledBlock, 5438 m_inlineStackTop->m_baselineMap, m_icContextStack, 5439 currentCodeOrigin(), uid); 5440 5441 AccessType type = AccessType::Get; 5442 unsigned opcodeLength = OPCODE_LENGTH(op_get_by_id); 5443 if (opcodeID == op_try_get_by_id) { 5444 type = AccessType::TryGet; 5445 opcodeLength = OPCODE_LENGTH(op_try_get_by_id); 5446 } else if (opcodeID == op_get_by_id_direct) { 5447 type = AccessType::GetDirect; 5448 opcodeLength = OPCODE_LENGTH(op_get_by_id_direct); 5449 } 5450 5451 handleGetById( 5452 currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus, type, opcodeLength); 5453 5454 // Opcode's length is different from others in try and direct cases. 5455 if (opcodeID == op_try_get_by_id) 5456 NEXT_OPCODE(op_try_get_by_id); 5457 else if (opcodeID == op_get_by_id_direct) 5458 NEXT_OPCODE(op_get_by_id_direct); 5459 else 5460 NEXT_OPCODE(op_get_by_id); 5462 case op_get_by_id_direct: { 5463 parseGetById<OpGetByIdDirect>(currentInstruction); 5464 NEXT_OPCODE(op_get_by_id_direct); 5465 } 5466 case op_try_get_by_id: { 5467 parseGetById<OpTryGetById>(currentInstruction); 5468 NEXT_OPCODE(op_try_get_by_id); 5469 } 5470 case op_get_by_id: { 5471 parseGetById<OpGetById>(currentInstruction); 5472 NEXT_OPCODE(op_get_by_id); 5461 5473 } 5462 5474 case op_get_by_id_with_this: { 5463 5475 SpeculatedType prediction = getPrediction(); 5464 5476 5465 Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); 5466 Node* thisValue = get(VirtualRegister(currentInstruction[3].u.operand)); 5467 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand]; 5468 5469 set(VirtualRegister(currentInstruction[1].u.operand), 5477 auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); 5478 Node* base = get(bytecode.base); 5479 Node* thisValue = get(bytecode.thisValue); 5480 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 5481 5482 set(bytecode.dst, 5470 5483 addToGraph(GetByIdWithThis, OpInfo(identifierNumber), OpInfo(prediction), base, thisValue)); 5471 5484 … … 5473 5486 } 5474 5487 case op_put_by_id: { 5475 Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); 5476 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5477 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; 5478 bool direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; 5488 auto bytecode = currentInstruction->as<OpPutById>(); 5489 Node* value = get(bytecode.value); 5490 Node* base = get(bytecode.base); 5491 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 5492 bool direct = bytecode.metadata(codeBlock).flags & 
PutByIdIsDirect; 5479 5493 5480 5494 PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( … … 5483 5497 currentCodeOrigin(), m_graph.identifiers()[identifierNumber]); 5484 5498 5485 handlePutById(base, identifierNumber, value, putByIdStatus, direct );5499 handlePutById(base, identifierNumber, value, putByIdStatus, direct, currentInstruction->size()); 5486 5500 NEXT_OPCODE(op_put_by_id); 5487 5501 } 5488 5502 5489 5503 case op_put_by_id_with_this: { 5490 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5491 Node* thisValue = get(VirtualRegister(currentInstruction[2].u.operand)); 5492 Node* value = get(VirtualRegister(currentInstruction[4].u.operand)); 5493 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; 5504 auto bytecode = currentInstruction->as<OpPutByIdWithThis>(); 5505 Node* base = get(bytecode.base); 5506 Node* thisValue = get(bytecode.thisValue); 5507 Node* value = get(bytecode.value); 5508 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 5494 5509 5495 5510 addToGraph(PutByIdWithThis, OpInfo(identifierNumber), base, thisValue, value); … … 5498 5513 5499 5514 case op_put_getter_by_id: 5515 handlePutAccessorById(PutGetterById, currentInstruction->as<OpPutGetterById>()); 5516 NEXT_OPCODE(op_put_getter_by_id); 5500 5517 case op_put_setter_by_id: { 5501 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5502 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; 5503 unsigned attributes = currentInstruction[3].u.operand; 5504 Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); 5505 NodeType op = (opcodeID == op_put_getter_by_id) ? PutGetterById : PutSetterById; 5506 addToGraph(op, OpInfo(identifierNumber), OpInfo(attributes), base, accessor); 5507 NEXT_OPCODE(op_put_getter_by_id); 5518 handlePutAccessorById(PutSetterById, currentInstruction->as<OpPutSetterById>()); 5519 NEXT_OPCODE(op_put_setter_by_id); 5508 5520 } 5509 5521 5510 5522 case op_put_getter_setter_by_id: { 5511 Node* base = get(VirtualRegister(currentInstruction[1].u.operand));5512 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];5513 unsigned attributes = currentInstruction[3].u.operand;5514 Node* getter = get( VirtualRegister(currentInstruction[4].u.operand));5515 Node* setter = get( VirtualRegister(currentInstruction[5].u.operand));5516 addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo( attributes), base, getter, setter);5523 auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); 5524 Node* base = get(bytecode.base); 5525 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 5526 Node* getter = get(bytecode.getter); 5527 Node* setter = get(bytecode.setter); 5528 addToGraph(PutGetterSetterById, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, getter, setter); 5517 5529 NEXT_OPCODE(op_put_getter_setter_by_id); 5518 5530 } 5519 5531 5520 5532 case op_put_getter_by_val: 5533 handlePutAccessorByVal(PutGetterByVal, currentInstruction->as<OpPutGetterByVal>()); 5534 NEXT_OPCODE(op_put_getter_by_val); 5521 5535 case op_put_setter_by_val: { 5522 Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); 5523 Node* subscript = get(VirtualRegister(currentInstruction[2].u.operand)); 5524 unsigned attributes = currentInstruction[3].u.operand; 5525 Node* accessor = get(VirtualRegister(currentInstruction[4].u.operand)); 5526 
NodeType op = (opcodeID == op_put_getter_by_val) ? PutGetterByVal : PutSetterByVal; 5527 addToGraph(op, OpInfo(attributes), base, subscript, accessor); 5528 NEXT_OPCODE(op_put_getter_by_val); 5536 handlePutAccessorByVal(PutSetterByVal, currentInstruction->as<OpPutSetterByVal>()); 5537 NEXT_OPCODE(op_put_setter_by_val); 5529 5538 } 5530 5539 5531 5540 case op_del_by_id: { 5532 Node* base = get(VirtualRegister(currentInstruction[2].u.operand));5533 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];5534 set(VirtualRegister(currentInstruction[1].u.operand),5535 5541 auto bytecode = currentInstruction->as<OpDelById>(); 5542 Node* base = get(bytecode.base); 5543 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 5544 set(bytecode.dst, addToGraph(DeleteById, OpInfo(identifierNumber), base)); 5536 5545 NEXT_OPCODE(op_del_by_id); 5537 5546 } 5538 5547 5539 5548 case op_del_by_val: { 5540 int dst = currentInstruction[1].u.operand;5541 Node* base = get( VirtualRegister(currentInstruction[2].u.operand));5542 Node* key = get( VirtualRegister(currentInstruction[3].u.operand));5543 set( VirtualRegister(dst), addToGraph(DeleteByVal, base, key));5549 auto bytecode = currentInstruction->as<OpDelByVal>(); 5550 Node* base = get(bytecode.base); 5551 Node* key = get(bytecode.property); 5552 set(bytecode.dst, addToGraph(DeleteByVal, base, key)); 5544 5553 NEXT_OPCODE(op_del_by_val); 5545 5554 } 5546 5555 5547 5556 case op_profile_type: { 5548 Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand)); 5549 addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile); 5557 auto bytecode = currentInstruction->as<OpProfileType>(); 5558 auto& metadata = bytecode.metadata(codeBlock); 5559 Node* valueToProfile = get(bytecode.target); 5560 addToGraph(ProfileType, OpInfo(metadata.typeLocation), valueToProfile); 5550 5561 NEXT_OPCODE(op_profile_type); 5551 5562 } 5552 5563 5553 5564 case op_profile_control_flow: { 5554 BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; 5565 auto bytecode = currentInstruction->as<OpProfileControlFlow>(); 5566 BasicBlockLocation* basicBlockLocation = bytecode.metadata(codeBlock).basicBlockLocation; 5555 5567 addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation)); 5556 5568 NEXT_OPCODE(op_profile_control_flow); … … 5561 5573 case op_jmp: { 5562 5574 ASSERT(!m_currentBlock->terminal()); 5563 int relativeOffset = currentInstruction[1].u.operand; 5575 auto bytecode = currentInstruction->as<OpJmp>(); 5576 int relativeOffset = jumpTarget(bytecode.target); 5564 5577 addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); 5565 5578 if (relativeOffset <= 0) … … 5569 5582 5570 5583 case op_jtrue: { 5571 unsigned relativeOffset = currentInstruction[2].u.operand; 5572 Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); 5573 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); 5584 auto bytecode = currentInstruction->as<OpJtrue>(); 5585 unsigned relativeOffset = jumpTarget(bytecode.target); 5586 Node* condition = get(bytecode.condition); 5587 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5574 5588 LAST_OPCODE(op_jtrue); 5575 5589 } 5576 5590 5577 5591 case op_jfalse: { 5578 unsigned relativeOffset = currentInstruction[2].u.operand; 5579 Node* condition = 
get(VirtualRegister(currentInstruction[1].u.operand)); 5580 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); 5592 auto bytecode = currentInstruction->as<OpJfalse>(); 5593 unsigned relativeOffset = jumpTarget(bytecode.target); 5594 Node* condition = get(bytecode.condition); 5595 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5581 5596 LAST_OPCODE(op_jfalse); 5582 5597 } 5583 5598 5584 5599 case op_jeq_null: { 5585 unsigned relativeOffset = currentInstruction[2].u.operand; 5586 Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); 5600 auto bytecode = currentInstruction->as<OpJeqNull>(); 5601 unsigned relativeOffset = jumpTarget(bytecode.target); 5602 Node* value = get(bytecode.value); 5587 5603 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); 5588 5604 Node* condition = addToGraph(CompareEq, value, nullConstant); 5589 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);5605 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5590 5606 LAST_OPCODE(op_jeq_null); 5591 5607 } 5592 5608 5593 5609 case op_jneq_null: { 5594 unsigned relativeOffset = currentInstruction[2].u.operand; 5595 Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); 5610 auto bytecode = currentInstruction->as<OpJneqNull>(); 5611 unsigned relativeOffset = jumpTarget(bytecode.target); 5612 Node* value = get(bytecode.value); 5596 5613 Node* nullConstant = addToGraph(JSConstant, OpInfo(m_constantNull)); 5597 5614 Node* condition = addToGraph(CompareEq, value, nullConstant); 5598 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);5615 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5599 5616 LAST_OPCODE(op_jneq_null); 5600 5617 } 5601 5618 5602 5619 case op_jless: { 5603 unsigned relativeOffset = currentInstruction[3].u.operand; 5604 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5605 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5620 auto bytecode = currentInstruction->as<OpJless>(); 5621 unsigned relativeOffset = jumpTarget(bytecode.target); 5622 Node* op1 = get(bytecode.lhs); 5623 Node* op2 = get(bytecode.rhs); 5606 5624 Node* condition = addToGraph(CompareLess, op1, op2); 5607 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);5625 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5608 5626 LAST_OPCODE(op_jless); 5609 5627 } 5610 5628 5611 5629 case op_jlesseq: { 5612 unsigned relativeOffset = currentInstruction[3].u.operand; 5613 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5614 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5630 auto bytecode = currentInstruction->as<OpJlesseq>(); 5631 unsigned relativeOffset = jumpTarget(bytecode.target); 5632 Node* op1 = get(bytecode.lhs); 5633 Node* op2 = get(bytecode.rhs); 5615 5634 Node* condition = addToGraph(CompareLessEq, op1, op2); 5616 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + 
OPCODE_LENGTH(op_jlesseq))), condition);5635 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5617 5636 LAST_OPCODE(op_jlesseq); 5618 5637 } 5619 5638 5620 5639 case op_jgreater: { 5621 unsigned relativeOffset = currentInstruction[3].u.operand; 5622 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5623 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5640 auto bytecode = currentInstruction->as<OpJgreater>(); 5641 unsigned relativeOffset = jumpTarget(bytecode.target); 5642 Node* op1 = get(bytecode.lhs); 5643 Node* op2 = get(bytecode.rhs); 5624 5644 Node* condition = addToGraph(CompareGreater, op1, op2); 5625 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);5645 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5626 5646 LAST_OPCODE(op_jgreater); 5627 5647 } 5628 5648 5629 5649 case op_jgreatereq: { 5630 unsigned relativeOffset = currentInstruction[3].u.operand; 5631 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5632 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5650 auto bytecode = currentInstruction->as<OpJgreatereq>(); 5651 unsigned relativeOffset = jumpTarget(bytecode.target); 5652 Node* op1 = get(bytecode.lhs); 5653 Node* op2 = get(bytecode.rhs); 5633 5654 Node* condition = addToGraph(CompareGreaterEq, op1, op2); 5634 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);5655 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5635 5656 LAST_OPCODE(op_jgreatereq); 5636 5657 } 5637 5658 5638 5659 case op_jeq: { 5639 unsigned relativeOffset = currentInstruction[3].u.operand; 5640 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5641 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5660 auto bytecode = currentInstruction->as<OpJeq>(); 5661 unsigned relativeOffset = jumpTarget(bytecode.target); 5662 Node* op1 = get(bytecode.lhs); 5663 Node* op2 = get(bytecode.rhs); 5642 5664 Node* condition = addToGraph(CompareEq, op1, op2); 5643 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq))), condition);5665 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5644 5666 LAST_OPCODE(op_jeq); 5645 5667 } 5646 5668 5647 5669 case op_jstricteq: { 5648 unsigned relativeOffset = currentInstruction[3].u.operand; 5649 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5650 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5670 auto bytecode = currentInstruction->as<OpJstricteq>(); 5671 unsigned relativeOffset = jumpTarget(bytecode.target); 5672 Node* op1 = get(bytecode.lhs); 5673 Node* op2 = get(bytecode.rhs); 5651 5674 Node* condition = addToGraph(CompareStrictEq, op1, op2); 5652 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jstricteq))), condition);5675 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5653 5676 LAST_OPCODE(op_jstricteq); 5654 5677 } 5655 5678 5656 5679 case op_jnless: { 5657 unsigned 
relativeOffset = currentInstruction[3].u.operand; 5658 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5659 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5680 auto bytecode = currentInstruction->as<OpJnless>(); 5681 unsigned relativeOffset = jumpTarget(bytecode.target); 5682 Node* op1 = get(bytecode.lhs); 5683 Node* op2 = get(bytecode.rhs); 5660 5684 Node* condition = addToGraph(CompareLess, op1, op2); 5661 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);5685 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5662 5686 LAST_OPCODE(op_jnless); 5663 5687 } 5664 5688 5665 5689 case op_jnlesseq: { 5666 unsigned relativeOffset = currentInstruction[3].u.operand; 5667 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5668 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5690 auto bytecode = currentInstruction->as<OpJnlesseq>(); 5691 unsigned relativeOffset = jumpTarget(bytecode.target); 5692 Node* op1 = get(bytecode.lhs); 5693 Node* op2 = get(bytecode.rhs); 5669 5694 Node* condition = addToGraph(CompareLessEq, op1, op2); 5670 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);5695 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5671 5696 LAST_OPCODE(op_jnlesseq); 5672 5697 } 5673 5698 5674 5699 case op_jngreater: { 5675 unsigned relativeOffset = currentInstruction[3].u.operand; 5676 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5677 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5700 auto bytecode = currentInstruction->as<OpJngreater>(); 5701 unsigned relativeOffset = jumpTarget(bytecode.target); 5702 Node* op1 = get(bytecode.lhs); 5703 Node* op2 = get(bytecode.rhs); 5678 5704 Node* condition = addToGraph(CompareGreater, op1, op2); 5679 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);5705 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5680 5706 LAST_OPCODE(op_jngreater); 5681 5707 } 5682 5708 5683 5709 case op_jngreatereq: { 5684 unsigned relativeOffset = currentInstruction[3].u.operand; 5685 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5686 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5710 auto bytecode = currentInstruction->as<OpJngreatereq>(); 5711 unsigned relativeOffset = jumpTarget(bytecode.target); 5712 Node* op1 = get(bytecode.lhs); 5713 Node* op2 = get(bytecode.rhs); 5687 5714 Node* condition = addToGraph(CompareGreaterEq, op1, op2); 5688 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);5715 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5689 5716 LAST_OPCODE(op_jngreatereq); 5690 5717 } 5691 5718 5692 5719 case op_jneq: { 5693 unsigned relativeOffset = currentInstruction[3].u.operand; 5694 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5695 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5720 auto bytecode = currentInstruction->as<OpJneq>(); 5721 unsigned 
relativeOffset = jumpTarget(bytecode.target); 5722 Node* op1 = get(bytecode.lhs); 5723 Node* op2 = get(bytecode.rhs); 5696 5724 Node* condition = addToGraph(CompareEq, op1, op2); 5697 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq), m_currentIndex + relativeOffset)), condition);5725 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5698 5726 LAST_OPCODE(op_jneq); 5699 5727 } 5700 5728 5701 5729 case op_jnstricteq: { 5702 unsigned relativeOffset = currentInstruction[3].u.operand; 5703 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5704 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5730 auto bytecode = currentInstruction->as<OpJnstricteq>(); 5731 unsigned relativeOffset = jumpTarget(bytecode.target); 5732 Node* op1 = get(bytecode.lhs); 5733 Node* op2 = get(bytecode.rhs); 5705 5734 Node* condition = addToGraph(CompareStrictEq, op1, op2); 5706 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnstricteq), m_currentIndex + relativeOffset)), condition);5735 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 5707 5736 LAST_OPCODE(op_jnstricteq); 5708 5737 } 5709 5738 5710 5739 case op_jbelow: { 5711 unsigned relativeOffset = currentInstruction[3].u.operand; 5712 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5713 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5740 auto bytecode = currentInstruction->as<OpJbelow>(); 5741 unsigned relativeOffset = jumpTarget(bytecode.target); 5742 Node* op1 = get(bytecode.lhs); 5743 Node* op2 = get(bytecode.rhs); 5714 5744 Node* condition = addToGraph(CompareBelow, op1, op2); 5715 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbelow))), condition);5745 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5716 5746 LAST_OPCODE(op_jbelow); 5717 5747 } 5718 5748 5719 5749 case op_jbeloweq: { 5720 unsigned relativeOffset = currentInstruction[3].u.operand; 5721 Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); 5722 Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); 5750 auto bytecode = currentInstruction->as<OpJbeloweq>(); 5751 unsigned relativeOffset = jumpTarget(bytecode.target); 5752 Node* op1 = get(bytecode.lhs); 5753 Node* op2 = get(bytecode.rhs); 5723 5754 Node* condition = addToGraph(CompareBelowEq, op1, op2); 5724 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jbeloweq))), condition);5755 addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + currentInstruction->size())), condition); 5725 5756 LAST_OPCODE(op_jbeloweq); 5726 5757 } 5727 5758 5728 5759 case op_switch_imm: { 5760 auto bytecode = currentInstruction->as<OpSwitchImm>(); 5729 5761 SwitchData& data = *m_graph.m_switchData.add(); 5730 5762 data.kind = SwitchImm; 5731 data.switchTableIndex = m_inlineStackTop->m_switchRemap[ currentInstruction[1].u.operand];5732 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);5763 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; 5764 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset)); 5733 5765 SimpleJumpTable& table = 
m_codeBlock->switchJumpTable(data.switchTableIndex); 5734 5766 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { … … 5740 5772 data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target)); 5741 5773 } 5742 addToGraph(Switch, OpInfo(&data), get( VirtualRegister(currentInstruction[3].u.operand)));5774 addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); 5743 5775 flushIfTerminal(data); 5744 5776 LAST_OPCODE(op_switch_imm); … … 5746 5778 5747 5779 case op_switch_char: { 5780 auto bytecode = currentInstruction->as<OpSwitchChar>(); 5748 5781 SwitchData& data = *m_graph.m_switchData.add(); 5749 5782 data.kind = SwitchChar; 5750 data.switchTableIndex = m_inlineStackTop->m_switchRemap[ currentInstruction[1].u.operand];5751 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);5783 data.switchTableIndex = m_inlineStackTop->m_switchRemap[bytecode.tableIndex]; 5784 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset)); 5752 5785 SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); 5753 5786 for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { … … 5760 5793 SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); 5761 5794 } 5762 addToGraph(Switch, OpInfo(&data), get( VirtualRegister(currentInstruction[3].u.operand)));5795 addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); 5763 5796 flushIfTerminal(data); 5764 5797 LAST_OPCODE(op_switch_char); … … 5766 5799 5767 5800 case op_switch_string: { 5801 auto bytecode = currentInstruction->as<OpSwitchString>(); 5768 5802 SwitchData& data = *m_graph.m_switchData.add(); 5769 5803 data.kind = SwitchString; 5770 data.switchTableIndex = currentInstruction[1].u.operand;5771 data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);5804 data.switchTableIndex = bytecode.tableIndex; 5805 data.fallThrough.setBytecodeIndex(m_currentIndex + jumpTarget(bytecode.defaultOffset)); 5772 5806 StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); 5773 5807 StringJumpTable::StringOffsetTable::iterator iter; … … 5780 5814 SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); 5781 5815 } 5782 addToGraph(Switch, OpInfo(&data), get( VirtualRegister(currentInstruction[3].u.operand)));5816 addToGraph(Switch, OpInfo(&data), get(bytecode.scrutinee)); 5783 5817 flushIfTerminal(data); 5784 5818 LAST_OPCODE(op_switch_string); 5785 5819 } 5786 5820 5787 case op_ret: 5821 case op_ret: { 5822 auto bytecode = currentInstruction->as<OpRet>(); 5788 5823 ASSERT(!m_currentBlock->terminal()); 5789 5824 if (!inlineCallFrame()) { 5790 5825 // Simple case: we are just producing a return 5791 addToGraph(Return, get( VirtualRegister(currentInstruction[1].u.operand)));5826 addToGraph(Return, get(bytecode.value)); 5792 5827 flushForReturn(); 5793 5828 LAST_OPCODE(op_ret); … … 5796 5831 flushForReturn(); 5797 5832 if (m_inlineStackTop->m_returnValue.isValid()) 5798 setDirect(m_inlineStackTop->m_returnValue, get( VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);5799 5800 if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size()) {5833 setDirect(m_inlineStackTop->m_returnValue, get(bytecode.value), ImmediateSetWithFlush); 5834 5835 if (!m_inlineStackTop->m_continuationBlock && m_currentIndex + 
currentInstruction->size() != m_inlineStackTop->m_codeBlock->instructions().size()) { 5801 5836 // This is an early return from an inlined function and we do not have a continuation block, so we must allocate one. 5802 5837 // It is untargetable, because we do not know the appropriate index. … … 5812 5847 } 5813 5848 LAST_OPCODE_LINKED(op_ret); 5814 5849 } 5815 5850 case op_end: 5816 5851 ASSERT(!inlineCallFrame()); 5817 addToGraph(Return, get( VirtualRegister(currentInstruction[1].u.operand)));5852 addToGraph(Return, get(currentInstruction->as<OpEnd>().value)); 5818 5853 flushForReturn(); 5819 5854 LAST_OPCODE(op_end); 5820 5855 5821 5856 case op_throw: 5822 addToGraph(Throw, get( VirtualRegister(currentInstruction[1].u.operand)));5857 addToGraph(Throw, get(currentInstruction->as<OpThrow>().value)); 5823 5858 flushForTerminal(); 5824 5859 LAST_OPCODE(op_throw); 5825 5860 5826 5861 case op_throw_static_error: { 5827 uint32_t errorType = currentInstruction[2].u.unsignedValue;5828 addToGraph(ThrowStaticError, OpInfo( errorType), get(VirtualRegister(currentInstruction[1].u.operand)));5862 auto bytecode = currentInstruction->as<OpThrowStaticError>(); 5863 addToGraph(ThrowStaticError, OpInfo(bytecode.errorType), get(bytecode.message)); 5829 5864 flushForTerminal(); 5830 5865 LAST_OPCODE(op_throw_static_error); … … 5832 5867 5833 5868 case op_catch: { 5869 auto bytecode = currentInstruction->as<OpCatch>(); 5834 5870 m_graph.m_hasExceptionHandlers = true; 5835 5871 … … 5845 5881 RELEASE_ASSERT(!m_currentBlock->size() || (m_graph.compilation() && m_currentBlock->size() == 1 && m_currentBlock->at(0)->op() == CountExecution)); 5846 5882 5847 ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer);5883 ValueProfileAndOperandBuffer* buffer = bytecode.metadata(codeBlock).buffer; 5848 5884 5849 5885 if (!buffer) { … … 5925 5961 entrypointArguments.resize(m_numArguments); 5926 5962 5927 unsigned exitBytecodeIndex = m_currentIndex + OPCODE_LENGTH(op_catch);5963 unsigned exitBytecodeIndex = m_currentIndex + currentInstruction->size(); 5928 5964 5929 5965 for (unsigned argument = 0; argument < argumentPredictions.size(); ++argument) { … … 5952 5988 5953 5989 case op_call: 5954 handleCall (currentInstruction, Call, CallMode::Regular);5990 handleCall<OpCall>(currentInstruction, Call, CallMode::Regular); 5955 5991 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); 5956 5992 NEXT_OPCODE(op_call); … … 5958 5994 case op_tail_call: { 5959 5995 flushForReturn(); 5960 Terminality terminality = handleCall (currentInstruction, TailCall, CallMode::Tail);5996 Terminality terminality = handleCall<OpTailCall>(currentInstruction, TailCall, CallMode::Tail); 5961 5997 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); 5962 5998 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. 
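The hunks above and below all apply one mechanical pattern: decode the instruction once into a typed struct with named operands, then derive lengths from the instruction itself instead of from per-opcode constants. A minimal before/after sketch of that pattern, assembled from the op_jtrue hunk earlier in this file (the generated Op structs and their exact field layouts are not shown in this changeset, so treat them as assumed):

    // Before: untyped slot access plus a compile-time length constant.
    Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
    unsigned fallThrough = m_currentIndex + OPCODE_LENGTH(op_jtrue);

    // After: as<Op>() yields a typed view with named operands; jumpTarget()
    // resolves the stored target, and size() reports this instruction's own
    // length, which can now vary between narrow and wide encodings.
    auto bytecode = currentInstruction->as<OpJtrue>();
    Node* condition = get(bytecode.condition);
    unsigned relativeOffset = jumpTarget(bytecode.target);
    unsigned fallThrough = m_currentIndex + currentInstruction->size();

The OPCODE_LENGTH-to-size() switch is not cosmetic: once instructions are variable-width, a fixed per-opcode length is wrong for any wide-encoded instruction, which is why every fall-through offset in these hunks now asks the instruction for its size.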
… … 5971 6007 5972 6008 case op_construct: 5973 handleCall (currentInstruction, Construct, CallMode::Construct);6009 handleCall<OpConstruct>(currentInstruction, Construct, CallMode::Construct); 5974 6010 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleCall, which may have inlined the callee, trashed m_currentInstruction"); 5975 6011 NEXT_OPCODE(op_construct); 5976 6012 5977 6013 case op_call_varargs: { 5978 handleVarargsCall (currentInstruction, CallVarargs, CallMode::Regular);6014 handleVarargsCall<OpCallVarargs>(currentInstruction, CallVarargs, CallMode::Regular); 5979 6015 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); 5980 6016 NEXT_OPCODE(op_call_varargs); … … 5983 6019 case op_tail_call_varargs: { 5984 6020 flushForReturn(); 5985 Terminality terminality = handleVarargsCall (currentInstruction, TailCallVarargs, CallMode::Tail);6021 Terminality terminality = handleVarargsCall<OpTailCallVarargs>(currentInstruction, TailCallVarargs, CallMode::Tail); 5986 6022 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); 5987 6023 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. … … 5999 6035 noticeArgumentsUse(); 6000 6036 flushForReturn(); 6001 Terminality terminality = handleVarargsCall (currentInstruction, TailCallForwardVarargs, CallMode::Tail);6037 Terminality terminality = handleVarargsCall<OpTailCallForwardArguments>(currentInstruction, TailCallForwardVarargs, CallMode::Tail); 6002 6038 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); 6003 6039 // If the call is terminal then we should not parse any further bytecodes as the TailCall will exit the function. 
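handleCall and handleVarargsCall are likewise now templated on the opcode struct, so one handler body serves op_call, op_tail_call, op_construct, and the varargs variants without re-deriving operand indices at each call site. The helper's body is not part of this hunk; the sketch below shows only the decode step it plausibly performs, reusing the operand names and the addCall call that appear in the op_call_eval case further down (the signature and field names are assumptions):

    template<typename CallOp> // OpCall, OpTailCall, OpConstruct, ...
    ByteCodeParser::Terminality ByteCodeParser::handleCall(
        const Instruction* pc, NodeType op, CallMode callMode)
    {
        auto bytecode = pc->as<CallOp>();    // typed view of the call instruction
        Node* callee = get(bytecode.callee); // assumed: every call op names its callee
        int registerOffset = -bytecode.argv; // mirrors the op_call_eval case below
        return addCall(bytecode.dst, op, nullptr, callee, bytecode.argc,
            registerOffset, getPrediction());
    }

If every call opcode exposes the same named fields (dst, callee, argc, argv), each instantiation compiles down to the same operand-free logic, which is what lets the per-opcode decoding that used to sit at every call site disappear.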
… … 6011 6047 6012 6048 case op_construct_varargs: { 6013 handleVarargsCall (currentInstruction, ConstructVarargs, CallMode::Construct);6049 handleVarargsCall<OpConstructVarargs>(currentInstruction, ConstructVarargs, CallMode::Construct); 6014 6050 ASSERT_WITH_MESSAGE(m_currentInstruction == currentInstruction, "handleVarargsCall, which may have inlined the callee, trashed m_currentInstruction"); 6015 6051 NEXT_OPCODE(op_construct_varargs); … … 6017 6053 6018 6054 case op_call_eval: { 6019 int result = currentInstruction[1].u.operand; 6020 int callee = currentInstruction[2].u.operand; 6021 int argumentCountIncludingThis = currentInstruction[3].u.operand; 6022 int registerOffset = -currentInstruction[4].u.operand; 6023 addCall(result, CallEval, nullptr, get(VirtualRegister(callee)), argumentCountIncludingThis, registerOffset, getPrediction()); 6055 auto bytecode = currentInstruction->as<OpCallEval>(); 6056 int registerOffset = -bytecode.argv; 6057 addCall(bytecode.dst, CallEval, nullptr, get(bytecode.callee), bytecode.argc, registerOffset, getPrediction()); 6024 6058 NEXT_OPCODE(op_call_eval); 6025 6059 } 6026 6060 6027 6061 case op_jneq_ptr: { 6028 Special::Pointer specialPointer = currentInstruction[2].u.specialPointer; 6062 auto bytecode = currentInstruction->as<OpJneqPtr>(); 6063 Special::Pointer specialPointer = bytecode.specialPointer; 6029 6064 ASSERT(pointerIsCell(specialPointer)); 6030 6065 JSCell* actualPointer = static_cast<JSCell*>( 6031 6066 actualPointerFor(m_inlineStackTop->m_codeBlock, specialPointer)); 6032 6067 FrozenValue* frozenPointer = m_graph.freeze(actualPointer); 6033 int operand = currentInstruction[1].u.operand; 6034 unsigned relativeOffset = currentInstruction[3].u.operand; 6035 Node* child = get(VirtualRegister(operand)); 6036 if (currentInstruction[4].u.operand) { 6068 unsigned relativeOffset = jumpTarget(bytecode.target); 6069 Node* child = get(bytecode.value); 6070 if (bytecode.metadata(codeBlock).hasJumped) { 6037 6071 Node* condition = addToGraph(CompareEqPtr, OpInfo(frozenPointer), child); 6038 addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr), m_currentIndex + relativeOffset)), condition);6072 addToGraph(Branch, OpInfo(branchData(m_currentIndex + currentInstruction->size(), m_currentIndex + relativeOffset)), condition); 6039 6073 LAST_OPCODE(op_jneq_ptr); 6040 6074 } … … 6044 6078 6045 6079 case op_resolve_scope: { 6046 int dst = currentInstruction[1].u.operand; 6047 ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); 6048 unsigned depth = currentInstruction[5].u.operand; 6049 int scope = currentInstruction[2].u.operand; 6050 6051 if (needsDynamicLookup(resolveType, op_resolve_scope)) { 6052 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; 6053 set(VirtualRegister(dst), addToGraph(ResolveScope, OpInfo(identifierNumber), get(VirtualRegister(scope)))); 6080 auto bytecode = currentInstruction->as<OpResolveScope>(); 6081 auto& metadata = bytecode.metadata(codeBlock); 6082 unsigned depth = metadata.localScopeDepth; 6083 6084 if (needsDynamicLookup(metadata.resolveType, op_resolve_scope)) { 6085 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; 6086 set(bytecode.dst, addToGraph(ResolveScope, OpInfo(identifierNumber), get(bytecode.scope))); 6054 6087 NEXT_OPCODE(op_resolve_scope); 6055 6088 } 6056 6089 6057 6090 // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own 
watchpoints. 6058 if (needsVarInjectionChecks( resolveType))6091 if (needsVarInjectionChecks(metadata.resolveType)) 6059 6092 m_graph.watchpoints().addLazily(m_inlineStackTop->m_codeBlock->globalObject()->varInjectionWatchpoint()); 6060 6093 6061 switch ( resolveType) {6094 switch (metadata.resolveType) { 6062 6095 case GlobalProperty: 6063 6096 case GlobalVar: … … 6066 6099 case GlobalLexicalVar: 6067 6100 case GlobalLexicalVarWithVarInjectionChecks: { 6068 JSScope* constantScope = JSScope::constantScopeForCodeBlock( resolveType, m_inlineStackTop->m_codeBlock);6101 JSScope* constantScope = JSScope::constantScopeForCodeBlock(metadata.resolveType, m_inlineStackTop->m_codeBlock); 6069 6102 RELEASE_ASSERT(constantScope); 6070 RELEASE_ASSERT( static_cast<JSScope*>(currentInstruction[6].u.pointer) == constantScope);6071 set( VirtualRegister(dst), weakJSConstant(constantScope));6072 addToGraph(Phantom, get( VirtualRegister(scope)));6103 RELEASE_ASSERT(metadata.constantScope.get() == constantScope); 6104 set(bytecode.dst, weakJSConstant(constantScope)); 6105 addToGraph(Phantom, get(bytecode.scope)); 6073 6106 break; 6074 6107 } … … 6076 6109 // Since the value of the "scope" virtual register is not used in LLInt / baseline op_resolve_scope with ModuleVar, 6077 6110 // we need not to keep it alive by the Phantom node. 6078 JSModuleEnvironment* moduleEnvironment = jsCast<JSModuleEnvironment*>(currentInstruction[6].u.jsCell.get());6079 6111 // Module environment is already strongly referenced by the CodeBlock. 6080 set( VirtualRegister(dst), weakJSConstant(moduleEnvironment));6112 set(bytecode.dst, weakJSConstant(metadata.lexicalEnvironment.get())); 6081 6113 break; 6082 6114 } … … 6084 6116 case ClosureVar: 6085 6117 case ClosureVarWithVarInjectionChecks: { 6086 Node* localBase = get( VirtualRegister(scope));6118 Node* localBase = get(bytecode.scope); 6087 6119 addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope. 6088 6120 6089 6121 // We have various forms of constant folding here. This is necessary to avoid 6090 6122 // spurious recompiles in dead-but-foldable code. 
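The symbol-table test just below also shows the other recurring change in this function: mutable and GC-visible state that used to sit in pointer slots of the instruction stream (symbol tables, structures, watchpoint sets, profiling buffers) is now read from a per-opcode metadata table. A sketch of that access pattern, with field names taken from the op_get_from_scope hunk further down (the generated Metadata struct layout itself is an assumption):

    auto bytecode = currentInstruction->as<OpGetFromScope>();
    auto& metadata = bytecode.metadata(codeBlock); // this instruction's side-table entry
    Structure* structure = metadata.structure.get();     // was currentInstruction[5].u.structure
    WatchpointSet* watchpoints = metadata.watchpointSet; // was currentInstruction[5].u.watchpointSet
    uintptr_t operand = metadata.operand;                // was currentInstruction[6].u.pointer

Moving this state out of line is what lets the instruction stream itself stay compact and effectively read-only.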
6091 if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {6123 if (SymbolTable* symbolTable = metadata.symbolTable.get()) { 6092 6124 InferredValue* singleton = symbolTable->singletonScope(); 6093 6125 if (JSValue value = singleton->inferredValue()) { 6094 6126 m_graph.watchpoints().addLazily(singleton); 6095 set( VirtualRegister(dst), weakJSConstant(value));6127 set(bytecode.dst, weakJSConstant(value)); 6096 6128 break; 6097 6129 } … … 6100 6132 for (unsigned n = depth; n--;) 6101 6133 scope = scope->next(); 6102 set( VirtualRegister(dst), weakJSConstant(scope));6134 set(bytecode.dst, weakJSConstant(scope)); 6103 6135 break; 6104 6136 } 6105 6137 for (unsigned n = depth; n--;) 6106 6138 localBase = addToGraph(SkipScope, localBase); 6107 set( VirtualRegister(dst), localBase);6139 set(bytecode.dst, localBase); 6108 6140 break; 6109 6141 } 6110 6142 case UnresolvedProperty: 6111 6143 case UnresolvedPropertyWithVarInjectionChecks: { 6112 addToGraph(Phantom, get( VirtualRegister(scope)));6144 addToGraph(Phantom, get(bytecode.scope)); 6113 6145 addToGraph(ForceOSRExit); 6114 set( VirtualRegister(dst), addToGraph(JSConstant, OpInfo(m_constantNull)));6146 set(bytecode.dst, addToGraph(JSConstant, OpInfo(m_constantNull))); 6115 6147 break; 6116 6148 } … … 6122 6154 } 6123 6155 case op_resolve_scope_for_hoisting_func_decl_in_eval: { 6124 int dst = currentInstruction[1].u.operand; 6125 int scope = currentInstruction[2].u.operand; 6126 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; 6127 6128 set(VirtualRegister(dst), addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(VirtualRegister(scope)))); 6156 auto bytecode = currentInstruction->as<OpResolveScopeForHoistingFuncDeclInEval>(); 6157 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 6158 6159 set(bytecode.dst, addToGraph(ResolveScopeForHoistingFuncDeclInEval, OpInfo(identifierNumber), get(bytecode.scope))); 6129 6160 6130 6161 NEXT_OPCODE(op_resolve_scope_for_hoisting_func_decl_in_eval); … … 6132 6163 6133 6164 case op_get_from_scope: { 6134 int dst = currentInstruction[1].u.operand;6135 int scope = currentInstruction[2].u.operand;6136 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[ currentInstruction[3].u.operand];6165 auto bytecode = currentInstruction->as<OpGetFromScope>(); 6166 auto metadata = bytecode.metadata(codeBlock); 6167 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.var]; 6137 6168 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 6138 ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();6169 ResolveType resolveType = metadata.getPutInfo.resolveType(); 6139 6170 6140 6171 Structure* structure = 0; … … 6144 6175 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 6145 6176 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) 6146 watchpoints = currentInstruction[5].u.watchpointSet;6177 watchpoints = metadata.watchpointSet; 6147 6178 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) 6148 structure = currentInstruction[5].u.structure.get();6149 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);6179 structure = metadata.structure.get(); 6180 operand = metadata.operand; 6150 6181 } 6151 6182 6152 6183 if 
(needsDynamicLookup(resolveType, op_get_from_scope)) { 6153 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand);6184 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand()); 6154 6185 SpeculatedType prediction = getPrediction(); 6155 set( VirtualRegister(dst),6156 addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get( VirtualRegister(scope))));6186 set(bytecode.dst, 6187 addToGraph(GetDynamicVar, OpInfo(opInfo1), OpInfo(prediction), get(bytecode.scope))); 6157 6188 NEXT_OPCODE(op_get_from_scope); 6158 6189 } … … 6171 6202 || status.numVariants() != 1 6172 6203 || status[0].structureSet().size() != 1) { 6173 set( VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));6204 set(bytecode.dst, addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(bytecode.scope))); 6174 6205 break; 6175 6206 } … … 6177 6208 Node* base = weakJSConstant(globalObject); 6178 6209 Node* result = load(prediction, base, identifierNumber, status[0]); 6179 addToGraph(Phantom, get( VirtualRegister(scope)));6180 set( VirtualRegister(dst), result);6210 addToGraph(Phantom, get(bytecode.scope)); 6211 set(bytecode.dst, result); 6181 6212 break; 6182 6213 } … … 6185 6216 case GlobalLexicalVar: 6186 6217 case GlobalLexicalVarWithVarInjectionChecks: { 6187 addToGraph(Phantom, get( VirtualRegister(scope)));6218 addToGraph(Phantom, get(bytecode.scope)); 6188 6219 WatchpointSet* watchpointSet; 6189 6220 ScopeOffset offset; … … 6241 6272 if (value) { 6242 6273 m_graph.watchpoints().addLazily(watchpointSet); 6243 set( VirtualRegister(dst), weakJSConstant(value));6274 set(bytecode.dst, weakJSConstant(value)); 6244 6275 break; 6245 6276 } … … 6255 6286 if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) 6256 6287 addToGraph(CheckNotEmpty, value); 6257 set( VirtualRegister(dst), value);6288 set(bytecode.dst, value); 6258 6289 break; 6259 6290 } … … 6261 6292 case ClosureVar: 6262 6293 case ClosureVarWithVarInjectionChecks: { 6263 Node* scopeNode = get( VirtualRegister(scope));6294 Node* scopeNode = get(bytecode.scope); 6264 6295 6265 6296 // Ideally we wouldn't have to do this Phantom. But: … … 6277 6308 // would recompile. But if we can fold it here, we avoid the exit. 
6278 6309 if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) { 6279 set( VirtualRegister(dst), weakJSConstant(value));6310 set(bytecode.dst, weakJSConstant(value)); 6280 6311 break; 6281 6312 } 6282 6313 SpeculatedType prediction = getPrediction(); 6283 set( VirtualRegister(dst),6314 set(bytecode.dst, 6284 6315 addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode)); 6285 6316 break; … … 6296 6327 6297 6328 case op_put_to_scope: { 6298 unsigned scope = currentInstruction[1].u.operand; 6299 unsigned identifierNumber = currentInstruction[2].u.operand; 6329 auto bytecode = currentInstruction->as<OpPutToScope>(); 6330 auto& metadata = bytecode.metadata(codeBlock); 6331 unsigned identifierNumber = bytecode.var; 6300 6332 if (identifierNumber != UINT_MAX) 6301 6333 identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber]; 6302 unsigned value = currentInstruction[3].u.operand; 6303 GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand); 6304 ResolveType resolveType = getPutInfo.resolveType(); 6334 ResolveType resolveType = metadata.getPutInfo.resolveType(); 6305 6335 UniquedStringImpl* uid; 6306 6336 if (identifierNumber != UINT_MAX) … … 6315 6345 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 6316 6346 if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) 6317 watchpoints = currentInstruction[5].u.watchpointSet;6347 watchpoints = metadata.watchpointSet; 6318 6348 else if (resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks) 6319 structure = currentInstruction[5].u.structure.get();6320 operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);6349 structure = metadata.structure.get(); 6350 operand = metadata.operand; 6321 6351 } 6322 6352 … … 6325 6355 if (needsDynamicLookup(resolveType, op_put_to_scope)) { 6326 6356 ASSERT(identifierNumber != UINT_MAX); 6327 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, currentInstruction[4].u.operand);6328 addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get( VirtualRegister(scope)), get(VirtualRegister(value)));6357 uint64_t opInfo1 = makeDynamicVarOpInfo(identifierNumber, metadata.getPutInfo.operand()); 6358 addToGraph(PutDynamicVar, OpInfo(opInfo1), OpInfo(), get(bytecode.scope), get(bytecode.value)); 6329 6359 NEXT_OPCODE(op_put_to_scope); 6330 6360 } … … 6341 6371 || status[0].kind() != PutByIdVariant::Replace 6342 6372 || status[0].structure().size() != 1) { 6343 addToGraph(PutById, OpInfo(identifierNumber), get( VirtualRegister(scope)), get(VirtualRegister(value)));6373 addToGraph(PutById, OpInfo(identifierNumber), get(bytecode.scope), get(bytecode.value)); 6344 6374 break; 6345 6375 } 6346 6376 Node* base = weakJSConstant(globalObject); 6347 store(base, identifierNumber, status[0], get( VirtualRegister(value)));6377 store(base, identifierNumber, status[0], get(bytecode.value)); 6348 6378 // Keep scope alive until after put. 
6349 addToGraph(Phantom, get( VirtualRegister(scope)));6379 addToGraph(Phantom, get(bytecode.scope)); 6350 6380 break; 6351 6381 } … … 6354 6384 case GlobalVar: 6355 6385 case GlobalVarWithVarInjectionChecks: { 6356 if (!isInitialization( getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {6386 if (!isInitialization(metadata.getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) { 6357 6387 SpeculatedType prediction = SpecEmpty; 6358 6388 Node* value = addToGraph(GetGlobalLexicalVariable, OpInfo(operand), OpInfo(prediction)); … … 6365 6395 ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet()); 6366 6396 } 6367 Node* valueNode = get( VirtualRegister(value));6397 Node* valueNode = get(bytecode.value); 6368 6398 addToGraph(PutGlobalVariable, OpInfo(operand), weakJSConstant(scopeObject), valueNode); 6369 6399 if (watchpoints && watchpoints->state() != IsInvalidated) { … … 6372 6402 } 6373 6403 // Keep scope alive until after put. 6374 addToGraph(Phantom, get( VirtualRegister(scope)));6404 addToGraph(Phantom, get(bytecode.scope)); 6375 6405 break; 6376 6406 } … … 6378 6408 case ClosureVar: 6379 6409 case ClosureVarWithVarInjectionChecks: { 6380 Node* scopeNode = get( VirtualRegister(scope));6381 Node* valueNode = get( VirtualRegister(value));6410 Node* scopeNode = get(bytecode.scope); 6411 Node* valueNode = get(bytecode.value); 6382 6412 6383 6413 addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode); … … 6442 6472 6443 6473 case op_create_lexical_environment: { 6444 VirtualRegister symbolTableRegister(currentInstruction[3].u.operand); 6445 VirtualRegister initialValueRegister(currentInstruction[4].u.operand); 6446 ASSERT(symbolTableRegister.isConstant() && initialValueRegister.isConstant()); 6447 FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(symbolTableRegister.offset())); 6448 FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(initialValueRegister.offset())); 6449 Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); 6474 auto bytecode = currentInstruction->as<OpCreateLexicalEnvironment>(); 6475 ASSERT(bytecode.symbolTable.isConstant() && bytecode.initialValue.isConstant()); 6476 FrozenValue* symbolTable = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.symbolTable.offset())); 6477 FrozenValue* initialValue = m_graph.freezeStrong(m_inlineStackTop->m_codeBlock->getConstant(bytecode.initialValue.offset())); 6478 Node* scope = get(bytecode.scope); 6450 6479 Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), OpInfo(initialValue), scope); 6451 set( VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);6480 set(bytecode.dst, lexicalEnvironment); 6452 6481 NEXT_OPCODE(op_create_lexical_environment); 6453 6482 } 6454 6483 6455 6484 case op_push_with_scope: { 6456 Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); 6457 Node* object = get(VirtualRegister(currentInstruction[3].u.operand)); 6458 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(PushWithScope, currentScope, object)); 6485 auto bytecode = currentInstruction->as<OpPushWithScope>(); 6486 Node* currentScope = get(bytecode.currentScope); 6487 Node* object = get(bytecode.newScope); 6488 set(bytecode.dst, addToGraph(PushWithScope, currentScope, object)); 6459 6489 
NEXT_OPCODE(op_push_with_scope); 6460 6490 } 6461 6491 6462 6492 case op_get_parent_scope: { 6463 Node* currentScope = get(VirtualRegister(currentInstruction[2].u.operand)); 6493 auto bytecode = currentInstruction->as<OpGetParentScope>(); 6494 Node* currentScope = get(bytecode.scope); 6464 6495 Node* newScope = addToGraph(SkipScope, currentScope); 6465 set( VirtualRegister(currentInstruction[1].u.operand), newScope);6496 set(bytecode.dst, newScope); 6466 6497 addToGraph(Phantom, currentScope); 6467 6498 NEXT_OPCODE(op_get_parent_scope); … … 6473 6504 // loads from the scope register later, as that would prevent the DFG from tracking the 6474 6505 // bytecode-level liveness of the scope register. 6506 auto bytecode = currentInstruction->as<OpGetScope>(); 6475 6507 Node* callee = get(VirtualRegister(CallFrameSlot::callee)); 6476 6508 Node* result; … … 6479 6511 else 6480 6512 result = addToGraph(GetScope, callee); 6481 set( VirtualRegister(currentInstruction[1].u.operand), result);6513 set(bytecode.dst, result); 6482 6514 NEXT_OPCODE(op_get_scope); 6483 6515 } 6484 6516 6485 6517 case op_argument_count: { 6518 auto bytecode = currentInstruction->as<OpArgumentCount>(); 6486 6519 Node* sub = addToGraph(ArithSub, OpInfo(Arith::Unchecked), OpInfo(SpecInt32Only), getArgumentCount(), addToGraph(JSConstant, OpInfo(m_constantOne))); 6487 6488 set(VirtualRegister(currentInstruction[1].u.operand), sub); 6520 set(bytecode.dst, sub); 6489 6521 NEXT_OPCODE(op_argument_count); 6490 6522 } 6491 6523 6492 6524 case op_create_direct_arguments: { 6525 auto bytecode = currentInstruction->as<OpCreateDirectArguments>(); 6493 6526 noticeArgumentsUse(); 6494 6527 Node* createArguments = addToGraph(CreateDirectArguments); 6495 set( VirtualRegister(currentInstruction[1].u.operand), createArguments);6528 set(bytecode.dst, createArguments); 6496 6529 NEXT_OPCODE(op_create_direct_arguments); 6497 6530 } 6498 6531 6499 6532 case op_create_scoped_arguments: { 6533 auto bytecode = currentInstruction->as<OpCreateScopedArguments>(); 6500 6534 noticeArgumentsUse(); 6501 Node* createArguments = addToGraph(CreateScopedArguments, get( VirtualRegister(currentInstruction[2].u.operand)));6502 set( VirtualRegister(currentInstruction[1].u.operand), createArguments);6535 Node* createArguments = addToGraph(CreateScopedArguments, get(bytecode.scope)); 6536 set(bytecode.dst, createArguments); 6503 6537 NEXT_OPCODE(op_create_scoped_arguments); 6504 6538 } 6505 6539 6506 6540 case op_create_cloned_arguments: { 6541 auto bytecode = currentInstruction->as<OpCreateClonedArguments>(); 6507 6542 noticeArgumentsUse(); 6508 6543 Node* createArguments = addToGraph(CreateClonedArguments); 6509 set( VirtualRegister(currentInstruction[1].u.operand), createArguments);6544 set(bytecode.dst, createArguments); 6510 6545 NEXT_OPCODE(op_create_cloned_arguments); 6511 6546 } 6512 6547 6513 6548 case op_get_from_arguments: { 6514 set(VirtualRegister(currentInstruction[1].u.operand), 6549 auto bytecode = currentInstruction->as<OpGetFromArguments>(); 6550 set(bytecode.dst, 6515 6551 addToGraph( 6516 6552 GetFromArguments, 6517 OpInfo( currentInstruction[3].u.operand),6553 OpInfo(bytecode.index), 6518 6554 OpInfo(getPrediction()), 6519 get( VirtualRegister(currentInstruction[2].u.operand))));6555 get(bytecode.arguments))); 6520 6556 NEXT_OPCODE(op_get_from_arguments); 6521 6557 } 6522 6558 6523 6559 case op_put_to_arguments: { 6560 auto bytecode = currentInstruction->as<OpPutToArguments>(); 6524 6561 addToGraph( 6525 6562 PutToArguments, 6526 OpInfo( 
currentInstruction[2].u.operand),6527 get( VirtualRegister(currentInstruction[1].u.operand)),6528 get( VirtualRegister(currentInstruction[3].u.operand)));6563 OpInfo(bytecode.index), 6564 get(bytecode.arguments), 6565 get(bytecode.value)); 6529 6566 NEXT_OPCODE(op_put_to_arguments); 6530 6567 } 6531 6568 6532 6569 case op_get_argument: { 6570 auto bytecode = currentInstruction->as<OpGetArgument>(); 6533 6571 InlineCallFrame* inlineCallFrame = this->inlineCallFrame(); 6534 6572 Node* argument; 6535 int32_t argumentIndexIncludingThis = currentInstruction[2].u.operand;6573 int32_t argumentIndexIncludingThis = bytecode.index; 6536 6574 if (inlineCallFrame && !inlineCallFrame->isVarargs()) { 6537 6575 int32_t argumentCountIncludingThisWithFixup = inlineCallFrame->argumentsWithFixup.size(); … … 6542 6580 } else 6543 6581 argument = addToGraph(GetArgument, OpInfo(argumentIndexIncludingThis), OpInfo(getPrediction())); 6544 set( VirtualRegister(currentInstruction[1].u.operand), argument);6582 set(bytecode.dst, argument); 6545 6583 NEXT_OPCODE(op_get_argument); 6546 6584 } 6547 6585 case op_new_async_generator_func: 6586 handleNewFunc(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFunc>()); 6587 NEXT_OPCODE(op_new_async_generator_func); 6548 6588 case op_new_func: 6589 handleNewFunc(NewFunction, currentInstruction->as<OpNewFunc>()); 6590 NEXT_OPCODE(op_new_func); 6549 6591 case op_new_generator_func: 6550 case op_new_async_func: { 6551 FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand); 6552 FrozenValue* frozen = m_graph.freezeStrong(decl); 6553 NodeType op; 6554 switch (opcodeID) { 6555 case op_new_generator_func: 6556 op = NewGeneratorFunction; 6557 break; 6558 case op_new_async_func: 6559 op = NewAsyncFunction; 6560 break; 6561 case op_new_async_generator_func: 6562 op = NewAsyncGeneratorFunction; 6563 break; 6564 default: 6565 op = NewFunction; 6566 } 6567 Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); 6568 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); 6569 // Ideally we wouldn't have to do this Phantom. But: 6570 // 6571 // For the constant case: we must do it because otherwise we would have no way of knowing 6572 // that the scope is live at OSR here. 6573 // 6574 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation 6575 // won't be able to handle an Undefined scope. 
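// (The handleNewFunc calls threaded through this hunk fold the four function
// flavors into one templated helper; a sketch of the shape it plausibly has,
// with the body inferred from the removed code here and the operand names
// assumed:
//     template<typename Op>
//     void ByteCodeParser::handleNewFunc(NodeType op, Op bytecode)
//     {
//         FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.functionDecl);
//         FrozenValue* frozen = m_graph.freezeStrong(decl);
//         Node* scope = get(bytecode.scope);
//         set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope));
//         addToGraph(Phantom, scope); // scope must stay live for OSR exit, per the comment above
//     }
// handleNewFuncExp would differ only in fetching functionExpr() instead.)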
6576 addToGraph(Phantom, scope); 6577 static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_generator_func), "The length of op_new_func should be equal to one of op_new_generator_func"); 6578 static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_func), "The length of op_new_func should be equal to one of op_new_async_func"); 6579 static_assert(OPCODE_LENGTH(op_new_func) == OPCODE_LENGTH(op_new_async_generator_func), "The length of op_new_func should be equal to one of op_new_async_generator_func"); 6580 NEXT_OPCODE(op_new_func); 6581 } 6592 handleNewFunc(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFunc>()); 6593 NEXT_OPCODE(op_new_generator_func); 6594 case op_new_async_func: 6595 handleNewFunc(NewAsyncFunction, currentInstruction->as<OpNewAsyncFunc>()); 6596 NEXT_OPCODE(op_new_async_func); 6582 6597 6583 6598 case op_new_func_exp: 6599 handleNewFuncExp(NewFunction, currentInstruction->as<OpNewFuncExp>()); 6600 NEXT_OPCODE(op_new_func_exp); 6584 6601 case op_new_generator_func_exp: 6602 handleNewFuncExp(NewGeneratorFunction, currentInstruction->as<OpNewGeneratorFuncExp>()); 6603 NEXT_OPCODE(op_new_generator_func_exp); 6585 6604 case op_new_async_generator_func_exp: 6586 case op_new_async_func_exp: { 6587 FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand); 6588 FrozenValue* frozen = m_graph.freezeStrong(expr); 6589 NodeType op; 6590 switch (opcodeID) { 6591 case op_new_generator_func_exp: 6592 op = NewGeneratorFunction; 6593 break; 6594 case op_new_async_func_exp: 6595 op = NewAsyncFunction; 6596 break; 6597 case op_new_async_generator_func_exp: 6598 op = NewAsyncGeneratorFunction; 6599 break; 6600 default: 6601 op = NewFunction; 6602 } 6603 Node* scope = get(VirtualRegister(currentInstruction[2].u.operand)); 6604 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(op, OpInfo(frozen), scope)); 6605 // Ideally we wouldn't have to do this Phantom. But: 6606 // 6607 // For the constant case: we must do it because otherwise we would have no way of knowing 6608 // that the scope is live at OSR here. 6609 // 6610 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation 6611 // won't be able to handle an Undefined scope. 
6612 addToGraph(Phantom, scope); 6613 static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_generator_func_exp"); 6614 static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); 6615 static_assert(OPCODE_LENGTH(op_new_func_exp) == OPCODE_LENGTH(op_new_async_generator_func_exp), "The length of op_new_func_exp should be equal to one of op_new_async_func_exp"); 6616 NEXT_OPCODE(op_new_func_exp); 6617 } 6605 handleNewFuncExp(NewAsyncGeneratorFunction, currentInstruction->as<OpNewAsyncGeneratorFuncExp>()); 6606 NEXT_OPCODE(op_new_async_generator_func_exp); 6607 case op_new_async_func_exp: 6608 handleNewFuncExp(NewAsyncFunction, currentInstruction->as<OpNewAsyncFuncExp>()); 6609 NEXT_OPCODE(op_new_async_func_exp); 6618 6610 6619 6611 case op_set_function_name: { 6620 Node* func = get(VirtualRegister(currentInstruction[1].u.operand)); 6621 Node* name = get(VirtualRegister(currentInstruction[2].u.operand)); 6612 auto bytecode = currentInstruction->as<OpSetFunctionName>(); 6613 Node* func = get(bytecode.function); 6614 Node* name = get(bytecode.name); 6622 6615 addToGraph(SetFunctionName, func, name); 6623 6616 NEXT_OPCODE(op_set_function_name); … … 6625 6618 6626 6619 case op_typeof: { 6627 set(VirtualRegister(currentInstruction[1].u.operand),6628 addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));6620 auto bytecode = currentInstruction->as<OpTypeof>(); 6621 set(bytecode.dst, addToGraph(TypeOf, get(bytecode.value))); 6629 6622 NEXT_OPCODE(op_typeof); 6630 6623 } 6631 6624 6632 6625 case op_to_number: { 6626 auto bytecode = currentInstruction->as<OpToNumber>(); 6633 6627 SpeculatedType prediction = getPrediction(); 6634 Node* value = get( VirtualRegister(currentInstruction[2].u.operand));6635 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value));6628 Node* value = get(bytecode.operand); 6629 set(bytecode.dst, addToGraph(ToNumber, OpInfo(0), OpInfo(prediction), value)); 6636 6630 NEXT_OPCODE(op_to_number); 6637 6631 } 6638 6632 6639 6633 case op_to_string: { 6640 Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); 6641 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value)); 6634 auto bytecode = currentInstruction->as<OpToString>(); 6635 Node* value = get(bytecode.operand); 6636 set(bytecode.dst, addToGraph(ToString, value)); 6642 6637 NEXT_OPCODE(op_to_string); 6643 6638 } 6644 6639 6645 6640 case op_to_object: { 6641 auto bytecode = currentInstruction->as<OpToObject>(); 6646 6642 SpeculatedType prediction = getPrediction(); 6647 Node* value = get( VirtualRegister(currentInstruction[2].u.operand));6648 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[ currentInstruction[3].u.operand];6649 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value));6643 Node* value = get(bytecode.operand); 6644 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.message]; 6645 set(bytecode.dst, addToGraph(ToObject, OpInfo(identifierNumber), OpInfo(prediction), value)); 6650 6646 NEXT_OPCODE(op_to_object); 6651 6647 } 6652 6648 6653 6649 case op_in_by_val: { 6654 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpInByValShape>(currentInstruction), Array::Read);6655 
set(VirtualRegister(currentInstruction[1].u.operand),6656 addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));6650 auto bytecode = currentInstruction->as<OpInByVal>(); 6651 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read); 6652 set(bytecode.dst, addToGraph(InByVal, OpInfo(arrayMode.asWord()), get(bytecode.base), get(bytecode.property))); 6657 6653 NEXT_OPCODE(op_in_by_val); 6658 6654 } 6659 6655 6660 6656 case op_in_by_id: { 6661 Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); 6662 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; 6657 auto bytecode = currentInstruction->as<OpInById>(); 6658 Node* base = get(bytecode.base); 6659 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 6663 6660 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 6664 6661 … … 6689 6686 6690 6687 Node* match = addToGraph(MatchStructure, OpInfo(data), base); 6691 set( VirtualRegister(currentInstruction[1].u.operand), match);6688 set(bytecode.dst, match); 6692 6689 NEXT_OPCODE(op_in_by_id); 6693 6690 } 6694 6691 } 6695 6692 6696 set( VirtualRegister(currentInstruction[1].u.operand), addToGraph(InById, OpInfo(identifierNumber), base));6693 set(bytecode.dst, addToGraph(InById, OpInfo(identifierNumber), base)); 6697 6694 NEXT_OPCODE(op_in_by_id); 6698 6695 } 6699 6696 6700 6697 case op_get_enumerable_length: { 6701 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,6702 get(VirtualRegister(currentInstruction[2].u.operand))));6698 auto bytecode = currentInstruction->as<OpGetEnumerableLength>(); 6699 set(bytecode.dst, addToGraph(GetEnumerableLength, get(bytecode.base))); 6703 6700 NEXT_OPCODE(op_get_enumerable_length); 6704 6701 } 6705 6702 6706 6703 case op_has_generic_property: { 6707 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty, 6708 get(VirtualRegister(currentInstruction[2].u.operand)), 6709 get(VirtualRegister(currentInstruction[3].u.operand)))); 6704 auto bytecode = currentInstruction->as<OpHasGenericProperty>(); 6705 set(bytecode.dst, addToGraph(HasGenericProperty, get(bytecode.base), get(bytecode.property))); 6710 6706 NEXT_OPCODE(op_has_generic_property); 6711 6707 } 6712 6708 6713 6709 case op_has_structure_property: { 6714 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty, 6715 get(VirtualRegister(currentInstruction[2].u.operand)), 6716 get(VirtualRegister(currentInstruction[3].u.operand)), 6717 get(VirtualRegister(currentInstruction[4].u.operand)))); 6710 auto bytecode = currentInstruction->as<OpHasStructureProperty>(); 6711 set(bytecode.dst, addToGraph(HasStructureProperty, 6712 get(bytecode.base), 6713 get(bytecode.property), 6714 get(bytecode.enumerator))); 6718 6715 NEXT_OPCODE(op_has_structure_property); 6719 6716 } 6720 6717 6721 6718 case op_has_indexed_property: { 6722 Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); 6723 ArrayMode arrayMode = getArrayMode(arrayProfileFor<OpHasIndexedPropertyShape>(currentInstruction), Array::Read); 6724 Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); 6719 auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); 6720 Node* base = get(bytecode.base); 6721 ArrayMode arrayMode = getArrayMode(bytecode.metadata(codeBlock).arrayProfile, Array::Read); 6722 
Node* property = get(bytecode.property); 6725 6723 Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), OpInfo(static_cast<uint32_t>(PropertySlot::InternalMethodType::GetOwnProperty)), base, property); 6726 set( VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);6724 set(bytecode.dst, hasIterableProperty); 6727 6725 NEXT_OPCODE(op_has_indexed_property); 6728 6726 } 6729 6727 6730 6728 case op_get_direct_pname: { 6729 auto bytecode = currentInstruction->as<OpGetDirectPname>(); 6731 6730 SpeculatedType prediction = getPredictionWithoutOSRExit(); 6732 6731 6733 Node* base = get( VirtualRegister(currentInstruction[2].u.operand));6734 Node* property = get( VirtualRegister(currentInstruction[3].u.operand));6735 Node* index = get( VirtualRegister(currentInstruction[4].u.operand));6736 Node* enumerator = get( VirtualRegister(currentInstruction[5].u.operand));6732 Node* base = get(bytecode.base); 6733 Node* property = get(bytecode.property); 6734 Node* index = get(bytecode.index); 6735 Node* enumerator = get(bytecode.enumerator); 6737 6736 6738 6737 addVarArgChild(base); … … 6740 6739 addVarArgChild(index); 6741 6740 addVarArgChild(enumerator); 6742 set(VirtualRegister(currentInstruction[1].u.operand), 6743 addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); 6741 set(bytecode.dst, addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction))); 6744 6742 6745 6743 NEXT_OPCODE(op_get_direct_pname); … … 6747 6745 6748 6746 case op_get_property_enumerator: { 6749 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator,6750 get(VirtualRegister(currentInstruction[2].u.operand))));6747 auto bytecode = currentInstruction->as<OpGetPropertyEnumerator>(); 6748 set(bytecode.dst, addToGraph(GetPropertyEnumerator, get(bytecode.base))); 6751 6749 NEXT_OPCODE(op_get_property_enumerator); 6752 6750 } 6753 6751 6754 6752 case op_enumerator_structure_pname: { 6755 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname, 6756 get(VirtualRegister(currentInstruction[2].u.operand)), 6757 get(VirtualRegister(currentInstruction[3].u.operand)))); 6753 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); 6754 set(bytecode.dst, addToGraph(GetEnumeratorStructurePname, 6755 get(bytecode.enumerator), 6756 get(bytecode.index))); 6758 6757 NEXT_OPCODE(op_enumerator_structure_pname); 6759 6758 } 6760 6759 6761 6760 case op_enumerator_generic_pname: { 6762 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname, 6763 get(VirtualRegister(currentInstruction[2].u.operand)), 6764 get(VirtualRegister(currentInstruction[3].u.operand)))); 6761 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); 6762 set(bytecode.dst, addToGraph(GetEnumeratorGenericPname, 6763 get(bytecode.enumerator), 6764 get(bytecode.index))); 6765 6765 NEXT_OPCODE(op_enumerator_generic_pname); 6766 6766 } 6767 6767 6768 6768 case op_to_index_string: { 6769 set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,6770 get(VirtualRegister(currentInstruction[2].u.operand))));6769 auto bytecode = currentInstruction->as<OpToIndexString>(); 6770 set(bytecode.dst, addToGraph(ToIndexString, get(bytecode.index))); 6771 6771 NEXT_OPCODE(op_to_index_string); 6772 6772 } 6773 6773 6774 6774 case op_log_shadow_chicken_prologue: { 6775 auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); 6775 6776 if 
(!m_inlineStackTop->m_inlineCallFrame) 6776 addToGraph(LogShadowChickenPrologue, get( VirtualRegister(currentInstruction[1].u.operand)));6777 addToGraph(LogShadowChickenPrologue, get(bytecode.scope)); 6777 6778 NEXT_OPCODE(op_log_shadow_chicken_prologue); 6778 6779 } 6779 6780 6780 6781 case op_log_shadow_chicken_tail: { 6782 auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); 6781 6783 if (!m_inlineStackTop->m_inlineCallFrame) { 6782 6784 // FIXME: The right solution for inlining is to elide these whenever the tail call 6783 6785 // ends up being inlined. 6784 6786 // https://bugs.webkit.org/show_bug.cgi?id=155686 6785 addToGraph(LogShadowChickenTail, get( VirtualRegister(currentInstruction[1].u.operand)), get(VirtualRegister(currentInstruction[2].u.operand)));6787 addToGraph(LogShadowChickenTail, get(bytecode.thisValue), get(bytecode.scope)); 6786 6788 } 6787 6789 NEXT_OPCODE(op_log_shadow_chicken_tail); … … 6987 6989 } 6988 6990 6989 Vector< unsigned, 32> jumpTargets;6991 Vector<InstructionStream::Offset, 32> jumpTargets; 6990 6992 computePreciseJumpTargets(codeBlock, jumpTargets); 6991 6993 if (Options::dumpBytecodeAtDFGTime()) { … … 7042 7044 7043 7045 VERBOSE_LOG("Done parsing ", *codeBlock, " (fell off end)\n"); 7046 } 7047 7048 template <typename Bytecode> 7049 void ByteCodeParser::handlePutByVal(Bytecode bytecode, unsigned instructionSize) 7050 { 7051 Node* base = get(bytecode.base); 7052 Node* property = get(bytecode.property); 7053 Node* value = get(bytecode.value); 7054 bool isDirect = Bytecode::opcodeID == op_put_by_val_direct; 7055 bool compiledAsPutById = false; 7056 { 7057 unsigned identifierNumber = std::numeric_limits<unsigned>::max(); 7058 PutByIdStatus putByIdStatus; 7059 { 7060 ConcurrentJSLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); 7061 ByValInfo* byValInfo = m_inlineStackTop->m_baselineMap.get(CodeOrigin(currentCodeOrigin().bytecodeIndex)).byValInfo; 7062 // FIXME: When the bytecode is not compiled in the baseline JIT, byValInfo becomes null. 7063 // At that time, there is no information. 7064 if (byValInfo 7065 && byValInfo->stubInfo 7066 && !byValInfo->tookSlowPath 7067 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIdent) 7068 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadType) 7069 && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) { 7070 compiledAsPutById = true; 7071 identifierNumber = m_graph.identifiers().ensure(byValInfo->cachedId.impl()); 7072 UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber]; 7073 7074 if (Symbol* symbol = byValInfo->cachedSymbol.get()) { 7075 FrozenValue* frozen = m_graph.freezeStrong(symbol); 7076 addToGraph(CheckCell, OpInfo(frozen), property); 7077 } else { 7078 ASSERT(!uid->isSymbol()); 7079 addToGraph(CheckStringIdent, OpInfo(uid), property); 7080 } 7081 7082 putByIdStatus = PutByIdStatus::computeForStubInfo( 7083 locker, m_inlineStackTop->m_profiledBlock, 7084 byValInfo->stubInfo, currentCodeOrigin(), uid); 7085 7086 } 7087 } 7088 7089 if (compiledAsPutById) 7090 handlePutById(base, identifierNumber, value, putByIdStatus, isDirect, instructionSize); 7091 } 7092 7093 if (!compiledAsPutById) { 7094 ArrayMode arrayMode = getArrayMode(bytecode.metadata(m_inlineStackTop->m_codeBlock).arrayProfile, Array::Write); 7095 7096 addVarArgChild(base); 7097 addVarArgChild(property); 7098 addVarArgChild(value); 7099 addVarArgChild(0); // Leave room for property storage. 7100 addVarArgChild(0); // Leave room for length. 
7101 addToGraph(Node::VarArg, isDirect ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); 7102 } 7103 } 7104 7105 template <typename Bytecode> 7106 void ByteCodeParser::handlePutAccessorById(NodeType op, Bytecode bytecode) 7107 { 7108 Node* base = get(bytecode.base); 7109 unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[bytecode.property]; 7110 Node* accessor = get(bytecode.accessor); 7111 addToGraph(op, OpInfo(identifierNumber), OpInfo(bytecode.attributes), base, accessor); 7112 } 7113 7114 template <typename Bytecode> 7115 void ByteCodeParser::handlePutAccessorByVal(NodeType op, Bytecode bytecode) 7116 { 7117 Node* base = get(bytecode.base); 7118 Node* subscript = get(bytecode.property); 7119 Node* accessor = get(bytecode.accessor); 7120 addToGraph(op, OpInfo(bytecode.attributes), base, subscript, accessor); 7121 } 7122 7123 template <typename Bytecode> 7124 void ByteCodeParser::handleNewFunc(NodeType op, Bytecode bytecode) 7125 { 7126 FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(bytecode.functionDecl); 7127 FrozenValue* frozen = m_graph.freezeStrong(decl); 7128 Node* scope = get(bytecode.scope); 7129 set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); 7130 // Ideally we wouldn't have to do this Phantom. But: 7131 // 7132 // For the constant case: we must do it because otherwise we would have no way of knowing 7133 // that the scope is live at OSR here. 7134 // 7135 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation 7136 // won't be able to handle an Undefined scope. 7137 addToGraph(Phantom, scope); 7138 } 7139 7140 template <typename Bytecode> 7141 void ByteCodeParser::handleNewFuncExp(NodeType op, Bytecode bytecode) 7142 { 7143 FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(bytecode.functionDecl); 7144 FrozenValue* frozen = m_graph.freezeStrong(expr); 7145 Node* scope = get(bytecode.scope); 7146 set(bytecode.dst, addToGraph(op, OpInfo(frozen), scope)); 7147 // Ideally we wouldn't have to do this Phantom. But: 7148 // 7149 // For the constant case: we must do it because otherwise we would have no way of knowing 7150 // that the scope is live at OSR here. 7151 // 7152 // For the non-constant case: NewFunction could be DCE'd, but baseline's implementation 7153 // won't be able to handle an Undefined scope. 7154 addToGraph(Phantom, scope); 7044 7155 } 7045 7156 … … 7132 7243 const Vector<ArgumentPosition*>& arguments = m_inlineCallFrameToArgumentPositions.get(inlineCallFrame); 7133 7244 arguments[argument.toArgument()]->addVariable(variable); 7134 7245 } 7135 7136 7246 insertionSet.insertNode(block->size(), SpecNone, op, endOrigin, OpInfo(variable)); 7137 7247 }; 7138 7248 auto addFlushDirect = [&] (InlineCallFrame* inlineCallFrame, VirtualRegister operand) { -
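A note on the ByteCodeParser hunks above: the recurring rewrite replaces raw operand slots (currentInstruction[1].u.operand) with generated per-opcode structs obtained via currentInstruction->as<OpToString>() and friends, whose fields name the operands (dst, scope, functionDecl) and whose metadata() reaches the profiling side table. The following is a minimal standalone sketch of the as<Op>() idea only; the struct layout, the byte encoding, and names like OpAdd are illustrative stand-ins, not JSC's actual generated code.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <iostream>

    // Illustrative stand-in: each opcode gets a struct naming its operands.
    enum OpcodeID : uint8_t { op_add, op_ret };

    struct OpAdd {
        static constexpr OpcodeID opcodeID = op_add;
        uint8_t dst, lhs, rhs; // virtual register numbers
    };

    struct Instruction {
        OpcodeID opcodeID() const { return static_cast<OpcodeID>(raw[0]); }

        template<typename Op>
        bool is() const { return opcodeID() == Op::opcodeID; }

        // Decode the operand bytes that follow the opcode into a typed struct.
        template<typename Op>
        Op as() const
        {
            assert(is<Op>());
            Op op;
            std::memcpy(&op, raw + 1, sizeof(Op));
            return op;
        }

        const uint8_t* raw;
    };

    int main()
    {
        const uint8_t stream[] = { op_add, /* dst */ 1, /* lhs */ 2, /* rhs */ 3 };
        Instruction currentInstruction { stream };
        auto bytecode = currentInstruction.as<OpAdd>(); // instead of currentInstruction[1].u.operand
        std::cout << "r" << int(bytecode.dst) << " = r" << int(bytecode.lhs) << " + r" << int(bytecode.rhs) << "\n";
    }

Decoding into a value struct keeps the stream itself read-only, which is what lets so many signatures in the hunks below switch to const Instruction*.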
trunk/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
r237486 r237547 104 104 } 105 105 106 CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction* pc)106 CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock, const Instruction* pc) 107 107 { 108 108 UNUSED_PARAM(codeBlock); // This function does some bytecode parsing. Ordinarily bytecode parsing requires the owning CodeBlock. It's sort of strange that we don't use it here right now. … … 110 110 111 111 switch (opcodeID) { 112 case op_wide: 113 RELEASE_ASSERT_NOT_REACHED(); 112 114 case op_enter: 113 115 case op_to_this: … … 165 167 case op_try_get_by_id: 166 168 case op_get_by_id: 167 case op_get_by_id_proto_load:168 case op_get_by_id_unset:169 169 case op_get_by_id_with_this: 170 170 case op_get_by_id_direct: 171 171 case op_get_by_val_with_this: 172 case op_get_array_length:173 172 case op_put_by_id: 174 173 case op_put_by_id_with_this: … … 303 302 CapabilityLevel capabilityLevel(CodeBlock* codeBlock) 304 303 { 305 Instruction* instructionsBegin = codeBlock->instructions().begin();306 unsigned instructionCount = codeBlock->instructions().size();307 304 CapabilityLevel result = CanCompileAndInline; 308 305 309 for ( unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount;) {310 switch ( Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {306 for (const auto& instruction : codeBlock->instructions()) { 307 switch (instruction->opcodeID()) { 311 308 #define DEFINE_OP(opcode, length) \ 312 309 case opcode: { \ 313 CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instruction sBegin + bytecodeOffset)); \310 CapabilityLevel newResult = leastUpperBound(result, capabilityLevel(opcode, codeBlock, instruction.ptr())); \ 314 311 if (newResult != result) { \ 315 312 debugFail(codeBlock, opcode, newResult); \ 316 313 result = newResult; \ 317 314 } \ 318 bytecodeOffset += length; \319 315 break; \ 320 316 } -
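A side note on the loop rewrite above: capabilityLevel() now ranges over codeBlock->instructions() instead of advancing a bytecodeOffset by a static length table, because with the new format a narrow and a wide encoding of the same opcode have different sizes. A rough standalone model of such an iterator, with made-up widths rather than the real encoding:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum OpcodeID : uint8_t { op_enter, op_add, op_ret };

    // Made-up per-opcode widths (opcode byte + operand bytes).
    constexpr uint8_t width[] = { 1, 4, 2 };

    struct InstructionStream {
        std::vector<uint8_t> bytes;

        struct Ref {
            const uint8_t* p;
            OpcodeID opcodeID() const { return static_cast<OpcodeID>(*p); }
            uint8_t size() const { return width[*p]; }
        };

        struct iterator {
            const uint8_t* p;
            Ref operator*() const { return Ref { p }; }
            iterator& operator++() { p += width[*p]; return *this; } // step over operands
            bool operator!=(const iterator& other) const { return p != other.p; }
        };

        iterator begin() const { return { bytes.data() }; }
        iterator end() const { return { bytes.data() + bytes.size() }; }
    };

    int main()
    {
        InstructionStream stream { { op_enter, op_add, 0, 1, 2, op_ret, 0 } };
        for (const auto& instruction : stream) // as in the new capabilityLevel() loop
            std::cout << "opcode " << int(instruction.opcodeID()) << ", size " << int(instruction.size()) << "\n";
    }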
trunk/Source/JavaScriptCore/dfg/DFGCapabilities.h
r237486 r237547 46 46 bool canUseOSRExitFuzzing(CodeBlock*); 47 47 48 inline CapabilityLevel capabilityLevel(OpcodeID opcodeID, CodeBlock* codeBlock,Instruction* pc);48 inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, const Instruction* pc); 49 49 50 50 CapabilityLevel capabilityLevel(CodeBlock*); … … 59 59 inline bool canUseOSRExitFuzzing(CodeBlock*) { return false; } 60 60 61 inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, Instruction*) { return CannotCompile; }61 inline CapabilityLevel capabilityLevel(OpcodeID, CodeBlock*, const Instruction*) { return CannotCompile; } 62 62 inline CapabilityLevel capabilityLevel(CodeBlock*) { return CannotCompile; } 63 63 #endif // ENABLE(DFG_JIT) -
trunk/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
r237486 r237547 391 391 return nullptr; 392 392 393 ASSERT(Interpreter::getOpcodeID(exec->codeBlock()->instructions()[exec->bytecodeOffset()].u.opcode) == op_catch); 394 ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(exec->codeBlock()->instructions()[exec->bytecodeOffset() + 3].u.pointer); 393 auto instruction = exec->codeBlock()->instructions().at(exec->bytecodeOffset()); 394 ASSERT(instruction->is<OpCatch>()); 395 ValueProfileAndOperandBuffer* buffer = instruction->as<OpCatch>().metadata(exec).buffer; 395 396 JSValue* dataBuffer = reinterpret_cast<JSValue*>(dfgCommon->catchOSREntryBuffer->dataBuffer()); 396 397 unsigned index = 0; -
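Worth spelling out what instruction->as<OpCatch>().metadata(exec).buffer replaces: the old code read a mutable pointer straight out of an instruction slot (instructions()[offset + 3].u.pointer). In the new format the instruction stream is immutable, and each op's mutable state lives in a per-CodeBlock metadata table. A toy model of that split; the types and the CodeBlock-keyed accessor are illustrative (the diff shows the real accessor also taking an ExecState or a CodeBlock):

    #include <cassert>
    #include <vector>

    struct ValueProfileAndOperandBuffer; // opaque stand-in

    struct OpCatchMetadata {
        ValueProfileAndOperandBuffer* buffer = nullptr; // mutable profiling state
    };

    struct CodeBlock {
        std::vector<OpCatchMetadata> opCatchMetadata; // side table, one entry per op_catch
    };

    struct OpCatch {
        unsigned metadataID; // compact index decoded from the instruction's operands

        OpCatchMetadata& metadata(CodeBlock& codeBlock) const
        {
            return codeBlock.opCatchMetadata[metadataID];
        }
    };

    int main()
    {
        CodeBlock codeBlock;
        codeBlock.opCatchMetadata.resize(1);

        OpCatch bytecode { 0 };
        // Writes go to the side table; the bytecode stream itself stays const.
        bytecode.metadata(codeBlock).buffer = nullptr;
        assert(bytecode.metadata(codeBlock).buffer == nullptr);
    }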
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r237486 r237547 3864 3864 CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); 3865 3865 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); 3866 Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex];3866 const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); 3867 3867 JITAddIC* addIC = m_jit.codeBlock()->addJITAddIC(arithProfile, instruction); 3868 3868 auto repatchingFunction = operationValueAddOptimize; … … 3888 3888 CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); 3889 3889 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); 3890 Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex];3890 const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); 3891 3891 JITSubIC* subIC = m_jit.codeBlock()->addJITSubIC(arithProfile, instruction); 3892 3892 auto repatchingFunction = operationValueSubOptimize; … … 4562 4562 CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); 4563 4563 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); 4564 Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex];4564 const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); 4565 4565 JITNegIC* negIC = m_jit.codeBlock()->addJITNegIC(arithProfile, instruction); 4566 4566 auto repatchingFunction = operationArithNegateOptimize; … … 4904 4904 CodeBlock* baselineCodeBlock = m_jit.graph().baselineCodeBlockFor(node->origin.semantic); 4905 4905 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(node->origin.semantic.bytecodeIndex); 4906 Instruction* instruction = &baselineCodeBlock->instructions()[node->origin.semantic.bytecodeIndex];4906 const Instruction* instruction = baselineCodeBlock->instructions().at(node->origin.semantic.bytecodeIndex).ptr(); 4907 4907 JITMulIC* mulIC = m_jit.codeBlock()->addJITMulIC(arithProfile, instruction); 4908 4908 auto repatchingFunction = operationValueMulOptimize; -
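The repeated change in this file, and in the FTL below, is mechanical but easy to misread: indexing instructions() no longer yields a raw Instruction*, so call sites go through at(offset), which returns a ref-like handle, and call ptr() to get the const Instruction* that the math ICs now store. A minimal sketch of that shape, not the real InstructionStream API:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    struct Instruction { unsigned char opcode; };

    struct InstructionStream {
        std::vector<Instruction> instructions;

        struct Ref {
            const Instruction* instruction;
            const Instruction* ptr() const { return instruction; }
        };

        // Maps a bytecode index to a handle; ptr() unwraps it when a raw
        // const pointer must be stored, e.g. in an arithmetic IC.
        Ref at(size_t index) const { return Ref { &instructions[index] }; }
    };

    int main()
    {
        InstructionStream stream { { { 7 }, { 9 } } };
        const Instruction* instruction = stream.at(1).ptr();
        assert(instruction->opcode == 9);
    }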
trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
r237486 r237547 1864 1864 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); 1865 1865 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); 1866 Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex];1866 const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); 1867 1867 auto repatchingFunction = operationValueAddOptimize; 1868 1868 auto nonRepatchingFunction = operationValueAdd; … … 1883 1883 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); 1884 1884 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); 1885 Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex];1885 const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); 1886 1886 auto repatchingFunction = operationValueSubOptimize; 1887 1887 auto nonRepatchingFunction = operationValueSub; … … 1891 1891 template <typename Generator, typename Func1, typename Func2, 1892 1892 typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>> 1893 void compileUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)1893 void compileUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) 1894 1894 { 1895 1895 Node* node = m_node; … … 1977 1977 template <typename Generator, typename Func1, typename Func2, 1978 1978 typename = std::enable_if_t<std::is_function<typename std::remove_pointer<Func1>::type>::value && std::is_function<typename std::remove_pointer<Func2>::type>::value>> 1979 void compileBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction)1979 void compileBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction, Func1 repatchingFunction, Func2 nonRepatchingFunction) 1980 1980 { 1981 1981 Node* node = m_node; … … 2144 2144 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); 2145 2145 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); 2146 Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex];2146 const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); 2147 2147 auto repatchingFunction = operationValueSubOptimize; 2148 2148 auto nonRepatchingFunction = operationValueSub; … … 2240 2240 CodeBlock* baselineCodeBlock = m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); 2241 2241 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); 2242 Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex];2242 const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); 2243 2243 auto repatchingFunction = operationValueMulOptimize; 2244 2244 auto nonRepatchingFunction = operationValueMul; … … 2777 2777 CodeBlock* baselineCodeBlock = 
m_ftlState.graph.baselineCodeBlockFor(m_node->origin.semantic); 2778 2778 ArithProfile* arithProfile = baselineCodeBlock->arithProfileForBytecodeOffset(m_node->origin.semantic.bytecodeIndex); 2779 Instruction* instruction = &baselineCodeBlock->instructions()[m_node->origin.semantic.bytecodeIndex];2779 const Instruction* instruction = baselineCodeBlock->instructions().at(m_node->origin.semantic.bytecodeIndex).ptr(); 2780 2780 auto repatchingFunction = operationArithNegateOptimize; 2781 2781 auto nonRepatchingFunction = operationArithNegate; -
trunk/Source/JavaScriptCore/ftl/FTLOperations.cpp
r237486 r237547 475 475 // and PhantomNewArrayBuffer are always bound to a specific op_new_array_buffer. 476 476 CodeBlock* codeBlock = baselineCodeBlockForOriginAndBaselineCodeBlock(materialization->origin(), exec->codeBlock()->baselineAlternative()); 477 Instruction* currentInstruction = &codeBlock->instructions()[materialization->origin().bytecodeIndex];478 RELEASE_ASSERT( Interpreter::getOpcodeID(currentInstruction[0].u.opcode) == op_new_array_buffer);479 auto * newArrayBuffer = bitwise_cast<OpNewArrayBuffer*>(currentInstruction);480 ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;477 const Instruction* currentInstruction = codeBlock->instructions().at(materialization->origin().bytecodeIndex).ptr(); 478 RELEASE_ASSERT(currentInstruction->is<OpNewArrayBuffer>()); 479 auto newArrayBuffer = currentInstruction->as<OpNewArrayBuffer>(); 480 ArrayAllocationProfile* profile = &newArrayBuffer.metadata(codeBlock).arrayAllocationProfile; 481 481 482 482 // FIXME: Share the code with CommonSlowPaths. Currently, codeBlock etc. are slightly different. … … 496 496 // a compilation thread. 497 497 WTF::storeStoreFence(); 498 codeBlock->constantRegister(newArrayBuffer ->immutableButterfly()).set(vm, codeBlock, immutableButterfly);498 codeBlock->constantRegister(newArrayBuffer.immutableButterfly.offset()).set(vm, codeBlock, immutableButterfly); 499 499 WTF::storeStoreFence(); 500 500 } -
trunk/Source/JavaScriptCore/interpreter/AbstractPC.h
r237486 r237547 63 63 private: 64 64 #if ENABLE(JIT) 65 void* m_pointer { nullptr };65 const void* m_pointer { nullptr }; 66 66 #endif 67 67 -
trunk/Source/JavaScriptCore/interpreter/CallFrame.cpp
r237486 r237547 108 108 109 109 #if USE(JSVALUE32_64) 110 Instruction* CallFrame::currentVPC() const110 const Instruction* CallFrame::currentVPC() const 111 111 { 112 112 return bitwise_cast<Instruction*>(callSiteIndex().bits()); 113 113 } 114 114 115 void CallFrame::setCurrentVPC( Instruction* vpc)115 void CallFrame::setCurrentVPC(const Instruction* vpc) 116 116 { 117 117 CallSiteIndex callSite(vpc); … … 127 127 128 128 #else // USE(JSVALUE32_64) 129 Instruction* CallFrame::currentVPC() const129 const Instruction* CallFrame::currentVPC() const 130 130 { 131 131 ASSERT(callSiteBitsAreBytecodeOffset()); 132 return &codeBlock()->instructions()[callSiteBitsAsBytecodeOffset()];133 } 134 135 void CallFrame::setCurrentVPC( Instruction* vpc)132 return codeBlock()->instructions().at(callSiteBitsAsBytecodeOffset()).ptr(); 133 } 134 135 void CallFrame::setCurrentVPC(const Instruction* vpc) 136 136 { 137 137 CallSiteIndex callSite(codeBlock()->bytecodeOffset(vpc)); -
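The 64-bit currentVPC()/setCurrentVPC() pair above is just a pointer/offset conversion against the owning CodeBlock's stream: at(offset).ptr() goes one way, bytecodeOffset(vpc) goes back. A standalone model of the round trip, with field and function names invented for the sketch:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct CodeBlock {
        std::vector<uint8_t> instructions; // byte-addressed bytecode stream

        const uint8_t* instructionAt(unsigned bytecodeOffset) const
        {
            return instructions.data() + bytecodeOffset;
        }

        unsigned bytecodeOffset(const uint8_t* vpc) const
        {
            return static_cast<unsigned>(vpc - instructions.data());
        }
    };

    int main()
    {
        CodeBlock codeBlock { { 10, 20, 30, 40 } };
        const uint8_t* vpc = codeBlock.instructionAt(2); // what currentVPC() returns
        assert(codeBlock.bytecodeOffset(vpc) == 2);      // what setCurrentVPC() records
    }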
trunk/Source/JavaScriptCore/interpreter/CallFrame.h
r237486 r237547 54 54 { } 55 55 #if USE(JSVALUE32_64) 56 explicit CallSiteIndex( Instruction* instruction)56 explicit CallSiteIndex(const Instruction* instruction) 57 57 : m_bits(bitwise_cast<uint32_t>(instruction)) 58 58 { } … … 71 71 struct CallerFrameAndPC { 72 72 alignas(CPURegister) CallFrame* callerFrame; 73 alignas(CPURegister) Instruction* returnPC;73 alignas(CPURegister) const Instruction* returnPC; 74 74 static const int sizeInRegisters = 2 * sizeof(CPURegister) / sizeof(Register); 75 75 }; … … 184 184 } 185 185 186 Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline.187 void setCurrentVPC( Instruction* vpc);186 const Instruction* currentVPC() const; // This only makes sense in the LLInt and baseline. 187 void setCurrentVPC(const Instruction*); 188 188 189 189 void setCallerFrame(CallFrame* frame) { callerFrameAndPC().callerFrame = frame; } … … 265 265 void setCallee(JSObject* callee) { static_cast<Register*>(this)[CallFrameSlot::callee] = callee; } 266 266 void setCodeBlock(CodeBlock* codeBlock) { static_cast<Register*>(this)[CallFrameSlot::codeBlock] = codeBlock; } 267 void setReturnPC(void* value) { callerFrameAndPC().returnPC = reinterpret_cast< Instruction*>(value); }267 void setReturnPC(void* value) { callerFrameAndPC().returnPC = reinterpret_cast<const Instruction*>(value); } 268 268 269 269 String friendlyFunctionName(); -
trunk/Source/JavaScriptCore/interpreter/Interpreter.cpp
r237486 r237547 1230 1230 1231 1231 } // namespace JSC 1232 1233 namespace WTF { 1234 1235 void printInternal(PrintStream& out, JSC::DebugHookType type) 1236 { 1237 switch (type) { 1238 case JSC::WillExecuteProgram: 1239 out.print("WillExecuteProgram"); 1240 return; 1241 case JSC::DidExecuteProgram: 1242 out.print("DidExecuteProgram"); 1243 return; 1244 case JSC::DidEnterCallFrame: 1245 out.print("DidEnterCallFrame"); 1246 return; 1247 case JSC::DidReachBreakpoint: 1248 out.print("DidReachBreakpoint"); 1249 return; 1250 case JSC::WillLeaveCallFrame: 1251 out.print("WillLeaveCallFrame"); 1252 return; 1253 case JSC::WillExecuteStatement: 1254 out.print("WillExecuteStatement"); 1255 return; 1256 case JSC::WillExecuteExpression: 1257 out.print("WillExecuteExpression"); 1258 return; 1259 } 1260 } 1261 1262 } // namespace WTF -
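The printInternal() overload added above follows the usual WTF convention: dataLog() and the other PrintStream entry points can print any type for which a WTF::printInternal(PrintStream&, T) exists, so DebugHookType values come out by name in logs. A rough standalone analogue using iostreams, since the WTF classes are not shown in this changeset:

    #include <iostream>

    enum DebugHookType { WillExecuteProgram, DidExecuteProgram, DidReachBreakpoint };

    // Plays the role of printInternal(): map enum values to readable names.
    std::ostream& operator<<(std::ostream& out, DebugHookType type)
    {
        switch (type) {
        case WillExecuteProgram: return out << "WillExecuteProgram";
        case DidExecuteProgram: return out << "DidExecuteProgram";
        case DidReachBreakpoint: return out << "DidReachBreakpoint";
        }
        return out;
    }

    int main()
    {
        std::cout << DidReachBreakpoint << "\n"; // prints the name, not an integer
    }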
trunk/Source/JavaScriptCore/interpreter/Interpreter.h
r237486 r237547 63 63 struct Instruction; 64 64 struct ProtoCallFrame; 65 struct UnlinkedInstruction;66 65 67 66 enum DebugHookType { … … 101 100 102 101 static inline OpcodeID getOpcodeID(Opcode); 103 static inline OpcodeID getOpcodeID(const Instruction&);104 static inline OpcodeID getOpcodeID(const UnlinkedInstruction&);105 102 106 103 #if !ASSERT_DISABLED … … 187 184 188 185 } // namespace JSC 186 187 namespace WTF { 188 189 class PrintStream; 190 191 void printInternal(PrintStream&, JSC::DebugHookType); 192 193 } // namespace WTF -
trunk/Source/JavaScriptCore/interpreter/InterpreterInlines.h
r237486 r237547 65 65 } 66 66 67 inline OpcodeID Interpreter::getOpcodeID(const Instruction& instruction)68 {69 return getOpcodeID(instruction.u.opcode);70 }71 72 inline OpcodeID Interpreter::getOpcodeID(const UnlinkedInstruction& instruction)73 {74 return instruction.u.opcode;75 }76 77 67 ALWAYS_INLINE JSValue Interpreter::execute(CallFrameClosure& closure) 78 68 { -
trunk/Source/JavaScriptCore/interpreter/StackVisitor.cpp
r237486 r237547 449 449 CallFrame* callFrame = m_callFrame; 450 450 CallFrame* callerFrame = this->callerFrame(); 451 void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr;451 const void* returnPC = callFrame->hasReturnPC() ? callFrame->returnPC().value() : nullptr; 452 452 453 453 out.print(indent, "name: ", functionName(), "\n"); -
trunk/Source/JavaScriptCore/interpreter/VMEntryRecord.h
r237486 r237547 49 49 #if !ENABLE(C_LOOP) && NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 50 50 CPURegister calleeSaveRegistersBuffer[NUMBER_OF_CALLEE_SAVES_REGISTERS]; 51 #elif ENABLE(C_LOOP) 52 CPURegister calleeSaveRegistersBuffer[1]; 51 53 #endif 52 54 -
trunk/Source/JavaScriptCore/jit/JIT.cpp
r237486 r237547 77 77 : JSInterfaceJIT(vm, codeBlock) 78 78 , m_interpreter(vm->interpreter) 79 , m_labels(codeBlock ? codeBlock-> numberOfInstructions() : 0)79 , m_labels(codeBlock ? codeBlock->instructions().size() : 0) 80 80 , m_bytecodeOffset(std::numeric_limits<unsigned>::max()) 81 81 , m_pcToCodeOriginMapBuilder(*vm) … … 138 138 139 139 #define NEXT_OPCODE(name) \ 140 m_bytecodeOffset += OPCODE_LENGTH(name); \140 m_bytecodeOffset += currentInstruction->size(); \ 141 141 break; 142 142 … … 170 170 } 171 171 172 void JIT::emitSlowCaseCall( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub)172 void JIT::emitSlowCaseCall(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, SlowPathFunction stub) 173 173 { 174 174 linkAllSlowCases(iter); … … 186 186 jitAssertArgumentCountSane(); 187 187 188 Instruction* instructionsBegin = m_codeBlock->instructions().begin();189 unsigned instructionCount = m_ instructions.size();188 auto& instructions = m_codeBlock->instructions(); 189 unsigned instructionCount = m_codeBlock->instructions().size(); 190 190 191 191 m_callLinkInfoIndex = 0; … … 207 207 // compile code from that bytecode offset onwards. 208 208 209 BytecodeGraph graph(m_codeBlock, m_ instructions);209 BytecodeGraph graph(m_codeBlock, m_codeBlock->instructions()); 210 210 BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset); 211 211 RELEASE_ASSERT(block); … … 222 222 if (m_codeBlock->numberOfExceptionHandlers()) { 223 223 for (unsigned bytecodeOffset = block->leaderOffset(); bytecodeOffset < block->leaderOffset() + block->totalLength();) { 224 OpcodeID opcodeID = Interpreter::getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode);224 auto instruction = instructions.at(bytecodeOffset); 225 225 if (auto* handler = m_codeBlock->handlerForBytecodeOffset(bytecodeOffset)) 226 226 worklist.push(graph.findBasicBlockWithLeaderOffset(handler->target)); 227 227 228 unsigned opcodeLength = opcodeLengths[opcodeID]; 229 bytecodeOffset += opcodeLength; 228 bytecodeOffset += instruction->size(); 230 229 } 231 230 } … … 243 242 if (m_disassembler) 244 243 m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); 245 Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;246 ASSERT_WITH_MESSAGE( Interpreter::isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);244 const Instruction* currentInstruction = instructions.at(m_bytecodeOffset).ptr(); 245 ASSERT_WITH_MESSAGE(currentInstruction->size(), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); 247 246 248 247 m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); … … 258 257 dataLogF("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); 259 258 260 OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);259 OpcodeID opcodeID = currentInstruction->opcodeID(); 261 260 262 261 if (UNLIKELY(m_compilation)) { … … 338 337 DEFINE_OP(op_try_get_by_id) 339 338 DEFINE_OP(op_in_by_id) 340 case op_get_array_length:341 case op_get_by_id_proto_load:342 case op_get_by_id_unset:343 339 DEFINE_OP(op_get_by_id) 344 340 DEFINE_OP(op_get_by_id_with_this) … … 406 402 DEFINE_OP(op_get_parent_scope) 407 403 DEFINE_OP(op_put_by_id) 408 case op_put_by_val_direct:404 DEFINE_OP(op_put_by_val_direct) 409 405 DEFINE_OP(op_put_by_val) 410 406 DEFINE_OP(op_put_getter_by_id) … … 470 466 void JIT::privateCompileSlowCases() 
471 467 { 472 Instruction* instructionsBegin = m_codeBlock->instructions().begin();473 474 468 m_getByIdIndex = 0; 475 469 m_getByIdWithThisIndex = 0; … … 480 474 m_callLinkInfoIndex = 0; 481 475 482 // Use this to assert that slow-path code associates new profiling sites with existing483 // ValueProfiles rather than creating new ones. This ensures that for a given instruction484 // (say, get_by_id) we get combined statistics for both the fast-path executions of that485 // instructions and the slow-path executions. Furthermore, if the slow-path code created486 // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,487 // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().488 unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();489 490 476 for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { 491 477 m_bytecodeOffset = iter->to; … … 495 481 unsigned firstTo = m_bytecodeOffset; 496 482 497 Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;483 const Instruction* currentInstruction = m_codeBlock->instructions().at(m_bytecodeOffset).ptr(); 498 484 499 485 RareCaseProfile* rareCaseProfile = 0; … … 507 493 m_disassembler->setForBytecodeSlowPath(m_bytecodeOffset, label()); 508 494 509 switch ( Interpreter::getOpcodeID(currentInstruction->u.opcode)) {495 switch (currentInstruction->opcodeID()) { 510 496 DEFINE_SLOWCASE_OP(op_add) 511 497 DEFINE_SLOWCASE_OP(op_call) … … 520 506 DEFINE_SLOWCASE_OP(op_try_get_by_id) 521 507 DEFINE_SLOWCASE_OP(op_in_by_id) 522 case op_get_array_length:523 case op_get_by_id_proto_load:524 case op_get_by_id_unset:525 508 DEFINE_SLOWCASE_OP(op_get_by_id) 526 509 DEFINE_SLOWCASE_OP(op_get_by_id_with_this) … … 602 585 RELEASE_ASSERT(m_instanceOfIndex == m_instanceOfs.size()); 603 586 RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); 604 RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());605 587 606 588 #ifndef NDEBUG … … 616 598 before = MonotonicTime::now(); 617 599 618 {619 ConcurrentJSLocker locker(m_codeBlock->m_lock);620 m_instructions = m_codeBlock->instructions().clone();621 }622 623 600 DFG::CapabilityLevel level = m_codeBlock->capabilityLevel(); 624 601 switch (level) { … … 908 885 m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add( 909 886 static_cast<double>(result.size()) / 910 static_cast<double>(m_ instructions.size()));887 static_cast<double>(m_codeBlock->instructionCount())); 911 888 912 889 m_codeBlock->shrinkToFit(CodeBlock::LateShrink); -
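One consequence of variable-width bytecode shows up twice in this file: NEXT_OPCODE now advances by currentInstruction->size() rather than OPCODE_LENGTH(name), and the basic-block scan for OSR entry steps by instruction->size() as well. A tiny model of why a fixed per-opcode length no longer works; the sizes here are invented:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct Instruction {
        uint8_t opcode;
        uint8_t operandBytes; // narrow vs. wide encodings of one opcode differ here
        uint8_t size() const { return 1 + operandBytes; }
    };

    int main()
    {
        // The same opcode encoded narrow (1 operand byte) and wide (4 bytes):
        std::vector<Instruction> stream = { { 0, 1 }, { 0, 4 }, { 1, 0 } };
        unsigned bytecodeOffset = 0;
        for (const Instruction& currentInstruction : stream) {
            std::cout << "compiling bc#" << bytecodeOffset << "\n";
            bytecodeOffset += currentInstruction.size(); // was OPCODE_LENGTH(...)
        }
    }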
trunk/Source/JavaScriptCore/jit/JIT.h
r237486 r237547 43 43 #include "JITInlineCacheGenerator.h" 44 44 #include "JITMathIC.h" 45 #include "JITRightShiftGenerator.h" 45 46 #include "JSInterfaceJIT.h" 46 47 #include "PCToCodeOriginMap.h" … … 223 224 JIT jit(vm, codeBlock); 224 225 jit.m_bytecodeOffset = byValInfo->bytecodeIndex; 225 jit.privateCompilePutByVal (byValInfo, returnAddress, arrayMode);226 jit.privateCompilePutByVal<OpPutByVal>(byValInfo, returnAddress, arrayMode); 226 227 } 227 228 … … 230 231 JIT jit(vm, codeBlock); 231 232 jit.m_bytecodeOffset = byValInfo->bytecodeIndex; 232 jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); 233 } 234 233 jit.privateCompilePutByVal<OpPutByValDirect>(byValInfo, returnAddress, arrayMode); 234 } 235 236 template<typename Op> 235 237 static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) 236 238 { 237 239 JIT jit(vm, codeBlock); 238 240 jit.m_bytecodeOffset = byValInfo->bytecodeIndex; 239 jit.privateCompilePutByValWithCachedId (byValInfo, returnAddress, putKind, propertyName);241 jit.privateCompilePutByValWithCachedId<Op>(byValInfo, returnAddress, putKind, propertyName); 240 242 } 241 243 … … 261 263 void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); 262 264 void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&); 265 template<typename Op> 263 266 void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); 267 template<typename Op> 264 268 void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&); 265 269 … … 309 313 void emitJumpSlowToHot(Jump, int); 310 314 311 void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); 312 void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex); 313 void compileSetupVarargsFrame(OpcodeID, Instruction*, CallLinkInfo*); 314 void compileCallEval(Instruction*); 315 void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&); 316 void emitPutCallResult(Instruction*); 315 template<typename Op> 316 void compileOpCall(const Instruction*, unsigned callLinkInfoIndex); 317 template<typename Op> 318 void compileOpCallSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex); 319 template<typename Op> 320 std::enable_if_t< 321 Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs 322 && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments 323 , void> compileSetupFrame(const Op&, CallLinkInfo*); 324 325 template<typename Op> 326 std::enable_if_t< 327 Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs 328 || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments 329 , void> compileSetupFrame(const Op&, CallLinkInfo*); 330 331 template<typename Op> 332 bool compileTailCall(const Op&, CallLinkInfo*, unsigned callLinkInfoIndex); 333 template<typename Op> 334 bool compileCallEval(const Op&); 335 void compileCallEvalSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&); 336 template<typename Op> 337 void emitPutCallResult(const Op&); 317 338 318 339 enum class CompileOpStrictEqType { StrictEq, NStrictEq }; 319 void compileOpStrictEq(Instruction*, CompileOpStrictEqType); 320 void compileOpStrictEqJump(Instruction*, CompileOpStrictEqType); 340 template<typename Op> 341 void compileOpStrictEq(const 
Instruction*, CompileOpStrictEqType); 342 template<typename Op> 343 void compileOpStrictEqJump(const Instruction*, CompileOpStrictEqType); 321 344 enum class CompileOpEqType { Eq, NEq }; 322 345 void compileOpEqJumpSlow(Vector<SlowCaseEntry>::iterator&, CompileOpEqType, int jumpTarget); … … 336 359 // scratch. 337 360 void emitValueProfilingSite(ValueProfile&); 338 void emitValueProfilingSite(unsigned bytecodeOffset); 339 void emitValueProfilingSite(); 361 template<typename Metadata> void emitValueProfilingSite(Metadata&); 362 void emitValueProfilingSiteIfProfiledOpcode(...); 363 template<typename Op> 364 std::enable_if_t<std::is_same<decltype(Op::Metadata::profile), ValueProfile>::value, void> 365 emitValueProfilingSiteIfProfiledOpcode(Op bytecode); 366 340 367 void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); 341 368 void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); … … 349 376 // Structure is already profiled. Returns the slow cases. Fall-through 350 377 // case contains result in regT0, and it is not yet profiled. 351 JumpList emitInt32Load( Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); }352 JumpList emitDoubleLoad( Instruction*, PatchableJump& badType);353 JumpList emitContiguousLoad( Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);354 JumpList emitArrayStorageLoad( Instruction*, PatchableJump& badType);355 JumpList emitLoadForArrayMode( Instruction*, JITArrayMode, PatchableJump& badType);356 357 JumpList emitInt32GetByVal( Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }358 JumpList emitDoubleGetByVal( Instruction*, PatchableJump& badType);359 JumpList emitContiguousGetByVal( Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);360 JumpList emitArrayStorageGetByVal( Instruction*, PatchableJump& badType);361 JumpList emitDirectArgumentsGetByVal( Instruction*, PatchableJump& badType);362 JumpList emitScopedArgumentsGetByVal( Instruction*, PatchableJump& badType);363 JumpList emitIntTypedArrayGetByVal( Instruction*, PatchableJump& badType, TypedArrayType);364 JumpList emitFloatTypedArrayGetByVal( Instruction*, PatchableJump& badType, TypedArrayType);378 JumpList emitInt32Load(const Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } 379 JumpList emitDoubleLoad(const Instruction*, PatchableJump& badType); 380 JumpList emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); 381 JumpList emitArrayStorageLoad(const Instruction*, PatchableJump& badType); 382 JumpList emitLoadForArrayMode(const Instruction*, JITArrayMode, PatchableJump& badType); 383 384 JumpList emitInt32GetByVal(const Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } 385 JumpList emitDoubleGetByVal(const Instruction*, PatchableJump& badType); 386 JumpList emitContiguousGetByVal(const Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); 387 JumpList emitArrayStorageGetByVal(const Instruction*, PatchableJump& badType); 388 JumpList emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType); 389 JumpList emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType); 390 JumpList 
emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType); 391 JumpList emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType); 365 392 366 393 // Property is in regT1, base is in regT0. regT2 contains indexing type. … 368 395 // zero-extended. Base is cell checked. Structure is already profiled. 369 396 // returns the slow cases. 370 JumpList emitInt32PutByVal(Instruction* currentInstruction, PatchableJump& badType) 371 { 372 return emitGenericContiguousPutByVal(currentInstruction, badType, Int32Shape); 373 } 374 JumpList emitDoublePutByVal(Instruction* currentInstruction, PatchableJump& badType) 375 { 376 return emitGenericContiguousPutByVal(currentInstruction, badType, DoubleShape); 377 } 378 JumpList emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType) 379 { 380 return emitGenericContiguousPutByVal(currentInstruction, badType); 381 } 382 JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape); 383 JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType); 384 JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); 385 JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); 397 template<typename Op> 398 JumpList emitInt32PutByVal(Op bytecode, PatchableJump& badType) 399 { 400 return emitGenericContiguousPutByVal(bytecode, badType, Int32Shape); 401 } 402 template<typename Op> 403 JumpList emitDoublePutByVal(Op bytecode, PatchableJump& badType) 404 { 405 return emitGenericContiguousPutByVal(bytecode, badType, DoubleShape); 406 } 407 template<typename Op> 408 JumpList emitContiguousPutByVal(Op bytecode, PatchableJump& badType) 409 { 410 return emitGenericContiguousPutByVal(bytecode, badType); 411 } 412 template<typename Op> 413 JumpList emitGenericContiguousPutByVal(Op, PatchableJump& badType, IndexingType indexingShape = ContiguousShape); 414 template<typename Op> 415 JumpList emitArrayStoragePutByVal(Op, PatchableJump& badType); 416 template<typename Op> 417 JumpList emitIntTypedArrayPutByVal(Op, PatchableJump& badType, TypedArrayType); 418 template<typename Op> 419 JumpList emitFloatTypedArrayPutByVal(Op, PatchableJump& badType, TypedArrayType); 386 420 387 421 // Identifier check helper for GetByVal and PutByVal.
388 422 void emitByValIdentifierCheck(ByValInfo*, RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases); 389 423 390 JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); 391 JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases); 424 JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, OpGetByVal, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); 425 template<typename Op> 426 JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Op, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases); 392 427 393 428 enum FinalObjectMode { MayBeFinal, KnownNotFinal }; … … 455 490 #endif // USE(JSVALUE32_64) 456 491 457 void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition); 458 void emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition); 459 void emit_compareUnsignedAndJump(int op1, int op2, unsigned target, RelationalCondition); 460 void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&); 492 template<typename Op> 493 void emit_compareAndJump(const Instruction*, RelationalCondition); 494 template<typename Op> 495 void emit_compareUnsigned(const Instruction*, RelationalCondition); 496 template<typename Op> 497 void emit_compareUnsignedAndJump(const Instruction*, RelationalCondition); 498 template<typename Op> 499 void emit_compareAndJumpSlow(const Instruction*, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&); 461 500 462 501 void assertStackPointerOffset(); 463 502 464 void emit_op_add(Instruction*); 465 void emit_op_bitand(Instruction*); 466 void emit_op_bitor(Instruction*); 467 void emit_op_bitxor(Instruction*); 468 void emit_op_call(Instruction*); 469 void emit_op_tail_call(Instruction*); 470 void emit_op_call_eval(Instruction*); 471 void emit_op_call_varargs(Instruction*); 472 void emit_op_tail_call_varargs(Instruction*); 473 void emit_op_tail_call_forward_arguments(Instruction*); 474 void emit_op_construct_varargs(Instruction*); 475 void emit_op_catch(Instruction*); 476 void emit_op_construct(Instruction*); 477 void emit_op_create_this(Instruction*); 478 void emit_op_to_this(Instruction*); 479 void emit_op_get_argument(Instruction*); 480 void emit_op_argument_count(Instruction*); 481 void emit_op_get_rest_length(Instruction*); 482 void emit_op_check_tdz(Instruction*); 483 void emit_op_identity_with_profile(Instruction*); 484 void emit_op_debug(Instruction*); 485 void emit_op_del_by_id(Instruction*); 486 void emit_op_del_by_val(Instruction*); 487 void emit_op_div(Instruction*); 488 void emit_op_end(Instruction*); 489 void emit_op_enter(Instruction*); 490 void emit_op_get_scope(Instruction*); 491 void emit_op_eq(Instruction*); 492 void emit_op_eq_null(Instruction*); 493 void emit_op_below(Instruction*); 494 void emit_op_beloweq(Instruction*); 495 void emit_op_try_get_by_id(Instruction*); 496 void emit_op_get_by_id(Instruction*); 497 void emit_op_get_by_id_with_this(Instruction*); 498 void emit_op_get_by_id_direct(Instruction*); 499 void emit_op_get_arguments_length(Instruction*); 500 void emit_op_get_by_val(Instruction*); 501 void 
emit_op_get_argument_by_val(Instruction*); 502 void emit_op_in_by_id(Instruction*); 503 void emit_op_init_lazy_reg(Instruction*); 504 void emit_op_overrides_has_instance(Instruction*); 505 void emit_op_instanceof(Instruction*); 506 void emit_op_instanceof_custom(Instruction*); 507 void emit_op_is_empty(Instruction*); 508 void emit_op_is_undefined(Instruction*); 509 void emit_op_is_boolean(Instruction*); 510 void emit_op_is_number(Instruction*); 511 void emit_op_is_object(Instruction*); 512 void emit_op_is_cell_with_type(Instruction*); 513 void emit_op_jeq_null(Instruction*); 514 void emit_op_jfalse(Instruction*); 515 void emit_op_jmp(Instruction*); 516 void emit_op_jneq_null(Instruction*); 517 void emit_op_jneq_ptr(Instruction*); 518 void emit_op_jless(Instruction*); 519 void emit_op_jlesseq(Instruction*); 520 void emit_op_jgreater(Instruction*); 521 void emit_op_jgreatereq(Instruction*); 522 void emit_op_jnless(Instruction*); 523 void emit_op_jnlesseq(Instruction*); 524 void emit_op_jngreater(Instruction*); 525 void emit_op_jngreatereq(Instruction*); 526 void emit_op_jeq(Instruction*); 527 void emit_op_jneq(Instruction*); 528 void emit_op_jstricteq(Instruction*); 529 void emit_op_jnstricteq(Instruction*); 530 void emit_op_jbelow(Instruction*); 531 void emit_op_jbeloweq(Instruction*); 532 void emit_op_jtrue(Instruction*); 533 void emit_op_loop_hint(Instruction*); 534 void emit_op_check_traps(Instruction*); 535 void emit_op_nop(Instruction*); 536 void emit_op_super_sampler_begin(Instruction*); 537 void emit_op_super_sampler_end(Instruction*); 538 void emit_op_lshift(Instruction*); 539 void emit_op_mod(Instruction*); 540 void emit_op_mov(Instruction*); 541 void emit_op_mul(Instruction*); 542 void emit_op_negate(Instruction*); 543 void emit_op_neq(Instruction*); 544 void emit_op_neq_null(Instruction*); 545 void emit_op_new_array(Instruction*); 546 void emit_op_new_array_with_size(Instruction*); 547 void emit_op_new_func(Instruction*); 548 void emit_op_new_func_exp(Instruction*); 549 void emit_op_new_generator_func(Instruction*); 550 void emit_op_new_generator_func_exp(Instruction*); 551 void emit_op_new_async_func(Instruction*); 552 void emit_op_new_async_func_exp(Instruction*); 553 void emit_op_new_async_generator_func(Instruction*); 554 void emit_op_new_async_generator_func_exp(Instruction*); 555 void emit_op_new_object(Instruction*); 556 void emit_op_new_regexp(Instruction*); 557 void emit_op_not(Instruction*); 558 void emit_op_nstricteq(Instruction*); 559 void emit_op_dec(Instruction*); 560 void emit_op_inc(Instruction*); 561 void emit_op_profile_type(Instruction*); 562 void emit_op_profile_control_flow(Instruction*); 563 void emit_op_get_parent_scope(Instruction*); 564 void emit_op_put_by_id(Instruction*); 565 void emit_op_put_by_val(Instruction*); 566 void emit_op_put_getter_by_id(Instruction*); 567 void emit_op_put_setter_by_id(Instruction*); 568 void emit_op_put_getter_setter_by_id(Instruction*); 569 void emit_op_put_getter_by_val(Instruction*); 570 void emit_op_put_setter_by_val(Instruction*); 571 void emit_op_ret(Instruction*); 572 void emit_op_rshift(Instruction*); 573 void emit_op_set_function_name(Instruction*); 574 void emit_op_stricteq(Instruction*); 575 void emit_op_sub(Instruction*); 576 void emit_op_switch_char(Instruction*); 577 void emit_op_switch_imm(Instruction*); 578 void emit_op_switch_string(Instruction*); 579 void emit_op_tear_off_arguments(Instruction*); 580 void emit_op_throw(Instruction*); 581 void emit_op_to_number(Instruction*); 582 void 
emit_op_to_string(Instruction*); 583 void emit_op_to_object(Instruction*); 584 void emit_op_to_primitive(Instruction*); 585 void emit_op_unexpected_load(Instruction*); 586 void emit_op_unsigned(Instruction*); 587 void emit_op_urshift(Instruction*); 588 void emit_op_has_structure_property(Instruction*); 589 void emit_op_has_indexed_property(Instruction*); 590 void emit_op_get_direct_pname(Instruction*); 591 void emit_op_enumerator_structure_pname(Instruction*); 592 void emit_op_enumerator_generic_pname(Instruction*); 593 void emit_op_log_shadow_chicken_prologue(Instruction*); 594 void emit_op_log_shadow_chicken_tail(Instruction*); 595 596 void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&); 597 void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&); 598 void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&); 599 void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&); 600 void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); 601 void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); 602 void emitSlow_op_tail_call_forward_arguments(Instruction*, Vector<SlowCaseEntry>::iterator&); 603 void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); 604 void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&); 605 void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&); 606 void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&); 607 void emitSlow_op_try_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); 608 void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); 609 void emitSlow_op_get_by_id_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&); 610 void emitSlow_op_get_by_id_direct(Instruction*, Vector<SlowCaseEntry>::iterator&); 611 void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&); 612 void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); 613 void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); 614 void emitSlow_op_in_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); 615 void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&); 616 void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&); 617 void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&); 618 void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&); 619 void emitSlow_op_jgreater(Instruction*, Vector<SlowCaseEntry>::iterator&); 620 void emitSlow_op_jgreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&); 621 void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&); 622 void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&); 623 void emitSlow_op_jngreater(Instruction*, Vector<SlowCaseEntry>::iterator&); 624 void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&); 625 void emitSlow_op_jeq(Instruction*, Vector<SlowCaseEntry>::iterator&); 626 void emitSlow_op_jneq(Instruction*, Vector<SlowCaseEntry>::iterator&); 627 void emitSlow_op_jstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); 628 void emitSlow_op_jnstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); 629 void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&); 630 void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&); 631 void emitSlow_op_check_traps(Instruction*, 
Vector<SlowCaseEntry>::iterator&); 632 void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&); 633 void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&); 634 void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&); 635 void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&); 636 void emitSlow_op_new_object(Instruction*, Vector<SlowCaseEntry>::iterator&); 637 void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&); 638 void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); 639 void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&); 640 void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&); 641 642 void emit_op_resolve_scope(Instruction*); 643 void emit_op_get_from_scope(Instruction*); 644 void emit_op_put_to_scope(Instruction*); 645 void emit_op_get_from_arguments(Instruction*); 646 void emit_op_put_to_arguments(Instruction*); 647 void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); 648 void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); 649 650 void emitSlowCaseCall(Instruction*, Vector<SlowCaseEntry>::iterator&, SlowPathFunction); 651 652 void emitRightShift(Instruction*, bool isUnsigned); 653 void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned); 654 655 void emitNewFuncCommon(Instruction*); 656 void emitNewFuncExprCommon(Instruction*); 503 void emit_op_add(const Instruction*); 504 void emit_op_bitand(const Instruction*); 505 void emit_op_bitor(const Instruction*); 506 void emit_op_bitxor(const Instruction*); 507 void emit_op_call(const Instruction*); 508 void emit_op_tail_call(const Instruction*); 509 void emit_op_call_eval(const Instruction*); 510 void emit_op_call_varargs(const Instruction*); 511 void emit_op_tail_call_varargs(const Instruction*); 512 void emit_op_tail_call_forward_arguments(const Instruction*); 513 void emit_op_construct_varargs(const Instruction*); 514 void emit_op_catch(const Instruction*); 515 void emit_op_construct(const Instruction*); 516 void emit_op_create_this(const Instruction*); 517 void emit_op_to_this(const Instruction*); 518 void emit_op_get_argument(const Instruction*); 519 void emit_op_argument_count(const Instruction*); 520 void emit_op_get_rest_length(const Instruction*); 521 void emit_op_check_tdz(const Instruction*); 522 void emit_op_identity_with_profile(const Instruction*); 523 void emit_op_debug(const Instruction*); 524 void emit_op_del_by_id(const Instruction*); 525 void emit_op_del_by_val(const Instruction*); 526 void emit_op_div(const Instruction*); 527 void emit_op_end(const Instruction*); 528 void emit_op_enter(const Instruction*); 529 void emit_op_get_scope(const Instruction*); 530 void emit_op_eq(const Instruction*); 531 void emit_op_eq_null(const Instruction*); 532 void emit_op_below(const Instruction*); 533 void emit_op_beloweq(const Instruction*); 534 void emit_op_try_get_by_id(const Instruction*); 535 void emit_op_get_by_id(const Instruction*); 536 void emit_op_get_by_id_with_this(const Instruction*); 537 void emit_op_get_by_id_direct(const Instruction*); 538 void emit_op_get_by_val(const Instruction*); 539 void emit_op_get_argument_by_val(const Instruction*); 540 void emit_op_in_by_id(const Instruction*); 541 void emit_op_init_lazy_reg(const Instruction*); 542 void emit_op_overrides_has_instance(const Instruction*); 543 void emit_op_instanceof(const Instruction*); 544 void 
emit_op_instanceof_custom(const Instruction*); 545 void emit_op_is_empty(const Instruction*); 546 void emit_op_is_undefined(const Instruction*); 547 void emit_op_is_boolean(const Instruction*); 548 void emit_op_is_number(const Instruction*); 549 void emit_op_is_object(const Instruction*); 550 void emit_op_is_cell_with_type(const Instruction*); 551 void emit_op_jeq_null(const Instruction*); 552 void emit_op_jfalse(const Instruction*); 553 void emit_op_jmp(const Instruction*); 554 void emit_op_jneq_null(const Instruction*); 555 void emit_op_jneq_ptr(const Instruction*); 556 void emit_op_jless(const Instruction*); 557 void emit_op_jlesseq(const Instruction*); 558 void emit_op_jgreater(const Instruction*); 559 void emit_op_jgreatereq(const Instruction*); 560 void emit_op_jnless(const Instruction*); 561 void emit_op_jnlesseq(const Instruction*); 562 void emit_op_jngreater(const Instruction*); 563 void emit_op_jngreatereq(const Instruction*); 564 void emit_op_jeq(const Instruction*); 565 void emit_op_jneq(const Instruction*); 566 void emit_op_jstricteq(const Instruction*); 567 void emit_op_jnstricteq(const Instruction*); 568 void emit_op_jbelow(const Instruction*); 569 void emit_op_jbeloweq(const Instruction*); 570 void emit_op_jtrue(const Instruction*); 571 void emit_op_loop_hint(const Instruction*); 572 void emit_op_check_traps(const Instruction*); 573 void emit_op_nop(const Instruction*); 574 void emit_op_super_sampler_begin(const Instruction*); 575 void emit_op_super_sampler_end(const Instruction*); 576 void emit_op_lshift(const Instruction*); 577 void emit_op_mod(const Instruction*); 578 void emit_op_mov(const Instruction*); 579 void emit_op_mul(const Instruction*); 580 void emit_op_negate(const Instruction*); 581 void emit_op_neq(const Instruction*); 582 void emit_op_neq_null(const Instruction*); 583 void emit_op_new_array(const Instruction*); 584 void emit_op_new_array_with_size(const Instruction*); 585 void emit_op_new_func(const Instruction*); 586 void emit_op_new_func_exp(const Instruction*); 587 void emit_op_new_generator_func(const Instruction*); 588 void emit_op_new_generator_func_exp(const Instruction*); 589 void emit_op_new_async_func(const Instruction*); 590 void emit_op_new_async_func_exp(const Instruction*); 591 void emit_op_new_async_generator_func(const Instruction*); 592 void emit_op_new_async_generator_func_exp(const Instruction*); 593 void emit_op_new_object(const Instruction*); 594 void emit_op_new_regexp(const Instruction*); 595 void emit_op_not(const Instruction*); 596 void emit_op_nstricteq(const Instruction*); 597 void emit_op_dec(const Instruction*); 598 void emit_op_inc(const Instruction*); 599 void emit_op_profile_type(const Instruction*); 600 void emit_op_profile_control_flow(const Instruction*); 601 void emit_op_get_parent_scope(const Instruction*); 602 void emit_op_put_by_id(const Instruction*); 603 template<typename Op = OpPutByVal> 604 void emit_op_put_by_val(const Instruction*); 605 void emit_op_put_by_val_direct(const Instruction*); 606 void emit_op_put_getter_by_id(const Instruction*); 607 void emit_op_put_setter_by_id(const Instruction*); 608 void emit_op_put_getter_setter_by_id(const Instruction*); 609 void emit_op_put_getter_by_val(const Instruction*); 610 void emit_op_put_setter_by_val(const Instruction*); 611 void emit_op_ret(const Instruction*); 612 void emit_op_rshift(const Instruction*); 613 void emit_op_set_function_name(const Instruction*); 614 void emit_op_stricteq(const Instruction*); 615 void emit_op_sub(const Instruction*); 616 void 
emit_op_switch_char(const Instruction*); 617 void emit_op_switch_imm(const Instruction*); 618 void emit_op_switch_string(const Instruction*); 619 void emit_op_tear_off_arguments(const Instruction*); 620 void emit_op_throw(const Instruction*); 621 void emit_op_to_number(const Instruction*); 622 void emit_op_to_string(const Instruction*); 623 void emit_op_to_object(const Instruction*); 624 void emit_op_to_primitive(const Instruction*); 625 void emit_op_unexpected_load(const Instruction*); 626 void emit_op_unsigned(const Instruction*); 627 void emit_op_urshift(const Instruction*); 628 void emit_op_has_structure_property(const Instruction*); 629 void emit_op_has_indexed_property(const Instruction*); 630 void emit_op_get_direct_pname(const Instruction*); 631 void emit_op_enumerator_structure_pname(const Instruction*); 632 void emit_op_enumerator_generic_pname(const Instruction*); 633 void emit_op_log_shadow_chicken_prologue(const Instruction*); 634 void emit_op_log_shadow_chicken_tail(const Instruction*); 635 636 void emitSlow_op_add(const Instruction*, Vector<SlowCaseEntry>::iterator&); 637 void emitSlow_op_call(const Instruction*, Vector<SlowCaseEntry>::iterator&); 638 void emitSlow_op_tail_call(const Instruction*, Vector<SlowCaseEntry>::iterator&); 639 void emitSlow_op_call_eval(const Instruction*, Vector<SlowCaseEntry>::iterator&); 640 void emitSlow_op_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); 641 void emitSlow_op_tail_call_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); 642 void emitSlow_op_tail_call_forward_arguments(const Instruction*, Vector<SlowCaseEntry>::iterator&); 643 void emitSlow_op_construct_varargs(const Instruction*, Vector<SlowCaseEntry>::iterator&); 644 void emitSlow_op_construct(const Instruction*, Vector<SlowCaseEntry>::iterator&); 645 void emitSlow_op_eq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 646 void emitSlow_op_get_callee(const Instruction*, Vector<SlowCaseEntry>::iterator&); 647 void emitSlow_op_try_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); 648 void emitSlow_op_get_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); 649 void emitSlow_op_get_by_id_with_this(const Instruction*, Vector<SlowCaseEntry>::iterator&); 650 void emitSlow_op_get_by_id_direct(const Instruction*, Vector<SlowCaseEntry>::iterator&); 651 void emitSlow_op_get_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); 652 void emitSlow_op_get_argument_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); 653 void emitSlow_op_in_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); 654 void emitSlow_op_instanceof(const Instruction*, Vector<SlowCaseEntry>::iterator&); 655 void emitSlow_op_instanceof_custom(const Instruction*, Vector<SlowCaseEntry>::iterator&); 656 void emitSlow_op_jless(const Instruction*, Vector<SlowCaseEntry>::iterator&); 657 void emitSlow_op_jlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 658 void emitSlow_op_jgreater(const Instruction*, Vector<SlowCaseEntry>::iterator&); 659 void emitSlow_op_jgreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 660 void emitSlow_op_jnless(const Instruction*, Vector<SlowCaseEntry>::iterator&); 661 void emitSlow_op_jnlesseq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 662 void emitSlow_op_jngreater(const Instruction*, Vector<SlowCaseEntry>::iterator&); 663 void emitSlow_op_jngreatereq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 664 void emitSlow_op_jeq(const Instruction*, 
Vector<SlowCaseEntry>::iterator&); 665 void emitSlow_op_jneq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 666 void emitSlow_op_jstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 667 void emitSlow_op_jnstricteq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 668 void emitSlow_op_jtrue(const Instruction*, Vector<SlowCaseEntry>::iterator&); 669 void emitSlow_op_loop_hint(const Instruction*, Vector<SlowCaseEntry>::iterator&); 670 void emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator&); 671 void emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&); 672 void emitSlow_op_mul(const Instruction*, Vector<SlowCaseEntry>::iterator&); 673 void emitSlow_op_negate(const Instruction*, Vector<SlowCaseEntry>::iterator&); 674 void emitSlow_op_neq(const Instruction*, Vector<SlowCaseEntry>::iterator&); 675 void emitSlow_op_new_object(const Instruction*, Vector<SlowCaseEntry>::iterator&); 676 void emitSlow_op_put_by_id(const Instruction*, Vector<SlowCaseEntry>::iterator&); 677 void emitSlow_op_put_by_val(const Instruction*, Vector<SlowCaseEntry>::iterator&); 678 void emitSlow_op_sub(const Instruction*, Vector<SlowCaseEntry>::iterator&); 679 void emitSlow_op_has_indexed_property(const Instruction*, Vector<SlowCaseEntry>::iterator&); 680 681 void emit_op_resolve_scope(const Instruction*); 682 void emit_op_get_from_scope(const Instruction*); 683 void emit_op_put_to_scope(const Instruction*); 684 void emit_op_get_from_arguments(const Instruction*); 685 void emit_op_put_to_arguments(const Instruction*); 686 void emitSlow_op_get_from_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&); 687 void emitSlow_op_put_to_scope(const Instruction*, Vector<SlowCaseEntry>::iterator&); 688 689 void emitSlowCaseCall(const Instruction*, Vector<SlowCaseEntry>::iterator&, SlowPathFunction); 690 691 void emitRightShift(const Instruction*, bool isUnsigned); 692 void emitRightShiftSlowCase(const Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned); 693 694 template<typename Op> 695 void emitNewFuncCommon(const Instruction*); 696 template<typename Op> 697 void emitNewFuncExprCommon(const Instruction*); 657 698 void emitVarInjectionCheck(bool needsVarInjectionChecks); 658 699 void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth); … … 680 721 bool isOperandConstantChar(int src); 681 722 682 template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>683 void emitMathICFast(JITUnaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction);684 template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>685 void emitMathICFast(JITBinaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction);686 687 template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>688 void emitMathICSlow(JITBinaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);689 template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>690 void emitMathICSlow(JITUnaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);723 template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> 724 void emitMathICFast(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction); 725 template <typename Op, typename 
Generator, typename ProfiledFunction, typename NonProfiledFunction> 726 void emitMathICFast(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledFunction, NonProfiledFunction); 727 728 template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> 729 void emitMathICSlow(JITBinaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); 730 template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> 731 void emitMathICSlow(JITUnaryMathIC<Generator>*, const Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); 691 732 692 733 Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter) … … 720 761 MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr<CFunctionPtrTag>); 721 762 MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr<CFunctionPtrTag>, int); 722 MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr<CFunctionPtrTag>, int); 763 template<typename Metadata> 764 MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata&, const FunctionPtr<CFunctionPtrTag>, int); 723 765 724 766 template<typename OperationType, typename... Args> … … 762 804 #endif // OS(WINDOWS) && CPU(X86_64) 763 805 764 template<typename OperationType, typename... Args>806 template<typename Metadata, typename OperationType, typename... Args> 765 807 std::enable_if_t<FunctionTraits<OperationType>::hasResult, MacroAssembler::Call> 766 callOperationWithProfile( OperationType operation, int result, Args... args)808 callOperationWithProfile(Metadata& metadata, OperationType operation, int result, Args... args) 767 809 { 768 810 setupArguments<OperationType>(args...); 769 return appendCallWithExceptionCheckSetJSValueResultWithProfile( operation, result);811 return appendCallWithExceptionCheckSetJSValueResultWithProfile(metadata, operation, result); 770 812 } 771 813 … … 799 841 }; 800 842 801 template<typename SnippetGenerator> 802 void emitBitBinaryOpFastPath(Instruction* currentInstruction, ProfilingPolicy shouldEmitProfiling = ProfilingPolicy::NoProfiling); 803 804 void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID); 843 template<typename Op, typename SnippetGenerator> 844 void emitBitBinaryOpFastPath(const Instruction* currentInstruction, ProfilingPolicy shouldEmitProfiling = ProfilingPolicy::NoProfiling); 845 846 void emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID); 847 848 template<typename Op> 849 void emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType); 805 850 806 851 void updateTopCallFrame(); … … 811 856 // Loads the character value of a single character string into dst. 812 857 void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures); 858 859 int jumpTarget(const Instruction*, int target); 813 860 814 861 #if ENABLE(DFG_JIT) … … 832 879 833 880 #if ENABLE(OPCODE_SAMPLING) 834 void sampleInstruction( Instruction*, bool = false);881 void sampleInstruction(const Instruction*, bool = false); 835 882 #endif 836 883 … … 856 903 static bool computeCompileTimes(); 857 904 858 // If you need to check the value of an instruction multiple times and the instruction is 859 // part of a LLInt inline cache, then you want to use this. 
It will give you the value of 860 // the instruction at the start of JITing. 861 Instruction* copiedInstruction(Instruction*); 905 // If you need to check a value from the metadata table and you need it to 906 // be consistent across the fast and slow path, then you want to use this. 907 // It will give the slow path the same value read by the fast path. 908 GetPutInfo copiedGetPutInfo(OpPutToScope); 909 template<typename BinaryOp> 910 ArithProfile copiedArithProfile(BinaryOp); 862 911 863 912 Interpreter* m_interpreter; 864 865 PoisonedRefCountedArray<CodeBlockPoison, Instruction> m_instructions;866 913 867 914 Vector<CallRecord> m_calls; … … 880 927 Vector<SwitchRecord> m_switches; 881 928 929 HashMap<unsigned, unsigned> m_copiedGetPutInfos; 930 HashMap<uint64_t, ArithProfile> m_copiedArithProfiles; 931 882 932 JumpList m_exceptionChecks; 883 933 JumpList m_exceptionChecksWithCallFrameRollback; … … 900 950 PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder; 901 951 902 HashMap< Instruction*, void*> m_instructionToMathIC;903 HashMap< Instruction*, MathICGenerationState> m_instructionToMathICGenerationState;952 HashMap<const Instruction*, void*> m_instructionToMathIC; 953 HashMap<const Instruction*, MathICGenerationState> m_instructionToMathICGenerationState; 904 954 905 955 bool m_canBeOptimized; … … 912 962 } // namespace JSC 913 963 964 914 965 #endif // ENABLE(JIT) -
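The copiedGetPutInfo/copiedArithProfile helpers declared above replace copiedInstruction(): instead of snapshotting a whole instruction at the start of JITing, the JIT now snapshots only the metadata value it cares about, so the slow path is guaranteed to see the value the fast path was compiled against even if the live metadata table keeps mutating. A minimal sketch of the caching idea behind copiedArithProfile, assuming the generated Op structs used elsewhere in this patch; the key layout and helper body here are illustrative, not verbatim from the real implementation:

template<typename BinaryOp>
ArithProfile JIT::copiedArithProfile(BinaryOp bytecode)
{
    // Key by opcode ID plus bytecode offset so each arithmetic instruction is
    // snapshotted at most once per compilation (assumed key layout).
    uint64_t key = (static_cast<uint64_t>(BinaryOp::opcodeID) << 32) | m_bytecodeOffset;
    auto addResult = m_copiedArithProfiles.add(key, ArithProfile());
    if (addResult.isNewEntry)
        addResult.iterator->value = bytecode.metadata(m_codeBlock).arithProfile;
    return addResult.iterator->value;
}

Both the fast-path emitter and its emitSlow_* counterpart then call this instead of re-reading the live metadata, which is what the comment above means by giving the slow path the same value read by the fast path.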
trunk/Source/JavaScriptCore/jit/JITArithmetic.cpp
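The changes in this file are largely mechanical: raw slot indexing into a fixed-width instruction stream becomes a typed decode of the new variable-width encoding. A small before/after sketch of the pattern, using accessors that actually appear in this patch (OpAdd's dst/lhs/rhs fields, metadata(), jumpTarget(), Instruction::size()); the function itself is invented for illustration:

// Old style: opcodes were fixed-size slot arrays, indexed by convention.
//     int result = currentInstruction[1].u.operand;
//     int op1 = currentInstruction[2].u.operand;
//     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_add));
//
// New style: decode once into a typed struct generated for the opcode.
void JIT::emit_op_example(const Instruction* currentInstruction)
{
    auto bytecode = currentInstruction->as<OpAdd>(); // typed view of narrow/wide forms
    int result = bytecode.dst.offset();              // was currentInstruction[1].u.operand
    int op1 = bytecode.lhs.offset();                 // was currentInstruction[2].u.operand
    int op2 = bytecode.rhs.offset();                 // was currentInstruction[3].u.operand

    emitGetVirtualRegister(op1, regT0);
    emitGetVirtualRegister(op2, regT1);

    // Profiling state moved out of the instruction stream into a side table:
    ArithProfile& profile = bytecode.metadata(m_codeBlock).arithProfile;
    UNUSED_PARAM(profile);

    emitPutVirtualRegister(result, regT0);
}

Jump distances follow the same pattern: the slow paths below use jumpTarget(instruction, bytecode.target) and instruction->size() where the old code used fixed operand slots and OPCODE_LENGTH.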
r237486 r237547 42 42 #include "JITNegGenerator.h" 43 43 #include "JITOperations.h" 44 #include "JITRightShiftGenerator.h"45 44 #include "JITSubGenerator.h" 46 45 #include "JSArray.h" … … 54 53 namespace JSC { 55 54 56 void JIT::emit_op_jless(Instruction* currentInstruction) 57 { 58 int op1 = currentInstruction[1].u.operand; 59 int op2 = currentInstruction[2].u.operand; 60 unsigned target = currentInstruction[3].u.operand; 61 62 emit_compareAndJump(op_jless, op1, op2, target, LessThan); 63 } 64 65 void JIT::emit_op_jlesseq(Instruction* currentInstruction) 66 { 67 int op1 = currentInstruction[1].u.operand; 68 int op2 = currentInstruction[2].u.operand; 69 unsigned target = currentInstruction[3].u.operand; 70 71 emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual); 72 } 73 74 void JIT::emit_op_jgreater(Instruction* currentInstruction) 75 { 76 int op1 = currentInstruction[1].u.operand; 77 int op2 = currentInstruction[2].u.operand; 78 unsigned target = currentInstruction[3].u.operand; 79 80 emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan); 81 } 82 83 void JIT::emit_op_jgreatereq(Instruction* currentInstruction) 84 { 85 int op1 = currentInstruction[1].u.operand; 86 int op2 = currentInstruction[2].u.operand; 87 unsigned target = currentInstruction[3].u.operand; 88 89 emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual); 90 } 91 92 void JIT::emit_op_jnless(Instruction* currentInstruction) 93 { 94 int op1 = currentInstruction[1].u.operand; 95 int op2 = currentInstruction[2].u.operand; 96 unsigned target = currentInstruction[3].u.operand; 97 98 emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual); 99 } 100 101 void JIT::emit_op_jnlesseq(Instruction* currentInstruction) 102 { 103 int op1 = currentInstruction[1].u.operand; 104 int op2 = currentInstruction[2].u.operand; 105 unsigned target = currentInstruction[3].u.operand; 106 107 emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan); 108 } 109 110 void JIT::emit_op_jngreater(Instruction* currentInstruction) 111 { 112 int op1 = currentInstruction[1].u.operand; 113 int op2 = currentInstruction[2].u.operand; 114 unsigned target = currentInstruction[3].u.operand; 115 116 emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual); 117 } 118 119 void JIT::emit_op_jngreatereq(Instruction* currentInstruction) 120 { 121 int op1 = currentInstruction[1].u.operand; 122 int op2 = currentInstruction[2].u.operand; 123 unsigned target = currentInstruction[3].u.operand; 124 125 emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan); 126 } 127 128 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 129 { 130 int op1 = currentInstruction[1].u.operand; 131 int op2 = currentInstruction[2].u.operand; 132 unsigned target = currentInstruction[3].u.operand; 133 134 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter); 135 } 136 137 void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 138 { 139 int op1 = currentInstruction[1].u.operand; 140 int op2 = currentInstruction[2].u.operand; 141 unsigned target = currentInstruction[3].u.operand; 142 143 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter); 144 } 145 146 void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 147 { 148 int op1 = currentInstruction[1].u.operand; 149 int op2 = 
currentInstruction[2].u.operand; 150 unsigned target = currentInstruction[3].u.operand; 151 152 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter); 153 } 154 155 void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 156 { 157 int op1 = currentInstruction[1].u.operand; 158 int op2 = currentInstruction[2].u.operand; 159 unsigned target = currentInstruction[3].u.operand; 160 161 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter); 162 } 163 164 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 165 { 166 int op1 = currentInstruction[1].u.operand; 167 int op2 = currentInstruction[2].u.operand; 168 unsigned target = currentInstruction[3].u.operand; 169 170 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter); 171 } 172 173 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 174 { 175 int op1 = currentInstruction[1].u.operand; 176 int op2 = currentInstruction[2].u.operand; 177 unsigned target = currentInstruction[3].u.operand; 178 179 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter); 180 } 181 182 void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 183 { 184 int op1 = currentInstruction[1].u.operand; 185 int op2 = currentInstruction[2].u.operand; 186 unsigned target = currentInstruction[3].u.operand; 187 188 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter); 189 } 190 191 void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 192 { 193 int op1 = currentInstruction[1].u.operand; 194 int op2 = currentInstruction[2].u.operand; 195 unsigned target = currentInstruction[3].u.operand; 196 197 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter); 198 } 199 200 void JIT::emit_op_below(Instruction* currentInstruction) 201 { 202 int dst = currentInstruction[1].u.operand; 203 int op1 = currentInstruction[2].u.operand; 204 int op2 = currentInstruction[3].u.operand; 205 emit_compareUnsigned(dst, op1, op2, Below); 206 } 207 208 void JIT::emit_op_beloweq(Instruction* currentInstruction) 209 { 210 int dst = currentInstruction[1].u.operand; 211 int op1 = currentInstruction[2].u.operand; 212 int op2 = currentInstruction[3].u.operand; 213 emit_compareUnsigned(dst, op1, op2, BelowOrEqual); 214 } 215 216 void JIT::emit_op_jbelow(Instruction* currentInstruction) 217 { 218 int op1 = currentInstruction[1].u.operand; 219 int op2 = currentInstruction[2].u.operand; 220 unsigned target = currentInstruction[3].u.operand; 221 222 emit_compareUnsignedAndJump(op1, op2, target, Below); 223 } 224 225 void JIT::emit_op_jbeloweq(Instruction* currentInstruction) 226 { 227 int op1 = currentInstruction[1].u.operand; 228 int op2 = currentInstruction[2].u.operand; 229 unsigned target = currentInstruction[3].u.operand; 230 231 emit_compareUnsignedAndJump(op1, op2, target, BelowOrEqual); 55 void JIT::emit_op_jless(const Instruction* currentInstruction) 56 { 57 emit_compareAndJump<OpJless>(currentInstruction, LessThan); 58 } 59 60 void JIT::emit_op_jlesseq(const Instruction* currentInstruction) 61 { 62 emit_compareAndJump<OpJlesseq>(currentInstruction, 
LessThanOrEqual); 63 } 64 65 void JIT::emit_op_jgreater(const Instruction* currentInstruction) 66 { 67 emit_compareAndJump<OpJgreater>(currentInstruction, GreaterThan); 68 } 69 70 void JIT::emit_op_jgreatereq(const Instruction* currentInstruction) 71 { 72 emit_compareAndJump<OpJgreatereq>(currentInstruction, GreaterThanOrEqual); 73 } 74 75 void JIT::emit_op_jnless(const Instruction* currentInstruction) 76 { 77 emit_compareAndJump<OpJnless>(currentInstruction, GreaterThanOrEqual); 78 } 79 80 void JIT::emit_op_jnlesseq(const Instruction* currentInstruction) 81 { 82 emit_compareAndJump<OpJnlesseq>(currentInstruction, GreaterThan); 83 } 84 85 void JIT::emit_op_jngreater(const Instruction* currentInstruction) 86 { 87 emit_compareAndJump<OpJngreater>(currentInstruction, LessThanOrEqual); 88 } 89 90 void JIT::emit_op_jngreatereq(const Instruction* currentInstruction) 91 { 92 emit_compareAndJump<OpJngreatereq>(currentInstruction, LessThan); 93 } 94 95 void JIT::emitSlow_op_jless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 96 { 97 emit_compareAndJumpSlow<OpJless>(currentInstruction, DoubleLessThan, operationCompareLess, false, iter); 98 } 99 100 void JIT::emitSlow_op_jlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 101 { 102 emit_compareAndJumpSlow<OpJlesseq>(currentInstruction, DoubleLessThanOrEqual, operationCompareLessEq, false, iter); 103 } 104 105 void JIT::emitSlow_op_jgreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 106 { 107 emit_compareAndJumpSlow<OpJgreater>(currentInstruction, DoubleGreaterThan, operationCompareGreater, false, iter); 108 } 109 110 void JIT::emitSlow_op_jgreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 111 { 112 emit_compareAndJumpSlow<OpJgreatereq>(currentInstruction, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter); 113 } 114 115 void JIT::emitSlow_op_jnless(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 116 { 117 emit_compareAndJumpSlow<OpJnless>(currentInstruction, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter); 118 } 119 120 void JIT::emitSlow_op_jnlesseq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 121 { 122 emit_compareAndJumpSlow<OpJnlesseq>(currentInstruction, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter); 123 } 124 125 void JIT::emitSlow_op_jngreater(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 126 { 127 emit_compareAndJumpSlow<OpJngreater>(currentInstruction, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter); 128 } 129 130 void JIT::emitSlow_op_jngreatereq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 131 { 132 emit_compareAndJumpSlow<OpJngreatereq>(currentInstruction, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter); 133 } 134 135 void JIT::emit_op_below(const Instruction* currentInstruction) 136 { 137 emit_compareUnsigned<OpBelow>(currentInstruction, Below); 138 } 139 140 void JIT::emit_op_beloweq(const Instruction* currentInstruction) 141 { 142 emit_compareUnsigned<OpBeloweq>(currentInstruction, BelowOrEqual); 143 } 144 145 void JIT::emit_op_jbelow(const Instruction* currentInstruction) 146 { 147 emit_compareUnsignedAndJump<OpJbelow>(currentInstruction, Below); 148 } 149 150 void JIT::emit_op_jbeloweq(const Instruction* currentInstruction) 151 { 152 
emit_compareUnsignedAndJump<OpJbeloweq>(currentInstruction, BelowOrEqual); 232 153 } 233 154 234 155 #if USE(JSVALUE64) 235 156 236 void JIT::emit_op_unsigned(Instruction* currentInstruction) 237 { 238 int result = currentInstruction[1].u.operand; 239 int op1 = currentInstruction[2].u.operand; 157 void JIT::emit_op_unsigned(const Instruction* currentInstruction) 158 { 159 auto bytecode = currentInstruction->as<OpUnsigned>(); 160 int result = bytecode.dst.offset(); 161 int op1 = bytecode.operand.offset(); 240 162 241 163 emitGetVirtualRegister(op1, regT0); … … 246 168 } 247 169 248 void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition) 170 template<typename Op> 171 void JIT::emit_compareAndJump(const Instruction* instruction, RelationalCondition condition) 249 172 { 250 173 // We generate inline code for the following cases in the fast path: … … 253 176 // - int immediate to int immediate 254 177 178 auto bytecode = instruction->as<Op>(); 179 int op1 = bytecode.lhs.offset(); 180 int op2 = bytecode.rhs.offset(); 181 unsigned target = jumpTarget(instruction, bytecode.target); 255 182 if (isOperandConstantChar(op1)) { 256 183 emitGetVirtualRegister(op2, regT0); … … 293 220 } 294 221 295 void JIT::emit_compareUnsignedAndJump(int op1, int op2, unsigned target, RelationalCondition condition) 296 { 222 template<typename Op> 223 void JIT::emit_compareUnsignedAndJump(const Instruction* instruction, RelationalCondition condition) 224 { 225 auto bytecode = instruction->as<Op>(); 226 int op1 = bytecode.lhs.offset(); 227 int op2 = bytecode.rhs.offset(); 228 unsigned target = jumpTarget(instruction, bytecode.target); 297 229 if (isOperandConstantInt(op2)) { 298 230 emitGetVirtualRegister(op1, regT0); … … 309 241 } 310 242 311 void JIT::emit_compareUnsigned(int dst, int op1, int op2, RelationalCondition condition) 312 { 243 template<typename Op> 244 void JIT::emit_compareUnsigned(const Instruction* instruction, RelationalCondition condition) 245 { 246 auto bytecode = instruction->as<Op>(); 247 int dst = bytecode.dst.offset(); 248 int op1 = bytecode.lhs.offset(); 249 int op2 = bytecode.rhs.offset(); 313 250 if (isOperandConstantInt(op2)) { 314 251 emitGetVirtualRegister(op1, regT0); … … 327 264 } 328 265 329 void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter) 330 { 331 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless); 332 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless); 333 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless); 334 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless); 335 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless); 336 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless); 337 COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless); 338 266 template<typename Op> 267 void JIT::emit_compareAndJumpSlow(const Instruction* instruction, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), 
bool invert, Vector<SlowCaseEntry>::iterator& iter) 268 { 269 auto bytecode = instruction->as<Op>(); 270 int op1 = bytecode.lhs.offset(); 271 int op2 = bytecode.rhs.offset(); 272 unsigned target = jumpTarget(instruction, bytecode.target); 273 339 274 // We generate inline code for the following cases in the slow path: 340 275 // - floating-point number to constant int immediate … … 366 301 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target); 367 302 368 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));303 emitJumpSlowToHot(jump(), instruction->size()); 369 304 370 305 fail1.link(this); … … 392 327 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target); 393 328 394 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));329 emitJumpSlowToHot(jump(), instruction->size()); 395 330 396 331 fail1.link(this); … … 416 351 emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target); 417 352 418 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));353 emitJumpSlowToHot(jump(), instruction->size()); 419 354 420 355 fail1.link(this); … … 428 363 } 429 364 430 void JIT::emit_op_inc(Instruction* currentInstruction) 431 { 432 int srcDst = currentInstruction[1].u.operand; 365 void JIT::emit_op_inc(const Instruction* currentInstruction) 366 { 367 auto bytecode = currentInstruction->as<OpInc>(); 368 int srcDst = bytecode.srcDst.offset(); 433 369 434 370 emitGetVirtualRegister(srcDst, regT0); … … 439 375 } 440 376 441 void JIT::emit_op_dec(Instruction* currentInstruction) 442 { 443 int srcDst = currentInstruction[1].u.operand; 377 void JIT::emit_op_dec(const Instruction* currentInstruction) 378 { 379 auto bytecode = currentInstruction->as<OpDec>(); 380 int srcDst = bytecode.srcDst.offset(); 444 381 445 382 emitGetVirtualRegister(srcDst, regT0); … … 454 391 #if CPU(X86_64) 455 392 456 void JIT::emit_op_mod(Instruction* currentInstruction) 457 { 458 int result = currentInstruction[1].u.operand; 459 int op1 = currentInstruction[2].u.operand; 460 int op2 = currentInstruction[3].u.operand; 393 void JIT::emit_op_mod(const Instruction* currentInstruction) 394 { 395 auto bytecode = currentInstruction->as<OpMod>(); 396 int result = bytecode.dst.offset(); 397 int op1 = bytecode.lhs.offset(); 398 int op2 = bytecode.rhs.offset(); 461 399 462 400 // Make sure registers are correct for x86 IDIV instructions. 
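// (Context for the comment above, unchanged by this patch: 32-bit idiv divides
// the double-width value in edx:eax, leaving the quotient in eax and the
// remainder in edx, so both operands must be shuffled into those fixed
// registers, with eax sign-extended into edx, before the division is emitted.)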
… … 485 423 } 486 424 487 void JIT::emitSlow_op_mod( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)425 void JIT::emitSlow_op_mod(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 488 426 { 489 427 linkAllSlowCases(iter); … … 495 433 #else // CPU(X86_64) 496 434 497 void JIT::emit_op_mod( Instruction* currentInstruction)435 void JIT::emit_op_mod(const Instruction* currentInstruction) 498 436 { 499 437 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod); … … 501 439 } 502 440 503 void JIT::emitSlow_op_mod( Instruction*, Vector<SlowCaseEntry>::iterator&)441 void JIT::emitSlow_op_mod(const Instruction*, Vector<SlowCaseEntry>::iterator&) 504 442 { 505 443 UNREACHABLE_FOR_PLATFORM(); … … 512 450 #endif // USE(JSVALUE64) 513 451 514 void JIT::emit_op_negate( Instruction* currentInstruction)515 { 516 ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);452 void JIT::emit_op_negate(const Instruction* currentInstruction) 453 { 454 ArithProfile* arithProfile = &currentInstruction->as<OpNegate>().metadata(m_codeBlock).arithProfile; 517 455 JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile, currentInstruction); 518 456 m_instructionToMathIC.add(currentInstruction, negateIC); 519 emitMathICFast (negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);520 } 521 522 void JIT::emitSlow_op_negate( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)457 emitMathICFast<OpNegate>(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate); 458 } 459 460 void JIT::emitSlow_op_negate(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 523 461 { 524 462 linkAllSlowCases(iter); 525 463 526 464 JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction)); 527 emitMathICSlow(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize); 528 } 529 530 template<typename SnippetGenerator> 531 void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction, ProfilingPolicy profilingPolicy) 532 { 533 int result = currentInstruction[1].u.operand; 534 int op1 = currentInstruction[2].u.operand; 535 int op2 = currentInstruction[3].u.operand; 465 emitMathICSlow<OpNegate>(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize); 466 } 467 468 template<typename Op, typename SnippetGenerator> 469 void JIT::emitBitBinaryOpFastPath(const Instruction* currentInstruction, ProfilingPolicy profilingPolicy) 470 { 471 auto bytecode = currentInstruction->as<Op>(); 472 int result = bytecode.dst.offset(); 473 int op1 = bytecode.lhs.offset(); 474 int op2 = bytecode.rhs.offset(); 536 475 537 476 #if USE(JSVALUE64) … … 569 508 gen.endJumpList().link(this); 570 509 if (profilingPolicy == ProfilingPolicy::ShouldEmitProfiling) 571 emitValueProfilingSite ();510 emitValueProfilingSiteIfProfiledOpcode(bytecode); 572 511 emitPutVirtualRegister(result, resultRegs); 573 512 … … 575 514 } 576 515 577 void JIT::emit_op_bitand( Instruction* currentInstruction)578 { 579 emitBitBinaryOpFastPath< JITBitAndGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);580 } 581 582 void JIT::emit_op_bitor( Instruction* currentInstruction)583 { 584 emitBitBinaryOpFastPath< JITBitOrGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling);585 } 586 587 void JIT::emit_op_bitxor( Instruction* 
currentInstruction)588 { 589 emitBitBinaryOpFastPath< JITBitXorGenerator>(currentInstruction);590 } 591 592 void JIT::emit_op_lshift( Instruction* currentInstruction)593 { 594 emitBitBinaryOpFastPath< JITLeftShiftGenerator>(currentInstruction);595 } 596 597 void JIT::emitRightShiftFastPath( Instruction* currentInstruction, OpcodeID opcodeID)516 void JIT::emit_op_bitand(const Instruction* currentInstruction) 517 { 518 emitBitBinaryOpFastPath<OpBitand, JITBitAndGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling); 519 } 520 521 void JIT::emit_op_bitor(const Instruction* currentInstruction) 522 { 523 emitBitBinaryOpFastPath<OpBitor, JITBitOrGenerator>(currentInstruction, ProfilingPolicy::ShouldEmitProfiling); 524 } 525 526 void JIT::emit_op_bitxor(const Instruction* currentInstruction) 527 { 528 emitBitBinaryOpFastPath<OpBitxor, JITBitXorGenerator>(currentInstruction); 529 } 530 531 void JIT::emit_op_lshift(const Instruction* currentInstruction) 532 { 533 emitBitBinaryOpFastPath<OpLshift, JITLeftShiftGenerator>(currentInstruction); 534 } 535 536 void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, OpcodeID opcodeID) 598 537 { 599 538 ASSERT(opcodeID == op_rshift || opcodeID == op_urshift); 600 601 JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ? 602 JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift; 604 int result = currentInstruction[1].u.operand; 605 int op1 = currentInstruction[2].u.operand; 606 int op2 = currentInstruction[3].u.operand; 539 switch (opcodeID) { 540 case op_rshift: 541 emitRightShiftFastPath<OpRshift>(currentInstruction, JITRightShiftGenerator::SignedShift); 542 break; 543 case op_urshift: 544 emitRightShiftFastPath<OpUrshift>(currentInstruction, JITRightShiftGenerator::UnsignedShift); 545 break; 546 default: 547 ASSERT_NOT_REACHED(); 548 } 549 } 550 551 template<typename Op> 552 void JIT::emitRightShiftFastPath(const Instruction* currentInstruction, JITRightShiftGenerator::ShiftType snippetShiftType) 553 { 554 auto bytecode = currentInstruction->as<Op>(); 555 int result = bytecode.dst.offset(); 556 int op1 = bytecode.lhs.offset(); 557 int op2 = bytecode.rhs.offset(); 607 558 608 559 #if USE(JSVALUE64) … … 647 598 } 648 599 649 void JIT::emit_op_rshift( Instruction* currentInstruction)600 { 651 602 emitRightShiftFastPath(currentInstruction, op_rshift); 652 603 } 653 604 654 void JIT::emit_op_urshift( Instruction* currentInstruction)605 void JIT::emit_op_urshift(const Instruction* currentInstruction) 655 606 { 656 607 emitRightShiftFastPath(currentInstruction, op_urshift); 657 608 } 658 609 659 ALWAYS_INLINE static OperandTypes getOperandTypes( Instruction* instruction)660 { 661 return OperandTypes( ArithProfile::fromInt(instruction[4].u.operand).lhsResultType(), ArithProfile::fromInt(instruction[4].u.operand).rhsResultType());662 } 663 664 void JIT::emit_op_add( Instruction* currentInstruction)665 { 666 ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);610 ALWAYS_INLINE static OperandTypes getOperandTypes(const ArithProfile& arithProfile) 611 { 612 return OperandTypes(arithProfile.lhsResultType(), arithProfile.rhsResultType()); 613 } 614 615 void JIT::emit_op_add(const Instruction* currentInstruction) 616 { 617 ArithProfile* arithProfile = &currentInstruction->as<OpAdd>().metadata(m_codeBlock).arithProfile; 618 JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile, 
currentInstruction); 668 619 m_instructionToMathIC.add(currentInstruction, addIC); 669 emitMathICFast (addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);670 } 671 672 void JIT::emitSlow_op_add( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)620 emitMathICFast<OpAdd>(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd); 621 } 622 623 void JIT::emitSlow_op_add(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 673 624 { 674 625 linkAllSlowCases(iter); 675 626 676 627 JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction)); 677 emitMathICSlow(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize); 678 } 679 680 template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> 681 void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) 682 { 683 int result = currentInstruction[1].u.operand; 684 int operand = currentInstruction[2].u.operand; 628 emitMathICSlow<OpAdd>(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize); 629 } 630 631 template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> 632 void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) 633 { 634 auto bytecode = currentInstruction->as<Op>(); 635 int result = bytecode.dst.offset(); 636 int operand = bytecode.operand.offset(); 685 637 686 638 #if USE(JSVALUE64) … … 727 679 } 728 680 729 template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction> 730 void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) 731 { 732 int result = currentInstruction[1].u.operand; 733 int op1 = currentInstruction[2].u.operand; 734 int op2 = currentInstruction[3].u.operand; 681 template <typename Op, typename Generator, typename ProfiledFunction, typename NonProfiledFunction> 682 void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction) 683 { 684 auto bytecode = currentInstruction->as<Op>(); 685 OperandTypes types = getOperandTypes(copiedArithProfile(bytecode)); 686 int result = bytecode.dst.offset(); 687 int op1 = bytecode.lhs.offset(); 688 int op2 = bytecode.rhs.offset(); 735 689 736 690 #if USE(JSVALUE64) 737 OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));738 691 JSValueRegs leftRegs = JSValueRegs(regT1); 739 692 JSValueRegs rightRegs = JSValueRegs(regT2); … … 742 695 FPRReg scratchFPR = fpRegT2; 743 696 #else 744 OperandTypes types = getOperandTypes(currentInstruction);745 697 JSValueRegs leftRegs = JSValueRegs(regT1, regT0); 746 698 JSValueRegs rightRegs = JSValueRegs(regT3, regT2); … … 800 752 } 801 753 802 template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>803 void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction 
repatchFunction)754 template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> 755 void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) 804 756 { 805 757 MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value; 806 758 mathICGenerationState.slowPathStart = label(); 807 759 808 int result = currentInstruction[1].u.operand; 760 auto bytecode = currentInstruction->as<Op>(); 761 int result = bytecode.dst.offset(); 809 762 810 763 #if USE(JSVALUE64) … … 845 798 } 846 799 847 template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>848 void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)800 template <typename Op, typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction> 801 void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, const Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction) 849 802 { 850 803 MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value; 851 804 mathICGenerationState.slowPathStart = label(); 852 805 853 int result = currentInstruction[1].u.operand; 854 int op1 = currentInstruction[2].u.operand; 855 int op2 = currentInstruction[3].u.operand; 806 auto bytecode = currentInstruction->as<Op>(); 807 OperandTypes types = getOperandTypes(copiedArithProfile(bytecode)); 808 int result = bytecode.dst.offset(); 809 int op1 = bytecode.lhs.offset(); 810 int op2 = bytecode.rhs.offset(); 856 811 857 812 #if USE(JSVALUE64) 858 OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));859 813 JSValueRegs leftRegs = JSValueRegs(regT1); 860 814 JSValueRegs rightRegs = JSValueRegs(regT2); 861 815 JSValueRegs resultRegs = JSValueRegs(regT0); 862 816 #else 863 OperandTypes types = getOperandTypes(currentInstruction);864 817 JSValueRegs leftRegs = JSValueRegs(regT1, regT0); 865 818 JSValueRegs rightRegs = JSValueRegs(regT3, regT2); … … 911 864 } 912 865 913 void JIT::emit_op_div(Instruction* currentInstruction) 914 { 915 int result = currentInstruction[1].u.operand; 916 int op1 = currentInstruction[2].u.operand; 917 int op2 = currentInstruction[3].u.operand; 866 void JIT::emit_op_div(const Instruction* currentInstruction) 867 { 868 auto bytecode = currentInstruction->as<OpDiv>(); 869 auto& metadata = bytecode.metadata(m_codeBlock); 870 int result = bytecode.dst.offset(); 871 int op1 = bytecode.lhs.offset(); 872 int op2 = bytecode.rhs.offset(); 918 873 919 874 #if USE(JSVALUE64) 920 OperandTypes types = getOperandTypes( copiedInstruction(currentInstruction));875 OperandTypes types = getOperandTypes(metadata.arithProfile); 921 876 JSValueRegs leftRegs = JSValueRegs(regT0); 922 877 JSValueRegs rightRegs = JSValueRegs(regT1); … … 924 879 GPRReg scratchGPR = regT2; 925 880 #else 926 OperandTypes types = getOperandTypes( currentInstruction);881 OperandTypes types = getOperandTypes(metadata.arithProfile); 927 882 JSValueRegs leftRegs = JSValueRegs(regT1, regT0); 928 883 
JSValueRegs rightRegs = JSValueRegs(regT3, regT2); … … 934 889 ArithProfile* arithProfile = nullptr; 935 890 if (shouldEmitProfiling()) 936 arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);891 arithProfile = &currentInstruction->as<OpDiv>().metadata(m_codeBlock).arithProfile; 937 892 938 893 SnippetOperand leftOperand(types.first()); … … 977 932 } 978 933 979 void JIT::emit_op_mul( Instruction* currentInstruction)980 { 981 ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);934 void JIT::emit_op_mul(const Instruction* currentInstruction) 935 { 936 ArithProfile* arithProfile = &currentInstruction->as<OpMul>().metadata(m_codeBlock).arithProfile; 982 937 JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile, currentInstruction); 983 938 m_instructionToMathIC.add(currentInstruction, mulIC); 984 emitMathICFast (mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);985 } 986 987 void JIT::emitSlow_op_mul( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)939 emitMathICFast<OpMul>(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul); 940 } 941 942 void JIT::emitSlow_op_mul(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 988 943 { 989 944 linkAllSlowCases(iter); 990 945 991 946 JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction)); 992 emitMathICSlow (mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);993 } 994 995 void JIT::emit_op_sub( Instruction* currentInstruction)996 { 997 ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);947 emitMathICSlow<OpMul>(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize); 948 } 949 950 void JIT::emit_op_sub(const Instruction* currentInstruction) 951 { 952 ArithProfile* arithProfile = &currentInstruction->as<OpSub>().metadata(m_codeBlock).arithProfile; 998 953 JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile, currentInstruction); 999 954 m_instructionToMathIC.add(currentInstruction, subIC); 1000 emitMathICFast (subIC, currentInstruction, operationValueSubProfiled, operationValueSub);1001 } 1002 1003 void JIT::emitSlow_op_sub( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)955 emitMathICFast<OpSub>(subIC, currentInstruction, operationValueSubProfiled, operationValueSub); 956 } 957 958 void JIT::emitSlow_op_sub(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 1004 959 { 1005 960 linkAllSlowCases(iter); 1006 961 1007 962 JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction)); 1008 emitMathICSlow (subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);963 emitMathICSlow<OpSub>(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize); 1009 964 } 1010 965 -
trunk/Source/JavaScriptCore/jit/JITCall.cpp
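In this file the eight call opcodes are now compiled by a single template, compileOpCall<Op>, with the per-opcode differences resolved at compile time: SFINAE on Op::opcodeID selects the fixed-argument or varargs compileSetupFrame overload, and full specializations of compileCallEval/compileTailCall let the generic instantiation fall through by returning false. A self-contained toy program showing just that dispatch shape (not WebKit code; the names simply mirror the patch):

#include <cstdio>

struct OpCall { static constexpr int opcodeID = 0; };
struct OpCallEval { static constexpr int opcodeID = 1; };

// Generic opcodes have nothing eval-specific to emit; report "not handled".
template<typename Op>
bool compileCallEval(const Op&) { return false; }

// The eval opcode gets a full specialization that handles the whole call.
template<>
bool compileCallEval(const OpCallEval&)
{
    std::puts("emit eval-specific call sequence");
    return true;
}

template<typename Op>
void compileOpCall(const Op& bytecode)
{
    if (compileCallEval(bytecode))
        return; // the specialization emitted the call and stored the result
    std::puts("emit generic call sequence");
}

int main()
{
    compileOpCall(OpCall {});     // generic path
    compileOpCall(OpCallEval {}); // eval path
}

The same trick drives compileTailCall below: the primary template returns false for every opcode except OpTailCall, whose specialization builds the CallFrameShuffleData and emits the naked tail call.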
r237486 r237547 48 48 namespace JSC { 49 49 50 void JIT::emitPutCallResult(Instruction* instruction) 51 { 52 int dst = instruction[1].u.operand; 53 emitValueProfilingSite(); 54 emitPutVirtualRegister(dst); 55 } 56 57 void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info) 58 { 59 int thisValue = instruction[3].u.operand; 60 int arguments = instruction[4].u.operand; 61 int firstFreeRegister = instruction[5].u.operand; 62 int firstVarArgOffset = instruction[6].u.operand; 50 template<typename Op> 51 void JIT::emitPutCallResult(const Op& bytecode) 52 { 53 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 54 emitPutVirtualRegister(bytecode.dst.offset()); 55 } 56 57 template<typename Op> 58 std::enable_if_t< 59 Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs 60 && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments 61 , void> 62 JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*) 63 { 64 auto& metadata = bytecode.metadata(m_codeBlock); 65 int argCount = bytecode.argc; 66 int registerOffset = -static_cast<int>(bytecode.argv); 67 68 if (Op::opcodeID == op_call && shouldEmitProfiling()) { 69 emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0); 70 Jump done = branchIfNotCell(regT0); 71 load32(Address(regT0, JSCell::structureIDOffset()), regT0); 72 store32(regT0, metadata.arrayProfile.addressOfLastSeenStructureID()); 73 done.link(this); 74 } 75 76 addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); 77 store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); 78 } 79 80 81 template<typename Op> 82 std::enable_if_t< 83 Op::opcodeID == op_call_varargs || Op::opcodeID == op_construct_varargs 84 || Op::opcodeID == op_tail_call_varargs || Op::opcodeID == op_tail_call_forward_arguments 85 , void> 86 JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo* info) 87 { 88 int thisValue = bytecode.thisValue.offset(); 89 int arguments = bytecode.arguments.offset(); 90 int firstFreeRegister = bytecode.firstFree.offset(); 91 int firstVarArgOffset = bytecode.firstVarArg; 63 92 64 93 emitGetVirtualRegister(arguments, regT1); 65 94 Z_JITOperation_EJZZ sizeOperation; 66 if ( opcode== op_tail_call_forward_arguments)95 if (Op::opcodeID == op_tail_call_forward_arguments) 67 96 sizeOperation = operationSizeFrameForForwardArguments; 68 97 else … … 74 103 emitGetVirtualRegister(arguments, regT2); 75 104 F_JITOperation_EFJZZ setupOperation; 76 if ( opcode== op_tail_call_forward_arguments)105 if (Op::opcodeID == op_tail_call_forward_arguments) 77 106 setupOperation = operationSetupForwardArgumentsFrame; 78 107 else … … 95 124 } 96 125 97 void JIT::compileCallEval(Instruction* instruction) 126 template<typename Op> 127 bool JIT::compileCallEval(const Op&) 128 { 129 return false; 130 } 131 132 template<> 133 bool JIT::compileCallEval(const OpCallEval& bytecode) 98 134 { 99 135 addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1); … … 109 145 sampleCodeBlock(m_codeBlock); 110 146 111 emitPutCallResult(instruction); 112 } 113 114 void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter) 147 emitPutCallResult(bytecode); 148 149 return true; 150 } 151 152 void JIT::compileCallEvalSlowCase(const Instruction* 
instruction, Vector<SlowCaseEntry>::iterator& iter) 115 153 { 116 154 linkAllSlowCases(iter); 117 155 156 auto bytecode = instruction->as<OpCallEval>(); 118 157 CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); 119 158 info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); 120 159 121 int registerOffset = - instruction[4].u.operand;160 int registerOffset = -bytecode.argv; 122 161 123 162 addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); … … 130 169 sampleCodeBlock(m_codeBlock); 131 170 132 emitPutCallResult(instruction); 133 } 134 135 void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex) 136 { 137 int callee = instruction[2].u.operand; 171 emitPutCallResult(bytecode); 172 } 173 174 template<typename Op> 175 bool JIT::compileTailCall(const Op&, CallLinkInfo*, unsigned) 176 { 177 return false; 178 } 179 180 template<> 181 bool JIT::compileTailCall(const OpTailCall& bytecode, CallLinkInfo* info, unsigned callLinkInfoIndex) 182 { 183 CallFrameShuffleData shuffleData; 184 shuffleData.numPassedArgs = bytecode.argc; 185 shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister; 186 shuffleData.numLocals = 187 bytecode.argv - sizeof(CallerFrameAndPC) / sizeof(Register); 188 shuffleData.args.resize(bytecode.argc); 189 for (unsigned i = 0; i < bytecode.argc; ++i) { 190 shuffleData.args[i] = 191 ValueRecovery::displacedInJSStack( 192 virtualRegisterForArgument(i) - bytecode.argv, 193 DataFormatJS); 194 } 195 shuffleData.callee = 196 ValueRecovery::inGPR(regT0, DataFormatJS); 197 shuffleData.setupCalleeSaveRegisters(m_codeBlock); 198 info->setFrameShuffleData(shuffleData); 199 CallFrameShuffler(*this, shuffleData).prepareForTailCall(); 200 m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); 201 return true; 202 } 203 204 template<typename Op> 205 void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoIndex) 206 { 207 OpcodeID opcodeID = Op::opcodeID; 208 auto bytecode = instruction->as<Op>(); 209 int callee = bytecode.callee.offset(); 138 210 139 211 /* Caller always: … … 149 221 - Caller restores callFrameRegister after return. 
150 222 */ 151 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);152 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);153 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);154 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);155 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);156 COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);157 158 223 CallLinkInfo* info = nullptr; 159 224 if (opcodeID != op_call_eval) 160 225 info = m_codeBlock->addCallLinkInfo(); 161 if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) 162 compileSetupVarargsFrame(opcodeID, instruction, info); 163 else { 164 int argCount = instruction[3].u.operand; 165 int registerOffset = -instruction[4].u.operand; 166 167 if (opcodeID == op_call && shouldEmitProfiling()) { 168 emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0); 169 Jump done = branchIfNotCell(regT0); 170 load32(Address(regT0, JSCell::structureIDOffset()), regT0); 171 store32(regT0, arrayProfileFor<OpCallShape>(instruction)->addressOfLastSeenStructureID()); 172 done.link(this); 173 } 174 175 addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); 176 store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); 177 } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. 178 226 compileSetupFrame(bytecode, info); 227 228 // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. 
179 229 uint32_t bytecodeOffset = m_codeBlock->bytecodeOffset(instruction); 180 230 uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits(); … … 184 234 store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC))); 185 235 186 if (opcodeID == op_call_eval) { 187 compileCallEval(instruction); 236 if (compileCallEval(bytecode)) { 188 237 return; 189 238 } … … 199 248 m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info; 200 249 201 if (opcodeID == op_tail_call) { 202 CallFrameShuffleData shuffleData; 203 shuffleData.numPassedArgs = instruction[3].u.operand; 204 shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister; 205 shuffleData.numLocals = 206 instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register); 207 shuffleData.args.resize(instruction[3].u.operand); 208 for (int i = 0; i < instruction[3].u.operand; ++i) { 209 shuffleData.args[i] = 210 ValueRecovery::displacedInJSStack( 211 virtualRegisterForArgument(i) - instruction[4].u.operand, 212 DataFormatJS); 213 } 214 shuffleData.callee = 215 ValueRecovery::inGPR(regT0, DataFormatJS); 216 shuffleData.setupCalleeSaveRegisters(m_codeBlock); 217 info->setFrameShuffleData(shuffleData); 218 CallFrameShuffler(*this, shuffleData).prepareForTailCall(); 219 m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); 250 if (compileTailCall(bytecode, info, callLinkInfoIndex)) { 220 251 return; 221 252 } … … 235 266 sampleCodeBlock(m_codeBlock); 236 267 237 emitPutCallResult(instruction); 238 } 239 240 void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex) 241 { 242 if (opcodeID == op_call_eval) { 243 compileCallEvalSlowCase(instruction, iter); 244 return; 245 } 268 emitPutCallResult(bytecode); 269 } 270 271 template<typename Op> 272 void JIT::compileOpCallSlowCase(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex) 273 { 274 OpcodeID opcodeID = Op::opcodeID; 275 ASSERT(opcodeID != op_call_eval); 246 276 247 277 linkAllSlowCases(iter); … … 265 295 sampleCodeBlock(m_codeBlock); 266 296 267 emitPutCallResult(instruction); 268 } 269 270 void JIT::emit_op_call(Instruction* currentInstruction) 271 { 272 compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); 273 } 274 275 void JIT::emit_op_tail_call(Instruction* currentInstruction) 276 { 277 compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++); 278 } 279 280 void JIT::emit_op_call_eval(Instruction* currentInstruction) 281 { 282 compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex); 283 } 284 285 void JIT::emit_op_call_varargs(Instruction* currentInstruction) 286 { 287 compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++); 288 } 289 290 void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction) 291 { 292 compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++); 293 } 294 295 void JIT::emit_op_tail_call_forward_arguments(Instruction* currentInstruction) 296 { 297 compileOpCall(op_tail_call_forward_arguments, currentInstruction, m_callLinkInfoIndex++); 298 } 299 300 void JIT::emit_op_construct_varargs(Instruction* currentInstruction) 301 { 302 compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++); 303 } 304 305 void JIT::emit_op_construct(Instruction* currentInstruction) 306 { 307 compileOpCall(op_construct, currentInstruction, 
m_callLinkInfoIndex++); 308 } 309 310 void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 311 { 312 compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++); 313 } 314 315 void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 316 { 317 compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++); 318 } 319 320 void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 321 { 322 compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex); 297 auto bytecode = instruction->as<Op>(); 298 emitPutCallResult(bytecode); 299 } 300 301 void JIT::emit_op_call(const Instruction* currentInstruction) 302 { 303 compileOpCall<OpCall>(currentInstruction, m_callLinkInfoIndex++); 304 } 305 306 void JIT::emit_op_tail_call(const Instruction* currentInstruction) 307 { 308 compileOpCall<OpTailCall>(currentInstruction, m_callLinkInfoIndex++); 309 } 310 311 void JIT::emit_op_call_eval(const Instruction* currentInstruction) 312 { 313 compileOpCall<OpCallEval>(currentInstruction, m_callLinkInfoIndex); 314 } 315 316 void JIT::emit_op_call_varargs(const Instruction* currentInstruction) 317 { 318 compileOpCall<OpCallVarargs>(currentInstruction, m_callLinkInfoIndex++); 319 } 320 321 void JIT::emit_op_tail_call_varargs(const Instruction* currentInstruction) 322 { 323 compileOpCall<OpTailCallVarargs>(currentInstruction, m_callLinkInfoIndex++); 324 } 325 326 void JIT::emit_op_tail_call_forward_arguments(const Instruction* currentInstruction) 327 { 328 compileOpCall<OpTailCallForwardArguments>(currentInstruction, m_callLinkInfoIndex++); 329 } 330 331 void JIT::emit_op_construct_varargs(const Instruction* currentInstruction) 332 { 333 compileOpCall<OpConstructVarargs>(currentInstruction, m_callLinkInfoIndex++); 334 } 335 336 void JIT::emit_op_construct(const Instruction* currentInstruction) 337 { 338 compileOpCall<OpConstruct>(currentInstruction, m_callLinkInfoIndex++); 339 } 340 341 void JIT::emitSlow_op_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 342 { 343 compileOpCallSlowCase<OpCall>(currentInstruction, iter, m_callLinkInfoIndex++); 344 } 345 346 void JIT::emitSlow_op_tail_call(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 347 { 348 compileOpCallSlowCase<OpTailCall>(currentInstruction, iter, m_callLinkInfoIndex++); 349 } 350 351 void JIT::emitSlow_op_call_eval(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 352 { 353 compileCallEvalSlowCase(currentInstruction, iter); 323 354 } 324 355 325 void JIT::emitSlow_op_call_varargs( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)326 { 327 compileOpCallSlowCase (op_call_varargs,currentInstruction, iter, m_callLinkInfoIndex++);328 } 329 330 void JIT::emitSlow_op_tail_call_varargs( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)331 { 332 compileOpCallSlowCase (op_tail_call_varargs,currentInstruction, iter, m_callLinkInfoIndex++);333 } 334 335 void JIT::emitSlow_op_tail_call_forward_arguments( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)336 { 337 compileOpCallSlowCase (op_tail_call_forward_arguments,currentInstruction, iter, m_callLinkInfoIndex++);338 } 339 340 void JIT::emitSlow_op_construct_varargs( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)341 { 342 compileOpCallSlowCase 
(op_construct_varargs,currentInstruction, iter, m_callLinkInfoIndex++);343 } 344 345 void JIT::emitSlow_op_construct( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)346 { 347 compileOpCallSlowCase (op_construct,currentInstruction, iter, m_callLinkInfoIndex++);356 void JIT::emitSlow_op_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 357 { 358 compileOpCallSlowCase<OpCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); 359 } 360 361 void JIT::emitSlow_op_tail_call_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 362 { 363 compileOpCallSlowCase<OpTailCallVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); 364 } 365 366 void JIT::emitSlow_op_tail_call_forward_arguments(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 367 { 368 compileOpCallSlowCase<OpTailCallForwardArguments>(currentInstruction, iter, m_callLinkInfoIndex++); 369 } 370 371 void JIT::emitSlow_op_construct_varargs(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 372 { 373 compileOpCallSlowCase<OpConstructVarargs>(currentInstruction, iter, m_callLinkInfoIndex++); 374 } 375 376 void JIT::emitSlow_op_construct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 377 { 378 compileOpCallSlowCase<OpConstruct>(currentInstruction, iter, m_callLinkInfoIndex++); 348 379 } 349 380 -
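The JITCall.cpp hunks above replace raw operand indexing (instruction[N].u.operand) with typed per-opcode structs obtained through instruction->as<Op>(), and turn the old runtime branch on opcodeID == op_tail_call into a compileTailCall template specialization. A minimal sketch of that dispatch pattern, using hypothetical stand-in types rather than the actual JSC classes:

    // Each opcode is a struct with named fields instead of an array of raw
    // operands; per-opcode behaviour is selected by template specialization
    // at compile time rather than by branching on an OpcodeID at JIT time.
    #include <cstdio>

    struct OpCall     { static constexpr const char* name = "call";      int callee; int argc; int argv; };
    struct OpTailCall { static constexpr const char* name = "tail_call"; int callee; int argc; int argv; };

    // Generic case: most call-like opcodes are not tail calls, so do nothing.
    template<typename Op>
    bool compileTailCall(const Op&) { return false; }

    // Specialization: only OpTailCall takes the frame-shuffling path.
    template<>
    bool compileTailCall<OpTailCall>(const OpTailCall& bytecode)
    {
        std::printf("shuffle frame for tail call, argc=%d\n", bytecode.argc);
        return true;
    }

    template<typename Op>
    void compileOpCall(const Op& bytecode)
    {
        std::printf("set up frame for %s (callee=%d)\n", Op::name, bytecode.callee);
        if (compileTailCall(bytecode))
            return; // a tail call never returns here, so no result handling
        std::printf("emit call and store result\n");
    }

    int main()
    {
        compileOpCall(OpCall { 3, 2, 10 });
        compileOpCall(OpTailCall { 3, 2, 10 });
    }

The payoff mirrors the patch: the tail-call path is compiled out of every non-tail-call opcode instead of being skipped at run time.

-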
trunk/Source/JavaScriptCore/jit/JITDisassembler.cpp
r237486 r237547 42 42 JITDisassembler::JITDisassembler(CodeBlock *codeBlock) 43 43 : m_codeBlock(codeBlock) 44 , m_labelForBytecodeIndexInMainPath(codeBlock->instructionCount()) 45 , m_labelForBytecodeIndexInSlowPath(codeBlock->instructionCount()) 44 , m_labelForBytecodeIndexInMainPath(codeBlock->instructions().size()) 45 , m_labelForBytecodeIndexInSlowPath(codeBlock->instructions().size()) 46 46 { 47 47 } -
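The JITDisassembler.cpp change above swaps instructionCount() for instructions().size(): with the new variable-width encoding there is no fixed count of same-sized slots, so per-bytecode label tables are sized and indexed by offset into the stream. A small sketch of that shift, assuming a hypothetical layout rather than the actual JSC InstructionStream:

    // With variable-width instructions, labels are keyed by byte offset into
    // the stream and iteration advances by each instruction's own size.
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Instruction {
        uint8_t opcode;
        uint8_t length; // total size of this instruction in bytes
    };

    struct InstructionStream {
        std::vector<uint8_t> bytes;
        size_t size() const { return bytes.size(); } // byte size, not a count

        const Instruction* at(size_t offset) const
        {
            return reinterpret_cast<const Instruction*>(bytes.data() + offset);
        }
    };

    int main()
    {
        InstructionStream stream;
        stream.bytes = { /* opcode */ 1, /* length */ 2,
                         /* opcode */ 7, /* length */ 2 };
        // Sized by size(), so any valid bytecode offset can index it, even
        // though offsets that fall inside an instruction stay unused.
        std::vector<int> labelForOffset(stream.size());
        for (size_t offset = 0; offset < stream.size(); offset += stream.at(offset)->length)
            labelForOffset[offset] = 1; // offsets that start an instruction
    }

-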
trunk/Source/JavaScriptCore/jit/JITExceptions.cpp
r237486 r237547 61 61 62 62 void* catchRoutine; 63 Instruction* catchPCForInterpreter = 0; 63 const Instruction* catchPCForInterpreter = nullptr; 64 64 if (handler) { 65 65 // handler->target is meaningless for getting a code offset when catching … … 70 70 // in the proper frame. 71 71 if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType())) 72 catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target]; 72 catchPCForInterpreter = callFrame->codeBlock()->instructions().at(handler->target).ptr(); 73 73 #if ENABLE(JIT) 74 74 catchRoutine = handler->nativeCode.executableAddress(); 75 75 #else 76 catchRoutine = catchPCForInterpreter->u.pointer; 76 catchRoutine = LLInt::getCodePtr(catchPCForInterpreter->opcodeID()); 77 77 #endif 78 78 } else -
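The JITExceptions.cpp hunk above recovers the LLInt catch routine from the opcode at the catch target (LLInt::getCodePtr(catchPCForInterpreter->opcodeID())) instead of reading a machine-code pointer stored in the instruction itself, which the compact encoding no longer carries. A sketch of that lookup, with a hypothetical opcode table standing in for the real LLInt entry points:

    #include <array>
    #include <cassert>

    enum OpcodeID : unsigned { op_catch = 0, op_enter = 1, numOpcodeIDs = 2 };

    struct Instruction {
        unsigned char opcode;
        OpcodeID opcodeID() const { return static_cast<OpcodeID>(opcode); }
    };

    // Stand-ins for the interpreter's per-opcode machine-code entry points.
    void catchHandlerThunk() {}
    void enterThunk() {}

    using CodePtr = void (*)();
    static const std::array<CodePtr, numOpcodeIDs> llintCodeTable { catchHandlerThunk, enterThunk };

    CodePtr getCodePtr(OpcodeID id) { return llintCodeTable[id]; }

    int main()
    {
        // The handler target identifies an instruction; its opcode, not the
        // instruction slot, yields the code address to resume at.
        Instruction catchInstruction { op_catch };
        CodePtr catchRoutine = getCodePtr(catchInstruction.opcodeID());
        assert(catchRoutine == catchHandlerThunk);
    }

-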
trunk/Source/JavaScriptCore/jit/JITInlines.h
r237486 r237547 32 32 namespace JSC { 33 33 34 inline MacroAssembler::JumpList JIT::emitDoubleGetByVal( Instruction* instruction, PatchableJump& badType)34 inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(const Instruction* instruction, PatchableJump& badType) 35 35 { 36 36 #if USE(JSVALUE64) … … 44 44 } 45 45 46 ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode( Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)46 ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(const Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType) 47 47 { 48 48 switch (arrayMode) { … … 62 62 } 63 63 64 inline MacroAssembler::JumpList JIT::emitContiguousGetByVal( Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)64 inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(const Instruction* instruction, PatchableJump& badType, IndexingType expectedShape) 65 65 { 66 66 return emitContiguousLoad(instruction, badType, expectedShape); 67 67 } 68 68 69 inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal( Instruction* instruction, PatchableJump& badType)69 inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(const Instruction* instruction, PatchableJump& badType) 70 70 { 71 71 return emitArrayStorageLoad(instruction, badType); … … 182 182 } 183 183 184 ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr<CFunctionPtrTag> function, int dst) 184 template<typename Metadata> 185 ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(Metadata& metadata, const FunctionPtr<CFunctionPtrTag> function, int dst) 185 186 { 186 187 MacroAssembler::Call call = appendCallWithExceptionCheck(function); 187 emitValueProfilingSite( );188 emitValueProfilingSite(metadata); 188 189 #if USE(JSVALUE64) 189 190 emitPutVirtualRegister(dst, returnValueGPR); … … 276 277 #if ENABLE(OPCODE_SAMPLING) 277 278 #if CPU(X86_64) 278 ALWAYS_INLINE void JIT::sampleInstruction( Instruction* instruction, bool inHostFunction)279 ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction) 279 280 { 280 281 move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx); … … 282 283 } 283 284 #else 284 ALWAYS_INLINE void JIT::sampleInstruction( Instruction* instruction, bool inHostFunction)285 ALWAYS_INLINE void JIT::sampleInstruction(const Instruction* instruction, bool inHostFunction) 285 286 { 286 287 storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot()); … … 329 330 } 330 331 331 inline void JIT::emitValueProfilingSite(unsigned bytecodeOffset) 332 template<typename Op> 333 inline std::enable_if_t<std::is_same<decltype(Op::Metadata::profile), ValueProfile>::value, void> JIT::emitValueProfilingSiteIfProfiledOpcode(Op bytecode) 334 { 335 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 336 } 337 338 inline void JIT::emitValueProfilingSiteIfProfiledOpcode(...) 
{ } 339 340 template<typename Metadata> 341 inline void JIT::emitValueProfilingSite(Metadata& metadata) 332 342 { 333 343 if (!shouldEmitProfiling()) 334 344 return; 335 emitValueProfilingSite(m_codeBlock->valueProfileForBytecodeOffset(bytecodeOffset)); 336 } 337 338 inline void JIT::emitValueProfilingSite() 339 { 340 emitValueProfilingSite(m_bytecodeOffset); 345 emitValueProfilingSite(metadata.profile); 341 346 } 342 347 … … 702 707 } 703 708 704 inline Instruction* JIT::copiedInstruction(Instruction* inst) 705 { 706 return &m_instructions[m_codeBlock->bytecodeOffset(inst)]; 709 ALWAYS_INLINE int JIT::jumpTarget(const Instruction* instruction, int target) 710 { 711 if (target) 712 return target; 713 return m_codeBlock->outOfLineJumpOffset(instruction); 714 } 715 716 ALWAYS_INLINE GetPutInfo JIT::copiedGetPutInfo(OpPutToScope bytecode) 717 { 718 unsigned key = bytecode.metadataID + 1; // HashMap doesn't like 0 as a key 719 auto iterator = m_copiedGetPutInfos.find(key); 720 if (iterator != m_copiedGetPutInfos.end()) 721 return GetPutInfo(iterator->value); 722 GetPutInfo getPutInfo = bytecode.metadata(m_codeBlock).getPutInfo; 723 m_copiedGetPutInfos.add(key, getPutInfo.operand()); 724 return getPutInfo; 725 } 726 727 template<typename BinaryOp> 728 ALWAYS_INLINE ArithProfile JIT::copiedArithProfile(BinaryOp bytecode) 729 { 730 uint64_t key = static_cast<uint64_t>(BinaryOp::opcodeID) << 32 | static_cast<uint64_t>(bytecode.metadataID); 731 auto iterator = m_copiedArithProfiles.find(key); 732 if (iterator != m_copiedArithProfiles.end()) 733 return iterator->value; 734 ArithProfile arithProfile = bytecode.metadata(m_codeBlock).arithProfile; 735 m_copiedArithProfiles.add(key, arithProfile); 736 return arithProfile; 707 737 } 708 738 -
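Among the JITInlines.h additions above, jumpTarget() treats an inline target of 0 as an escape value: a jump whose displacement did not fit the narrow encoding stores 0 inline and parks the real target in a side table on the CodeBlock. A sketch of the idea, with hypothetical types and std::unordered_map standing in for the actual JSC containers:

    #include <cassert>
    #include <cstdint>
    #include <unordered_map>

    struct Instruction { uint32_t offset; int8_t inlineTarget; };

    struct CodeBlock {
        // Keyed by bytecode offset; only jumps whose displacement overflowed
        // the narrow operand have an entry here.
        std::unordered_map<uint32_t, int32_t> outOfLineJumpTargets;

        int32_t outOfLineJumpOffset(const Instruction* instruction) const
        {
            return outOfLineJumpTargets.at(instruction->offset);
        }
    };

    int32_t jumpTarget(const CodeBlock& codeBlock, const Instruction* instruction)
    {
        if (instruction->inlineTarget)
            return instruction->inlineTarget; // fits in the narrow encoding
        return codeBlock.outOfLineJumpOffset(instruction); // 0 means "out of line"
    }

    int main()
    {
        CodeBlock codeBlock;
        Instruction shortJump { 0, 10 };  // displacement fits inline
        Instruction longJump { 4, 0 };    // displacement spilled to the table
        codeBlock.outOfLineJumpTargets[4] = 100000;
        assert(jumpTarget(codeBlock, &shortJump) == 10);
        assert(jumpTarget(codeBlock, &longJump) == 100000);
    }

The copiedGetPutInfo/copiedArithProfile helpers in the same hunk follow a related discipline: they snapshot mutable metadata into a map once per compilation, keyed by metadata ID, so codegen never observes a profile changing under it.

-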
trunk/Source/JavaScriptCore/jit/JITMathIC.h
r237486 r237547 57 57 WTF_MAKE_FAST_ALLOCATED; 58 58 public: 59 JITMathIC(ArithProfile* arithProfile, Instruction* instruction) 59 JITMathIC(ArithProfile* arithProfile, const Instruction* instruction) 60 60 : m_arithProfile(arithProfile) 61 61 , m_instruction(instruction) 62 62 { … … 233 233 234 234 ArithProfile* arithProfile() const { return m_arithProfile; } 235 Instruction* instruction() const { return m_instruction; } 235 const Instruction* instruction() const { return m_instruction; } 236 236 237 237 #if ENABLE(MATH_IC_STATS) … … 247 247 248 248 ArithProfile* m_arithProfile; 249 Instruction* m_instruction; 249 const Instruction* m_instruction; 250 250 MacroAssemblerCodeRef<JITStubRoutinePtrTag> m_code; 251 251 CodeLocationLabel<JSInternalPtrTag> m_inlineStart; … … 264 264 class JITBinaryMathIC : public JITMathIC<GeneratorType, isBinaryProfileEmpty> { 265 265 public: 266 JITBinaryMathIC(ArithProfile* arithProfile, Instruction* instruction) 266 JITBinaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) 267 267 : JITMathIC<GeneratorType, isBinaryProfileEmpty>(arithProfile, instruction) 268 268 { … … 282 282 class JITUnaryMathIC : public JITMathIC<GeneratorType, isUnaryProfileEmpty> { 283 283 public: 284 JITUnaryMathIC(ArithProfile* arithProfile, Instruction* instruction) 284 JITUnaryMathIC(ArithProfile* arithProfile, const Instruction* instruction) 285 285 : JITMathIC<GeneratorType, isUnaryProfileEmpty>(arithProfile, instruction) 286 286 { -
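The JITMathIC.h hunk above is const-correctness driven by the same property: once generated, the instruction stream is read-only, so an inline cache keeps a const pointer into it and puts any state it must mutate on the IC object itself. A minimal sketch under that assumption, not the actual JITMathIC interface:

    struct Instruction { unsigned char opcode; };
    struct ArithProfile { unsigned bits = 0; };

    class JITAddIC {
    public:
        JITAddIC(ArithProfile* profile, const Instruction* instruction)
            : m_profile(profile)
            , m_instruction(instruction) // read-only view into the stream
        {
        }

        // Repatching may consult the originating instruction and profile,
        // but nothing is ever written back through m_instruction.
        const Instruction* instruction() const { return m_instruction; }

    private:
        ArithProfile* m_profile;
        const Instruction* m_instruction;
    };

    int main()
    {
        static const Instruction add { 42 };
        ArithProfile profile;
        JITAddIC ic(&profile, &add);
        (void)ic.instruction();
    }

-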
trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp
r237486 r237547 54 54 #if USE(JSVALUE64) 55 55 56 void JIT::emit_op_mov(Instruction* currentInstruction) 57 { 58 int dst = currentInstruction[1].u.operand; 59 int src = currentInstruction[2].u.operand; 56 void JIT::emit_op_mov(const Instruction* currentInstruction) 57 { 58 auto bytecode = currentInstruction->as<OpMov>(); 59 int dst = bytecode.dst.offset(); 60 int src = bytecode.src.offset(); 60 61 61 62 if (m_codeBlock->isConstantRegisterIndex(src)) { … … 73 74 74 75 75 void JIT::emit_op_end(Instruction* currentInstruction) 76 { 76 void JIT::emit_op_end(const Instruction* currentInstruction) 77 { 78 auto bytecode = currentInstruction->as<OpEnd>(); 77 79 RELEASE_ASSERT(returnValueGPR != callFrameRegister); 78 emitGetVirtualRegister( currentInstruction[1].u.operand, returnValueGPR);80 emitGetVirtualRegister(bytecode.value.offset(), returnValueGPR); 79 81 emitRestoreCalleeSaves(); 80 82 emitFunctionEpilogue(); … … 82 84 } 83 85 84 void JIT::emit_op_jmp(Instruction* currentInstruction) 85 { 86 unsigned target = currentInstruction[1].u.operand; 86 void JIT::emit_op_jmp(const Instruction* currentInstruction) 87 { 88 auto bytecode = currentInstruction->as<OpJmp>(); 89 unsigned target = jumpTarget(currentInstruction, bytecode.target); 87 90 addJump(jump(), target); 88 91 } 89 92 90 void JIT::emit_op_new_object(Instruction* currentInstruction) 91 { 92 Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); 93 void JIT::emit_op_new_object(const Instruction* currentInstruction) 94 { 95 auto bytecode = currentInstruction->as<OpNewObject>(); 96 auto& metadata = bytecode.metadata(m_codeBlock); 97 Structure* structure = metadata.objectAllocationProfile.structure(); 93 98 size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity()); 94 99 Allocator allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorForNonVirtual(allocationSize, AllocatorForMode::AllocatorIfExists); … … 106 111 emitInitializeInlineStorage(resultReg, structure->inlineCapacity()); 107 112 addSlowCase(slowCases); 108 emitPutVirtualRegister( currentInstruction[1].u.operand);113 emitPutVirtualRegister(bytecode.dst.offset()); 109 114 } 110 115 } 111 116 112 void JIT::emitSlow_op_new_object( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)117 void JIT::emitSlow_op_new_object(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 113 118 { 114 119 linkAllSlowCases(iter); 115 120 116 int dst = currentInstruction[1].u.operand; 117 Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure(); 121 auto bytecode = currentInstruction->as<OpNewObject>(); 122 auto& metadata = bytecode.metadata(m_codeBlock); 123 int dst = bytecode.dst.offset(); 124 Structure* structure = metadata.objectAllocationProfile.structure(); 118 125 callOperation(operationNewObject, structure); 119 126 emitStoreCell(dst, returnValueGPR); 120 127 } 121 128 122 void JIT::emit_op_overrides_has_instance( Instruction* currentInstruction)123 { 124 auto & bytecode = *reinterpret_cast<OpOverridesHasInstance*>(currentInstruction);125 int dst = bytecode.dst ();126 int constructor = bytecode.constructor ();127 int hasInstanceValue = bytecode.hasInstanceValue ();129 void JIT::emit_op_overrides_has_instance(const Instruction* currentInstruction) 130 { 131 auto bytecode = currentInstruction->as<OpOverridesHasInstance>(); 132 int dst = bytecode.dst.offset(); 133 int constructor = bytecode.constructor.offset(); 134 int hasInstanceValue = bytecode.hasInstanceValue.offset(); 
128 135 129 136 emitGetVirtualRegister(hasInstanceValue, regT0); … … 146 153 } 147 154 148 void JIT::emit_op_instanceof( Instruction* currentInstruction)149 { 150 auto & bytecode = *reinterpret_cast<OpInstanceof*>(currentInstruction);151 int dst = bytecode.dst ();152 int value = bytecode.value ();153 int proto = bytecode.prototype ();155 void JIT::emit_op_instanceof(const Instruction* currentInstruction) 156 { 157 auto bytecode = currentInstruction->as<OpInstanceof>(); 158 int dst = bytecode.dst.offset(); 159 int value = bytecode.value.offset(); 160 int proto = bytecode.prototype.offset(); 154 161 155 162 // Load the operands (baseVal, proto, and value respectively) into registers. … … 175 182 } 176 183 177 void JIT::emitSlow_op_instanceof( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)184 void JIT::emitSlow_op_instanceof(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 178 185 { 179 186 linkAllSlowCases(iter); 180 187 181 int resultVReg = currentInstruction[1].u.operand; 188 auto bytecode = currentInstruction->as<OpInstanceof>(); 189 int resultVReg = bytecode.dst.offset(); 182 190 183 191 JITInstanceOfGenerator& gen = m_instanceOfs[m_instanceOfIndex++]; … … 188 196 } 189 197 190 void JIT::emit_op_instanceof_custom( Instruction*)198 void JIT::emit_op_instanceof_custom(const Instruction*) 191 199 { 192 200 // This always goes to slow path since we expect it to be rare. … … 194 202 } 195 203 196 void JIT::emit_op_is_empty(Instruction* currentInstruction) 197 { 198 int dst = currentInstruction[1].u.operand; 199 int value = currentInstruction[2].u.operand; 204 void JIT::emit_op_is_empty(const Instruction* currentInstruction) 205 { 206 auto bytecode = currentInstruction->as<OpIsEmpty>(); 207 int dst = bytecode.dst.offset(); 208 int value = bytecode.operand.offset(); 200 209 201 210 emitGetVirtualRegister(value, regT0); … … 206 215 } 207 216 208 void JIT::emit_op_is_undefined(Instruction* currentInstruction) 209 { 210 int dst = currentInstruction[1].u.operand; 211 int value = currentInstruction[2].u.operand; 217 void JIT::emit_op_is_undefined(const Instruction* currentInstruction) 218 { 219 auto bytecode = currentInstruction->as<OpIsUndefined>(); 220 int dst = bytecode.dst.offset(); 221 int value = bytecode.operand.offset(); 212 222 213 223 emitGetVirtualRegister(value, regT0); … … 234 244 } 235 245 236 void JIT::emit_op_is_boolean(Instruction* currentInstruction) 237 { 238 int dst = currentInstruction[1].u.operand; 239 int value = currentInstruction[2].u.operand; 246 void JIT::emit_op_is_boolean(const Instruction* currentInstruction) 247 { 248 auto bytecode = currentInstruction->as<OpIsBoolean>(); 249 int dst = bytecode.dst.offset(); 250 int value = bytecode.operand.offset(); 240 251 241 252 emitGetVirtualRegister(value, regT0); … … 246 257 } 247 258 248 void JIT::emit_op_is_number(Instruction* currentInstruction) 249 { 250 int dst = currentInstruction[1].u.operand; 251 int value = currentInstruction[2].u.operand; 259 void JIT::emit_op_is_number(const Instruction* currentInstruction) 260 { 261 auto bytecode = currentInstruction->as<OpIsNumber>(); 262 int dst = bytecode.dst.offset(); 263 int value = bytecode.operand.offset(); 252 264 253 265 emitGetVirtualRegister(value, regT0); … … 257 269 } 258 270 259 void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction) 260 { 261 int dst = currentInstruction[1].u.operand; 262 int value = currentInstruction[2].u.operand; 263 int type = currentInstruction[3].u.operand; 271 void 
JIT::emit_op_is_cell_with_type(const Instruction* currentInstruction) 272 { 273 auto bytecode = currentInstruction->as<OpIsCellWithType>(); 274 int dst = bytecode.dst.offset(); 275 int value = bytecode.operand.offset(); 276 int type = bytecode.type; 264 277 265 278 emitGetVirtualRegister(value, regT0); … … 277 290 } 278 291 279 void JIT::emit_op_is_object(Instruction* currentInstruction) 280 { 281 int dst = currentInstruction[1].u.operand; 282 int value = currentInstruction[2].u.operand; 292 void JIT::emit_op_is_object(const Instruction* currentInstruction) 293 { 294 auto bytecode = currentInstruction->as<OpIsObject>(); 295 int dst = bytecode.dst.offset(); 296 int value = bytecode.operand.offset(); 283 297 284 298 emitGetVirtualRegister(value, regT0); … … 296 310 } 297 311 298 void JIT::emit_op_ret( Instruction* currentInstruction)312 void JIT::emit_op_ret(const Instruction* currentInstruction) 299 313 { 300 314 ASSERT(callFrameRegister != regT1); … … 303 317 304 318 // Return the result in %eax. 305 emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); 319 auto bytecode = currentInstruction->as<OpRet>(); 320 emitGetVirtualRegister(bytecode.value.offset(), returnValueGPR); 306 321 307 322 checkStackPointerAlignment(); … … 311 326 } 312 327 313 void JIT::emit_op_to_primitive(Instruction* currentInstruction) 314 { 315 int dst = currentInstruction[1].u.operand; 316 int src = currentInstruction[2].u.operand; 328 void JIT::emit_op_to_primitive(const Instruction* currentInstruction) 329 { 330 auto bytecode = currentInstruction->as<OpToPrimitive>(); 331 int dst = bytecode.dst.offset(); 332 int src = bytecode.src.offset(); 317 333 318 334 emitGetVirtualRegister(src, regT0); … … 327 343 } 328 344 329 void JIT::emit_op_set_function_name(Instruction* currentInstruction) 330 { 331 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 332 emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); 345 void JIT::emit_op_set_function_name(const Instruction* currentInstruction) 346 { 347 auto bytecode = currentInstruction->as<OpSetFunctionName>(); 348 emitGetVirtualRegister(bytecode.function.offset(), regT0); 349 emitGetVirtualRegister(bytecode.name.offset(), regT1); 333 350 callOperation(operationSetFunctionName, regT0, regT1); 334 351 } 335 352 336 void JIT::emit_op_not(Instruction* currentInstruction) 337 { 338 emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); 353 void JIT::emit_op_not(const Instruction* currentInstruction) 354 { 355 auto bytecode = currentInstruction->as<OpNot>(); 356 emitGetVirtualRegister(bytecode.operand.offset(), regT0); 339 357 340 358 // Invert against JSValue(false); if the value was tagged as a boolean, then all bits will be … … 345 363 xor64(TrustedImm32(static_cast<int32_t>(ValueTrue)), regT0); 346 364 347 emitPutVirtualRegister(currentInstruction[1].u.operand); 348 } 349 350 void JIT::emit_op_jfalse(Instruction* currentInstruction) 351 { 352 unsigned target = currentInstruction[2].u.operand; 365 emitPutVirtualRegister(bytecode.dst.offset()); 366 } 367 368 void JIT::emit_op_jfalse(const Instruction* currentInstruction) 369 { 370 auto bytecode = currentInstruction->as<OpJfalse>(); 371 unsigned target = jumpTarget(currentInstruction, bytecode.target); 353 372 354 373 GPRReg value = regT0; … … 357 376 bool shouldCheckMasqueradesAsUndefined = true; 358 377 359 emitGetVirtualRegister( currentInstruction[1].u.operand, value);378 emitGetVirtualRegister(bytecode.condition.offset(), value); 360 379 addJump(branchIfFalsey(*vm(), 
JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); 361 380 } 362 381 363 void JIT::emit_op_jeq_null(Instruction* currentInstruction) 364 { 365 int src = currentInstruction[1].u.operand; 366 unsigned target = currentInstruction[2].u.operand; 382 void JIT::emit_op_jeq_null(const Instruction* currentInstruction) 383 { 384 auto bytecode = currentInstruction->as<OpJeqNull>(); 385 int src = bytecode.value.offset(); 386 unsigned target = jumpTarget(currentInstruction, bytecode.target); 367 387 368 388 emitGetVirtualRegister(src, regT0); … … 384 404 masqueradesGlobalObjectIsForeign.link(this); 385 405 }; 386 void JIT::emit_op_jneq_null(Instruction* currentInstruction) 387 { 388 int src = currentInstruction[1].u.operand; 389 unsigned target = currentInstruction[2].u.operand; 406 void JIT::emit_op_jneq_null(const Instruction* currentInstruction) 407 { 408 auto bytecode = currentInstruction->as<OpJneqNull>(); 409 int src = bytecode.value.offset(); 410 unsigned target = jumpTarget(currentInstruction, bytecode.target); 390 411 391 412 emitGetVirtualRegister(src, regT0); … … 407 428 } 408 429 409 void JIT::emit_op_jneq_ptr(Instruction* currentInstruction) 410 { 411 int src = currentInstruction[1].u.operand; 412 Special::Pointer ptr = currentInstruction[2].u.specialPointer; 413 unsigned target = currentInstruction[3].u.operand; 430 void JIT::emit_op_jneq_ptr(const Instruction* currentInstruction) 431 { 432 auto bytecode = currentInstruction->as<OpJneqPtr>(); 433 auto& metadata = bytecode.metadata(m_codeBlock); 434 int src = bytecode.value.offset(); 435 Special::Pointer ptr = bytecode.specialPointer; 436 unsigned target = jumpTarget(currentInstruction, bytecode.target); 414 437 415 438 emitGetVirtualRegister(src, regT0); 416 439 CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))); 417 store 32(TrustedImm32(1), ¤tInstruction[4].u.operand);440 store8(TrustedImm32(1), &metadata.hasJumped); 418 441 addJump(jump(), target); 419 442 equal.link(this); 420 443 } 421 444 422 void JIT::emit_op_eq(Instruction* currentInstruction) 423 { 424 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); 445 void JIT::emit_op_eq(const Instruction* currentInstruction) 446 { 447 auto bytecode = currentInstruction->as<OpEq>(); 448 emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); 425 449 emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); 426 450 compare32(Equal, regT1, regT0, regT0); 427 451 boxBoolean(regT0, JSValueRegs { regT0 }); 428 emitPutVirtualRegister(currentInstruction[1].u.operand); 429 } 430 431 void JIT::emit_op_jeq(Instruction* currentInstruction) 432 { 433 unsigned target = currentInstruction[3].u.operand; 434 emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); 452 emitPutVirtualRegister(bytecode.dst.offset()); 453 } 454 455 void JIT::emit_op_jeq(const Instruction* currentInstruction) 456 { 457 auto bytecode = currentInstruction->as<OpJeq>(); 458 unsigned target = jumpTarget(currentInstruction, bytecode.target); 459 emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); 435 460 emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); 436 461 addJump(branch32(Equal, regT0, regT1), target); 437 462 } 438 463 439 void JIT::emit_op_jtrue(Instruction* currentInstruction) 440 { 441 unsigned target = currentInstruction[2].u.operand; 464 void 
JIT::emit_op_jtrue(const Instruction* currentInstruction) 465 { 466 auto bytecode = currentInstruction->as<OpJtrue>(); 467 unsigned target = jumpTarget(currentInstruction, bytecode.target); 442 468 443 469 GPRReg value = regT0; … … 445 471 GPRReg scratch2 = regT2; 446 472 bool shouldCheckMasqueradesAsUndefined = true; 447 emitGetVirtualRegister( currentInstruction[1].u.operand, value);473 emitGetVirtualRegister(bytecode.condition.offset(), value); 448 474 addJump(branchIfTruthy(*vm(), JSValueRegs(value), scratch1, scratch2, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject()), target); 449 475 } 450 476 451 void JIT::emit_op_neq(Instruction* currentInstruction) 452 { 453 emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1); 477 void JIT::emit_op_neq(const Instruction* currentInstruction) 478 { 479 auto bytecode = currentInstruction->as<OpNeq>(); 480 emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); 454 481 emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); 455 482 compare32(NotEqual, regT1, regT0, regT0); 456 483 boxBoolean(regT0, JSValueRegs { regT0 }); 457 484 458 emitPutVirtualRegister(currentInstruction[1].u.operand); 459 } 460 461 void JIT::emit_op_jneq(Instruction* currentInstruction) 462 { 463 unsigned target = currentInstruction[3].u.operand; 464 emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1); 485 emitPutVirtualRegister(bytecode.dst.offset()); 486 } 487 488 void JIT::emit_op_jneq(const Instruction* currentInstruction) 489 { 490 auto bytecode = currentInstruction->as<OpJneq>(); 491 unsigned target = jumpTarget(currentInstruction, bytecode.target); 492 emitGetVirtualRegisters(bytecode.lhs.offset(), regT0, bytecode.rhs.offset(), regT1); 465 493 emitJumpSlowCaseIfNotInt(regT0, regT1, regT2); 466 494 addJump(branch32(NotEqual, regT0, regT1), target); 467 495 } 468 496 469 void JIT::emit_op_throw(Instruction* currentInstruction) 470 { 497 void JIT::emit_op_throw(const Instruction* currentInstruction) 498 { 499 auto bytecode = currentInstruction->as<OpThrow>(); 471 500 ASSERT(regT0 == returnValueGPR); 472 501 copyCalleeSavesToEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); 473 emitGetVirtualRegister( currentInstruction[1].u.operand, regT0);502 emitGetVirtualRegister(bytecode.value.offset(), regT0); 474 503 callOperationNoExceptionCheck(operationThrow, regT0); 475 504 jumpToExceptionHandler(*vm()); 476 505 } 477 506 478 void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) 479 { 480 int dst = currentInstruction[1].u.operand; 481 int src1 = currentInstruction[2].u.operand; 482 int src2 = currentInstruction[3].u.operand; 507 template<typename Op> 508 void JIT::compileOpStrictEq(const Instruction* currentInstruction, CompileOpStrictEqType type) 509 { 510 auto bytecode = currentInstruction->as<Op>(); 511 int dst = bytecode.dst.offset(); 512 int src1 = bytecode.lhs.offset(); 513 int src2 = bytecode.rhs.offset(); 483 514 484 515 emitGetVirtualRegisters(src1, regT0, src2, regT1); … … 507 538 } 508 539 509 void JIT::emit_op_stricteq(Instruction* currentInstruction) 510 { 511 compileOpStrictEq(currentInstruction, CompileOpStrictEqType::StrictEq); 512 } 513 514 void JIT::emit_op_nstricteq(Instruction* currentInstruction) 515 { 516 compileOpStrictEq(currentInstruction, CompileOpStrictEqType::NStrictEq); 517 } 518 519 void JIT::compileOpStrictEqJump(Instruction* currentInstruction, 
CompileOpStrictEqType type) 520 { 521 int target = currentInstruction[3].u.operand; 522 int src1 = currentInstruction[1].u.operand; 523 int src2 = currentInstruction[2].u.operand; 540 void JIT::emit_op_stricteq(const Instruction* currentInstruction) 541 { 542 compileOpStrictEq<OpStricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); 543 } 544 545 void JIT::emit_op_nstricteq(const Instruction* currentInstruction) 546 { 547 compileOpStrictEq<OpNstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); 548 } 549 550 template<typename Op> 551 void JIT::compileOpStrictEqJump(const Instruction* currentInstruction, CompileOpStrictEqType type) 552 { 553 auto bytecode = currentInstruction->as<Op>(); 554 int target = jumpTarget(currentInstruction, bytecode.target); 555 int src1 = bytecode.lhs.offset(); 556 int src2 = bytecode.rhs.offset(); 524 557 525 558 emitGetVirtualRegisters(src1, regT0, src2, regT1); … … 545 578 } 546 579 547 void JIT::emit_op_jstricteq( Instruction* currentInstruction)548 { 549 compileOpStrictEqJump (currentInstruction, CompileOpStrictEqType::StrictEq);550 } 551 552 void JIT::emit_op_jnstricteq( Instruction* currentInstruction)553 { 554 compileOpStrictEqJump (currentInstruction, CompileOpStrictEqType::NStrictEq);555 } 556 557 void JIT::emitSlow_op_jstricteq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)580 void JIT::emit_op_jstricteq(const Instruction* currentInstruction) 581 { 582 compileOpStrictEqJump<OpJstricteq>(currentInstruction, CompileOpStrictEqType::StrictEq); 583 } 584 585 void JIT::emit_op_jnstricteq(const Instruction* currentInstruction) 586 { 587 compileOpStrictEqJump<OpJnstricteq>(currentInstruction, CompileOpStrictEqType::NStrictEq); 588 } 589 590 void JIT::emitSlow_op_jstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 558 591 { 559 592 linkAllSlowCases(iter); 560 593 561 unsigned target = currentInstruction[3].u.operand; 594 auto bytecode = currentInstruction->as<OpJstricteq>(); 595 unsigned target = jumpTarget(currentInstruction, bytecode.target); 562 596 callOperation(operationCompareStrictEq, regT0, regT1); 563 597 emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); 564 598 } 565 599 566 void JIT::emitSlow_op_jnstricteq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)600 void JIT::emitSlow_op_jnstricteq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 567 601 { 568 602 linkAllSlowCases(iter); 569 603 570 unsigned target = currentInstruction[3].u.operand; 604 auto bytecode = currentInstruction->as<OpJnstricteq>(); 605 unsigned target = jumpTarget(currentInstruction, bytecode.target); 571 606 callOperation(operationCompareStrictEq, regT0, regT1); 572 607 emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); 573 608 } 574 609 575 void JIT::emit_op_to_number(Instruction* currentInstruction) 576 { 577 int dstVReg = currentInstruction[1].u.operand; 578 int srcVReg = currentInstruction[2].u.operand; 610 void JIT::emit_op_to_number(const Instruction* currentInstruction) 611 { 612 auto bytecode = currentInstruction->as<OpToNumber>(); 613 int dstVReg = bytecode.dst.offset(); 614 int srcVReg = bytecode.operand.offset(); 579 615 emitGetVirtualRegister(srcVReg, regT0); 580 616 581 617 addSlowCase(branchIfNotNumber(regT0)); 582 618 583 emitValueProfilingSite( );619 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 584 620 if (srcVReg != dstVReg) 585 621 emitPutVirtualRegister(dstVReg); 586 622 } 587 623 588 void 
JIT::emit_op_to_string(Instruction* currentInstruction) 589 { 590 int srcVReg = currentInstruction[2].u.operand; 624 void JIT::emit_op_to_string(const Instruction* currentInstruction) 625 { 626 auto bytecode = currentInstruction->as<OpToString>(); 627 int srcVReg = bytecode.operand.offset(); 591 628 emitGetVirtualRegister(srcVReg, regT0); 592 629 … … 594 631 addSlowCase(branchIfNotString(regT0)); 595 632 596 emitPutVirtualRegister(currentInstruction[1].u.operand); 597 } 598 599 void JIT::emit_op_to_object(Instruction* currentInstruction) 600 { 601 int dstVReg = currentInstruction[1].u.operand; 602 int srcVReg = currentInstruction[2].u.operand; 633 emitPutVirtualRegister(bytecode.dst.offset()); 634 } 635 636 void JIT::emit_op_to_object(const Instruction* currentInstruction) 637 { 638 auto bytecode = currentInstruction->as<OpToObject>(); 639 int dstVReg = bytecode.dst.offset(); 640 int srcVReg = bytecode.operand.offset(); 603 641 emitGetVirtualRegister(srcVReg, regT0); 604 642 … … 606 644 addSlowCase(branchIfNotObject(regT0)); 607 645 608 emitValueProfilingSite( );646 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 609 647 if (srcVReg != dstVReg) 610 648 emitPutVirtualRegister(dstVReg); 611 649 } 612 650 613 void JIT::emit_op_catch(Instruction* currentInstruction) 614 { 651 void JIT::emit_op_catch(const Instruction* currentInstruction) 652 { 653 auto bytecode = currentInstruction->as<OpCatch>(); 654 auto& metadata = bytecode.metadata(m_codeBlock); 655 615 656 restoreCalleeSavesFromEntryFrameCalleeSavesBuffer(vm()->topEntryFrame); 616 657 … … 629 670 load64(Address(regT3, VM::exceptionOffset()), regT0); 630 671 store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); 631 emitPutVirtualRegister( currentInstruction[1].u.operand);672 emitPutVirtualRegister(bytecode.exception.offset()); 632 673 633 674 load64(Address(regT0, Exception::valueOffset()), regT0); 634 emitPutVirtualRegister( currentInstruction[2].u.operand);675 emitPutVirtualRegister(bytecode.thrownValue.offset()); 635 676 636 677 #if ENABLE(DFG_JIT) … … 639 680 // https://bugs.webkit.org/show_bug.cgi?id=175598 640 681 641 ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(currentInstruction[3].u.pointer);682 ValueProfileAndOperandBuffer* buffer = metadata.buffer; 642 683 if (buffer || !shouldEmitProfiling()) 643 684 callOperation(operationTryOSREnterAtCatch, m_bytecodeOffset); … … 658 699 } 659 700 660 void JIT::emit_op_identity_with_profile( Instruction*)701 void JIT::emit_op_identity_with_profile(const Instruction*) 661 702 { 662 703 // We don't need to do anything here... 
663 704 } 664 705 665 void JIT::emit_op_get_parent_scope(Instruction* currentInstruction) 666 { 667 int currentScope = currentInstruction[2].u.operand; 706 void JIT::emit_op_get_parent_scope(const Instruction* currentInstruction) 707 { 708 auto bytecode = currentInstruction->as<OpGetParentScope>(); 709 int currentScope = bytecode.scope.offset(); 668 710 emitGetVirtualRegister(currentScope, regT0); 669 711 loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); 670 emitStoreCell(currentInstruction[1].u.operand, regT0); 671 } 672 673 void JIT::emit_op_switch_imm(Instruction* currentInstruction) 674 { 675 size_t tableIndex = currentInstruction[1].u.operand; 676 unsigned defaultOffset = currentInstruction[2].u.operand; 677 unsigned scrutinee = currentInstruction[3].u.operand; 712 emitStoreCell(bytecode.dst.offset(), regT0); 713 } 714 715 void JIT::emit_op_switch_imm(const Instruction* currentInstruction) 716 { 717 auto bytecode = currentInstruction->as<OpSwitchImm>(); 718 size_t tableIndex = bytecode.tableIndex; 719 unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset); 720 unsigned scrutinee = bytecode.scrutinee.offset(); 678 721 679 722 // create jump table for switch destinations, track this switch statement. … … 687 730 } 688 731 689 void JIT::emit_op_switch_char(Instruction* currentInstruction) 690 { 691 size_t tableIndex = currentInstruction[1].u.operand; 692 unsigned defaultOffset = currentInstruction[2].u.operand; 693 unsigned scrutinee = currentInstruction[3].u.operand; 732 void JIT::emit_op_switch_char(const Instruction* currentInstruction) 733 { 734 auto bytecode = currentInstruction->as<OpSwitchChar>(); 735 size_t tableIndex = bytecode.tableIndex; 736 unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset); 737 unsigned scrutinee = bytecode.scrutinee.offset(); 694 738 695 739 // create jump table for switch destinations, track this switch statement. … … 703 747 } 704 748 705 void JIT::emit_op_switch_string(Instruction* currentInstruction) 706 { 707 size_t tableIndex = currentInstruction[1].u.operand; 708 unsigned defaultOffset = currentInstruction[2].u.operand; 709 unsigned scrutinee = currentInstruction[3].u.operand; 749 void JIT::emit_op_switch_string(const Instruction* currentInstruction) 750 { 751 auto bytecode = currentInstruction->as<OpSwitchString>(); 752 size_t tableIndex = bytecode.tableIndex; 753 unsigned defaultOffset = jumpTarget(currentInstruction, bytecode.defaultOffset); 754 unsigned scrutinee = bytecode.scrutinee.offset(); 710 755 711 756 // create jump table for switch destinations, track this switch statement. 
… … 718 763 } 719 764 720 void JIT::emit_op_debug(Instruction* currentInstruction) 721 { 765 void JIT::emit_op_debug(const Instruction* currentInstruction) 766 { 767 auto bytecode = currentInstruction->as<OpDebug>(); 722 768 load32(codeBlock()->debuggerRequestsAddress(), regT0); 723 769 Jump noDebuggerRequests = branchTest32(Zero, regT0); 724 callOperation(operationDebug, currentInstruction[1].u.operand);770 callOperation(operationDebug, static_cast<int>(bytecode.debugHookType)); 725 771 noDebuggerRequests.link(this); 726 772 } 727 773 728 void JIT::emit_op_eq_null(Instruction* currentInstruction) 729 { 730 int dst = currentInstruction[1].u.operand; 731 int src1 = currentInstruction[2].u.operand; 774 void JIT::emit_op_eq_null(const Instruction* currentInstruction) 775 { 776 auto bytecode = currentInstruction->as<OpEqNull>(); 777 int dst = bytecode.dst.offset(); 778 int src1 = bytecode.operand.offset(); 732 779 733 780 emitGetVirtualRegister(src1, regT0); … … 758 805 } 759 806 760 void JIT::emit_op_neq_null(Instruction* currentInstruction) 761 { 762 int dst = currentInstruction[1].u.operand; 763 int src1 = currentInstruction[2].u.operand; 807 void JIT::emit_op_neq_null(const Instruction* currentInstruction) 808 { 809 auto bytecode = currentInstruction->as<OpNeqNull>(); 810 int dst = bytecode.dst.offset(); 811 int src1 = bytecode.operand.offset(); 764 812 765 813 emitGetVirtualRegister(src1, regT0); … … 789 837 } 790 838 791 void JIT::emit_op_enter( Instruction*)839 void JIT::emit_op_enter(const Instruction*) 792 840 { 793 841 // Even though CTI doesn't use them, we initialize our constant … … 803 851 } 804 852 805 void JIT::emit_op_get_scope(Instruction* currentInstruction) 806 { 807 int dst = currentInstruction[1].u.operand; 853 void JIT::emit_op_get_scope(const Instruction* currentInstruction) 854 { 855 auto bytecode = currentInstruction->as<OpGetScope>(); 856 int dst = bytecode.dst.offset(); 808 857 emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0); 809 858 loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); … … 811 860 } 812 861 813 void JIT::emit_op_to_this(Instruction* currentInstruction) 814 { 815 WriteBarrierBase<Structure>* cachedStructure = ¤tInstruction[2].u.structure; 816 emitGetVirtualRegister(currentInstruction[1].u.operand, regT1); 862 void JIT::emit_op_to_this(const Instruction* currentInstruction) 863 { 864 auto bytecode = currentInstruction->as<OpToThis>(); 865 auto& metadata = bytecode.metadata(m_codeBlock); 866 WriteBarrierBase<Structure>* cachedStructure = &metadata.cachedStructure; 867 emitGetVirtualRegister(bytecode.srcDst.offset(), regT1); 817 868 818 869 emitJumpSlowCaseIfNotJSCell(regT1); … … 825 876 } 826 877 827 void JIT::emit_op_create_this(Instruction* currentInstruction) 828 { 829 int callee = currentInstruction[2].u.operand; 830 WriteBarrierBase<JSCell>* cachedFunction = ¤tInstruction[4].u.jsCell; 878 void JIT::emit_op_create_this(const Instruction* currentInstruction) 879 { 880 auto bytecode = currentInstruction->as<OpCreateThis>(); 881 auto& metadata = bytecode.metadata(m_codeBlock); 882 int callee = bytecode.callee.offset(); 883 WriteBarrierBase<JSCell>* cachedFunction = &metadata.cachedCallee; 831 884 RegisterID calleeReg = regT0; 832 885 RegisterID rareDataReg = regT4; … … 859 912 emitInitializeInlineStorage(resultReg, scratchReg); 860 913 addSlowCase(slowCases); 861 emitPutVirtualRegister(currentInstruction[1].u.operand); 862 } 863 864 void JIT::emit_op_check_tdz(Instruction* currentInstruction) 865 { 866 
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 914 emitPutVirtualRegister(bytecode.dst.offset()); 915 } 916 917 void JIT::emit_op_check_tdz(const Instruction* currentInstruction) 918 { 919 auto bytecode = currentInstruction->as<OpCheckTdz>(); 920 emitGetVirtualRegister(bytecode.target.offset(), regT0); 867 921 addSlowCase(branchIfEmpty(regT0)); 868 922 } … … 871 925 // Slow cases 872 926 873 void JIT::emitSlow_op_eq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)927 void JIT::emitSlow_op_eq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 874 928 { 875 929 linkAllSlowCases(iter); 876 930 931 auto bytecode = currentInstruction->as<OpEq>(); 877 932 callOperation(operationCompareEq, regT0, regT1); 878 933 boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); 879 emitPutVirtualRegister( currentInstruction[1].u.operand, returnValueGPR);880 } 881 882 void JIT::emitSlow_op_neq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)934 emitPutVirtualRegister(bytecode.dst.offset(), returnValueGPR); 935 } 936 937 void JIT::emitSlow_op_neq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 883 938 { 884 939 linkAllSlowCases(iter); 885 940 941 auto bytecode = currentInstruction->as<OpNeq>(); 886 942 callOperation(operationCompareEq, regT0, regT1); 887 943 xor32(TrustedImm32(0x1), regT0); 888 944 boxBoolean(returnValueGPR, JSValueRegs { returnValueGPR }); 889 emitPutVirtualRegister( currentInstruction[1].u.operand, returnValueGPR);890 } 891 892 void JIT::emitSlow_op_jeq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)945 emitPutVirtualRegister(bytecode.dst.offset(), returnValueGPR); 946 } 947 948 void JIT::emitSlow_op_jeq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 893 949 { 894 950 linkAllSlowCases(iter); 895 951 896 unsigned target = currentInstruction[3].u.operand; 952 auto bytecode = currentInstruction->as<OpJeq>(); 953 unsigned target = jumpTarget(currentInstruction, bytecode.target); 897 954 callOperation(operationCompareEq, regT0, regT1); 898 955 emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target); 899 956 } 900 957 901 void JIT::emitSlow_op_jneq( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)958 void JIT::emitSlow_op_jneq(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 902 959 { 903 960 linkAllSlowCases(iter); 904 961 905 unsigned target = currentInstruction[3].u.operand; 962 auto bytecode = currentInstruction->as<OpJneq>(); 963 unsigned target = jumpTarget(currentInstruction, bytecode.target); 906 964 callOperation(operationCompareEq, regT0, regT1); 907 965 emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); 908 966 } 909 967 910 void JIT::emitSlow_op_instanceof_custom( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)968 void JIT::emitSlow_op_instanceof_custom(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 911 969 { 912 970 linkAllSlowCases(iter); 913 971 914 auto & bytecode = *reinterpret_cast<OpInstanceofCustom*>(currentInstruction);915 int dst = bytecode.dst ();916 int value = bytecode.value ();917 int constructor = bytecode.constructor ();918 int hasInstanceValue = bytecode.hasInstanceValue ();972 auto bytecode = currentInstruction->as<OpInstanceofCustom>(); 973 int dst = bytecode.dst.offset(); 974 int value = bytecode.value.offset(); 975 int constructor = 
bytecode.constructor.offset(); 976 int hasInstanceValue = bytecode.hasInstanceValue.offset(); 919 977 920 978 emitGetVirtualRegister(value, regT0); … … 928 986 #endif // USE(JSVALUE64) 929 987 930 void JIT::emit_op_loop_hint( Instruction*)988 void JIT::emit_op_loop_hint(const Instruction*) 931 989 { 932 990 // Emit the JIT optimization check: … … 937 995 } 938 996 939 void JIT::emitSlow_op_loop_hint( Instruction*, Vector<SlowCaseEntry>::iterator& iter)997 void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 940 998 { 941 999 #if ENABLE(DFG_JIT) … … 956 1014 noOptimizedEntry.link(this); 957 1015 958 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));1016 emitJumpSlowToHot(jump(), currentInstruction->size()); 959 1017 } 960 1018 #else … … 963 1021 } 964 1022 965 void JIT::emit_op_check_traps( Instruction*)1023 void JIT::emit_op_check_traps(const Instruction*) 966 1024 { 967 1025 addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->needTrapHandlingAddress()))); 968 1026 } 969 1027 970 void JIT::emit_op_nop( Instruction*)971 { 972 } 973 974 void JIT::emit_op_super_sampler_begin( Instruction*)1028 void JIT::emit_op_nop(const Instruction*) 1029 { 1030 } 1031 1032 void JIT::emit_op_super_sampler_begin(const Instruction*) 975 1033 { 976 1034 add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); 977 1035 } 978 1036 979 void JIT::emit_op_super_sampler_end( Instruction*)1037 void JIT::emit_op_super_sampler_end(const Instruction*) 980 1038 { 981 1039 sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<void*>(&g_superSamplerCount))); 982 1040 } 983 1041 984 void JIT::emitSlow_op_check_traps( Instruction*, Vector<SlowCaseEntry>::iterator& iter)1042 void JIT::emitSlow_op_check_traps(const Instruction*, Vector<SlowCaseEntry>::iterator& iter) 985 1043 { 986 1044 linkAllSlowCases(iter); … … 989 1047 } 990 1048 991 void JIT::emit_op_new_regexp(Instruction* currentInstruction) 992 { 993 int dst = currentInstruction[1].u.operand; 994 int regexp = currentInstruction[2].u.operand; 1049 void JIT::emit_op_new_regexp(const Instruction* currentInstruction) 1050 { 1051 auto bytecode = currentInstruction->as<OpNewRegexp>(); 1052 int dst = bytecode.dst.offset(); 1053 int regexp = bytecode.regexp.offset(); 995 1054 callOperation(operationNewRegexp, jsCast<RegExp*>(m_codeBlock->getConstant(regexp))); 996 1055 emitStoreCell(dst, returnValueGPR); 997 1056 } 998 1057 999 void JIT::emitNewFuncCommon(Instruction* currentInstruction) 1058 template<typename Op> 1059 void JIT::emitNewFuncCommon(const Instruction* currentInstruction) 1000 1060 { 1001 1061 Jump lazyJump; 1002 int dst = currentInstruction[1].u.operand; 1062 auto bytecode = currentInstruction->as<Op>(); 1063 int dst = bytecode.dst.offset(); 1003 1064 1004 1065 #if USE(JSVALUE64) 1005 emitGetVirtualRegister( currentInstruction[2].u.operand, regT0);1066 emitGetVirtualRegister(bytecode.scope.offset(), regT0); 1006 1067 #else 1007 emitLoadPayload( currentInstruction[2].u.operand, regT0);1068 emitLoadPayload(bytecode.scope.offset(), regT0); 1008 1069 #endif 1009 FunctionExecutable* funcExec = m_codeBlock->functionDecl( currentInstruction[3].u.operand);1010 1011 OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);1070 FunctionExecutable* funcExec = m_codeBlock->functionDecl(bytecode.functionDecl); 1071 1072 OpcodeID opcodeID = Op::opcodeID; 1012 1073 if (opcodeID == op_new_func) 1013 1074 callOperation(operationNewFunction, dst, regT0, funcExec); … … 1022 
1083 } 1023 1084 1024 void JIT::emit_op_new_func(Instruction* currentInstruction) 1025 { 1026 emitNewFuncCommon(currentInstruction); 1027 } 1028 1029 void JIT::emit_op_new_generator_func(Instruction* currentInstruction) 1030 { 1031 emitNewFuncCommon(currentInstruction); 1032 } 1033 1034 void JIT::emit_op_new_async_generator_func(Instruction* currentInstruction) 1035 { 1036 emitNewFuncCommon(currentInstruction); 1037 } 1038 1039 void JIT::emit_op_new_async_func(Instruction* currentInstruction) 1040 { 1041 emitNewFuncCommon(currentInstruction); 1042 } 1043 1044 void JIT::emitNewFuncExprCommon(Instruction* currentInstruction) 1045 { 1046 int dst = currentInstruction[1].u.operand; 1085 void JIT::emit_op_new_func(const Instruction* currentInstruction) 1086 { 1087 emitNewFuncCommon<OpNewFunc>(currentInstruction); 1088 } 1089 1090 void JIT::emit_op_new_generator_func(const Instruction* currentInstruction) 1091 { 1092 emitNewFuncCommon<OpNewGeneratorFunc>(currentInstruction); 1093 } 1094 1095 void JIT::emit_op_new_async_generator_func(const Instruction* currentInstruction) 1096 { 1097 emitNewFuncCommon<OpNewAsyncGeneratorFunc>(currentInstruction); 1098 } 1099 1100 void JIT::emit_op_new_async_func(const Instruction* currentInstruction) 1101 { 1102 emitNewFuncCommon<OpNewAsyncFunc>(currentInstruction); 1103 } 1104 1105 template<typename Op> 1106 void JIT::emitNewFuncExprCommon(const Instruction* currentInstruction) 1107 { 1108 auto bytecode = currentInstruction->as<Op>(); 1109 int dst = bytecode.dst.offset(); 1047 1110 #if USE(JSVALUE64) 1048 emitGetVirtualRegister( currentInstruction[2].u.operand, regT0);1111 emitGetVirtualRegister(bytecode.scope.offset(), regT0); 1049 1112 #else 1050 emitLoadPayload( currentInstruction[2].u.operand, regT0);1113 emitLoadPayload(bytecode.scope.offset(), regT0); 1051 1114 #endif 1052 1115 1053 FunctionExecutable* function = m_codeBlock->functionExpr( currentInstruction[3].u.operand);1054 OpcodeID opcodeID = Interpreter::getOpcodeID(currentInstruction->u.opcode);1116 FunctionExecutable* function = m_codeBlock->functionExpr(bytecode.functionDecl); 1117 OpcodeID opcodeID = Op::opcodeID; 1055 1118 1056 1119 if (opcodeID == op_new_func_exp) … … 1066 1129 } 1067 1130 1068 void JIT::emit_op_new_func_exp(Instruction* currentInstruction) 1069 { 1070 emitNewFuncExprCommon(currentInstruction); 1071 } 1072 1073 void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction) 1074 { 1075 emitNewFuncExprCommon(currentInstruction); 1076 } 1077 1078 void JIT::emit_op_new_async_func_exp(Instruction* currentInstruction) 1079 { 1080 emitNewFuncExprCommon(currentInstruction); 1081 } 1082 1083 void JIT::emit_op_new_async_generator_func_exp(Instruction* currentInstruction) 1084 { 1085 emitNewFuncExprCommon(currentInstruction); 1086 } 1087 1088 void JIT::emit_op_new_array(Instruction* currentInstruction) 1089 { 1090 int dst = currentInstruction[1].u.operand; 1091 int valuesIndex = currentInstruction[2].u.operand; 1092 int size = currentInstruction[3].u.operand; 1131 void JIT::emit_op_new_func_exp(const Instruction* currentInstruction) 1132 { 1133 emitNewFuncExprCommon<OpNewFuncExp>(currentInstruction); 1134 } 1135 1136 void JIT::emit_op_new_generator_func_exp(const Instruction* currentInstruction) 1137 { 1138 emitNewFuncExprCommon<OpNewGeneratorFuncExp>(currentInstruction); 1139 } 1140 1141 void JIT::emit_op_new_async_func_exp(const Instruction* currentInstruction) 1142 { 1143 emitNewFuncExprCommon<OpNewAsyncFuncExp>(currentInstruction); 1144 } 1145 1146 void 
JIT::emit_op_new_async_generator_func_exp(const Instruction* currentInstruction) 1147 { 1148 emitNewFuncExprCommon<OpNewAsyncGeneratorFuncExp>(currentInstruction); 1149 } 1150 1151 void JIT::emit_op_new_array(const Instruction* currentInstruction) 1152 { 1153 auto bytecode = currentInstruction->as<OpNewArray>(); 1154 auto& metadata = bytecode.metadata(m_codeBlock); 1155 int dst = bytecode.dst.offset(); 1156 int valuesIndex = bytecode.argv.offset(); 1157 int size = bytecode.argc; 1093 1158 addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0); 1094 1159 callOperation(operationNewArrayWithProfile, dst, 1095 currentInstruction[4].u.arrayAllocationProfile, regT0, size); 1096 } 1097 1098 void JIT::emit_op_new_array_with_size(Instruction* currentInstruction) 1099 { 1100 int dst = currentInstruction[1].u.operand; 1101 int sizeIndex = currentInstruction[2].u.operand; 1160 &metadata.arrayAllocationProfile, regT0, size); 1161 } 1162 1163 void JIT::emit_op_new_array_with_size(const Instruction* currentInstruction) 1164 { 1165 auto bytecode = currentInstruction->as<OpNewArrayWithSize>(); 1166 auto& metadata = bytecode.metadata(m_codeBlock); 1167 int dst = bytecode.dst.offset(); 1168 int sizeIndex = bytecode.length.offset(); 1102 1169 #if USE(JSVALUE64) 1103 1170 emitGetVirtualRegister(sizeIndex, regT0); 1104 1171 callOperation(operationNewArrayWithSizeAndProfile, dst, 1105 currentInstruction[3].u.arrayAllocationProfile, regT0);1172 &metadata.arrayAllocationProfile, regT0); 1106 1173 #else 1107 1174 emitLoad(sizeIndex, regT1, regT0); 1108 1175 callOperation(operationNewArrayWithSizeAndProfile, dst, 1109 currentInstruction[3].u.arrayAllocationProfile, JSValueRegs(regT1, regT0));1176 &metadata.arrayAllocationProfile, JSValueRegs(regT1, regT0)); 1110 1177 #endif 1111 1178 } 1112 1179 1113 1180 #if USE(JSVALUE64) 1114 void JIT::emit_op_has_structure_property(Instruction* currentInstruction) 1115 { 1116 int dst = currentInstruction[1].u.operand; 1117 int base = currentInstruction[2].u.operand; 1118 int enumerator = currentInstruction[4].u.operand; 1181 void JIT::emit_op_has_structure_property(const Instruction* currentInstruction) 1182 { 1183 auto bytecode = currentInstruction->as<OpHasStructureProperty>(); 1184 int dst = bytecode.dst.offset(); 1185 int base = bytecode.base.offset(); 1186 int enumerator = bytecode.enumerator.offset(); 1119 1187 1120 1188 emitGetVirtualRegister(base, regT0); … … 1131 1199 void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) 1132 1200 { 1133 Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex];1201 const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); 1134 1202 1135 1203 PatchableJump badType; … … 1156 1224 } 1157 1225 1158 void JIT::emit_op_has_indexed_property(Instruction* currentInstruction) 1159 { 1160 int dst = currentInstruction[1].u.operand; 1161 int base = currentInstruction[2].u.operand; 1162 int property = currentInstruction[3].u.operand; 1163 ArrayProfile* profile = arrayProfileFor<OpHasIndexedPropertyShape>(currentInstruction); 1226 void JIT::emit_op_has_indexed_property(const Instruction* currentInstruction) 1227 { 1228 auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); 1229 auto& metadata = bytecode.metadata(m_codeBlock); 1230 int dst = bytecode.dst.offset(); 1231 int base = bytecode.base.offset(); 1232 int property = bytecode.property.offset(); 1233 ArrayProfile* profile = 
&metadata.arrayProfile; 1164 1234 ByValInfo* byValInfo = m_codeBlock->addByValInfo(); 1165 1235 … … 1199 1269 } 1200 1270 1201 void JIT::emitSlow_op_has_indexed_property( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)1271 void JIT::emitSlow_op_has_indexed_property(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 1202 1272 { 1203 1273 linkAllSlowCases(iter); 1204 1274 1205 int dst = currentInstruction[1].u.operand; 1206 int base = currentInstruction[2].u.operand; 1207 int property = currentInstruction[3].u.operand; 1275 auto bytecode = currentInstruction->as<OpHasIndexedProperty>(); 1276 int dst = bytecode.dst.offset(); 1277 int base = bytecode.base.offset(); 1278 int property = bytecode.property.offset(); 1208 1279 ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; 1209 1280 … … 1219 1290 } 1220 1291 1221 void JIT::emit_op_get_direct_pname(Instruction* currentInstruction) 1222 { 1223 int dst = currentInstruction[1].u.operand; 1224 int base = currentInstruction[2].u.operand; 1225 int index = currentInstruction[4].u.operand; 1226 int enumerator = currentInstruction[5].u.operand; 1292 void JIT::emit_op_get_direct_pname(const Instruction* currentInstruction) 1293 { 1294 auto bytecode = currentInstruction->as<OpGetDirectPname>(); 1295 int dst = bytecode.dst.offset(); 1296 int base = bytecode.base.offset(); 1297 int index = bytecode.index.offset(); 1298 int enumerator = bytecode.enumerator.offset(); 1227 1299 1228 1300 // Check that base is a cell … … 1255 1327 1256 1328 done.link(this); 1257 emitValueProfilingSite( );1329 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 1258 1330 emitPutVirtualRegister(dst, regT0); 1259 1331 } 1260 1332 1261 void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction) 1262 { 1263 int dst = currentInstruction[1].u.operand; 1264 int enumerator = currentInstruction[2].u.operand; 1265 int index = currentInstruction[3].u.operand; 1333 void JIT::emit_op_enumerator_structure_pname(const Instruction* currentInstruction) 1334 { 1335 auto bytecode = currentInstruction->as<OpEnumeratorStructurePname>(); 1336 int dst = bytecode.dst.offset(); 1337 int enumerator = bytecode.enumerator.offset(); 1338 int index = bytecode.index.offset(); 1266 1339 1267 1340 emitGetVirtualRegister(index, regT0); … … 1282 1355 } 1283 1356 1284 void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction) 1285 { 1286 int dst = currentInstruction[1].u.operand; 1287 int enumerator = currentInstruction[2].u.operand; 1288 int index = currentInstruction[3].u.operand; 1357 void JIT::emit_op_enumerator_generic_pname(const Instruction* currentInstruction) 1358 { 1359 auto bytecode = currentInstruction->as<OpEnumeratorGenericPname>(); 1360 int dst = bytecode.dst.offset(); 1361 int enumerator = bytecode.enumerator.offset(); 1362 int index = bytecode.index.offset(); 1289 1363 1290 1364 emitGetVirtualRegister(index, regT0); … … 1305 1379 } 1306 1380 1307 void JIT::emit_op_profile_type(Instruction* currentInstruction) 1308 { 1309 TypeLocation* cachedTypeLocation = currentInstruction[2].u.location; 1310 int valueToProfile = currentInstruction[1].u.operand; 1381 void JIT::emit_op_profile_type(const Instruction* currentInstruction) 1382 { 1383 auto bytecode = currentInstruction->as<OpProfileType>(); 1384 auto& metadata = bytecode.metadata(m_codeBlock); 1385 TypeLocation* cachedTypeLocation = metadata.typeLocation; 1386 int valueToProfile = bytecode.target.offset(); 1311 1387 1312 1388 
emitGetVirtualRegister(valueToProfile, regT0); … … 1367 1443 } 1368 1444 1369 void JIT::emit_op_log_shadow_chicken_prologue( Instruction* currentInstruction)1445 void JIT::emit_op_log_shadow_chicken_prologue(const Instruction* currentInstruction) 1370 1446 { 1371 1447 updateTopCallFrame(); 1372 1448 static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true."); 1449 auto bytecode = currentInstruction->as<OpLogShadowChickenPrologue>(); 1373 1450 GPRReg shadowPacketReg = regT0; 1374 1451 GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. 1375 1452 GPRReg scratch2Reg = regT2; 1376 1453 ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg); 1377 emitGetVirtualRegister( currentInstruction[1].u.operand, regT3);1454 emitGetVirtualRegister(bytecode.scope.offset(), regT3); 1378 1455 logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3); 1379 1456 } 1380 1457 1381 void JIT::emit_op_log_shadow_chicken_tail( Instruction* currentInstruction)1458 void JIT::emit_op_log_shadow_chicken_tail(const Instruction* currentInstruction) 1382 1459 { 1383 1460 updateTopCallFrame(); 1384 1461 static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true."); 1462 auto bytecode = currentInstruction->as<OpLogShadowChickenTail>(); 1385 1463 GPRReg shadowPacketReg = regT0; 1386 1464 GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register. 1387 1465 GPRReg scratch2Reg = regT2; 1388 1466 ensureShadowChickenPacket(*vm(), shadowPacketReg, scratch1Reg, scratch2Reg); 1389 emitGetVirtualRegister( currentInstruction[1].u.operand, regT2);1390 emitGetVirtualRegister( currentInstruction[2].u.operand, regT3);1467 emitGetVirtualRegister(bytecode.thisValue.offset(), regT2); 1468 emitGetVirtualRegister(bytecode.scope.offset(), regT3); 1391 1469 logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset)); 1392 1470 } … … 1394 1472 #endif // USE(JSVALUE64) 1395 1473 1396 void JIT::emit_op_profile_control_flow(Instruction* currentInstruction) 1397 { 1398 BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation; 1474 void JIT::emit_op_profile_control_flow(const Instruction* currentInstruction) 1475 { 1476 auto bytecode = currentInstruction->as<OpProfileControlFlow>(); 1477 auto& metadata = bytecode.metadata(m_codeBlock); 1478 BasicBlockLocation* basicBlockLocation = metadata.basicBlockLocation; 1399 1479 #if USE(JSVALUE64) 1400 1480 basicBlockLocation->emitExecuteCode(*this); … … 1404 1484 } 1405 1485 1406 void JIT::emit_op_argument_count(Instruction* currentInstruction) 1407 { 1408 int dst = currentInstruction[1].u.operand; 1486 void JIT::emit_op_argument_count(const Instruction* currentInstruction) 1487 { 1488 auto bytecode = currentInstruction->as<OpArgumentCount>(); 1489 int dst = bytecode.dst.offset(); 1409 1490 load32(payloadFor(CallFrameSlot::argumentCount), regT0); 1410 1491 sub32(TrustedImm32(1), regT0); … … 1414 1495 } 1415 1496 1416 void JIT::emit_op_get_rest_length(Instruction* currentInstruction) 1417 { 1418 int dst = currentInstruction[1].u.operand; 1419 unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue; 1497 void JIT::emit_op_get_rest_length(const Instruction* currentInstruction) 1498 { 1499 auto bytecode = currentInstruction->as<OpGetRestLength>(); 1500 int dst = bytecode.dst.offset(); 1501 unsigned numParamsToSkip = bytecode.numParametersToSkip; 1420 1502 
load32(payloadFor(CallFrameSlot::argumentCount), regT0); 1421 1503 sub32(TrustedImm32(1), regT0); … … 1443 1525 } 1444 1526 1445 void JIT::emit_op_get_argument(Instruction* currentInstruction) 1446 { 1447 int dst = currentInstruction[1].u.operand; 1448 int index = currentInstruction[2].u.operand; 1527 void JIT::emit_op_get_argument(const Instruction* currentInstruction) 1528 { 1529 auto bytecode = currentInstruction->as<OpGetArgument>(); 1530 int dst = bytecode.dst.offset(); 1531 int index = bytecode.index; 1449 1532 #if USE(JSVALUE64) 1450 1533 JSValueRegs resultRegs(regT0); … … 1462 1545 1463 1546 done.link(this); 1464 emitValueProfilingSite( );1547 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 1465 1548 emitPutVirtualRegister(dst, resultRegs); 1466 1549 } -
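Note on the hunks above: every emitter in JITOpcodes.cpp moves from indexing raw operand slots to decoding the instruction into a typed struct with named fields (the Op* structs are presumably generated from the bytecode definitions added elsewhere in this change). A minimal before/after sketch using op_get_rest_length from the hunk above:

    // Old format: untyped slots, operand positions hard-coded per opcode.
    int dst = currentInstruction[1].u.operand;
    unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;

    // New format: decode once, then read named fields.
    auto bytecode = currentInstruction->as<OpGetRestLength>();
    int dst = bytecode.dst.offset();
    unsigned numParamsToSkip = bytecode.numParametersToSkip;

The same decode is why OPCODE_LENGTH(op_loop_hint) becomes currentInstruction->size(): instructions are now variable-width (narrow vs. wide encodings, see the LLIntData changes below), so the length must come from the instruction itself.
-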
trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
r237486 r237547 973 973 void JIT::emit_op_to_this(Instruction* currentInstruction) 974 974 { 975 WriteBarrierBase<Structure>* cachedStructure = ¤tInstruction[2].u.structure; 976 int thisRegister = currentInstruction[1].u.operand; 975 auto bytecode = currentInstruction->as<OpToThis>(); 976 auto& metadata = bytecode.metadata(m_codeBlock); 977 WriteBarrierBase<Structure>* cachedStructure = &metadata.cachedStructure; 978 int thisRegister = bytecode.srcDst.offset(); 977 979 978 980 emitLoad(thisRegister, regT3, regT2); -
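The 32-bit emitters get the same decode, and this hunk shows the other half of the new format: mutable profiling state (here the cached Structure for op_to_this) moves out of the instruction stream into per-opcode metadata reached through the CodeBlock. The shape, as in the hunk above:

    auto bytecode = currentInstruction->as<OpToThis>();
    auto& metadata = bytecode.metadata(m_codeBlock); // per-opcode side table owned by the CodeBlock
    WriteBarrierBase<Structure>* cachedStructure = &metadata.cachedStructure;
    int thisRegister = bytecode.srcDst.offset();

Keeping writable state in a side table is presumably what allows the encoded bytecode itself to stay compact and read-only; note the const Instruction* signatures throughout this patch.
-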
trunk/Source/JavaScriptCore/jit/JITOperations.cpp
r237486 r237547 754 754 if (byValInfo->seen) { 755 755 if (byValInfo->cachedId == propertyName) { 756 JIT::compilePutByValWithCachedId (&vm, exec->codeBlock(), byValInfo, returnAddress, NotDirect, propertyName);756 JIT::compilePutByValWithCachedId<OpPutByVal>(&vm, exec->codeBlock(), byValInfo, returnAddress, NotDirect, propertyName); 757 757 optimizationResult = OptimizationResult::Optimized; 758 758 } else { … … 836 836 if (byValInfo->seen) { 837 837 if (byValInfo->cachedId == propertyName) { 838 JIT::compilePutByValWithCachedId (&vm, exec->codeBlock(), byValInfo, returnAddress, Direct, propertyName);838 JIT::compilePutByValWithCachedId<OpPutByValDirect>(&vm, exec->codeBlock(), byValInfo, returnAddress, Direct, propertyName); 839 839 optimizationResult = OptimizationResult::Optimized; 840 840 } else { … … 1658 1658 1659 1659 codeBlock->ensureCatchLivenessIsComputedForBytecodeOffset(bytecodeIndex); 1660 ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(codeBlock->instructions()[bytecodeIndex + 3].u.pointer); 1661 buffer->forEach([&] (ValueProfileAndOperand& profile) { 1660 auto bytecode = codeBlock->instructions().at(bytecodeIndex)->as<OpCatch>(); 1661 auto& metadata = bytecode.metadata(codeBlock); 1662 metadata.buffer->forEach([&] (ValueProfileAndOperand& profile) { 1662 1663 profile.m_profile.m_buckets[0] = JSValue::encode(exec->uncheckedR(profile.m_operand).jsValue()); 1663 1664 }); … … 2281 2282 } 2282 2283 2283 EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC)2284 EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, const Instruction* pc) 2284 2285 { 2285 2286 VM& vm = exec->vm(); … … 2288 2289 2289 2290 CodeBlock* codeBlock = exec->codeBlock(); 2290 Instruction* pc = bytecodePC; 2291 2292 const Identifier& ident = codeBlock->identifier( pc[3].u.operand);2293 JSObject* scope = jsCast<JSObject*>(exec->uncheckedR( pc[2].u.operand).jsValue());2294 GetPutInfo getPutInfo(pc[4].u.operand);2291 2292 auto bytecode = pc->as<OpGetFromScope>(); 2293 const Identifier& ident = codeBlock->identifier(bytecode.var); 2294 JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(bytecode.scope.offset()).jsValue()); 2295 GetPutInfo& getPutInfo = bytecode.metadata(codeBlock).getPutInfo; 2295 2296 2296 2297 // ModuleVar is always converted to ClosureVar for get_from_scope. 
… … 2314 2315 } 2315 2316 2316 CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);2317 CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, bytecode, scope, slot, ident); 2317 2318 2318 2319 if (!result) … … 2322 2323 } 2323 2324 2324 void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC)2325 void JIT_OPERATION operationPutToScope(ExecState* exec, const Instruction* pc) 2325 2326 { 2326 2327 VM& vm = exec->vm(); … … 2328 2329 auto throwScope = DECLARE_THROW_SCOPE(vm); 2329 2330 2330 Instruction* pc = bytecodePC;2331 2332 2331 CodeBlock* codeBlock = exec->codeBlock(); 2333 const Identifier& ident = codeBlock->identifier(pc[2].u.operand); 2334 JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue()); 2335 JSValue value = exec->r(pc[3].u.operand).jsValue(); 2336 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); 2332 auto bytecode = pc->as<OpPutToScope>(); 2333 auto& metadata = bytecode.metadata(codeBlock); 2334 2335 const Identifier& ident = codeBlock->identifier(bytecode.var); 2336 JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(bytecode.scope.offset()).jsValue()); 2337 JSValue value = exec->r(bytecode.value.offset()).jsValue(); 2338 GetPutInfo& getPutInfo = metadata.getPutInfo; 2337 2339 2338 2340 // ModuleVar does not keep the scope register value alive in DFG. … … 2341 2343 if (getPutInfo.resolveType() == LocalClosureVar) { 2342 2344 JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); 2343 environment->variableAt(ScopeOffset( pc[6].u.operand)).set(vm, environment, value);2344 if (WatchpointSet* set = pc[5].u.watchpointSet)2345 environment->variableAt(ScopeOffset(metadata.operand)).set(vm, environment, value); 2346 if (WatchpointSet* set = metadata.watchpointSet) 2345 2347 set->touch(vm, "Executed op_put_scope<LocalClosureVar>"); 2346 2348 return; … … 2371 2373 RETURN_IF_EXCEPTION(throwScope, void()); 2372 2374 2373 CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);2375 CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, bytecode, scope, slot, ident); 2374 2376 } 2375 2377 -
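Runtime slow paths now take a const Instruction* and decode it exactly like the emitters do. The operationPutToScope hunk above reduces to this shape (error handling elided; getPutInfo is now a reference into metadata rather than an immediate copied out of an operand slot):

    auto bytecode = pc->as<OpPutToScope>();
    auto& metadata = bytecode.metadata(codeBlock);
    const Identifier& ident = codeBlock->identifier(bytecode.var);
    JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(bytecode.scope.offset()).jsValue());
    GetPutInfo& getPutInfo = metadata.getPutInfo;

This is presumably also why tryCacheGetFromScopeGlobal/tryCachePutToScopeGlobal now receive the decoded bytecode instead of a raw pc: any caching they perform lands in metadata, not in the instruction stream.
-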
trunk/Source/JavaScriptCore/jit/JITOperations.h
r237486 r237547 463 463 char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL; 464 464 char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL; 465 EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;466 void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;465 EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, const Instruction* bytecodePC) WTF_INTERNAL; 466 void JIT_OPERATION operationPutToScope(ExecState*, const Instruction* bytecodePC) WTF_INTERNAL; 467 467 468 468 char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL; -
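Only the prototypes change here, but the constness matters: with operands and metadata split apart, the runtime presumably has no remaining reason to write through the bytecode pointer. For reference, the matching JIT call site (from the JITPropertyAccess.cpp hunks below) simply forwards the pointer unchanged:

    callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction);
-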
trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
r237486 r237547 52 52 #if USE(JSVALUE64) 53 53 54 void JIT::emit_op_get_by_val(Instruction* currentInstruction) 55 { 56 int dst = currentInstruction[1].u.operand; 57 int base = currentInstruction[2].u.operand; 58 int property = currentInstruction[3].u.operand; 59 ArrayProfile* profile = arrayProfileFor<OpGetByValShape>(currentInstruction); 54 void JIT::emit_op_get_by_val(const Instruction* currentInstruction) 55 { 56 auto bytecode = currentInstruction->as<OpGetByVal>(); 57 auto& metadata = bytecode.metadata(m_codeBlock); 58 int dst = bytecode.dst.offset(); 59 int base = bytecode.base.offset(); 60 int property = bytecode.property.offset(); 61 ArrayProfile* profile = &metadata.arrayProfile; 60 62 ByValInfo* byValInfo = m_codeBlock->addByValInfo(); 61 63 … … 119 121 } 120 122 121 emitValueProfilingSite( );123 emitValueProfilingSite(metadata); 122 124 emitPutVirtualRegister(dst); 123 125 … … 127 129 } 128 130 129 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)131 JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, OpGetByVal bytecode, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases) 130 132 { 131 133 // base: regT0 … … 133 135 // scratch: regT3 134 136 135 int dst = currentInstruction[1].u.operand;137 int dst = bytecode.dst.offset(); 136 138 137 139 slowCases.append(branchIfNotCell(regT1)); … … 148 150 gen.slowPathJump().link(this); 149 151 150 Call call = callOperationWithProfile( operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());152 Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl()); 151 153 gen.reportSlowPathCall(coldPathBegin, call); 152 154 slowDoneCase = jump(); … … 155 157 } 156 158 157 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 158 { 159 int dst = currentInstruction[1].u.operand; 160 int base = currentInstruction[2].u.operand; 161 int property = currentInstruction[3].u.operand; 159 void JIT::emitSlow_op_get_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 160 { 161 auto bytecode = currentInstruction->as<OpGetByVal>(); 162 int dst = bytecode.dst.offset(); 163 int base = bytecode.base.offset(); 164 int property = bytecode.property.offset(); 162 165 ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; 163 166 … … 172 175 Jump failed = branchTest64(Zero, regT0); 173 176 emitPutVirtualRegister(dst, regT0); 174 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));177 emitJumpSlowToHot(jump(), currentInstruction->size()); 175 178 failed.link(this); 176 179 notString.link(this); … … 190 193 m_byValInstructionIndex++; 191 194 192 emitValueProfilingSite(); 193 } 194 195 void JIT::emit_op_put_by_val(Instruction* currentInstruction) 196 { 197 int base = currentInstruction[1].u.operand; 198 int property = currentInstruction[2].u.operand; 199 ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction); 195 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 196 } 197 198 void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction) 199 { 200 emit_op_put_by_val<OpPutByValDirect>(currentInstruction); 201 } 202 203 template<typename Op> 204 void JIT::emit_op_put_by_val(const Instruction* currentInstruction) 205 { 
206 auto bytecode = currentInstruction->as<Op>(); 207 auto& metadata = bytecode.metadata(m_codeBlock); 208 int base = bytecode.base.offset(); 209 int property = bytecode.property.offset(); 210 ArrayProfile* profile = &metadata.arrayProfile; 200 211 ByValInfo* byValInfo = m_codeBlock->addByValInfo(); 201 212 … … 227 238 switch (mode) { 228 239 case JITInt32: 229 slowCases = emitInt32PutByVal( currentInstruction, badType);240 slowCases = emitInt32PutByVal(bytecode, badType); 230 241 break; 231 242 case JITDouble: 232 slowCases = emitDoublePutByVal( currentInstruction, badType);243 slowCases = emitDoublePutByVal(bytecode, badType); 233 244 break; 234 245 case JITContiguous: 235 slowCases = emitContiguousPutByVal( currentInstruction, badType);246 slowCases = emitContiguousPutByVal(bytecode, badType); 236 247 break; 237 248 case JITArrayStorage: 238 slowCases = emitArrayStoragePutByVal( currentInstruction, badType);249 slowCases = emitArrayStoragePutByVal(bytecode, badType); 239 250 break; 240 251 default: … … 251 262 } 252 263 253 JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape) 254 { 255 int value = currentInstruction[3].u.operand; 256 ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction); 257 264 template<typename Op> 265 JIT::JumpList JIT::emitGenericContiguousPutByVal(Op bytecode, PatchableJump& badType, IndexingType indexingShape) 266 { 267 auto& metadata = bytecode.metadata(m_codeBlock); 268 int value = bytecode.value.offset(); 269 ArrayProfile* profile = &metadata.arrayProfile; 270 258 271 JumpList slowCases; 259 272 … … 284 297 case ContiguousShape: 285 298 store64(regT3, BaseIndex(regT2, regT1, TimesEight)); 286 emitWriteBarrier( currentInstruction[1].u.operand, value, ShouldFilterValue);299 emitWriteBarrier(bytecode.base.offset(), value, ShouldFilterValue); 287 300 break; 288 301 default: … … 307 320 } 308 321 309 JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType) 310 { 311 int value = currentInstruction[3].u.operand; 312 ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction); 313 322 template<typename Op> 323 JIT::JumpList JIT::emitArrayStoragePutByVal(Op bytecode, PatchableJump& badType) 324 { 325 auto& metadata = bytecode.metadata(m_codeBlock); 326 int value = bytecode.value.offset(); 327 ArrayProfile* profile = &metadata.arrayProfile; 328 314 329 JumpList slowCases; 315 330 … … 323 338 emitGetVirtualRegister(value, regT3); 324 339 store64(regT3, BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset())); 325 emitWriteBarrier( currentInstruction[1].u.operand, value, ShouldFilterValue);340 emitWriteBarrier(bytecode.base.offset(), value, ShouldFilterValue); 326 341 Jump end = jump(); 327 342 … … 341 356 } 342 357 343 JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) 358 template<typename Op> 359 JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Op bytecode, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases) 344 360 { 345 361 // base: regT0 … … 347 363 // scratch: regT2 348 364 349 int base = currentInstruction[1].u.operand;350 int value = currentInstruction[3].u.operand;365 int base = bytecode.base.offset(); 366 int value = bytecode.value.offset(); 351 367 352 368 
slowCases.append(branchIfNotCell(regT1)); … … 374 390 } 375 391 376 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 377 { 378 int base = currentInstruction[1].u.operand; 379 int property = currentInstruction[2].u.operand; 380 int value = currentInstruction[3].u.operand; 392 void JIT::emitSlow_op_put_by_val(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 393 { 394 bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; 395 int base; 396 int property; 397 int value; 398 399 auto load = [&](auto bytecode) { 400 base = bytecode.base.offset(); 401 property = bytecode.property.offset(); 402 value = bytecode.value.offset(); 403 }; 404 405 if (isDirect) 406 load(currentInstruction->as<OpPutByValDirect>()); 407 else 408 load(currentInstruction->as<OpPutByVal>()); 409 381 410 ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; 382 411 … … 387 416 emitGetVirtualRegister(property, regT1); 388 417 emitGetVirtualRegister(value, regT2); 389 bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;390 418 Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo); 391 419 … … 395 423 } 396 424 397 void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction) 398 { 399 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 400 int32_t options = currentInstruction[3].u.operand; 401 emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); 402 callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1); 403 } 404 405 void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction) 406 { 407 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 408 int32_t options = currentInstruction[3].u.operand; 409 emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); 410 callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1); 411 } 412 413 void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction) 414 { 415 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 416 int32_t attribute = currentInstruction[3].u.operand; 417 emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); 418 emitGetVirtualRegister(currentInstruction[5].u.operand, regT2); 419 callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), attribute, regT1, regT2); 420 } 421 422 void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction) 423 { 424 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 425 emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); 426 int32_t attributes = currentInstruction[3].u.operand; 427 emitGetVirtualRegister(currentInstruction[4].u.operand, regT2); 425 void JIT::emit_op_put_getter_by_id(const Instruction* currentInstruction) 426 { 427 auto bytecode = currentInstruction->as<OpPutGetterById>(); 428 emitGetVirtualRegister(bytecode.base.offset(), regT0); 429 int32_t options = bytecode.attributes; 430 emitGetVirtualRegister(bytecode.accessor.offset(), regT1); 431 callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(bytecode.property).impl(), options, regT1); 432 } 433 434 void JIT::emit_op_put_setter_by_id(const Instruction* currentInstruction) 435 { 436 auto bytecode = 
currentInstruction->as<OpPutSetterById>(); 437 emitGetVirtualRegister(bytecode.base.offset(), regT0); 438 int32_t options = bytecode.attributes; 439 emitGetVirtualRegister(bytecode.accessor.offset(), regT1); 440 callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(bytecode.property).impl(), options, regT1); 441 } 442 443 void JIT::emit_op_put_getter_setter_by_id(const Instruction* currentInstruction) 444 { 445 auto bytecode = currentInstruction->as<OpPutGetterSetterById>(); 446 emitGetVirtualRegister(bytecode.base.offset(), regT0); 447 int32_t attribute = bytecode.attributes; 448 emitGetVirtualRegister(bytecode.getter.offset(), regT1); 449 emitGetVirtualRegister(bytecode.setter.offset(), regT2); 450 callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(bytecode.property).impl(), attribute, regT1, regT2); 451 } 452 453 void JIT::emit_op_put_getter_by_val(const Instruction* currentInstruction) 454 { 455 auto bytecode = currentInstruction->as<OpPutGetterByVal>(); 456 emitGetVirtualRegister(bytecode.base.offset(), regT0); 457 emitGetVirtualRegister(bytecode.property.offset(), regT1); 458 int32_t attributes = bytecode.attributes; 459 emitGetVirtualRegister(bytecode.accessor, regT2); 428 460 callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2); 429 461 } 430 462 431 void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction) 432 { 433 emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); 434 emitGetVirtualRegister(currentInstruction[2].u.operand, regT1); 435 int32_t attributes = currentInstruction[3].u.operand; 436 emitGetVirtualRegister(currentInstruction[4].u.operand, regT2); 463 void JIT::emit_op_put_setter_by_val(const Instruction* currentInstruction) 464 { 465 auto bytecode = currentInstruction->as<OpPutSetterByVal>(); 466 emitGetVirtualRegister(bytecode.base.offset(), regT0); 467 emitGetVirtualRegister(bytecode.property.offset(), regT1); 468 int32_t attributes = bytecode.attributes; 469 emitGetVirtualRegister(bytecode.accessor.offset(), regT2); 437 470 callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2); 438 471 } 439 472 440 void JIT::emit_op_del_by_id(Instruction* currentInstruction) 441 { 442 int dst = currentInstruction[1].u.operand; 443 int base = currentInstruction[2].u.operand; 444 int property = currentInstruction[3].u.operand; 473 void JIT::emit_op_del_by_id(const Instruction* currentInstruction) 474 { 475 auto bytecode = currentInstruction->as<OpDelById>(); 476 int dst = bytecode.dst.offset(); 477 int base = bytecode.base.offset(); 478 int property = bytecode.property; 445 479 emitGetVirtualRegister(base, regT0); 446 480 callOperation(operationDeleteByIdJSResult, dst, regT0, m_codeBlock->identifier(property).impl()); 447 481 } 448 482 449 void JIT::emit_op_del_by_val(Instruction* currentInstruction) 450 { 451 int dst = currentInstruction[1].u.operand; 452 int base = currentInstruction[2].u.operand; 453 int property = currentInstruction[3].u.operand; 483 void JIT::emit_op_del_by_val(const Instruction* currentInstruction) 484 { 485 auto bytecode = currentInstruction->as<OpDelByVal>(); 486 int dst = bytecode.dst.offset(); 487 int base = bytecode.base.offset(); 488 int property = bytecode.property.offset(); 454 489 emitGetVirtualRegister(base, regT0); 455 490 emitGetVirtualRegister(property, regT1); … … 457 492 } 458 493 459 void JIT::emit_op_try_get_by_id(Instruction* currentInstruction) 460 { 461 int resultVReg = currentInstruction[1].u.operand; 462 int baseVReg = 
currentInstruction[2].u.operand; 463 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 494 void JIT::emit_op_try_get_by_id(const Instruction* currentInstruction) 495 { 496 auto bytecode = currentInstruction->as<OpTryGetById>(); 497 int resultVReg = bytecode.dst.offset(); 498 int baseVReg = bytecode.base.offset(); 499 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 464 500 465 501 emitGetVirtualRegister(baseVReg, regT0); … … 474 510 m_getByIds.append(gen); 475 511 476 emitValueProfilingSite( );512 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 477 513 emitPutVirtualRegister(resultVReg); 478 514 } 479 515 480 void JIT::emitSlow_op_try_get_by_id( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)516 void JIT::emitSlow_op_try_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 481 517 { 482 518 linkAllSlowCases(iter); 483 519 484 int resultVReg = currentInstruction[1].u.operand; 485 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 520 auto bytecode = currentInstruction->as<OpTryGetById>(); 521 int resultVReg = bytecode.dst.offset(); 522 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 486 523 487 524 JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; … … 494 531 } 495 532 496 void JIT::emit_op_get_by_id_direct(Instruction* currentInstruction) 497 { 498 int resultVReg = currentInstruction[1].u.operand; 499 int baseVReg = currentInstruction[2].u.operand; 500 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 533 void JIT::emit_op_get_by_id_direct(const Instruction* currentInstruction) 534 { 535 auto bytecode = currentInstruction->as<OpGetByIdDirect>(); 536 int resultVReg = bytecode.dst.offset(); 537 int baseVReg = bytecode.base.offset(); 538 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 501 539 502 540 emitGetVirtualRegister(baseVReg, regT0); … … 511 549 m_getByIds.append(gen); 512 550 513 emitValueProfilingSite( );551 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 514 552 emitPutVirtualRegister(resultVReg); 515 553 } 516 554 517 void JIT::emitSlow_op_get_by_id_direct( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)555 void JIT::emitSlow_op_get_by_id_direct(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 518 556 { 519 557 linkAllSlowCases(iter); 520 558 521 int resultVReg = currentInstruction[1].u.operand; 522 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 559 auto bytecode = currentInstruction->as<OpGetByIdDirect>(); 560 int resultVReg = bytecode.dst.offset(); 561 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 523 562 524 563 JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; … … 526 565 Label coldPathBegin = label(); 527 566 528 Call call = callOperationWithProfile( operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());567 Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdDirectOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); 529 568 530 569 gen.reportSlowPathCall(coldPathBegin, call); 531 570 } 532 571 533 void JIT::emit_op_get_by_id(Instruction* currentInstruction) 534 { 535 int resultVReg = currentInstruction[1].u.operand; 536 int baseVReg = currentInstruction[2].u.operand; 537 const Identifier* ident = 
&(m_codeBlock->identifier(currentInstruction[3].u.operand)); 572 void JIT::emit_op_get_by_id(const Instruction* currentInstruction) 573 { 574 auto bytecode = currentInstruction->as<OpGetById>(); 575 int resultVReg = bytecode.dst.offset(); 576 int baseVReg = bytecode.base.offset(); 577 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 538 578 539 579 emitGetVirtualRegister(baseVReg, regT0); … … 551 591 m_getByIds.append(gen); 552 592 553 emitValueProfilingSite( );593 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 554 594 emitPutVirtualRegister(resultVReg); 555 595 } 556 596 557 void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction) 558 { 559 int resultVReg = currentInstruction[1].u.operand; 560 int baseVReg = currentInstruction[2].u.operand; 561 int thisVReg = currentInstruction[3].u.operand; 562 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand)); 597 void JIT::emit_op_get_by_id_with_this(const Instruction* currentInstruction) 598 { 599 auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); 600 int resultVReg = bytecode.dst.offset(); 601 int baseVReg = bytecode.base.offset(); 602 int thisVReg = bytecode.thisValue.offset(); 603 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 563 604 564 605 emitGetVirtualRegister(baseVReg, regT0); … … 574 615 m_getByIdsWithThis.append(gen); 575 616 576 emitValueProfilingSite( );617 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 577 618 emitPutVirtualRegister(resultVReg); 578 619 } 579 620 580 void JIT::emitSlow_op_get_by_id( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)621 void JIT::emitSlow_op_get_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 581 622 { 582 623 linkAllSlowCases(iter); 583 624 584 int resultVReg = currentInstruction[1].u.operand; 585 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 625 auto bytecode = currentInstruction->as<OpGetById>(); 626 int resultVReg = bytecode.dst.offset(); 627 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 586 628 587 629 JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++]; … … 589 631 Label coldPathBegin = label(); 590 632 591 Call call = callOperationWithProfile( operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());633 Call call = callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl()); 592 634 593 635 gen.reportSlowPathCall(coldPathBegin, call); 594 636 } 595 637 596 void JIT::emitSlow_op_get_by_id_with_this( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)638 void JIT::emitSlow_op_get_by_id_with_this(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 597 639 { 598 640 linkAllSlowCases(iter); 599 641 600 int resultVReg = currentInstruction[1].u.operand; 601 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[4].u.operand)); 642 auto bytecode = currentInstruction->as<OpGetByIdWithThis>(); 643 int resultVReg = bytecode.dst.offset(); 644 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 602 645 603 646 JITGetByIdWithThisGenerator& gen = m_getByIdsWithThis[m_getByIdWithThisIndex++]; … … 605 648 Label coldPathBegin = label(); 606 649 607 Call call = callOperationWithProfile( operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl());650 Call call = 
callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetByIdWithThisOptimize, resultVReg, gen.stubInfo(), regT0, regT1, ident->impl()); 608 651 609 652 gen.reportSlowPathCall(coldPathBegin, call); 610 653 } 611 654 612 void JIT::emit_op_put_by_id(Instruction* currentInstruction) 613 { 614 int baseVReg = currentInstruction[1].u.operand; 615 int valueVReg = currentInstruction[3].u.operand; 616 unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect; 655 void JIT::emit_op_put_by_id(const Instruction* currentInstruction) 656 { 657 auto bytecode = currentInstruction->as<OpPutById>(); 658 auto& metadata = bytecode.metadata(m_codeBlock); 659 int baseVReg = bytecode.base.offset(); 660 int valueVReg = bytecode.value.offset(); 661 unsigned direct = metadata.flags & PutByIdIsDirect; 617 662 618 663 // In order to be able to patch both the Structure, and the object offset, we store one pointer, … … 637 682 } 638 683 639 void JIT::emitSlow_op_put_by_id( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)684 void JIT::emitSlow_op_put_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 640 685 { 641 686 linkAllSlowCases(iter); 642 687 643 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand)); 688 auto bytecode = currentInstruction->as<OpPutById>(); 689 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 644 690 645 691 Label coldPathBegin(this); … … 652 698 } 653 699 654 void JIT::emit_op_in_by_id(Instruction* currentInstruction) 655 { 656 int resultVReg = currentInstruction[1].u.operand; 657 int baseVReg = currentInstruction[2].u.operand; 658 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 700 void JIT::emit_op_in_by_id(const Instruction* currentInstruction) 701 { 702 auto bytecode = currentInstruction->as<OpInById>(); 703 int resultVReg = bytecode.dst.offset(); 704 int baseVReg = bytecode.base.offset(); 705 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 659 706 660 707 emitGetVirtualRegister(baseVReg, regT0); … … 672 719 } 673 720 674 void JIT::emitSlow_op_in_by_id( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)721 void JIT::emitSlow_op_in_by_id(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 675 722 { 676 723 linkAllSlowCases(iter); 677 724 678 int resultVReg = currentInstruction[1].u.operand; 679 const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand)); 725 auto bytecode = currentInstruction->as<OpInById>(); 726 int resultVReg = bytecode.dst.offset(); 727 const Identifier* ident = &(m_codeBlock->identifier(bytecode.property)); 680 728 681 729 JITInByIdGenerator& gen = m_inByIds[m_inByIdIndex++]; … … 704 752 } 705 753 706 void JIT::emit_op_resolve_scope(Instruction* currentInstruction) 707 { 708 int dst = currentInstruction[1].u.operand; 709 int scope = currentInstruction[2].u.operand; 710 ResolveType resolveType = static_cast<ResolveType>(copiedInstruction(currentInstruction)[4].u.operand); 711 unsigned depth = currentInstruction[5].u.operand; 754 void JIT::emit_op_resolve_scope(const Instruction* currentInstruction) 755 { 756 auto bytecode = currentInstruction->as<OpResolveScope>(); 757 auto& metadata = bytecode.metadata(m_codeBlock); 758 int dst = bytecode.dst.offset(); 759 int scope = bytecode.scope.offset(); 760 ResolveType resolveType = metadata.resolveType; 761 unsigned depth = metadata.localScopeDepth; 712 762 713 
763 auto emitCode = [&] (ResolveType resolveType) { … … 731 781 break; 732 782 case ModuleVar: 733 move(TrustedImmPtr( currentInstruction[6].u.jsCell.get()), regT0);783 move(TrustedImmPtr(metadata.lexicalEnvironment.get()), regT0); 734 784 emitPutVirtualRegister(dst); 735 785 break; … … 748 798 case UnresolvedPropertyWithVarInjectionChecks: { 749 799 JumpList skipToEnd; 750 load32(& currentInstruction[4], regT0);800 load32(&metadata.resolveType, regT0); 751 801 752 802 Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty)); … … 807 857 } 808 858 809 void JIT::emit_op_get_from_scope(Instruction* currentInstruction) 810 { 811 int dst = currentInstruction[1].u.operand; 812 int scope = currentInstruction[2].u.operand; 813 ResolveType resolveType = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand).resolveType(); 814 Structure** structureSlot = currentInstruction[5].u.structure.slot(); 815 uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(¤tInstruction[6].u.pointer); 859 void JIT::emit_op_get_from_scope(const Instruction* currentInstruction) 860 { 861 auto bytecode = currentInstruction->as<OpGetFromScope>(); 862 auto& metadata = bytecode.metadata(m_codeBlock); 863 int dst = bytecode.dst.offset(); 864 int scope = bytecode.scope.offset(); 865 ResolveType resolveType = metadata.getPutInfo.resolveType(); 866 Structure** structureSlot = metadata.structure.slot(); 867 uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand); 816 868 817 869 auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { … … 873 925 case UnresolvedPropertyWithVarInjectionChecks: { 874 926 JumpList skipToEnd; 875 load32(& currentInstruction[4], regT0);927 load32(&metadata.getPutInfo, regT0); 876 928 and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 877 929 … … 904 956 } 905 957 emitPutVirtualRegister(dst); 906 emitValueProfilingSite( );907 } 908 909 void JIT::emitSlow_op_get_from_scope( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)958 emitValueProfilingSite(metadata); 959 } 960 961 void JIT::emitSlow_op_get_from_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 910 962 { 911 963 linkAllSlowCases(iter); 912 964 913 int dst = currentInstruction[1].u.operand; 914 callOperationWithProfile(operationGetFromScope, dst, currentInstruction); 965 auto bytecode = currentInstruction->as<OpGetFromScope>(); 966 int dst = bytecode.dst.offset(); 967 callOperationWithProfile(bytecode.metadata(m_codeBlock), operationGetFromScope, dst, currentInstruction); 915 968 } 916 969 … … 938 991 } 939 992 940 void JIT::emit_op_put_to_scope(Instruction* currentInstruction) 941 { 942 int scope = currentInstruction[1].u.operand; 943 int value = currentInstruction[3].u.operand; 944 GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand); 993 void JIT::emit_op_put_to_scope(const Instruction* currentInstruction) 994 { 995 auto bytecode = currentInstruction->as<OpPutToScope>(); 996 auto& metadata = bytecode.metadata(m_codeBlock); 997 int scope = bytecode.scope.offset(); 998 int value = bytecode.value.offset(); 999 GetPutInfo getPutInfo = copiedGetPutInfo(bytecode); 945 1000 ResolveType resolveType = getPutInfo.resolveType(); 946 Structure** structureSlot = currentInstruction[5].u.structure.slot();947 uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(& currentInstruction[6].u.pointer);1001 Structure** structureSlot = metadata.structure.slot(); 1002 uintptr_t* 
operandSlot = reinterpret_cast<uintptr_t*>(&metadata.operand); 948 1003 949 1004 auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) { … … 981 1036 } 982 1037 if (indirectLoadForOperand) 983 emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(¤tInstruction[5]));1038 emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, &metadata.watchpointSet); 984 1039 else 985 emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);1040 emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, metadata.watchpointSet); 986 1041 emitWriteBarrier(constantScope, value, ShouldFilterValue); 987 1042 break; … … 991 1046 case ClosureVarWithVarInjectionChecks: 992 1047 emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); 993 emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);1048 emitPutClosureVar(scope, *operandSlot, value, metadata.watchpointSet); 994 1049 emitWriteBarrier(scope, value, ShouldFilterValue); 995 1050 break; … … 1009 1064 case UnresolvedPropertyWithVarInjectionChecks: { 1010 1065 JumpList skipToEnd; 1011 load32(& currentInstruction[4], regT0);1066 load32(&metadata.getPutInfo, regT0); 1012 1067 and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0 1013 1068 … … 1041 1096 } 1042 1097 1043 void JIT::emitSlow_op_put_to_scope( Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)1098 void JIT::emitSlow_op_put_to_scope(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) 1044 1099 { 1045 1100 linkAllSlowCases(iter); 1046 1101 1047 GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand);1048 ResolveType resolveType = getPutInfo.resolveType();1102 auto bytecode = currentInstruction->as<OpPutToScope>(); 1103 ResolveType resolveType = copiedGetPutInfo(bytecode).resolveType(); 1049 1104 if (resolveType == ModuleVar) { 1050 1105 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error); … … 1054 1109 } 1055 1110 1056 void JIT::emit_op_get_from_arguments(Instruction* currentInstruction) 1057 { 1058 int dst = currentInstruction[1].u.operand; 1059 int arguments = currentInstruction[2].u.operand; 1060 int index = currentInstruction[3].u.operand; 1111 void JIT::emit_op_get_from_arguments(const Instruction* currentInstruction) 1112 { 1113 auto bytecode = currentInstruction->as<OpGetFromArguments>(); 1114 int dst = bytecode.dst.offset(); 1115 int arguments = bytecode.arguments.offset(); 1116 int index = bytecode.index; 1061 1117 1062 1118 emitGetVirtualRegister(arguments, regT0); 1063 1119 load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0); 1064 emitValueProfilingSite( );1120 emitValueProfilingSite(bytecode.metadata(m_codeBlock)); 1065 1121 emitPutVirtualRegister(dst); 1066 1122 } 1067 1123 1068 void JIT::emit_op_put_to_arguments(Instruction* currentInstruction) 1069 { 1070 int arguments = currentInstruction[1].u.operand; 1071 int index = currentInstruction[2].u.operand; 1072 int value = currentInstruction[3].u.operand; 1124 void JIT::emit_op_put_to_arguments(const Instruction* currentInstruction) 1125 { 1126 auto bytecode = currentInstruction->as<OpPutToArguments>(); 1127 int arguments = bytecode.arguments.offset(); 1128 int index = bytecode.index; 1129 int value = bytecode.value.offset(); 1073 1130 1074 1131 
emitGetVirtualRegister(arguments, regT0); … … 1176 1233 void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) 1177 1234 { 1178 Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex];1235 const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); 1179 1236 1180 1237 PatchableJump badType; … … 1228 1285 void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) 1229 1286 { 1230 Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; 1287 const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); 1288 auto bytecode = currentInstruction->as<OpGetByVal>(); 1231 1289 1232 1290 Jump fastDoneCase; … … 1234 1292 JumpList slowCases; 1235 1293 1236 JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);1294 JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, bytecode, propertyName, fastDoneCase, slowDoneCase, slowCases); 1237 1295 1238 1296 ConcurrentJSLocker locker(m_codeBlock->m_lock); … … 1259 1317 } 1260 1318 1319 template<typename Op> 1261 1320 void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) 1262 1321 { 1263 Instruction* currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; 1322 const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); 1323 auto bytecode = currentInstruction->as<Op>(); 1264 1324 1265 1325 PatchableJump badType; … … 1270 1330 switch (arrayMode) { 1271 1331 case JITInt32: 1272 slowCases = emitInt32PutByVal( currentInstruction, badType);1332 slowCases = emitInt32PutByVal(bytecode, badType); 1273 1333 break; 1274 1334 case JITDouble: 1275 slowCases = emitDoublePutByVal( currentInstruction, badType);1335 slowCases = emitDoublePutByVal(bytecode, badType); 1276 1336 break; 1277 1337 case JITContiguous: 1278 slowCases = emitContiguousPutByVal( currentInstruction, badType);1338 slowCases = emitContiguousPutByVal(bytecode, badType); 1279 1339 needsLinkForWriteBarrier = true; 1280 1340 break; 1281 1341 case JITArrayStorage: 1282 slowCases = emitArrayStoragePutByVal( currentInstruction, badType);1342 slowCases = emitArrayStoragePutByVal(bytecode, badType); 1283 1343 needsLinkForWriteBarrier = true; 1284 1344 break; … … 1286 1346 TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode); 1287 1347 if (isInt(type)) 1288 slowCases = emitIntTypedArrayPutByVal( currentInstruction, badType, type);1348 slowCases = emitIntTypedArrayPutByVal(bytecode, badType, type); 1289 1349 else 1290 slowCases = emitFloatTypedArrayPutByVal( currentInstruction, badType, type);1350 slowCases = emitFloatTypedArrayPutByVal(bytecode, badType, type); 1291 1351 break; 1292 1352 } … … 1303 1363 } 1304 1364 1305 bool isDirect = Interpreter::getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;1365 bool isDirect = currentInstruction->opcodeID() == op_put_by_val_direct; 1306 1366 if (!isDirect) { 1307 1367 byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( … … 1318 1378 } 1319 1379 1380 template<typename Op> 1320 1381 void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) 1321 1382 { 1322 Instruction* 
currentInstruction = &m_codeBlock->instructions()[byValInfo->bytecodeIndex]; 1383 ASSERT((putKind == Direct && Op::opcodeID == op_put_by_val_direct) || (putKind == NotDirect && Op::opcodeID == op_put_by_val)); 1384 const Instruction* currentInstruction = m_codeBlock->instructions().at(byValInfo->bytecodeIndex).ptr(); 1385 auto bytecode = currentInstruction->as<Op>(); 1323 1386 1324 1387 JumpList doneCases; 1325 1388 JumpList slowCases; 1326 1389 1327 JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, currentInstruction, putKind, propertyName, doneCases, slowCases);1390 JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, bytecode, putKind, propertyName, doneCases, slowCases); 1328 1391 1329 1392 ConcurrentJSLocker locker(m_codeBlock->m_lock); … … 1349 1412 } 1350 1413 1351 JIT::JumpList JIT::emitDoubleLoad( Instruction*, PatchableJump& badType)1414 JIT::JumpList JIT::emitDoubleLoad(const Instruction*, PatchableJump& badType) 1352 1415 { 1353 1416 #if USE(JSVALUE64) … … 1374 1437 } 1375 1438 1376 JIT::JumpList JIT::emitContiguousLoad( Instruction*, PatchableJump& badType, IndexingType expectedShape)1439 JIT::JumpList JIT::emitContiguousLoad(const Instruction*, PatchableJump& badType, IndexingType expectedShape) 1377 1440 { 1378 1441 #if USE(JSVALUE64) … … 1401 1464 } 1402 1465 1403 JIT::JumpList JIT::emitArrayStorageLoad( Instruction*, PatchableJump& badType)1466 JIT::JumpList JIT::emitArrayStorageLoad(const Instruction*, PatchableJump& badType) 1404 1467 { 1405 1468 #if USE(JSVALUE64) … … 1431 1494 } 1432 1495 1433 JIT::JumpList JIT::emitDirectArgumentsGetByVal( Instruction*, PatchableJump& badType)1496 JIT::JumpList JIT::emitDirectArgumentsGetByVal(const Instruction*, PatchableJump& badType) 1434 1497 { 1435 1498 JumpList slowCases; … … 1461 1524 } 1462 1525 1463 JIT::JumpList JIT::emitScopedArgumentsGetByVal( Instruction*, PatchableJump& badType)1526 JIT::JumpList JIT::emitScopedArgumentsGetByVal(const Instruction*, PatchableJump& badType) 1464 1527 { 1465 1528 JumpList slowCases; … … 1512 1575 } 1513 1576 1514 JIT::JumpList JIT::emitIntTypedArrayGetByVal( Instruction*, PatchableJump& badType, TypedArrayType type)1577 JIT::JumpList JIT::emitIntTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) 1515 1578 { 1516 1579 ASSERT(isInt(type)); … … 1579 1642 } 1580 1643 1581 JIT::JumpList JIT::emitFloatTypedArrayGetByVal( Instruction*, PatchableJump& badType, TypedArrayType type)1644 JIT::JumpList JIT::emitFloatTypedArrayGetByVal(const Instruction*, PatchableJump& badType, TypedArrayType type) 1582 1645 { 1583 1646 ASSERT(isFloat(type)); … … 1624 1687 } 1625 1688 1626 JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type) 1627 { 1628 ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction); 1689 template<typename Op> 1690 JIT::JumpList JIT::emitIntTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type) 1691 { 1692 auto& metadata = bytecode.metadata(m_codeBlock); 1693 ArrayProfile* profile = &metadata.arrayProfile; 1629 1694 ASSERT(isInt(type)); 1630 1695 1631 int value = currentInstruction[3].u.operand;1696 int value = bytecode.value.offset(); 1632 1697 1633 1698 #if USE(JSVALUE64) … … 1697 1762 } 1698 1763 1699 JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type) 1700 { 1701 ArrayProfile* profile = arrayProfileFor<OpPutByValShape>(currentInstruction); 1764 
template<typename Op> 1765 JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Op bytecode, PatchableJump& badType, TypedArrayType type) 1766 { 1767 auto& metadata = bytecode.metadata(m_codeBlock); 1768 ArrayProfile* profile = &metadata.arrayProfile; 1702 1769 ASSERT(isFloat(type)); 1703 1770 1704 int value = currentInstruction[3].u.operand;1771 int value = bytecode.value.offset(); 1705 1772 1706 1773 #if USE(JSVALUE64) … … 1768 1835 } 1769 1836 1837 template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*); 1838 1770 1839 } // namespace JSC 1771 1840 -
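put_by_val and put_by_val_direct now share one emitter templated on the generated Op struct, as do the out-of-line helpers (emitGenericContiguousPutByVal, emitIntTypedArrayPutByVal, and friends), which take the decoded bytecode rather than a raw Instruction*. The dispatch shape from the hunks above:

    void JIT::emit_op_put_by_val_direct(const Instruction* currentInstruction)
    {
        emit_op_put_by_val<OpPutByValDirect>(currentInstruction); // same emitter, different operand/metadata layout
    }

    template<typename Op>
    void JIT::emit_op_put_by_val(const Instruction* currentInstruction)
    {
        auto bytecode = currentInstruction->as<Op>();
        ArrayProfile* profile = &bytecode.metadata(m_codeBlock).arrayProfile;
        // ... fast path; the helpers are templated on Op the same way.
    }

The explicit instantiation at the bottom of the file (template void JIT::emit_op_put_by_val<OpPutByVal>(const Instruction*);) is presumably needed because the OpPutByVal instantiation is referenced from outside this translation unit.
-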
trunk/Source/JavaScriptCore/jit/RegisterSet.cpp
r237486 r237547 222 222 #elif CPU(X86_64) 223 223 #if !OS(WINDOWS) 224 result.set(GPRInfo::regCS1); 224 225 result.set(GPRInfo::regCS2); 225 226 ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister); … … 228 229 result.set(GPRInfo::regCS4); 229 230 #else 231 result.set(GPRInfo::regCS3); 230 232 result.set(GPRInfo::regCS4); 231 233 ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister); … … 237 239 #elif CPU(ARM_TRADITIONAL) 238 240 #elif CPU(ARM64) 241 result.set(GPRInfo::regCS6); 239 242 result.set(GPRInfo::regCS7); 240 243 ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister); -
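These RegisterSet hunks reserve one additional callee-save register per platform (regCS1 on x86-64, regCS3 on Windows x86-64, regCS6 on ARM64). The likely reason — an assumption here, not stated in the hunk — is that the interpreter now pins one more value, such as the bytecode metadata-table base, in a callee-save register across calls. A rough sketch of the reservation idea, with illustrative register names rather than the real GPRInfo:

    #include <bitset>

    // Illustrative register ids; not GPRInfo. The point is only that a newly
    // pinned value (assumed to be the metadata-table base) makes its register
    // part of the reserved callee-save set.
    enum Reg { regCS0, regCS1, regCS2, regCS3, regCS4, numRegs };

    struct RegisterSetSketch {
        std::bitset<numRegs> bits;
        void set(Reg r) { bits.set(r); }
        bool contains(Reg r) const { return bits.test(r); }
    };

    RegisterSetSketch vmCalleeSaveRegisters()
    {
        RegisterSetSketch result;
        result.set(regCS1); // newly reserved by the change above
        result.set(regCS2); // pre-existing reservations (tag registers, etc.)
        result.set(regCS3);
        result.set(regCS4);
        return result;
    }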
trunk/Source/JavaScriptCore/jit/SlowPathCall.h
r237486 r237547 35 35 class JITSlowPathCall { 36 36 public: 37 JITSlowPathCall(JIT* jit, Instruction* pc, SlowPathFunction slowPathFunction)37 JITSlowPathCall(JIT* jit, const Instruction* pc, SlowPathFunction slowPathFunction) 38 38 : m_jit(jit) 39 39 , m_slowPathFunction(slowPathFunction) … … 85 85 JIT* m_jit; 86 86 SlowPathFunction m_slowPathFunction; 87 Instruction* m_pc;87 const Instruction* m_pc; 88 88 }; 89 89 -
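The SlowPathCall.h hunk threads const Instruction* through the slow-path call helper: with inline-cache state relocated to the metadata table, nothing writes back into the instruction stream, so the PC can be const everywhere. A trimmed sketch of the same class shape, with simplified stand-in types:

    // Simplified stand-ins; the real SlowPathReturnType and ExecState differ.
    struct ExecState;
    struct Instruction;
    using SlowPathReturnType = long long;
    using SlowPathFunction = SlowPathReturnType (*)(ExecState*, const Instruction*);

    class SlowPathCallSketch {
    public:
        SlowPathCallSketch(const Instruction* pc, SlowPathFunction fn)
            : m_slowPathFunction(fn)
            , m_pc(pc) // const: the bytecode stream is immutable at run time
        {
        }
    private:
        SlowPathFunction m_slowPathFunction;
        const Instruction* m_pc;
    };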
trunk/Source/JavaScriptCore/llint/LLIntData.cpp
r237486 r237547 42 42 #define STATIC_ASSERT(cond) static_assert(cond, "LLInt assumes " #cond) 43 43 44 namespace JSC { namespace LLInt {45 44 46 Instruction Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; 47 Opcode Data::s_opcodeMap[numOpcodeIDs] = { }; 45 namespace JSC { 46 47 namespace LLInt { 48 49 50 uint8_t Data::s_exceptionInstructions[maxOpcodeLength + 1] = { }; 51 Opcode g_opcodeMap[numOpcodeIDs] = { }; 52 Opcode g_opcodeMapWide[numOpcodeIDs] = { }; 48 53 49 54 #if !ENABLE(C_LOOP) 50 extern "C" void llint_entry(void* );55 extern "C" void llint_entry(void*, void*); 51 56 #endif 52 57 … … 57 62 58 63 #else // !ENABLE(C_LOOP) 59 llint_entry(& Data::s_opcodeMap);64 llint_entry(&g_opcodeMap, &g_opcodeMapWide); 60 65 61 for (int i = 0; i < numOpcodeIDs; ++i) 62 Data::s_opcodeMap[i] = tagCodePtr(Data::s_opcodeMap[i], BytecodePtrTag); 66 for (int i = 0; i < numOpcodeIDs; ++i) { 67 g_opcodeMap[i] = tagCodePtr(g_opcodeMap[i], BytecodePtrTag); 68 g_opcodeMapWide[i] = tagCodePtr(g_opcodeMapWide[i], BytecodePtrTag); 69 } 63 70 64 void* handler = Data::s_opcodeMap[llint_throw_from_slow_path_trampoline];71 ASSERT(llint_throw_from_slow_path_trampoline < UINT8_MAX); 65 72 for (int i = 0; i < maxOpcodeLength + 1; ++i) 66 Data::s_exceptionInstructions[i] .u.pointer = handler;73 Data::s_exceptionInstructions[i] = llint_throw_from_slow_path_trampoline; 67 74 #endif // ENABLE(C_LOOP) 68 75 } … … 125 132 #endif 126 133 127 #if ENABLE(C_LOOP) || USE(JSVALUE32_64) 134 #if ENABLE(C_LOOP) 135 ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 1); 136 #elif USE(JSVALUE32_64) 128 137 ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()); 129 138 #elif (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) 130 ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);139 ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 4); 131 140 #elif (CPU(X86_64) && OS(WINDOWS)) 132 ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);141 ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 4); 133 142 #endif 134 143 -
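The LLIntData.cpp hunk sets up two dispatch tables — llint_entry now returns label addresses for both the narrow and the wide encoding of every opcode — and shrinks the exception stream to plain opcode bytes instead of handler pointers. A standalone sketch of that arrangement, with illustrative sizes and trampoline ID:

    #include <cstdint>

    enum { numOpcodeIDsSketch = 4, maxOpcodeLengthSketch = 6, throwTrampolineID = 3 };
    using OpcodeHandler = const void*;

    OpcodeHandler opcodeMap[numOpcodeIDsSketch];
    OpcodeHandler opcodeMapWide[numOpcodeIDsSketch];
    uint8_t exceptionInstructions[maxOpcodeLengthSketch + 1];

    void initializeDispatch(OpcodeHandler narrow[], OpcodeHandler wide[])
    {
        // The entry routine hands back one label array per encoding width.
        for (int i = 0; i < numOpcodeIDsSketch; ++i) {
            opcodeMap[i] = narrow[i];
            opcodeMapWide[i] = wide[i];
        }
        // A byte per slot only works if the trampoline's opcode ID fits a byte,
        // which is what the ASSERT in the hunk above checks.
        static_assert(throwTrampolineID < 256, "opcode ID must fit in a byte");
        for (int i = 0; i < maxOpcodeLengthSketch + 1; ++i)
            exceptionInstructions[i] = throwTrampolineID;
    }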
trunk/Source/JavaScriptCore/llint/LLIntData.h
r237486 r237547 43 43 namespace LLInt { 44 44 45 extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMap[numOpcodeIDs]; 46 extern "C" JS_EXPORT_PRIVATE Opcode g_opcodeMapWide[numOpcodeIDs]; 47 45 48 class Data { 49 46 50 public: 47 51 static void performAssertions(VM&); 48 52 49 53 private: 50 static Instruction s_exceptionInstructions[maxOpcodeLength + 1]; 51 static Opcode s_opcodeMap[numOpcodeIDs]; 54 static uint8_t s_exceptionInstructions[maxOpcodeLength + 1]; 52 55 53 56 friend void initialize(); … … 55 58 friend Instruction* exceptionInstructions(); 56 59 friend Opcode* opcodeMap(); 60 friend Opcode* opcodeMapWide(); 57 61 friend Opcode getOpcode(OpcodeID); 62 friend Opcode getOpcodeWide(OpcodeID); 58 63 template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getCodePtr(OpcodeID); 64 template<PtrTag tag> friend MacroAssemblerCodePtr<tag> getWideCodePtr(OpcodeID); 59 65 template<PtrTag tag> friend MacroAssemblerCodeRef<tag> getCodeRef(OpcodeID); 60 66 }; … … 64 70 inline Instruction* exceptionInstructions() 65 71 { 66 return Data::s_exceptionInstructions;72 return reinterpret_cast<Instruction*>(Data::s_exceptionInstructions); 67 73 } 68 74 69 75 inline Opcode* opcodeMap() 70 76 { 71 return Data::s_opcodeMap; 77 return g_opcodeMap; 78 } 79 80 inline Opcode* opcodeMapWide() 81 { 82 return g_opcodeMapWide; 72 83 } 73 84 … … 75 86 { 76 87 #if ENABLE(COMPUTED_GOTO_OPCODES) 77 return Data::s_opcodeMap[id];88 return g_opcodeMap[id]; 78 89 #else 79 90 return static_cast<Opcode>(id); 91 #endif 92 } 93 94 inline Opcode getOpcodeWide(OpcodeID id) 95 { 96 #if ENABLE(COMPUTED_GOTO_OPCODES) 97 return g_opcodeMapWide[id]; 98 #else 99 UNUSED_PARAM(id); 100 RELEASE_ASSERT_NOT_REACHED(); 80 101 #endif 81 102 } … … 85 106 { 86 107 void* address = reinterpret_cast<void*>(getOpcode(opcodeID)); 108 address = retagCodePtr<BytecodePtrTag, tag>(address); 109 return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); 110 } 111 112 template<PtrTag tag> 113 ALWAYS_INLINE MacroAssemblerCodePtr<tag> getWideCodePtr(OpcodeID opcodeID) 114 { 115 void* address = reinterpret_cast<void*>(getOpcodeWide(opcodeID)); 87 116 address = retagCodePtr<BytecodePtrTag, tag>(address); 88 117 return MacroAssemblerCodePtr<tag>::createFromExecutableAddress(address); -
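A caller of the accessors declared above picks an entry point by encoding width; in outline (ignoring the pointer retagging that getCodePtr/getWideCodePtr also perform):

    // Outline only: real callers go through getCodePtr/getWideCodePtr, which
    // additionally retag the pointer for the target's PtrTag.
    using OpcodeHandler = const void*;
    extern OpcodeHandler opcodeMap[];
    extern OpcodeHandler opcodeMapWide[];

    OpcodeHandler entryFor(int opcodeID, bool wide)
    {
        return wide ? opcodeMapWide[opcodeID] : opcodeMap[opcodeID];
    }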
trunk/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
r237486 r237547 21 21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 22 22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 24 24 */ 25 25 … … 28 28 #include "ArithProfile.h" 29 29 #include "ArrayProfile.h" 30 #include "BytecodeIndices.h" 30 31 #include "BytecodeStructs.h" 31 32 #include "CodeBlock.h" … … 35 36 #include "EvalExecutable.h" 36 37 #include "Exception.h" 38 #include "GetByIdMetadata.h" 37 39 #include "Heap.h" 38 40 #include "IndirectEvalExecutable.h" … … 50 52 #include "JSTypeInfo.h" 51 53 #include "JumpTable.h" 54 #include "LLIntData.h" 52 55 #include "LLIntOfflineAsmConfig.h" 53 56 #include "MarkedSpace.h" -
trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
r237486 r237547 57 57 #include "LLIntData.h" 58 58 #include "LLIntExceptions.h" 59 #include "LLIntPrototypeLoadAdaptiveStructureWatchpoint.h" 59 60 #include "LowLevelInterpreter.h" 60 61 #include "ModuleProgramCodeBlock.h" … … 94 95 LLINT_SET_PC_FOR_STUBS() 95 96 96 #define LLINT_OP( index) (exec->uncheckedR(pc[index].u.operand))97 #define LLINT_OP_C( index) (exec->r(pc[index].u.operand))97 #define LLINT_OP(__r) (exec->uncheckedR(__r.offset())) 98 #define LLINT_OP_C(__r) (exec->r(__r.offset())) 98 99 99 100 #define LLINT_RETURN_TWO(first, second) do { \ … … 122 123 } while (false) 123 124 124 #define LLINT_BRANCH(opcode, condition) do { \ 125 #define JUMP_OFFSET(target) \ 126 ((target) ? (target) : exec->codeBlock()->outOfLineJumpOffset(pc)) 127 128 #define JUMP_TO(target) do { \ 129 pc = reinterpret_cast<const Instruction*>(reinterpret_cast<const uint8_t*>(pc) + (target)); \ 130 } while (false) 131 132 #define LLINT_BRANCH(condition) do { \ 125 133 bool __b_condition = (condition); \ 126 134 LLINT_CHECK_EXCEPTION(); \ 127 135 if (__b_condition) \ 128 pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand;\136 JUMP_TO(JUMP_OFFSET(bytecode.target)); \ 129 137 else \ 130 pc += OPCODE_LENGTH(opcode);\138 JUMP_TO(pc->size()); \ 131 139 LLINT_END_IMPL(); \ 132 140 } while (false) … … 135 143 JSValue __r_returnValue = (value); \ 136 144 LLINT_CHECK_EXCEPTION(); \ 137 LLINT_OP( 1) = __r_returnValue; \145 LLINT_OP(bytecode.dst) = __r_returnValue; \ 138 146 LLINT_END_IMPL(); \ 139 147 } while (false) 140 148 141 #define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \ 142 JSValue __r_returnValue = (value); \ 143 LLINT_CHECK_EXCEPTION(); \ 144 LLINT_OP(1) = __r_returnValue; \ 145 pc += (pcAdjustment); \ 146 LLINT_END_IMPL(); \ 147 } while (false) 148 149 #define LLINT_RETURN_PROFILED(opcode, value) do { \ 149 #define LLINT_RETURN_PROFILED(value) do { \ 150 150 JSValue __rp_returnValue = (value); \ 151 151 LLINT_CHECK_EXCEPTION(); \ 152 LLINT_OP( 1) = __rp_returnValue; \153 LLINT_PROFILE_VALUE( opcode,__rp_returnValue); \152 LLINT_OP(bytecode.dst) = __rp_returnValue; \ 153 LLINT_PROFILE_VALUE(__rp_returnValue); \ 154 154 LLINT_END_IMPL(); \ 155 155 } while (false) 156 156 157 #define LLINT_PROFILE_VALUE(opcode, value) do { \ 158 pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \ 159 JSValue::encode(value); \ 157 #define LLINT_PROFILE_VALUE(value) do { \ 158 bytecode.metadata(exec).profile.m_buckets[0] = JSValue::encode(value); \ 160 159 } while (false) 161 160 … … 223 222 #endif // LLINT_TRACING 224 223 225 extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand)224 extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, const Instruction* pc, int fromWhere, int operand) 226 225 { 227 226 if (!Options::traceLLIntExecution()) … … 229 228 230 229 LLINT_BEGIN(); 231 dataLogF( "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d\n",232 &Thread::current(),233 exec->codeBlock(),234 exec,235 static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)),236 Interpreter::getOpcodeID(pc[0].u.opcode),237 fromWhere,238 operand,239 pc[operand].u.operand);230 dataLogF( 231 "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d\n", 232 &Thread::current(), 233 exec->codeBlock(), 234 exec, 235 static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), 236 pc->opcodeID(), 237 fromWhere, 238 operand); 240 239 LLINT_END(); 241 240 } 242 241 243 extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc, int fromWhere, 
int operand)242 extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, const Instruction* pc, int fromWhere, VirtualRegister operand) 244 243 { 245 244 if (!Options::traceLLIntExecution()) … … 256 255 u.asValue = JSValue::encode(value); 257 256 dataLogF( 258 "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n",257 "<%p> %p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %08x:%08x: %s\n", 259 258 &Thread::current(), 260 259 exec->codeBlock(), 261 260 exec, 262 261 static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), 263 Interpreter::getOpcodeID(pc[0].u.opcode),262 pc->opcodeID(), 264 263 fromWhere, 265 operand, 266 pc[operand].u.operand, 264 operand.offset(), 267 265 u.bits.tag, 268 266 u.bits.payload, … … 324 322 LLINT_END_IMPL(); 325 323 326 OpcodeID opcodeID = Interpreter::getOpcodeID(pc[0].u.opcode);324 OpcodeID opcodeID = pc->opcodeID(); 327 325 dataLogF("<%p> %p / %p: executing bc#%zu, %s, pc = %p\n", 328 326 &Thread::current(), … … 330 328 exec, 331 329 static_cast<intptr_t>(exec->codeBlock()->bytecodeOffset(pc)), 332 opcodeNames[opcodeID], pc); 330 pc->name(), 331 pc); 333 332 if (opcodeID == op_enter) { 334 333 dataLogF("Frame will eventually return to %p\n", exec->returnPC().value()); … … 400 399 } 401 400 402 static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind)401 static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind) 403 402 { 404 403 if (Options::verboseOSR()) { … … 423 422 } 424 423 #else // ENABLE(JIT) 425 static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind)424 static SlowPathReturnType entryOSR(ExecState* exec, const Instruction*, CodeBlock* codeBlock, const char*, EntryKind) 426 425 { 427 426 codeBlock->dontJITAnytimeSoon(); … … 567 566 { 568 567 LLINT_BEGIN(); 569 LLINT_RETURN(constructEmptyObject(exec, pc[3].u.objectAllocationProfile->structure())); 568 auto bytecode = pc->as<OpNewObject>(); 569 auto& metadata = bytecode.metadata(exec); 570 LLINT_RETURN(constructEmptyObject(exec, metadata.objectAllocationProfile.structure())); 570 571 } 571 572 … … 573 574 { 574 575 LLINT_BEGIN(); 575 LLINT_RETURN(constructArrayNegativeIndexed(exec, pc[4].u.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand)); 576 auto bytecode = pc->as<OpNewArray>(); 577 auto& metadata = bytecode.metadata(exec); 578 LLINT_RETURN(constructArrayNegativeIndexed(exec, &metadata.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(bytecode.argv)), bytecode.argc)); 576 579 } 577 580 … … 579 582 { 580 583 LLINT_BEGIN(); 581 LLINT_RETURN(constructArrayWithSizeQuirk(exec, pc[3].u.arrayAllocationProfile, exec->lexicalGlobalObject(), LLINT_OP_C(2).jsValue())); 584 auto bytecode = pc->as<OpNewArrayWithSize>(); 585 auto& metadata = bytecode.metadata(exec); 586 LLINT_RETURN(constructArrayWithSizeQuirk(exec, &metadata.arrayAllocationProfile, exec->lexicalGlobalObject(), LLINT_OP_C(bytecode.length).jsValue())); 582 587 } 583 588 … … 585 590 { 586 591 LLINT_BEGIN(); 587 RegExp* regExp = jsCast<RegExp*>(LLINT_OP_C(2).jsValue()); 592 auto bytecode = pc->as<OpNewRegexp>(); 593 RegExp* regExp = jsCast<RegExp*>(LLINT_OP_C(bytecode.regexp).jsValue()); 588 594 ASSERT(regExp->isValid()); 589 595 LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp)); … … 593 599 { 594 600 LLINT_BEGIN(); 595 JSValue value = 
LLINT_OP_C(2).jsValue(); 596 JSValue proto = LLINT_OP_C(3).jsValue(); 601 auto bytecode = pc->as<OpInstanceof>(); 602 JSValue value = LLINT_OP_C(bytecode.value).jsValue(); 603 JSValue proto = LLINT_OP_C(bytecode.prototype).jsValue(); 597 604 LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto))); 598 605 } … … 602 609 LLINT_BEGIN(); 603 610 604 JSValue value = LLINT_OP_C(2).jsValue(); 605 JSValue constructor = LLINT_OP_C(3).jsValue(); 606 JSValue hasInstanceValue = LLINT_OP_C(4).jsValue(); 611 auto bytecode = pc->as<OpInstanceofCustom>(); 612 JSValue value = LLINT_OP_C(bytecode.value).jsValue(); 613 JSValue constructor = LLINT_OP_C(bytecode.constructor).jsValue(); 614 JSValue hasInstanceValue = LLINT_OP_C(bytecode.hasInstanceValue).jsValue(); 607 615 608 616 ASSERT(constructor.isObject()); … … 616 624 { 617 625 LLINT_BEGIN(); 626 auto bytecode = pc->as<OpTryGetById>(); 618 627 CodeBlock* codeBlock = exec->codeBlock(); 619 const Identifier& ident = codeBlock->identifier( pc[3].u.operand);620 JSValue baseValue = LLINT_OP_C( 2).jsValue();628 const Identifier& ident = codeBlock->identifier(bytecode.property); 629 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 621 630 PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::VMInquiry); 622 631 … … 624 633 JSValue result = slot.getPureResult(); 625 634 626 LLINT_RETURN_PROFILED( op_try_get_by_id,result);635 LLINT_RETURN_PROFILED(result); 627 636 } 628 637 … … 630 639 { 631 640 LLINT_BEGIN(); 641 auto bytecode = pc->as<OpGetByIdDirect>(); 632 642 CodeBlock* codeBlock = exec->codeBlock(); 633 const Identifier& ident = codeBlock->identifier( pc[3].u.operand);634 JSValue baseValue = LLINT_OP_C( 2).jsValue();643 const Identifier& ident = codeBlock->identifier(bytecode.property); 644 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 635 645 PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::GetOwnProperty); 636 646 … … 641 651 642 652 if (!LLINT_ALWAYS_ACCESS_SLOW && slot.isCacheable()) { 653 auto& metadata = bytecode.metadata(exec); 643 654 { 644 StructureID oldStructureID = pc[4].u.structureID;655 StructureID oldStructureID = metadata.structure; 645 656 if (oldStructureID) { 646 657 Structure* a = vm.heap.structureIDTable().get(oldStructureID); … … 658 669 if (slot.isValue()) { 659 670 // Start out by clearing out the old cache. 
660 pc[4].u.pointer = nullptr; // old structure661 pc[5].u.pointer = nullptr; // offset671 metadata.structure = 0; 672 metadata.offset = 0; 662 673 663 674 if (structure->propertyAccessesAreCacheable() … … 667 678 ConcurrentJSLocker locker(codeBlock->m_lock); 668 679 669 pc[4].u.structureID= structure->id();670 pc[5].u.operand= slot.cachedOffset();680 metadata.structure = structure->id(); 681 metadata.offset = slot.cachedOffset(); 671 682 } 672 683 } 673 684 } 674 685 675 LLINT_RETURN_PROFILED( op_get_by_id_direct,result);676 } 677 678 679 static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, Instruction* pc, JSCell* baseCell, PropertySlot& slot, const Identifier& ident)686 LLINT_RETURN_PROFILED(result); 687 } 688 689 690 static void setupGetByIdPrototypeCache(ExecState* exec, VM& vm, const Instruction* pc, OpGetById::Metadata& metadata, JSCell* baseCell, PropertySlot& slot, const Identifier& ident) 680 691 { 681 692 CodeBlock* codeBlock = exec->codeBlock(); … … 711 722 if (condition.condition().kind() == PropertyCondition::Presence) 712 723 offset = condition.condition().offset(); 713 watchpoints.add(condition, pc)->install(vm);724 watchpoints.add(condition, metadata)->install(vm); 714 725 } 715 726 … … 721 732 722 733 if (slot.isUnset()) { 723 pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_unset);724 pc[4].u.structureID= structure->id();734 metadata.mode = GetByIdMode::Unset; 735 metadata.modeMetadata.unsetMode.structure = structure->id(); 725 736 return; 726 737 } 727 738 ASSERT(slot.isValue()); 728 739 729 pc[0].u.opcode = LLInt::getOpcode(op_get_by_id_proto_load); 730 pc[4].u.structureID = structure->id(); 731 pc[5].u.operand = offset; 740 metadata.mode = GetByIdMode::ProtoLoad; 741 metadata.modeMetadata.protoLoadMode.structure = structure->id(); 742 metadata.modeMetadata.protoLoadMode.cachedOffset = offset; 743 metadata.modeMetadata.protoLoadMode.cachedSlot = slot.slotBase(); 732 744 // We know that this pointer will remain valid because it will be cleared by either a watchpoint fire or 733 745 // during GC when we clear the LLInt caches. 
734 pc[6].u.pointer= slot.slotBase();746 metadata.modeMetadata.protoLoadMode.cachedSlot = slot.slotBase(); 735 747 } 736 748 … … 739 751 { 740 752 LLINT_BEGIN(); 753 auto bytecode = pc->as<OpGetById>(); 754 auto& metadata = bytecode.metadata(exec); 741 755 CodeBlock* codeBlock = exec->codeBlock(); 742 const Identifier& ident = codeBlock->identifier( pc[3].u.operand);743 JSValue baseValue = LLINT_OP_C( 2).jsValue();756 const Identifier& ident = codeBlock->identifier(bytecode.property); 757 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 744 758 PropertySlot slot(baseValue, PropertySlot::PropertySlot::InternalMethodType::Get); 745 759 746 760 JSValue result = baseValue.get(exec, ident, slot); 747 761 LLINT_CHECK_EXCEPTION(); 748 LLINT_OP( 1) = result;762 LLINT_OP(bytecode.dst) = result; 749 763 750 764 if (!LLINT_ALWAYS_ACCESS_SLOW 751 765 && baseValue.isCell() 752 766 && slot.isCacheable()) { 753 754 767 { 755 StructureID oldStructureID = pc[4].u.structureID; 768 StructureID oldStructureID; 769 auto mode = metadata.mode; 770 switch (mode) { 771 case GetByIdMode::Default: 772 oldStructureID = metadata.modeMetadata.defaultMode.structure; 773 break; 774 case GetByIdMode::Unset: 775 oldStructureID = metadata.modeMetadata.unsetMode.structure; 776 break; 777 case GetByIdMode::ProtoLoad: 778 oldStructureID = metadata.modeMetadata.protoLoadMode.structure; 779 break; 780 default: 781 oldStructureID = 0; 782 } 756 783 if (oldStructureID) { 757 auto opcode = Interpreter::getOpcodeID(pc[0]); 758 if (opcode == op_get_by_id 759 || opcode == op_get_by_id_unset 760 || opcode == op_get_by_id_proto_load) { 761 Structure* a = vm.heap.structureIDTable().get(oldStructureID); 762 Structure* b = baseValue.asCell()->structure(vm); 763 764 if (Structure::shouldConvertToPolyProto(a, b)) { 765 ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get()); 766 a->rareData()->sharedPolyProtoWatchpoint()->invalidate(vm, StringFireDetail("Detected poly proto opportunity.")); 767 } 784 Structure* a = vm.heap.structureIDTable().get(oldStructureID); 785 Structure* b = baseValue.asCell()->structure(vm); 786 787 if (Structure::shouldConvertToPolyProto(a, b)) { 788 ASSERT(a->rareData()->sharedPolyProtoWatchpoint().get() == b->rareData()->sharedPolyProtoWatchpoint().get()); 789 a->rareData()->sharedPolyProtoWatchpoint()->invalidate(vm, StringFireDetail("Detected poly proto opportunity.")); 768 790 } 769 791 } … … 774 796 if (slot.isValue() && slot.slotBase() == baseValue) { 775 797 // Start out by clearing out the old cache. 776 pc[0].u.opcode = LLInt::getOpcode(op_get_by_id);777 pc[4].u.pointer = nullptr; // old structure778 pc[5].u.pointer = nullptr; // offset798 metadata.mode = GetByIdMode::Default; 799 metadata.modeMetadata.defaultMode.structure = 0; 800 metadata.modeMetadata.defaultMode.cachedOffset = 0; 779 801 780 802 // Prevent the prototype cache from ever happening. 
781 pc[7].u.operand= 0;803 metadata.hitCountForLLIntCaching = 0; 782 804 783 805 if (structure->propertyAccessesAreCacheable() … … 787 809 ConcurrentJSLocker locker(codeBlock->m_lock); 788 810 789 pc[4].u.structureID= structure->id();790 pc[5].u.operand= slot.cachedOffset();811 metadata.modeMetadata.defaultMode.structure = structure->id(); 812 metadata.modeMetadata.defaultMode.cachedOffset = slot.cachedOffset(); 791 813 } 792 } else if (UNLIKELY( pc[7].u.operand&& (slot.isValue() || slot.isUnset()))) {814 } else if (UNLIKELY(metadata.hitCountForLLIntCaching && (slot.isValue() || slot.isUnset()))) { 793 815 ASSERT(slot.slotBase() != baseValue); 794 816 795 if (!(-- pc[7].u.operand))796 setupGetByIdPrototypeCache(exec, vm, pc, baseCell, slot, ident);817 if (!(--metadata.hitCountForLLIntCaching)) 818 setupGetByIdPrototypeCache(exec, vm, pc, metadata, baseCell, slot, ident); 797 819 } 798 820 } else if (!LLINT_ALWAYS_ACCESS_SLOW 799 821 && isJSArray(baseValue) 800 822 && ident == vm.propertyNames->length) { 801 pc[0].u.opcode = LLInt::getOpcode(op_get_array_length); 802 ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(codeBlock->bytecodeOffset(pc)); 803 arrayProfile->observeStructure(baseValue.asCell()->structure(vm)); 804 pc[4].u.arrayProfile = arrayProfile; 805 ASSERT(arrayProfileFor<OpGetArrayLengthShape>(pc) == arrayProfile); 823 metadata.mode = GetByIdMode::ArrayLength; 824 metadata.modeMetadata.arrayLengthMode.arrayProfile.observeStructure(baseValue.asCell()->structure(vm)); 806 825 807 826 // Prevent the prototype cache from ever happening. 808 pc[7].u.operand= 0;809 } 810 811 pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);827 metadata.hitCountForLLIntCaching = 0; 828 } 829 830 LLINT_PROFILE_VALUE(result); 812 831 LLINT_END(); 813 832 } 814 833 815 LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length) 816 { 817 LLINT_BEGIN(); 834 LLINT_SLOW_PATH_DECL(slow_path_put_by_id) 835 { 836 LLINT_BEGIN(); 837 auto bytecode = pc->as<OpPutById>(); 838 auto& metadata = bytecode.metadata(exec); 818 839 CodeBlock* codeBlock = exec->codeBlock(); 819 const Identifier& ident = codeBlock->identifier(pc[3].u.operand); 820 JSValue baseValue = LLINT_OP(2).jsValue(); 821 PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get); 822 LLINT_RETURN(baseValue.get(exec, ident, slot)); 823 } 824 825 LLINT_SLOW_PATH_DECL(slow_path_put_by_id) 826 { 827 LLINT_BEGIN(); 828 CodeBlock* codeBlock = exec->codeBlock(); 829 const Identifier& ident = codeBlock->identifier(pc[2].u.operand); 830 831 JSValue baseValue = LLINT_OP_C(1).jsValue(); 840 const Identifier& ident = codeBlock->identifier(bytecode.property); 841 842 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 832 843 PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext()); 833 if ( pc[8].u.putByIdFlags & PutByIdIsDirect)834 CommonSlowPaths::putDirectWithReify(vm, exec, asObject(baseValue), ident, LLINT_OP_C( 3).jsValue(), slot);844 if (metadata.flags & PutByIdIsDirect) 845 CommonSlowPaths::putDirectWithReify(vm, exec, asObject(baseValue), ident, LLINT_OP_C(bytecode.value).jsValue(), slot); 835 846 else 836 baseValue.putInline(exec, ident, LLINT_OP_C( 3).jsValue(), slot);847 baseValue.putInline(exec, ident, LLINT_OP_C(bytecode.value).jsValue(), slot); 837 848 LLINT_CHECK_EXCEPTION(); 838 849 … … 841 852 && slot.isCacheablePut()) { 842 853 843 844 854 { 845 StructureID oldStructureID = pc[4].u.structureID;855 StructureID oldStructureID = metadata.oldStructure; 846 856 if 
(oldStructureID) { 847 857 Structure* a = vm.heap.structureIDTable().get(oldStructureID); … … 858 868 859 869 // Start out by clearing out the old cache. 860 pc[4].u.pointer = nullptr; // old structure 861 pc[5].u.pointer = nullptr; // offset 862 pc[6].u.pointer = nullptr; // new structure 863 pc[7].u.pointer = nullptr; // structure chain 864 pc[8].u.putByIdFlags = 865 static_cast<PutByIdFlags>(pc[8].u.putByIdFlags & PutByIdPersistentFlagsMask); 870 metadata.oldStructure = 0; 871 metadata.offset = 0; 872 metadata.newStructure = 0; 873 metadata.structureChain.clear(); 874 metadata.flags = static_cast<PutByIdFlags>(metadata.flags & PutByIdPersistentFlagsMask); 866 875 867 876 JSCell* baseCell = baseValue.asCell(); … … 884 893 if (result != InvalidPrototypeChain && !sawPolyProto) { 885 894 ASSERT(structure->previousID()->isObject()); 886 pc[4].u.structureID= structure->previousID()->id();887 pc[5].u.operand= slot.cachedOffset();888 pc[6].u.structureID= structure->id();889 if (!( pc[8].u.putByIdFlags & PutByIdIsDirect)) {895 metadata.oldStructure = structure->previousID()->id(); 896 metadata.offset = slot.cachedOffset(); 897 metadata.newStructure = structure->id(); 898 if (!(metadata.flags & PutByIdIsDirect)) { 890 899 StructureChain* chain = structure->prototypeChain(exec, asObject(baseCell)); 891 900 ASSERT(chain); 892 pc[7].u.structureChain.set(vm, codeBlock, chain);901 metadata.structureChain.set(vm, codeBlock, chain); 893 902 } 894 pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(895 pc[8].u.putByIdFlags |903 metadata.flags = static_cast<PutByIdFlags>( 904 metadata.flags | 896 905 structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); 897 906 } … … 899 908 } else { 900 909 structure->didCachePropertyReplacement(vm, slot.cachedOffset()); 901 pc[4].u.structureID= structure->id();902 pc[5].u.operand= slot.cachedOffset();903 pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(904 pc[8].u.putByIdFlags |910 metadata.oldStructure = structure->id(); 911 metadata.offset = slot.cachedOffset(); 912 metadata.flags = static_cast<PutByIdFlags>( 913 metadata.flags | 905 914 structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); 906 915 } … … 914 923 { 915 924 LLINT_BEGIN(); 925 auto bytecode = pc->as<OpDelById>(); 916 926 CodeBlock* codeBlock = exec->codeBlock(); 917 JSObject* baseObject = LLINT_OP_C( 2).jsValue().toObject(exec);927 JSObject* baseObject = LLINT_OP_C(bytecode.base).jsValue().toObject(exec); 918 928 LLINT_CHECK_EXCEPTION(); 919 bool couldDelete = baseObject->methodTable(vm)->deleteProperty(baseObject, exec, codeBlock->identifier( pc[3].u.operand));929 bool couldDelete = baseObject->methodTable(vm)->deleteProperty(baseObject, exec, codeBlock->identifier(bytecode.property)); 920 930 LLINT_CHECK_EXCEPTION(); 921 931 if (!couldDelete && codeBlock->isStrictMode()) … … 924 934 } 925 935 926 static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, Instruction* pc, JSValue baseValue, JSValue subscript) 927 { 936 static ALWAYS_INLINE JSValue getByVal(VM& vm, ExecState* exec, OpGetByVal bytecode) 937 { 938 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 939 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 928 940 auto scope = DECLARE_THROW_SCOPE(vm); 929 941 … … 940 952 if (subscript.isUInt32()) { 941 953 uint32_t i = subscript.asUInt32(); 942 ArrayProfile* arrayProfile = arrayProfileFor<OpGetByValShape>(pc); 954 auto& metadata = bytecode.metadata(exec); 955 ArrayProfile* arrayProfile = &metadata.arrayProfile; 943 956 944 957 if 
(isJSString(baseValue)) { … … 981 994 { 982 995 LLINT_BEGIN(); 983 LLINT_RETURN_PROFILED(op_get_by_val, getByVal(vm, exec, pc, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())); 996 auto bytecode = pc->as<OpGetByVal>(); 997 LLINT_RETURN_PROFILED(getByVal(vm, exec, bytecode)); 984 998 } 985 999 … … 988 1002 LLINT_BEGIN(); 989 1003 990 JSValue baseValue = LLINT_OP_C(1).jsValue(); 991 JSValue subscript = LLINT_OP_C(2).jsValue(); 992 JSValue value = LLINT_OP_C(3).jsValue(); 1004 auto bytecode = pc->as<OpPutByVal>(); 1005 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 1006 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 1007 JSValue value = LLINT_OP_C(bytecode.value).jsValue(); 993 1008 bool isStrictMode = exec->codeBlock()->isStrictMode(); 994 1009 … … 1018 1033 LLINT_BEGIN(); 1019 1034 1020 JSValue baseValue = LLINT_OP_C(1).jsValue(); 1021 JSValue subscript = LLINT_OP_C(2).jsValue(); 1022 JSValue value = LLINT_OP_C(3).jsValue(); 1035 auto bytecode = pc->as<OpPutByValDirect>(); 1036 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 1037 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 1038 JSValue value = LLINT_OP_C(bytecode.value).jsValue(); 1023 1039 RELEASE_ASSERT(baseValue.isObject()); 1024 1040 JSObject* baseObject = asObject(baseValue); … … 1057 1073 { 1058 1074 LLINT_BEGIN(); 1059 JSValue baseValue = LLINT_OP_C(2).jsValue(); 1075 auto bytecode = pc->as<OpDelByVal>(); 1076 JSValue baseValue = LLINT_OP_C(bytecode.base).jsValue(); 1060 1077 JSObject* baseObject = baseValue.toObject(exec); 1061 1078 LLINT_CHECK_EXCEPTION(); 1062 1079 1063 JSValue subscript = LLINT_OP_C( 3).jsValue();1080 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 1064 1081 1065 1082 bool couldDelete; … … 1085 1102 { 1086 1103 LLINT_BEGIN(); 1087 ASSERT(LLINT_OP(1).jsValue().isObject()); 1088 JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); 1089 1090 unsigned options = pc[3].u.operand; 1091 1092 JSValue getter = LLINT_OP(4).jsValue(); 1104 auto bytecode = pc->as<OpPutGetterById>(); 1105 ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); 1106 JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); 1107 1108 unsigned options = bytecode.attributes; 1109 1110 JSValue getter = LLINT_OP(bytecode.accessor).jsValue(); 1093 1111 ASSERT(getter.isObject()); 1094 1112 1095 baseObj->putGetter(exec, exec->codeBlock()->identifier( pc[2].u.operand), asObject(getter), options);1113 baseObj->putGetter(exec, exec->codeBlock()->identifier(bytecode.property), asObject(getter), options); 1096 1114 LLINT_END(); 1097 1115 } … … 1100 1118 { 1101 1119 LLINT_BEGIN(); 1102 ASSERT(LLINT_OP(1).jsValue().isObject()); 1103 JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); 1104 1105 unsigned options = pc[3].u.operand; 1106 1107 JSValue setter = LLINT_OP(4).jsValue(); 1120 auto bytecode = pc->as<OpPutSetterById>(); 1121 ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); 1122 JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); 1123 1124 unsigned options = bytecode.attributes; 1125 1126 JSValue setter = LLINT_OP(bytecode.accessor).jsValue(); 1108 1127 ASSERT(setter.isObject()); 1109 1128 1110 baseObj->putSetter(exec, exec->codeBlock()->identifier( pc[2].u.operand), asObject(setter), options);1129 baseObj->putSetter(exec, exec->codeBlock()->identifier(bytecode.property), asObject(setter), options); 1111 1130 LLINT_END(); 1112 1131 } … … 1115 1134 { 1116 1135 LLINT_BEGIN(); 1117 ASSERT(LLINT_OP(1).jsValue().isObject()); 1118 JSObject* baseObject = 
asObject(LLINT_OP(1).jsValue()); 1119 1120 JSValue getter = LLINT_OP(4).jsValue(); 1121 JSValue setter = LLINT_OP(5).jsValue(); 1136 auto bytecode = pc->as<OpPutGetterSetterById>(); 1137 ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); 1138 JSObject* baseObject = asObject(LLINT_OP(bytecode.base).jsValue()); 1139 1140 JSValue getter = LLINT_OP(bytecode.getter).jsValue(); 1141 JSValue setter = LLINT_OP(bytecode.setter).jsValue(); 1122 1142 ASSERT(getter.isObject() || setter.isObject()); 1123 1143 GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject(), getter, setter); 1124 1144 1125 CommonSlowPaths::putDirectAccessorWithReify(vm, exec, baseObject, exec->codeBlock()->identifier( pc[2].u.operand), accessor, pc[3].u.operand);1145 CommonSlowPaths::putDirectAccessorWithReify(vm, exec, baseObject, exec->codeBlock()->identifier(bytecode.property), accessor, bytecode.attributes); 1126 1146 LLINT_END(); 1127 1147 } … … 1130 1150 { 1131 1151 LLINT_BEGIN(); 1132 ASSERT(LLINT_OP(1).jsValue().isObject()); 1133 JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); 1134 JSValue subscript = LLINT_OP_C(2).jsValue(); 1135 1136 unsigned options = pc[3].u.operand; 1137 1138 JSValue getter = LLINT_OP(4).jsValue(); 1152 auto bytecode = pc->as<OpPutGetterByVal>(); 1153 ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); 1154 JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); 1155 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 1156 1157 unsigned options = bytecode.attributes; 1158 1159 JSValue getter = LLINT_OP(bytecode.accessor).jsValue(); 1139 1160 ASSERT(getter.isObject()); 1140 1161 … … 1149 1170 { 1150 1171 LLINT_BEGIN(); 1151 ASSERT(LLINT_OP(1).jsValue().isObject()); 1152 JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); 1153 JSValue subscript = LLINT_OP_C(2).jsValue(); 1154 1155 unsigned options = pc[3].u.operand; 1156 1157 JSValue setter = LLINT_OP(4).jsValue(); 1172 auto bytecode = pc->as<OpPutSetterByVal>(); 1173 ASSERT(LLINT_OP(bytecode.base).jsValue().isObject()); 1174 JSObject* baseObj = asObject(LLINT_OP(bytecode.base).jsValue()); 1175 JSValue subscript = LLINT_OP_C(bytecode.property).jsValue(); 1176 1177 unsigned options = bytecode.attributes; 1178 1179 JSValue setter = LLINT_OP(bytecode.accessor).jsValue(); 1158 1180 ASSERT(setter.isObject()); 1159 1181 … … 1168 1190 { 1169 1191 LLINT_BEGIN(); 1170 LLINT_BRANCH(op_jtrue, LLINT_OP_C(1).jsValue().toBoolean(exec)); 1192 auto bytecode = pc->as<OpJtrue>(); 1193 LLINT_BRANCH(LLINT_OP_C(bytecode.condition).jsValue().toBoolean(exec)); 1171 1194 } 1172 1195 … … 1174 1197 { 1175 1198 LLINT_BEGIN(); 1176 LLINT_BRANCH(op_jfalse, !LLINT_OP_C(1).jsValue().toBoolean(exec)); 1199 auto bytecode = pc->as<OpJfalse>(); 1200 LLINT_BRANCH(!LLINT_OP_C(bytecode.condition).jsValue().toBoolean(exec)); 1177 1201 } 1178 1202 … … 1180 1204 { 1181 1205 LLINT_BEGIN(); 1182 LLINT_BRANCH(op_jless, jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1206 auto bytecode = pc->as<OpJless>(); 1207 LLINT_BRANCH(jsLess<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1183 1208 } 1184 1209 … … 1186 1211 { 1187 1212 LLINT_BEGIN(); 1188 LLINT_BRANCH(op_jnless, !jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1213 auto bytecode = pc->as<OpJnless>(); 1214 LLINT_BRANCH(!jsLess<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1189 1215 } 1190 1216 … … 1192 1218 { 1193 1219 LLINT_BEGIN(); 1194 LLINT_BRANCH(op_jgreater, 
jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); 1220 auto bytecode = pc->as<OpJgreater>(); 1221 LLINT_BRANCH(jsLess<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); 1195 1222 } 1196 1223 … … 1198 1225 { 1199 1226 LLINT_BEGIN(); 1200 LLINT_BRANCH(op_jngreater, !jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); 1227 auto bytecode = pc->as<OpJngreater>(); 1228 LLINT_BRANCH(!jsLess<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); 1201 1229 } 1202 1230 … … 1204 1232 { 1205 1233 LLINT_BEGIN(); 1206 LLINT_BRANCH(op_jlesseq, jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1234 auto bytecode = pc->as<OpJlesseq>(); 1235 LLINT_BRANCH(jsLessEq<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1207 1236 } 1208 1237 … … 1210 1239 { 1211 1240 LLINT_BEGIN(); 1212 LLINT_BRANCH(op_jnlesseq, !jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1241 auto bytecode = pc->as<OpJnlesseq>(); 1242 LLINT_BRANCH(!jsLessEq<true>(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1213 1243 } 1214 1244 … … 1216 1246 { 1217 1247 LLINT_BEGIN(); 1218 LLINT_BRANCH(op_jgreatereq, jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); 1248 auto bytecode = pc->as<OpJgreatereq>(); 1249 LLINT_BRANCH(jsLessEq<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); 1219 1250 } 1220 1251 … … 1222 1253 { 1223 1254 LLINT_BEGIN(); 1224 LLINT_BRANCH(op_jngreatereq, !jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue())); 1255 auto bytecode = pc->as<OpJngreatereq>(); 1256 LLINT_BRANCH(!jsLessEq<false>(exec, LLINT_OP_C(bytecode.rhs).jsValue(), LLINT_OP_C(bytecode.lhs).jsValue())); 1225 1257 } 1226 1258 … … 1228 1260 { 1229 1261 LLINT_BEGIN(); 1230 LLINT_BRANCH(op_jeq, JSValue::equal(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1262 auto bytecode = pc->as<OpJeq>(); 1263 LLINT_BRANCH(JSValue::equal(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1231 1264 } 1232 1265 … … 1234 1267 { 1235 1268 LLINT_BEGIN(); 1236 LLINT_BRANCH(op_jneq, !JSValue::equal(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1269 auto bytecode = pc->as<OpJneq>(); 1270 LLINT_BRANCH(!JSValue::equal(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1237 1271 } 1238 1272 … … 1240 1274 { 1241 1275 LLINT_BEGIN(); 1242 LLINT_BRANCH(op_jstricteq, JSValue::strictEqual(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1276 auto bytecode = pc->as<OpJstricteq>(); 1277 LLINT_BRANCH(JSValue::strictEqual(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1243 1278 } 1244 1279 … … 1246 1281 { 1247 1282 LLINT_BEGIN(); 1248 LLINT_BRANCH(op_jnstricteq, !JSValue::strictEqual(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue())); 1283 auto bytecode = pc->as<OpJnstricteq>(); 1284 LLINT_BRANCH(!JSValue::strictEqual(exec, LLINT_OP_C(bytecode.lhs).jsValue(), LLINT_OP_C(bytecode.rhs).jsValue())); 1249 1285 } 1250 1286 … … 1252 1288 { 1253 1289 LLINT_BEGIN(); 1254 JSValue scrutinee = LLINT_OP_C(3).jsValue(); 1290 auto bytecode = pc->as<OpSwitchImm>(); 1291 JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); 1255 1292 ASSERT(scrutinee.isDouble()); 1256 1293 double value = scrutinee.asDouble(); 1257 1294 int32_t intValue = static_cast<int32_t>(value); 1258 int 
defaultOffset = pc[2].u.operand;1295 int defaultOffset = JUMP_OFFSET(bytecode.defaultOffset); 1259 1296 if (value == intValue) { 1260 1297 CodeBlock* codeBlock = exec->codeBlock(); 1261 pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset);1298 JUMP_TO(codeBlock->switchJumpTable(bytecode.tableIndex).offsetForValue(intValue, defaultOffset)); 1262 1299 } else 1263 pc += defaultOffset;1300 JUMP_TO(defaultOffset); 1264 1301 LLINT_END(); 1265 1302 } … … 1268 1305 { 1269 1306 LLINT_BEGIN(); 1270 JSValue scrutinee = LLINT_OP_C(3).jsValue(); 1307 auto bytecode = pc->as<OpSwitchChar>(); 1308 JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); 1271 1309 ASSERT(scrutinee.isString()); 1272 1310 JSString* string = asString(scrutinee); 1273 1311 ASSERT(string->length() == 1); 1274 int defaultOffset = pc[2].u.operand;1312 int defaultOffset = JUMP_OFFSET(bytecode.defaultOffset); 1275 1313 StringImpl* impl = string->value(exec).impl(); 1276 1314 CodeBlock* codeBlock = exec->codeBlock(); 1277 pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue((*impl)[0], defaultOffset);1315 JUMP_TO(codeBlock->switchJumpTable(bytecode.tableIndex).offsetForValue((*impl)[0], defaultOffset)); 1278 1316 LLINT_END(); 1279 1317 } … … 1282 1320 { 1283 1321 LLINT_BEGIN(); 1284 JSValue scrutinee = LLINT_OP_C(3).jsValue(); 1285 int defaultOffset = pc[2].u.operand; 1322 auto bytecode = pc->as<OpSwitchString>(); 1323 JSValue scrutinee = LLINT_OP_C(bytecode.scrutinee).jsValue(); 1324 int defaultOffset = JUMP_OFFSET(bytecode.defaultOffset); 1286 1325 if (!scrutinee.isString()) 1287 pc += defaultOffset;1326 JUMP_TO(defaultOffset); 1288 1327 else { 1289 1328 CodeBlock* codeBlock = exec->codeBlock(); 1290 pc += codeBlock->stringSwitchJumpTable(pc[1].u.operand).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset);1329 JUMP_TO(codeBlock->stringSwitchJumpTable(bytecode.tableIndex).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset)); 1291 1330 } 1292 1331 LLINT_END(); … … 1296 1335 { 1297 1336 LLINT_BEGIN(); 1337 auto bytecode = pc->as<OpNewFunc>(); 1298 1338 CodeBlock* codeBlock = exec->codeBlock(); 1299 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1339 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1300 1340 slowPathLogF("Creating function!\n"); 1301 LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl( pc[3].u.operand), scope));1341 LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); 1302 1342 } 1303 1343 … … 1305 1345 { 1306 1346 LLINT_BEGIN(); 1347 auto bytecode = pc->as<OpNewGeneratorFunc>(); 1307 1348 CodeBlock* codeBlock = exec->codeBlock(); 1308 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1349 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1309 1350 slowPathLogF("Creating function!\n"); 1310 LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl( pc[3].u.operand), scope));1351 LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); 1311 1352 } 1312 1353 … … 1314 1355 { 1315 1356 LLINT_BEGIN(); 1357 auto bytecode = pc->as<OpNewAsyncFunc>(); 1316 1358 CodeBlock* codeBlock = exec->codeBlock(); 1317 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1359 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1318 1360 slowPathLogF("Creating async function!\n"); 1319 LLINT_RETURN(JSAsyncFunction::create(vm, 
codeBlock->functionDecl( pc[3].u.operand), scope));1361 LLINT_RETURN(JSAsyncFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); 1320 1362 } 1321 1363 … … 1323 1365 { 1324 1366 LLINT_BEGIN(); 1367 auto bytecode = pc->as<OpNewAsyncGeneratorFunc>(); 1325 1368 CodeBlock* codeBlock = exec->codeBlock(); 1326 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1369 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1327 1370 slowPathLogF("Creating async generator function!\n"); 1328 LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, codeBlock->functionDecl( pc[3].u.operand), scope));1371 LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, codeBlock->functionDecl(bytecode.functionDecl), scope)); 1329 1372 } 1330 1373 … … 1333 1376 LLINT_BEGIN(); 1334 1377 1378 auto bytecode = pc->as<OpNewFuncExp>(); 1335 1379 CodeBlock* codeBlock = exec->codeBlock(); 1336 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1337 FunctionExecutable* executable = codeBlock->functionExpr( pc[3].u.operand);1380 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1381 FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); 1338 1382 1339 1383 LLINT_RETURN(JSFunction::create(vm, executable, scope)); … … 1344 1388 LLINT_BEGIN(); 1345 1389 1390 auto bytecode = pc->as<OpNewGeneratorFuncExp>(); 1346 1391 CodeBlock* codeBlock = exec->codeBlock(); 1347 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1348 FunctionExecutable* executable = codeBlock->functionExpr( pc[3].u.operand);1392 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1393 FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); 1349 1394 1350 1395 LLINT_RETURN(JSGeneratorFunction::create(vm, executable, scope)); … … 1355 1400 LLINT_BEGIN(); 1356 1401 1402 auto bytecode = pc->as<OpNewAsyncFuncExp>(); 1357 1403 CodeBlock* codeBlock = exec->codeBlock(); 1358 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1359 FunctionExecutable* executable = codeBlock->functionExpr( pc[3].u.operand);1404 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1405 FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); 1360 1406 1361 1407 LLINT_RETURN(JSAsyncFunction::create(vm, executable, scope)); … … 1366 1412 LLINT_BEGIN(); 1367 1413 1414 auto bytecode = pc->as<OpNewAsyncGeneratorFuncExp>(); 1368 1415 CodeBlock* codeBlock = exec->codeBlock(); 1369 JSScope* scope = exec->uncheckedR( pc[2].u.operand).Register::scope();1370 FunctionExecutable* executable = codeBlock->functionExpr( pc[3].u.operand);1416 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1417 FunctionExecutable* executable = codeBlock->functionExpr(bytecode.functionDecl); 1371 1418 1372 1419 LLINT_RETURN(JSAsyncGeneratorFunction::create(vm, executable, scope)); … … 1376 1423 { 1377 1424 LLINT_BEGIN(); 1378 JSFunction* func = jsCast<JSFunction*>(LLINT_OP(1).Register::unboxedCell()); 1379 JSValue name = LLINT_OP_C(2).Register::jsValue(); 1425 auto bytecode = pc->as<OpSetFunctionName>(); 1426 JSFunction* func = jsCast<JSFunction*>(LLINT_OP(bytecode.function).Register::unboxedCell()); 1427 JSValue name = LLINT_OP_C(bytecode.name).Register::jsValue(); 1380 1428 func->setFunctionName(exec, name); 1381 1429 LLINT_END(); 1382 1430 } 1383 1431 1384 static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind 
kind) 1385 { 1386 UNUSED_PARAM(pc); 1387 1432 static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind) 1433 { 1388 1434 slowPathLog("Performing host call.\n"); 1389 1435 … … 1438 1484 } 1439 1485 1440 inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0)1486 inline SlowPathReturnType setUpCall(ExecState* execCallee, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = nullptr) 1441 1487 { 1442 1488 ExecState* exec = execCallee->callerFrame(); … … 1468 1514 LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress(), JSEntryPtrTag); 1469 1515 } 1470 RELEASE_AND_RETURN(throwScope, handleHostCall(execCallee, pc,calleeAsValue, kind));1516 RELEASE_AND_RETURN(throwScope, handleHostCall(execCallee, calleeAsValue, kind)); 1471 1517 } 1472 1518 JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell); … … 1520 1566 } 1521 1567 1522 inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind) 1568 template<typename Op> 1569 inline SlowPathReturnType genericCall(ExecState* exec, Op&& bytecode, CodeSpecializationKind kind) 1523 1570 { 1524 1571 // This needs to: … … 1528 1575 // - Return a tuple of machine code address to call and the new call frame. 1529 1576 1530 JSValue calleeAsValue = LLINT_OP_C( 2).jsValue();1531 1532 ExecState* execCallee = exec - pc[4].u.operand;1533 1534 execCallee->setArgumentCountIncludingThis( pc[3].u.operand);1577 JSValue calleeAsValue = LLINT_OP_C(bytecode.callee).jsValue(); 1578 1579 ExecState* execCallee = exec - bytecode.argv; 1580 1581 execCallee->setArgumentCountIncludingThis(bytecode.argc); 1535 1582 execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue; 1536 1583 execCallee->setCallerFrame(exec); 1537 1584 1538 ASSERT(pc[5].u.callLinkInfo);1539 return setUpCall(execCallee, pc, kind, calleeAsValue, pc[5].u.callLinkInfo);1585 auto& metadata = bytecode.metadata(exec); 1586 return setUpCall(execCallee, kind, calleeAsValue, &metadata.callLinkInfo); 1540 1587 } 1541 1588 … … 1543 1590 { 1544 1591 LLINT_BEGIN_NO_SET_PC(); 1545 RELEASE_AND_RETURN(throwScope, genericCall(exec, pc, CodeForCall)); 1592 RELEASE_AND_RETURN(throwScope, genericCall(exec, pc->as<OpCall>(), CodeForCall)); 1593 } 1594 1595 LLINT_SLOW_PATH_DECL(slow_path_tail_call) 1596 { 1597 LLINT_BEGIN_NO_SET_PC(); 1598 RELEASE_AND_RETURN(throwScope, genericCall(exec, pc->as<OpTailCall>(), CodeForCall)); 1546 1599 } 1547 1600 … … 1549 1602 { 1550 1603 LLINT_BEGIN_NO_SET_PC(); 1551 RELEASE_AND_RETURN(throwScope, genericCall(exec, pc , CodeForConstruct));1604 RELEASE_AND_RETURN(throwScope, genericCall(exec, pc->as<OpConstruct>(), CodeForConstruct)); 1552 1605 } 1553 1606 … … 1558 1611 // - Set up a call frame while respecting the variable arguments. 
1559 1612 1560 unsigned numUsedStackSlots = -pc[5].u.operand; 1561 unsigned length = sizeFrameForVarargs(exec, vm, 1562 LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand); 1613 unsigned numUsedStackSlots; 1614 JSValue arguments; 1615 int firstVarArg; 1616 switch (pc->opcodeID()) { 1617 case op_call_varargs: { 1618 auto bytecode = pc->as<OpCallVarargs>(); 1619 numUsedStackSlots = -bytecode.firstFree.offset(); 1620 arguments = LLINT_OP_C(bytecode.arguments).jsValue(); 1621 firstVarArg = bytecode.firstVarArg; 1622 break; 1623 } 1624 case op_tail_call_varargs: { 1625 auto bytecode = pc->as<OpTailCallVarargs>(); 1626 numUsedStackSlots = -bytecode.firstFree.offset(); 1627 arguments = LLINT_OP_C(bytecode.arguments).jsValue(); 1628 firstVarArg = bytecode.firstVarArg; 1629 break; 1630 } 1631 case op_construct_varargs: { 1632 auto bytecode = pc->as<OpConstructVarargs>(); 1633 numUsedStackSlots = -bytecode.firstFree.offset(); 1634 arguments = LLINT_OP_C(bytecode.arguments).jsValue(); 1635 firstVarArg = bytecode.firstVarArg; 1636 break; 1637 } 1638 default: 1639 RELEASE_ASSERT_NOT_REACHED(); 1640 } 1641 unsigned length = sizeFrameForVarargs(exec, vm, arguments, numUsedStackSlots, firstVarArg); 1563 1642 LLINT_CALL_CHECK_EXCEPTION(exec, exec); 1564 1643 … … 1576 1655 // - Set up a call frame with the same arguments as the current frame. 1577 1656 1578 unsigned numUsedStackSlots = -pc[5].u.operand; 1657 auto bytecode = pc->as<OpTailCallForwardArguments>(); 1658 unsigned numUsedStackSlots = -bytecode.firstFree.offset(); 1579 1659 1580 1660 unsigned arguments = sizeFrameForForwardArguments(exec, vm, numUsedStackSlots); … … 1594 1674 }; 1595 1675 1596 inline SlowPathReturnType varargsSetup(ExecState* exec, Instruction* pc, CodeSpecializationKind kind, SetArgumentsWith set) 1676 template<typename Op> 1677 inline SlowPathReturnType varargsSetup(ExecState* exec, const Instruction* pc, CodeSpecializationKind kind, SetArgumentsWith set) 1597 1678 { 1598 1679 LLINT_BEGIN_NO_SET_PC(); … … 1601 1682 // - Return a tuple of machine code address to call and the new call frame. 
1602 1683 1603 JSValue calleeAsValue = LLINT_OP_C(2).jsValue(); 1684 auto bytecode = pc->as<Op>(); 1685 JSValue calleeAsValue = LLINT_OP_C(bytecode.callee).jsValue(); 1604 1686 1605 1687 ExecState* execCallee = vm.newCallFrameReturnValue; 1606 1688 1607 1689 if (set == SetArgumentsWith::Object) { 1608 setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C( 3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);1690 setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(bytecode.thisValue).jsValue(), LLINT_OP_C(bytecode.arguments).jsValue(), bytecode.firstVarArg, vm.varargsLength); 1609 1691 LLINT_CALL_CHECK_EXCEPTION(exec, exec); 1610 1692 } else 1611 setupForwardArgumentsFrameAndSetThis(exec, execCallee, LLINT_OP_C( 3).jsValue(), vm.varargsLength);1693 setupForwardArgumentsFrameAndSetThis(exec, execCallee, LLINT_OP_C(bytecode.thisValue).jsValue(), vm.varargsLength); 1612 1694 1613 1695 execCallee->setCallerFrame(exec); … … 1615 1697 exec->setCurrentVPC(pc); 1616 1698 1617 RELEASE_AND_RETURN(throwScope, setUpCall(execCallee, pc,kind, calleeAsValue));1699 RELEASE_AND_RETURN(throwScope, setUpCall(execCallee, kind, calleeAsValue)); 1618 1700 } 1619 1701 1620 1702 LLINT_SLOW_PATH_DECL(slow_path_call_varargs) 1621 1703 { 1622 return varargsSetup(exec, pc, CodeForCall, SetArgumentsWith::Object); 1704 return varargsSetup<OpCallVarargs>(exec, pc, CodeForCall, SetArgumentsWith::Object); 1705 } 1706 1707 LLINT_SLOW_PATH_DECL(slow_path_tail_call_varargs) 1708 { 1709 return varargsSetup<OpTailCallVarargs>(exec, pc, CodeForCall, SetArgumentsWith::Object); 1623 1710 } 1624 1711 1625 1712 LLINT_SLOW_PATH_DECL(slow_path_tail_call_forward_arguments) 1626 1713 { 1627 return varargsSetup (exec, pc, CodeForCall, SetArgumentsWith::CurrentArguments);1714 return varargsSetup<OpTailCallForwardArguments>(exec, pc, CodeForCall, SetArgumentsWith::CurrentArguments); 1628 1715 } 1629 1716 1630 1717 LLINT_SLOW_PATH_DECL(slow_path_construct_varargs) 1631 1718 { 1632 return varargsSetup(exec, pc, CodeForConstruct, SetArgumentsWith::Object); 1633 } 1634 1635 1636 LLINT_SLOW_PATH_DECL(slow_path_call_eval) 1719 return varargsSetup<OpConstructVarargs>(exec, pc, CodeForConstruct, SetArgumentsWith::Object); 1720 } 1721 1722 inline SlowPathReturnType commonCallEval(ExecState* exec, const Instruction* pc, MacroAssemblerCodePtr<JSEntryPtrTag> returnPoint) 1637 1723 { 1638 1724 LLINT_BEGIN_NO_SET_PC(); 1639 JSValue calleeAsValue = LLINT_OP(2).jsValue(); 1640 1641 ExecState* execCallee = exec - pc[4].u.operand; 1642 1643 execCallee->setArgumentCountIncludingThis(pc[3].u.operand); 1725 auto bytecode = pc->as<OpCallEval>(); 1726 JSValue calleeAsValue = LLINT_OP(bytecode.callee).jsValue(); 1727 1728 ExecState* execCallee = exec - bytecode.argv; 1729 1730 execCallee->setArgumentCountIncludingThis(bytecode.argc); 1644 1731 execCallee->setCallerFrame(exec); 1645 1732 execCallee->uncheckedR(CallFrameSlot::callee) = calleeAsValue; 1646 execCallee->setReturnPC( LLInt::getCodePtr<JSEntryPtrTag>(llint_generic_return_point).executableAddress());1733 execCallee->setReturnPC(returnPoint.executableAddress()); 1647 1734 execCallee->setCodeBlock(0); 1648 1735 exec->setCurrentVPC(pc); 1649 1736 1650 1737 if (!isHostFunction(calleeAsValue, globalFuncEval)) 1651 RELEASE_AND_RETURN(throwScope, setUpCall(execCallee, pc,CodeForCall, calleeAsValue));1738 RELEASE_AND_RETURN(throwScope, setUpCall(execCallee, CodeForCall, calleeAsValue)); 1652 1739 1653 1740 vm.hostCallReturnValue = eval(execCallee); 1654 1741 LLINT_CALL_RETURN(exec, 
execCallee, LLInt::getCodePtr(getHostCallReturnValue), CFunctionPtrTag); 1655 1742 } 1743 1744 LLINT_SLOW_PATH_DECL(slow_path_call_eval) 1745 { 1746 return commonCallEval(exec, pc, LLInt::getCodePtr<JSEntryPtrTag>(llint_generic_return_point)); 1747 } 1748 1749 LLINT_SLOW_PATH_DECL(slow_path_call_eval_wide) 1750 { 1751 return commonCallEval(exec, pc, LLInt::getWideCodePtr<JSEntryPtrTag>(llint_generic_return_point)); 1752 } 1656 1753 1657 1754 LLINT_SLOW_PATH_DECL(slow_path_strcat) 1658 1755 { 1659 1756 LLINT_BEGIN(); 1660 LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(2), pc[3].u.operand)); 1757 auto bytecode = pc->as<OpStrcat>(); 1758 LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(bytecode.src), bytecode.count)); 1661 1759 } 1662 1760 … … 1664 1762 { 1665 1763 LLINT_BEGIN(); 1666 LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec)); 1764 auto bytecode = pc->as<OpToPrimitive>(); 1765 LLINT_RETURN(LLINT_OP_C(bytecode.src).jsValue().toPrimitive(exec)); 1667 1766 } 1668 1767 … … 1670 1769 { 1671 1770 LLINT_BEGIN(); 1672 LLINT_THROW(LLINT_OP_C(1).jsValue()); 1771 auto bytecode = pc->as<OpThrow>(); 1772 LLINT_THROW(LLINT_OP_C(bytecode.value).jsValue()); 1673 1773 } 1674 1774 … … 1685 1785 { 1686 1786 LLINT_BEGIN(); 1687 int debugHookType = pc[1].u.operand;1688 vm.interpreter->debug(exec, static_cast<DebugHookType>(debugHookType));1787 auto bytecode = pc->as<OpDebug>(); 1788 vm.interpreter->debug(exec, bytecode.debugHookType); 1689 1789 1690 1790 LLINT_END(); … … 1702 1802 { 1703 1803 LLINT_BEGIN(); 1704 const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); 1705 JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue()); 1706 GetPutInfo getPutInfo(pc[4].u.operand); 1804 auto bytecode = pc->as<OpGetFromScope>(); 1805 auto& metadata = bytecode.metadata(exec); 1806 const Identifier& ident = exec->codeBlock()->identifier(bytecode.var); 1807 JSObject* scope = jsCast<JSObject*>(LLINT_OP(bytecode.scope).jsValue()); 1707 1808 1708 1809 // ModuleVar is always converted to ClosureVar for get_from_scope. 
1709 ASSERT( getPutInfo.resolveType() != ModuleVar);1810 ASSERT(metadata.getPutInfo.resolveType() != ModuleVar); 1710 1811 1711 1812 LLINT_RETURN(scope->getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue { 1712 1813 if (!found) { 1713 if ( getPutInfo.resolveMode() == ThrowIfNotFound)1814 if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound) 1714 1815 return throwException(exec, throwScope, createUndefinedVariableError(exec, ident)); 1715 1816 return jsUndefined(); … … 1724 1825 } 1725 1826 1726 CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);1827 CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, bytecode, scope, slot, ident); 1727 1828 1728 1829 if (!result) … … 1736 1837 LLINT_BEGIN(); 1737 1838 1839 auto bytecode = pc->as<OpPutToScope>(); 1840 auto& metadata = bytecode.metadata(exec); 1738 1841 CodeBlock* codeBlock = exec->codeBlock(); 1739 const Identifier& ident = codeBlock->identifier(pc[2].u.operand); 1740 JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue()); 1741 JSValue value = LLINT_OP_C(3).jsValue(); 1742 GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); 1743 if (getPutInfo.resolveType() == LocalClosureVar) { 1842 const Identifier& ident = codeBlock->identifier(bytecode.var); 1843 JSObject* scope = jsCast<JSObject*>(LLINT_OP(bytecode.scope).jsValue()); 1844 JSValue value = LLINT_OP_C(bytecode.value).jsValue(); 1845 if (metadata.getPutInfo.resolveType() == LocalClosureVar) { 1744 1846 JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); 1745 environment->variableAt(ScopeOffset( pc[6].u.operand)).set(vm, environment, value);1847 environment->variableAt(ScopeOffset(metadata.operand)).set(vm, environment, value); 1746 1848 1747 1849 // Have to do this *after* the write, because if this puts the set into IsWatched, then we need 1748 1850 // to have already changed the value of the variable. Otherwise we might watch and constant-fold 1749 1851 // to the Undefined value from before the assignment. 1750 if ( WatchpointSet* set = pc[5].u.watchpointSet)1751 set->touch(vm, "Executed op_put_scope<LocalClosureVar>");1852 if (metadata.watchpointSet) 1853 metadata.watchpointSet->touch(vm, "Executed op_put_scope<LocalClosureVar>"); 1752 1854 LLINT_END(); 1753 1855 } … … 1757 1859 if (hasProperty 1758 1860 && scope->isGlobalLexicalEnvironment() 1759 && !isInitialization( getPutInfo.initializationMode())) {1861 && !isInitialization(metadata.getPutInfo.initializationMode())) { 1760 1862 // When we can't statically prove we need a TDZ check, we must perform the check on the slow path. 
1761 1863 PropertySlot slot(scope, PropertySlot::InternalMethodType::Get); … … 1765 1867 } 1766 1868 1767 if ( getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty)1869 if (metadata.getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) 1768 1870 LLINT_THROW(createUndefinedVariableError(exec, ident)); 1769 1871 1770 PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization( getPutInfo.initializationMode()));1872 PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(metadata.getPutInfo.initializationMode())); 1771 1873 scope->methodTable(vm)->put(scope, exec, ident, value, slot); 1772 1874 1773 CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);1875 CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, bytecode, scope, slot, ident); 1774 1876 1775 1877 LLINT_END(); … … 1790 1892 LLINT_BEGIN(); 1791 1893 1792 JSScope* scope = exec->uncheckedR(pc[1].u.operand).Register::scope(); 1894 auto bytecode = pc->as<OpLogShadowChickenPrologue>(); 1895 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1793 1896 vm.shadowChicken().log(vm, exec, ShadowChicken::Packet::prologue(exec->jsCallee(), exec, exec->callerFrame(), scope)); 1794 1897 … … 1800 1903 LLINT_BEGIN(); 1801 1904 1802 JSValue thisValue = LLINT_OP(1).jsValue(); 1803 JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); 1905 auto bytecode = pc->as<OpLogShadowChickenTail>(); 1906 JSValue thisValue = LLINT_OP(bytecode.thisValue).jsValue(); 1907 JSScope* scope = exec->uncheckedR(bytecode.scope).Register::scope(); 1804 1908 1805 1909 #if USE(JSVALUE64) … … 1819 1923 exec->codeBlock()->ensureCatchLivenessIsComputedForBytecodeOffset(exec->bytecodeOffset()); 1820 1924 1821 ValueProfileAndOperandBuffer* buffer = static_cast<ValueProfileAndOperandBuffer*>(pc[3].u.pointer); 1822 buffer->forEach([&] (ValueProfileAndOperand& profile) { 1925 auto bytecode = pc->as<OpCatch>(); 1926 auto& metadata = bytecode.metadata(exec); 1927 metadata.buffer->forEach([&] (ValueProfileAndOperand& profile) { 1823 1928 profile.m_profile.m_buckets[0] = JSValue::encode(exec->uncheckedR(profile.m_operand).jsValue()); 1824 1929 }); … … 1845 1950 } 1846 1951 1952 LLINT_SLOW_PATH_DECL(slow_path_out_of_line_jump_target) 1953 { 1954 CodeBlock* codeBlock = exec->codeBlock(); 1955 pc = codeBlock->outOfLineJumpTarget(pc); 1956 LLINT_END_IMPL(); 1957 } 1958 1847 1959 extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame) 1848 1960 { -
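To see the shape of the new accessor pattern in isolation: the hunks above are representative of the whole changeset, in that slow paths stop indexing untyped instruction slots such as pc[3].u.operand and instead decode the stream with pc->as<OpFoo>(), reading named operand fields, while per-instruction state is reached through bytecode.metadata(exec). Below is a minimal standalone C++ sketch of that accessor shape; every type and field in it is an invented stand-in for illustration, not JSC's real Instruction or OpStrcat definition.

#include <cstdint>
#include <iostream>

// Hypothetical stand-ins for illustration only, not JSC's real types.
using VirtualRegister = int;

struct OpStrcat {
    static constexpr uint8_t opcodeID = 1;
    VirtualRegister dst;
    VirtualRegister src;
    int count;
};

struct Instruction {
    uint8_t opcodeID;
    OpStrcat decoded; // sketch: the real stream stores packed operand bytes

    template<typename Op>
    Op as() const
    {
        // The real as<Op>() checks that opcodeID matches Op::opcodeID and
        // decodes narrow or wide operands; here the struct is pre-decoded.
        return decoded;
    }
};

int main()
{
    // The real code holds a pointer and writes pc->as<OpStrcat>().
    Instruction pc { OpStrcat::opcodeID, { /*dst*/ 0, /*src*/ 2, /*count*/ 3 } };
    auto bytecode = pc.as<OpStrcat>();
    // Named fields replace magic slot indices like pc[2].u.operand.
    std::cout << "strcat of " << bytecode.count
              << " registers starting at r" << bytecode.src << "\n";
    return 0;
}

The payoff is that operand offsets live in one generated struct per opcode instead of being repeated as magic slot indices at every slow path.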
trunk/Source/JavaScriptCore/llint/LLIntSlowPaths.h
r237486 r237547 37 37 namespace LLInt { 38 38 39 extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);40 extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, intoperand);39 extern "C" SlowPathReturnType llint_trace_operand(ExecState*, const Instruction*, int fromWhere, int operand); 40 extern "C" SlowPathReturnType llint_trace_value(ExecState*, const Instruction*, int fromWhere, VirtualRegister operand); 41 41 extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL; 42 42 43 43 #define LLINT_SLOW_PATH_DECL(name) \ 44 extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc)44 extern "C" SlowPathReturnType llint_##name(ExecState* exec, const Instruction* pc) 45 45 46 46 #define LLINT_SLOW_PATH_HIDDEN_DECL(name) \ … … 70 70 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id_direct); 71 71 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id); 72 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length);73 72 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id); 74 73 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id); … … 110 109 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_set_function_name); 111 110 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call); 111 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call); 112 112 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct); 113 113 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_varargs); 114 114 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_forward_arguments); 115 115 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_varargs); 116 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call_varargs); 116 117 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tail_call_forward_arguments); 117 118 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs); 118 119 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval); 120 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval_wide); 119 121 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments); 120 122 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat); … … 132 134 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_begin); 133 135 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_super_sampler_end); 136 LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_out_of_line_jump_target); 134 137 extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL; 135 138 #if ENABLE(C_LOOP) -
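The reworked LLINT_SLOW_PATH_DECL above is why every slow path in the .cpp hunk now receives a const Instruction* pc. Shown standalone with placeholder types (the real SlowPathReturnType packs two machine-word return values), the expansion looks like this:

struct ExecState;
struct Instruction;
struct SlowPathReturnType { void* first; void* second; }; // simplified stand-in

#define LLINT_SLOW_PATH_DECL(name) \
    extern "C" SlowPathReturnType llint_##name(ExecState* exec, const Instruction* pc)

// Expands to:
//   extern "C" SlowPathReturnType llint_slow_path_out_of_line_jump_target(
//       ExecState* exec, const Instruction* pc);
LLINT_SLOW_PATH_DECL(slow_path_out_of_line_jump_target);

// A dummy definition, just to show the signature is usable as declared.
SlowPathReturnType llint_slow_path_out_of_line_jump_target(ExecState*, const Instruction*)
{
    return { nullptr, nullptr };
}

int main() { return 0; }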
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
r237486 r237547 228 228 229 229 if X86_64 or X86_64_WIN or ARM64 or ARM64E 230 const CalleeSaveSpaceAsVirtualRegisters = 3 230 const CalleeSaveSpaceAsVirtualRegisters = 4 231 elsif C_LOOP 232 const CalleeSaveSpaceAsVirtualRegisters = 1 231 233 else 232 234 const CalleeSaveSpaceAsVirtualRegisters = 0 … … 268 270 const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h 269 271 if ARM64 or ARM64E 272 const metadataTable = csr6 270 273 const PB = csr7 271 274 const tagTypeNumber = csr8 272 275 const tagMask = csr9 273 276 elsif X86_64 277 const metadataTable = csr1 274 278 const PB = csr2 275 279 const tagTypeNumber = csr3 276 280 const tagMask = csr4 277 281 elsif X86_64_WIN 282 const metadataTable = csr3 278 283 const PB = csr4 279 284 const tagTypeNumber = csr5 … … 283 288 const tagTypeNumber = csr1 284 289 const tagMask = csr2 285 end 286 287 macro loadisFromInstruction(offset, dest) 288 loadis offset * PtrSize[PB, PC, PtrSize], dest 289 end 290 291 macro loadpFromInstruction(offset, dest) 292 loadp offset * PtrSize[PB, PC, PtrSize], dest 293 end 294 295 macro loadisFromStruct(offset, dest) 296 loadis offset[PB, PC, PtrSize], dest 297 end 298 299 macro loadpFromStruct(offset, dest) 300 loadp offset[PB, PC, PtrSize], dest 301 end 302 303 macro storeisToInstruction(value, offset) 304 storei value, offset * PtrSize[PB, PC, PtrSize] 305 end 306 307 macro storepToInstruction(value, offset) 308 storep value, offset * PtrSize[PB, PC, PtrSize] 309 end 310 311 macro storeisFromStruct(value, offset) 312 storei value, offset[PB, PC, PtrSize] 313 end 314 315 macro storepFromStruct(value, offset) 316 storep value, offset[PB, PC, PtrSize] 290 const metadataTable = csr3 317 291 end 318 292 319 293 else 320 294 const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h 321 macro loadisFromInstruction(offset, dest) 322 loadis offset * 4[PC], dest 323 end 324 325 macro loadpFromInstruction(offset, dest) 326 loadp offset * 4[PC], dest 327 end 328 329 macro storeisToInstruction(value, offset) 330 storei value, offset * 4[PC] 331 end 332 333 macro loadisFromStruct(offset, dest) 334 loadis offset[PC], dest 335 end 336 337 macro loadpFromStruct(offset, dest) 338 loadp offset[PC], dest 339 end 340 341 macro storeisToStruct(value, offset) 342 storei value, offset[PC] 343 end 344 end 295 if C_LOOP 296 const metadataTable = csr3 297 end 298 end 299 300 macro dispatch(advance) 301 addp advance, PC 302 nextInstruction() 303 end 304 305 macro dispatchIndirect(offset) 306 dispatch(offset) 307 end 308 309 macro dispatchOp(size, op) 310 macro dispatchNarrow() 311 dispatch(constexpr %op%_length) 312 end 313 314 macro dispatchWide() 315 dispatch(constexpr %op%_length * 4 + 1) 316 end 317 318 size(dispatchNarrow, dispatchWide, macro (dispatch) dispatch() end) 319 end 320 321 macro getu(size, op, field, dst) 322 size(getuOperandNarrow, getuOperandWide, macro (getu) 323 getu(op, field, dst) 324 end) 325 end 326 327 macro get(size, op, field, dst) 328 size(getOperandNarrow, getOperandWide, macro (get) 329 get(op, field, dst) 330 end) 331 end 332 333 macro narrow(narrowFn, wideFn, k) 334 k(narrowFn) 335 end 336 337 macro wide(narrowFn, wideFn, k) 338 k(wideFn) 339 end 340 341 macro metadata(size, opcode, dst, scratch) 342 loadp constexpr %opcode%::opcodeID * 4[metadataTable], dst # offset = metadataTable<unsigned*>[opcodeID] 343 getu(size, opcode, metadataID, scratch) # scratch = bytecode.metadataID 344 muli sizeof %opcode%::Metadata, scratch # scratch *= 
sizeof(Op::Metadata) 345 addi scratch, dst # offset += scratch 346 addp metadataTable, dst # return &metadataTable[offset] 347 end 348 349 macro jumpImpl(target) 350 btiz target, .outOfLineJumpTarget 351 dispatchIndirect(target) 352 .outOfLineJumpTarget: 353 callSlowPath(_llint_slow_path_out_of_line_jump_target) 354 nextInstruction() 355 end 356 357 macro commonOp(label, prologue, fn) 358 _%label%: 359 prologue() 360 fn(narrow) 361 362 _%label%_wide: 363 prologue() 364 fn(wide) 365 end 366 367 macro op(l, fn) 368 commonOp(l, macro () end, macro (unused) 369 fn() 370 end) 371 end 372 373 macro llintOp(name, op, fn) 374 commonOp(llint_%name%, traceExecution, macro(size) 375 macro getImpl(field, dst) 376 get(size, op, field, dst) 377 end 378 379 macro dispatchImpl() 380 dispatchOp(size, name) 381 end 382 383 fn(size, getImpl, dispatchImpl) 384 end) 385 end 386 387 macro llintOpWithReturn(name, op, fn) 388 llintOp(name, op, macro(size, get, dispatch) 389 makeReturn(get, dispatch, macro (return) 390 fn(size, get, dispatch, return) 391 end) 392 end) 393 end 394 395 macro llintOpWithMetadata(name, op, fn) 396 llintOpWithReturn(name, op, macro (size, get, dispatch, return) 397 macro meta(dst, scratch) 398 metadata(size, op, dst, scratch) 399 end 400 fn(size, get, dispatch, meta, return) 401 end) 402 end 403 404 macro llintOpWithJump(name, op, impl) 405 llintOpWithMetadata(name, op, macro(size, get, dispatch, metadata, return) 406 macro jump(field) 407 get(field, t0) 408 jumpImpl(t0) 409 end 410 411 impl(size, get, jump, dispatch) 412 end) 413 end 414 415 macro llintOpWithProfile(name, op, fn) 416 llintOpWithMetadata(name, op, macro(size, get, dispatch, metadata, return) 417 makeReturnProfiled(op, get, metadata, dispatch, macro (returnProfiled) 418 fn(size, get, dispatch, returnProfiled) 419 end) 420 end) 421 end 422 345 423 346 424 if X86_64_WIN … … 395 473 396 474 # Bytecode operand constants. 397 const FirstConstantRegisterIndex = constexpr FirstConstantRegisterIndex 475 const FirstConstantRegisterIndexNarrow = 16 476 const FirstConstantRegisterIndexWide = constexpr FirstConstantRegisterIndex 398 477 399 478 # Code type constants. 
… … 631 710 subp CalleeSaveSpaceStackAligned, sp 632 711 if C_LOOP 712 storep metadataTable, -PtrSize[cfr] 633 713 elsif ARM or ARMv7_TRADITIONAL 634 714 elsif ARMv7 635 715 elsif ARM64 or ARM64E 636 716 emit "stp x27, x28, [x29, #-16]" 637 emit "stp x zr, x26, [x29, #-32]"717 emit "stp x25, x26, [x29, #-32]" 638 718 elsif MIPS 639 719 elsif X86 … … 643 723 storep csr3, -16[cfr] 644 724 storep csr2, -24[cfr] 725 storep csr1, -32[cfr] 645 726 elsif X86_64_WIN 646 727 storep csr6, -8[cfr] 647 728 storep csr5, -16[cfr] 648 729 storep csr4, -24[cfr] 730 storep csr3, -32[cfr] 649 731 end 650 732 end … … 652 734 macro restoreCalleeSavesUsedByLLInt() 653 735 if C_LOOP 736 loadp -PtrSize[cfr], metadataTable 654 737 elsif ARM or ARMv7_TRADITIONAL 655 738 elsif ARMv7 656 739 elsif ARM64 or ARM64E 657 emit "ldp x zr, x26, [x29, #-32]"740 emit "ldp x25, x26, [x29, #-32]" 658 741 emit "ldp x27, x28, [x29, #-16]" 659 742 elsif MIPS … … 661 744 elsif X86_WIN 662 745 elsif X86_64 746 loadp -32[cfr], csr1 663 747 loadp -24[cfr], csr2 664 748 loadp -16[cfr], csr3 665 749 loadp -8[cfr], csr4 666 750 elsif X86_64_WIN 751 loadp -32[cfr], csr3 667 752 loadp -24[cfr], csr4 668 753 loadp -16[cfr], csr5 … … 824 909 end 825 910 826 macro callTargetFunction( callee, callPtrTag)911 macro callTargetFunction(size, op, dispatch, callee, callPtrTag) 827 912 if C_LOOP 828 913 cloopCallJSFunction callee … … 831 916 end 832 917 restoreStackPointerAfterCall() 833 dispatchAfterCall( )918 dispatchAfterCall(size, op, dispatch) 834 919 end 835 920 … … 899 984 end 900 985 901 macro slowPathForCall(s lowPath, prepareCall)986 macro slowPathForCall(size, op, dispatch, slowPath, prepareCall) 902 987 callCallSlowPath( 903 988 slowPath, … … 908 993 prepareCall(callee, t2, t3, t4, SlowPathPtrTag) 909 994 .dontUpdateSP: 910 callTargetFunction( callee, SlowPathPtrTag)995 callTargetFunction(size, op, dispatch, callee, SlowPathPtrTag) 911 996 end) 912 997 end 913 998 914 macro arrayProfile( cellAndIndexingType, profile, scratch)999 macro arrayProfile(offset, cellAndIndexingType, metadata, scratch) 915 1000 const cell = cellAndIndexingType 916 1001 const indexingType = cellAndIndexingType 917 1002 loadi JSCell::m_structureID[cell], scratch 918 storei scratch, ArrayProfile::m_lastSeenStructureID[profile]1003 storei scratch, offset + ArrayProfile::m_lastSeenStructureID[metadata] 919 1004 loadb JSCell::m_indexingTypeAndMisc[cell], indexingType 920 1005 end … … 946 1031 end 947 1032 948 macro assertNotConstant(index) 949 assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end) 1033 macro assertNotConstant(size, index) 1034 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 1035 assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end) 1036 end) 950 1037 end 951 1038 … … 1038 1125 # Set up the PC. 
1039 1126 if JSVALUE64 1127 # FIXME: cleanup double load 1128 # https://bugs.webkit.org/show_bug.cgi?id=190932 1040 1129 loadp CodeBlock::m_instructions[t1], PB 1130 loadp [PB], PB 1041 1131 unpoison(_g_CodeBlockPoison, PB, t3) 1042 1132 move 0, PC 1043 1133 else 1134 # FIXME: cleanup double load 1135 # https://bugs.webkit.org/show_bug.cgi?id=190932 1044 1136 loadp CodeBlock::m_instructions[t1], PC 1137 loadp [PC], PC 1045 1138 end 1046 1139 … … 1094 1187 end 1095 1188 1189 if JSVALUE64 or C_LOOP 1190 # FIXME: cleanup double load 1191 # https://bugs.webkit.org/show_bug.cgi?id=190933 1192 loadp CodeBlock::m_metadata[t1], metadataTable 1193 loadp MetadataTable::m_buffer[metadataTable], metadataTable 1194 end 1096 1195 if JSVALUE64 1097 1196 move TagTypeNumber, tagTypeNumber … … 1236 1335 end 1237 1336 1238 # The PC base is in t 1, as this is what _llint_entry leaves behind through1239 # initPCRelative(t 1)1337 # The PC base is in t2, as this is what _llint_entry leaves behind through 1338 # initPCRelative(t2) 1240 1339 macro setEntryAddress(index, label) 1340 setEntryAddressCommon(index, label, a0) 1341 end 1342 1343 macro setEntryAddressWide(index, label) 1344 setEntryAddressCommon(index, label, a1) 1345 end 1346 1347 macro setEntryAddressCommon(index, label, map) 1241 1348 if X86_64 or X86_64_WIN 1242 leap (label - _relativePCBase)[t 1], t31349 leap (label - _relativePCBase)[t2], t3 1243 1350 move index, t4 1244 storep t3, [ a0, t4, 8]1351 storep t3, [map, t4, 8] 1245 1352 elsif X86 or X86_WIN 1246 leap (label - _relativePCBase)[t 1], t31353 leap (label - _relativePCBase)[t2], t3 1247 1354 move index, t4 1248 storep t3, [ a0, t4, 4]1355 storep t3, [map, t4, 4] 1249 1356 elsif ARM64 or ARM64E 1250 pcrtoaddr label, t 11357 pcrtoaddr label, t2 1251 1358 move index, t4 1252 storep t 1, [a0, t4, PtrSize]1359 storep t2, [map, t4, PtrSize] 1253 1360 elsif ARM or ARMv7 or ARMv7_TRADITIONAL 1254 1361 mvlbl (label - _relativePCBase), t4 1255 addp t4, t 1, t41362 addp t4, t2, t4 1256 1363 move index, t3 1257 storep t4, [ a0, t3, 4]1364 storep t4, [map, t3, 4] 1258 1365 elsif MIPS 1259 1366 la label, t4 1260 1367 la _relativePCBase, t3 1261 1368 subp t3, t4 1262 addp t4, t 1, t41369 addp t4, t2, t4 1263 1370 move index, t3 1264 storep t4, [ a0, t3, 4]1371 storep t4, [map, t3, 4] 1265 1372 end 1266 1373 end … … 1273 1380 if X86 or X86_WIN 1274 1381 loadp 20[sp], a0 1275 end 1276 initPCRelative(t1) 1382 loadp 24[sp], a1 1383 end 1384 1385 initPCRelative(t2) 1277 1386 1278 1387 # Include generated bytecode initialization file. 
… … 1284 1393 end 1285 1394 1286 _llint_program_prologue: 1395 _llint_op_wide: 1396 nextInstructionWide() 1397 1398 _llint_op_wide_wide: 1399 crash() 1400 1401 _llint_op_enter_wide: 1402 crash() 1403 1404 op(llint_program_prologue, macro () 1287 1405 prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) 1288 1406 dispatch(0) 1289 1290 1291 _llint_module_program_prologue: 1407 end) 1408 1409 1410 op(llint_module_program_prologue, macro () 1292 1411 prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) 1293 1412 dispatch(0) 1294 1295 1296 _llint_eval_prologue: 1413 end) 1414 1415 1416 op(llint_eval_prologue, macro () 1297 1417 prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) 1298 1418 dispatch(0) 1299 1300 1301 _llint_function_for_call_prologue: 1419 end) 1420 1421 1422 op(llint_function_for_call_prologue, macro () 1302 1423 prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call) 1303 1424 functionInitialization(0) 1304 1425 dispatch(0) 1426 end) 1305 1427 1306 1428 1307 _llint_function_for_construct_prologue: 1429 op(llint_function_for_construct_prologue, macro () 1308 1430 prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct) 1309 1431 functionInitialization(1) 1310 1432 dispatch(0) 1433 end) 1311 1434 1312 1435 1313 _llint_function_for_call_arity_check: 1436 op(llint_function_for_call_arity_check, macro () 1314 1437 prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call) 1315 1438 functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck) … … 1317 1440 functionInitialization(0) 1318 1441 dispatch(0) 1319 1320 1321 _llint_function_for_construct_arity_check: 1442 end) 1443 1444 1445 op(llint_function_for_construct_arity_check, macro () 1322 1446 prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct) 1323 1447 functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck) … … 1325 1449 functionInitialization(1) 1326 1450 dispatch(0) 1451 end) 1327 1452 1328 1453 … … 1336 1461 1337 1462 # Value-representation-agnostic code. 
1338 _llint_op_create_direct_arguments: 1339 traceExecution() 1340 callSlowPath(_slow_path_create_direct_arguments) 1341 dispatch(constexpr op_create_direct_arguments_length) 1342 1343 1344 _llint_op_create_scoped_arguments: 1345 traceExecution() 1346 callSlowPath(_slow_path_create_scoped_arguments) 1347 dispatch(constexpr op_create_scoped_arguments_length) 1348 1349 1350 _llint_op_create_cloned_arguments: 1351 traceExecution() 1352 callSlowPath(_slow_path_create_cloned_arguments) 1353 dispatch(constexpr op_create_cloned_arguments_length) 1354 1355 1356 _llint_op_create_this: 1357 traceExecution() 1358 callSlowPath(_slow_path_create_this) 1359 dispatch(constexpr op_create_this_length) 1360 1361 1362 _llint_op_new_object: 1363 traceExecution() 1364 callSlowPath(_llint_slow_path_new_object) 1365 dispatch(constexpr op_new_object_length) 1366 1367 1368 _llint_op_new_func: 1369 traceExecution() 1370 callSlowPath(_llint_slow_path_new_func) 1371 dispatch(constexpr op_new_func_length) 1372 1373 1374 _llint_op_new_generator_func: 1375 traceExecution() 1376 callSlowPath(_llint_slow_path_new_generator_func) 1377 dispatch(constexpr op_new_generator_func_length) 1378 1379 _llint_op_new_async_generator_func: 1380 traceExecution() 1381 callSlowPath(_llint_slow_path_new_async_generator_func) 1382 dispatch(constexpr op_new_async_generator_func_length) 1383 1384 _llint_op_new_async_generator_func_exp: 1385 traceExecution() 1386 callSlowPath(_llint_slow_path_new_async_generator_func_exp) 1387 dispatch(constexpr op_new_async_generator_func_exp_length) 1388 1389 _llint_op_new_async_func: 1390 traceExecution() 1391 callSlowPath(_llint_slow_path_new_async_func) 1392 dispatch(constexpr op_new_async_func_length) 1393 1394 1395 _llint_op_new_array: 1396 traceExecution() 1397 callSlowPath(_llint_slow_path_new_array) 1398 dispatch(constexpr op_new_array_length) 1399 1400 1401 _llint_op_new_array_with_spread: 1402 traceExecution() 1403 callSlowPath(_slow_path_new_array_with_spread) 1404 dispatch(constexpr op_new_array_with_spread_length) 1405 1406 1407 _llint_op_spread: 1408 traceExecution() 1409 callSlowPath(_slow_path_spread) 1410 dispatch(constexpr op_spread_length) 1411 1412 1413 _llint_op_new_array_with_size: 1414 traceExecution() 1415 callSlowPath(_llint_slow_path_new_array_with_size) 1416 dispatch(constexpr op_new_array_with_size_length) 1417 1418 1419 _llint_op_new_array_buffer: 1420 traceExecution() 1421 callSlowPath(_slow_path_new_array_buffer) 1422 dispatch(constexpr op_new_array_buffer_length) 1423 1424 1425 _llint_op_new_regexp: 1426 traceExecution() 1427 callSlowPath(_llint_slow_path_new_regexp) 1428 dispatch(constexpr op_new_regexp_length) 1429 1430 1431 _llint_op_less: 1432 traceExecution() 1433 callSlowPath(_slow_path_less) 1434 dispatch(constexpr op_less_length) 1435 1436 1437 _llint_op_lesseq: 1438 traceExecution() 1439 callSlowPath(_slow_path_lesseq) 1440 dispatch(constexpr op_lesseq_length) 1441 1442 1443 _llint_op_greater: 1444 traceExecution() 1445 callSlowPath(_slow_path_greater) 1446 dispatch(constexpr op_greater_length) 1447 1448 1449 _llint_op_greatereq: 1450 traceExecution() 1451 callSlowPath(_slow_path_greatereq) 1452 dispatch(constexpr op_greatereq_length) 1453 1454 1455 _llint_op_eq: 1456 traceExecution() 1457 equalityComparison( 1458 macro (left, right, result) cieq left, right, result end, 1459 _slow_path_eq) 1460 1461 1462 _llint_op_neq: 1463 traceExecution() 1464 equalityComparison( 1465 macro (left, right, result) cineq left, right, result end, 1466 _slow_path_neq) 1467 1468 1469 
_llint_op_below: 1470 traceExecution() 1471 compareUnsigned( 1463 macro slowPathOp(op) 1464 llintOp(op_%op%, unused, macro (unused, unused, dispatch) 1465 callSlowPath(_slow_path_%op%) 1466 dispatch() 1467 end) 1468 end 1469 1470 slowPathOp(create_cloned_arguments) 1471 slowPathOp(create_direct_arguments) 1472 slowPathOp(create_lexical_environment) 1473 slowPathOp(create_rest) 1474 slowPathOp(create_scoped_arguments) 1475 slowPathOp(create_this) 1476 slowPathOp(define_accessor_property) 1477 slowPathOp(define_data_property) 1478 slowPathOp(enumerator_generic_pname) 1479 slowPathOp(enumerator_structure_pname) 1480 slowPathOp(get_by_id_with_this) 1481 slowPathOp(get_by_val_with_this) 1482 slowPathOp(get_direct_pname) 1483 slowPathOp(get_enumerable_length) 1484 slowPathOp(get_property_enumerator) 1485 slowPathOp(greater) 1486 slowPathOp(greatereq) 1487 slowPathOp(has_generic_property) 1488 slowPathOp(has_indexed_property) 1489 slowPathOp(has_structure_property) 1490 slowPathOp(in_by_id) 1491 slowPathOp(in_by_val) 1492 slowPathOp(is_function) 1493 slowPathOp(is_object_or_null) 1494 slowPathOp(less) 1495 slowPathOp(lesseq) 1496 slowPathOp(mod) 1497 slowPathOp(new_array_buffer) 1498 slowPathOp(new_array_with_spread) 1499 slowPathOp(pow) 1500 slowPathOp(push_with_scope) 1501 slowPathOp(put_by_id_with_this) 1502 slowPathOp(put_by_val_with_this) 1503 slowPathOp(resolve_scope_for_hoisting_func_decl_in_eval) 1504 slowPathOp(spread) 1505 slowPathOp(strcat) 1506 slowPathOp(throw_static_error) 1507 slowPathOp(to_index_string) 1508 slowPathOp(typeof) 1509 slowPathOp(unreachable) 1510 1511 macro llintSlowPathOp(op) 1512 llintOp(op_%op%, unused, macro (unused, unused, dispatch) 1513 callSlowPath(_llint_slow_path_%op%) 1514 dispatch() 1515 end) 1516 end 1517 1518 llintSlowPathOp(del_by_id) 1519 llintSlowPathOp(del_by_val) 1520 llintSlowPathOp(instanceof) 1521 llintSlowPathOp(instanceof_custom) 1522 llintSlowPathOp(new_array) 1523 llintSlowPathOp(new_array_with_size) 1524 llintSlowPathOp(new_async_func) 1525 llintSlowPathOp(new_async_func_exp) 1526 llintSlowPathOp(new_async_generator_func) 1527 llintSlowPathOp(new_async_generator_func_exp) 1528 llintSlowPathOp(new_func) 1529 llintSlowPathOp(new_func_exp) 1530 llintSlowPathOp(new_generator_func) 1531 llintSlowPathOp(new_generator_func_exp) 1532 llintSlowPathOp(new_object) 1533 llintSlowPathOp(new_regexp) 1534 llintSlowPathOp(put_getter_by_id) 1535 llintSlowPathOp(put_getter_by_val) 1536 llintSlowPathOp(put_getter_setter_by_id) 1537 llintSlowPathOp(put_setter_by_id) 1538 llintSlowPathOp(put_setter_by_val) 1539 llintSlowPathOp(set_function_name) 1540 llintSlowPathOp(super_sampler_begin) 1541 llintSlowPathOp(super_sampler_end) 1542 llintSlowPathOp(throw) 1543 llintSlowPathOp(try_get_by_id) 1544 1545 llintOp(op_switch_string, unused, macro (unused, unused, unused) 1546 callSlowPath(_llint_slow_path_switch_string) 1547 nextInstruction() 1548 end) 1549 1550 1551 equalityComparisonOp(eq, OpEq, 1552 macro (left, right, result) cieq left, right, result end) 1553 1554 1555 equalityComparisonOp(neq, OpNeq, 1556 macro (left, right, result) cineq left, right, result end) 1557 1558 1559 compareUnsignedOp(below, OpBelow, 1472 1560 macro (left, right, result) cib left, right, result end) 1473 1561 1474 1562 1475 _llint_op_beloweq: 1476 traceExecution() 1477 compareUnsigned( 1563 compareUnsignedOp(beloweq, OpBeloweq, 1478 1564 macro (left, right, result) cibeq left, right, result end) 1479 1565 1480 1566 1481 _llint_op_mod: 1482 traceExecution() 1483 
callSlowPath(_slow_path_mod) 1484 dispatch(constexpr op_mod_length) 1485 1486 1487 _llint_op_pow: 1488 traceExecution() 1489 callSlowPath(_slow_path_pow) 1490 dispatch(constexpr op_pow_length) 1491 1492 1493 _llint_op_typeof: 1494 traceExecution() 1495 callSlowPath(_slow_path_typeof) 1496 dispatch(constexpr op_typeof_length) 1497 1498 1499 _llint_op_is_object_or_null: 1500 traceExecution() 1501 callSlowPath(_slow_path_is_object_or_null) 1502 dispatch(constexpr op_is_object_or_null_length) 1503 1504 _llint_op_is_function: 1505 traceExecution() 1506 callSlowPath(_slow_path_is_function) 1507 dispatch(constexpr op_is_function_length) 1508 1509 1510 _llint_op_in_by_id: 1511 traceExecution() 1512 callSlowPath(_slow_path_in_by_id) 1513 dispatch(constexpr op_in_by_id_length) 1514 1515 1516 _llint_op_in_by_val: 1517 traceExecution() 1518 callSlowPath(_slow_path_in_by_val) 1519 dispatch(constexpr op_in_by_val_length) 1520 1521 1522 _llint_op_try_get_by_id: 1523 traceExecution() 1524 callSlowPath(_llint_slow_path_try_get_by_id) 1525 dispatch(constexpr op_try_get_by_id_length) 1526 1527 1528 _llint_op_del_by_id: 1529 traceExecution() 1530 callSlowPath(_llint_slow_path_del_by_id) 1531 dispatch(constexpr op_del_by_id_length) 1532 1533 1534 _llint_op_del_by_val: 1535 traceExecution() 1536 callSlowPath(_llint_slow_path_del_by_val) 1537 dispatch(constexpr op_del_by_val_length) 1538 1539 1540 _llint_op_put_getter_by_id: 1541 traceExecution() 1542 callSlowPath(_llint_slow_path_put_getter_by_id) 1543 dispatch(constexpr op_put_getter_by_id_length) 1544 1545 1546 _llint_op_put_setter_by_id: 1547 traceExecution() 1548 callSlowPath(_llint_slow_path_put_setter_by_id) 1549 dispatch(constexpr op_put_setter_by_id_length) 1550 1551 1552 _llint_op_put_getter_setter_by_id: 1553 traceExecution() 1554 callSlowPath(_llint_slow_path_put_getter_setter_by_id) 1555 dispatch(constexpr op_put_getter_setter_by_id_length) 1556 1557 1558 _llint_op_put_getter_by_val: 1559 traceExecution() 1560 callSlowPath(_llint_slow_path_put_getter_by_val) 1561 dispatch(constexpr op_put_getter_by_val_length) 1562 1563 1564 _llint_op_put_setter_by_val: 1565 traceExecution() 1566 callSlowPath(_llint_slow_path_put_setter_by_val) 1567 dispatch(constexpr op_put_setter_by_val_length) 1568 1569 1570 _llint_op_define_data_property: 1571 traceExecution() 1572 callSlowPath(_slow_path_define_data_property) 1573 dispatch(constexpr op_define_data_property_length) 1574 1575 1576 _llint_op_define_accessor_property: 1577 traceExecution() 1578 callSlowPath(_slow_path_define_accessor_property) 1579 dispatch(constexpr op_define_accessor_property_length) 1580 1581 1582 _llint_op_jtrue: 1583 traceExecution() 1584 jumpTrueOrFalse( 1585 macro (value, target) btinz value, 1, target end, 1586 _llint_slow_path_jtrue) 1587 1588 1589 _llint_op_jfalse: 1590 traceExecution() 1591 jumpTrueOrFalse( 1592 macro (value, target) btiz value, 1, target end, 1593 _llint_slow_path_jfalse) 1594 1595 1596 _llint_op_jless: 1597 traceExecution() 1598 compareJump( 1599 macro (left, right, target) bilt left, right, target end, 1600 macro (left, right, target) bdlt left, right, target end, 1601 _llint_slow_path_jless) 1602 1603 1604 _llint_op_jnless: 1605 traceExecution() 1606 compareJump( 1607 macro (left, right, target) bigteq left, right, target end, 1608 macro (left, right, target) bdgtequn left, right, target end, 1609 _llint_slow_path_jnless) 1610 1611 1612 _llint_op_jgreater: 1613 traceExecution() 1614 compareJump( 1615 macro (left, right, target) bigt left, right, target end, 1616 
macro (left, right, target) bdgt left, right, target end, 1617 _llint_slow_path_jgreater) 1618 1619 1620 _llint_op_jngreater: 1621 traceExecution() 1622 compareJump( 1623 macro (left, right, target) bilteq left, right, target end, 1624 macro (left, right, target) bdltequn left, right, target end, 1625 _llint_slow_path_jngreater) 1626 1627 1628 _llint_op_jlesseq: 1629 traceExecution() 1630 compareJump( 1631 macro (left, right, target) bilteq left, right, target end, 1632 macro (left, right, target) bdlteq left, right, target end, 1633 _llint_slow_path_jlesseq) 1634 1635 1636 _llint_op_jnlesseq: 1637 traceExecution() 1638 compareJump( 1639 macro (left, right, target) bigt left, right, target end, 1640 macro (left, right, target) bdgtun left, right, target end, 1641 _llint_slow_path_jnlesseq) 1642 1643 1644 _llint_op_jgreatereq: 1645 traceExecution() 1646 compareJump( 1647 macro (left, right, target) bigteq left, right, target end, 1648 macro (left, right, target) bdgteq left, right, target end, 1649 _llint_slow_path_jgreatereq) 1650 1651 1652 _llint_op_jngreatereq: 1653 traceExecution() 1654 compareJump( 1655 macro (left, right, target) bilt left, right, target end, 1656 macro (left, right, target) bdltun left, right, target end, 1657 _llint_slow_path_jngreatereq) 1658 1659 1660 _llint_op_jeq: 1661 traceExecution() 1662 equalityJump( 1663 macro (left, right, target) bieq left, right, target end, 1664 _llint_slow_path_jeq) 1665 1666 1667 _llint_op_jneq: 1668 traceExecution() 1669 equalityJump( 1670 macro (left, right, target) bineq left, right, target end, 1671 _llint_slow_path_jneq) 1672 1673 1674 _llint_op_jbelow: 1675 traceExecution() 1676 compareUnsignedJump( 1677 macro (left, right, target) bib left, right, target end) 1678 1679 1680 _llint_op_jbeloweq: 1681 traceExecution() 1682 compareUnsignedJump( 1683 macro (left, right, target) bibeq left, right, target end) 1684 1685 1686 _llint_op_loop_hint: 1687 traceExecution() 1567 llintOpWithJump(op_jmp, OpJmp, macro (size, get, jump, dispatch) 1568 jump(target) 1569 end) 1570 1571 1572 llintJumpTrueOrFalseOp( 1573 jtrue, OpJtrue, 1574 macro (value, target) btinz value, 1, target end) 1575 1576 1577 llintJumpTrueOrFalseOp( 1578 jfalse, OpJfalse, 1579 macro (value, target) btiz value, 1, target end) 1580 1581 1582 compareJumpOp( 1583 jless, OpJless, 1584 macro (left, right, target) bilt left, right, target end, 1585 macro (left, right, target) bdlt left, right, target end) 1586 1587 1588 compareJumpOp( 1589 jnless, OpJnless, 1590 macro (left, right, target) bigteq left, right, target end, 1591 macro (left, right, target) bdgtequn left, right, target end) 1592 1593 1594 compareJumpOp( 1595 jgreater, OpJgreater, 1596 macro (left, right, target) bigt left, right, target end, 1597 macro (left, right, target) bdgt left, right, target end) 1598 1599 1600 compareJumpOp( 1601 jngreater, OpJngreater, 1602 macro (left, right, target) bilteq left, right, target end, 1603 macro (left, right, target) bdltequn left, right, target end) 1604 1605 1606 compareJumpOp( 1607 jlesseq, OpJlesseq, 1608 macro (left, right, target) bilteq left, right, target end, 1609 macro (left, right, target) bdlteq left, right, target end) 1610 1611 1612 compareJumpOp( 1613 jnlesseq, OpJnlesseq, 1614 macro (left, right, target) bigt left, right, target end, 1615 macro (left, right, target) bdgtun left, right, target end) 1616 1617 1618 compareJumpOp( 1619 jgreatereq, OpJgreatereq, 1620 macro (left, right, target) bigteq left, right, target end, 1621 macro (left, right, target) 
bdgteq left, right, target end) 1622 1623 1624 compareJumpOp( 1625 jngreatereq, OpJngreatereq, 1626 macro (left, right, target) bilt left, right, target end, 1627 macro (left, right, target) bdltun left, right, target end) 1628 1629 1630 equalityJumpOp( 1631 jeq, OpJeq, 1632 macro (left, right, target) bieq left, right, target end) 1633 1634 1635 equalityJumpOp( 1636 jneq, OpJneq, 1637 macro (left, right, target) bineq left, right, target end) 1638 1639 1640 compareUnsignedJumpOp( 1641 jbelow, OpJbelow, 1642 macro (left, right, target) bib left, right, target end) 1643 1644 1645 compareUnsignedJumpOp( 1646 jbeloweq, OpJbeloweq, 1647 macro (left, right, target) bibeq left, right, target end) 1648 1649 1650 preOp(inc, OpInc, 1651 macro (value, slow) baddio 1, value, slow end) 1652 1653 1654 preOp(dec, OpDec, 1655 macro (value, slow) bsubio 1, value, slow end) 1656 1657 1658 llintOp(op_loop_hint, OpLoopHint, macro (unused, unused, dispatch) 1688 1659 checkSwitchToJITForLoop() 1689 dispatch( constexpr op_loop_hint_length)1690 1691 1692 _llint_op_check_traps: 1693 traceExecution()1660 dispatch() 1661 end) 1662 1663 1664 llintOp(op_check_traps, OpCheckTraps, macro (unused, unused, dispatch) 1694 1665 loadp CodeBlock[cfr], t1 1695 1666 loadp CodeBlock::m_poisonedVM[t1], t1 … … 1698 1669 btpnz t0, .handleTraps 1699 1670 .afterHandlingTraps: 1700 dispatch( constexpr op_check_traps_length)1671 dispatch() 1701 1672 .handleTraps: 1702 1673 callTrapHandler(.throwHandler) … … 1704 1675 .throwHandler: 1705 1676 jmp _llint_throw_from_slow_path_trampoline 1677 end) 1706 1678 1707 1679 … … 1719 1691 1720 1692 1721 _llint_op_nop: 1722 dispatch(constexpr op_nop_length) 1723 1724 1725 _llint_op_super_sampler_begin: 1726 callSlowPath(_llint_slow_path_super_sampler_begin) 1727 dispatch(constexpr op_super_sampler_begin_length) 1728 1729 1730 _llint_op_super_sampler_end: 1731 traceExecution() 1732 callSlowPath(_llint_slow_path_super_sampler_end) 1733 dispatch(constexpr op_super_sampler_end_length) 1734 1735 1736 _llint_op_switch_string: 1737 traceExecution() 1738 callSlowPath(_llint_slow_path_switch_string) 1739 dispatch(0) 1740 1741 1742 _llint_op_new_func_exp: 1743 traceExecution() 1744 callSlowPath(_llint_slow_path_new_func_exp) 1745 dispatch(constexpr op_new_func_exp_length) 1746 1747 _llint_op_new_generator_func_exp: 1748 traceExecution() 1749 callSlowPath(_llint_slow_path_new_generator_func_exp) 1750 dispatch(constexpr op_new_generator_func_exp_length) 1751 1752 _llint_op_new_async_func_exp: 1753 traceExecution() 1754 callSlowPath(_llint_slow_path_new_async_func_exp) 1755 dispatch(constexpr op_new_async_func_exp_length) 1756 1757 1758 _llint_op_set_function_name: 1759 traceExecution() 1760 callSlowPath(_llint_slow_path_set_function_name) 1761 dispatch(constexpr op_set_function_name_length) 1762 1763 _llint_op_call: 1764 traceExecution() 1765 arrayProfileForCall() 1766 doCall(_llint_slow_path_call, prepareForRegularCall) 1767 1768 _llint_op_tail_call: 1769 traceExecution() 1770 arrayProfileForCall() 1693 llintOp(op_nop, OpNop, macro (unused, unused, dispatch) 1694 dispatch() 1695 end) 1696 1697 1698 # we can't use callOp because we can't pass `call` as the opcode name, since it's an instruction name 1699 commonCallOp(op_call, _llint_slow_path_call, OpCall, prepareForRegularCall, macro (getu, metadata) 1700 arrayProfileForCall(OpCall, getu) 1701 end) 1702 1703 1704 macro callOp(name, op, prepareCall, fn) 1705 commonCallOp(op_%name%, _llint_slow_path_%name%, op, prepareCall, fn) 1706 end 1707 1708 1709 
callOp(tail_call, OpTailCall, prepareForTailCall, macro (getu, metadata)
1710 arrayProfileForCall(OpTailCall, getu)
1771 1711 checkSwitchToJITForEpilogue()
1772 doCall(_llint_slow_path_call, prepareForTailCall)
1773
1774 _llint_op_construct:
1775 traceExecution()
1776 doCall(_llint_slow_path_construct, prepareForRegularCall)
1777
1778 macro doCallVarargs(frameSlowPath, slowPath, prepareCall)
1712 # reload metadata since checkSwitchToJITForEpilogue() might have trashed t5
1713 metadata(t5, t0)
1714 end)
1715
1716
1717 callOp(construct, OpConstruct, prepareForRegularCall, macro (getu, metadata) end)
1718
1719
1720 macro doCallVarargs(size, op, dispatch, frameSlowPath, slowPath, prepareCall)
1779 1721 callSlowPath(frameSlowPath)
1780 1722 branchIfException(_llint_throw_from_slow_path_trampoline)
… 1791 1733 end
1792 1734 end
1793 1735 slowPathForCall(slowPath, prepareCall)
1794 end
1795
1796 _llint_op_call_varargs:
1797 traceExecution()
1798 doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall)
1799
1800 _llint_op_tail_call_varargs:
1801 traceExecution()
1735 slowPathForCall(size, op, dispatch, slowPath, prepareCall)
1736 end
1737
1738
1739 llintOp(op_call_varargs, OpCallVarargs, macro (size, get, dispatch)
1740 doCallVarargs(size, OpCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForRegularCall)
1741 end)
1742
1743 llintOp(op_tail_call_varargs, OpTailCallVarargs, macro (size, get, dispatch)
1802 1744 checkSwitchToJITForEpilogue()
1803 1745 # We lie and perform the tail call instead of preparing it since we can't
1804 1746 # prepare the frame for a call opcode
1805 doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_call_varargs, prepareForTailCall)
1806
1807
1808 _llint_op_tail_call_forward_arguments:
1809 traceExecution()
1747 doCallVarargs(size, OpTailCallVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_tail_call_varargs, prepareForTailCall)
1748 end)
1749
1750
1751 llintOp(op_tail_call_forward_arguments, OpTailCallForwardArguments, macro (size, get, dispatch)
1810 1752 checkSwitchToJITForEpilogue()
1811 1753 # We lie and perform the tail call instead of preparing it since we can't
1812 1754 # prepare the frame for a call opcode
1813 doCallVarargs(_llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall)
1814
1815
1816 _llint_op_construct_varargs:
1817 traceExecution()
1818 doCallVarargs(_llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall)
1819
1755 doCallVarargs(size, OpTailCallForwardArguments, dispatch, _llint_slow_path_size_frame_for_forward_arguments, _llint_slow_path_tail_call_forward_arguments, prepareForTailCall)
1756 end)
1757
1758
1759 llintOp(op_construct_varargs, OpConstructVarargs, macro (size, get, dispatch)
1760 doCallVarargs(size, OpConstructVarargs, dispatch, _llint_slow_path_size_frame_for_varargs, _llint_slow_path_construct_varargs, prepareForRegularCall)
1761 end)
1762
1763
1764 # Eval is executed in one of two modes:
1765 #
1766 # 1) We find that we're really invoking eval() in which case the
1767 # execution is performed entirely inside the slow_path, and it
1768 # returns the PC of a function that just returns the return value
1769 # that the eval returned.
1770 #
1771 # 2) We find that we're invoking something called eval() that is not
1772 # the real eval. Then the slow_path returns the PC of the thing to
1773 # call, and we call it.
1774 #
1775 # This allows us to handle two cases, which would require a total of
1776 # up to four pieces of state that cannot be easily packed into two
1777 # registers (C functions can return up to two registers, easily):
1778 #
1779 # - The call frame register. This may or may not have been modified
1780 # by the slow_path, but the convention is that it returns it. It's not
1781 # totally clear if that's necessary, since the cfr is callee save.
1782 # But that's our style in this here interpreter so we stick with it.
1783 #
1784 # - A bit to say if the slow_path successfully executed the eval and has
1785 # the return value, or did not execute the eval but has a PC for us
1786 # to call.
1787 #
1788 # - Either:
1789 # - The JS return value (two registers), or
1790 #
1791 # - The PC to call.
1792 #
1793 # It turns out to be easier to just always have this return the cfr
1794 # and a PC to call, and that PC may be a dummy thunk that just
1795 # returns the JS value that the eval returned.
1820 1796
1821 1797 _llint_op_call_eval:
1822 traceExecution()
1823
1824 # Eval is executed in one of two modes:
1825 #
1826 # 1) We find that we're really invoking eval() in which case the
1827 # execution is performed entirely inside the slow_path, and it
1828 # returns the PC of a function that just returns the return value
1829 # that the eval returned.
1830 #
1831 # 2) We find that we're invoking something called eval() that is not
1832 # the real eval. Then the slow_path returns the PC of the thing to
1833 # call, and we call it.
1834 #
1835 # This allows us to handle two cases, which would require a total of
1836 # up to four pieces of state that cannot be easily packed into two
1837 # registers (C functions can return up to two registers, easily):
1838 #
1839 # - The call frame register. This may or may not have been modified
1840 # by the slow_path, but the convention is that it returns it. It's not
1841 # totally clear if that's necessary, since the cfr is callee save.
1842 # But that's our style in this here interpreter so we stick with it.
1843 #
1844 # - A bit to say if the slow_path successfully executed the eval and has
1845 # the return value, or did not execute the eval but has a PC for us
1846 # to call.
1847 #
1848 # - Either:
1849 # - The JS return value (two registers), or
1850 #
1851 # - The PC to call.
1852 #
1853 # It turns out to be easier to just always have this return the cfr
1854 # and a PC to call, and that PC may be a dummy thunk that just
1855 # returns the JS value that the eval returned.
1856 1857 slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall) 1858 1798 slowPathForCall( 1799 narrow, 1800 OpCallEval, 1801 macro () dispatchOp(narrow, op_call_eval) end, 1802 _llint_slow_path_call_eval, 1803 prepareForRegularCall) 1804 1805 _llint_op_call_eval_wide: 1806 slowPathForCall( 1807 wide, 1808 OpCallEval, 1809 macro () dispatchOp(wide, op_call_eval) end, 1810 _llint_slow_path_call_eval_wide, 1811 prepareForRegularCall) 1859 1812 1860 1813 _llint_generic_return_point: 1861 dispatchAfterCall() 1862 1863 1864 _llint_op_strcat: 1865 traceExecution() 1866 callSlowPath(_slow_path_strcat) 1867 dispatch(constexpr op_strcat_length) 1868 1869 1870 _llint_op_push_with_scope: 1871 traceExecution() 1872 callSlowPath(_slow_path_push_with_scope) 1873 dispatch(constexpr op_push_with_scope_length) 1874 1875 1876 _llint_op_identity_with_profile: 1877 traceExecution() 1878 dispatch(constexpr op_identity_with_profile_length) 1879 1880 1881 _llint_op_unreachable: 1882 traceExecution() 1883 callSlowPath(_slow_path_unreachable) 1884 dispatch(constexpr op_unreachable_length) 1885 1886 1887 _llint_op_yield: 1814 dispatchAfterCall(narrow, OpCallEval, macro () 1815 dispatchOp(narrow, op_call_eval) 1816 end) 1817 1818 _llint_generic_return_point_wide: 1819 dispatchAfterCall(wide, OpCallEval, macro() 1820 dispatchOp(wide, op_call_eval) 1821 end) 1822 1823 llintOp(op_identity_with_profile, OpIdentityWithProfile, macro (unused, unused, dispatch) 1824 dispatch() 1825 end) 1826 1827 1828 llintOp(op_yield, OpYield, macro (unused, unused, unused) 1888 1829 notSupported() 1889 1890 1891 _llint_op_create_lexical_environment: 1892 traceExecution() 1893 callSlowPath(_slow_path_create_lexical_environment) 1894 dispatch(constexpr op_create_lexical_environment_length) 1895 1896 1897 _llint_op_throw: 1898 traceExecution() 1899 callSlowPath(_llint_slow_path_throw) 1900 dispatch(constexpr op_throw_length) 1901 1902 1903 _llint_op_throw_static_error: 1904 traceExecution() 1905 callSlowPath(_slow_path_throw_static_error) 1906 dispatch(constexpr op_throw_static_error_length) 1907 1908 1909 _llint_op_debug: 1910 traceExecution() 1830 end) 1831 1832 1833 llintOp(op_debug, OpDebug, macro (unused, unused, dispatch) 1911 1834 loadp CodeBlock[cfr], t0 1912 1835 loadi CodeBlock::m_debuggerRequests[t0], t0 … … 1914 1837 callSlowPath(_llint_slow_path_debug) 1915 1838 .opDebugDone: 1916 dispatch(constexpr op_debug_length) 1917 1918 1919 _llint_native_call_trampoline: 1839 dispatch() 1840 end) 1841 1842 1843 op(llint_native_call_trampoline, macro () 1920 1844 nativeCallTrampoline(NativeExecutable::m_function) 1921 1922 1923 _llint_native_construct_trampoline: 1845 end) 1846 1847 1848 op(llint_native_construct_trampoline, macro () 1924 1849 nativeCallTrampoline(NativeExecutable::m_constructor) 1925 1926 1927 _llint_internal_function_call_trampoline: 1850 end) 1851 1852 1853 op(llint_internal_function_call_trampoline, macro () 1928 1854 internalFunctionCallTrampoline(InternalFunction::m_functionForCall) 1929 1930 1931 _llint_internal_function_construct_trampoline: 1855 end) 1856 1857 1858 op(llint_internal_function_construct_trampoline, macro () 1932 1859 internalFunctionCallTrampoline(InternalFunction::m_functionForConstruct) 1933 1934 1935 _llint_op_get_enumerable_length: 1936 traceExecution() 1937 callSlowPath(_slow_path_get_enumerable_length) 1938 dispatch(constexpr op_get_enumerable_length_length) 1939 1940 _llint_op_has_indexed_property: 1941 traceExecution() 1942 callSlowPath(_slow_path_has_indexed_property) 1943 
dispatch(constexpr op_has_indexed_property_length) 1944 1945 _llint_op_has_structure_property: 1946 traceExecution() 1947 callSlowPath(_slow_path_has_structure_property) 1948 dispatch(constexpr op_has_structure_property_length) 1949 1950 _llint_op_has_generic_property: 1951 traceExecution() 1952 callSlowPath(_slow_path_has_generic_property) 1953 dispatch(constexpr op_has_generic_property_length) 1954 1955 _llint_op_get_direct_pname: 1956 traceExecution() 1957 callSlowPath(_slow_path_get_direct_pname) 1958 dispatch(constexpr op_get_direct_pname_length) 1959 1960 _llint_op_get_property_enumerator: 1961 traceExecution() 1962 callSlowPath(_slow_path_get_property_enumerator) 1963 dispatch(constexpr op_get_property_enumerator_length) 1964 1965 _llint_op_enumerator_structure_pname: 1966 traceExecution() 1967 callSlowPath(_slow_path_next_structure_enumerator_pname) 1968 dispatch(constexpr op_enumerator_structure_pname_length) 1969 1970 _llint_op_enumerator_generic_pname: 1971 traceExecution() 1972 callSlowPath(_slow_path_next_generic_enumerator_pname) 1973 dispatch(constexpr op_enumerator_generic_pname_length) 1974 1975 _llint_op_to_index_string: 1976 traceExecution() 1977 callSlowPath(_slow_path_to_index_string) 1978 dispatch(constexpr op_to_index_string_length) 1979 1980 _llint_op_create_rest: 1981 traceExecution() 1982 callSlowPath(_slow_path_create_rest) 1983 dispatch(constexpr op_create_rest_length) 1984 1985 _llint_op_instanceof: 1986 traceExecution() 1987 callSlowPath(_llint_slow_path_instanceof) 1988 dispatch(constexpr op_instanceof_length) 1989 1990 _llint_op_get_by_id_with_this: 1991 traceExecution() 1992 callSlowPath(_slow_path_get_by_id_with_this) 1993 dispatch(constexpr op_get_by_id_with_this_length) 1994 1995 _llint_op_get_by_val_with_this: 1996 traceExecution() 1997 callSlowPath(_slow_path_get_by_val_with_this) 1998 dispatch(constexpr op_get_by_val_with_this_length) 1999 2000 _llint_op_put_by_id_with_this: 2001 traceExecution() 2002 callSlowPath(_slow_path_put_by_id_with_this) 2003 dispatch(constexpr op_put_by_id_with_this_length) 2004 2005 _llint_op_put_by_val_with_this: 2006 traceExecution() 2007 callSlowPath(_slow_path_put_by_val_with_this) 2008 dispatch(constexpr op_put_by_val_with_this_length) 2009 2010 _llint_op_resolve_scope_for_hoisting_func_decl_in_eval: 2011 traceExecution() 2012 callSlowPath(_slow_path_resolve_scope_for_hoisting_func_decl_in_eval) 2013 dispatch(constexpr op_resolve_scope_for_hoisting_func_decl_in_eval_length) 1860 end) 1861 2014 1862 2015 1863 # Lastly, make sure that we can link even though we don't support all opcodes. -
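The metadata(size, opcode, dst, scratch) macro earlier in this file resolves a per-instruction Metadata record in two steps: read an unsigned offset for the opcode out of the table header, then add metadataID * sizeof(Op::Metadata). Here is a rough C++ model of that lookup; the Metadata payload, table construction, and sizes below are toy assumptions, since the real layout is owned by MetadataTable.

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

struct Metadata { uint32_t hitCount; }; // invented payload for the sketch

// Models: dst = metadataTable[opcodeID]           (an unsigned byte offset)
//         dst += metadataID * sizeof(Op::Metadata)
//         return &metadataTable[dst]
uint8_t* metadataFor(uint8_t* table, unsigned opcodeID, unsigned metadataID)
{
    unsigned offset;
    std::memcpy(&offset, table + opcodeID * sizeof(unsigned), sizeof(unsigned));
    offset += metadataID * sizeof(Metadata);
    return table + offset;
}

int main()
{
    // Toy table: one opcode (ID 0) whose records start right after the
    // one-entry header, with room for two Metadata records.
    std::vector<uint8_t> table(sizeof(unsigned) + 2 * sizeof(Metadata), 0);
    unsigned firstRecord = sizeof(unsigned);
    std::memcpy(table.data(), &firstRecord, sizeof(unsigned));

    Metadata m;
    std::memcpy(&m, metadataFor(table.data(), /*opcodeID*/ 0, /*metadataID*/ 1), sizeof(Metadata));
    m.hitCount++;
    std::cout << "hit count for instance 1: " << m.hitCount << "\n";
    return 0;
}

In the interpreter, the returned pointer is where value profiles, array profiles, and watchpoint sets now live, which is how a narrow bytecode gets away with a small metadataID operand instead of carrying inline pointers.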
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
r237486 r237547 109 109 #define OFFLINE_ASM_GLOBAL_LABEL(label) label: USE_LABEL(label); 110 110 111 #if ENABLE(LABEL_TRACING) 112 #define TRACE_LABEL(prefix, label) dataLog(#prefix, ": ", #label, "\n") 113 #else 114 #define TRACE_LABEL(prefix, label) do { } while (false); 115 #endif 116 117 111 118 #if ENABLE(COMPUTED_GOTO_OPCODES) 112 #define OFFLINE_ASM_GLUE_LABEL(label) label:USE_LABEL(label);119 #define OFFLINE_ASM_GLUE_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_GLUE_LABEL", label); USE_LABEL(label); 113 120 #else 114 121 #define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label); 115 122 #endif 116 123 117 #define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);124 #define OFFLINE_ASM_LOCAL_LABEL(label) label: TRACE_LABEL("OFFLINE_ASM_LOCAL_LABEL", #label); USE_LABEL(label); 118 125 119 126 … … 226 233 int8_t* i8p; 227 234 void* vp; 235 const void* cvp; 228 236 CallFrame* callFrame; 229 237 ExecState* execState; 230 void* instruction;238 const void* instruction; 231 239 VM* vm; 232 240 JSCell* cell; … … 243 251 244 252 operator ExecState*() { return execState; } 245 operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }253 operator const Instruction*() { return reinterpret_cast<const Instruction*>(instruction); } 246 254 operator VM*() { return vm; } 247 255 operator ProtoCallFrame*() { return protoCallFrame; } … … 271 279 // are at play. 272 280 if (UNLIKELY(isInitializationPass)) { 281 Opcode* opcodeMap = LLInt::opcodeMap(); 282 Opcode* opcodeMapWide = LLInt::opcodeMapWide(); 283 273 284 #if ENABLE(COMPUTED_GOTO_OPCODES) 274 Opcode* opcodeMap = LLInt::opcodeMap();275 285 #define OPCODE_ENTRY(__opcode, length) \ 276 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); 277 FOR_EACH_OPCODE_ID(OPCODE_ENTRY) 278 #undef OPCODE_ENTRY 286 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); \ 287 opcodeMapWide[__opcode] = bitwise_cast<void*>(&&__opcode##_wide); 279 288 280 289 #define LLINT_OPCODE_ENTRY(__opcode, length) \ 281 290 opcodeMap[__opcode] = bitwise_cast<void*>(&&__opcode); 282 291 #else 292 // FIXME: this mapping is unnecessarily expensive in the absence of COMPUTED_GOTO 293 // narrow opcodes don't need any mapping and wide opcodes just need to add numOpcodeIDs 294 #define OPCODE_ENTRY(__opcode, length) \ 295 opcodeMap[__opcode] = __opcode; \ 296 opcodeMapWide[__opcode] = static_cast<OpcodeID>(__opcode##_wide); 297 298 #define LLINT_OPCODE_ENTRY(__opcode, length) \ 299 opcodeMap[__opcode] = __opcode; 300 #endif 301 FOR_EACH_BYTECODE_ID(OPCODE_ENTRY) 302 FOR_EACH_CLOOP_BYTECODE_HELPER_ID(LLINT_OPCODE_ENTRY) 283 303 FOR_EACH_LLINT_NATIVE_HELPER(LLINT_OPCODE_ENTRY) 304 #undef OPCODE_ENTRY 284 305 #undef LLINT_OPCODE_ENTRY 285 #endif 306 286 307 // Note: we can only set the exceptionInstructions after we have 287 308 // initialized the opcodeMap above. This is because getCodePtr() 288 309 // can depend on the opcodeMap. 
289 Instruction* exceptionInstructions = LLInt::exceptionInstructions();310 uint8_t* exceptionInstructions = reinterpret_cast<uint8_t*>(LLInt::exceptionInstructions()); 290 311 for (int i = 0; i < maxOpcodeLength + 1; ++i) 291 exceptionInstructions[i].u.pointer = 292 LLInt::getCodePtr(llint_throw_from_slow_path_trampoline); 312 exceptionInstructions[i] = llint_throw_from_slow_path_trampoline; 293 313 294 314 return JSValue(); … … 333 353 CLoopRegister pcBase, tagTypeNumber, tagMask; 334 354 #endif 355 CLoopRegister metadataTable; 335 356 CLoopDoubleRegister d0, d1; 336 357 … … 398 419 399 420 #if USE(JSVALUE32_64) 400 #define FETCH_OPCODE() pc.opcode421 #define FETCH_OPCODE() *pc.i8p 401 422 #else // USE(JSVALUE64) 402 #define FETCH_OPCODE() *bitwise_cast<Opcode *>(pcBase.i8p + pc.i * 8)423 #define FETCH_OPCODE() *bitwise_cast<OpcodeID*>(pcBase.i8p + pc.i) 403 424 #endif // USE(JSVALUE64) 404 425 … … 436 457 // Dispatch to the current PC's bytecode: 437 458 dispatchOpcode: 438 switch ( opcode)459 switch (static_cast<unsigned>(opcode)) 439 460 440 461 #endif // !ENABLE(COMPUTED_GOTO_OPCODES) -
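The initialization pass above fills two parallel tables, opcodeMap and opcodeMapWide, so the same bytecode ID can be dispatched in a narrow or a wide flavor, with the op_wide prefix selecting the wide entry. A toy C++ model of that double dispatch follows; the handlers, IDs, and single-byte wide opcode are simplifications (in the real stream, the opcode after the op_wide prefix is itself widened, as the loadi 1[PC] in nextInstructionWide suggests).

#include <array>
#include <cstdint>
#include <cstdio>

using Handler = void (*)(const uint8_t* pc);

void addNarrow(const uint8_t* pc)
{
    std::printf("add r%d\n", pc[1]); // narrow operands are single bytes
}

void addWide(const uint8_t* pc)
{
    // wide operands occupy four little-endian bytes in this toy encoding
    uint32_t dst = pc[1] | (uint32_t(pc[2]) << 8) | (uint32_t(pc[3]) << 16) | (uint32_t(pc[4]) << 24);
    std::printf("add (wide) r%u\n", static_cast<unsigned>(dst));
}

enum : uint8_t { op_wide = 0, op_add = 1, numOpcodeIDs };

std::array<Handler, numOpcodeIDs> opcodeMap;
std::array<Handler, numOpcodeIDs> opcodeMapWide;

void dispatch(const uint8_t* pc)
{
    if (pc[0] == op_wide)
        opcodeMapWide[pc[1]](pc + 1); // skip the prefix, use the wide table
    else
        opcodeMap[pc[0]](pc);
}

int main()
{
    // Mirrors OPCODE_ENTRY: the same ID indexes both tables.
    opcodeMap[op_add] = addNarrow;
    opcodeMapWide[op_add] = addWide;

    const uint8_t narrow[] = { op_add, 7 };
    const uint8_t wide[] = { op_wide, op_add, 7, 0, 0, 0 };
    dispatch(narrow); // prints: add r7
    dispatch(wide);   // prints: add (wide) r7
    return 0;
}

Keeping two tables means dispatch remains a single indexed jump regardless of operand width.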
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
r237486 r237547 24 24 25 25 # Utilities 26 macro dispatch(advance) 27 addp advance * 4, PC 28 jmp [PC] 29 end 30 31 macro dispatchBranchWithOffset(pcOffset) 32 lshifti 2, pcOffset 33 addp pcOffset, PC 34 jmp [PC] 35 end 36 37 macro dispatchBranch(pcOffset) 38 loadi pcOffset, t0 39 dispatchBranchWithOffset(t0) 40 end 41 42 macro dispatchAfterCall() 26 macro nextInstruction() 27 loadb [PC], t0 28 leap _g_opcodeMap, t1 29 loadp [t1, t0, 4], t2 30 jmp t2, BytecodePtrTag 31 end 32 33 macro nextInstructionWide() 34 loadi 1[PC], t0 35 leap _g_opcodeMapWide, t1 36 loadp [t1, t0, 4], t2 37 jmp t2, BytecodePtrTag 38 end 39 40 macro getuOperandNarrow(op, field, dst) 41 loadb constexpr %op%_%field%_index[PC], dst 42 end 43 44 macro getOperandNarrow(op, field, dst) 45 loadbsp constexpr %op%_%field%_index[PC], dst 46 end 47 48 macro getuOperandWide(op, field, dst) 49 loadi constexpr %op%_%field%_index * 4 + 1[PC], dst 50 end 51 52 macro getOperandWide(op, field, dst) 53 loadis constexpr %op%_%field%_index * 4 + 1[PC], dst 54 end 55 56 macro makeReturn(get, dispatch, fn) 57 fn(macro(tag, payload) 58 move tag, t5 59 move payload, t3 60 get(dst, t2) 61 storei t5, TagOffset[cfr, t2, 8] 62 storei t3, PayloadOffset[cfr, t2, 8] 63 dispatch() 64 end) 65 end 66 67 macro makeReturnProfiled(op, get, metadata, dispatch, fn) 68 fn(macro (tag, payload) 69 move tag, t1 70 move payload, t0 71 72 metadata(t5, t2) 73 valueProfile(op, t5, t1, t0) 74 get(dst, t2) 75 storei t1, TagOffset[cfr, t2, 8] 76 storei t0, PayloadOffset[cfr, t2, 8] 77 dispatch() 78 end) 79 end 80 81 82 macro dispatchAfterCall(size, op, dispatch) 43 83 loadi ArgumentCount + TagOffset[cfr], PC 44 loadi 4[PC], t384 get(size, op, dst, t3) 45 85 storei r1, TagOffset[cfr, t3, 8] 46 86 storei r0, PayloadOffset[cfr, t3, 8] 47 valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3) 48 dispatch(CallOpCodeSize) 87 metadata(size, op, t2, t3) 88 valueProfile(op, t2, r1, r0) 89 dispatch() 49 90 end 50 91 … … 403 444 end 404 445 405 macro loadVariable( operand, index, tag, payload)406 loadisFromInstruction(operand, index)446 macro loadVariable(get, operand, index, tag, payload) 447 get(operand, index) 407 448 loadi TagOffset[cfr, index, 8], tag 408 449 loadi PayloadOffset[cfr, index, 8], payload … … 411 452 # Index, tag, and payload must be different registers. Index is not 412 453 # changed. 413 macro loadConstantOrVariable(index, tag, payload) 414 bigteq index, FirstConstantRegisterIndex, .constant 415 loadi TagOffset[cfr, index, 8], tag 416 loadi PayloadOffset[cfr, index, 8], payload 417 jmp .done 418 .constant: 419 loadp CodeBlock[cfr], payload 420 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload 421 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex, 422 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3. 423 loadp TagOffset[payload, index, 8], tag 424 loadp PayloadOffset[payload, index, 8], payload 425 .done: 426 end 427 428 macro loadConstantOrVariableTag(index, tag) 429 bigteq index, FirstConstantRegisterIndex, .constant 430 loadi TagOffset[cfr, index, 8], tag 431 jmp .done 432 .constant: 433 loadp CodeBlock[cfr], tag 434 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag 435 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex, 436 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3. 
437 loadp TagOffset[tag, index, 8], tag 438 .done: 454 macro loadConstantOrVariable(size, index, tag, payload) 455 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 456 bigteq index, FirstConstantRegisterIndex, .constant 457 loadi TagOffset[cfr, index, 8], tag 458 loadi PayloadOffset[cfr, index, 8], payload 459 jmp .done 460 .constant: 461 loadp CodeBlock[cfr], payload 462 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload 463 subp FirstConstantRegisterIndex, index 464 loadp TagOffset[payload, index, 8], tag 465 loadp PayloadOffset[payload, index, 8], payload 466 .done: 467 end) 468 end 469 470 macro loadConstantOrVariableTag(size, index, tag) 471 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 472 bigteq index, FirstConstantRegisterIndex, .constant 473 loadi TagOffset[cfr, index, 8], tag 474 jmp .done 475 .constant: 476 loadp CodeBlock[cfr], tag 477 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag 478 subp FirstConstantRegisterIndex, index 479 loadp TagOffset[tag, index, 8], tag 480 .done: 481 end) 439 482 end 440 483 441 484 # Index and payload may be the same register. Index may be clobbered. 442 macro loadConstantOrVariable2Reg(index, tag, payload) 443 bigteq index, FirstConstantRegisterIndex, .constant 444 loadi TagOffset[cfr, index, 8], tag 445 loadi PayloadOffset[cfr, index, 8], payload 446 jmp .done 447 .constant: 448 loadp CodeBlock[cfr], tag 449 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag 450 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex, 451 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3. 452 lshifti 3, index 453 addp index, tag 454 loadp PayloadOffset[tag], payload 455 loadp TagOffset[tag], tag 456 .done: 457 end 458 459 macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload) 460 bigteq index, FirstConstantRegisterIndex, .constant 461 tagCheck(TagOffset[cfr, index, 8]) 462 loadi PayloadOffset[cfr, index, 8], payload 463 jmp .done 464 .constant: 465 loadp CodeBlock[cfr], payload 466 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload 467 # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex, 468 # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3. 
469 tagCheck(TagOffset[payload, index, 8]) 470 loadp PayloadOffset[payload, index, 8], payload 471 .done: 485 macro loadConstantOrVariable2Reg(size, index, tag, payload) 486 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 487 bigteq index, FirstConstantRegisterIndex, .constant 488 loadi TagOffset[cfr, index, 8], tag 489 loadi PayloadOffset[cfr, index, 8], payload 490 jmp .done 491 .constant: 492 loadp CodeBlock[cfr], tag 493 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag 494 subp FirstConstantRegisterIndex, index 495 lshifti 3, index 496 addp index, tag 497 loadp PayloadOffset[tag], payload 498 loadp TagOffset[tag], tag 499 .done: 500 end) 501 end 502 503 macro loadConstantOrVariablePayloadTagCustom(size, index, tagCheck, payload) 504 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 505 bigteq index, FirstConstantRegisterIndex, .constant 506 tagCheck(TagOffset[cfr, index, 8]) 507 loadi PayloadOffset[cfr, index, 8], payload 508 jmp .done 509 .constant: 510 loadp CodeBlock[cfr], payload 511 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload 512 subp FirstConstantRegisterIndex, index 513 tagCheck(TagOffset[payload, index, 8]) 514 loadp PayloadOffset[payload, index, 8], payload 515 .done: 516 end) 472 517 end 473 518 … … 477 522 # be faster than doing loadConstantOrVariable followed by a branch on the 478 523 # tag. 479 macro loadConstantOrVariablePayload( index, expectedTag, payload, slow)524 macro loadConstantOrVariablePayload(size, index, expectedTag, payload, slow) 480 525 loadConstantOrVariablePayloadTagCustom( 526 size, 481 527 index, 482 528 macro (actualTag) bineq actualTag, expectedTag, slow end, … … 484 530 end 485 531 486 macro loadConstantOrVariablePayloadUnchecked( index, payload)532 macro loadConstantOrVariablePayloadUnchecked(size, index, payload) 487 533 loadConstantOrVariablePayloadTagCustom( 534 size, 488 535 index, 489 536 macro (actualTag) end, … … 491 538 end 492 539 493 macro writeBarrierOnOperand( cellOperand)494 loadisFromInstruction(cellOperand, t1)495 loadConstantOrVariablePayload( t1, CellTag, t2, .writeBarrierDone)540 macro writeBarrierOnOperand(size, get, cellOperand) 541 get(cellOperand, t1) 542 loadConstantOrVariablePayload(size, t1, CellTag, t2, .writeBarrierDone) 496 543 skipIfIsRememberedOrInEden( 497 544 t2, … … 509 556 end 510 557 511 macro writeBarrierOnOperands( cellOperand, valueOperand)512 loadisFromInstruction(valueOperand, t1)513 loadConstantOrVariableTag( t1, t0)558 macro writeBarrierOnOperands(size, get, cellOperand, valueOperand) 559 get(valueOperand, t1) 560 loadConstantOrVariableTag(size, t1, t0) 514 561 bineq t0, CellTag, .writeBarrierDone 515 562 516 writeBarrierOnOperand( cellOperand)563 writeBarrierOnOperand(size, get, cellOperand) 517 564 .writeBarrierDone: 518 565 end 519 566 520 macro writeBarrierOnGlobal( valueOperand, loadHelper)521 loadisFromInstruction(valueOperand, t1)522 loadConstantOrVariableTag( t1, t0)567 macro writeBarrierOnGlobal(size, get, valueOperand, loadHelper) 568 get(valueOperand, t1) 569 loadConstantOrVariableTag(size, t1, t0) 523 570 bineq t0, CellTag, .writeBarrierDone 524 571 … … 540 587 end 541 588 542 macro writeBarrierOnGlobalObject( valueOperand)543 writeBarrierOnGlobal( valueOperand,589 macro writeBarrierOnGlobalObject(size, get, valueOperand) 590 writeBarrierOnGlobal(size, get, valueOperand, 544 591 macro(registerToStoreGlobal) 545 592 loadp 
CodeBlock[cfr], registerToStoreGlobal … … 548 595 end 549 596 550 macro writeBarrierOnGlobalLexicalEnvironment( valueOperand)551 writeBarrierOnGlobal( valueOperand,597 macro writeBarrierOnGlobalLexicalEnvironment(size, get, valueOperand) 598 writeBarrierOnGlobal(size, get, valueOperand, 552 599 macro(registerToStoreGlobal) 553 600 loadp CodeBlock[cfr], registerToStoreGlobal … … 557 604 end 558 605 559 macro valueProfile(tag, payload, operand, scratch) 560 loadp operand[PC], scratch 561 storei tag, ValueProfile::m_buckets + TagOffset[scratch] 562 storei payload, ValueProfile::m_buckets + PayloadOffset[scratch] 606 macro valueProfile(op, metadata, tag, payload) 607 storei tag, %op%::Metadata::profile.m_buckets + TagOffset[metadata] 608 storei payload, %op%::Metadata::profile.m_buckets + PayloadOffset[metadata] 563 609 end 564 610 … … 607 653 negi t1 608 654 move cfr, t3 655 subp CalleeSaveSpaceAsVirtualRegisters * SlotSize, t3 656 addi CalleeSaveSpaceAsVirtualRegisters, t2 609 657 move t1, t0 610 658 lshiftp 3, t0 … … 632 680 # Reload CodeBlock and PC, since the slow_path clobbered it. 633 681 loadp CodeBlock[cfr], t1 682 # FIXME: cleanup double load 683 # https://bugs.webkit.org/show_bug.cgi?id=190932 634 684 loadp CodeBlock::m_instructions[t1], PC 685 loadp [PC], PC 635 686 jmp doneLabel 636 687 end … … 653 704 loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock 654 705 loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars 706 subi CalleeSaveSpaceAsVirtualRegisters, t2 707 move cfr, t3 708 subp CalleeSaveSpaceAsVirtualRegisters * SlotSize, t3 655 709 btiz t2, .opEnterDone 656 710 move UndefinedTag, t0 … … 658 712 negi t2 659 713 .opEnterLoop: 660 storei t0, TagOffset[ cfr, t2, 8]661 storei t1, PayloadOffset[ cfr, t2, 8]714 storei t0, TagOffset[t3, t2, 8] 715 storei t1, PayloadOffset[t3, t2, 8] 662 716 addi 1, t2 663 717 btinz t2, .opEnterLoop 664 718 .opEnterDone: 665 719 callSlowPath(_slow_path_enter) 666 dispatch(constexpr op_enter_length) 667 668 669 _llint_op_get_argument: 670 traceExecution() 671 loadisFromInstruction(1, t1) 672 loadisFromInstruction(2, t2) 720 dispatchOp(narrow, op_enter) 721 722 723 llintOpWithProfile(op_get_argument, OpGetArgument, macro (size, get, dispatch, return) 724 get(index, t2) 673 725 loadi PayloadOffset + ArgumentCount[cfr], t0 674 726 bilteq t0, t2, .opGetArgumentOutOfBounds 675 727 loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0 676 728 loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t3 677 storei t0, TagOffset[cfr, t1, 8] 678 storei t3, PayloadOffset[cfr, t1, 8] 679 valueProfile(t0, t3, 12, t1) 680 dispatch(constexpr op_get_argument_length) 729 return (t0, t3) 681 730 682 731 .opGetArgumentOutOfBounds: 683 storei UndefinedTag, TagOffset[cfr, t1, 8] 684 storei 0, PayloadOffset[cfr, t1, 8] 685 valueProfile(UndefinedTag, 0, 12, t1) 686 dispatch(constexpr op_get_argument_length) 687 688 689 _llint_op_argument_count: 690 traceExecution() 691 loadisFromInstruction(1, t2) 732 return (UndefinedTag, 0) 733 end) 734 735 736 llintOpWithReturn(op_argument_count, OpArgumentCount, macro (size, get, dispatch, return) 692 737 loadi PayloadOffset + ArgumentCount[cfr], t0 693 738 subi 1, t0 694 move Int32Tag, t1 695 storei t1, TagOffset[cfr, t2, 8] 696 storei t0, PayloadOffset[cfr, t2, 8] 697 dispatch(constexpr op_argument_count_length) 698 699 700 _llint_op_get_scope: 701 traceExecution() 739 return(Int32Tag, t0) 740 end) 741 742 743 llintOpWithReturn(op_get_scope, OpGetScope, macro (size, get, dispatch, return) 702 744 loadi 
Callee + PayloadOffset[cfr], t0 703 745 loadi JSCallee::m_scope[t0], t0 704 loadisFromInstruction(1, t1) 705 storei CellTag, TagOffset[cfr, t1, 8] 706 storei t0, PayloadOffset[cfr, t1, 8] 707 dispatch(constexpr op_get_scope_length) 708 709 710 _llint_op_to_this: 711 traceExecution() 712 loadi 4[PC], t0 746 return (CellTag, t0) 747 end) 748 749 750 llintOpWithMetadata(op_to_this, OpToThis, macro (size, get, dispatch, metadata, return) 751 get(srcDst, t0) 713 752 bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow 714 753 loadi PayloadOffset[cfr, t0, 8], t0 715 754 bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow 716 loadpFromInstruction(2, t2) 755 metadata(t2, t3) 756 loadp OpToThis::Metadata::cachedStructure[t2], t2 717 757 bpneq JSCell::m_structureID[t0], t2, .opToThisSlow 718 dispatch( constexpr op_to_this_length)758 dispatch() 719 759 720 760 .opToThisSlow: 721 761 callSlowPath(_slow_path_to_this) 722 dispatch( constexpr op_to_this_length)723 724 725 _llint_op_check_tdz: 726 traceExecution()727 loadisFromInstruction(1, t0)728 loadConstantOrVariableTag( t0, t1)762 dispatch() 763 end) 764 765 766 llintOp(op_check_tdz, OpCheckTdz, macro (size, get, dispatch) 767 get(target, t0) 768 loadConstantOrVariableTag(size, t0, t1) 729 769 bineq t1, EmptyValueTag, .opNotTDZ 730 770 callSlowPath(_slow_path_throw_tdz_error) 731 771 732 772 .opNotTDZ: 733 dispatch(constexpr op_check_tdz_length) 734 735 736 _llint_op_mov: 737 traceExecution() 738 loadi 8[PC], t1 739 loadi 4[PC], t0 740 loadConstantOrVariable(t1, t2, t3) 741 storei t2, TagOffset[cfr, t0, 8] 742 storei t3, PayloadOffset[cfr, t0, 8] 743 dispatch(constexpr op_mov_length) 744 745 746 _llint_op_not: 747 traceExecution() 748 loadi 8[PC], t0 749 loadi 4[PC], t1 750 loadConstantOrVariable(t0, t2, t3) 773 dispatch() 774 end) 775 776 777 llintOpWithReturn(op_mov, OpMov, macro (size, get, dispatch, return) 778 get(src, t1) 779 loadConstantOrVariable(size, t1, t2, t3) 780 return(t2, t3) 781 end) 782 783 784 llintOpWithReturn(op_not, OpNot, macro (size, get, dispatch, return) 785 get(operand, t0) 786 loadConstantOrVariable(size, t0, t2, t3) 751 787 bineq t2, BooleanTag, .opNotSlow 752 788 xori 1, t3 753 storei t2, TagOffset[cfr, t1, 8] 754 storei t3, PayloadOffset[cfr, t1, 8] 755 dispatch(constexpr op_not_length) 789 return(t2, t3) 756 790 757 791 .opNotSlow: 758 792 callSlowPath(_slow_path_not) 759 dispatch(constexpr op_not_length) 760 761 762 macro equalityComparison(integerComparison, slowPath) 763 loadi 12[PC], t2 764 loadi 8[PC], t0 765 loadConstantOrVariable(t2, t3, t1) 766 loadConstantOrVariable2Reg(t0, t2, t0) 767 bineq t2, t3, .opEqSlow 768 bieq t2, CellTag, .opEqSlow 769 bib t2, LowestTag, .opEqSlow 770 loadi 4[PC], t2 771 integerComparison(t0, t1, t0) 772 storei BooleanTag, TagOffset[cfr, t2, 8] 773 storei t0, PayloadOffset[cfr, t2, 8] 774 dispatch(constexpr op_eq_length) 775 776 .opEqSlow: 777 callSlowPath(slowPath) 778 dispatch(constexpr op_eq_length) 779 end 780 781 782 macro equalityJump(integerComparison, slowPath) 783 loadi 8[PC], t2 784 loadi 4[PC], t0 785 loadConstantOrVariable(t2, t3, t1) 786 loadConstantOrVariable2Reg(t0, t2, t0) 787 bineq t2, t3, .slow 788 bieq t2, CellTag, .slow 789 bib t2, LowestTag, .slow 790 integerComparison(t0, t1, .jumpTarget) 791 dispatch(constexpr op_jeq_length) 792 793 .jumpTarget: 794 dispatchBranch(12[PC]) 795 796 .slow: 797 callSlowPath(slowPath) 798 dispatch(0) 799 end 800 801 802 _llint_op_eq_null: 803 traceExecution() 804 loadi 8[PC], t0 805 loadi 4[PC], t3 806 assertNotConstant(t0) 807 
loadi TagOffset[cfr, t0, 8], t1 808 loadi PayloadOffset[cfr, t0, 8], t0 809 bineq t1, CellTag, .opEqNullImmediate 810 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined 811 move 0, t1 812 jmp .opEqNullNotImmediate 813 .opEqNullMasqueradesAsUndefined: 814 loadp JSCell::m_structureID[t0], t1 815 loadp CodeBlock[cfr], t0 816 loadp CodeBlock::m_globalObject[t0], t0 817 cpeq Structure::m_globalObject[t1], t0, t1 818 jmp .opEqNullNotImmediate 819 .opEqNullImmediate: 820 cieq t1, NullTag, t2 821 cieq t1, UndefinedTag, t1 822 ori t2, t1 823 .opEqNullNotImmediate: 824 storei BooleanTag, TagOffset[cfr, t3, 8] 825 storei t1, PayloadOffset[cfr, t3, 8] 826 dispatch(constexpr op_eq_null_length) 827 828 829 _llint_op_neq_null: 830 traceExecution() 831 loadi 8[PC], t0 832 loadi 4[PC], t3 833 assertNotConstant(t0) 834 loadi TagOffset[cfr, t0, 8], t1 835 loadi PayloadOffset[cfr, t0, 8], t0 836 bineq t1, CellTag, .opNeqNullImmediate 837 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined 838 move 1, t1 839 jmp .opNeqNullNotImmediate 840 .opNeqNullMasqueradesAsUndefined: 841 loadp JSCell::m_structureID[t0], t1 842 loadp CodeBlock[cfr], t0 843 loadp CodeBlock::m_globalObject[t0], t0 844 cpneq Structure::m_globalObject[t1], t0, t1 845 jmp .opNeqNullNotImmediate 846 .opNeqNullImmediate: 847 cineq t1, NullTag, t2 848 cineq t1, UndefinedTag, t1 849 andi t2, t1 850 .opNeqNullNotImmediate: 851 storei BooleanTag, TagOffset[cfr, t3, 8] 852 storei t1, PayloadOffset[cfr, t3, 8] 853 dispatch(constexpr op_neq_null_length) 854 855 856 macro strictEq(equalityOperation, slowPath) 857 loadi 12[PC], t2 858 loadi 8[PC], t0 859 loadConstantOrVariable(t2, t3, t1) 860 loadConstantOrVariable2Reg(t0, t2, t0) 861 bineq t2, t3, .slow 862 bib t2, LowestTag, .slow 863 bineq t2, CellTag, .notStringOrSymbol 864 bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol 865 bbb JSCell::m_type[t1], ObjectType, .slow 866 .notStringOrSymbol: 867 loadi 4[PC], t2 868 equalityOperation(t0, t1, t0) 869 storei BooleanTag, TagOffset[cfr, t2, 8] 870 storei t0, PayloadOffset[cfr, t2, 8] 871 dispatch(4) 872 873 .slow: 874 callSlowPath(slowPath) 875 dispatch(4) 876 end 877 878 879 macro strictEqualityJump(equalityOperation, slowPath) 880 loadi 8[PC], t2 881 loadi 4[PC], t0 882 loadConstantOrVariable(t2, t3, t1) 883 loadConstantOrVariable2Reg(t0, t2, t0) 884 bineq t2, t3, .slow 885 bib t2, LowestTag, .slow 886 bineq t2, CellTag, .notStringOrSymbol 887 bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol 888 bbb JSCell::m_type[t1], ObjectType, .slow 889 .notStringOrSymbol: 890 equalityOperation(t0, t1, .jumpTarget) 891 dispatch(constexpr op_jstricteq_length) 892 893 .jumpTarget: 894 dispatchBranch(12[PC]) 895 896 .slow: 897 callSlowPath(slowPath) 898 dispatch(0) 899 end 900 901 902 _llint_op_stricteq: 903 traceExecution() 904 strictEq(macro (left, right, result) cieq left, right, result end, _slow_path_stricteq) 905 906 907 _llint_op_nstricteq: 908 traceExecution() 909 strictEq(macro (left, right, result) cineq left, right, result end, _slow_path_nstricteq) 910 911 912 _llint_op_jstricteq: 913 traceExecution() 914 strictEqualityJump( 915 macro (left, right, target) bieq left, right, target end, 916 _llint_slow_path_jstricteq) 917 918 919 _llint_op_jnstricteq: 920 traceExecution() 921 strictEqualityJump( 922 macro (left, right, target) bineq left, right, target end, 923 _llint_slow_path_jnstricteq) 924 925 926 _llint_op_inc: 927 traceExecution() 928 loadi 4[PC], t0 929 bineq TagOffset[cfr, 
t0, 8], Int32Tag, .opIncSlow 930 loadi PayloadOffset[cfr, t0, 8], t1 931 baddio 1, t1, .opIncSlow 932 storei t1, PayloadOffset[cfr, t0, 8] 933 dispatch(constexpr op_inc_length) 934 935 .opIncSlow: 936 callSlowPath(_slow_path_inc) 937 dispatch(constexpr op_inc_length) 938 939 940 _llint_op_dec: 941 traceExecution() 942 loadi 4[PC], t0 943 bineq TagOffset[cfr, t0, 8], Int32Tag, .opDecSlow 944 loadi PayloadOffset[cfr, t0, 8], t1 945 bsubio 1, t1, .opDecSlow 946 storei t1, PayloadOffset[cfr, t0, 8] 947 dispatch(constexpr op_dec_length) 948 949 .opDecSlow: 950 callSlowPath(_slow_path_dec) 951 dispatch(constexpr op_dec_length) 952 953 954 _llint_op_to_number: 955 traceExecution() 956 loadi 8[PC], t0 957 loadi 4[PC], t1 958 loadConstantOrVariable(t0, t2, t3) 793 dispatch() 794 end) 795 796 797 macro equalityComparisonOp(name, op, integerComparison) 798 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 799 get(rhs, t2) 800 get(lhs, t0) 801 loadConstantOrVariable(size, t2, t3, t1) 802 loadConstantOrVariable2Reg(size, t0, t2, t0) 803 bineq t2, t3, .opEqSlow 804 bieq t2, CellTag, .opEqSlow 805 bib t2, LowestTag, .opEqSlow 806 integerComparison(t0, t1, t0) 807 return(BooleanTag, t0) 808 809 .opEqSlow: 810 callSlowPath(_slow_path_%name%) 811 dispatch() 812 end) 813 end 814 815 816 macro equalityJumpOp(name, op, integerComparison) 817 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 818 get(rhs, t2) 819 get(lhs, t0) 820 loadConstantOrVariable(size, t2, t3, t1) 821 loadConstantOrVariable2Reg(size, t0, t2, t0) 822 bineq t2, t3, .slow 823 bieq t2, CellTag, .slow 824 bib t2, LowestTag, .slow 825 integerComparison(t0, t1, .jumpTarget) 826 dispatch() 827 828 .jumpTarget: 829 jump(target) 830 831 .slow: 832 callSlowPath(_llint_slow_path_%name%) 833 nextInstruction() 834 end) 835 end 836 837 838 macro equalNullComparisonOp(name, op, fn) 839 llintOpWithReturn(name, op, macro (size, get, dispatch, return) 840 get(operand, t0) 841 assertNotConstant(size, t0) 842 loadi TagOffset[cfr, t0, 8], t1 843 loadi PayloadOffset[cfr, t0, 8], t0 844 bineq t1, CellTag, .opEqNullImmediate 845 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined 846 move 0, t1 847 jmp .opEqNullNotImmediate 848 .opEqNullMasqueradesAsUndefined: 849 loadp JSCell::m_structureID[t0], t1 850 loadp CodeBlock[cfr], t0 851 loadp CodeBlock::m_globalObject[t0], t0 852 cpeq Structure::m_globalObject[t1], t0, t1 853 jmp .opEqNullNotImmediate 854 .opEqNullImmediate: 855 cieq t1, NullTag, t2 856 cieq t1, UndefinedTag, t1 857 ori t2, t1 858 .opEqNullNotImmediate: 859 fn(t1) 860 return(BooleanTag, t1) 861 end) 862 end 863 864 equalNullComparisonOp(op_eq_null, OpEqNull, macro (value) end) 865 866 equalNullComparisonOp(op_neq_null, OpNeqNull, 867 macro (value) xori 1, value end) 868 869 870 macro strictEqOp(name, op, equalityOperation) 871 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 872 get(rhs, t2) 873 get(lhs, t0) 874 loadConstantOrVariable(size, t2, t3, t1) 875 loadConstantOrVariable2Reg(size, t0, t2, t0) 876 bineq t2, t3, .slow 877 bib t2, LowestTag, .slow 878 bineq t2, CellTag, .notStringOrSymbol 879 bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol 880 bbb JSCell::m_type[t1], ObjectType, .slow 881 .notStringOrSymbol: 882 equalityOperation(t0, t1, t0) 883 return(BooleanTag, t0) 884 885 .slow: 886 callSlowPath(_slow_path_%name%) 887 dispatch() 888 end) 889 end 890 891 892 macro strictEqualityJumpOp(name, op, equalityOperation) 893 llintOpWithJump(op_%name%, op, 
macro (size, get, jump, dispatch) 894 get(rhs, t2) 895 get(lhs, t0) 896 loadConstantOrVariable(size, t2, t3, t1) 897 loadConstantOrVariable2Reg(size, t0, t2, t0) 898 bineq t2, t3, .slow 899 bib t2, LowestTag, .slow 900 bineq t2, CellTag, .notStringOrSymbol 901 bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol 902 bbb JSCell::m_type[t1], ObjectType, .slow 903 .notStringOrSymbol: 904 equalityOperation(t0, t1, .jumpTarget) 905 dispatch() 906 907 .jumpTarget: 908 jump(target) 909 910 .slow: 911 callSlowPath(_llint_slow_path_%name%) 912 nextInstruction() 913 end) 914 end 915 916 917 strictEqOp(stricteq, OpStricteq, 918 macro (left, right, result) cieq left, right, result end) 919 920 921 strictEqOp(nstricteq, OpNstricteq, 922 macro (left, right, result) cineq left, right, result end) 923 924 925 strictEqualityJumpOp(jstricteq, OpJstricteq, 926 macro (left, right, target) bieq left, right, target end) 927 928 929 strictEqualityJumpOp(jnstricteq, OpJnstricteq, 930 macro (left, right, target) bineq left, right, target end) 931 932 933 macro preOp(name, op, operation) 934 llintOp(op_%name%, op, macro (size, get, dispatch) 935 get(srcDst, t0) 936 bineq TagOffset[cfr, t0, 8], Int32Tag, .slow 937 loadi PayloadOffset[cfr, t0, 8], t1 938 operation(t1, .slow) 939 storei t1, PayloadOffset[cfr, t0, 8] 940 dispatch() 941 942 .slow: 943 callSlowPath(_slow_path_%name%) 944 dispatch() 945 end) 946 end 947 948 949 llintOpWithProfile(op_to_number, OpToNumber, macro (size, get, dispatch, return) 950 get(operand, t0) 951 loadConstantOrVariable(size, t0, t2, t3) 959 952 bieq t2, Int32Tag, .opToNumberIsInt 960 953 biaeq t2, LowestTag, .opToNumberSlow 961 954 .opToNumberIsInt: 962 storei t2, TagOffset[cfr, t1, 8] 963 storei t3, PayloadOffset[cfr, t1, 8] 964 valueProfile(t2, t3, 12, t1) 965 dispatch(constexpr op_to_number_length) 955 return(t2, t3) 966 956 967 957 .opToNumberSlow: 968 958 callSlowPath(_slow_path_to_number) 969 dispatch(constexpr op_to_number_length) 970 971 972 _llint_op_to_string: 973 traceExecution() 974 loadi 8[PC], t0 975 loadi 4[PC], t1 976 loadConstantOrVariable(t0, t2, t3) 959 dispatch() 960 end) 961 962 963 llintOpWithReturn(op_to_string, OpToString, macro (size, get, dispatch, return) 964 get(operand, t0) 965 loadConstantOrVariable(size, t0, t2, t3) 977 966 bineq t2, CellTag, .opToStringSlow 978 967 bbneq JSCell::m_type[t3], StringType, .opToStringSlow 979 968 .opToStringIsString: 980 storei t2, TagOffset[cfr, t1, 8] 981 storei t3, PayloadOffset[cfr, t1, 8] 982 dispatch(constexpr op_to_string_length) 969 return(t2, t3) 983 970 984 971 .opToStringSlow: 985 972 callSlowPath(_slow_path_to_string) 986 dispatch(constexpr op_to_string_length) 987 988 989 _llint_op_to_object: 990 traceExecution() 991 loadi 8[PC], t0 992 loadi 4[PC], t1 993 loadConstantOrVariable(t0, t2, t3) 973 dispatch() 974 end) 975 976 977 llintOpWithProfile(op_to_object, OpToObject, macro (size, get, dispatch, return) 978 get(operand, t0) 979 loadConstantOrVariable(size, t0, t2, t3) 994 980 bineq t2, CellTag, .opToObjectSlow 995 981 bbb JSCell::m_type[t3], ObjectType, .opToObjectSlow 996 storei t2, TagOffset[cfr, t1, 8] 997 storei t3, PayloadOffset[cfr, t1, 8] 998 valueProfile(t2, t3, 16, t1) 999 dispatch(constexpr op_to_object_length) 982 return(t2, t3) 1000 983 1001 984 .opToObjectSlow: 1002 985 callSlowPath(_slow_path_to_object) 1003 dispatch(constexpr op_to_object_length) 1004 1005 1006 _llint_op_negate: 1007 traceExecution() 1008 loadi 8[PC], t0 1009 loadi 4[PC], t3 1010 loadConstantOrVariable(t0, t1, t2) 1011 
loadisFromInstruction(3, t0) 986 dispatch() 987 end) 988 989 990 llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, return) 991 992 macro arithProfile(type) 993 ori type, OpNegate::Metadata::arithProfile[t5] 994 end 995 996 metadata(t5, t0) 997 get(operand, t0) 998 loadConstantOrVariable(size, t0, t1, t2) 1012 999 bineq t1, Int32Tag, .opNegateSrcNotInt 1013 1000 btiz t2, 0x7fffffff, .opNegateSlow 1014 1001 negi t2 1015 ori ArithProfileInt, t0 1016 storei Int32Tag, TagOffset[cfr, t3, 8] 1017 storeisToInstruction(t0, 3) 1018 storei t2, PayloadOffset[cfr, t3, 8] 1019 dispatch(constexpr op_negate_length) 1002 arithProfile(ArithProfileInt) 1003 return (Int32Tag, t2) 1020 1004 .opNegateSrcNotInt: 1021 1005 bia t1, LowestTag, .opNegateSlow 1022 1006 xori 0x80000000, t1 1023 ori ArithProfileNumber, t0 1024 storei t2, PayloadOffset[cfr, t3, 8] 1025 storeisToInstruction(t0, 3) 1026 storei t1, TagOffset[cfr, t3, 8] 1027 dispatch(constexpr op_negate_length) 1007 arithProfile(ArithProfileNumber) 1008 return(t1, t2) 1028 1009 1029 1010 .opNegateSlow: 1030 1011 callSlowPath(_slow_path_negate) 1031 dispatch( constexpr op_negate_length)1032 1033 1034 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) 1035 loadi 12[PC], t2 1036 l oadi 8[PC], t01037 loadConstantOrVariable(t2, t3, t1)1038 loadConstantOrVariable2Reg(t0, t2, t0)1039 bineq t2, Int32Tag, .op1NotInt1040 bineq t3, Int32Tag, .op2NotInt 1041 loadisFromInstruction(4, t5)1042 ori ArithProfileIntInt, t51043 storeisToInstruction(t5, 4)1044 loadi 4[PC], t21045 integerOperationAndStore(t3, t1, t0, .slow, t2)1046 dispatch(5)1047 1048 .op1NotInt: 1049 # First operand is definitely not an int, the second operand could be anything.1050 bia t2, LowestTag, .slow1051 bib t3, LowestTag, .op1NotIntOp2Double1052 bineq t3, Int32Tag, .slow 1053 loadisFromInstruction(4, t5)1054 ori ArithProfileNumberInt, t51055 storeisToInstruction(t5, 4)1056 ci2d t1, ft11057 jmp .op1NotIntReady1058 .op1NotIntOp2Double: 1059 fii2d t1, t3, ft11060 loadisFromInstruction(4, t5)1061 ori ArithProfileNumberNumber, t51062 storeisToInstruction(t5, 4)1063 .op1NotIntReady: 1064 loadi 4[PC], t11065 fii2d t0, t2, ft01066 doubleOperation(ft1, ft0)1067 stored ft0, [cfr, t1, 8]1068 dispatch(5)1069 1070 .op2NotInt: 1071 # First operand is definitely an int, the second operand is definitely not.1072 loadi 4[PC], t21073 bia t3, LowestTag, .slow1074 loadisFromInstruction(4, t5)1075 ori ArithProfileIntNumber, t51076 storeisToInstruction(t5, 4)1077 ci2d t0, ft01078 fii2d t1, t3, ft11079 doubleOperation(ft1, ft0)1080 stored ft0, [cfr, t2, 8]1081 dispatch(5) 1082 1083 .slow: 1084 callSlowPath(slowPath)1085 dispatch(5)1086 end 1087 1088 macro binaryOp( integerOperation, doubleOperation, slowPath)1089 binaryOpCustomStore( 1012 dispatch() 1013 end) 1014 1015 1016 macro binaryOpCustomStore(name, op, integerOperationAndStore, doubleOperation) 1017 llintOpWithMetadata(op_%name%, op, macro (size, get, dispatch, metadata, return) 1018 macro arithProfile(type) 1019 ori type, %op%::Metadata::arithProfile[t5] 1020 end 1021 1022 metadata(t5, t2) 1023 get(rhs, t2) 1024 get(lhs, t0) 1025 loadConstantOrVariable(size, t2, t3, t1) 1026 loadConstantOrVariable2Reg(size, t0, t2, t0) 1027 bineq t2, Int32Tag, .op1NotInt 1028 bineq t3, Int32Tag, .op2NotInt 1029 arithProfile(ArithProfileIntInt) 1030 get(dst, t2) 1031 integerOperationAndStore(t3, t1, t0, .slow, t2) 1032 dispatch() 1033 1034 .op1NotInt: 1035 # First operand is definitely not an int, the second operand could be 
anything. 1036 bia t2, LowestTag, .slow 1037 bib t3, LowestTag, .op1NotIntOp2Double 1038 bineq t3, Int32Tag, .slow 1039 arithProfile(ArithProfileNumberInt) 1040 ci2d t1, ft1 1041 jmp .op1NotIntReady 1042 .op1NotIntOp2Double: 1043 fii2d t1, t3, ft1 1044 arithProfile(ArithProfileNumberNumber) 1045 .op1NotIntReady: 1046 get(dst, t1) 1047 fii2d t0, t2, ft0 1048 doubleOperation(ft1, ft0) 1049 stored ft0, [cfr, t1, 8] 1050 dispatch() 1051 1052 .op2NotInt: 1053 # First operand is definitely an int, the second operand is definitely not. 1054 get(dst, t2) 1055 bia t3, LowestTag, .slow 1056 arithProfile(ArithProfileIntNumber) 1057 ci2d t0, ft0 1058 fii2d t1, t3, ft1 1059 doubleOperation(ft1, ft0) 1060 stored ft0, [cfr, t2, 8] 1061 dispatch() 1062 1063 .slow: 1064 callSlowPath(_slow_path_%name%) 1065 dispatch() 1066 end) 1067 end 1068 1069 macro binaryOp(name, op, integerOperation, doubleOperation) 1070 binaryOpCustomStore(name, op, 1090 1071 macro (int32Tag, left, right, slow, index) 1091 1072 integerOperation(left, right, slow) … … 1093 1074 storei right, PayloadOffset[cfr, index, 8] 1094 1075 end, 1095 doubleOperation, slowPath) 1096 end 1097 1098 _llint_op_add: 1099 traceExecution() 1100 binaryOp( 1101 macro (left, right, slow) baddio left, right, slow end, 1102 macro (left, right) addd left, right end, 1103 _slow_path_add) 1104 1105 1106 _llint_op_mul: 1107 traceExecution() 1108 binaryOpCustomStore( 1109 macro (int32Tag, left, right, slow, index) 1110 const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant. 1111 move right, scratch 1112 bmulio left, scratch, slow 1113 btinz scratch, .done 1114 bilt left, 0, slow 1115 bilt right, 0, slow 1116 .done: 1117 storei Int32Tag, TagOffset[cfr, index, 8] 1118 storei scratch, PayloadOffset[cfr, index, 8] 1119 end, 1120 macro (left, right) muld left, right end, 1121 _slow_path_mul) 1122 1123 1124 _llint_op_sub: 1125 traceExecution() 1126 binaryOp( 1127 macro (left, right, slow) bsubio left, right, slow end, 1128 macro (left, right) subd left, right end, 1129 _slow_path_sub) 1130 1131 1132 _llint_op_div: 1133 traceExecution() 1134 binaryOpCustomStore( 1135 macro (int32Tag, left, right, slow, index) 1136 ci2d left, ft0 1137 ci2d right, ft1 1138 divd ft0, ft1 1139 bcd2i ft1, right, .notInt 1140 storei int32Tag, TagOffset[cfr, index, 8] 1141 storei right, PayloadOffset[cfr, index, 8] 1142 jmp .done 1143 .notInt: 1144 stored ft1, [cfr, index, 8] 1145 .done: 1146 end, 1147 macro (left, right) divd left, right end, 1148 _slow_path_div) 1149 1150 1151 macro bitOpProfiled(operation, slowPath, advance) 1152 loadi 12[PC], t2 1153 loadi 8[PC], t0 1154 loadConstantOrVariable(t2, t3, t1) 1155 loadConstantOrVariable2Reg(t0, t2, t0) 1156 bineq t3, Int32Tag, .slow 1157 bineq t2, Int32Tag, .slow 1158 loadi 4[PC], t2 1159 operation(t1, t0) 1160 storei t3, TagOffset[cfr, t2, 8] 1161 storei t0, PayloadOffset[cfr, t2, 8] 1162 valueProfile(t3, t0, (advance - 1) * 4, t2) 1163 dispatch(advance) 1164 1165 .slow: 1166 callSlowPath(slowPath) 1167 dispatch(advance) 1168 end 1169 1170 macro bitOp(operation, slowPath, advance) 1171 loadi 12[PC], t2 1172 loadi 8[PC], t0 1173 loadConstantOrVariable(t2, t3, t1) 1174 loadConstantOrVariable2Reg(t0, t2, t0) 1175 bineq t3, Int32Tag, .slow 1176 bineq t2, Int32Tag, .slow 1177 loadi 4[PC], t2 1178 operation(t1, t0) 1179 storei t3, TagOffset[cfr, t2, 8] 1180 storei t0, PayloadOffset[cfr, t2, 8] 1181 dispatch(advance) 1182 1183 .slow: 1184 callSlowPath(slowPath) 1185 dispatch(advance) 1186 end 1187 1188 
_llint_op_lshift: 1189 traceExecution() 1190 bitOp( 1191 macro (left, right) lshifti left, right end, 1192 _slow_path_lshift, 1193 constexpr op_lshift_length) 1194 1195 1196 _llint_op_rshift: 1197 traceExecution() 1198 bitOp( 1199 macro (left, right) rshifti left, right end, 1200 _slow_path_rshift, 1201 constexpr op_rshift_length) 1202 1203 1204 _llint_op_urshift: 1205 traceExecution() 1206 bitOp( 1207 macro (left, right) urshifti left, right end, 1208 _slow_path_urshift, 1209 constexpr op_urshift_length) 1210 1211 1212 _llint_op_unsigned: 1213 traceExecution() 1214 loadi 4[PC], t0 1215 loadi 8[PC], t1 1216 loadConstantOrVariablePayload(t1, Int32Tag, t2, .opUnsignedSlow) 1076 doubleOperation) 1077 end 1078 1079 binaryOp(add, OpAdd, 1080 macro (left, right, slow) baddio left, right, slow end, 1081 macro (left, right) addd left, right end) 1082 1083 1084 binaryOpCustomStore(mul, OpMul, 1085 macro (int32Tag, left, right, slow, index) 1086 const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant. 1087 move right, scratch 1088 bmulio left, scratch, slow 1089 btinz scratch, .done 1090 bilt left, 0, slow 1091 bilt right, 0, slow 1092 .done: 1093 storei Int32Tag, TagOffset[cfr, index, 8] 1094 storei scratch, PayloadOffset[cfr, index, 8] 1095 end, 1096 macro (left, right) muld left, right end) 1097 1098 1099 binaryOp(sub, OpSub, 1100 macro (left, right, slow) bsubio left, right, slow end, 1101 macro (left, right) subd left, right end) 1102 1103 1104 binaryOpCustomStore(div, OpDiv, 1105 macro (int32Tag, left, right, slow, index) 1106 ci2d left, ft0 1107 ci2d right, ft1 1108 divd ft0, ft1 1109 bcd2i ft1, right, .notInt 1110 storei int32Tag, TagOffset[cfr, index, 8] 1111 storei right, PayloadOffset[cfr, index, 8] 1112 jmp .done 1113 .notInt: 1114 stored ft1, [cfr, index, 8] 1115 .done: 1116 end, 1117 macro (left, right) divd left, right end) 1118 1119 1120 llintOpWithReturn(op_unsigned, OpUnsigned, macro (size, get, dispatch, return) 1121 get(operand, t1) 1122 loadConstantOrVariablePayload(size, t1, Int32Tag, t2, .opUnsignedSlow) 1217 1123 bilt t2, 0, .opUnsignedSlow 1218 storei t2, PayloadOffset[cfr, t0, 8] 1219 storei Int32Tag, TagOffset[cfr, t0, 8] 1220 dispatch(constexpr op_unsigned_length) 1124 return (Int32Tag, t2) 1221 1125 .opUnsignedSlow: 1222 1126 callSlowPath(_slow_path_unsigned) 1223 dispatch(constexpr op_unsigned_length) 1224 1225 1226 _llint_op_bitand: 1227 traceExecution() 1228 bitOpProfiled( 1229 macro (left, right) andi left, right end, 1230 _slow_path_bitand, 1231 constexpr op_bitand_length) 1232 1233 1234 _llint_op_bitxor: 1235 traceExecution() 1236 bitOp( 1237 macro (left, right) xori left, right end, 1238 _slow_path_bitxor, 1239 constexpr op_bitxor_length) 1240 1241 1242 _llint_op_bitor: 1243 traceExecution() 1244 bitOpProfiled( 1245 macro (left, right) ori left, right end, 1246 _slow_path_bitor, 1247 constexpr op_bitor_length) 1248 1249 1250 _llint_op_overrides_has_instance: 1251 traceExecution() 1252 1253 loadisFromStruct(OpOverridesHasInstance::m_dst, t3) 1127 dispatch() 1128 end) 1129 1130 1131 macro commonBitOp(opKind, name, op, operation) 1132 opKind(op_%name%, op, macro (size, get, dispatch, return) 1133 get(rhs, t2) 1134 get(lhs, t0) 1135 loadConstantOrVariable(size, t2, t3, t1) 1136 loadConstantOrVariable2Reg(size, t0, t2, t0) 1137 bineq t3, Int32Tag, .slow 1138 bineq t2, Int32Tag, .slow 1139 operation(t1, t0) 1140 return (t3, t0) 1141 1142 .slow: 1143 callSlowPath(_slow_path_%name%) 1144 dispatch() 1145 end) 1146 end 1147 1148 
macro bitOp(name, op, operation) 1149 commonBitOp(llintOpWithReturn, name, op, operation) 1150 end 1151 1152 macro bitOpProfiled(name, op, operation) 1153 commonBitOp(llintOpWithProfile, name, op, operation) 1154 end 1155 1156 1157 bitOp(lshift, OpLshift, 1158 macro (left, right) lshifti left, right end) 1159 1160 1161 bitOp(rshift, OpRshift, 1162 macro (left, right) rshifti left, right end) 1163 1164 1165 bitOp(urshift, OpUrshift, 1166 macro (left, right) urshifti left, right end) 1167 1168 bitOp(bitxor, OpBitxor, 1169 macro (left, right) xori left, right end) 1170 1171 bitOpProfiled(bitand, OpBitand, 1172 macro (left, right) andi left, right end) 1173 1174 bitOpProfiled(bitor, OpBitor, 1175 macro (left, right) ori left, right end) 1176 1177 1178 llintOp(op_overrides_has_instance, OpOverridesHasInstance, macro (size, get, dispatch) 1179 get(dst, t3) 1254 1180 storei BooleanTag, TagOffset[cfr, t3, 8] 1255 1181 1256 1182 # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance] 1257 loadisFromStruct(OpOverridesHasInstance::m_hasInstanceValue, t0)1258 loadConstantOrVariablePayload( t0, CellTag, t2, .opOverrideshasInstanceValueNotCell)1259 loadConstantOrVariable( t0, t1, t2)1183 get(hasInstanceValue, t0) 1184 loadConstantOrVariablePayload(size, t0, CellTag, t2, .opOverrideshasInstanceValueNotCell) 1185 loadConstantOrVariable(size, t0, t1, t2) 1260 1186 bineq t1, CellTag, .opOverrideshasInstanceValueNotCell 1261 1187 … … 1267 1193 1268 1194 # We know the constructor is a cell. 1269 loadisFromStruct(OpOverridesHasInstance::m_constructor, t0)1270 loadConstantOrVariablePayloadUnchecked( t0, t1)1195 get(constructor, t0) 1196 loadConstantOrVariablePayloadUnchecked(size, t0, t1) 1271 1197 tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0 1272 1198 storei t0, PayloadOffset[cfr, t3, 8] 1273 dispatch( constexpr op_overrides_has_instance_length)1199 dispatch() 1274 1200 1275 1201 .opOverrideshasInstanceValueNotCell: 1276 1202 .opOverrideshasInstanceValueNotDefault: 1277 1203 storei 1, PayloadOffset[cfr, t3, 8] 1278 dispatch(constexpr op_overrides_has_instance_length) 1279 1280 _llint_op_instanceof_custom: 1281 traceExecution() 1282 callSlowPath(_llint_slow_path_instanceof_custom) 1283 dispatch(constexpr op_instanceof_custom_length) 1284 1285 1286 _llint_op_is_empty: 1287 traceExecution() 1288 loadi 8[PC], t1 1289 loadi 4[PC], t0 1290 loadConstantOrVariable(t1, t2, t3) 1204 dispatch() 1205 end) 1206 1207 1208 llintOpWithReturn(op_is_empty, OpIsEmpty, macro (size, get, dispatch, return) 1209 get(operand, t1) 1210 loadConstantOrVariable(size, t1, t2, t3) 1291 1211 cieq t2, EmptyValueTag, t3 1292 storei BooleanTag, TagOffset[cfr, t0, 8] 1293 storei t3, PayloadOffset[cfr, t0, 8] 1294 dispatch(constexpr op_is_empty_length) 1295 1296 1297 _llint_op_is_undefined: 1298 traceExecution() 1299 loadi 8[PC], t1 1300 loadi 4[PC], t0 1301 loadConstantOrVariable(t1, t2, t3) 1302 storei BooleanTag, TagOffset[cfr, t0, 8] 1212 return(BooleanTag, t3) 1213 end) 1214 1215 1216 llintOpWithReturn(op_is_undefined, OpIsUndefined, macro (size, get, dispatch, return) 1217 get(operand, t1) 1218 loadConstantOrVariable(size, t1, t2, t3) 1303 1219 bieq t2, CellTag, .opIsUndefinedCell 1304 1220 cieq t2, UndefinedTag, t3 1305 storei t3, PayloadOffset[cfr, t0, 8] 1306 dispatch(constexpr op_is_undefined_length) 1221 return(BooleanTag, t3) 1307 1222 .opIsUndefinedCell: 1308 1223 btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined 1309 move 0, t1 1310 storei t1, 
PayloadOffset[cfr, t0, 8] 1311 dispatch(constexpr op_is_undefined_length) 1224 return(BooleanTag, 0) 1312 1225 .opIsUndefinedMasqueradesAsUndefined: 1313 1226 loadp JSCell::m_structureID[t3], t1 … … 1315 1228 loadp CodeBlock::m_globalObject[t3], t3 1316 1229 cpeq Structure::m_globalObject[t1], t3, t1 1317 storei t1, PayloadOffset[cfr, t0, 8] 1318 dispatch(constexpr op_is_undefined_length) 1319 1320 1321 _llint_op_is_boolean: 1322 traceExecution() 1323 loadi 8[PC], t1 1324 loadi 4[PC], t2 1325 loadConstantOrVariableTag(t1, t0) 1230 return(BooleanTag, t1) 1231 end) 1232 1233 1234 llintOpWithReturn(op_is_boolean, OpIsBoolean, macro (size, get, dispatch, return) 1235 get(operand, t1) 1236 loadConstantOrVariableTag(size, t1, t0) 1326 1237 cieq t0, BooleanTag, t0 1327 storei BooleanTag, TagOffset[cfr, t2, 8] 1328 storei t0, PayloadOffset[cfr, t2, 8] 1329 dispatch(constexpr op_is_boolean_length) 1330 1331 1332 _llint_op_is_number: 1333 traceExecution() 1334 loadi 8[PC], t1 1335 loadi 4[PC], t2 1336 loadConstantOrVariableTag(t1, t0) 1337 storei BooleanTag, TagOffset[cfr, t2, 8] 1238 return(BooleanTag, t0) 1239 end) 1240 1241 1242 llintOpWithReturn(op_is_number, OpIsNumber, macro (size, get, dispatch, return) 1243 get(operand, t1) 1244 loadConstantOrVariableTag(size, t1, t0) 1338 1245 addi 1, t0 1339 1246 cib t0, LowestTag + 1, t1 1340 storei t1, PayloadOffset[cfr, t2, 8] 1341 dispatch(constexpr op_is_number_length) 1342 1343 1344 _llint_op_is_cell_with_type: 1345 traceExecution() 1346 loadi 8[PC], t1 1347 loadi 4[PC], t2 1348 loadConstantOrVariable(t1, t0, t3) 1349 storei BooleanTag, TagOffset[cfr, t2, 8] 1247 return(BooleanTag, t1) 1248 end) 1249 1250 1251 llintOpWithReturn(op_is_cell_with_type, OpIsCellWithType, macro (size, get, dispatch, return) 1252 get(operand, t1) 1253 loadConstantOrVariable(size, t1, t0, t3) 1350 1254 bineq t0, CellTag, .notCellCase 1351 loadi 12[PC], t01255 get(type, t0) 1352 1256 cbeq JSCell::m_type[t3], t0, t1 1353 storei t1, PayloadOffset[cfr, t2, 8] 1354 dispatch(constexpr op_is_cell_with_type_length) 1257 return(BooleanTag, t1) 1355 1258 .notCellCase: 1356 storep 0, PayloadOffset[cfr, t2, 8] 1357 dispatch(constexpr op_is_cell_with_type_length) 1358 1359 1360 _llint_op_is_object: 1361 traceExecution() 1362 loadi 8[PC], t1 1363 loadi 4[PC], t2 1364 loadConstantOrVariable(t1, t0, t3) 1365 storei BooleanTag, TagOffset[cfr, t2, 8] 1259 return(BooleanTag, 0) 1260 end) 1261 1262 1263 llintOpWithReturn(op_is_object, OpIsObject, macro (size, get, dispatch, return) 1264 get(operand, t1) 1265 loadConstantOrVariable(size, t1, t0, t3) 1366 1266 bineq t0, CellTag, .opIsObjectNotCell 1367 1267 cbaeq JSCell::m_type[t3], ObjectType, t1 1368 storei t1, PayloadOffset[cfr, t2, 8] 1369 dispatch(constexpr op_is_object_length) 1268 return(BooleanTag, t1) 1370 1269 .opIsObjectNotCell: 1371 storep 0, PayloadOffset[cfr, t2, 8]1372 dispatch(constexpr op_is_object_length)1270 return(BooleanTag, 0) 1271 end) 1373 1272 1374 1273 … … 1414 1313 # execution counter hits zero. 
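
The `op_is_number` fast path above packs both number representations into one unsigned compare: under the JSValue32_64 encoding (Int32Tag at the top of the tag space, every double's high word strictly below LowestTag), `addi 1` wraps the int32 tag to zero, so a single `cib t0, LowestTag + 1, t1` covers int32 and double at once. A small C++ check of that trick, using assumed tag constants:

    #include <cassert>
    #include <cstdint>

    // Assumed JSValue32_64 tag values for this sketch.
    constexpr uint32_t Int32Tag  = 0xffffffffu;
    constexpr uint32_t CellTag   = 0xfffffffbu;
    constexpr uint32_t LowestTag = 0xfffffff9u;

    bool isNumberTag(uint32_t tag)
    {
        // addi 1, tag; cib tag, LowestTag + 1: true for Int32Tag (wraps
        // to 0) and for any tag < LowestTag (a double's high word).
        return tag + 1 < LowestTag + 1;
    }

    int main()
    {
        assert(isNumberTag(Int32Tag));
        assert(isNumberTag(0x40000000)); // plausible double high word
        assert(!isNumberTag(CellTag));
        assert(!isNumberTag(LowestTag)); // boundary stays non-number
    }
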
1415 1314 1416 _llint_op_get_by_id_direct: 1417 traceExecution()1418 loadi 8[PC], t01419 loadi 16[PC], t11420 loadConstantOrVariablePayload( t0, CellTag, t3, .opGetByIdDirectSlow)1421 loadi 20[PC], t21315 llintOpWithMetadata(op_get_by_id_direct, OpGetByIdDirect, macro (size, get, dispatch, metadata, return) 1316 metadata(t5, t0) 1317 get(base, t0) 1318 loadi OpGetByIdDirect::Metadata::structure[t5], t1 1319 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opGetByIdDirectSlow) 1320 loadi OpGetByIdDirect::Metadata::offset[t5], t2 1422 1321 bineq JSCell::m_structureID[t3], t1, .opGetByIdDirectSlow 1423 1322 loadPropertyAtVariableOffset(t2, t3, t0, t1) 1424 loadi 4[PC], t2 1425 storei t0, TagOffset[cfr, t2, 8] 1426 storei t1, PayloadOffset[cfr, t2, 8] 1427 valueProfile(t0, t1, 24, t2) 1428 dispatch(constexpr op_get_by_id_direct_length) 1323 valueProfile(OpGetByIdDirect, t5, t0, t1) 1324 return(t0, t1) 1429 1325 1430 1326 .opGetByIdDirectSlow: 1431 1327 callSlowPath(_llint_slow_path_get_by_id_direct) 1432 dispatch(constexpr op_get_by_id_direct_length) 1433 1434 1435 _llint_op_get_by_id: 1436 traceExecution() 1437 loadi 8[PC], t0 1438 loadi 16[PC], t1 1439 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow) 1440 loadi 20[PC], t2 1328 dispatch() 1329 end) 1330 1331 1332 llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadata, return) 1333 metadata(t5, t0) 1334 loadb OpGetById::Metadata::mode[t5], t1 1335 get(base, t0) 1336 1337 .opGetByIdProtoLoad: 1338 bbneq t1, constexpr GetByIdMode::ProtoLoad, .opGetByIdArrayLength 1339 loadi OpGetById::Metadata::modeMetadata.protoLoadMode.structure[t5], t1 1340 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opGetByIdSlow) 1341 loadi OpGetById::Metadata::modeMetadata.protoLoadMode.cachedOffset[t5], t2 1342 bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow 1343 loadp OpGetById::Metadata::modeMetadata.protoLoadMode.cachedSlot[t5], t3 1344 loadPropertyAtVariableOffset(t2, t3, t0, t1) 1345 valueProfile(OpGetById, t5, t0, t1) 1346 return(t0, t1) 1347 1348 .opGetByIdArrayLength: 1349 bbneq t1, constexpr GetByIdMode::ArrayLength, .opGetByIdUnset 1350 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opGetByIdSlow) 1351 move t3, t2 1352 arrayProfile(OpGetById::Metadata::modeMetadata.arrayLengthMode.arrayProfile, t2, t5, t0) 1353 btiz t2, IsArray, .opGetByIdSlow 1354 btiz t2, IndexingShapeMask, .opGetByIdSlow 1355 loadp JSObject::m_butterfly[t3], t0 1356 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 1357 bilt t0, 0, .opGetByIdSlow 1358 valueProfile(OpGetById, t5, Int32Tag, t0) 1359 return(Int32Tag, t0) 1360 1361 .opGetByIdUnset: 1362 bbneq t1, constexpr GetByIdMode::Unset, .opGetByIdDefault 1363 loadi OpGetById::Metadata::modeMetadata.unsetMode.structure[t5], t1 1364 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opGetByIdSlow) 1365 bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow 1366 valueProfile(OpGetById, t5, UndefinedTag, 0) 1367 return(UndefinedTag, 0) 1368 1369 .opGetByIdDefault: 1370 loadi OpGetById::Metadata::modeMetadata.defaultMode.structure[t5], t1 1371 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opGetByIdSlow) 1372 loadis OpGetById::Metadata::modeMetadata.defaultMode.cachedOffset[t5], t2 1441 1373 bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow 1442 1374 loadPropertyAtVariableOffset(t2, t3, t0, t1) 1443 loadi 4[PC], t2 1444 storei t0, TagOffset[cfr, t2, 8] 1445 storei t1, PayloadOffset[cfr, t2, 8] 1446 valueProfile(t0, t1, 32, t2) 1447 
dispatch(constexpr op_get_by_id_length) 1375 valueProfile(OpGetById, t5, t0, t1) 1376 return(t0, t1) 1448 1377 1449 1378 .opGetByIdSlow: 1450 1379 callSlowPath(_llint_slow_path_get_by_id) 1451 dispatch(constexpr op_get_by_id_length) 1452 1453 1454 _llint_op_get_by_id_proto_load: 1455 traceExecution() 1456 loadi 8[PC], t0 1457 loadi 16[PC], t1 1458 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdProtoSlow) 1459 loadi 20[PC], t2 1460 bineq JSCell::m_structureID[t3], t1, .opGetByIdProtoSlow 1461 loadpFromInstruction(6, t3) 1462 loadPropertyAtVariableOffset(t2, t3, t0, t1) 1463 loadi 4[PC], t2 1464 storei t0, TagOffset[cfr, t2, 8] 1465 storei t1, PayloadOffset[cfr, t2, 8] 1466 valueProfile(t0, t1, 32, t2) 1467 dispatch(constexpr op_get_by_id_proto_load_length) 1468 1469 .opGetByIdProtoSlow: 1470 callSlowPath(_llint_slow_path_get_by_id) 1471 dispatch(constexpr op_get_by_id_proto_load_length) 1472 1473 1474 _llint_op_get_by_id_unset: 1475 traceExecution() 1476 loadi 8[PC], t0 1477 loadi 16[PC], t1 1478 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdUnsetSlow) 1479 bineq JSCell::m_structureID[t3], t1, .opGetByIdUnsetSlow 1480 loadi 4[PC], t2 1481 storei UndefinedTag, TagOffset[cfr, t2, 8] 1482 storei 0, PayloadOffset[cfr, t2, 8] 1483 valueProfile(UndefinedTag, 0, 32, t2) 1484 dispatch(constexpr op_get_by_id_unset_length) 1485 1486 .opGetByIdUnsetSlow: 1487 callSlowPath(_llint_slow_path_get_by_id) 1488 dispatch(constexpr op_get_by_id_unset_length) 1489 1490 1491 _llint_op_get_array_length: 1492 traceExecution() 1493 loadi 8[PC], t0 1494 loadp 16[PC], t1 1495 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow) 1496 move t3, t2 1497 arrayProfile(t2, t1, t0) 1498 btiz t2, IsArray, .opGetArrayLengthSlow 1499 btiz t2, IndexingShapeMask, .opGetArrayLengthSlow 1500 loadi 4[PC], t1 1501 loadp JSObject::m_butterfly[t3], t0 1502 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 1503 bilt t0, 0, .opGetArrayLengthSlow 1504 valueProfile(Int32Tag, t0, 32, t2) 1505 storep t0, PayloadOffset[cfr, t1, 8] 1506 storep Int32Tag, TagOffset[cfr, t1, 8] 1507 dispatch(constexpr op_get_array_length_length) 1508 1509 .opGetArrayLengthSlow: 1510 callSlowPath(_llint_slow_path_get_by_id) 1511 dispatch(constexpr op_get_array_length_length) 1512 1513 1514 _llint_op_put_by_id: 1515 traceExecution() 1516 writeBarrierOnOperands(1, 3) 1517 loadi 4[PC], t3 1518 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow) 1380 dispatch() 1381 end) 1382 1383 1384 llintOpWithMetadata(op_put_by_id, OpPutById, macro (size, get, dispatch, metadata, return) 1385 writeBarrierOnOperands(size, get, base, value) 1386 metadata(t5, t3) 1387 get(base, t3) 1388 loadConstantOrVariablePayload(size, t3, CellTag, t0, .opPutByIdSlow) 1519 1389 loadi JSCell::m_structureID[t0], t2 1520 bineq t2, 16[PC], .opPutByIdSlow1390 bineq t2, OpPutById::Metadata::oldStructure[t5], .opPutByIdSlow 1521 1391 1522 1392 # At this point, we have: 1393 # t5 -> metadata 1523 1394 # t2 -> currentStructureID 1524 1395 # t0 -> object base 1525 1396 # We will lose currentStructureID in the shenanigans below. 
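
With the new format, the four get_by_id caching strategies that used to be separate, patchable opcodes (`op_get_by_id_proto_load`, `op_get_by_id_unset`, `op_get_array_length`) collapse into one op whose metadata carries a mode byte plus a union of per-mode cache state, as the chain of `bbneq t1, constexpr GetByIdMode::...` checks above shows. A C++ sketch of what such a metadata record could look like; the field names mirror the asm, but the exact types and layout are assumptions:

    #include <cstdint>

    enum class GetByIdMode : uint8_t { Default, Unset, ProtoLoad, ArrayLength };

    struct GetByIdMetadata {
        GetByIdMode mode;
        union {
            struct { uint32_t structure; int32_t cachedOffset; } defaultMode;
            struct { uint32_t structure; } unsetMode;
            struct { uint32_t structure; int32_t cachedOffset; void* cachedSlot; } protoLoadMode;
            struct { void* arrayProfile; } arrayLengthMode;
        } modeMetadata;
    };

    int main()
    {
        GetByIdMetadata m{};
        m.mode = GetByIdMode::Unset;              // the slow path flips modes in
        m.modeMetadata.unsetMode.structure = 42;  // place; no bytecode rewriting
        return m.modeMetadata.unsetMode.structure == 42 ? 0 : 1;
    }

Because the cache now lives in side metadata rather than in the instruction stream, the bytecode itself can stay immutable after generation.
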
1526 1397 1527 loadi 12[PC], t11528 loadConstantOrVariable( t1, t2, t3)1529 loadi 32[PC], t11398 get(value, t1) 1399 loadConstantOrVariable(size, t1, t2, t3) 1400 loadi OpPutById::Metadata::flags[t5], t1 1530 1401 1531 1402 # At this point, we have: … … 1602 1473 1603 1474 .opPutByIdDoneCheckingTypes: 1604 loadi 24[PC], t11475 loadi OpPutById::Metadata::newStructure[t5], t1 1605 1476 1606 1477 btiz t1, .opPutByIdNotTransition … … 1608 1479 # This is the transition case. t1 holds the new Structure*. If we have a chain, we need to 1609 1480 # check it. t0 is the base. We may clobber t1 to use it as scratch. 1610 loadp 28[PC], t31481 loadp OpPutById::Metadata::structureChain[t5], t3 1611 1482 btpz t3, .opPutByIdTransitionDirect 1612 1483 1613 loadi 16[PC], t2 # Need old structure again.1484 loadi OpPutById::Metadata::oldStructure[t5], t2 # Need old structure again. 1614 1485 loadp StructureChain::m_vector[t3], t3 1615 1486 assert(macro (ok) btpnz t3, ok end) … … 1625 1496 1626 1497 .opPutByIdTransitionChainDone: 1627 loadi 24[PC], t11498 loadi OpPutById::Metadata::newStructure[t5], t1 1628 1499 1629 1500 .opPutByIdTransitionDirect: 1630 1501 storei t1, JSCell::m_structureID[t0] 1631 loadi 12[PC], t11632 loadConstantOrVariable( t1, t2, t3)1633 loadi 20[PC], t11502 get(value, t1) 1503 loadConstantOrVariable(size, t1, t2, t3) 1504 loadi OpPutById::Metadata::offset[t5], t1 1634 1505 storePropertyAtVariableOffset(t1, t0, t2, t3) 1635 writeBarrierOnOperand( 1)1636 dispatch( constexpr op_put_by_id_length)1506 writeBarrierOnOperand(size, get, base) 1507 dispatch() 1637 1508 1638 1509 .opPutByIdNotTransition: 1639 1510 # The only thing live right now is t0, which holds the base. 1640 loadi 12[PC], t11641 loadConstantOrVariable( t1, t2, t3)1642 loadi 20[PC], t11511 get(value, t1) 1512 loadConstantOrVariable(size, t1, t2, t3) 1513 loadi OpPutById::Metadata::offset[t5], t1 1643 1514 storePropertyAtVariableOffset(t1, t0, t2, t3) 1644 dispatch( constexpr op_put_by_id_length)1515 dispatch() 1645 1516 1646 1517 .opPutByIdSlow: 1647 1518 callSlowPath(_llint_slow_path_put_by_id) 1648 dispatch(constexpr op_put_by_id_length) 1649 1650 1651 _llint_op_get_by_val: 1652 traceExecution() 1653 loadi 8[PC], t2 1654 loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow) 1519 dispatch() 1520 end) 1521 1522 1523 llintOpWithMetadata(op_get_by_val, OpGetByVal, macro (size, get, dispatch, metadata, return) 1524 metadata(t5, t2) 1525 get(base, t2) 1526 loadConstantOrVariablePayload(size, t2, CellTag, t0, .opGetByValSlow) 1655 1527 move t0, t2 1656 loadp 16[PC], t3 1657 arrayProfile(t2, t3, t1) 1658 loadi 12[PC], t3 1659 loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow) 1528 arrayProfile(OpGetByVal::Metadata::arrayProfile, t2, t5, t1) 1529 get(property, t3) 1530 loadConstantOrVariablePayload(size, t3, Int32Tag, t1, .opGetByValSlow) 1660 1531 loadp JSObject::m_butterfly[t0], t3 1661 1532 andi IndexingShapeMask, t2 … … 1676 1547 # FIXME: This could be massively optimized. 
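
The get_by_val fast path above boils down to a bounds check against the butterfly's public length followed by a shape-specific slot read, with holes (EmptyValueTag, or NaN in the double shape) bouncing to the slow path. A compact C++ rendering of the contiguous-shape case, with simplified, assumed layouts (`butterfly.size()` stands in for `IndexingHeader::u.lengths.publicLength`):

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct Slot { uint32_t tag; uint32_t payload; };
    constexpr uint32_t EmptyValueTag = 0xfffffffau; // assumed value

    std::optional<Slot> getByValContiguous(const std::vector<Slot>& butterfly,
                                           uint32_t index)
    {
        if (index >= butterfly.size())
            return std::nullopt;      // biaeq index, publicLength -> slow path
        Slot s = butterfly[index];
        if (s.tag == EmptyValueTag)
            return std::nullopt;      // hole: take the slow path too
        return s;                     // tag/payload stored to dst, then profiled
    }

    int main()
    {
        std::vector<Slot> a = { { 0xffffffffu, 5 } }; // one boxed int32
        return getByValContiguous(a, 0) ? 0 : 1;
    }
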
1677 1548 fd2ii ft0, t1, t2 1678 loadi 4[PC], t01549 get(dst, t0) 1679 1550 jmp .opGetByValNotEmpty 1680 1551 … … 1687 1558 1688 1559 .opGetByValDone: 1689 loadi 4[PC], t01560 get(dst, t0) 1690 1561 bieq t2, EmptyValueTag, .opGetByValSlow 1691 1562 .opGetByValNotEmpty: 1692 1563 storei t2, TagOffset[cfr, t0, 8] 1693 1564 storei t1, PayloadOffset[cfr, t0, 8] 1694 valueProfile( t2, t1, 20, t0)1695 dispatch( constexpr op_get_by_val_length)1565 valueProfile(OpGetByVal, t5, t2, t1) 1566 dispatch() 1696 1567 1697 1568 .opGetByValSlow: 1698 1569 callSlowPath(_llint_slow_path_get_by_val) 1699 dispatch(constexpr op_get_by_val_length) 1700 1701 1702 macro contiguousPutByVal(storeCallback) 1703 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds 1704 .storeResult: 1705 loadi 12[PC], t2 1706 storeCallback(t2, t1, t0, t3) 1707 dispatch(5) 1708 1709 .outOfBounds: 1710 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1711 loadp 16[PC], t2 1712 storeb 1, ArrayProfile::m_mayStoreToHole[t2] 1713 addi 1, t3, t2 1714 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1715 jmp .storeResult 1716 end 1717 1718 macro putByVal(slowPath) 1719 traceExecution() 1720 writeBarrierOnOperands(1, 3) 1721 loadi 4[PC], t0 1722 loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow) 1723 move t1, t2 1724 loadp 16[PC], t3 1725 arrayProfile(t2, t3, t0) 1726 loadi 8[PC], t0 1727 loadConstantOrVariablePayload(t0, Int32Tag, t3, .opPutByValSlow) 1728 loadp JSObject::m_butterfly[t1], t0 1729 btinz t2, CopyOnWrite, .opPutByValSlow 1730 andi IndexingShapeMask, t2 1731 bineq t2, Int32Shape, .opPutByValNotInt32 1732 contiguousPutByVal( 1733 macro (operand, scratch, base, index) 1734 loadConstantOrVariablePayload(operand, Int32Tag, scratch, .opPutByValSlow) 1735 storei Int32Tag, TagOffset[base, index, 8] 1736 storei scratch, PayloadOffset[base, index, 8] 1737 end) 1738 1739 .opPutByValNotInt32: 1740 bineq t2, DoubleShape, .opPutByValNotDouble 1741 contiguousPutByVal( 1742 macro (operand, scratch, base, index) 1743 const tag = scratch 1744 const payload = operand 1745 loadConstantOrVariable2Reg(operand, tag, payload) 1746 bineq tag, Int32Tag, .notInt 1747 ci2d payload, ft0 1748 jmp .ready 1749 .notInt: 1750 fii2d payload, tag, ft0 1751 bdnequn ft0, ft0, .opPutByValSlow 1752 .ready: 1753 stored ft0, [base, index, 8] 1754 end) 1755 1756 .opPutByValNotDouble: 1757 bineq t2, ContiguousShape, .opPutByValNotContiguous 1758 contiguousPutByVal( 1759 macro (operand, scratch, base, index) 1760 const tag = scratch 1761 const payload = operand 1762 loadConstantOrVariable2Reg(operand, tag, payload) 1763 storei tag, TagOffset[base, index, 8] 1764 storei payload, PayloadOffset[base, index, 8] 1765 end) 1766 1767 .opPutByValNotContiguous: 1768 bineq t2, ArrayStorageShape, .opPutByValSlow 1769 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1770 bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty 1771 .opPutByValArrayStorageStoreResult: 1772 loadi 12[PC], t2 1773 loadConstantOrVariable2Reg(t2, t1, t2) 1774 storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8] 1775 storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8] 1776 dispatch(5) 1777 1778 .opPutByValArrayStorageEmpty: 1779 loadp 16[PC], t1 1780 storeb 1, ArrayProfile::m_mayStoreToHole[t1] 1781 addi 1, ArrayStorage::m_numValuesInVector[t0] 1782 bib t3, -sizeof 
IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult 1783 addi 1, t3, t1 1784 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1785 jmp .opPutByValArrayStorageStoreResult 1786 1787 .opPutByValOutOfBounds: 1788 loadpFromInstruction(4, t0) 1789 storeb 1, ArrayProfile::m_outOfBounds[t0] 1790 .opPutByValSlow: 1791 callSlowPath(slowPath) 1792 dispatch(5) 1793 end 1794 1795 _llint_op_put_by_val: 1796 putByVal(_llint_slow_path_put_by_val) 1797 1798 _llint_op_put_by_val_direct: 1799 putByVal(_llint_slow_path_put_by_val_direct) 1800 1801 _llint_op_jmp: 1802 traceExecution() 1803 dispatchBranch(4[PC]) 1804 1805 1806 macro jumpTrueOrFalse(conditionOp, slow) 1807 loadi 4[PC], t1 1808 loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow) 1809 conditionOp(t0, .target) 1810 dispatch(3) 1811 1812 .target: 1813 dispatchBranch(8[PC]) 1814 1815 .slow: 1816 callSlowPath(slow) 1817 dispatch(0) 1818 end 1819 1820 1821 macro equalNull(cellHandler, immediateHandler) 1822 loadi 4[PC], t0 1823 assertNotConstant(t0) 1824 loadi TagOffset[cfr, t0, 8], t1 1825 loadi PayloadOffset[cfr, t0, 8], t0 1826 bineq t1, CellTag, .immediate 1827 loadp JSCell::m_structureID[t0], t2 1828 cellHandler(t2, JSCell::m_flags[t0], .target) 1829 dispatch(3) 1830 1831 .target: 1832 dispatchBranch(8[PC]) 1833 1834 .immediate: 1835 ori 1, t1 1836 immediateHandler(t1, .target) 1837 dispatch(3) 1838 end 1839 1840 _llint_op_jeq_null: 1841 traceExecution() 1842 equalNull( 1843 macro (structure, value, target) 1844 btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined 1845 loadp CodeBlock[cfr], t0 1846 loadp CodeBlock::m_globalObject[t0], t0 1847 bpeq Structure::m_globalObject[structure], t0, target 1848 .opJeqNullNotMasqueradesAsUndefined: 1849 end, 1850 macro (value, target) bieq value, NullTag, target end) 1570 dispatch() 1571 end) 1572 1573 1574 macro putByValOp(name, op) 1575 llintOpWithMetadata(op_%name%, op, macro (size, get, dispatch, metadata, return) 1576 macro contiguousPutByVal(storeCallback) 1577 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds 1578 .storeResult: 1579 get(value, t2) 1580 storeCallback(t2, t1, t0, t3) 1581 dispatch() 1582 1583 .outOfBounds: 1584 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1585 storeb 1, %op%::Metadata::arrayProfile.m_mayStoreToHole[t5] 1586 addi 1, t3, t2 1587 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1588 jmp .storeResult 1589 end 1590 1591 writeBarrierOnOperands(size, get, base, value) 1592 metadata(t5, t0) 1593 get(base, t0) 1594 loadConstantOrVariablePayload(size, t0, CellTag, t1, .opPutByValSlow) 1595 move t1, t2 1596 arrayProfile(%op%::Metadata::arrayProfile, t2, t5, t0) 1597 get(property, t0) 1598 loadConstantOrVariablePayload(size, t0, Int32Tag, t3, .opPutByValSlow) 1599 loadp JSObject::m_butterfly[t1], t0 1600 btinz t2, CopyOnWrite, .opPutByValSlow 1601 andi IndexingShapeMask, t2 1602 bineq t2, Int32Shape, .opPutByValNotInt32 1603 contiguousPutByVal( 1604 macro (operand, scratch, base, index) 1605 loadConstantOrVariablePayload(size, operand, Int32Tag, scratch, .opPutByValSlow) 1606 storei Int32Tag, TagOffset[base, index, 8] 1607 storei scratch, PayloadOffset[base, index, 8] 1608 end) 1609 1610 .opPutByValNotInt32: 1611 bineq t2, DoubleShape, .opPutByValNotDouble 1612 contiguousPutByVal( 1613 macro (operand, scratch, base, index) 1614 const tag = scratch 1615 const 
payload = operand 1616 loadConstantOrVariable2Reg(size, operand, tag, payload) 1617 bineq tag, Int32Tag, .notInt 1618 ci2d payload, ft0 1619 jmp .ready 1620 .notInt: 1621 fii2d payload, tag, ft0 1622 bdnequn ft0, ft0, .opPutByValSlow 1623 .ready: 1624 stored ft0, [base, index, 8] 1625 end) 1626 1627 .opPutByValNotDouble: 1628 bineq t2, ContiguousShape, .opPutByValNotContiguous 1629 contiguousPutByVal( 1630 macro (operand, scratch, base, index) 1631 const tag = scratch 1632 const payload = operand 1633 loadConstantOrVariable2Reg(size, operand, tag, payload) 1634 storei tag, TagOffset[base, index, 8] 1635 storei payload, PayloadOffset[base, index, 8] 1636 end) 1637 1638 .opPutByValNotContiguous: 1639 bineq t2, ArrayStorageShape, .opPutByValSlow 1640 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1641 bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty 1642 .opPutByValArrayStorageStoreResult: 1643 get(value, t2) 1644 loadConstantOrVariable2Reg(size, t2, t1, t2) 1645 storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8] 1646 storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8] 1647 dispatch() 1648 1649 .opPutByValArrayStorageEmpty: 1650 storeb 1, %op%::Metadata::arrayProfile.m_mayStoreToHole[t5] 1651 addi 1, ArrayStorage::m_numValuesInVector[t0] 1652 bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult 1653 addi 1, t3, t1 1654 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1655 jmp .opPutByValArrayStorageStoreResult 1656 1657 .opPutByValOutOfBounds: 1658 storeb 1, %op%::Metadata::arrayProfile.m_outOfBounds[t5] 1659 .opPutByValSlow: 1660 callSlowPath(_llint_slow_path_%name%) 1661 dispatch() 1662 end) 1663 end 1664 1665 1666 putByValOp(put_by_val, OpPutByVal) 1667 1668 putByValOp(put_by_val_direct, OpPutByValDirect) 1669 1670 1671 macro llintJumpTrueOrFalseOp(name, op, conditionOp) 1672 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1673 get(condition, t1) 1674 loadConstantOrVariablePayload(size, t1, BooleanTag, t0, .slow) 1675 conditionOp(t0, .target) 1676 dispatch() 1677 1678 .target: 1679 jump(target) 1680 1681 .slow: 1682 callSlowPath(_llint_slow_path_%name%) 1683 nextInstruction() 1684 end) 1685 end 1686 1687 1688 macro equalNullJumpOp(name, op, cellHandler, immediateHandler) 1689 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1690 get(value, t0) 1691 assertNotConstant(size, t0) 1692 loadi TagOffset[cfr, t0, 8], t1 1693 loadi PayloadOffset[cfr, t0, 8], t0 1694 bineq t1, CellTag, .immediate 1695 loadp JSCell::m_structureID[t0], t2 1696 cellHandler(t2, JSCell::m_flags[t0], .target) 1697 dispatch() 1698 1699 .target: 1700 jump(target) 1701 1702 .immediate: 1703 ori 1, t1 1704 immediateHandler(t1, .target) 1705 dispatch() 1706 end) 1707 end 1708 1709 equalNullJumpOp(jeq_null, OpJeqNull, 1710 macro (structure, value, target) 1711 btbz value, MasqueradesAsUndefined, .opJeqNullNotMasqueradesAsUndefined 1712 loadp CodeBlock[cfr], t0 1713 loadp CodeBlock::m_globalObject[t0], t0 1714 bpeq Structure::m_globalObject[structure], t0, target 1715 .opJeqNullNotMasqueradesAsUndefined: 1716 end, 1717 macro (value, target) bieq value, NullTag, target end) 1851 1718 1852 1719 1853 _llint_op_jneq_null: 1854 traceExecution() 1855 equalNull( 1856 macro (structure, value, target) 1857 btbz value, MasqueradesAsUndefined, target 1858 loadp CodeBlock[cfr], t0 1859 loadp 
CodeBlock::m_globalObject[t0], t0 1860 bpneq Structure::m_globalObject[structure], t0, target 1861 end, 1862 macro (value, target) bineq value, NullTag, target end) 1863 1864 1865 _llint_op_jneq_ptr: 1866 traceExecution() 1867 loadi 4[PC], t0 1868 loadi 8[PC], t1 1720 equalNullJumpOp(jneq_null, OpJneqNull, 1721 macro (structure, value, target) 1722 btbz value, MasqueradesAsUndefined, target 1723 loadp CodeBlock[cfr], t0 1724 loadp CodeBlock::m_globalObject[t0], t0 1725 bpneq Structure::m_globalObject[structure], t0, target 1726 end, 1727 macro (value, target) bineq value, NullTag, target end) 1728 1729 1730 llintOpWithMetadata(op_jneq_ptr, OpJneqPtr, macro (size, get, dispatch, metadata, return) 1731 get(value, t0) 1732 get(specialPointer, t1) 1869 1733 loadp CodeBlock[cfr], t2 1870 1734 loadp CodeBlock::m_globalObject[t2], t2 … … 1873 1737 bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough 1874 1738 .opJneqPtrBranch: 1875 storei 1, 16[PC] 1876 dispatchBranch(12[PC]) 1739 metadata(t5, t2) 1740 storeb 1, OpJneqPtr::Metadata::hasJumped[t5] 1741 get(target, t0) 1742 jumpImpl(t0) 1877 1743 .opJneqPtrFallThrough: 1878 dispatch(constexpr op_jneq_ptr_length) 1879 1880 1881 macro compareUnsignedJump(integerCompare) 1882 loadi 4[PC], t2 1883 loadi 8[PC], t3 1884 loadConstantOrVariable(t2, t0, t1) 1885 loadConstantOrVariable2Reg(t3, t2, t3) 1886 integerCompare(t1, t3, .jumpTarget) 1887 dispatch(4) 1888 1889 .jumpTarget: 1890 dispatchBranch(12[PC]) 1891 end 1892 1893 1894 macro compareUnsigned(integerCompareAndSet) 1895 loadi 12[PC], t2 1896 loadi 8[PC], t0 1897 loadConstantOrVariable(t2, t3, t1) 1898 loadConstantOrVariable2Reg(t0, t2, t0) 1899 integerCompareAndSet(t0, t1, t0) 1900 loadi 4[PC], t2 1901 storei BooleanTag, TagOffset[cfr, t2, 8] 1902 storei t0, PayloadOffset[cfr, t2, 8] 1903 dispatch(4) 1904 end 1905 1906 1907 macro compareJump(integerCompare, doubleCompare, slowPath) 1908 loadi 4[PC], t2 1909 loadi 8[PC], t3 1910 loadConstantOrVariable(t2, t0, t1) 1911 loadConstantOrVariable2Reg(t3, t2, t3) 1912 bineq t0, Int32Tag, .op1NotInt 1913 bineq t2, Int32Tag, .op2NotInt 1914 integerCompare(t1, t3, .jumpTarget) 1915 dispatch(4) 1916 1917 .op1NotInt: 1918 bia t0, LowestTag, .slow 1919 bib t2, LowestTag, .op1NotIntOp2Double 1920 bineq t2, Int32Tag, .slow 1921 ci2d t3, ft1 1922 jmp .op1NotIntReady 1923 .op1NotIntOp2Double: 1924 fii2d t3, t2, ft1 1925 .op1NotIntReady: 1926 fii2d t1, t0, ft0 1927 doubleCompare(ft0, ft1, .jumpTarget) 1928 dispatch(4) 1929 1930 .op2NotInt: 1931 ci2d t1, ft0 1932 bia t2, LowestTag, .slow 1933 fii2d t3, t2, ft1 1934 doubleCompare(ft0, ft1, .jumpTarget) 1935 dispatch(4) 1936 1937 .jumpTarget: 1938 dispatchBranch(12[PC]) 1939 1940 .slow: 1941 callSlowPath(slowPath) 1942 dispatch(0) 1943 end 1944 1945 1946 _llint_op_switch_imm: 1947 traceExecution() 1948 loadi 12[PC], t2 1949 loadi 4[PC], t3 1950 loadConstantOrVariable(t2, t1, t0) 1744 dispatch() 1745 end) 1746 1747 1748 macro compareUnsignedJumpOp(name, op, integerCompare) 1749 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1750 get(lhs, t2) 1751 get(rhs, t3) 1752 loadConstantOrVariable(size, t2, t0, t1) 1753 loadConstantOrVariable2Reg(size, t3, t2, t3) 1754 integerCompare(t1, t3, .jumpTarget) 1755 dispatch() 1756 1757 .jumpTarget: 1758 jump(target) 1759 end) 1760 end 1761 1762 1763 macro compareUnsignedOp(name, op, integerCompareAndSet) 1764 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 1765 get(rhs, t2) 1766 get(lhs, t0) 1767 loadConstantOrVariable(size, t2, t3, t1) 1768 
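The op_jneq_ptr hunk above shows the central move of the new bytecode format: mutable state such as OpJneqPtr::Metadata::hasJumped is no longer written into the instruction stream (the old code did `storei 1, 16[PC]`), but into a per-CodeBlock metadata table that the `metadata(t5, ...)` step materializes in t5. A minimal C++ sketch of that split; MetadataTableSketch and entryFor are illustrative names, not the actual JSC classes:

    #include <cstdint>
    #include <vector>

    // One mutable record per op_jneq_ptr instruction; the bytecode itself stays read-only.
    struct OpJneqPtrMetadata {
        bool hasJumped { false };
    };

    struct MetadataTableSketch {
        std::vector<OpJneqPtrMetadata> jneqPtrEntries;
        OpJneqPtrMetadata& entryFor(unsigned metadataID) { return jneqPtrEntries[metadataID]; }
    };

    // What `storeb 1, OpJneqPtr::Metadata::hasJumped[t5]` amounts to once t5
    // points at this instruction's metadata entry:
    inline void recordJumpTaken(MetadataTableSketch& table, unsigned metadataID)
    {
        table.entryFor(metadataID).hasJumped = true;
    }

Keeping the instruction stream immutable is what lets the narrow one-byte encoding stay compact and opens the door to caching bytecode later.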
loadConstantOrVariable2Reg(size, t0, t2, t0) 1769 integerCompareAndSet(t0, t1, t0) 1770 return(BooleanTag, t0) 1771 end) 1772 end 1773 1774 1775 macro compareJumpOp(name, op, integerCompare, doubleCompare) 1776 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1777 get(lhs, t2) 1778 get(rhs, t3) 1779 loadConstantOrVariable(size, t2, t0, t1) 1780 loadConstantOrVariable2Reg(size, t3, t2, t3) 1781 bineq t0, Int32Tag, .op1NotInt 1782 bineq t2, Int32Tag, .op2NotInt 1783 integerCompare(t1, t3, .jumpTarget) 1784 dispatch() 1785 1786 .op1NotInt: 1787 bia t0, LowestTag, .slow 1788 bib t2, LowestTag, .op1NotIntOp2Double 1789 bineq t2, Int32Tag, .slow 1790 ci2d t3, ft1 1791 jmp .op1NotIntReady 1792 .op1NotIntOp2Double: 1793 fii2d t3, t2, ft1 1794 .op1NotIntReady: 1795 fii2d t1, t0, ft0 1796 doubleCompare(ft0, ft1, .jumpTarget) 1797 dispatch() 1798 1799 .op2NotInt: 1800 ci2d t1, ft0 1801 bia t2, LowestTag, .slow 1802 fii2d t3, t2, ft1 1803 doubleCompare(ft0, ft1, .jumpTarget) 1804 dispatch() 1805 1806 .jumpTarget: 1807 jump(target) 1808 1809 .slow: 1810 callSlowPath(_llint_slow_path_%name%) 1811 nextInstruction() 1812 end) 1813 end 1814 1815 1816 llintOpWithJump(op_switch_imm, OpSwitchImm, macro (size, get, jump, dispatch) 1817 get(scrutinee, t2) 1818 get(tableIndex, t3) 1819 loadConstantOrVariable(size, t2, t1, t0) 1951 1820 loadp CodeBlock[cfr], t2 1952 1821 loadp CodeBlock::m_rareData[t2], t2 … … 1960 1829 loadi [t3, t0, 4], t1 1961 1830 btiz t1, .opSwitchImmFallThrough 1962 dispatch BranchWithOffset(t1)1831 dispatchIndirect(t1) 1963 1832 1964 1833 .opSwitchImmNotInt: 1965 1834 bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double. 1966 1835 .opSwitchImmFallThrough: 1967 dispatchBranch(8[PC])1836 jump(defaultOffset) 1968 1837 1969 1838 .opSwitchImmSlow: 1970 1839 callSlowPath(_llint_slow_path_switch_imm) 1971 dispatch(0)1972 1973 1974 _llint_op_switch_char: 1975 traceExecution()1976 loadi 12[PC], t21977 loadi 4[PC], t31978 loadConstantOrVariable( t2, t1, t0)1840 nextInstruction() 1841 end) 1842 1843 1844 llintOpWithJump(op_switch_char, OpSwitchChar, macro (size, get, jump, dispatch) 1845 get(scrutinee, t2) 1846 get(tableIndex, t3) 1847 loadConstantOrVariable(size, t2, t1, t0) 1979 1848 loadp CodeBlock[cfr], t2 1980 1849 loadp CodeBlock::m_rareData[t2], t2 … … 1999 1868 loadi [t2, t0, 4], t1 2000 1869 btiz t1, .opSwitchCharFallThrough 2001 dispatch BranchWithOffset(t1)1870 dispatchIndirect(t1) 2002 1871 2003 1872 .opSwitchCharFallThrough: 2004 dispatchBranch(8[PC])1873 jump(defaultOffset) 2005 1874 2006 1875 .opSwitchOnRope: 2007 1876 callSlowPath(_llint_slow_path_switch_char) 2008 dispatch(0) 2009 2010 2011 macro arrayProfileForCall() 2012 loadi 16[PC], t3 1877 nextInstruction() 1878 end) 1879 1880 1881 macro arrayProfileForCall(op, getu) 1882 getu(argv, t3) 2013 1883 negi t3 2014 1884 bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done 2015 1885 loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0 2016 1886 loadp JSCell::m_structureID[t0], t0 2017 loadpFromInstruction(CallOpCodeSize - 2, t1) 2018 storep t0, ArrayProfile::m_lastSeenStructureID[t1] 1887 storep t0, %op%::Metadata::arrayProfile.m_lastSeenStructureID[t5] 2019 1888 .done: 2020 1889 end 2021 1890 2022 macro doCall(slowPath, prepareCall) 2023 loadi 8[PC], t0 2024 loadi 20[PC], t1 2025 loadp LLIntCallLinkInfo::callee[t1], t2 2026 loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow) 2027 bineq t3, t2, .opCallSlow 2028 loadi 16[PC], t3 2029 lshifti 3, t3 2030 negi t3 2031 addp cfr, t3 # 
t3 contains the new value of cfr 2032 storei t2, Callee + PayloadOffset[t3] 2033 loadi 12[PC], t2 2034 storei PC, ArgumentCount + TagOffset[cfr] 2035 storei t2, ArgumentCount + PayloadOffset[t3] 2036 storei CellTag, Callee + TagOffset[t3] 2037 move t3, sp 2038 prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4, JSEntryPtrTag) 2039 callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1], JSEntryPtrTag) 2040 2041 .opCallSlow: 2042 slowPathForCall(slowPath, prepareCall) 2043 end 2044 2045 _llint_op_ret: 2046 traceExecution() 1891 macro commonCallOp(name, slowPath, op, prepareCall, prologue) 1892 llintOpWithMetadata(name, op, macro (size, get, dispatch, metadata, return) 1893 metadata(t5, t0) 1894 1895 prologue(macro (field, dst) 1896 getu(size, op, field, dst) 1897 end, metadata) 1898 1899 get(callee, t0) 1900 loadp %op%::Metadata::callLinkInfo.callee[t5], t2 1901 loadConstantOrVariablePayload(size, t0, CellTag, t3, .opCallSlow) 1902 bineq t3, t2, .opCallSlow 1903 get(argv, t3) 1904 lshifti 3, t3 1905 negi t3 1906 addp cfr, t3 # t3 contains the new value of cfr 1907 storei t2, Callee + PayloadOffset[t3] 1908 get(argc, t2) 1909 storei PC, ArgumentCount + TagOffset[cfr] 1910 storei t2, ArgumentCount + PayloadOffset[t3] 1911 storei CellTag, Callee + TagOffset[t3] 1912 move t3, sp 1913 prepareCall(%op%::Metadata::callLinkInfo.machineCodeTarget[t5], t2, t3, t4, JSEntryPtrTag) 1914 callTargetFunction(size, op, dispatch, %op%::Metadata::callLinkInfo.machineCodeTarget[t5], JSEntryPtrTag) 1915 1916 .opCallSlow: 1917 slowPathForCall(size, op, dispatch, slowPath, prepareCall) 1918 end) 1919 end 1920 1921 llintOp(op_ret, OpRet, macro (size, get, dispatch) 2047 1922 checkSwitchToJITForEpilogue() 2048 loadi 4[PC], t22049 loadConstantOrVariable( t2, t1, t0)1923 get(value, t2) 1924 loadConstantOrVariable(size, t2, t1, t0) 2050 1925 doReturn() 2051 2052 2053 _llint_op_to_primitive: 2054 traceExecution() 2055 loadi 8[PC], t2 2056 loadi 4[PC], t3 2057 loadConstantOrVariable(t2, t1, t0) 1926 end) 1927 1928 1929 llintOpWithReturn(op_to_primitive, OpToPrimitive, macro (size, get, dispatch, return) 1930 get(src, t2) 1931 loadConstantOrVariable(size, t2, t1, t0) 2058 1932 bineq t1, CellTag, .opToPrimitiveIsImm 2059 1933 bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase 2060 1934 .opToPrimitiveIsImm: 2061 storei t1, TagOffset[cfr, t3, 8] 2062 storei t0, PayloadOffset[cfr, t3, 8] 2063 dispatch(constexpr op_to_primitive_length) 1935 return(t1, t0) 2064 1936 2065 1937 .opToPrimitiveSlowCase: 2066 1938 callSlowPath(_slow_path_to_primitive) 2067 dispatch(constexpr op_to_primitive_length) 2068 2069 2070 _llint_op_catch: 1939 dispatch() 1940 end) 1941 1942 1943 commonOp(op_catch, macro() end, macro (size) 2071 1944 # This is where we end up from the JIT's throw trampoline (because the 2072 1945 # machine code return address will be set to _llint_op_catch), and from … … 2082 1955 restoreStackPointerAfterCall() 2083 1956 1957 if C_LOOP 1958 # restore metadataTable since we don't restore callee saves for CLoop during unwinding 1959 loadp CodeBlock[cfr], t1 1960 # FIXME: cleanup double load 1961 # https://bugs.webkit.org/show_bug.cgi?id=190933 1962 loadp CodeBlock::m_metadata[t1], metadataTable 1963 loadp MetadataTable::m_buffer[metadataTable], metadataTable 1964 end 1965 2084 1966 loadi VM::targetInterpreterPCForThrow[t3], PC 2085 1967 … … 2095 1977 loadi VM::m_exception[t3], t0 2096 1978 storei 0, VM::m_exception[t3] 2097 loadi 4[PC], t21979 get(size, OpCatch, exception, t2) 2098 1980 storei t0, 
PayloadOffset[cfr, t2, 8] 2099 1981 storei CellTag, TagOffset[cfr, t2, 8] … … 2101 1983 loadi Exception::m_value + TagOffset[t0], t1 2102 1984 loadi Exception::m_value + PayloadOffset[t0], t0 2103 loadi 8[PC], t21985 get(size, OpCatch, thrownValue, t2) 2104 1986 storei t0, PayloadOffset[cfr, t2, 8] 2105 1987 storei t1, TagOffset[cfr, t2, 8] … … 2109 1991 callSlowPath(_llint_slow_path_profile_catch) 2110 1992 2111 dispatch (constexpr op_catch_length)2112 2113 _llint_op_end: 2114 traceExecution()1993 dispatchOp(size, op_catch) 1994 end) 1995 1996 llintOp(op_end, OpEnd, macro (size, get, dispatch) 2115 1997 checkSwitchToJITForEpilogue() 2116 loadi 4[PC], t02117 assertNotConstant( t0)1998 get(value, t0) 1999 assertNotConstant(size, t0) 2118 2000 loadi TagOffset[cfr, t0, 8], t1 2119 2001 loadi PayloadOffset[cfr, t0, 8], t0 2120 2002 doReturn() 2121 2122 2123 _llint_throw_from_slow_path_trampoline: 2003 end) 2004 2005 2006 op(llint_throw_from_slow_path_trampoline, macro() 2124 2007 callSlowPath(_llint_slow_path_handle_exception) 2125 2008 … … 2132 2015 copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(t1, t2) 2133 2016 jmp VM::targetMachinePCForThrow[t1] 2134 2135 2136 _llint_throw_during_call_trampoline: 2017 end) 2018 2019 2020 op(llint_throw_during_call_trampoline, macro() 2137 2021 preserveReturnAddressAfterCall(t2) 2138 2022 jmp _llint_throw_from_slow_path_trampoline 2023 end) 2139 2024 2140 2025 … … 2263 2148 2264 2149 2265 macro getConstantScope(dst)2266 loadpFromInstruction(6, t0)2267 loadisFromInstruction(dst, t1)2268 storei CellTag, TagOffset[cfr, t1, 8]2269 storei t0, PayloadOffset[cfr, t1, 8]2270 end2271 2272 2150 macro varInjectionCheck(slowPath) 2273 2151 loadp CodeBlock[cfr], t0 … … 2277 2155 end 2278 2156 2279 macro resolveScope() 2280 loadp CodeBlock[cfr], t0 2281 loadisFromInstruction(5, t2) 2282 2283 loadisFromInstruction(2, t0) 2284 loadp PayloadOffset[cfr, t0, 8], t0 2285 btiz t2, .resolveScopeLoopEnd 2286 2287 .resolveScopeLoop: 2288 loadp JSScope::m_next[t0], t0 2289 subi 1, t2 2290 btinz t2, .resolveScopeLoop 2291 2292 .resolveScopeLoopEnd: 2293 loadisFromInstruction(1, t1) 2294 storei CellTag, TagOffset[cfr, t1, 8] 2295 storei t0, PayloadOffset[cfr, t1, 8] 2296 end 2297 2298 2299 _llint_op_resolve_scope: 2300 traceExecution() 2301 loadisFromInstruction(4, t0) 2157 2158 llintOpWithMetadata(op_resolve_scope, OpResolveScope, macro (size, get, dispatch, metadata, return) 2159 2160 macro getConstantScope() 2161 loadp OpResolveScope::Metadata::constantScope[t5], t0 2162 return(CellTag, t0) 2163 end 2164 2165 macro resolveScope() 2166 loadi OpResolveScope::Metadata::localScopeDepth[t5], t2 2167 get(scope, t0) 2168 loadp PayloadOffset[cfr, t0, 8], t0 2169 btiz t2, .resolveScopeLoopEnd 2170 2171 .resolveScopeLoop: 2172 loadp JSScope::m_next[t0], t0 2173 subi 1, t2 2174 btinz t2, .resolveScopeLoop 2175 2176 .resolveScopeLoopEnd: 2177 return(CellTag, t0) 2178 end 2179 2180 metadata(t5, t0) 2181 loadp OpResolveScope::Metadata::resolveType[t5], t0 2302 2182 2303 2183 #rGlobalProperty: 2304 2184 bineq t0, GlobalProperty, .rGlobalVar 2305 getConstantScope(1) 2306 dispatch(7) 2185 getConstantScope() 2307 2186 2308 2187 .rGlobalVar: 2309 2188 bineq t0, GlobalVar, .rGlobalLexicalVar 2310 getConstantScope(1) 2311 dispatch(7) 2189 getConstantScope() 2312 2190 2313 2191 .rGlobalLexicalVar: 2314 2192 bineq t0, GlobalLexicalVar, .rClosureVar 2315 getConstantScope(1) 2316 dispatch(7) 2193 getConstantScope() 2317 2194 2318 2195 .rClosureVar: 2319 2196 bineq t0, ClosureVar, .rModuleVar 2320 2197 
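The resolveScope() helper defined at the top of this op (used by the ClosureVar cases just below) walks a statically known number of scope-chain links, now read from OpResolveScope::Metadata::localScopeDepth rather than an instruction slot. A minimal C++ model of that loop; JSScopeStub is a stand-in for JSC's JSScope:

    // Equivalent of the .resolveScopeLoop above: follow m_next localScopeDepth times.
    struct JSScopeStub {
        JSScopeStub* next { nullptr };  // JSScope::m_next in the asm
    };

    inline JSScopeStub* resolveClosureScope(JSScopeStub* scope, unsigned localScopeDepth)
    {
        for (unsigned i = 0; i < localScopeDepth; ++i)
            scope = scope->next;
        return scope;
    }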
resolveScope() 2321 dispatch(7)2322 2198 2323 2199 .rModuleVar: 2324 2200 bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks 2325 getConstantScope(1) 2326 dispatch(7) 2201 getConstantScope() 2327 2202 2328 2203 .rGlobalPropertyWithVarInjectionChecks: 2329 2204 bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks 2330 2205 varInjectionCheck(.rDynamic) 2331 getConstantScope(1) 2332 dispatch(7) 2206 getConstantScope() 2333 2207 2334 2208 .rGlobalVarWithVarInjectionChecks: 2335 2209 bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks 2336 2210 varInjectionCheck(.rDynamic) 2337 getConstantScope(1) 2338 dispatch(7) 2211 getConstantScope() 2339 2212 2340 2213 .rGlobalLexicalVarWithVarInjectionChecks: 2341 2214 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks 2342 2215 varInjectionCheck(.rDynamic) 2343 getConstantScope(1) 2344 dispatch(7) 2216 getConstantScope() 2345 2217 2346 2218 .rClosureVarWithVarInjectionChecks: … … 2348 2220 varInjectionCheck(.rDynamic) 2349 2221 resolveScope() 2350 dispatch(7)2351 2222 2352 2223 .rDynamic: 2353 2224 callSlowPath(_slow_path_resolve_scope) 2354 dispatch(7) 2355 2356 2357 macro loadWithStructureCheck(operand, slowPath) 2358 loadisFromInstruction(operand, t0) 2225 dispatch() 2226 end) 2227 2228 2229 macro loadWithStructureCheck(op, get, operand, slowPath) 2230 get(scope, t0) 2359 2231 loadp PayloadOffset[cfr, t0, 8], t0 2360 loadp FromInstruction(5, t1)2232 loadp %op%::Metadata::structure[t5], t1 2361 2233 bpneq JSCell::m_structureID[t0], t1, slowPath 2362 2234 end 2363 2235 2364 macro getProperty() 2365 loadisFromInstruction(6, t3) 2366 loadPropertyAtVariableOffset(t3, t0, t1, t2) 2367 valueProfile(t1, t2, 28, t0) 2368 loadisFromInstruction(1, t0) 2369 storei t1, TagOffset[cfr, t0, 8] 2370 storei t2, PayloadOffset[cfr, t0, 8] 2371 end 2372 2373 macro getGlobalVar(tdzCheckIfNecessary) 2374 loadpFromInstruction(6, t0) 2375 loadp TagOffset[t0], t1 2376 loadp PayloadOffset[t0], t2 2377 tdzCheckIfNecessary(t1) 2378 valueProfile(t1, t2, 28, t0) 2379 loadisFromInstruction(1, t0) 2380 storei t1, TagOffset[cfr, t0, 8] 2381 storei t2, PayloadOffset[cfr, t0, 8] 2382 end 2383 2384 macro getClosureVar() 2385 loadisFromInstruction(6, t3) 2386 loadp JSLexicalEnvironment_variables + TagOffset[t0, t3, 8], t1 2387 loadp JSLexicalEnvironment_variables + PayloadOffset[t0, t3, 8], t2 2388 valueProfile(t1, t2, 28, t0) 2389 loadisFromInstruction(1, t0) 2390 storei t1, TagOffset[cfr, t0, 8] 2391 storei t2, PayloadOffset[cfr, t0, 8] 2392 end 2393 2394 _llint_op_get_from_scope: 2395 traceExecution() 2396 loadisFromInstruction(4, t0) 2236 2237 llintOpWithMetadata(op_get_from_scope, OpGetFromScope, macro (size, get, dispatch, metadata, return) 2238 macro getProperty() 2239 loadis OpGetFromScope::Metadata::operand[t5], t3 2240 loadPropertyAtVariableOffset(t3, t0, t1, t2) 2241 valueProfile(OpGetFromScope, t5, t1, t2) 2242 return(t1, t2) 2243 end 2244 2245 macro getGlobalVar(tdzCheckIfNecessary) 2246 loadp OpGetFromScope::Metadata::operand[t5], t0 2247 loadp TagOffset[t0], t1 2248 loadp PayloadOffset[t0], t2 2249 tdzCheckIfNecessary(t1) 2250 valueProfile(OpGetFromScope, t5, t1, t2) 2251 return(t1, t2) 2252 end 2253 2254 macro getClosureVar() 2255 loadis OpGetFromScope::Metadata::operand[t5], t3 2256 loadp JSLexicalEnvironment_variables + TagOffset[t0, t3, 8], t1 2257 loadp JSLexicalEnvironment_variables + PayloadOffset[t0, t3, 8], t2 2258 valueProfile(OpGetFromScope, t5, t1, t2) 2259 
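Every profiled get in this file now funnels through valueProfile(OpGetFromScope, t5, t1, t2), a store through the metadata pointer rather than through a ValueProfile pointer baked into the instruction. Reduced to C++, the profiling step is an overwriting store that the JITs sample later; a single bucket is shown for brevity, and the 32-bit interpreter stores the tag and payload halves separately:

    #include <cstdint>

    struct ValueProfileSketch {
        uint64_t m_buckets[1] { 0 };  // last observed (encoded) JSValue
    };

    inline void recordObservedValue(ValueProfileSketch& profile, uint64_t encodedJSValue)
    {
        profile.m_buckets[0] = encodedJSValue;
    }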
return(t1, t2) 2260 end 2261 2262 metadata(t5, t0) 2263 loadi OpGetFromScope::Metadata::getPutInfo[t5], t0 2397 2264 andi ResolveTypeMask, t0 2398 2265 2399 2266 #gGlobalProperty: 2400 2267 bineq t0, GlobalProperty, .gGlobalVar 2401 loadWithStructureCheck( 2, .gDynamic)2268 loadWithStructureCheck(OpGetFromScope, get, scope, .gDynamic) 2402 2269 getProperty() 2403 dispatch(8)2404 2270 2405 2271 .gGlobalVar: 2406 2272 bineq t0, GlobalVar, .gGlobalLexicalVar 2407 2273 getGlobalVar(macro(t) end) 2408 dispatch(8)2409 2274 2410 2275 .gGlobalLexicalVar: … … 2414 2279 bieq tag, EmptyValueTag, .gDynamic 2415 2280 end) 2416 dispatch(8)2417 2281 2418 2282 .gClosureVar: 2419 2283 bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks 2420 loadVariable( 2, t2, t1, t0)2284 loadVariable(get, scope, t2, t1, t0) 2421 2285 getClosureVar() 2422 dispatch(8)2423 2286 2424 2287 .gGlobalPropertyWithVarInjectionChecks: 2425 2288 bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks 2426 loadWithStructureCheck( 2, .gDynamic)2289 loadWithStructureCheck(OpGetFromScope, get, scope, .gDynamic) 2427 2290 getProperty() 2428 dispatch(8)2429 2291 2430 2292 .gGlobalVarWithVarInjectionChecks: … … 2432 2294 varInjectionCheck(.gDynamic) 2433 2295 getGlobalVar(macro(t) end) 2434 dispatch(8)2435 2296 2436 2297 .gGlobalLexicalVarWithVarInjectionChecks: … … 2441 2302 bieq tag, EmptyValueTag, .gDynamic 2442 2303 end) 2443 dispatch(8)2444 2304 2445 2305 .gClosureVarWithVarInjectionChecks: 2446 2306 bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic 2447 2307 varInjectionCheck(.gDynamic) 2448 loadVariable( 2, t2, t1, t0)2308 loadVariable(get, scope, t2, t1, t0) 2449 2309 getClosureVar() 2450 dispatch(8)2451 2310 2452 2311 .gDynamic: 2453 2312 callSlowPath(_llint_slow_path_get_from_scope) 2454 dispatch(8) 2455 2456 2457 macro putProperty() 2458 loadisFromInstruction(3, t1) 2459 loadConstantOrVariable(t1, t2, t3) 2460 loadisFromInstruction(6, t1) 2461 storePropertyAtVariableOffset(t1, t0, t2, t3) 2462 end 2463 2464 macro putGlobalVariable() 2465 loadisFromInstruction(3, t0) 2466 loadConstantOrVariable(t0, t1, t2) 2467 loadpFromInstruction(5, t3) 2468 notifyWrite(t3, .pDynamic) 2469 loadpFromInstruction(6, t0) 2470 storei t1, TagOffset[t0] 2471 storei t2, PayloadOffset[t0] 2472 end 2473 2474 macro putClosureVar() 2475 loadisFromInstruction(3, t1) 2476 loadConstantOrVariable(t1, t2, t3) 2477 loadisFromInstruction(6, t1) 2478 storei t2, JSLexicalEnvironment_variables + TagOffset[t0, t1, 8] 2479 storei t3, JSLexicalEnvironment_variables + PayloadOffset[t0, t1, 8] 2480 end 2481 2482 macro putLocalClosureVar() 2483 loadisFromInstruction(3, t1) 2484 loadConstantOrVariable(t1, t2, t3) 2485 loadpFromInstruction(5, t5) 2486 btpz t5, .noVariableWatchpointSet 2487 notifyWrite(t5, .pDynamic) 2488 .noVariableWatchpointSet: 2489 loadisFromInstruction(6, t1) 2490 storei t2, JSLexicalEnvironment_variables + TagOffset[t0, t1, 8] 2491 storei t3, JSLexicalEnvironment_variables + PayloadOffset[t0, t1, 8] 2492 end 2493 2494 2495 _llint_op_put_to_scope: 2496 traceExecution() 2497 loadisFromInstruction(4, t0) 2313 dispatch() 2314 end) 2315 2316 2317 llintOpWithMetadata(op_put_to_scope, OpPutToScope, macro (size, get, dispatch, metadata, return) 2318 macro putProperty() 2319 get(value, t1) 2320 loadConstantOrVariable(size, t1, t2, t3) 2321 loadis OpPutToScope::Metadata::operand[t5], t1 2322 storePropertyAtVariableOffset(t1, t0, t2, t3) 2323 end 2324 2325 macro putGlobalVariable() 2326 get(value, t0) 2327 
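loadWithStructureCheck above implements the GlobalProperty fast path: the metadata caches the Structure seen when the access was linked, and any mismatch bails to the .gDynamic slow path before the cached offset is trusted. A hedged C++ sketch with stub types; inline versus out-of-line property storage, which loadPropertyAtVariableOffset distinguishes by the sign of the offset, is folded into one array here:

    #include <cstdint>
    #include <cstddef>

    struct ScopeObjectStub {
        uint32_t structureID;
        uint64_t* properties;  // stand-in for the butterfly / inline storage
    };

    inline bool tryCachedGet(const ScopeObjectStub& scope, uint32_t cachedStructureID,
        std::size_t cachedOffset, uint64_t& result)
    {
        if (scope.structureID != cachedStructureID)
            return false;  // shape changed since linking: take the dynamic path
        result = scope.properties[cachedOffset];
        return true;
    }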
loadConstantOrVariable(size, t0, t1, t2) 2328 loadp OpPutToScope::Metadata::watchpointSet[t5], t3 2329 notifyWrite(t3, .pDynamic) 2330 loadp OpPutToScope::Metadata::operand[t5], t0 2331 storei t1, TagOffset[t0] 2332 storei t2, PayloadOffset[t0] 2333 end 2334 2335 macro putClosureVar() 2336 get(value, t1) 2337 loadConstantOrVariable(size, t1, t2, t3) 2338 loadis OpPutToScope::Metadata::operand[t5], t1 2339 storei t2, JSLexicalEnvironment_variables + TagOffset[t0, t1, 8] 2340 storei t3, JSLexicalEnvironment_variables + PayloadOffset[t0, t1, 8] 2341 end 2342 2343 macro putLocalClosureVar() 2344 get(value, t1) 2345 loadConstantOrVariable(size, t1, t2, t3) 2346 loadp OpPutToScope::Metadata::watchpointSet[t5], t1 2347 btpz t1, .noVariableWatchpointSet 2348 notifyWrite(t1, .pDynamic) 2349 .noVariableWatchpointSet: 2350 loadis OpPutToScope::Metadata::operand[t5], t1 2351 storei t2, JSLexicalEnvironment_variables + TagOffset[t0, t1, 8] 2352 storei t3, JSLexicalEnvironment_variables + PayloadOffset[t0, t1, 8] 2353 end 2354 2355 2356 metadata(t5, t0) 2357 loadi OpPutToScope::Metadata::getPutInfo[t5], t0 2498 2358 andi ResolveTypeMask, t0 2499 2359 2500 2360 #pLocalClosureVar: 2501 2361 bineq t0, LocalClosureVar, .pGlobalProperty 2502 writeBarrierOnOperands(1, 3) 2503 loadVariable(1, t2, t1, t0) 2362 loadVariable(get, scope, t2, t1, t0) 2504 2363 putLocalClosureVar() 2505 dispatch(7) 2364 writeBarrierOnOperands(size, get, scope, value) 2365 dispatch() 2506 2366 2507 2367 .pGlobalProperty: 2508 2368 bineq t0, GlobalProperty, .pGlobalVar 2509 writeBarrierOnOperands(1, 3) 2510 loadWithStructureCheck(1, .pDynamic) 2369 loadWithStructureCheck(OpPutToScope, get, scope, .pDynamic) 2511 2370 putProperty() 2512 dispatch(7) 2371 writeBarrierOnOperands(size, get, scope, value) 2372 dispatch() 2513 2373 2514 2374 .pGlobalVar: 2515 2375 bineq t0, GlobalVar, .pGlobalLexicalVar 2516 writeBarrierOnGlobalObject(3)2517 2376 putGlobalVariable() 2518 dispatch(7) 2377 writeBarrierOnGlobalObject(size, get, value) 2378 dispatch() 2519 2379 2520 2380 .pGlobalLexicalVar: 2521 2381 bineq t0, GlobalLexicalVar, .pClosureVar 2522 writeBarrierOnGlobalLexicalEnvironment(3)2523 2382 putGlobalVariable() 2524 dispatch(7) 2383 writeBarrierOnGlobalLexicalEnvironment(size, get, value) 2384 dispatch() 2525 2385 2526 2386 .pClosureVar: 2527 2387 bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks 2528 writeBarrierOnOperands(1, 3) 2529 loadVariable(1, t2, t1, t0) 2388 loadVariable(get, scope, t2, t1, t0) 2530 2389 putClosureVar() 2531 dispatch(7) 2390 writeBarrierOnOperands(size, get, scope, value) 2391 dispatch() 2532 2392 2533 2393 .pGlobalPropertyWithVarInjectionChecks: 2534 2394 bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks 2535 writeBarrierOnOperands(1, 3) 2536 loadWithStructureCheck(1, .pDynamic) 2395 loadWithStructureCheck(OpPutToScope, get, scope, .pDynamic) 2537 2396 putProperty() 2538 dispatch(7) 2397 writeBarrierOnOperands(size, get, scope, value) 2398 dispatch() 2539 2399 2540 2400 .pGlobalVarWithVarInjectionChecks: 2541 2401 bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks 2542 writeBarrierOnGlobalObject(3)2543 2402 varInjectionCheck(.pDynamic) 2544 2403 putGlobalVariable() 2545 dispatch(7) 2404 writeBarrierOnGlobalObject(size, get, value) 2405 dispatch() 2546 2406 2547 2407 .pGlobalLexicalVarWithVarInjectionChecks: 2548 2408 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks 2549 
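putGlobalVariable() above guards the store with notifyWrite on the variable's WatchpointSet: the fast path is taken only once the set can no longer fire, and any state where watchpoints might still be registered branches to .pDynamic so the slow path can invalidate them. The invariant, sketched in C++; the state names follow JSC's WatchpointSet, but the helper itself is illustrative:

    #include <cstdint>

    enum WatchpointStateSketch : uint8_t { ClearWatchpoint, IsWatched, IsInvalidated };

    // Mirror of the notifyWrite fast-path test: only an already-invalidated set
    // can be written to without notifying anyone.
    inline bool canStoreWithoutNotifying(WatchpointStateSketch state)
    {
        return state == IsInvalidated;
    }

Note also the ordering change visible in these hunks: the write barrier now runs after the store (writeBarrierOnOperands follows putProperty and putClosureVar) rather than before it.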
writeBarrierOnGlobalLexicalEnvironment(3)2550 2409 varInjectionCheck(.pDynamic) 2551 2410 putGlobalVariable() 2552 dispatch(7) 2411 writeBarrierOnGlobalLexicalEnvironment(size, get, value) 2412 dispatch() 2553 2413 2554 2414 .pClosureVarWithVarInjectionChecks: 2555 2415 bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar 2556 writeBarrierOnOperands(1, 3)2557 2416 varInjectionCheck(.pDynamic) 2558 loadVariable( 1, t2, t1, t0)2417 loadVariable(get, scope, t2, t1, t0) 2559 2418 putClosureVar() 2560 dispatch(7) 2419 writeBarrierOnOperands(size, get, scope, value) 2420 dispatch() 2561 2421 2562 2422 .pModuleVar: 2563 2423 bineq t0, ModuleVar, .pDynamic 2564 2424 callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) 2565 dispatch( 7)2425 dispatch() 2566 2426 2567 2427 .pDynamic: 2568 2428 callSlowPath(_llint_slow_path_put_to_scope) 2569 dispatch( 7)2570 2571 2572 _llint_op_get_from_arguments: 2573 traceExecution()2574 loadisFromInstruction(2, t0)2429 dispatch() 2430 end) 2431 2432 2433 llintOpWithProfile(op_get_from_arguments, OpGetFromArguments, macro (size, get, dispatch, return) 2434 get(arguments, t0) 2575 2435 loadi PayloadOffset[cfr, t0, 8], t0 2576 loadi 12[PC], t12436 get(index, t1) 2577 2437 loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2 2578 2438 loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3 2579 loadisFromInstruction(1, t1) 2580 valueProfile(t2, t3, 16, t0) 2581 storei t2, TagOffset[cfr, t1, 8] 2582 storei t3, PayloadOffset[cfr, t1, 8] 2583 dispatch(5) 2584 2585 2586 _llint_op_put_to_arguments: 2587 traceExecution() 2588 writeBarrierOnOperands(1, 3) 2589 loadisFromInstruction(1, t0) 2439 return(t2, t3) 2440 end) 2441 2442 2443 llintOp(op_put_to_arguments, OpPutToArguments, macro (size, get, dispatch) 2444 writeBarrierOnOperands(size, get, arguments, value) 2445 get(arguments, t0) 2590 2446 loadi PayloadOffset[cfr, t0, 8], t0 2591 loadisFromInstruction(3, t1)2592 loadConstantOrVariable( t1, t2, t3)2593 loadi 8[PC], t12447 get(value, t1) 2448 loadConstantOrVariable(size, t1, t2, t3) 2449 get(index, t1) 2594 2450 storei t2, DirectArguments_storage + TagOffset[t0, t1, 8] 2595 2451 storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8] 2596 dispatch( 4)2597 2598 2599 _llint_op_get_parent_scope: 2600 traceExecution()2601 loadisFromInstruction(2, t0)2452 dispatch() 2453 end) 2454 2455 2456 llintOpWithReturn(op_get_parent_scope, OpGetParentScope, macro (size, get, dispatch, return) 2457 get(scope, t0) 2602 2458 loadp PayloadOffset[cfr, t0, 8], t0 2603 2459 loadp JSScope::m_next[t0], t0 2604 loadisFromInstruction(1, t1) 2605 storei CellTag, TagOffset[cfr, t1, 8] 2606 storei t0, PayloadOffset[cfr, t1, 8] 2607 dispatch(3) 2608 2609 2610 _llint_op_profile_type: 2611 traceExecution() 2460 return(CellTag, t0) 2461 end) 2462 2463 2464 llintOpWithMetadata(op_profile_type, OpProfileType, macro (size, get, dispatch, metadata, return) 2612 2465 loadp CodeBlock[cfr], t1 2613 2466 loadp CodeBlock::m_poisonedVM[t1], t1 … … 2617 2470 2618 2471 # t0 is holding the payload, t5 is holding the tag. 2619 loadisFromInstruction(1, t2)2620 loadConstantOrVariable( t2, t5, t0)2472 get(target, t2) 2473 loadConstantOrVariable(size, t2, t5, t0) 2621 2474 2622 2475 bieq t5, EmptyValueTag, .opProfileTypeDone 2623 2476 2477 metadata(t3, t2) 2624 2478 # t2 is holding the pointer to the current log entry. 2625 2479 loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2 … … 2630 2484 2631 2485 # Store the TypeLocation onto the log entry. 
2632 loadp FromInstruction(2, t3)2486 loadp OpProfileType::Metadata::typeLocation[t3], t3 2633 2487 storep t3, TypeProfilerLog::LogEntry::location[t2] 2634 2488 … … 2650 2504 2651 2505 .opProfileTypeDone: 2652 dispatch(6) 2653 2654 2655 _llint_op_profile_control_flow: 2656 traceExecution() 2657 loadpFromInstruction(1, t0) 2506 dispatch() 2507 end) 2508 2509 2510 llintOpWithMetadata(op_profile_control_flow, OpProfileControlFlow, macro (size, get, dispatch, metadata, return) 2511 metadata(t5, t0) 2512 loadp OpProfileControlFlow::Metadata::basicBlockLocation[t5], t0 2658 2513 loadi BasicBlockLocation::m_executionCount[t0], t1 2659 addi 1, t1 2660 bieq t1, 0, .done # We overflowed. 2514 baddio 1, t1, .done 2661 2515 storei t1, BasicBlockLocation::m_executionCount[t0] 2662 2516 .done: 2663 dispatch( 2)2664 2665 2666 _llint_op_get_rest_length: 2667 traceExecution()2517 dispatch() 2518 end) 2519 2520 2521 llintOpWithReturn(op_get_rest_length, OpGetRestLength, macro (size, get, dispatch, return) 2668 2522 loadi PayloadOffset + ArgumentCount[cfr], t0 2669 2523 subi 1, t0 2670 loadisFromInstruction(2, t1)2524 get(numParametersToSkip, t1) 2671 2525 bilteq t0, t1, .storeZero 2672 2526 subi t1, t0 … … 2675 2529 move 0, t0 2676 2530 .finish: 2677 loadisFromInstruction(1, t1) 2678 storei t0, PayloadOffset[cfr, t1, 8] 2679 storei Int32Tag, TagOffset[cfr, t1, 8] 2680 dispatch(3) 2681 2682 2683 _llint_op_log_shadow_chicken_prologue: 2684 traceExecution() 2531 return(Int32Tag, t0) 2532 end) 2533 2534 2535 llintOp(op_log_shadow_chicken_prologue, OpLogShadowChickenPrologue, macro (size, get, dispatch) 2685 2536 acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow) 2686 2537 storep cfr, ShadowChicken::Packet::frame[t0] … … 2689 2540 loadp Callee + PayloadOffset[cfr], t1 2690 2541 storep t1, ShadowChicken::Packet::callee[t0] 2691 loadisFromInstruction(1, t1)2542 get(scope, t1) 2692 2543 loadi PayloadOffset[cfr, t1, 8], t1 2693 2544 storep t1, ShadowChicken::Packet::scope[t0] 2694 dispatch( 2)2545 dispatch() 2695 2546 .opLogShadowChickenPrologueSlow: 2696 2547 callSlowPath(_llint_slow_path_log_shadow_chicken_prologue) 2697 dispatch( 2)2698 2699 2700 _llint_op_log_shadow_chicken_tail: 2701 traceExecution()2548 dispatch() 2549 end) 2550 2551 2552 llintOp(op_log_shadow_chicken_tail, OpLogShadowChickenTail, macro (size, get, dispatch) 2702 2553 acquireShadowChickenPacket(.opLogShadowChickenTailSlow) 2703 2554 storep cfr, ShadowChicken::Packet::frame[t0] 2704 2555 storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0] 2705 loadVariable( 1, t3, t2, t1)2556 loadVariable(get, thisValue, t3, t2, t1) 2706 2557 storei t2, TagOffset + ShadowChicken::Packet::thisValue[t0] 2707 2558 storei t1, PayloadOffset + ShadowChicken::Packet::thisValue[t0] 2708 loadisFromInstruction(2, t1)2559 get(scope, t1) 2709 2560 loadi PayloadOffset[cfr, t1, 8], t1 2710 2561 storep t1, ShadowChicken::Packet::scope[t0] … … 2712 2563 storep t1, ShadowChicken::Packet::codeBlock[t0] 2713 2564 storei PC, ShadowChicken::Packet::callSiteIndex[t0] 2714 dispatch( 3)2565 dispatch() 2715 2566 .opLogShadowChickenTailSlow: 2716 2567 callSlowPath(_llint_slow_path_log_shadow_chicken_tail) 2717 dispatch(3) 2568 dispatch() 2569 end) -
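The op_profile_control_flow hunk above replaces the old increment-then-compare-with-zero wraparound test with baddio, an add that branches on signed overflow and skips the store, so the execution count saturates instead of wrapping. A C++ equivalent of the new behavior, using the GCC/Clang overflow builtin:

    #include <cstdint>

    inline void bumpExecutionCount(int32_t& storedCount)
    {
        int32_t incremented;
        if (__builtin_add_overflow(storedCount, 1, &incremented))
            return;  // on overflow keep the old value: saturate, don't wrap
        storedCount = incremented;
    }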
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
r237486 r237547 1 1 2 # Copyright (C) 2011-2018 Apple Inc. All rights reserved. 2 3 # … … 24 25 25 26 # Utilities. 26 macro jumpToInstruction() 27 jmp [PB, PC, PtrSize], BytecodePtrTag 28 end 29 30 macro dispatch(advance) 31 addp advance, PC 32 jumpToInstruction() 33 end 34 35 macro dispatchInt(advance) 36 addi advance, PC 37 jumpToInstruction() 38 end 39 40 macro dispatchIntIndirect(offset) 41 dispatchInt(offset * PtrSize[PB, PC, PtrSize]) 42 end 43 44 macro dispatchAfterCall() 27 28 macro nextInstruction() 29 loadb [PB, PC, 1], t0 30 leap _g_opcodeMap, t1 31 loadp [t1, t0, PtrSize], t2 32 jmp t2, BytecodePtrTag 33 end 34 35 macro nextInstructionWide() 36 loadi 1[PB, PC, 1], t0 37 leap _g_opcodeMapWide, t1 38 loadp [t1, t0, PtrSize], t2 39 jmp t2, BytecodePtrTag 40 end 41 42 macro getuOperandNarrow(op, field, dst) 43 loadb constexpr %op%_%field%_index[PB, PC, 1], dst 44 end 45 46 macro getOperandNarrow(op, field, dst) 47 loadbsp constexpr %op%_%field%_index[PB, PC, 1], dst 48 end 49 50 macro getuOperandWide(op, field, dst) 51 loadi constexpr %op%_%field%_index * 4 + 1[PB, PC, 1], dst 52 end 53 54 macro getOperandWide(op, field, dst) 55 loadis constexpr %op%_%field%_index * 4 + 1[PB, PC, 1], dst 56 end 57 58 macro makeReturn(get, dispatch, fn) 59 fn(macro (value) 60 move value, t2 61 get(dst, t1) 62 storeq t2, [cfr, t1, 8] 63 dispatch() 64 end) 65 end 66 67 macro makeReturnProfiled(op, get, metadata, dispatch, fn) 68 fn(macro (value) 69 move value, t3 70 metadata(t1, t2) 71 valueProfile(op, t1, t3) 72 get(dst, t1) 73 storeq t3, [cfr, t1, 8] 74 dispatch() 75 end) 76 end 77 78 macro valueProfile(op, metadata, value) 79 storeq value, %op%::Metadata::profile.m_buckets[metadata] 80 end 81 82 macro dispatchAfterCall(size, op, dispatch) 45 83 loadi ArgumentCount + TagOffset[cfr], PC 46 84 loadp CodeBlock[cfr], PB 85 # FIXME: cleanup double load 86 # https://bugs.webkit.org/show_bug.cgi?id=190932 47 87 loadp CodeBlock::m_instructions[PB], PB 88 loadp [PB], PB 48 89 unpoison(_g_CodeBlockPoison, PB, t1) 49 loadisFromInstruction(1, t1)90 get(size, op, dst, t1) 50 91 storeq r0, [cfr, t1, 8] 51 valueProfile(r0, (CallOpCodeSize - 1), t3) 52 dispatch(CallOpCodeSize) 92 metadata(size, op, t2, t1) 93 valueProfile(op, t2, r0) 94 dispatch() 53 95 end 54 96 … … 207 249 checkStackPointerAlignment(extraTempReg, 0xbad0dc02) 208 250 209 makeCall(entry, t3 )251 makeCall(entry, t3, t4) 210 252 211 253 # We may have just made a call into a JS function, so we can't rely on sp … … 249 291 250 292 251 macro makeJavaScriptCall(entry, temp )293 macro makeJavaScriptCall(entry, temp, unused) 252 294 addp 16, sp 253 295 if C_LOOP … … 259 301 end 260 302 261 262 macro makeHostFunctionCall(entry, temp) 303 macro makeHostFunctionCall(entry, temp, unused) 263 304 move entry, temp 264 305 storep cfr, [sp] … … 277 318 end 278 319 279 _handleUncaughtException: 320 op(handleUncaughtException, macro () 280 321 loadp Callee[cfr], t3 281 322 andp MarkedBlockMask, t3 … … 298 339 functionEpilogue() 299 340 ret 341 end) 300 342 301 343 302 344 macro prepareStateForCCall() 303 leap [PB, PC, PtrSize], PC345 addp PB, PC 304 346 end 305 347 … … 307 349 move r0, PC 308 350 subp PB, PC 309 rshiftp constexpr (getLSBSet(sizeof(void*))), PC310 351 end 311 352 … … 390 431 end 391 432 392 macro loadVariable( operand, value)393 loadisFromInstruction(operand, value)433 macro loadVariable(get, field, value) 434 get(field, value) 394 435 loadq [cfr, value, 8], value 395 436 end 396 437 397 438 # Index and value must be different registers. 
Index may be clobbered. 398 macro loadConstantOrVariable(index, value) 399 bpgteq index, FirstConstantRegisterIndex, .constant 400 loadq [cfr, index, 8], value 401 jmp .done 402 .constant: 403 loadp CodeBlock[cfr], value 404 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value 405 subp FirstConstantRegisterIndex, index 406 loadq [value, index, 8], value 407 .done: 408 end 409 410 macro loadConstantOrVariableInt32(index, value, slow) 411 loadConstantOrVariable(index, value) 439 macro loadConstantOrVariable(size, index, value) 440 size(FirstConstantRegisterIndexNarrow, FirstConstantRegisterIndexWide, macro (FirstConstantRegisterIndex) 441 bpgteq index, FirstConstantRegisterIndex, .constant 442 loadq [cfr, index, 8], value 443 jmp .done 444 .constant: 445 loadp CodeBlock[cfr], value 446 loadp CodeBlock::m_constantRegisters + VectorBufferOffset[value], value 447 subp FirstConstantRegisterIndex, index 448 loadq [value, index, 8], value 449 .done: 450 end) 451 end 452 453 macro loadConstantOrVariableInt32(size, index, value, slow) 454 loadConstantOrVariable(size, index, value) 412 455 bqb value, tagTypeNumber, slow 413 456 end 414 457 415 macro loadConstantOrVariableCell( index, value, slow)416 loadConstantOrVariable( index, value)458 macro loadConstantOrVariableCell(size, index, value, slow) 459 loadConstantOrVariable(size, index, value) 417 460 btqnz value, tagMask, slow 418 461 end 419 462 420 macro writeBarrierOnOperand (cellOperand)421 loadisFromInstruction(cellOperand, t1)422 loadConstantOrVariableCell( t1, t2, .writeBarrierDone)463 macro writeBarrierOnOperandWithReload(size, get, cellOperand, reloadAfterSlowPath) 464 get(cellOperand, t1) 465 loadConstantOrVariableCell(size, t1, t2, .writeBarrierDone) 423 466 skipIfIsRememberedOrInEden( 424 467 t2, … … 429 472 cCall2Void(_llint_write_barrier_slow) 430 473 pop PC, PB 474 reloadAfterSlowPath() 431 475 end) 432 476 .writeBarrierDone: 433 477 end 434 478 435 macro writeBarrierOnOperands(cellOperand, valueOperand) 436 loadisFromInstruction(valueOperand, t1) 437 loadConstantOrVariableCell(t1, t0, .writeBarrierDone) 479 macro writeBarrierOnOperand(size, get, cellOperand) 480 writeBarrierOnOperandWithReload(size, get, cellOperand, macro () end) 481 end 482 483 macro writeBarrierOnOperands(size, get, cellOperand, valueOperand) 484 get(valueOperand, t1) 485 loadConstantOrVariableCell(size, t1, t0, .writeBarrierDone) 438 486 btpz t0, .writeBarrierDone 439 487 440 writeBarrierOnOperand( cellOperand)488 writeBarrierOnOperand(size, get, cellOperand) 441 489 .writeBarrierDone: 442 490 end 443 491 444 macro writeBarrierOnGlobal( valueOperand, loadHelper)445 loadisFromInstruction(valueOperand, t1)446 loadConstantOrVariableCell( t1, t0, .writeBarrierDone)492 macro writeBarrierOnGlobal(size, get, valueOperand, loadHelper) 493 get(valueOperand, t1) 494 loadConstantOrVariableCell(size, t1, t0, .writeBarrierDone) 447 495 btpz t0, .writeBarrierDone 448 496 … … 456 504 cCall2Void(_llint_write_barrier_slow) 457 505 pop PC, PB 458 end 459 ) 506 end) 460 507 .writeBarrierDone: 461 508 end 462 509 463 macro writeBarrierOnGlobalObject( valueOperand)464 writeBarrierOnGlobal( valueOperand,510 macro writeBarrierOnGlobalObject(size, get, valueOperand) 511 writeBarrierOnGlobal(size, get, valueOperand, 465 512 macro(registerToStoreGlobal) 466 513 loadp CodeBlock[cfr], registerToStoreGlobal … … 469 516 end 470 517 471 macro writeBarrierOnGlobalLexicalEnvironment( valueOperand)472 writeBarrierOnGlobal( valueOperand,518 macro 
writeBarrierOnGlobalLexicalEnvironment(size, get, valueOperand) 519 writeBarrierOnGlobal(size, get, valueOperand, 473 520 macro(registerToStoreGlobal) 474 521 loadp CodeBlock[cfr], registerToStoreGlobal … … 476 523 loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal 477 524 end) 478 end479 480 macro valueProfile(value, operand, scratch)481 loadpFromInstruction(operand, scratch)482 storeq value, ValueProfile::m_buckets[scratch]483 525 end 484 526 … … 580 622 # Reload CodeBlock and reset PC, since the slow_path clobbered them. 581 623 loadp CodeBlock[cfr], t1 624 # FIXME: cleanup double load 625 # https://bugs.webkit.org/show_bug.cgi?id=190932 582 626 loadp CodeBlock::m_instructions[t1], PB 627 loadp [PB], PB 583 628 unpoison(_g_CodeBlockPoison, PB, t2) 584 629 move 0, PC … … 594 639 .noException: 595 640 end 596 597 641 598 642 # Instruction implementations … … 615 659 .opEnterDone: 616 660 callSlowPath(_slow_path_enter) 617 dispatch(constexpr op_enter_length) 618 619 620 _llint_op_get_argument: 621 traceExecution() 622 loadisFromInstruction(1, t1) 623 loadisFromInstruction(2, t2) 661 dispatchOp(narrow, op_enter) 662 663 664 llintOpWithProfile(op_get_argument, OpGetArgument, macro (size, get, dispatch, return) 665 get(index, t2) 624 666 loadi PayloadOffset + ArgumentCount[cfr], t0 625 667 bilteq t0, t2, .opGetArgumentOutOfBounds 626 668 loadq ThisArgumentOffset[cfr, t2, 8], t0 627 storeq t0, [cfr, t1, 8] 628 valueProfile(t0, 3, t2) 629 dispatch(constexpr op_get_argument_length) 669 return(t0) 630 670 631 671 .opGetArgumentOutOfBounds: 632 storeq ValueUndefined, [cfr, t1, 8] 633 valueProfile(ValueUndefined, 3, t2) 634 dispatch(constexpr op_get_argument_length) 635 636 637 _llint_op_argument_count: 638 traceExecution() 639 loadisFromInstruction(1, t1) 672 return(ValueUndefined) 673 end) 674 675 676 llintOpWithReturn(op_argument_count, OpArgumentCount, macro (size, get, dispatch, return) 640 677 loadi PayloadOffset + ArgumentCount[cfr], t0 641 678 subi 1, t0 642 679 orq TagTypeNumber, t0 643 storeq t0, [cfr, t1, 8] 644 dispatch(constexpr op_argument_count_length) 645 646 647 _llint_op_get_scope: 648 traceExecution() 680 return(t0) 681 end) 682 683 684 llintOpWithReturn(op_get_scope, OpGetScope, macro (size, get, dispatch, return) 649 685 loadp Callee[cfr], t0 650 686 loadp JSCallee::m_scope[t0], t0 651 loadisFromInstruction(1, t1) 652 storeq t0, [cfr, t1, 8] 653 dispatch(constexpr op_get_scope_length) 654 655 656 _llint_op_to_this: 657 traceExecution() 658 loadisFromInstruction(1, t0) 687 return(t0) 688 end) 689 690 691 llintOpWithMetadata(op_to_this, OpToThis, macro (size, get, dispatch, metadata, return) 692 get(srcDst, t0) 659 693 loadq [cfr, t0, 8], t0 660 694 btqnz t0, tagMask, .opToThisSlow 661 695 bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow 662 696 loadStructureWithScratch(t0, t1, t2, t3) 663 loadpFromInstruction(2, t2) 697 metadata(t2, t3) 698 loadp OpToThis::Metadata::cachedStructure[t2], t2 664 699 bpneq t1, t2, .opToThisSlow 665 dispatch( constexpr op_to_this_length)700 dispatch() 666 701 667 702 .opToThisSlow: 668 703 callSlowPath(_slow_path_to_this) 669 dispatch( constexpr op_to_this_length)670 671 672 _llint_op_check_tdz: 673 traceExecution()674 loadisFromInstruction(1, t0)675 loadConstantOrVariable( t0, t1)704 dispatch() 705 end) 706 707 708 llintOp(op_check_tdz, OpCheckTdz, macro (size, get, dispatch) 709 get(target, t0) 710 loadConstantOrVariable(size, t0, t1) 676 711 bqneq t1, ValueEmpty, .opNotTDZ 677 712 
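The narrow/wide split visible a few hunks up in loadConstantOrVariable (FirstConstantRegisterIndexNarrow versus FirstConstantRegisterIndexWide) pairs with the getOperandNarrow/getOperandWide accessors at the top of this file: narrow bytecode packs each operand into one signed byte, wide bytecode into four bytes, and indices at or above the first-constant threshold name constant-pool entries instead of stack slots. A rough C++ decoder under those assumptions; the exact offsets and threshold values here are illustrative, not the real JSC constants:

    #include <cstdint>
    #include <cstring>

    struct DecodedOperand {
        bool isConstant;
        int32_t index;  // virtual register number, or index into the constant pool
    };

    inline DecodedOperand decodeOperand(const uint8_t* instruction, unsigned fieldIndex,
        bool wide, int32_t firstConstantRegisterIndex)
    {
        int32_t operand;
        if (wide) {
            // Wide form: 4-byte operand slots following the one-byte opcode.
            std::memcpy(&operand, instruction + 1 + 4 * fieldIndex, sizeof(operand));
        } else {
            // Narrow form: one signed byte per operand.
            operand = static_cast<int8_t>(instruction[fieldIndex]);
        }
        if (operand >= firstConstantRegisterIndex)
            return { true, operand - firstConstantRegisterIndex };
        return { false, operand };
    }

Most operands fit in a byte, so most instructions stay narrow; the wide form is the escape hatch rather than the default.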
callSlowPath(_slow_path_throw_tdz_error) 678 713 679 714 .opNotTDZ: 680 dispatch(constexpr op_check_tdz_length) 681 682 683 _llint_op_mov: 684 traceExecution() 685 loadisFromInstruction(2, t1) 686 loadisFromInstruction(1, t0) 687 loadConstantOrVariable(t1, t2) 688 storeq t2, [cfr, t0, 8] 689 dispatch(constexpr op_mov_length) 690 691 692 _llint_op_not: 693 traceExecution() 694 loadisFromInstruction(2, t0) 695 loadisFromInstruction(1, t1) 696 loadConstantOrVariable(t0, t2) 715 dispatch() 716 end) 717 718 719 llintOpWithReturn(op_mov, OpMov, macro (size, get, dispatch, return) 720 get(src, t1) 721 loadConstantOrVariable(size, t1, t2) 722 return(t2) 723 end) 724 725 726 llintOpWithReturn(op_not, OpNot, macro (size, get, dispatch, return) 727 get(operand, t0) 728 loadConstantOrVariable(size, t0, t2) 697 729 xorq ValueFalse, t2 698 730 btqnz t2, ~1, .opNotSlow 699 731 xorq ValueTrue, t2 700 storeq t2, [cfr, t1, 8] 701 dispatch(constexpr op_not_length) 732 return(t2) 702 733 703 734 .opNotSlow: 704 735 callSlowPath(_slow_path_not) 705 dispatch(constexpr op_not_length) 706 707 708 macro equalityComparison(integerComparison, slowPath) 709 loadisFromInstruction(3, t0) 710 loadisFromInstruction(2, t2) 711 loadisFromInstruction(1, t3) 712 loadConstantOrVariableInt32(t0, t1, .slow) 713 loadConstantOrVariableInt32(t2, t0, .slow) 714 integerComparison(t0, t1, t0) 715 orq ValueFalse, t0 716 storeq t0, [cfr, t3, 8] 717 dispatch(4) 718 719 .slow: 720 callSlowPath(slowPath) 721 dispatch(4) 722 end 723 724 725 macro equalityJump(integerComparison, slowPath) 726 loadisFromInstruction(1, t2) 727 loadisFromInstruction(2, t3) 728 loadConstantOrVariableInt32(t2, t0, .slow) 729 loadConstantOrVariableInt32(t3, t1, .slow) 730 integerComparison(t0, t1, .jumpTarget) 731 dispatch(constexpr op_jeq_length) 732 733 .jumpTarget: 734 dispatchIntIndirect(3) 735 736 .slow: 737 callSlowPath(slowPath) 738 dispatch(0) 739 end 740 741 742 macro equalNullComparison() 743 loadisFromInstruction(2, t0) 744 loadq [cfr, t0, 8], t0 745 btqnz t0, tagMask, .immediate 746 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined 747 move 0, t0 748 jmp .done 749 .masqueradesAsUndefined: 750 loadStructureWithScratch(t0, t2, t1, t3) 751 loadp CodeBlock[cfr], t0 752 loadp CodeBlock::m_globalObject[t0], t0 753 cpeq Structure::m_globalObject[t2], t0, t0 754 jmp .done 755 .immediate: 756 andq ~TagBitUndefined, t0 757 cqeq t0, ValueNull, t0 758 .done: 759 end 760 761 _llint_op_eq_null: 762 traceExecution() 763 equalNullComparison() 764 loadisFromInstruction(1, t1) 765 orq ValueFalse, t0 766 storeq t0, [cfr, t1, 8] 767 dispatch(constexpr op_eq_null_length) 768 769 770 _llint_op_neq_null: 771 traceExecution() 772 equalNullComparison() 773 loadisFromInstruction(1, t1) 774 xorq ValueTrue, t0 775 storeq t0, [cfr, t1, 8] 776 dispatch(constexpr op_neq_null_length) 777 778 779 macro strictEq(equalityOperation, slowPath) 780 loadisFromInstruction(3, t0) 781 loadisFromInstruction(2, t2) 782 loadConstantOrVariable(t0, t1) 783 loadConstantOrVariable(t2, t0) 784 move t0, t2 785 orq t1, t2 786 btqz t2, tagMask, .slow 787 bqaeq t0, tagTypeNumber, .leftOK 788 btqnz t0, tagTypeNumber, .slow 789 .leftOK: 790 bqaeq t1, tagTypeNumber, .rightOK 791 btqnz t1, tagTypeNumber, .slow 792 .rightOK: 793 equalityOperation(t0, t1, t0) 794 loadisFromInstruction(1, t1) 795 orq ValueFalse, t0 796 storeq t0, [cfr, t1, 8] 797 dispatch(4) 798 799 .slow: 800 callSlowPath(slowPath) 801 dispatch(4) 802 end 803 804 805 macro strictEqualityJump(equalityOperation, 
slowPath) 806 loadisFromInstruction(1, t2) 807 loadisFromInstruction(2, t3) 808 loadConstantOrVariable(t2, t0) 809 loadConstantOrVariable(t3, t1) 810 move t0, t2 811 orq t1, t2 812 btqz t2, tagMask, .slow 813 bqaeq t0, tagTypeNumber, .leftOK 814 btqnz t0, tagTypeNumber, .slow 815 .leftOK: 816 bqaeq t1, tagTypeNumber, .rightOK 817 btqnz t1, tagTypeNumber, .slow 818 .rightOK: 819 equalityOperation(t0, t1, .jumpTarget) 820 dispatch(constexpr op_jstricteq_length) 821 822 .jumpTarget: 823 dispatchIntIndirect(3) 824 825 .slow: 826 callSlowPath(slowPath) 827 dispatch(0) 828 end 829 830 831 _llint_op_stricteq: 832 traceExecution() 833 strictEq( 834 macro (left, right, result) cqeq left, right, result end, 835 _slow_path_stricteq) 836 837 838 _llint_op_nstricteq: 839 traceExecution() 840 strictEq( 841 macro (left, right, result) cqneq left, right, result end, 842 _slow_path_nstricteq) 843 844 845 _llint_op_jstricteq: 846 traceExecution() 847 strictEqualityJump( 848 macro (left, right, target) bqeq left, right, target end, 849 _llint_slow_path_jstricteq) 850 851 852 _llint_op_jnstricteq: 853 traceExecution() 854 strictEqualityJump( 855 macro (left, right, target) bqneq left, right, target end, 856 _llint_slow_path_jnstricteq) 857 858 859 macro preOp(arithmeticOperation, slowPath) 860 traceExecution() 861 loadisFromInstruction(1, t0) 862 loadq [cfr, t0, 8], t1 863 bqb t1, tagTypeNumber, .slow 864 arithmeticOperation(t1, .slow) 865 orq tagTypeNumber, t1 866 storeq t1, [cfr, t0, 8] 867 dispatch(2) 868 869 .slow: 870 callSlowPath(slowPath) 871 dispatch(2) 872 end 873 874 _llint_op_inc: 875 preOp( 876 macro (value, slow) baddio 1, value, slow end, 877 _slow_path_inc) 878 879 880 _llint_op_dec: 881 preOp( 882 macro (value, slow) bsubio 1, value, slow end, 883 _slow_path_dec) 884 885 886 _llint_op_to_number: 887 traceExecution() 888 loadisFromInstruction(2, t0) 889 loadisFromInstruction(1, t1) 890 loadConstantOrVariable(t0, t2) 736 dispatch() 737 end) 738 739 740 macro equalityComparisonOp(name, op, integerComparison) 741 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 742 get(rhs, t0) 743 get(lhs, t2) 744 loadConstantOrVariableInt32(size, t0, t1, .slow) 745 loadConstantOrVariableInt32(size, t2, t0, .slow) 746 integerComparison(t0, t1, t0) 747 orq ValueFalse, t0 748 return(t0) 749 750 .slow: 751 callSlowPath(_slow_path_%name%) 752 dispatch() 753 end) 754 end 755 756 757 macro equalNullComparisonOp(name, op, fn) 758 llintOpWithReturn(name, op, macro (size, get, dispatch, return) 759 get(operand, t0) 760 loadq [cfr, t0, 8], t0 761 btqnz t0, tagMask, .immediate 762 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined 763 move 0, t0 764 jmp .done 765 .masqueradesAsUndefined: 766 loadStructureWithScratch(t0, t2, t1, t3) 767 loadp CodeBlock[cfr], t0 768 loadp CodeBlock::m_globalObject[t0], t0 769 cpeq Structure::m_globalObject[t2], t0, t0 770 jmp .done 771 .immediate: 772 andq ~TagBitUndefined, t0 773 cqeq t0, ValueNull, t0 774 .done: 775 fn(t0) 776 return(t0) 777 end) 778 end 779 780 equalNullComparisonOp(op_eq_null, OpEqNull, 781 macro (value) orq ValueFalse, value end) 782 783 784 equalNullComparisonOp(op_neq_null, OpNeqNull, 785 macro (value) xorq ValueTrue, value end) 786 787 788 macro strictEqOp(name, op, equalityOperation) 789 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 790 get(rhs, t0) 791 get(lhs, t2) 792 loadConstantOrVariable(size, t0, t1) 793 loadConstantOrVariable(size, t2, t0) 794 move t0, t2 795 orq t1, t2 796 btqz t2, tagMask, 
.slow 797 bqaeq t0, tagTypeNumber, .leftOK 798 btqnz t0, tagTypeNumber, .slow 799 .leftOK: 800 bqaeq t1, tagTypeNumber, .rightOK 801 btqnz t1, tagTypeNumber, .slow 802 .rightOK: 803 equalityOperation(t0, t1, t0) 804 orq ValueFalse, t0 805 return(t0) 806 807 .slow: 808 callSlowPath(_slow_path_%name%) 809 dispatch() 810 end) 811 end 812 813 814 strictEqOp(stricteq, OpStricteq, 815 macro (left, right, result) cqeq left, right, result end) 816 817 818 strictEqOp(nstricteq, OpNstricteq, 819 macro (left, right, result) cqneq left, right, result end) 820 821 822 macro strictEqualityJumpOp(name, op, equalityOperation) 823 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 824 get(lhs, t2) 825 get(rhs, t3) 826 loadConstantOrVariable(size, t2, t0) 827 loadConstantOrVariable(size, t3, t1) 828 move t0, t2 829 orq t1, t2 830 btqz t2, tagMask, .slow 831 bqaeq t0, tagTypeNumber, .leftOK 832 btqnz t0, tagTypeNumber, .slow 833 .leftOK: 834 bqaeq t1, tagTypeNumber, .rightOK 835 btqnz t1, tagTypeNumber, .slow 836 .rightOK: 837 equalityOperation(t0, t1, .jumpTarget) 838 dispatch() 839 840 .jumpTarget: 841 jump(target) 842 843 .slow: 844 callSlowPath(_llint_slow_path_%name%) 845 nextInstruction() 846 end) 847 end 848 849 850 strictEqualityJumpOp(jstricteq, OpJstricteq, 851 macro (left, right, target) bqeq left, right, target end) 852 853 854 strictEqualityJumpOp(jnstricteq, OpJnstricteq, 855 macro (left, right, target) bqneq left, right, target end) 856 857 858 macro preOp(name, op, arithmeticOperation) 859 llintOp(op_%name%, op, macro (size, get, dispatch) 860 get(srcDst, t0) 861 loadq [cfr, t0, 8], t1 862 bqb t1, tagTypeNumber, .slow 863 arithmeticOperation(t1, .slow) 864 orq tagTypeNumber, t1 865 storeq t1, [cfr, t0, 8] 866 dispatch() 867 .slow: 868 callSlowPath(_slow_path_%name%) 869 dispatch() 870 end) 871 end 872 873 llintOpWithProfile(op_to_number, OpToNumber, macro (size, get, dispatch, return) 874 get(operand, t0) 875 loadConstantOrVariable(size, t0, t2) 891 876 bqaeq t2, tagTypeNumber, .opToNumberIsImmediate 892 877 btqz t2, tagTypeNumber, .opToNumberSlow 893 878 .opToNumberIsImmediate: 894 storeq t2, [cfr, t1, 8] 895 valueProfile(t2, 3, t0) 896 dispatch(constexpr op_to_number_length) 879 return(t2) 897 880 898 881 .opToNumberSlow: 899 882 callSlowPath(_slow_path_to_number) 900 dispatch(constexpr op_to_number_length) 901 902 903 _llint_op_to_string: 904 traceExecution() 905 loadisFromInstruction(2, t1) 906 loadisFromInstruction(1, t2) 907 loadConstantOrVariable(t1, t0) 883 dispatch() 884 end) 885 886 887 llintOpWithReturn(op_to_string, OpToString, macro (size, get, dispatch, return) 888 get(operand, t1) 889 loadConstantOrVariable(size, t1, t0) 908 890 btqnz t0, tagMask, .opToStringSlow 909 891 bbneq JSCell::m_type[t0], StringType, .opToStringSlow 910 892 .opToStringIsString: 911 storeq t0, [cfr, t2, 8] 912 dispatch(constexpr op_to_string_length) 893 return(t0) 913 894 914 895 .opToStringSlow: 915 896 callSlowPath(_slow_path_to_string) 916 dispatch(constexpr op_to_string_length) 917 918 919 _llint_op_to_object: 920 traceExecution() 921 loadisFromInstruction(2, t0) 922 loadisFromInstruction(1, t1) 923 loadConstantOrVariable(t0, t2) 897 dispatch() 898 end) 899 900 901 llintOpWithProfile(op_to_object, OpToObject, macro (size, get, dispatch, return) 902 get(operand, t0) 903 loadConstantOrVariable(size, t0, t2) 924 904 btqnz t2, tagMask, .opToObjectSlow 925 905 bbb JSCell::m_type[t2], ObjectType, .opToObjectSlow 926 storeq t2, [cfr, t1, 8] 927 valueProfile(t2, 4, t0) 928 dispatch(constexpr 
op_to_object_length) 906 return(t2) 929 907 930 908 .opToObjectSlow: 931 909 callSlowPath(_slow_path_to_object) 932 dispatch( constexpr op_to_object_length)933 934 935 _llint_op_negate: 936 traceExecution()937 loadisFromInstruction(2, t0)938 load isFromInstruction(1, t1)939 loadConstantOrVariable(t0, t3)940 loadis FromInstruction(3, t2)910 dispatch() 911 end) 912 913 914 llintOpWithMetadata(op_negate, OpNegate, macro (size, get, dispatch, metadata, return) 915 get(operand, t0) 916 loadConstantOrVariable(size, t0, t3) 917 metadata(t1, t2) 918 loadis OpNegate::Metadata::arithProfile[t1], t2 941 919 bqb t3, tagTypeNumber, .opNegateNotInt 942 920 btiz t3, 0x7fffffff, .opNegateSlow 943 921 negi t3 922 orq tagTypeNumber, t3 944 923 ori ArithProfileInt, t2 945 orq tagTypeNumber, t3 946 storeisToInstruction(t2, 3) 947 storeq t3, [cfr, t1, 8] 948 dispatch(constexpr op_negate_length) 924 storei t2, OpNegate::Metadata::arithProfile[t1] 925 return(t3) 949 926 .opNegateNotInt: 950 927 btqz t3, tagTypeNumber, .opNegateSlow 951 928 xorq 0x8000000000000000, t3 952 929 ori ArithProfileNumber, t2 953 storeq t3, [cfr, t1, 8] 954 storeisToInstruction(t2, 3) 955 dispatch(constexpr op_negate_length) 930 storei t2, OpNegate::Metadata::arithProfile[t1] 931 return(t3) 956 932 957 933 .opNegateSlow: 958 934 callSlowPath(_slow_path_negate) 959 dispatch(constexpr op_negate_length) 960 961 962 macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slowPath) 963 loadisFromInstruction(3, t0) 964 loadisFromInstruction(2, t2) 965 loadConstantOrVariable(t0, t1) 966 loadConstantOrVariable(t2, t0) 967 bqb t0, tagTypeNumber, .op1NotInt 968 bqb t1, tagTypeNumber, .op2NotInt 969 loadisFromInstruction(1, t2) 970 integerOperationAndStore(t1, t0, .slow, t2) 971 loadisFromInstruction(4, t1) 972 ori ArithProfileIntInt, t1 973 storeisToInstruction(t1, 4) 974 dispatch(5) 975 976 .op1NotInt: 977 # First operand is definitely not an int, the second operand could be anything. 978 btqz t0, tagTypeNumber, .slow 979 bqaeq t1, tagTypeNumber, .op1NotIntOp2Int 980 btqz t1, tagTypeNumber, .slow 981 addq tagTypeNumber, t1 982 fq2d t1, ft1 983 loadisFromInstruction(4, t2) 984 ori ArithProfileNumberNumber, t2 985 storeisToInstruction(t2, 4) 986 jmp .op1NotIntReady 987 .op1NotIntOp2Int: 988 loadisFromInstruction(4, t2) 989 ori ArithProfileNumberInt, t2 990 storeisToInstruction(t2, 4) 991 ci2d t1, ft1 992 .op1NotIntReady: 993 loadisFromInstruction(1, t2) 994 addq tagTypeNumber, t0 995 fq2d t0, ft0 996 doubleOperation(ft1, ft0) 997 fd2q ft0, t0 998 subq tagTypeNumber, t0 999 storeq t0, [cfr, t2, 8] 1000 dispatch(5) 1001 1002 .op2NotInt: 1003 # First operand is definitely an int, the second is definitely not. 
1004 loadisFromInstruction(1, t2) 1005 btqz t1, tagTypeNumber, .slow 1006 loadisFromInstruction(4, t3) 1007 ori ArithProfileIntNumber, t3 1008 storeisToInstruction(t3, 4) 1009 ci2d t0, ft0 1010 addq tagTypeNumber, t1 1011 fq2d t1, ft1 1012 doubleOperation(ft1, ft0) 1013 fd2q ft0, t0 1014 subq tagTypeNumber, t0 1015 storeq t0, [cfr, t2, 8] 1016 dispatch(5) 1017 1018 .slow: 1019 callSlowPath(slowPath) 1020 dispatch(5) 1021 end 1022 1023 macro binaryOp(integerOperation, doubleOperation, slowPath) 1024 binaryOpCustomStore( 935 dispatch() 936 end) 937 938 939 macro binaryOpCustomStore(name, op, integerOperationAndStore, doubleOperation) 940 llintOpWithMetadata(op_%name%, op, macro (size, get, dispatch, metadata, return) 941 metadata(t5, t0) 942 943 macro profile(type) 944 ori type, %op%::Metadata::arithProfile[t5] 945 end 946 947 get(rhs, t0) 948 get(lhs, t2) 949 loadConstantOrVariable(size, t0, t1) 950 loadConstantOrVariable(size, t2, t0) 951 bqb t0, tagTypeNumber, .op1NotInt 952 bqb t1, tagTypeNumber, .op2NotInt 953 get(dst, t2) 954 integerOperationAndStore(t1, t0, .slow, t2) 955 956 profile(ArithProfileIntInt) 957 dispatch() 958 959 .op1NotInt: 960 # First operand is definitely not an int, the second operand could be anything. 961 btqz t0, tagTypeNumber, .slow 962 bqaeq t1, tagTypeNumber, .op1NotIntOp2Int 963 btqz t1, tagTypeNumber, .slow 964 addq tagTypeNumber, t1 965 fq2d t1, ft1 966 profile(ArithProfileNumberNumber) 967 jmp .op1NotIntReady 968 .op1NotIntOp2Int: 969 profile(ArithProfileNumberInt) 970 ci2d t1, ft1 971 .op1NotIntReady: 972 get(dst, t2) 973 addq tagTypeNumber, t0 974 fq2d t0, ft0 975 doubleOperation(ft1, ft0) 976 fd2q ft0, t0 977 subq tagTypeNumber, t0 978 storeq t0, [cfr, t2, 8] 979 dispatch() 980 981 .op2NotInt: 982 # First operand is definitely an int, the second is definitely not. 983 get(dst, t2) 984 btqz t1, tagTypeNumber, .slow 985 profile(ArithProfileIntNumber) 986 ci2d t0, ft0 987 addq tagTypeNumber, t1 988 fq2d t1, ft1 989 doubleOperation(ft1, ft0) 990 fd2q ft0, t0 991 subq tagTypeNumber, t0 992 storeq t0, [cfr, t2, 8] 993 dispatch() 994 995 .slow: 996 callSlowPath(_slow_path_%name%) 997 dispatch() 998 end) 999 end 1000 1001 if X86_64 or X86_64_WIN 1002 binaryOpCustomStore(div, OpDiv, 1003 macro (left, right, slow, index) 1004 # Assume t3 is scratchable. 1005 btiz left, slow 1006 bineq left, -1, .notNeg2TwoThe31DivByNeg1 1007 bieq right, -2147483648, .slow 1008 .notNeg2TwoThe31DivByNeg1: 1009 btinz right, .intOK 1010 bilt left, 0, slow 1011 .intOK: 1012 move left, t3 1013 move right, t0 1014 cdqi 1015 idivi t3 1016 btinz t1, slow 1017 orq tagTypeNumber, t0 1018 storeq t0, [cfr, index, 8] 1019 end, 1020 macro (left, right) divd left, right end) 1021 else 1022 slowPathOp(div) 1023 end 1024 1025 1026 binaryOpCustomStore(mul, OpMul, 1027 macro (left, right, slow, index) 1028 # Assume t3 is scratchable. 
1029 move right, t3 1030 bmulio left, t3, slow 1031 btinz t3, .done 1032 bilt left, 0, slow 1033 bilt right, 0, slow 1034 .done: 1035 orq tagTypeNumber, t3 1036 storeq t3, [cfr, index, 8] 1037 end, 1038 macro (left, right) muld left, right end) 1039 1040 1041 macro binaryOp(name, op, integerOperation, doubleOperation) 1042 binaryOpCustomStore(name, op, 1025 1043 macro (left, right, slow, index) 1026 1044 integerOperation(left, right, slow) … … 1028 1046 storeq right, [cfr, index, 8] 1029 1047 end, 1030 doubleOperation, slowPath) 1031 end 1032 1033 _llint_op_add: 1034 traceExecution() 1035 binaryOp( 1036 macro (left, right, slow) baddio left, right, slow end, 1037 macro (left, right) addd left, right end, 1038 _slow_path_add) 1039 1040 1041 _llint_op_mul: 1042 traceExecution() 1043 binaryOpCustomStore( 1044 macro (left, right, slow, index) 1045 # Assume t3 is scratchable. 1046 move right, t3 1047 bmulio left, t3, slow 1048 btinz t3, .done 1049 bilt left, 0, slow 1050 bilt right, 0, slow 1051 .done: 1052 orq tagTypeNumber, t3 1053 storeq t3, [cfr, index, 8] 1054 end, 1055 macro (left, right) muld left, right end, 1056 _slow_path_mul) 1057 1058 1059 _llint_op_sub: 1060 traceExecution() 1061 binaryOp( 1062 macro (left, right, slow) bsubio left, right, slow end, 1063 macro (left, right) subd left, right end, 1064 _slow_path_sub) 1065 1066 1067 _llint_op_div: 1068 traceExecution() 1069 if X86_64 or X86_64_WIN 1070 binaryOpCustomStore( 1071 macro (left, right, slow, index) 1072 # Assume t3 is scratchable. 1073 btiz left, slow 1074 bineq left, -1, .notNeg2TwoThe31DivByNeg1 1075 bieq right, -2147483648, .slow 1076 .notNeg2TwoThe31DivByNeg1: 1077 btinz right, .intOK 1078 bilt left, 0, slow 1079 .intOK: 1080 move left, t3 1081 move right, t0 1082 cdqi 1083 idivi t3 1084 btinz t1, slow 1085 orq tagTypeNumber, t0 1086 storeq t0, [cfr, index, 8] 1087 end, 1088 macro (left, right) divd left, right end, 1089 _slow_path_div) 1090 else 1091 callSlowPath(_slow_path_div) 1092 dispatch(constexpr op_div_length) 1093 end 1094 1095 1096 macro bitOpProfiled(operation, slowPath, advance) 1097 loadisFromInstruction(3, t0) 1098 loadisFromInstruction(2, t2) 1099 loadisFromInstruction(1, t3) 1100 loadConstantOrVariable(t0, t1) 1101 loadConstantOrVariable(t2, t0) 1102 bqb t0, tagTypeNumber, .slow 1103 bqb t1, tagTypeNumber, .slow 1104 operation(t1, t0) 1105 orq tagTypeNumber, t0 1106 storeq t0, [cfr, t3, 8] 1107 valueProfile(t0, advance - 1, t2) 1108 dispatch(advance) 1109 1110 .slow: 1111 callSlowPath(slowPath) 1112 dispatch(advance) 1113 end 1114 1115 macro bitOp(operation, slowPath, advance) 1116 loadisFromInstruction(3, t0) 1117 loadisFromInstruction(2, t2) 1118 loadisFromInstruction(1, t3) 1119 loadConstantOrVariable(t0, t1) 1120 loadConstantOrVariable(t2, t0) 1121 bqb t0, tagTypeNumber, .slow 1122 bqb t1, tagTypeNumber, .slow 1123 operation(t1, t0) 1124 orq tagTypeNumber, t0 1125 storeq t0, [cfr, t3, 8] 1126 dispatch(advance) 1127 1128 .slow: 1129 callSlowPath(slowPath) 1130 dispatch(advance) 1131 end 1132 1133 _llint_op_lshift: 1134 traceExecution() 1135 bitOp( 1136 macro (left, right) lshifti left, right end, 1137 _slow_path_lshift, 1138 constexpr op_lshift_length) 1139 1140 1141 _llint_op_rshift: 1142 traceExecution() 1143 bitOp( 1144 macro (left, right) rshifti left, right end, 1145 _slow_path_rshift, 1146 constexpr op_rshift_length) 1147 1148 1149 _llint_op_urshift: 1150 traceExecution() 1151 bitOp( 1152 macro (left, right) urshifti left, right end, 1153 _slow_path_urshift, 1154 constexpr op_urshift_length) 
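All of the shift and bitwise fast paths above share one shape: verify the TagTypeNumber bits on both operands (the bqb ... tagTypeNumber, .slow checks), operate on the low 32 bits, then re-tag with orq tagTypeNumber. A self-contained sketch of that JSVALUE64 boxing scheme; the constant matches JSC's TagTypeNumber at the time of this change, while the helper names are invented.

#include <cstdint>
#include <cstdio>

// Any boxed int32 has all 16 high bits set, so OR-ing boxes a value and
// the low 32 bits unbox it; values with different high bits are cells
// or doubles and take the slow path.
constexpr uint64_t tagTypeNumber = 0xffff000000000000ull;

uint64_t boxInt32(int32_t i) { return tagTypeNumber | static_cast<uint32_t>(i); }
bool isInt32(uint64_t v) { return (v & tagTypeNumber) == tagTypeNumber; }
int32_t unboxInt32(uint64_t v) { return static_cast<int32_t>(v); }

int main() {
    uint64_t a = boxInt32(6), b = boxInt32(-2);
    // The bitand fast path: both tag checks pass, operate on the low
    // 32 bits, then re-tag the result (the `orq tagTypeNumber, t0`).
    if (isInt32(a) && isInt32(b)) {
        uint64_t r = boxInt32(unboxInt32(a) & unboxInt32(b));
        std::printf("%d\n", unboxInt32(r)); // 6 & -2 == 6
    }
}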
1155 1156 1157 _llint_op_unsigned: 1158 traceExecution() 1159 loadisFromInstruction(1, t0) 1160 loadisFromInstruction(2, t1) 1161 loadConstantOrVariable(t1, t2) 1048 doubleOperation) 1049 end 1050 1051 binaryOp(add, OpAdd, 1052 macro (left, right, slow) baddio left, right, slow end, 1053 macro (left, right) addd left, right end) 1054 1055 1056 binaryOp(sub, OpSub, 1057 macro (left, right, slow) bsubio left, right, slow end, 1058 macro (left, right) subd left, right end) 1059 1060 1061 llintOpWithReturn(op_unsigned, OpUnsigned, macro (size, get, dispatch, return) 1062 get(operand, t1) 1063 loadConstantOrVariable(size, t1, t2) 1162 1064 bilt t2, 0, .opUnsignedSlow 1163 storeq t2, [cfr, t0, 8] 1164 dispatch(constexpr op_unsigned_length) 1065 return(t2) 1165 1066 .opUnsignedSlow: 1166 1067 callSlowPath(_slow_path_unsigned) 1167 dispatch(constexpr op_unsigned_length) 1168 1169 1170 _llint_op_bitand: 1171 traceExecution() 1172 bitOpProfiled( 1173 macro (left, right) andi left, right end, 1174 _slow_path_bitand, 1175 constexpr op_bitand_length) 1176 1177 1178 _llint_op_bitxor: 1179 traceExecution() 1180 bitOp( 1181 macro (left, right) xori left, right end, 1182 _slow_path_bitxor, 1183 constexpr op_bitxor_length) 1184 1185 1186 _llint_op_bitor: 1187 traceExecution() 1188 bitOpProfiled( 1189 macro (left, right) ori left, right end, 1190 _slow_path_bitor, 1191 constexpr op_bitor_length) 1192 1193 1194 _llint_op_overrides_has_instance: 1195 traceExecution() 1196 loadisFromStruct(OpOverridesHasInstance::m_dst, t3) 1197 1198 loadisFromStruct(OpOverridesHasInstance::m_hasInstanceValue, t1) 1199 loadConstantOrVariable(t1, t0) 1068 dispatch() 1069 end) 1070 1071 1072 macro commonBitOp(opKind, name, op, operation) 1073 opKind(op_%name%, op, macro (size, get, dispatch, return) 1074 get(rhs, t0) 1075 get(lhs, t2) 1076 loadConstantOrVariable(size, t0, t1) 1077 loadConstantOrVariable(size, t2, t0) 1078 bqb t0, tagTypeNumber, .slow 1079 bqb t1, tagTypeNumber, .slow 1080 operation(t1, t0) 1081 orq tagTypeNumber, t0 1082 return(t0) 1083 1084 .slow: 1085 callSlowPath(_slow_path_%name%) 1086 dispatch() 1087 end) 1088 end 1089 1090 macro bitOp(name, op, operation) 1091 commonBitOp(llintOpWithReturn, name, op, operation) 1092 end 1093 1094 macro bitOpProfiled(name, op, operation) 1095 commonBitOp(llintOpWithProfile, name, op, operation) 1096 end 1097 1098 bitOp(lshift, OpLshift, 1099 macro (left, right) lshifti left, right end) 1100 1101 1102 bitOp(rshift, OpRshift, 1103 macro (left, right) rshifti left, right end) 1104 1105 1106 bitOp(urshift, OpUrshift, 1107 macro (left, right) urshifti left, right end) 1108 1109 1110 bitOp(bitxor, OpBitxor, 1111 macro (left, right) xori left, right end) 1112 1113 1114 bitOpProfiled(bitand, OpBitand, 1115 macro (left, right) andi left, right end) 1116 1117 1118 bitOpProfiled(bitor, OpBitor, 1119 macro (left, right) ori left, right end) 1120 1121 1122 llintOp(op_overrides_has_instance, OpOverridesHasInstance, macro (size, get, dispatch) 1123 get(dst, t3) 1124 1125 get(hasInstanceValue, t1) 1126 loadConstantOrVariable(size, t1, t0) 1200 1127 loadp CodeBlock[cfr], t2 1201 1128 loadp CodeBlock::m_globalObject[t2], t2 … … 1203 1130 bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol 1204 1131 1205 loadisFromStruct(OpOverridesHasInstance::m_constructor, t1)1206 loadConstantOrVariable( t1, t0)1132 get(constructor, t1) 1133 loadConstantOrVariable(size, t1, t0) 1207 1134 tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1 1208 1135 orq ValueFalse, t1 1209 1136 storeq t1, [cfr, t3, 
8] 1210 dispatch( constexpr op_overrides_has_instance_length)1137 dispatch() 1211 1138 1212 1139 .opOverridesHasInstanceNotDefaultSymbol: 1213 1140 storeq ValueTrue, [cfr, t3, 8] 1214 dispatch(constexpr op_overrides_has_instance_length) 1215 1216 1217 _llint_op_instanceof_custom: 1218 traceExecution() 1219 callSlowPath(_llint_slow_path_instanceof_custom) 1220 dispatch(constexpr op_instanceof_custom_length) 1221 1222 1223 _llint_op_is_empty: 1224 traceExecution() 1225 loadisFromInstruction(2, t1) 1226 loadisFromInstruction(1, t2) 1227 loadConstantOrVariable(t1, t0) 1141 dispatch() 1142 end) 1143 1144 1145 llintOpWithReturn(op_is_empty, OpIsEmpty, macro (size, get, dispatch, return) 1146 get(operand, t1) 1147 loadConstantOrVariable(size, t1, t0) 1228 1148 cqeq t0, ValueEmpty, t3 1229 1149 orq ValueFalse, t3 1230 storeq t3, [cfr, t2, 8] 1231 dispatch(constexpr op_is_empty_length) 1232 1233 1234 _llint_op_is_undefined: 1235 traceExecution() 1236 loadisFromInstruction(2, t1) 1237 loadisFromInstruction(1, t2) 1238 loadConstantOrVariable(t1, t0) 1150 return(t3) 1151 end) 1152 1153 1154 llintOpWithReturn(op_is_undefined, OpIsUndefined, macro (size, get, dispatch, return) 1155 get(operand, t1) 1156 loadConstantOrVariable(size, t1, t0) 1239 1157 btqz t0, tagMask, .opIsUndefinedCell 1240 1158 cqeq t0, ValueUndefined, t3 1241 1159 orq ValueFalse, t3 1242 storeq t3, [cfr, t2, 8] 1243 dispatch(constexpr op_is_undefined_length) 1160 return(t3) 1244 1161 .opIsUndefinedCell: 1245 1162 btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined 1246 1163 move ValueFalse, t1 1247 storeq t1, [cfr, t2, 8] 1248 dispatch(constexpr op_is_undefined_length) 1164 return(t1) 1249 1165 .masqueradesAsUndefined: 1250 1166 loadStructureWithScratch(t0, t3, t1, t5) … … 1253 1169 cpeq Structure::m_globalObject[t3], t1, t0 1254 1170 orq ValueFalse, t0 1255 storeq t0, [cfr, t2, 8] 1256 dispatch(constexpr op_is_undefined_length) 1257 1258 1259 _llint_op_is_boolean: 1260 traceExecution() 1261 loadisFromInstruction(2, t1) 1262 loadisFromInstruction(1, t2) 1263 loadConstantOrVariable(t1, t0) 1171 return(t0) 1172 end) 1173 1174 1175 llintOpWithReturn(op_is_boolean, OpIsBoolean, macro (size, get, dispatch, return) 1176 get(operand, t1) 1177 loadConstantOrVariable(size, t1, t0) 1264 1178 xorq ValueFalse, t0 1265 1179 tqz t0, ~1, t0 1266 1180 orq ValueFalse, t0 1267 storeq t0, [cfr, t2, 8] 1268 dispatch(constexpr op_is_boolean_length) 1269 1270 1271 _llint_op_is_number: 1272 traceExecution() 1273 loadisFromInstruction(2, t1) 1274 loadisFromInstruction(1, t2) 1275 loadConstantOrVariable(t1, t0) 1181 return(t0) 1182 end) 1183 1184 1185 llintOpWithReturn(op_is_number, OpIsNumber, macro (size, get, dispatch, return) 1186 get(operand, t1) 1187 loadConstantOrVariable(size, t1, t0) 1276 1188 tqnz t0, tagTypeNumber, t1 1277 1189 orq ValueFalse, t1 1278 storeq t1, [cfr, t2, 8] 1279 dispatch(constexpr op_is_number_length) 1280 1281 1282 _llint_op_is_cell_with_type: 1283 traceExecution() 1284 loadisFromInstruction(3, t0) 1285 loadisFromInstruction(2, t1) 1286 loadisFromInstruction(1, t2) 1287 loadConstantOrVariable(t1, t3) 1190 return(t1) 1191 end) 1192 1193 1194 llintOpWithReturn(op_is_cell_with_type, OpIsCellWithType, macro (size, get, dispatch, return) 1195 get(type, t0) 1196 get(operand, t1) 1197 loadConstantOrVariable(size, t1, t3) 1288 1198 btqnz t3, tagMask, .notCellCase 1289 1199 cbeq JSCell::m_type[t3], t0, t1 1290 1200 orq ValueFalse, t1 1291 storeq t1, [cfr, t2, 8] 1292 dispatch(constexpr op_is_cell_with_type_length) 
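The .masqueradesAsUndefined path above is the subtle half of op_is_undefined: a cell with the MasqueradesAsUndefined flag only reads as undefined when its Structure's global object is the one running the code, which is how a masquerading object such as document.all from another frame still compares as an ordinary object. A hypothetical miniature of that check:

#include <cstdio>

struct GlobalObject {};
struct Structure { GlobalObject* globalObject; };
struct Cell {
    Structure* structure;
    bool masqueradesAsUndefined; // the JSCell::m_flags bit tested above
};

bool isUndefinedCell(const Cell& cell, GlobalObject* currentGlobal) {
    if (!cell.masqueradesAsUndefined)
        return false;                                  // btbnz not taken
    return cell.structure->globalObject == currentGlobal; // the cpeq above
}

int main() {
    GlobalObject frameA, frameB;
    Structure s { &frameA };
    Cell masquerader { &s, true };
    std::printf("%d\n", isUndefinedCell(masquerader, &frameA)); // 1
    std::printf("%d\n", isUndefinedCell(masquerader, &frameB)); // 0
}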
1201 return(t1) 1293 1202 .notCellCase: 1294 storeq ValueFalse, [cfr, t2, 8] 1295 dispatch(constexpr op_is_cell_with_type_length) 1296 1297 1298 _llint_op_is_object: 1299 traceExecution() 1300 loadisFromInstruction(2, t1) 1301 loadisFromInstruction(1, t2) 1302 loadConstantOrVariable(t1, t0) 1203 return(ValueFalse) 1204 end) 1205 1206 1207 llintOpWithReturn(op_is_object, OpIsObject, macro (size, get, dispatch, return) 1208 get(operand, t1) 1209 loadConstantOrVariable(size, t1, t0) 1303 1210 btqnz t0, tagMask, .opIsObjectNotCell 1304 1211 cbaeq JSCell::m_type[t0], ObjectType, t1 1305 1212 orq ValueFalse, t1 1306 storeq t1, [cfr, t2, 8] 1307 dispatch(constexpr op_is_object_length) 1213 return(t1) 1308 1214 .opIsObjectNotCell: 1309 storeq ValueFalse, [cfr, t2, 8]1310 dispatch(constexpr op_is_object_length)1215 return(ValueFalse) 1216 end) 1311 1217 1312 1218 … … 1337 1243 1338 1244 1339 _llint_op_get_by_id_direct: 1340 traceExecution()1341 loadisFromInstruction(2, t0)1342 loadConstantOrVariableCell( t0, t3, .opGetByIdDirectSlow)1245 llintOpWithMetadata(op_get_by_id_direct, OpGetByIdDirect, macro (size, get, dispatch, metadata, return) 1246 metadata(t2, t0) 1247 get(base, t0) 1248 loadConstantOrVariableCell(size, t0, t3, .opGetByIdDirectSlow) 1343 1249 loadi JSCell::m_structureID[t3], t1 1344 loadisFromInstruction(4, t2) 1345 bineq t2, t1, .opGetByIdDirectSlow 1346 loadisFromInstruction(5, t1) 1347 loadisFromInstruction(1, t2) 1250 loadi OpGetByIdDirect::Metadata::structure[t2], t0 1251 bineq t0, t1, .opGetByIdDirectSlow 1252 loadi OpGetByIdDirect::Metadata::offset[t2], t1 1348 1253 loadPropertyAtVariableOffset(t1, t3, t0) 1349 storeq t0, [cfr, t2, 8] 1350 valueProfile(t0, 6, t1) 1351 dispatch(constexpr op_get_by_id_direct_length) 1254 valueProfile(OpGetByIdDirect, t2, t0) 1255 return(t0) 1352 1256 1353 1257 .opGetByIdDirectSlow: 1354 1258 callSlowPath(_llint_slow_path_get_by_id_direct) 1355 dispatch(constexpr op_get_by_id_direct_length) 1356 1357 1358 _llint_op_get_by_id: 1359 traceExecution() 1360 loadisFromInstruction(2, t0) 1361 loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) 1259 dispatch() 1260 end) 1261 1262 1263 llintOpWithMetadata(op_get_by_id, OpGetById, macro (size, get, dispatch, metadata, return) 1264 metadata(t2, t1) 1265 loadb OpGetById::Metadata::mode[t2], t1 1266 get(base, t0) 1267 loadConstantOrVariableCell(size, t0, t3, .opGetByIdSlow) 1268 1269 .opGetByIdDefault: 1270 bbneq t1, constexpr GetByIdMode::Default, .opGetByIdProtoLoad 1362 1271 loadi JSCell::m_structureID[t3], t1 1363 loadisFromInstruction(4, t2) 1364 bineq t2, t1, .opGetByIdSlow 1365 loadisFromInstruction(5, t1) 1366 loadisFromInstruction(1, t2) 1272 loadi OpGetById::Metadata::modeMetadata.defaultMode.structure[t2], t0 1273 bineq t0, t1, .opGetByIdSlow 1274 loadis OpGetById::Metadata::modeMetadata.defaultMode.cachedOffset[t2], t1 1367 1275 loadPropertyAtVariableOffset(t1, t3, t0) 1368 storeq t0, [cfr, t2, 8] 1369 valueProfile(t0, 8, t1) 1370 dispatch(constexpr op_get_by_id_length) 1276 valueProfile(OpGetById, t2, t0) 1277 return(t0) 1278 1279 .opGetByIdProtoLoad: 1280 bbneq t1, constexpr GetByIdMode::ProtoLoad, .opGetByIdArrayLength 1281 loadi JSCell::m_structureID[t3], t1 1282 loadis OpGetById::Metadata::modeMetadata.protoLoadMode.structure[t2], t3 1283 bineq t3, t1, .opGetByIdSlow 1284 loadi OpGetById::Metadata::modeMetadata.protoLoadMode.cachedOffset[t2], t1 1285 loadp OpGetById::Metadata::modeMetadata.protoLoadMode.cachedSlot[t2], t3 1286 loadPropertyAtVariableOffset(t1, t3, t0) 1287 
valueProfile(OpGetById, t2, t0) 1288 return(t0) 1289 1290 .opGetByIdArrayLength: 1291 bbneq t1, constexpr GetByIdMode::ArrayLength, .opGetByIdUnset 1292 move t3, t0 1293 arrayProfile(OpGetById::Metadata::modeMetadata.arrayLengthMode.arrayProfile, t0, t2, t5) 1294 btiz t0, IsArray, .opGetByIdSlow 1295 btiz t0, IndexingShapeMask, .opGetByIdSlow 1296 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t1) 1297 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 1298 bilt t0, 0, .opGetByIdSlow 1299 orq tagTypeNumber, t0 1300 valueProfile(OpGetById, t2, t0) 1301 return(t0) 1302 1303 .opGetByIdUnset: 1304 loadi JSCell::m_structureID[t3], t1 1305 loadi OpGetById::Metadata::modeMetadata.unsetMode.structure[t2], t0 1306 bineq t0, t1, .opGetByIdSlow 1307 valueProfile(OpGetById, t2, ValueUndefined) 1308 return(ValueUndefined) 1371 1309 1372 1310 .opGetByIdSlow: 1373 1311 callSlowPath(_llint_slow_path_get_by_id) 1374 dispatch(constexpr op_get_by_id_length) 1375 1376 1377 _llint_op_get_by_id_proto_load: 1378 traceExecution() 1379 loadisFromInstruction(2, t0) 1380 loadConstantOrVariableCell(t0, t3, .opGetByIdProtoSlow) 1381 loadi JSCell::m_structureID[t3], t1 1382 loadisFromInstruction(4, t2) 1383 bineq t2, t1, .opGetByIdProtoSlow 1384 loadisFromInstruction(5, t1) 1385 loadpFromInstruction(6, t3) 1386 loadisFromInstruction(1, t2) 1387 loadPropertyAtVariableOffset(t1, t3, t0) 1388 storeq t0, [cfr, t2, 8] 1389 valueProfile(t0, 8, t1) 1390 dispatch(constexpr op_get_by_id_proto_load_length) 1391 1392 .opGetByIdProtoSlow: 1393 callSlowPath(_llint_slow_path_get_by_id) 1394 dispatch(constexpr op_get_by_id_proto_load_length) 1395 1396 1397 _llint_op_get_by_id_unset: 1398 traceExecution() 1399 loadisFromInstruction(2, t0) 1400 loadConstantOrVariableCell(t0, t3, .opGetByIdUnsetSlow) 1401 loadi JSCell::m_structureID[t3], t1 1402 loadisFromInstruction(4, t2) 1403 bineq t2, t1, .opGetByIdUnsetSlow 1404 loadisFromInstruction(1, t2) 1405 storeq ValueUndefined, [cfr, t2, 8] 1406 valueProfile(ValueUndefined, 8, t1) 1407 dispatch(constexpr op_get_by_id_unset_length) 1408 1409 .opGetByIdUnsetSlow: 1410 callSlowPath(_llint_slow_path_get_by_id) 1411 dispatch(constexpr op_get_by_id_unset_length) 1412 1413 1414 _llint_op_get_array_length: 1415 traceExecution() 1416 loadisFromInstruction(2, t0) 1417 loadpFromInstruction(4, t1) 1418 loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow) 1419 move t3, t2 1420 arrayProfile(t2, t1, t0) 1421 btiz t2, IsArray, .opGetArrayLengthSlow 1422 btiz t2, IndexingShapeMask, .opGetArrayLengthSlow 1423 loadisFromInstruction(1, t1) 1424 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t2) 1425 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 1426 bilt t0, 0, .opGetArrayLengthSlow 1427 orq tagTypeNumber, t0 1428 valueProfile(t0, 8, t2) 1429 storeq t0, [cfr, t1, 8] 1430 dispatch(constexpr op_get_array_length_length) 1431 1432 .opGetArrayLengthSlow: 1433 callSlowPath(_llint_slow_path_get_by_id) 1434 dispatch(constexpr op_get_array_length_length) 1435 1436 1437 _llint_op_put_by_id: 1438 traceExecution() 1439 loadisFromInstruction(1, t3) 1440 loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) 1441 loadisFromInstruction(4, t2) 1312 dispatch() 1313 end) 1314 1315 1316 llintOpWithMetadata(op_put_by_id, OpPutById, macro (size, get, dispatch, metadata, return) 1317 get(base, t3) 1318 
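Where the old interpreter had distinct op_get_by_id_proto_load, op_get_by_id_unset and op_get_array_length opcodes, the rewrite above keeps a single op_get_by_id and selects the cached fast path from a mode byte in its metadata. A sketch of that dispatch covering two of the four modes; every type and field name here is a simplified stand-in.

#include <cstdint>
#include <cstdio>

enum class GetByIdMode : uint8_t { Default, Unset };

struct Object {
    uint32_t structureID;
    long slots[4]; // property storage, heavily simplified
};

struct GetByIdMetadata {
    GetByIdMode mode = GetByIdMode::Default;
    uint32_t cachedStructureID = 0;
    unsigned cachedOffset = 0;
};

// Returns false when the cache misses, i.e. the .opGetByIdSlow branch.
bool tryGetById(const Object& base, const GetByIdMetadata& md, long& result) {
    if (base.structureID != md.cachedStructureID)
        return false;                         // bineq t0, t1, .opGetByIdSlow
    switch (md.mode) {
    case GetByIdMode::Default:
        result = base.slots[md.cachedOffset]; // self load at cached offset
        return true;
    case GetByIdMode::Unset:
        result = 0;                           // stand-in for ValueUndefined
        return true;
    }
    return false;
}

int main() {
    Object o { 7, { 10, 20, 30, 40 } };
    GetByIdMetadata md { GetByIdMode::Default, 7, 2 };
    long r;
    if (tryGetById(o, md, r))
        std::printf("hit: %ld\n", r); // hit: 30
}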
loadConstantOrVariableCell(size, t3, t0, .opPutByIdSlow) 1319 metadata(t5, t2) 1320 loadis OpPutById::Metadata::oldStructure[t5], t2 1442 1321 bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow 1443 1322 1444 1323 # At this point, we have: 1324 # t5 -> metadata 1445 1325 # t2 -> current structure ID 1446 1326 # t0 -> object base 1447 1327 1448 loadisFromInstruction(3, t1)1449 loadConstantOrVariable( t1, t3)1450 1451 loadp FromInstruction(8, t1)1328 get(value, t1) 1329 loadConstantOrVariable(size, t1, t3) 1330 1331 loadp OpPutById::Metadata::flags[t5], t1 1452 1332 1453 1333 # At this point, we have: … … 1456 1336 # t2 -> current structure ID 1457 1337 # t3 -> value to put 1338 # t5 -> metadata 1458 1339 1459 1340 btpnz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther … … 1525 1406 1526 1407 .opPutByIdDoneCheckingTypes: 1527 loadi sFromInstruction(6, t1)1408 loadi OpPutById::Metadata::newStructure[t5], t1 1528 1409 1529 1410 btiz t1, .opPutByIdNotTransition … … 1532 1413 # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as 1533 1414 # scratch. 1534 loadp FromInstruction(7, t3)1415 loadp OpPutById::Metadata::structureChain[t5], t3 1535 1416 btpz t3, .opPutByIdTransitionDirect 1417 1418 structureIDToStructureWithScratch(t2, t1, t3) 1419 1420 # reload the StructureChain since we used t3 as a scratch above 1421 loadp OpPutById::Metadata::structureChain[t5], t3 1536 1422 1537 1423 loadp StructureChain::m_vector[t3], t3 1538 1424 assert(macro (ok) btpnz t3, ok end) 1539 1425 1540 structureIDToStructureWithScratch(t2, t1, t5)1541 1426 loadq Structure::m_prototype[t2], t2 1542 1427 bqeq t2, ValueNull, .opPutByIdTransitionChainDone … … 1555 1440 .opPutByIdTransitionChainDone: 1556 1441 # Reload the new structure, since we clobbered it above. 1557 loadi sFromInstruction(6, t1)1442 loadi OpPutById::Metadata::newStructure[t5], t1 1558 1443 1559 1444 .opPutByIdTransitionDirect: 1560 1445 storei t1, JSCell::m_structureID[t0] 1561 writeBarrierOnOperand(1) 1562 # Reload base into t0 1563 loadisFromInstruction(1, t1) 1564 loadConstantOrVariable(t1, t0) 1446 writeBarrierOnOperandWithReload(size, get, base, macro () 1447 # Reload metadata into t5 1448 metadata(t5, t1) 1449 # Reload base into t0 1450 get(base, t1) 1451 loadConstantOrVariable(size, t1, t0) 1452 end) 1565 1453 1566 1454 .opPutByIdNotTransition: 1567 1455 # The only thing live right now is t0, which holds the base. 
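The transition case of op_put_by_id above revalidates a recorded StructureChain before committing the new structure ID: each Structure on the receiver's current prototype chain must still match the chain captured when the cache was built, otherwise the put falls back to .opPutByIdSlow. Roughly, with simplified types and invented names:

#include <cstdio>
#include <vector>

struct Structure;
struct Object { Structure* structure; };
struct Structure { Object* prototype; };

bool chainStillMatches(Object* base, const std::vector<Structure*>& cached) {
    Object* current = base->structure->prototype;
    for (Structure* expected : cached) {
        if (!current || current->structure != expected)
            return false;                 // mismatch: take the slow path
        current = current->structure->prototype;
    }
    return !current;                      // chain ends where the cache said
}

int main() {
    Structure protoStructure { nullptr };
    Object proto { &protoStructure };
    Structure baseStructure { &proto };
    Object base { &baseStructure };
    std::printf("%d\n", chainStillMatches(&base, { &protoStructure })); // 1
}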
1568 loadisFromInstruction(3, t1)1569 loadConstantOrVariable( t1, t2)1570 loadi sFromInstruction(5, t1)1456 get(value, t1) 1457 loadConstantOrVariable(size, t1, t2) 1458 loadi OpPutById::Metadata::offset[t5], t1 1571 1459 storePropertyAtVariableOffset(t1, t0, t2) 1572 writeBarrierOnOperands( 1, 3)1573 dispatch( constexpr op_put_by_id_length)1460 writeBarrierOnOperands(size, get, base, value) 1461 dispatch() 1574 1462 1575 1463 .opPutByIdSlow: 1576 1464 callSlowPath(_llint_slow_path_put_by_id) 1577 dispatch(constexpr op_put_by_id_length) 1578 1579 1580 macro finishGetByVal(result, scratch) 1581 loadisFromInstruction(1, scratch) 1582 storeq result, [cfr, scratch, 8] 1583 valueProfile(result, 5, scratch) 1584 dispatch(6) 1585 end 1586 1587 macro finishIntGetByVal(result, scratch) 1588 orq tagTypeNumber, result 1589 finishGetByVal(result, scratch) 1590 end 1591 1592 macro finishDoubleGetByVal(result, scratch1, scratch2) 1593 fd2q result, scratch1 1594 subq tagTypeNumber, scratch1 1595 finishGetByVal(scratch1, scratch2) 1596 end 1597 1598 _llint_op_get_by_val: 1599 traceExecution() 1600 loadisFromInstruction(2, t2) 1601 loadConstantOrVariableCell(t2, t0, .opGetByValSlow) 1602 loadpFromInstruction(4, t3) 1465 dispatch() 1466 end) 1467 1468 1469 llintOpWithMetadata(op_get_by_val, OpGetByVal, macro (size, get, dispatch, metadata, return) 1470 macro finishGetByVal(result, scratch) 1471 get(dst, scratch) 1472 storeq result, [cfr, scratch, 8] 1473 valueProfile(OpGetByVal, t5, result) 1474 dispatch() 1475 end 1476 1477 macro finishIntGetByVal(result, scratch) 1478 orq tagTypeNumber, result 1479 finishGetByVal(result, scratch) 1480 end 1481 1482 macro finishDoubleGetByVal(result, scratch1, scratch2) 1483 fd2q result, scratch1 1484 subq tagTypeNumber, scratch1 1485 finishGetByVal(scratch1, scratch2) 1486 end 1487 1488 metadata(t5, t2) 1489 1490 get(base, t2) 1491 loadConstantOrVariableCell(size, t2, t0, .opGetByValSlow) 1492 1603 1493 move t0, t2 1604 arrayProfile(t2, t3, t1) 1605 loadisFromInstruction(3, t3) 1606 loadConstantOrVariableInt32(t3, t1, .opGetByValSlow) 1494 arrayProfile(OpGetByVal::Metadata::arrayProfile, t2, t5, t1) 1495 1496 get(property, t3) 1497 loadConstantOrVariableInt32(size, t3, t1, .opGetByValSlow) 1607 1498 sxi2q t1, t1 1608 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, t5) 1499 1500 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, tagTypeNumber) 1501 move TagTypeNumber, tagTypeNumber 1502 1609 1503 andi IndexingShapeMask, t2 1610 1504 bieq t2, Int32Shape, .opGetByValIsContiguous … … 1613 1507 .opGetByValIsContiguous: 1614 1508 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValSlow 1615 loadisFromInstruction(1, t0)1509 get(dst, t0) 1616 1510 loadq [t3, t1, 8], t2 1617 1511 btqz t2, .opGetByValSlow … … 1621 1515 bineq t2, DoubleShape, .opGetByValNotDouble 1622 1516 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t3], .opGetByValSlow 1623 loadisFromInstruction(1 ,t0)1517 get(dst, t0) 1624 1518 loadd [t3, t1, 8], ft0 1625 1519 bdnequn ft0, ft0, .opGetByValSlow … … 1632 1526 bia t2, SlowPutArrayStorageShape - ArrayStorageShape, .opGetByValNotIndexedStorage 1633 1527 biaeq t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t3], .opGetByValSlow 1634 loadisFromInstruction(1, t0)1528 get(dst, t0) 1635 1529 loadq ArrayStorage::m_vector[t3, t1, 8], t2 1636 
1530 btqz t2, .opGetByValSlow … … 1638 1532 .opGetByValDone: 1639 1533 storeq t2, [cfr, t0, 8] 1640 valueProfile( t2, 5, t0)1641 dispatch( constexpr op_get_by_val_length)1534 valueProfile(OpGetByVal, t5, t2) 1535 dispatch() 1642 1536 1643 1537 .opGetByValNotIndexedStorage: … … 1736 1630 .opGetByValSlow: 1737 1631 callSlowPath(_llint_slow_path_get_by_val) 1738 dispatch(constexpr op_get_by_val_length) 1739 1740 1741 macro contiguousPutByVal(storeCallback) 1742 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds 1743 .storeResult: 1744 loadisFromInstruction(3, t2) 1745 storeCallback(t2, t1, [t0, t3, 8]) 1746 dispatch(5) 1747 1748 .outOfBounds: 1749 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1750 loadpFromInstruction(4, t2) 1751 storeb 1, ArrayProfile::m_mayStoreToHole[t2] 1752 addi 1, t3, t2 1753 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1754 jmp .storeResult 1755 end 1756 1757 macro putByVal(slowPath) 1758 traceExecution() 1759 loadisFromInstruction(1, t0) 1760 loadConstantOrVariableCell(t0, t1, .opPutByValSlow) 1761 loadpFromInstruction(4, t3) 1762 move t1, t2 1763 arrayProfile(t2, t3, t0) 1764 loadisFromInstruction(2, t0) 1765 loadConstantOrVariableInt32(t0, t3, .opPutByValSlow) 1766 sxi2q t3, t3 1767 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, t5) 1768 btinz t2, CopyOnWrite, .opPutByValSlow 1769 andi IndexingShapeMask, t2 1770 bineq t2, Int32Shape, .opPutByValNotInt32 1771 contiguousPutByVal( 1772 macro (operand, scratch, address) 1773 loadConstantOrVariable(operand, scratch) 1774 bqb scratch, tagTypeNumber, .opPutByValSlow 1775 storeq scratch, address 1776 writeBarrierOnOperands(1, 3) 1777 end) 1778 1779 .opPutByValNotInt32: 1780 bineq t2, DoubleShape, .opPutByValNotDouble 1781 contiguousPutByVal( 1782 macro (operand, scratch, address) 1783 loadConstantOrVariable(operand, scratch) 1784 bqb scratch, tagTypeNumber, .notInt 1785 ci2d scratch, ft0 1786 jmp .ready 1787 .notInt: 1788 addq tagTypeNumber, scratch 1789 fq2d scratch, ft0 1790 bdnequn ft0, ft0, .opPutByValSlow 1791 .ready: 1792 stored ft0, address 1793 writeBarrierOnOperands(1, 3) 1794 end) 1795 1796 .opPutByValNotDouble: 1797 bineq t2, ContiguousShape, .opPutByValNotContiguous 1798 contiguousPutByVal( 1799 macro (operand, scratch, address) 1800 loadConstantOrVariable(operand, scratch) 1801 storeq scratch, address 1802 writeBarrierOnOperands(1, 3) 1803 end) 1804 1805 .opPutByValNotContiguous: 1806 bineq t2, ArrayStorageShape, .opPutByValSlow 1807 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1808 btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty 1809 .opPutByValArrayStorageStoreResult: 1810 loadisFromInstruction(3, t2) 1811 loadConstantOrVariable(t2, t1) 1812 storeq t1, ArrayStorage::m_vector[t0, t3, 8] 1813 writeBarrierOnOperands(1, 3) 1814 dispatch(5) 1815 1816 .opPutByValArrayStorageEmpty: 1817 loadpFromInstruction(4, t1) 1818 storeb 1, ArrayProfile::m_mayStoreToHole[t1] 1819 addi 1, ArrayStorage::m_numValuesInVector[t0] 1820 bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult 1821 addi 1, t3, t1 1822 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1823 jmp .opPutByValArrayStorageStoreResult 1824 1825 .opPutByValOutOfBounds: 1826 loadpFromInstruction(4, 
t0) 1827 storeb 1, ArrayProfile::m_outOfBounds[t0] 1828 .opPutByValSlow: 1829 callSlowPath(slowPath) 1830 dispatch(5) 1831 end 1832 1833 _llint_op_put_by_val: 1834 putByVal(_llint_slow_path_put_by_val) 1835 1836 _llint_op_put_by_val_direct: 1837 putByVal(_llint_slow_path_put_by_val_direct) 1838 1839 1840 _llint_op_jmp: 1841 traceExecution() 1842 dispatchIntIndirect(1) 1843 1844 1845 macro jumpTrueOrFalse(conditionOp, slow) 1846 loadisFromInstruction(1, t1) 1847 loadConstantOrVariable(t1, t0) 1848 btqnz t0, ~0xf, .slow 1849 conditionOp(t0, .target) 1850 dispatch(3) 1851 1852 .target: 1853 dispatchIntIndirect(2) 1854 1855 .slow: 1856 callSlowPath(slow) 1857 dispatch(0) 1858 end 1859 1860 1861 macro equalNull(cellHandler, immediateHandler) 1862 loadisFromInstruction(1, t0) 1863 assertNotConstant(t0) 1864 loadq [cfr, t0, 8], t0 1865 btqnz t0, tagMask, .immediate 1866 loadStructureWithScratch(t0, t2, t1, t3) 1867 cellHandler(t2, JSCell::m_flags[t0], .target) 1868 dispatch(3) 1869 1870 .target: 1871 dispatchIntIndirect(2) 1872 1873 .immediate: 1874 andq ~TagBitUndefined, t0 1875 immediateHandler(t0, .target) 1876 dispatch(3) 1877 end 1878 1879 _llint_op_jeq_null: 1880 traceExecution() 1881 equalNull( 1882 macro (structure, value, target) 1883 btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined 1884 loadp CodeBlock[cfr], t0 1885 loadp CodeBlock::m_globalObject[t0], t0 1886 bpeq Structure::m_globalObject[structure], t0, target 1632 dispatch() 1633 end) 1634 1635 1636 macro putByValOp(name, op) 1637 llintOpWithMetadata(op_%name%, op, macro (size, get, dispatch, metadata, return) 1638 macro contiguousPutByVal(storeCallback) 1639 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds 1640 .storeResult: 1641 get(value, t2) 1642 storeCallback(t2, t1, [t0, t3, 8]) 1643 dispatch() 1644 1645 .outOfBounds: 1646 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1647 storeb 1, %op%::Metadata::arrayProfile.m_mayStoreToHole[t5] 1648 addi 1, t3, t2 1649 storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1650 jmp .storeResult 1651 end 1652 1653 get(base, t0) 1654 loadConstantOrVariableCell(size, t0, t1, .opPutByValSlow) 1655 move t1, t2 1656 metadata(t5, t0) 1657 arrayProfile(%op%::Metadata::arrayProfile, t2, t5, t0) 1658 get(property, t0) 1659 loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow) 1660 sxi2q t3, t3 1661 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, tagTypeNumber) 1662 move TagTypeNumber, tagTypeNumber 1663 btinz t2, CopyOnWrite, .opPutByValSlow 1664 andi IndexingShapeMask, t2 1665 bineq t2, Int32Shape, .opPutByValNotInt32 1666 contiguousPutByVal( 1667 macro (operand, scratch, address) 1668 loadConstantOrVariable(size, operand, scratch) 1669 bqb scratch, tagTypeNumber, .opPutByValSlow 1670 storeq scratch, address 1671 writeBarrierOnOperands(size, get, base, value) 1672 end) 1673 1674 .opPutByValNotInt32: 1675 bineq t2, DoubleShape, .opPutByValNotDouble 1676 contiguousPutByVal( 1677 macro (operand, scratch, address) 1678 loadConstantOrVariable(size, operand, scratch) 1679 bqb scratch, tagTypeNumber, .notInt 1680 ci2d scratch, ft0 1681 jmp .ready 1682 .notInt: 1683 addp tagTypeNumber, scratch 1684 fq2d scratch, ft0 1685 bdnequn ft0, ft0, .opPutByValSlow 1686 .ready: 1687 stored ft0, address 1688 writeBarrierOnOperands(size, get, base, value) 1689 end) 1690 1691 .opPutByValNotDouble: 1692 
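contiguousPutByVal above separates three outcomes: a store inside publicLength is a plain write, a store between publicLength and vectorLength appends (bumping publicLength and recording ArrayProfile::m_mayStoreToHole), and anything past vectorLength goes out of line. A compact model under those assumptions, with hypothetical names:

#include <cstdint>
#include <cstdio>
#include <vector>

struct ContiguousStorage {
    uint32_t publicLength = 0;    // what .length reports
    std::vector<uint64_t> vector; // capacity plays the role of vectorLength
    bool mayStoreToHole = false;  // ArrayProfile::m_mayStoreToHole
};

// Returns false for a true out-of-bounds store (the slow path, which
// would also set ArrayProfile::m_outOfBounds in the code above).
bool putByVal(ContiguousStorage& s, uint32_t index, uint64_t value) {
    if (index >= s.vector.size())
        return false;             // .opPutByValOutOfBounds
    if (index >= s.publicLength) {
        s.mayStoreToHole = true;  // storeb 1, m_mayStoreToHole
        s.publicLength = index + 1;
    }
    s.vector[index] = value;      // .storeResult
    return true;
}

int main() {
    ContiguousStorage s;
    s.vector.resize(8);           // vectorLength = 8
    putByVal(s, 0, 42);           // append at the end: no hole yet
    putByVal(s, 5, 7);            // leaves holes at indices 1..4
    std::printf("len=%u holes=%d\n", s.publicLength, int(s.mayStoreToHole));
}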
bineq t2, ContiguousShape, .opPutByValNotContiguous 1693 contiguousPutByVal( 1694 macro (operand, scratch, address) 1695 loadConstantOrVariable(size, operand, scratch) 1696 storeq scratch, address 1697 writeBarrierOnOperands(size, get, base, value) 1698 end) 1699 1700 .opPutByValNotContiguous: 1701 bineq t2, ArrayStorageShape, .opPutByValSlow 1702 biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds 1703 btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty 1704 .opPutByValArrayStorageStoreResult: 1705 get(value, t2) 1706 loadConstantOrVariable(size, t2, t1) 1707 storeq t1, ArrayStorage::m_vector[t0, t3, 8] 1708 writeBarrierOnOperands(size, get, base, value) 1709 dispatch() 1710 1711 .opPutByValArrayStorageEmpty: 1712 storeb 1, %op%::Metadata::arrayProfile.m_mayStoreToHole[t5] 1713 addi 1, ArrayStorage::m_numValuesInVector[t0] 1714 bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult 1715 addi 1, t3, t1 1716 storei t1, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0] 1717 jmp .opPutByValArrayStorageStoreResult 1718 1719 .opPutByValOutOfBounds: 1720 storeb 1, %op%::Metadata::arrayProfile.m_outOfBounds[t5] 1721 .opPutByValSlow: 1722 callSlowPath(_llint_slow_path_%name%) 1723 dispatch() 1724 end) 1725 end 1726 1727 putByValOp(put_by_val, OpPutByVal) 1728 1729 putByValOp(put_by_val_direct, OpPutByValDirect) 1730 1731 1732 macro llintJumpTrueOrFalseOp(name, op, conditionOp) 1733 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1734 get(condition, t1) 1735 loadConstantOrVariable(size, t1, t0) 1736 btqnz t0, ~0xf, .slow 1737 conditionOp(t0, .target) 1738 dispatch() 1739 1740 .target: 1741 jump(target) 1742 1743 .slow: 1744 callSlowPath(_llint_slow_path_%name%) 1745 nextInstruction() 1746 end) 1747 end 1748 1749 1750 macro equalNullJumpOp(name, op, cellHandler, immediateHandler) 1751 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1752 get(value, t0) 1753 assertNotConstant(size, t0) 1754 loadq [cfr, t0, 8], t0 1755 btqnz t0, tagMask, .immediate 1756 loadStructureWithScratch(t0, t2, t1, t3) 1757 cellHandler(t2, JSCell::m_flags[t0], .target) 1758 dispatch() 1759 1760 .target: 1761 jump(target) 1762 1763 .immediate: 1764 andq ~TagBitUndefined, t0 1765 immediateHandler(t0, .target) 1766 dispatch() 1767 end) 1768 end 1769 1770 equalNullJumpOp(jeq_null, OpJeqNull, 1771 macro (structure, value, target) 1772 btbz value, MasqueradesAsUndefined, .notMasqueradesAsUndefined 1773 loadp CodeBlock[cfr], t0 1774 loadp CodeBlock::m_globalObject[t0], t0 1775 bpeq Structure::m_globalObject[structure], t0, target 1887 1776 .notMasqueradesAsUndefined: 1888 end, 1889 macro (value, target) bqeq value, ValueNull, target end) 1890 1891 1892 _llint_op_jneq_null: 1893 traceExecution() 1894 equalNull( 1895 macro (structure, value, target) 1896 btbz value, MasqueradesAsUndefined, target 1897 loadp CodeBlock[cfr], t0 1898 loadp CodeBlock::m_globalObject[t0], t0 1899 bpneq Structure::m_globalObject[structure], t0, target 1900 end, 1901 macro (value, target) bqneq value, ValueNull, target end) 1902 1903 1904 _llint_op_jneq_ptr: 1905 traceExecution() 1906 loadisFromInstruction(1, t0) 1907 loadisFromInstruction(2, t1) 1777 end, 1778 macro (value, target) bqeq value, ValueNull, target end) 1779 1780 1781 equalNullJumpOp(jneq_null, OpJneqNull, 1782 macro (structure, value, target) 1783 btbz value, MasqueradesAsUndefined, target 1784 loadp CodeBlock[cfr], t0 
1785 loadp CodeBlock::m_globalObject[t0], t0 1786 bpneq Structure::m_globalObject[structure], t0, target 1787 end, 1788 macro (value, target) bqneq value, ValueNull, target end) 1789 1790 1791 llintOpWithMetadata(op_jneq_ptr, OpJneqPtr, macro (size, get, dispatch, metadata, return) 1792 get(value, t0) 1793 get(specialPointer, t1) 1908 1794 loadp CodeBlock[cfr], t2 1909 1795 loadp CodeBlock::m_globalObject[t2], t2 1910 1796 loadp JSGlobalObject::m_specialPointers[t2, t1, PtrSize], t1 1911 1797 bpneq t1, [cfr, t0, 8], .opJneqPtrTarget 1912 dispatch( 5)1798 dispatch() 1913 1799 1914 1800 .opJneqPtrTarget: 1915 storeisToInstruction(1, 4) 1916 dispatchIntIndirect(3) 1917 1918 1919 macro compareJump(integerCompare, doubleCompare, slowPath) 1920 loadisFromInstruction(1, t2) 1921 loadisFromInstruction(2, t3) 1922 loadConstantOrVariable(t2, t0) 1923 loadConstantOrVariable(t3, t1) 1924 bqb t0, tagTypeNumber, .op1NotInt 1925 bqb t1, tagTypeNumber, .op2NotInt 1926 integerCompare(t0, t1, .jumpTarget) 1927 dispatch(4) 1928 1929 .op1NotInt: 1930 btqz t0, tagTypeNumber, .slow 1931 bqb t1, tagTypeNumber, .op1NotIntOp2NotInt 1932 ci2d t1, ft1 1933 jmp .op1NotIntReady 1934 .op1NotIntOp2NotInt: 1935 btqz t1, tagTypeNumber, .slow 1936 addq tagTypeNumber, t1 1937 fq2d t1, ft1 1938 .op1NotIntReady: 1939 addq tagTypeNumber, t0 1940 fq2d t0, ft0 1941 doubleCompare(ft0, ft1, .jumpTarget) 1942 dispatch(4) 1943 1944 .op2NotInt: 1945 ci2d t0, ft0 1946 btqz t1, tagTypeNumber, .slow 1947 addq tagTypeNumber, t1 1948 fq2d t1, ft1 1949 doubleCompare(ft0, ft1, .jumpTarget) 1950 dispatch(4) 1951 1952 .jumpTarget: 1953 dispatchIntIndirect(3) 1954 1955 .slow: 1956 callSlowPath(slowPath) 1957 dispatch(0) 1958 end 1959 1960 1961 macro compareUnsignedJump(integerCompare) 1962 loadisFromInstruction(1, t2) 1963 loadisFromInstruction(2, t3) 1964 loadConstantOrVariable(t2, t0) 1965 loadConstantOrVariable(t3, t1) 1966 integerCompare(t0, t1, .jumpTarget) 1967 dispatch(4) 1968 1969 .jumpTarget: 1970 dispatchIntIndirect(3) 1971 end 1972 1973 1974 macro compareUnsigned(integerCompareAndSet) 1975 loadisFromInstruction(3, t0) 1976 loadisFromInstruction(2, t2) 1977 loadisFromInstruction(1, t3) 1978 loadConstantOrVariable(t0, t1) 1979 loadConstantOrVariable(t2, t0) 1980 integerCompareAndSet(t0, t1, t0) 1981 orq ValueFalse, t0 1982 storeq t0, [cfr, t3, 8] 1983 dispatch(4) 1984 end 1985 1986 1987 _llint_op_switch_imm: 1988 traceExecution() 1989 loadisFromInstruction(3, t2) 1990 loadisFromInstruction(1, t3) 1991 loadConstantOrVariable(t2, t1) 1801 metadata(t5, t0) 1802 storeb 1, OpJneqPtr::Metadata::hasJumped[t5] 1803 get(target, t0) 1804 jumpImpl(t0) 1805 end) 1806 1807 1808 macro compareJumpOp(name, op, integerCompare, doubleCompare) 1809 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1810 get(lhs, t2) 1811 get(rhs, t3) 1812 loadConstantOrVariable(size, t2, t0) 1813 loadConstantOrVariable(size, t3, t1) 1814 bqb t0, tagTypeNumber, .op1NotInt 1815 bqb t1, tagTypeNumber, .op2NotInt 1816 integerCompare(t0, t1, .jumpTarget) 1817 dispatch() 1818 1819 .op1NotInt: 1820 btqz t0, tagTypeNumber, .slow 1821 bqb t1, tagTypeNumber, .op1NotIntOp2NotInt 1822 ci2d t1, ft1 1823 jmp .op1NotIntReady 1824 .op1NotIntOp2NotInt: 1825 btqz t1, tagTypeNumber, .slow 1826 addq tagTypeNumber, t1 1827 fq2d t1, ft1 1828 .op1NotIntReady: 1829 addq tagTypeNumber, t0 1830 fq2d t0, ft0 1831 doubleCompare(ft0, ft1, .jumpTarget) 1832 dispatch() 1833 1834 .op2NotInt: 1835 ci2d t0, ft0 1836 btqz t1, tagTypeNumber, .slow 1837 addq tagTypeNumber, t1 1838 fq2d t1, 
ft1 1839 doubleCompare(ft0, ft1, .jumpTarget) 1840 dispatch() 1841 1842 .jumpTarget: 1843 jump(target) 1844 1845 .slow: 1846 callSlowPath(_llint_slow_path_%name%) 1847 nextInstruction() 1848 end) 1849 end 1850 1851 1852 macro equalityJumpOp(name, op, integerComparison) 1853 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1854 get(lhs, t2) 1855 get(rhs, t3) 1856 loadConstantOrVariableInt32(size, t2, t0, .slow) 1857 loadConstantOrVariableInt32(size, t3, t1, .slow) 1858 integerComparison(t0, t1, .jumpTarget) 1859 dispatch() 1860 1861 .jumpTarget: 1862 jump(target) 1863 1864 .slow: 1865 callSlowPath(_llint_slow_path_%name%) 1866 nextInstruction() 1867 end) 1868 end 1869 1870 1871 macro compareUnsignedJumpOp(name, op, integerCompare) 1872 llintOpWithJump(op_%name%, op, macro (size, get, jump, dispatch) 1873 get(lhs, t2) 1874 get(rhs, t3) 1875 loadConstantOrVariable(size, t2, t0) 1876 loadConstantOrVariable(size, t3, t1) 1877 integerCompare(t0, t1, .jumpTarget) 1878 dispatch() 1879 1880 .jumpTarget: 1881 jump(target) 1882 end) 1883 end 1884 1885 1886 macro compareUnsignedOp(name, op, integerCompareAndSet) 1887 llintOpWithReturn(op_%name%, op, macro (size, get, dispatch, return) 1888 get(lhs, t2) 1889 get(rhs, t0) 1890 loadConstantOrVariable(size, t0, t1) 1891 loadConstantOrVariable(size, t2, t0) 1892 integerCompareAndSet(t0, t1, t0) 1893 orq ValueFalse, t0 1894 return(t0) 1895 end) 1896 end 1897 1898 1899 llintOpWithJump(op_switch_imm, OpSwitchImm, macro (size, get, jump, dispatch) 1900 get(scrutinee, t2) 1901 get(tableIndex, t3) 1902 loadConstantOrVariable(size, t2, t1) 1992 1903 loadp CodeBlock[cfr], t2 1993 1904 loadp CodeBlock::m_rareData[t2], t2 … … 2001 1912 loadis [t3, t1, 4], t1 2002 1913 btiz t1, .opSwitchImmFallThrough 2003 dispatch (t1)1914 dispatchIndirect(t1) 2004 1915 2005 1916 .opSwitchImmNotInt: 2006 1917 btqnz t1, tagTypeNumber, .opSwitchImmSlow # Go slow if it's a double. 
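op_switch_imm above is a direct jump-table lookup: subtract the table's minimum case value, bounds-check against the number of entries, and treat a zero branch offset as a hole that falls through to the default target. Sketched below with invented field names standing in for SimpleJumpTable:

#include <cstdint>
#include <cstdio>
#include <vector>

struct SimpleJumpTable {
    int32_t min = 0;
    std::vector<int32_t> branchOffsets; // 0 means "no case here"
};

int32_t switchTarget(const SimpleJumpTable& t, int32_t scrutinee,
                     int32_t defaultOffset) {
    int64_t index = int64_t(scrutinee) - t.min;   // subi SimpleJumpTable::min
    if (index < 0 || index >= int64_t(t.branchOffsets.size()))
        return defaultOffset;                     // out of range: default
    int32_t offset = t.branchOffsets[size_t(index)];
    return offset ? offset : defaultOffset;       // btiz: hole falls through
}

int main() {
    SimpleJumpTable t { 10, { 5, 0, 9 } };        // cases 10 and 12
    std::printf("%d\n", switchTarget(t, 10, 1));  // 5
    std::printf("%d\n", switchTarget(t, 11, 1));  // 1 (hole)
    std::printf("%d\n", switchTarget(t, 99, 1));  // 1 (out of range)
}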
2007 1918 .opSwitchImmFallThrough: 2008 dispatchIntIndirect(2)1919 jump(defaultOffset) 2009 1920 2010 1921 .opSwitchImmSlow: 2011 1922 callSlowPath(_llint_slow_path_switch_imm) 2012 dispatch(0)2013 2014 2015 _llint_op_switch_char: 2016 traceExecution()2017 loadisFromInstruction(3, t2)2018 loadisFromInstruction(1, t3)2019 loadConstantOrVariable( t2, t1)1923 nextInstruction() 1924 end) 1925 1926 1927 llintOpWithJump(op_switch_char, OpSwitchChar, macro (size, get, jump, dispatch) 1928 get(scrutinee, t2) 1929 get(tableIndex, t3) 1930 loadConstantOrVariable(size, t2, t1) 2020 1931 loadp CodeBlock[cfr], t2 2021 1932 loadp CodeBlock::m_rareData[t2], t2 … … 2040 1951 loadis [t2, t0, 4], t1 2041 1952 btiz t1, .opSwitchCharFallThrough 2042 dispatch (t1)1953 dispatchIndirect(t1) 2043 1954 2044 1955 .opSwitchCharFallThrough: 2045 dispatchIntIndirect(2)1956 jump(defaultOffset) 2046 1957 2047 1958 .opSwitchOnRope: 2048 1959 callSlowPath(_llint_slow_path_switch_char) 2049 dispatch(0) 2050 2051 2052 macro arrayProfileForCall() 2053 loadisFromInstruction(4, t3) 1960 nextInstruction() 1961 end) 1962 1963 1964 # we assume t5 contains the metadata, and we should not scratch that 1965 macro arrayProfileForCall(op, getu) 1966 getu(argv, t3) 2054 1967 negp t3 2055 1968 loadq ThisArgumentOffset[cfr, t3, 8], t0 2056 1969 btqnz t0, tagMask, .done 2057 loadpFromInstruction((CallOpCodeSize - 2), t1)2058 1970 loadi JSCell::m_structureID[t0], t3 2059 storei t3, ArrayProfile::m_lastSeenStructureID[t1]1971 storei t3, %op%::Metadata::arrayProfile.m_lastSeenStructureID[t5] 2060 1972 .done: 2061 1973 end 2062 1974 2063 macro doCall(slowPath, prepareCall) 2064 loadisFromInstruction(2, t0) 2065 loadpFromInstruction(5, t1) 2066 if POINTER_PROFILING 2067 move t1, t5 2068 end 2069 loadp LLIntCallLinkInfo::callee[t1], t2 2070 loadConstantOrVariable(t0, t3) 2071 bqneq t3, t2, .opCallSlow 2072 loadisFromInstruction(4, t3) 2073 lshifti 3, t3 2074 negp t3 2075 addp cfr, t3 2076 storeq t2, Callee[t3] 2077 loadisFromInstruction(3, t2) 2078 storei PC, ArgumentCount + TagOffset[cfr] 2079 storei t2, ArgumentCount + PayloadOffset[t3] 2080 move t3, sp 2081 if POISON 2082 loadp _g_JITCodePoison, t2 2083 xorp LLIntCallLinkInfo::machineCodeTarget[t1], t2 2084 prepareCall(t2, t1, t3, t4, JSEntryPtrTag) 2085 callTargetFunction(t2, JSEntryPtrTag) 2086 else 2087 prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4, JSEntryPtrTag) 2088 callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1], JSEntryPtrTag) 2089 end 2090 2091 .opCallSlow: 2092 slowPathForCall(slowPath, prepareCall) 2093 end 2094 2095 _llint_op_ret: 2096 traceExecution() 1975 macro commonCallOp(name, slowPath, op, prepareCall, prologue) 1976 llintOpWithMetadata(name, op, macro (size, get, dispatch, metadata, return) 1977 metadata(t5, t0) 1978 1979 prologue(macro (field, dst) 1980 getu(size, op, field, dst) 1981 end, metadata) 1982 1983 get(callee, t0) 1984 if POINTER_PROFILING 1985 move t1, t5 1986 end 1987 loadp %op%::Metadata::callLinkInfo.callee[t5], t2 1988 loadConstantOrVariable(size, t0, t3) 1989 bqneq t3, t2, .opCallSlow 1990 getu(size, op, argv, t3) 1991 lshifti 3, t3 1992 negp t3 1993 addp cfr, t3 1994 storeq t2, Callee[t3] 1995 getu(size, op, argc, t2) 1996 storei PC, ArgumentCount + TagOffset[cfr] 1997 storei t2, ArgumentCount + PayloadOffset[t3] 1998 move t3, sp 1999 if POISON 2000 loadp _g_JITCodePoison, t2 2001 xorp %op%::Metadata::callLinkInfo.machineCodeTarget[t5], t2 2002 prepareCall(t2, t1, t3, t4, JSEntryPtrTag) 2003 callTargetFunction(size, op, 
dispatch, t2, JSEntryPtrTag) 2004 else 2005 prepareCall(%op%::Metadata::callLinkInfo.machineCodeTarget[t5], t2, t3, t4, JSEntryPtrTag) 2006 callTargetFunction(size, op, dispatch, %op%::Metadata::callLinkInfo.machineCodeTarget[t5], JSEntryPtrTag) 2007 end 2008 2009 .opCallSlow: 2010 slowPathForCall(size, op, dispatch, slowPath, prepareCall) 2011 end) 2012 end 2013 2014 llintOp(op_ret, OpRet, macro (size, get, dispatch) 2097 2015 checkSwitchToJITForEpilogue() 2098 loadisFromInstruction(1, t2)2099 loadConstantOrVariable( t2, r0)2016 get(value, t2) 2017 loadConstantOrVariable(size, t2, r0) 2100 2018 doReturn() 2101 2102 2103 _llint_op_to_primitive: 2104 traceExecution() 2105 loadisFromInstruction(2, t2) 2106 loadisFromInstruction(1, t3) 2107 loadConstantOrVariable(t2, t0) 2019 end) 2020 2021 2022 llintOpWithReturn(op_to_primitive, OpToPrimitive, macro (size, get, dispatch, return) 2023 get(src, t2) 2024 loadConstantOrVariable(size, t2, t0) 2108 2025 btqnz t0, tagMask, .opToPrimitiveIsImm 2109 2026 bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase 2110 2027 .opToPrimitiveIsImm: 2111 storeq t0, [cfr, t3, 8] 2112 dispatch(constexpr op_to_primitive_length) 2028 return(t0) 2113 2029 2114 2030 .opToPrimitiveSlowCase: 2115 2031 callSlowPath(_slow_path_to_primitive) 2116 dispatch(constexpr op_to_primitive_length) 2117 2118 2119 _llint_op_catch: 2032 dispatch() 2033 end) 2034 2035 2036 commonOp(llint_op_catch, macro() end, macro (size) 2120 2037 # This is where we end up from the JIT's throw trampoline (because the 2121 2038 # machine code return address will be set to _llint_op_catch), and from … … 2132 2049 2133 2050 loadp CodeBlock[cfr], PB 2051 # FIXME: cleanup double load 2052 # https://bugs.webkit.org/show_bug.cgi?id=190932 2134 2053 loadp CodeBlock::m_instructions[PB], PB 2054 loadp [PB], PB 2135 2055 unpoison(_g_CodeBlockPoison, PB, t2) 2136 2056 loadp VM::targetInterpreterPCForThrow[t3], PC 2137 2057 subp PB, PC 2138 rshiftp constexpr (getLSBSet(sizeof(void*))), PC2139 2058 2140 2059 callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler) … … 2149 2068 loadp VM::m_exception[t3], t0 2150 2069 storep 0, VM::m_exception[t3] 2151 loadisFromInstruction(1, t2)2070 get(size, OpCatch, exception, t2) 2152 2071 storeq t0, [cfr, t2, 8] 2153 2072 2154 2073 loadq Exception::m_value[t0], t3 2155 loadisFromInstruction(2, t2)2074 get(size, OpCatch, thrownValue, t2) 2156 2075 storeq t3, [cfr, t2, 8] 2157 2076 … … 2160 2079 callSlowPath(_llint_slow_path_profile_catch) 2161 2080 2162 dispatch (constexpr op_catch_length)2163 2164 2165 _llint_op_end: 2166 traceExecution()2081 dispatchOp(size, op_catch) 2082 end) 2083 2084 2085 llintOp(op_end, OpEnd, macro (size, get, dispatch) 2167 2086 checkSwitchToJITForEpilogue() 2168 loadisFromInstruction(1, t0)2169 assertNotConstant( t0)2087 get(value, t0) 2088 assertNotConstant(size, t0) 2170 2089 loadq [cfr, t0, 8], r0 2171 2090 doReturn() 2172 2173 2174 _llint_throw_from_slow_path_trampoline: 2091 end) 2092 2093 2094 op(llint_throw_from_slow_path_trampoline, macro () 2175 2095 loadp Callee[cfr], t1 2176 2096 andp MarkedBlockMask, t1 … … 2187 2107 loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[t1], t1 2188 2108 jmp VM::targetMachinePCForThrow[t1], ExceptionHandlerPtrTag 2189 2190 2191 _llint_throw_during_call_trampoline: 2109 end) 2110 2111 2112 op(llint_throw_during_call_trampoline, macro () 2192 2113 preserveReturnAddressAfterCall(t2) 2193 2114 jmp _llint_throw_from_slow_path_trampoline 2115 end) 2194 2116 2195 2117 … … 
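The commonCallOp fast path above is a monomorphic call cache: the metadata's LLIntCallLinkInfo remembers the last callee and its machine-code entry, so a repeat call with the same function skips linking entirely and only a mismatch takes slowPathForCall. A toy version; the function types and the resolve hook are stand-ins for the real linking slow path.

#include <cstdio>

using Entry = int (*)(int);

struct CallLinkInfo {
    void* callee = nullptr;            // last function seen at this site
    Entry machineCodeTarget = nullptr; // its cached entry point
};

int doCall(void* callee, Entry (*resolve)(void*), CallLinkInfo& info, int arg) {
    if (callee == info.callee)
        return info.machineCodeTarget(arg); // cache hit: bqneq not taken
    info.callee = callee;                   // .opCallSlow: (re)link the cache
    info.machineCodeTarget = resolve(callee);
    return info.machineCodeTarget(arg);
}

static int twice(int x) { return 2 * x; }
static Entry resolveStub(void*) { return twice; }

int main() {
    CallLinkInfo info;
    int fn = 0; // stand-in for a function cell; only its address matters
    std::printf("%d\n", doCall(&fn, resolveStub, info, 21)); // links: 42
    std::printf("%d\n", doCall(&fn, resolveStub, info, 10)); // cached: 20
}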
2283 2205 end 2284 2206 2285 macro getConstantScope(dst) 2286 loadpFromInstruction(6, t0) 2287 loadisFromInstruction(dst, t1) 2288 storeq t0, [cfr, t1, 8] 2289 end 2290 2291 macro varInjectionCheck(slowPath) 2292 loadp CodeBlock[cfr], t0 2293 loadp CodeBlock::m_globalObject[t0], t0 2294 loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0 2295 bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath 2296 end 2297 2298 macro resolveScope() 2299 loadisFromInstruction(5, t2) 2300 loadisFromInstruction(2, t0) 2301 loadq [cfr, t0, 8], t0 2302 btiz t2, .resolveScopeLoopEnd 2303 2304 .resolveScopeLoop: 2305 loadp JSScope::m_next[t0], t0 2306 subi 1, t2 2307 btinz t2, .resolveScopeLoop 2308 2309 .resolveScopeLoopEnd: 2310 loadisFromInstruction(1, t1) 2311 storeq t0, [cfr, t1, 8] 2312 end 2313 2314 2315 _llint_op_resolve_scope: 2316 traceExecution() 2317 loadisFromInstruction(4, t0) 2207 macro varInjectionCheck(slowPath, scratch) 2208 loadp CodeBlock[cfr], scratch 2209 loadp CodeBlock::m_globalObject[scratch], scratch 2210 loadp JSGlobalObject::m_varInjectionWatchpoint[scratch], scratch 2211 bbeq WatchpointSet::m_state[scratch], IsInvalidated, slowPath 2212 end 2213 2214 llintOpWithMetadata(op_resolve_scope, OpResolveScope, macro (size, get, dispatch, metadata, return) 2215 metadata(t5, t0) 2216 2217 macro getConstantScope() 2218 loadp OpResolveScope::Metadata::constantScope[t5], t0 2219 return(t0) 2220 end 2221 2222 macro resolveScope() 2223 loadi OpResolveScope::Metadata::localScopeDepth[t5], t2 2224 get(scope, t0) 2225 loadq [cfr, t0, 8], t0 2226 btiz t2, .resolveScopeLoopEnd 2227 2228 .resolveScopeLoop: 2229 loadp JSScope::m_next[t0], t0 2230 subi 1, t2 2231 btinz t2, .resolveScopeLoop 2232 2233 .resolveScopeLoopEnd: 2234 return(t0) 2235 end 2236 2237 loadp OpResolveScope::Metadata::resolveType[t5], t0 2318 2238 2319 2239 #rGlobalProperty: 2320 2240 bineq t0, GlobalProperty, .rGlobalVar 2321 getConstantScope(1) 2322 dispatch(constexpr op_resolve_scope_length) 2241 getConstantScope() 2323 2242 2324 2243 .rGlobalVar: 2325 2244 bineq t0, GlobalVar, .rGlobalLexicalVar 2326 getConstantScope(1) 2327 dispatch(constexpr op_resolve_scope_length) 2245 getConstantScope() 2328 2246 2329 2247 .rGlobalLexicalVar: 2330 2248 bineq t0, GlobalLexicalVar, .rClosureVar 2331 getConstantScope(1) 2332 dispatch(constexpr op_resolve_scope_length) 2249 getConstantScope() 2333 2250 2334 2251 .rClosureVar: 2335 2252 bineq t0, ClosureVar, .rModuleVar 2336 2253 resolveScope() 2337 dispatch(constexpr op_resolve_scope_length)2338 2254 2339 2255 .rModuleVar: 2340 2256 bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks 2341 getConstantScope(1) 2342 dispatch(constexpr op_resolve_scope_length) 2257 getConstantScope() 2343 2258 2344 2259 .rGlobalPropertyWithVarInjectionChecks: 2345 2260 bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks 2346 varInjectionCheck(.rDynamic) 2347 getConstantScope(1) 2348 dispatch(constexpr op_resolve_scope_length) 2261 varInjectionCheck(.rDynamic, t2) 2262 getConstantScope() 2349 2263 2350 2264 .rGlobalVarWithVarInjectionChecks: 2351 2265 bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks 2352 varInjectionCheck(.rDynamic) 2353 getConstantScope(1) 2354 dispatch(constexpr op_resolve_scope_length) 2266 varInjectionCheck(.rDynamic, t2) 2267 getConstantScope() 2355 2268 2356 2269 .rGlobalLexicalVarWithVarInjectionChecks: 2357 2270 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks 2358 
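resolveScope() above shows what the metadata's localScopeDepth buys: the bytecode generator has already counted how many activations separate the use from the defining scope, so run-time resolution for a ClosureVar is a fixed-count walk over JSScope::m_next with no name lookup. For example:

#include <cstdio>

struct Scope { Scope* next = nullptr; }; // JSScope::m_next, simplified

Scope* resolveScope(Scope* current, unsigned localScopeDepth) {
    while (localScopeDepth--)   // the .resolveScopeLoop above
        current = current->next;
    return current;
}

int main() {
    Scope global, function { &global }, block { &function };
    // A variable defined in `function`, referenced from `block`: depth 1.
    std::printf("%d\n", resolveScope(&block, 1) == &function); // 1
}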
varInjectionCheck(.rDynamic) 2359 getConstantScope(1) 2360 dispatch(constexpr op_resolve_scope_length) 2271 varInjectionCheck(.rDynamic, t2) 2272 getConstantScope() 2361 2273 2362 2274 .rClosureVarWithVarInjectionChecks: 2363 2275 bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic 2364 varInjectionCheck(.rDynamic )2276 varInjectionCheck(.rDynamic, t2) 2365 2277 resolveScope() 2366 dispatch(constexpr op_resolve_scope_length)2367 2278 2368 2279 .rDynamic: 2369 2280 callSlowPath(_slow_path_resolve_scope) 2370 dispatch(constexpr op_resolve_scope_length) 2371 2372 2373 macro loadWithStructureCheck(operand, slowPath) 2374 loadisFromInstruction(operand, t0) 2281 dispatch() 2282 end) 2283 2284 2285 macro loadWithStructureCheck(op, get, slowPath) 2286 get(scope, t0) 2375 2287 loadq [cfr, t0, 8], t0 2376 2288 loadStructureWithScratch(t0, t2, t1, t3) 2377 loadp FromInstruction(5, t1)2289 loadp %op%::Metadata::structure[t5], t1 2378 2290 bpneq t2, t1, slowPath 2379 2291 end 2380 2292 2381 macro getProperty() 2382 loadisFromInstruction(6, t1) 2383 loadPropertyAtVariableOffset(t1, t0, t2) 2384 valueProfile(t2, 7, t0) 2385 loadisFromInstruction(1, t0) 2386 storeq t2, [cfr, t0, 8] 2387 end 2388 2389 macro getGlobalVar(tdzCheckIfNecessary) 2390 loadpFromInstruction(6, t0) 2391 loadq [t0], t0 2392 tdzCheckIfNecessary(t0) 2393 valueProfile(t0, 7, t1) 2394 loadisFromInstruction(1, t1) 2395 storeq t0, [cfr, t1, 8] 2396 end 2397 2398 macro getClosureVar() 2399 loadisFromInstruction(6, t1) 2400 loadq JSLexicalEnvironment_variables[t0, t1, 8], t0 2401 valueProfile(t0, 7, t1) 2402 loadisFromInstruction(1, t1) 2403 storeq t0, [cfr, t1, 8] 2404 end 2405 2406 _llint_op_get_from_scope: 2407 traceExecution() 2408 loadisFromInstruction(4, t0) 2293 llintOpWithMetadata(op_get_from_scope, OpGetFromScope, macro (size, get, dispatch, metadata, return) 2294 metadata(t5, t0) 2295 2296 macro getProperty() 2297 loadis OpGetFromScope::Metadata::operand[t5], t1 2298 loadPropertyAtVariableOffset(t1, t0, t2) 2299 valueProfile(OpGetFromScope, t5, t2) 2300 return(t2) 2301 end 2302 2303 macro getGlobalVar(tdzCheckIfNecessary) 2304 loadp OpGetFromScope::Metadata::operand[t5], t0 2305 loadq [t0], t0 2306 tdzCheckIfNecessary(t0) 2307 valueProfile(OpGetFromScope, t5, t0) 2308 return(t0) 2309 end 2310 2311 macro getClosureVar() 2312 loadis OpGetFromScope::Metadata::operand[t5], t1 2313 loadq JSLexicalEnvironment_variables[t0, t1, 8], t0 2314 valueProfile(OpGetFromScope, t5, t0) 2315 return(t0) 2316 end 2317 2318 loadi OpGetFromScope::Metadata::getPutInfo[t5], t0 2409 2319 andi ResolveTypeMask, t0 2410 2320 2411 2321 #gGlobalProperty: 2412 2322 bineq t0, GlobalProperty, .gGlobalVar 2413 loadWithStructureCheck( 2, .gDynamic)2323 loadWithStructureCheck(OpGetFromScope, get, .gDynamic) 2414 2324 getProperty() 2415 dispatch(constexpr op_get_from_scope_length)2416 2325 2417 2326 .gGlobalVar: 2418 2327 bineq t0, GlobalVar, .gGlobalLexicalVar 2419 2328 getGlobalVar(macro(v) end) 2420 dispatch(constexpr op_get_from_scope_length)2421 2329 2422 2330 .gGlobalLexicalVar: … … 2426 2334 bqeq value, ValueEmpty, .gDynamic 2427 2335 end) 2428 dispatch(constexpr op_get_from_scope_length)2429 2336 2430 2337 .gClosureVar: 2431 2338 bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks 2432 loadVariable( 2, t0)2339 loadVariable(get, scope, t0) 2433 2340 getClosureVar() 2434 dispatch(constexpr op_get_from_scope_length)2435 2341 2436 2342 .gGlobalPropertyWithVarInjectionChecks: 2437 2343 bineq t0, GlobalPropertyWithVarInjectionChecks, 
.gGlobalVarWithVarInjectionChecks 2438 loadWithStructureCheck( 2, .gDynamic)2344 loadWithStructureCheck(OpGetFromScope, get, .gDynamic) 2439 2345 getProperty() 2440 dispatch(constexpr op_get_from_scope_length)2441 2346 2442 2347 .gGlobalVarWithVarInjectionChecks: 2443 2348 bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks 2444 varInjectionCheck(.gDynamic )2349 varInjectionCheck(.gDynamic, t2) 2445 2350 getGlobalVar(macro(v) end) 2446 dispatch(constexpr op_get_from_scope_length)2447 2351 2448 2352 .gGlobalLexicalVarWithVarInjectionChecks: 2449 2353 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks 2450 varInjectionCheck(.gDynamic )2354 varInjectionCheck(.gDynamic, t2) 2451 2355 getGlobalVar( 2452 2356 macro (value) 2453 2357 bqeq value, ValueEmpty, .gDynamic 2454 2358 end) 2455 dispatch(constexpr op_get_from_scope_length)2456 2359 2457 2360 .gClosureVarWithVarInjectionChecks: 2458 2361 bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic 2459 varInjectionCheck(.gDynamic )2460 loadVariable( 2, t0)2362 varInjectionCheck(.gDynamic, t2) 2363 loadVariable(get, scope, t0) 2461 2364 getClosureVar() 2462 dispatch(constexpr op_get_from_scope_length)2463 2365 2464 2366 .gDynamic: 2465 2367 callSlowPath(_llint_slow_path_get_from_scope) 2466 dispatch( constexpr op_get_from_scope_length)2467 2468 2469 macro putProperty() 2470 loadisFromInstruction(3, t1)2471 loadConstantOrVariable(t1, t2)2472 loadisFromInstruction(6, t1)2473 storePropertyAtVariableOffset(t1, t0, t2)2474 end 2475 2476 macro putGlobalVariable() 2477 loadisFromInstruction(3, t0) 2478 loadConstantOrVariable(t0, t1)2479 loadpFromInstruction(5, t2)2480 loadpFromInstruction(6, t0)2481 notifyWrite(t2, .pDynamic)2482 storeq t1, [t0]2483 end 2484 2485 macro putClosureVar() 2486 loadisFromInstruction(3, t1) 2487 loadConstantOrVariable(t1, t2)2488 loadisFromInstruction(6, t1)2489 storeq t2, JSLexicalEnvironment_variables[t0, t1, 8]2490 end 2491 2492 macro putLocalClosureVar() 2493 loadisFromInstruction(3, t1) 2494 loadConstantOrVariable(t1, t2)2495 loadpFromInstruction(5, t3)2496 btpz t3, .noVariableWatchpointSet2497 notifyWrite(t3, .pDynamic)2498 .noVariableWatchpointSet: 2499 loadisFromInstruction(6, t1)2500 storeq t2, JSLexicalEnvironment_variables[t0, t1, 8]2501 end 2502 2503 macro checkTDZInGlobalPutToScopeIfNecessary() 2504 loadisFromInstruction(4, t0) 2505 andi InitializationModeMask, t02506 rshifti InitializationModeShift, t02507 bineq t0, NotInitialization, .noNeedForTDZCheck2508 loadpFromInstruction(6, t0)2509 loadq [t0], t02510 bqeq t0, ValueEmpty, .pDynamic2511 .noNeedForTDZCheck: 2512 end 2513 2514 2515 _llint_op_put_to_scope: 2516 traceExecution()2517 loadi sFromInstruction(4, t0)2368 dispatch() 2369 end) 2370 2371 2372 llintOpWithMetadata(op_put_to_scope, OpPutToScope, macro (size, get, dispatch, metadata, return) 2373 macro putProperty() 2374 get(value, t1) 2375 loadConstantOrVariable(size, t1, t2) 2376 loadis OpPutToScope::Metadata::operand[t5], t1 2377 storePropertyAtVariableOffset(t1, t0, t2) 2378 end 2379 2380 macro putGlobalVariable() 2381 get(value, t0) 2382 loadConstantOrVariable(size, t0, t1) 2383 loadp OpPutToScope::Metadata::watchpointSet[t5], t2 2384 loadp OpPutToScope::Metadata::operand[t5], t0 2385 notifyWrite(t2, .pDynamic) 2386 storeq t1, [t0] 2387 end 2388 2389 macro putClosureVar() 2390 get(value, t1) 2391 loadConstantOrVariable(size, t1, t2) 2392 loadis OpPutToScope::Metadata::operand[t5], t1 2393 storeq t2, JSLexicalEnvironment_variables[t0, t1, 
8] 2394 end 2395 2396 macro putLocalClosureVar() 2397 get(value, t1) 2398 loadConstantOrVariable(size, t1, t2) 2399 loadp OpPutToScope::Metadata::watchpointSet[t5], t3 2400 btpz t3, .noVariableWatchpointSet 2401 notifyWrite(t3, .pDynamic) 2402 .noVariableWatchpointSet: 2403 loadis OpPutToScope::Metadata::operand[t5], t1 2404 storeq t2, JSLexicalEnvironment_variables[t0, t1, 8] 2405 end 2406 2407 macro checkTDZInGlobalPutToScopeIfNecessary() 2408 loadis OpPutToScope::Metadata::getPutInfo[t5], t0 2409 andi InitializationModeMask, t0 2410 rshifti InitializationModeShift, t0 2411 bineq t0, NotInitialization, .noNeedForTDZCheck 2412 loadp OpPutToScope::Metadata::operand[t5], t0 2413 loadq [t0], t0 2414 bqeq t0, ValueEmpty, .pDynamic 2415 .noNeedForTDZCheck: 2416 end 2417 2418 metadata(t5, t0) 2419 loadi OpPutToScope::Metadata::getPutInfo[t5], t0 2518 2420 andi ResolveTypeMask, t0 2519 2421 2520 2422 #pLocalClosureVar: 2521 2423 bineq t0, LocalClosureVar, .pGlobalProperty 2522 loadVariable( 1, t0)2424 loadVariable(get, scope, t0) 2523 2425 putLocalClosureVar() 2524 writeBarrierOnOperands( 1, 3)2525 dispatch( constexpr op_put_to_scope_length)2426 writeBarrierOnOperands(size, get, scope, value) 2427 dispatch() 2526 2428 2527 2429 .pGlobalProperty: 2528 2430 bineq t0, GlobalProperty, .pGlobalVar 2529 loadWithStructureCheck( 1, .pDynamic)2431 loadWithStructureCheck(OpPutToScope, get, .pDynamic) 2530 2432 putProperty() 2531 writeBarrierOnOperands( 1, 3)2532 dispatch( constexpr op_put_to_scope_length)2433 writeBarrierOnOperands(size, get, scope, value) 2434 dispatch() 2533 2435 2534 2436 .pGlobalVar: 2535 2437 bineq t0, GlobalVar, .pGlobalLexicalVar 2536 writeBarrierOnGlobalObject(3)2537 2438 putGlobalVariable() 2538 dispatch(constexpr op_put_to_scope_length) 2439 writeBarrierOnGlobalObject(size, get, value) 2440 dispatch() 2539 2441 2540 2442 .pGlobalLexicalVar: 2541 2443 bineq t0, GlobalLexicalVar, .pClosureVar 2542 writeBarrierOnGlobalLexicalEnvironment(3)2543 2444 checkTDZInGlobalPutToScopeIfNecessary() 2544 2445 putGlobalVariable() 2545 dispatch(constexpr op_put_to_scope_length) 2446 writeBarrierOnGlobalLexicalEnvironment(size, get, value) 2447 dispatch() 2546 2448 2547 2449 .pClosureVar: 2548 2450 bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks 2549 loadVariable( 1, t0)2451 loadVariable(get, scope, t0) 2550 2452 putClosureVar() 2551 writeBarrierOnOperands( 1, 3)2552 dispatch( constexpr op_put_to_scope_length)2453 writeBarrierOnOperands(size, get, scope, value) 2454 dispatch() 2553 2455 2554 2456 .pGlobalPropertyWithVarInjectionChecks: 2555 2457 bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks 2556 loadWithStructureCheck( 1, .pDynamic)2458 loadWithStructureCheck(OpPutToScope, get, .pDynamic) 2557 2459 putProperty() 2558 writeBarrierOnOperands( 1, 3)2559 dispatch( constexpr op_put_to_scope_length)2460 writeBarrierOnOperands(size, get, scope, value) 2461 dispatch() 2560 2462 2561 2463 .pGlobalVarWithVarInjectionChecks: 2562 2464 bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks 2563 writeBarrierOnGlobalObject(3) 2564 varInjectionCheck(.pDynamic) 2465 varInjectionCheck(.pDynamic, t2) 2565 2466 putGlobalVariable() 2566 dispatch(constexpr op_put_to_scope_length) 2467 writeBarrierOnGlobalObject(size, get, value) 2468 dispatch() 2567 2469 2568 2470 .pGlobalLexicalVarWithVarInjectionChecks: 2569 2471 bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks 2570 
writeBarrierOnGlobalLexicalEnvironment(3) 2571 varInjectionCheck(.pDynamic) 2472 varInjectionCheck(.pDynamic, t2) 2572 2473 checkTDZInGlobalPutToScopeIfNecessary() 2573 2474 putGlobalVariable() 2574 dispatch(constexpr op_put_to_scope_length) 2475 writeBarrierOnGlobalLexicalEnvironment(size, get, value) 2476 dispatch() 2575 2477 2576 2478 .pClosureVarWithVarInjectionChecks: 2577 2479 bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar 2578 varInjectionCheck(.pDynamic )2579 loadVariable( 1, t0)2480 varInjectionCheck(.pDynamic, t2) 2481 loadVariable(get, scope, t0) 2580 2482 putClosureVar() 2581 writeBarrierOnOperands( 1, 3)2582 dispatch( constexpr op_put_to_scope_length)2483 writeBarrierOnOperands(size, get, scope, value) 2484 dispatch() 2583 2485 2584 2486 .pModuleVar: 2585 2487 bineq t0, ModuleVar, .pDynamic 2586 2488 callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) 2587 dispatch( constexpr op_put_to_scope_length)2489 dispatch() 2588 2490 2589 2491 .pDynamic: 2590 2492 callSlowPath(_llint_slow_path_put_to_scope) 2591 dispatch( constexpr op_put_to_scope_length)2592 2593 2594 _llint_op_get_from_arguments: 2595 traceExecution()2596 loadVariable( 2, t0)2597 loadi 3 * PtrSize[PB, PC, PtrSize], t12493 dispatch() 2494 end) 2495 2496 2497 llintOpWithProfile(op_get_from_arguments, OpGetFromArguments, macro (size, get, dispatch, return) 2498 loadVariable(get, arguments, t0) 2499 getu(size, OpGetFromArguments, index, t1) 2598 2500 loadq DirectArguments_storage[t0, t1, 8], t0 2599 valueProfile(t0, 4, t1) 2600 loadisFromInstruction(1, t1) 2601 storeq t0, [cfr, t1, 8] 2602 dispatch(constexpr op_get_from_arguments_length) 2603 2604 2605 _llint_op_put_to_arguments: 2606 traceExecution() 2607 loadVariable(1, t0) 2608 loadi 2 * PtrSize[PB, PC, PtrSize], t1 2609 loadisFromInstruction(3, t3) 2610 loadConstantOrVariable(t3, t2) 2501 return(t0) 2502 end) 2503 2504 2505 llintOp(op_put_to_arguments, OpPutToArguments, macro (size, get, dispatch) 2506 loadVariable(get, arguments, t0) 2507 getu(size, OpPutToArguments, index, t1) 2508 get(value, t3) 2509 loadConstantOrVariable(size, t3, t2) 2611 2510 storeq t2, DirectArguments_storage[t0, t1, 8] 2612 writeBarrierOnOperands( 1, 3)2613 dispatch( constexpr op_put_to_arguments_length)2614 2615 2616 _llint_op_get_parent_scope: 2617 traceExecution()2618 loadVariable( 2, t0)2511 writeBarrierOnOperands(size, get, arguments, value) 2512 dispatch() 2513 end) 2514 2515 2516 llintOpWithReturn(op_get_parent_scope, OpGetParentScope, macro (size, get, dispatch, return) 2517 loadVariable(get, scope, t0) 2619 2518 loadp JSScope::m_next[t0], t0 2620 loadisFromInstruction(1, t1) 2621 storeq t0, [cfr, t1, 8] 2622 dispatch(constexpr op_get_parent_scope_length) 2623 2624 2625 _llint_op_profile_type: 2626 traceExecution() 2519 return(t0) 2520 end) 2521 2522 2523 llintOpWithMetadata(op_profile_type, OpProfileType, macro (size, get, dispatch, metadata, return) 2627 2524 loadp CodeBlock[cfr], t1 2628 2525 loadp CodeBlock::m_poisonedVM[t1], t1 … … 2634 2531 2635 2532 # t0 is holding the JSValue argument. 2636 loadisFromInstruction(1, t3)2637 loadConstantOrVariable( t3, t0)2533 get(target, t3) 2534 loadConstantOrVariable(size, t3, t0) 2638 2535 2639 2536 bqeq t0, ValueEmpty, .opProfileTypeDone … … 2642 2539 2643 2540 # Store the TypeLocation onto the log entry. 
2644 loadpFromInstruction(2, t3) 2541 metadata(t5, t3) 2542 loadp OpProfileType::Metadata::typeLocation[t5], t3 2645 2543 storep t3, TypeProfilerLog::LogEntry::location[t2] 2646 2544 … … 2662 2560 2663 2561 .opProfileTypeDone: 2664 dispatch(constexpr op_profile_type_length) 2665 2666 _llint_op_profile_control_flow: 2667 traceExecution() 2668 loadpFromInstruction(1, t0) 2562 dispatch() 2563 end) 2564 2565 2566 llintOpWithMetadata(op_profile_control_flow, OpProfileControlFlow, macro (size, get, dispatch, metadata, return) 2567 metadata(t5, t0) 2568 loadp OpProfileControlFlow::Metadata::basicBlockLocation[t5], t0 2669 2569 addq 1, BasicBlockLocation::m_executionCount[t0] 2670 dispatch( constexpr op_profile_control_flow_length)2671 2672 2673 _llint_op_get_rest_length: 2674 traceExecution()2570 dispatch() 2571 end) 2572 2573 2574 llintOpWithReturn(op_get_rest_length, OpGetRestLength, macro (size, get, dispatch, return) 2675 2575 loadi PayloadOffset + ArgumentCount[cfr], t0 2676 2576 subi 1, t0 2677 loadisFromInstruction(2, t1)2577 getu(size, OpGetRestLength, numParametersToSkip, t1) 2678 2578 bilteq t0, t1, .storeZero 2679 2579 subi t1, t0 … … 2683 2583 .boxUp: 2684 2584 orq tagTypeNumber, t0 2685 loadisFromInstruction(1, t1) 2686 storeq t0, [cfr, t1, 8] 2687 dispatch(constexpr op_get_rest_length_length) 2688 2689 2690 _llint_op_log_shadow_chicken_prologue: 2691 traceExecution() 2585 return(t0) 2586 end) 2587 2588 2589 llintOp(op_log_shadow_chicken_prologue, OpLogShadowChickenPrologue, macro (size, get, dispatch) 2692 2590 acquireShadowChickenPacket(.opLogShadowChickenPrologueSlow) 2693 2591 storep cfr, ShadowChicken::Packet::frame[t0] … … 2696 2594 loadp Callee[cfr], t1 2697 2595 storep t1, ShadowChicken::Packet::callee[t0] 2698 loadVariable( 1, t1)2596 loadVariable(get, scope, t1) 2699 2597 storep t1, ShadowChicken::Packet::scope[t0] 2700 dispatch( constexpr op_log_shadow_chicken_prologue_length)2598 dispatch() 2701 2599 .opLogShadowChickenPrologueSlow: 2702 2600 callSlowPath(_llint_slow_path_log_shadow_chicken_prologue) 2703 dispatch( constexpr op_log_shadow_chicken_prologue_length)2704 2705 2706 _llint_op_log_shadow_chicken_tail: 2707 traceExecution()2601 dispatch() 2602 end) 2603 2604 2605 llintOp(op_log_shadow_chicken_tail, OpLogShadowChickenTail, macro (size, get, dispatch) 2708 2606 acquireShadowChickenPacket(.opLogShadowChickenTailSlow) 2709 2607 storep cfr, ShadowChicken::Packet::frame[t0] 2710 2608 storep ShadowChickenTailMarker, ShadowChicken::Packet::callee[t0] 2711 loadVariable( 1, t1)2609 loadVariable(get, thisValue, t1) 2712 2610 storep t1, ShadowChicken::Packet::thisValue[t0] 2713 loadVariable( 2, t1)2611 loadVariable(get, scope, t1) 2714 2612 storep t1, ShadowChicken::Packet::scope[t0] 2715 2613 loadp CodeBlock[cfr], t1 2716 2614 storep t1, ShadowChicken::Packet::codeBlock[t0] 2717 2615 storei PC, ShadowChicken::Packet::callSiteIndex[t0] 2718 dispatch( constexpr op_log_shadow_chicken_tail_length)2616 dispatch() 2719 2617 .opLogShadowChickenTailSlow: 2720 2618 callSlowPath(_llint_slow_path_log_shadow_chicken_tail) 2721 dispatch(constexpr op_log_shadow_chicken_tail_length) 2619 dispatch() 2620 end) -
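Note on the LLInt hunks above: every hand-counted operand access (loadisFromInstruction(3, t1), dispatch(constexpr op_put_to_scope_length)) becomes a named accessor (get(value, t1), dispatch()), and mutable state moves behind a per-opcode metadata pointer loaded once into t5 (OpPutToScope::Metadata::operand[t5]). A minimal C++ model of that split, with invented field names (the real Metadata structs are generated):

    // Toy model only: mutable per-instruction state (caches filled by slow
    // paths, watchpoints, profiles) lives in a side table keyed by a metadata
    // index, so the instruction stream itself stays read-only.
    #include <cstdint>
    #include <vector>

    struct PutToScopeMetadata {
        uint32_t getPutInfo { 0 };   // resolve-type bits, updated as the scope resolves
        uintptr_t operand { 0 };     // cached storage offset, written by the slow path
    };

    struct PutToScopeInstruction {
        uint32_t scope, value;       // operands: fixed at bytecode-emission time
        uint32_t metadataIndex;      // what metadata(t5, t0) materializes per opcode
    };

    int main()
    {
        std::vector<PutToScopeMetadata> metadataTable(1);
        PutToScopeInstruction insn { 1, 2, 0 };
        metadataTable[insn.metadataIndex].operand = 16; // slow path caches a resolution
        return 0;
    }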
trunk/Source/JavaScriptCore/offlineasm/arm64.rb
r237486 r237547 277 277 if node.is_a? Instruction 278 278 case node.opcode 279 when "loadi", "loadis", "loadp", "loadq", "loadb", "loadbs", "loadh", "loadhs" 279 when "loadi", "loadis", "loadp", "loadq", "loadb", "loadbs", "loadh", "loadhs", "leap" 280 280 labelRef = node.operands[0] 281 281 if labelRef.is_a? LabelReference … … 374 374 | node, address | 375 375 case node.opcode 376 when "loadb", "loadbs", " storeb", /^bb/, /^btb/, /^cb/, /^tb/376 when "loadb", "loadbs", "loadbsp", "storeb", /^bb/, /^btb/, /^cb/, /^tb/ 377 377 size = 1 378 378 when "loadh", "loadhs" … … 670 670 when "loadbs" 671 671 emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :word) 672 when "loadbsp" 673 emitARM64Access("ldrsb", "ldursb", operands[1], operands[0], :ptr) 672 674 when "storeb" 673 675 emitARM64Unflipped("strb", operands, :word) -
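The new loadbsp instruction added here is a sign-extending byte load into a pointer-width register (ldrsb into an X register on ARM64; movsx on x86 further down). In C++ terms, roughly:

    #include <cstdint>
    #include <cstdio>

    // loadbsp dst <- [p]: read one signed byte and sign-extend to pointer width.
    intptr_t loadbsp(const void* p)
    {
        return static_cast<intptr_t>(*static_cast<const int8_t*>(p));
    }

    int main()
    {
        int8_t byte = -1;
        std::printf("%ld\n", static_cast<long>(loadbsp(&byte))); // -1, not 255
        return 0;
    }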
trunk/Source/JavaScriptCore/offlineasm/asm.rb
r237486 r237547 372 372 373 373 ast = parse(asmFile) 374 settingsCombinations = computeSettingsCombinations(ast) 374 375 375 376 configurationList.each { … … 377 378 offsetsList = configuration[0] 378 379 configIndex = configuration[1] 379 forSettings( computeSettingsCombinations(ast)[configIndex], ast) {380 forSettings(settingsCombinations[configIndex], ast) { 380 381 | concreteSettings, lowLevelAST, backend | 381 382 … … 387 388 end 388 389 390 lowLevelAST = lowLevelAST.demacroify({}) 389 391 lowLevelAST = lowLevelAST.resolve(buildOffsetsMap(lowLevelAST, offsetsList)) 390 392 lowLevelAST.validate -
trunk/Source/JavaScriptCore/offlineasm/ast.rb
r237486 r237547 971 971 972 972 class ConstExpr < NoChildren 973 attr_reader :va riable, :value973 attr_reader :value 974 974 975 975 def initialize(codeOrigin, value) … … 1026 1026 1027 1027 class Label < NoChildren 1028 attr_reader :name 1029 1030 def initialize(codeOrigin, name) 1028 def initialize(codeOrigin, name, definedInFile = false) 1031 1029 super(codeOrigin) 1032 1030 @name = name 1031 @definedInFile = definedInFile 1033 1032 @extern = true 1034 1033 @global = false … … 1039 1038 raise "Label name collision: #{name}" unless $labelMapping[name].is_a? Label 1040 1039 else 1041 $labelMapping[name] = Label.new(codeOrigin, name )1040 $labelMapping[name] = Label.new(codeOrigin, name, definedInFile) 1042 1041 end 1043 1042 if definedInFile … … 1086 1085 end 1087 1086 1087 def name 1088 @name 1089 end 1090 1088 1091 def dump 1089 1092 "#{name}:" … … 1112 1115 def self.unique(comment) 1113 1116 newName = "_#{comment}" 1117 if $emitWinAsm and newName.length > 90 1118 newName = newName[0...45] + "___" + newName[-45..-1] 1119 end 1114 1120 if $labelMapping[newName] 1115 1121 while $labelMapping[newName = "_#{@@uniqueNameCounter}_#{comment}"] … … 1431 1437 1432 1438 def mapChildren 1433 IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase), (yield @elseCase)) 1439 ifThenElse = IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase)) 1440 ifThenElse.elseCase = yield @elseCase 1441 ifThenElse 1434 1442 end 1435 1443 -
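The Label.unique change above works around assembler limits on very long symbol names when emitting Windows asm: a name over 90 characters keeps its first and last 45 characters, joined by "___". Equivalent logic as a sketch:

    #include <iostream>
    #include <string>

    // Mirrors the Ruby: newName[0...45] + "___" + newName[-45..-1] for names > 90 chars.
    std::string shortenLabel(const std::string& name)
    {
        if (name.size() <= 90)
            return name;
        return name.substr(0, 45) + "___" + name.substr(name.size() - 45);
    }

    int main()
    {
        std::cout << shortenLabel(std::string(120, 'x')).size() << '\n'; // 93
        return 0;
    }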
trunk/Source/JavaScriptCore/offlineasm/cloop.rb
r237486 r237547 89 89 when "csr2" 90 90 "tagMask" 91 when "csr3" 92 "metadataTable" 91 93 when "cfr" 92 94 "cfr" … … 417 419 # The result is a boolean. Hence, it doesn't need to be based on the type 418 420 # of the arguments being compared. 419 $asm.putc "#{operands[2].clValue} = (#{operands[0].clValue(type)} #{comparator} #{op 2 = operands[1].clValue(type)});"421 $asm.putc "#{operands[2].clValue} = (#{operands[0].clValue(type)} #{comparator} #{operands[1].clValue(type)});" 420 422 end 421 423 … … 510 512 $asm.putc " cloopStack.setCurrentStackPointer(sp.vp);" 511 513 $asm.putc " SlowPathReturnType result = #{operands[0].cLabel}(#{operands[1].clDump}, #{operands[2].clDump});" 512 $asm.putc " decodeResult(result, t0. vp, t1.vp);"514 $asm.putc " decodeResult(result, t0.cvp, t1.cvp);" 513 515 $asm.putc "}" 514 516 end … … 613 615 when "loadb" 614 616 $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].uint8MemRef};" 615 when "loadbs" 617 when "loadbs", "loadbsp" 616 618 $asm.putc "#{operands[1].clValue(:int)} = #{operands[0].int8MemRef};" 617 619 when "storeb" -
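Two small C-loop details in this hunk: csr3 is now pinned to the metadata table, and decodeResult is handed the const void* view (cvp) of the register union instead of vp, matching its signature. The register-as-union idiom, modeled (the decoding below is a hypothetical stand-in, not JSC's real packing):

    #include <cstdint>
    #include <cstdio>

    // Toy model of a CLoop register: one storage slot, many typed views.
    union CLoopRegister {
        intptr_t i;
        void* vp;
        const void* cvp;
    };

    // Hypothetical stand-in for decodeResult, which unpacks a slow-path
    // return value into two pointer outputs.
    static void decodeResult(intptr_t encoded, const void*& first, const void*& second)
    {
        first = reinterpret_cast<const void*>(encoded);
        second = nullptr;
    }

    int main()
    {
        CLoopRegister t0, t1;
        decodeResult(0x1234, t0.cvp, t1.cvp); // compiles because cvp is const void*
        std::printf("%p %p\n", const_cast<void*>(t0.cvp), const_cast<void*>(t1.cvp));
        return 0;
    }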
trunk/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
r237486 r237547 85 85 | concreteSettings, lowLevelAST, backend | 86 86 87 lowLevelAST = lowLevelAST.demacroify({}) 87 88 offsetsList = offsetsList(lowLevelAST) 88 89 sizesList = sizesList(lowLevelAST) -
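Demacroifying before collecting offsets matters because struct references can now be assembled inside macros via %concatenation%; only the expanded AST mentions concrete fields such as OpPutToScope::Metadata::operand. What the extractor records for each such field, in C++ terms (illustrative layout only; the real Metadata structs are generated):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct Metadata {
        uint32_t getPutInfo;
        void* watchpointSet;
        uintptr_t operand;
    };

    int main()
    {
        std::printf("operand offset: %zu\n", offsetof(Metadata, operand));
        std::printf("Metadata size:  %zu\n", sizeof(Metadata));
        return 0;
    }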
trunk/Source/JavaScriptCore/offlineasm/instructions.rb
r237486 r237547 55 55 "loadb", 56 56 "loadbs", 57 "loadbsp", 57 58 "loadh", 58 59 "loadhs", -
trunk/Source/JavaScriptCore/offlineasm/parser.rb
r237486 r237547 178 178 result << Token.new(CodeOrigin.new(file, lineNumber), $&) 179 179 lineNumber += 1 180 when /\A[a-zA-Z ]([a-zA-Z0-9_.]*)/180 when /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)/ 181 181 result << Token.new(CodeOrigin.new(file, lineNumber), $&) 182 182 when /\A\.([a-zA-Z0-9_]*)/ 183 183 result << Token.new(CodeOrigin.new(file, lineNumber), $&) 184 when /\A_([a-zA-Z0-9_ ]*)/184 when /\A_([a-zA-Z0-9_%]*)/ 185 185 result << Token.new(CodeOrigin.new(file, lineNumber), $&) 186 186 when /\A([ \t]+)/ … … 229 229 230 230 def isIdentifier(token) 231 token =~ /\A[a-zA-Z ]([a-zA-Z0-9_.]*)\Z/ and not isKeyword(token)231 token =~ /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)\Z/ and not isKeyword(token) 232 232 end 233 233 234 234 def isLabel(token) 235 token =~ /\A_([a-zA-Z0-9_ ]*)\Z/235 token =~ /\A_([a-zA-Z0-9_%]*)\Z/ 236 236 end 237 237 -
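The lexer changes admit '%' inside identifiers and labels so that an unexpanded concatenation such as %opcodeStruct% survives tokenization as a single token. The widened pattern, checked in C++:

    #include <iostream>
    #include <regex>
    #include <string>

    int main()
    {
        // Same shape as the new /\A[a-zA-Z%]([a-zA-Z0-9_.%]*)/ identifier rule.
        std::regex identifier("^[a-zA-Z%][a-zA-Z0-9_.%]*$");
        std::cout << std::regex_match(std::string("%opcodeStruct%"), identifier) << '\n'; // 1
        std::cout << std::regex_match(std::string("loadp"), identifier) << '\n';          // 1
        return 0;
    }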
trunk/Source/JavaScriptCore/offlineasm/transform.rb
r237486 r237547 119 119 } 120 120 end 121 121 122 def freshVariables(mapping) 123 mapChildren { 124 | child | 125 child.freshVariables(mapping) 126 } 127 end 128 122 129 def substitute(mapping) 123 130 mapChildren { … … 137 144 $uniqueMacroVarID = 0 138 145 class Macro 139 def capture140 m apping = {}146 def freshVariables(mapping = {}) 147 myMapping = mapping.dup 141 148 newVars = [] 142 149 variables.each do |var| … … 144 151 newVar = Variable.forName(var.codeOrigin, "_var#{$uniqueMacroVarID}", var.originalName) 145 152 newVars << newVar 146 m apping[var] = newVar147 end 148 Macro.new(codeOrigin, name, newVars, body. substitute(mapping))153 myMapping[var] = newVar 154 end 155 Macro.new(codeOrigin, name, newVars, body.freshVariables(myMapping)) 149 156 end 150 157 … … 165 172 166 173 class MacroCall 167 def substitute(mapping)168 newName = Variable.forName(codeOrigin, name )174 def freshVariables(mapping) 175 newName = Variable.forName(codeOrigin, name, originalName) 169 176 if mapping[newName] 170 177 newName = mapping[newName] 171 178 end 172 newOperands = operands.map { |operand| operand. substitute(mapping) }179 newOperands = operands.map { |operand| operand.freshVariables(mapping) } 173 180 MacroCall.new(codeOrigin, newName.name, newOperands, annotation, originalName) 174 181 end 175 182 end 176 183 184 $concatenation = /%([a-zA-Z0-9_]+)%/ 177 185 class Variable 186 def freshVariables(mapping) 187 if @name =~ $concatenation 188 name = @name.gsub($concatenation) { |match| 189 var = Variable.forName(codeOrigin, match[1...-1]) 190 if mapping[var] 191 "%#{mapping[var].name}%" 192 else 193 match 194 end 195 } 196 Variable.forName(codeOrigin, name) 197 elsif mapping[self] 198 mapping[self] 199 else 200 self 201 end 202 end 203 178 204 def substitute(mapping) 179 if mapping[self] 205 if @name =~ $concatenation 206 name = @name.gsub($concatenation) { |match| 207 var = Variable.forName(codeOrigin, match[1...-1]) 208 raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin} - #{mapping} " unless mapping[var] 209 mapping[var].name 210 } 211 Variable.forName(codeOrigin, name) 212 elsif mapping[self] 180 213 mapping[self] 214 else 215 self 216 end 217 end 218 end 219 220 class StructOffset 221 def freshVariables(mapping) 222 if dump =~ $concatenation 223 names = dump.gsub($concatenation) { |match| 224 var = Variable.forName(codeOrigin, match[1...-1]) 225 if mapping[var] 226 "%#{mapping[var].name}%" 227 else 228 match 229 end 230 }.split('::') 231 StructOffset.forField(codeOrigin, names[0..-2].join('::'), names[-1]) 232 else 233 self 234 end 235 end 236 237 def substitute(mapping) 238 if dump =~ $concatenation 239 names = dump.gsub($concatenation) { |match| 240 var = Variable.forName(codeOrigin, match[1...-1]) 241 raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin}" unless mapping[var] 242 mapping[var].name 243 }.split('::') 244 StructOffset.forField(codeOrigin, names[0..-2].join('::'), names[-1]) 245 else 246 self 247 end 248 end 249 end 250 251 class Label 252 def freshVariables(mapping) 253 if @name =~ $concatenation 254 name = @name.gsub($concatenation) { |match| 255 var = Variable.forName(codeOrigin, match[1...-1]) 256 if mapping[var] 257 "%#{mapping[var].name}%" 258 else 259 match 260 end 261 } 262 Label.forName(codeOrigin, name, @definedInFile) 263 else 264 self 265 end 266 end 267 268 def substitute(mapping) 269 if @name =~ $concatenation 270 name = @name.gsub($concatenation) { |match| 271 var = Variable.forName(codeOrigin, match[1...-1]) 272 
raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin}" unless mapping[var] 273 mapping[var].name 274 } 275 Label.forName(codeOrigin, name, @definedInFile) 276 else 277 self 278 end 279 end 280 end 281 282 class ConstExpr 283 def freshVariables(mapping) 284 if @value =~ $concatenation 285 value = @value.gsub($concatenation) { |match| 286 var = Variable.forName(codeOrigin, match[1...-1]) 287 if mapping[var] 288 "%#{mapping[var].name}%" 289 else 290 match 291 end 292 } 293 ConstExpr.forName(codeOrigin, value) 294 else 295 self 296 end 297 end 298 299 def substitute(mapping) 300 if @value =~ $concatenation 301 value = @value.gsub($concatenation) { |match| 302 var = Variable.forName(codeOrigin, match[1...-1]) 303 raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin}" unless mapping[var] 304 mapping[var].name 305 } 306 ConstExpr.forName(codeOrigin, value) 307 else 308 self 309 end 310 end 311 end 312 313 class Sizeof 314 def freshVariables(mapping) 315 if struct =~ $concatenation 316 value = struct.gsub($concatenation) { |match| 317 var = Variable.forName(codeOrigin, match[1...-1]) 318 if mapping[var] 319 "%#{mapping[var].name}%" 320 else 321 match 322 end 323 } 324 Sizeof.forName(codeOrigin, value) 325 else 326 self 327 end 328 end 329 330 def substitute(mapping) 331 if struct =~ $concatenation 332 value = struct.gsub($concatenation) { |match| 333 var = Variable.forName(codeOrigin, match[1...-1]) 334 raise "Unknown variable `#{var.originalName}` in substitution at #{codeOrigin}" unless mapping[var] 335 mapping[var].name 336 } 337 Sizeof.forName(codeOrigin, value) 181 338 else 182 339 self … … 192 349 self 193 350 end 351 end 352 end 353 354 class MacroError < RuntimeError 355 attr_reader :message 356 attr_reader :backtrace 357 def initialize(message, backtrace) 358 @message = message 359 @backtrace = backtrace 194 360 end 195 361 end … … 223 389 end 224 390 391 @@demacroifyStack = [] 392 def macroError(msg) 393 backtrace = @@demacroifyStack.reverse.map { |macroCall| 394 "#{macroCall.codeOrigin} in call to #{macroCall.originalName}" 395 } 396 raise MacroError.new(msg, backtrace) 397 end 398 225 399 def demacroify(macros) 226 400 myMacros = macros.dup … … 228 402 | item | 229 403 if item.is_a? Macro 230 myMacros[item.name] = item. capture404 myMacros[item.name] = item.freshVariables 231 405 end 232 406 } … … 237 411 # Ignore. 238 412 elsif item.is_a? MacroCall 413 @@demacroifyStack << item 239 414 mapping = {} 240 415 myMyMacros = myMacros.dup 241 raise "Could not find macro #{item.originalName} at #{item.codeOriginString}" unless myMacros[item.name] 242 raise "Argument count mismatch for call to #{item.originalName} at #{item.codeOriginString}" unless item.operands.size == myMacros[item.name].variables.size 416 macro = myMacros[item.name] 417 macroError "Could not find macro #{item.originalName}" unless macro 418 macroError "Argument count mismatch for call to #{item.originalName} (expected #{macro.variables.size} but got #{item.operands.size} arguments for macro #{item.originalName} defined at #{macro.codeOrigin})" unless item.operands.size == macro.variables.size 243 419 item.operands.size.times { 244 420 | idx | 245 421 if item.operands[idx].is_a? 
Variable and myMacros[item.operands[idx].name] 246 myMyMacros[m yMacros[item.name].variables[idx].name] = myMacros[item.operands[idx].name]247 mapping[m yMacros[item.name].variables[idx].name] = nil422 myMyMacros[macro.variables[idx].name] = myMacros[item.operands[idx].name] 423 mapping[macro.variables[idx]] = nil 248 424 elsif item.operands[idx].is_a? Macro 249 myMyMacros[m yMacros[item.name].variables[idx].name] = item.operands[idx].capture250 mapping[m yMacros[item.name].variables[idx].name] = nil425 myMyMacros[macro.variables[idx].name] = item.operands[idx].freshVariables 426 mapping[macro.variables[idx]] = nil 251 427 else 252 myMyMacros[m yMacros[item.name].variables[idx]] = nil253 mapping[m yMacros[item.name].variables[idx]] = item.operands[idx]428 myMyMacros[macro.variables[idx]] = nil 429 mapping[macro.variables[idx]] = item.operands[idx] 254 430 end 255 431 } … … 257 433 newList << Instruction.new(item.codeOrigin, "localAnnotation", [], item.annotation) 258 434 end 259 newList += myMacros[item.name].body.substitute(mapping).demacroify(myMyMacros).renameLabels(item.originalName).list 435 newList += macro.body.substitute(mapping).demacroify(myMyMacros).renameLabels(item.originalName).list 436 @@demacroifyStack.pop 260 437 else 261 438 newList << item.demacroify(myMacros) … … 522 699 class Label 523 700 def validate 701 raise "Unresolved substitution in Label #{name} at #{codeOrigin}" if name =~ /%/ 524 702 end 525 703 end … … 544 722 end 545 723 end 546 -
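The bulk of the transform.rb changes implement that concatenation: during substitution, each %name% inside a Variable, StructOffset, Label, ConstExpr, or Sizeof is replaced by the macro argument bound to name, which is how a single macro body can reference OpGetFromScope::Metadata and OpPutToScope::Metadata alike. A minimal C++ stand-in for the gsub($concatenation) logic:

    #include <iostream>
    #include <map>
    #include <regex>
    #include <stdexcept>
    #include <string>

    // Expand %name% occurrences using the macro-argument mapping.
    std::string substitute(std::string name, const std::map<std::string, std::string>& mapping)
    {
        static const std::regex concatenation("%([a-zA-Z0-9_]+)%");
        std::smatch match;
        while (std::regex_search(name, match, concatenation)) {
            auto it = mapping.find(match[1].str());
            if (it == mapping.end())
                throw std::runtime_error("Unknown variable `" + match[1].str() + "` in substitution");
            name = match.prefix().str() + it->second + match.suffix().str();
        }
        return name;
    }

    int main()
    {
        std::map<std::string, std::string> mapping { { "opcodeStruct", "OpPutToScope" } };
        std::cout << substitute("%opcodeStruct%::Metadata::operand", mapping) << '\n';
        // OpPutToScope::Metadata::operand
        return 0;
    }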
trunk/Source/JavaScriptCore/offlineasm/x86.rb
r237486 r237547 284 284 isWin ? "r10" : "r8" 285 285 when "t5" 286 raise "cannot use register #{name} on X86-64 Windows" unless not isWin 287 "r10" 286 isWin ? "ecx" : "r10" 288 287 when "csr0" 289 288 "ebx" … … 466 465 # FIXME: Implement this on platforms that aren't Mach-O. 467 466 # https://bugs.webkit.org/show_bug.cgi?id=175104 468 $asm.puts "movq #{asmLabel}@GOTPCREL(%rip), #{dst.x86Operand(:ptr)}" 467 used 468 if !isIntelSyntax 469 $asm.puts "movq #{asmLabel}@GOTPCREL(%rip), #{dst.x86Operand(:ptr)}" 470 else 471 $asm.puts "lea #{dst.x86Operand(:ptr)}, #{asmLabel}" 472 end 469 473 "#{offset}(#{dst.x86Operand(kind)})" 470 474 end … … 585 589 def emitX86Lea(src, dst, kind) 586 590 if src.is_a? LabelReference 587 $asm.puts "movq #{src.asmLabel}@GOTPCREL(%rip), #{dst.x86Operand(:ptr)}" 591 src.used 592 if !isIntelSyntax 593 $asm.puts "movq #{src.asmLabel}@GOTPCREL(%rip), #{dst.x86Operand(:ptr)}" 594 else 595 $asm.puts "lea #{dst.x86Operand(:ptr)}, #{src.asmLabel}" 596 end 588 597 else 589 598 $asm.puts "lea#{x86Suffix(kind)} #{orderOperands(src.x86AddressOperand(kind), dst.x86Operand(kind))}" … … 948 957 when "xorq" 949 958 handleX86Op("xor#{x86Suffix(:quad)}", :quad) 959 when "leap" 960 emitX86Lea(operands[0], operands[1], :ptr) 950 961 when "loadi" 951 962 $asm.puts "mov#{x86Suffix(:int)} #{x86LoadOperands(:int, :int)}" … … 981 992 else 982 993 $asm.puts "movsx #{x86LoadOperands(:byte, :int)}" 994 end 995 when "loadbsp" 996 if !isIntelSyntax 997 $asm.puts "movsb#{x86Suffix(:ptr)} #{x86LoadOperands(:byte, :ptr)}" 998 else 999 $asm.puts "movsx #{x86LoadOperands(:byte, :ptr)}" 983 1000 end 984 1001 when "loadh" -
trunk/Source/JavaScriptCore/parser/ResultType.h
r237486 r237547 175 175 constexpr Type bits() const { return m_bits; } 176 176 177 void dump(PrintStream& out) const 178 { 179 // FIXME: more meaningful information 180 // https://bugs.webkit.org/show_bug.cgi?id=190930 181 out.print(bits()); 182 } 183 177 184 private: 178 185 Type m_bits; … … 198 205 } m_u; 199 206 200 ResultType first() 207 ResultType first() const 201 208 { 202 209 return ResultType(m_u.rds.first); 203 210 } 204 211 205 ResultType second() 212 ResultType second() const 206 213 { 207 214 return ResultType(m_u.rds.second); … … 218 225 return types; 219 226 } 227 228 void dump(PrintStream& out) const 229 { 230 out.print("OperandTypes(", first(), ", ", second(), ")"); 231 } 220 232 }; 221 233 -
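The new dump(PrintStream&) const members plug ResultType and OperandTypes into WTF's generic printing, so instances can be passed straight to out.print(...) or dataLog(...). A self-contained model of that idiom (not the real WTF::PrintStream):

    #include <cstdio>

    class PrintStream {
    public:
        void print(const char* string) { std::fputs(string, stdout); }
        void print(unsigned number) { std::printf("%u", number); }
        template<typename T> void print(const T& value) { value.dump(*this); }
        template<typename T, typename U, typename... Rest>
        void print(const T& first, const U& second, const Rest&... rest)
        {
            print(first);
            print(second, rest...);
        }
    };

    struct ResultType {
        unsigned bits;
        void dump(PrintStream& out) const { out.print(bits); }
    };

    struct OperandTypes {
        ResultType first, second;
        void dump(PrintStream& out) const { out.print("OperandTypes(", first, ", ", second, ")"); }
    };

    int main()
    {
        PrintStream out;
        OperandTypes types { { 1 }, { 2 } };
        out.print(types, "\n"); // OperandTypes(1, 2)
        return 0;
    }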
trunk/Source/JavaScriptCore/profiler/ProfilerBytecodeSequence.cpp
r237486 r237547 56 56 out.reset(); 57 57 codeBlock->dumpBytecode(out, bytecodeIndex, statusMap); 58 OpcodeID opcodeID = Interpreter::getOpcodeID(codeBlock->instructions()[bytecodeIndex].u.opcode); 58 auto instruction = codeBlock->instructions().at(bytecodeIndex); 59 OpcodeID opcodeID = instruction->opcodeID(); 59 60 m_sequence.append(Bytecode(bytecodeIndex, opcodeID, out.toCString())); 60 bytecodeIndex += opcodeLength(opcodeID);61 bytecodeIndex += instruction->size(); 61 62 } 62 63 } -
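This hunk shows the core contract change for anyone walking bytecode: instructions are no longer a fixed number of machine words per opcode, so the walk asks each instruction for its own size() instead of computing opcodeLength(opcodeID). Modeled with an invented byte encoding:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Invented encoding for illustration: [opcode, operandCount, operands...].
    struct InstructionRef {
        const uint8_t* ptr;
        unsigned opcodeID() const { return ptr[0]; }
        std::size_t size() const { return 2 + ptr[1]; }
    };

    int main()
    {
        std::vector<uint8_t> stream { 7, 1, 42, 9, 3, 1, 2, 3 };
        for (std::size_t offset = 0; offset < stream.size();) {
            InstructionRef instruction { stream.data() + offset };
            std::printf("offset %zu: opcode %u, size %zu\n", offset, instruction.opcodeID(), instruction.size());
            offset += instruction.size(); // replaces the old opcodeLength(opcodeID) step
        }
        return 0;
    }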
trunk/Source/JavaScriptCore/runtime/CommonSlowPaths.cpp
r237486 r237547 80 80 #define SET_PC_FOR_STUBS() do { \ 81 81 exec->codeBlock()->bytecodeOffset(pc); \ 82 exec->setCurrentVPC(pc + 1); \82 exec->setCurrentVPC(pc); \ 83 83 } while (false) 84 84 #else 85 85 #define SET_PC_FOR_STUBS() do { \ 86 exec->setCurrentVPC(pc + 1); \86 exec->setCurrentVPC(pc); \ 87 87 } while (false) 88 88 #endif … … 94 94 SET_PC_FOR_STUBS() 95 95 96 #define OP(index) (exec->uncheckedR(pc[index].u.operand)) 97 #define OP_C(index) (exec->r(pc[index].u.operand)) 98 99 #define GET(operand) (exec->uncheckedR(operand)) 100 #define GET_C(operand) (exec->r(operand)) 96 #define GET(operand) (exec->uncheckedR(operand.offset())) 97 #define GET_C(operand) (exec->r(operand.offset())) 101 98 102 99 #define RETURN_TWO(first, second) do { \ … … 125 122 } while (false) 126 123 127 #define BRANCH( opcode,condition) do { \124 #define BRANCH(condition) do { \ 128 125 bool bCondition = (condition); \ 129 126 CHECK_EXCEPTION(); \ 130 127 if (bCondition) \ 131 pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand; \ 128 pc = bytecode.target \ 129 ? reinterpret_cast<const Instruction*>(reinterpret_cast<const uint8_t*>(pc) + bytecode.target) \ 130 : exec->codeBlock()->outOfLineJumpTarget(pc); \ 132 131 else \ 133 pc += OPCODE_LENGTH(opcode);\132 pc = reinterpret_cast<const Instruction*>(reinterpret_cast<const uint8_t*>(pc) + pc->size()); \ 134 133 END_IMPL(); \ 135 134 } while (false) 136 135 137 #define RETURN_WITH_PROFILING (value__, profilingAction__) do { \136 #define RETURN_WITH_PROFILING_CUSTOM(result__, value__, profilingAction__) do { \ 138 137 JSValue returnValue__ = (value__); \ 139 138 CHECK_EXCEPTION(); \ 140 OP(1) = returnValue__; \139 GET(result__) = returnValue__; \ 141 140 profilingAction__; \ 142 141 END_IMPL(); \ 143 142 } while (false) 144 143 144 #define RETURN_WITH_PROFILING(value__, profilingAction__) RETURN_WITH_PROFILING_CUSTOM(bytecode.dst, value__, profilingAction__) 145 145 146 #define RETURN(value) \ 146 147 RETURN_WITH_PROFILING(value, { }) 147 148 148 #define RETURN_PROFILED(opcode__, value__) \ 149 RETURN_WITH_PROFILING(value__, PROFILE_VALUE(opcode__, returnValue__)) 150 151 #define PROFILE_VALUE(opcode, value) do { \ 152 pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \ 153 JSValue::encode(value); \ 149 #define RETURN_PROFILED(value__) \ 150 RETURN_WITH_PROFILING(value__, PROFILE_VALUE(returnValue__)) 151 152 #define PROFILE_VALUE(value) do { \ 153 bytecode.metadata(exec).profile.m_buckets[0] = JSValue::encode(value); \ 154 154 } while (false) 155 155 … … 206 206 { 207 207 BEGIN(); 208 auto bytecode = pc->as<OpCreateDirectArguments>(); 208 209 RETURN(DirectArguments::createByCopying(exec)); 209 210 } … … 212 213 { 213 214 BEGIN(); 214 JSLexicalEnvironment* scope = jsCast<JSLexicalEnvironment*>(OP(2).jsValue()); 215 auto bytecode = pc->as<OpCreateScopedArguments>(); 216 JSLexicalEnvironment* scope = jsCast<JSLexicalEnvironment*>(GET(bytecode.scope).jsValue()); 215 217 ScopedArgumentsTable* table = scope->symbolTable()->arguments(); 216 218 RETURN(ScopedArguments::createByCopying(exec, table, scope)); … … 220 222 { 221 223 BEGIN(); 224 auto bytecode = pc->as<OpCreateClonedArguments>(); 222 225 RETURN(ClonedArguments::createWithMachineFrame(exec, exec, ArgumentsMode::Cloned)); 223 226 } … … 226 229 { 227 230 BEGIN(); 228 auto & bytecode = *reinterpret_cast<OpCreateThis*>(pc);231 auto bytecode = pc->as<OpCreateThis>(); 229 232 JSObject* result; 230 JSObject* constructorAsObject = asObject(GET(bytecode.callee ()).jsValue());233 JSObject* constructorAsObject = 
asObject(GET(bytecode.callee).jsValue()); 231 234 if (constructorAsObject->type() == JSFunctionType && jsCast<JSFunction*>(constructorAsObject)->canUseAllocationProfile()) { 232 235 JSFunction* constructor = jsCast<JSFunction*>(constructorAsObject); 233 WriteBarrier<JSCell>& cachedCallee = bytecode. cachedCallee();236 WriteBarrier<JSCell>& cachedCallee = bytecode.metadata(exec).cachedCallee; 234 237 if (!cachedCallee) 235 238 cachedCallee.set(vm, exec->codeBlock(), constructor); … … 237 240 cachedCallee.setWithoutWriteBarrier(JSCell::seenMultipleCalleeObjects()); 238 241 239 size_t inlineCapacity = bytecode.inlineCapacity ();242 size_t inlineCapacity = bytecode.inlineCapacity; 240 243 ObjectAllocationProfile* allocationProfile = constructor->ensureRareDataAndAllocationProfile(exec, inlineCapacity)->objectAllocationProfile(); 241 244 Structure* structure = allocationProfile->structure(); … … 263 266 { 264 267 BEGIN(); 265 JSValue v1 = OP(1).jsValue(); 268 auto bytecode = pc->as<OpToThis>(); 269 auto& metadata = bytecode.metadata(exec); 270 JSValue v1 = GET(bytecode.srcDst).jsValue(); 266 271 if (v1.isCell()) { 267 272 Structure* myStructure = v1.asCell()->structure(vm); 268 Structure* otherStructure = pc[2].u.structure.get();273 Structure* otherStructure = metadata.cachedStructure.get(); 269 274 if (myStructure != otherStructure) { 270 275 if (otherStructure) 271 pc[3].u.toThisStatus = ToThisConflicted;272 pc[2].u.structure.set(vm, exec->codeBlock(), myStructure);276 metadata.toThisStatus = ToThisConflicted; 277 metadata.cachedStructure.set(vm, exec->codeBlock(), myStructure); 273 278 } 274 279 } else { 275 pc[3].u.toThisStatus = ToThisConflicted;276 pc[2].u.structure.clear();280 metadata.toThisStatus = ToThisConflicted; 281 metadata.cachedStructure.clear(); 277 282 } 278 283 // Note: We only need to do this value profiling here on the slow path. The fast path … … 282 287 // the same SpeculatedType. Therefore, we don't need to worry about value profiling on the 283 288 // fast path. 284 RETURN_PROFILED(op_to_this, v1.toThis(exec, exec->codeBlock()->isStrictMode() ? StrictMode : NotStrictMode)); 289 auto value = v1.toThis(exec, exec->codeBlock()->isStrictMode() ? 
StrictMode : NotStrictMode); 290 RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, value, PROFILE_VALUE(value)); 285 291 } 286 292 … … 306 312 { 307 313 BEGIN(); 308 RETURN(jsBoolean(!OP_C(2).jsValue().toBoolean(exec))); 314 auto bytecode = pc->as<OpNot>(); 315 RETURN(jsBoolean(!GET_C(bytecode.operand).jsValue().toBoolean(exec))); 309 316 } 310 317 … … 312 319 { 313 320 BEGIN(); 314 RETURN(jsBoolean(JSValue::equal(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 321 auto bytecode = pc->as<OpEq>(); 322 RETURN(jsBoolean(JSValue::equal(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 315 323 } 316 324 … … 318 326 { 319 327 BEGIN(); 320 RETURN(jsBoolean(!JSValue::equal(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 328 auto bytecode = pc->as<OpNeq>(); 329 RETURN(jsBoolean(!JSValue::equal(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 321 330 } 322 331 … … 324 333 { 325 334 BEGIN(); 326 RETURN(jsBoolean(JSValue::strictEqual(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 335 auto bytecode = pc->as<OpStricteq>(); 336 RETURN(jsBoolean(JSValue::strictEqual(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 327 337 } 328 338 … … 330 340 { 331 341 BEGIN(); 332 RETURN(jsBoolean(!JSValue::strictEqual(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 342 auto bytecode = pc->as<OpNstricteq>(); 343 RETURN(jsBoolean(!JSValue::strictEqual(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 333 344 } 334 345 … … 336 347 { 337 348 BEGIN(); 338 RETURN(jsBoolean(jsLess<true>(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 349 auto bytecode = pc->as<OpLess>(); 350 RETURN(jsBoolean(jsLess<true>(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 339 351 } 340 352 … … 342 354 { 343 355 BEGIN(); 344 RETURN(jsBoolean(jsLessEq<true>(exec, OP_C(2).jsValue(), OP_C(3).jsValue()))); 356 auto bytecode = pc->as<OpLesseq>(); 357 RETURN(jsBoolean(jsLessEq<true>(exec, GET_C(bytecode.lhs).jsValue(), GET_C(bytecode.rhs).jsValue()))); 345 358 } 346 359 … … 348 361 { 349 362 BEGIN(); 350 RETURN(jsBoolean(jsLess<false>(exec, OP_C(3).jsValue(), OP_C(2).jsValue()))); 363 auto bytecode = pc->as<OpGreater>(); 364 RETURN(jsBoolean(jsLess<false>(exec, GET_C(bytecode.rhs).jsValue(), GET_C(bytecode.lhs).jsValue()))); 351 365 } 352 366 … … 354 368 { 355 369 BEGIN(); 356 RETURN(jsBoolean(jsLessEq<false>(exec, OP_C(3).jsValue(), OP_C(2).jsValue()))); 370 auto bytecode = pc->as<OpGreatereq>(); 371 RETURN(jsBoolean(jsLessEq<false>(exec, GET_C(bytecode.rhs).jsValue(), GET_C(bytecode.lhs).jsValue()))); 357 372 } 358 373 … … 360 375 { 361 376 BEGIN(); 362 RETURN(jsNumber(OP(1).jsValue().toNumber(exec) + 1)); 377 auto bytecode = pc->as<OpInc>(); 378 RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, jsNumber(GET(bytecode.srcDst).jsValue().toNumber(exec) + 1), { }); 363 379 } 364 380 … … 366 382 { 367 383 BEGIN(); 368 RETURN(jsNumber(OP(1).jsValue().toNumber(exec) - 1)); 384 auto bytecode = pc->as<OpDec>(); 385 RETURN_WITH_PROFILING_CUSTOM(bytecode.srcDst, jsNumber(GET(bytecode.srcDst).jsValue().toNumber(exec) - 1), { }); 369 386 } 370 387 … … 372 389 { 373 390 BEGIN(); 374 RETURN(OP_C(2).jsValue().toString(exec)); 391 auto bytecode = pc->as<OpToString>(); 392 RETURN(GET_C(bytecode.operand).jsValue().toString(exec)); 375 393 } 376 394 377 395 #if ENABLE(JIT) 378 static void updateArithProfileForUnaryArithOp( Instruction* pc, JSValue result, JSValue operand)379 { 380 ArithProfile& profile = *bitwise_cast<ArithProfile*>(&pc[3].u.operand);396 static void 
updateArithProfileForUnaryArithOp(OpNegate::Metadata& metadata, JSValue result, JSValue operand) 397 { 398 ArithProfile& profile = metadata.arithProfile; 381 399 profile.observeLHS(operand); 382 400 ASSERT(result.isNumber() || result.isBigInt()); … … 405 423 } 406 424 #else 407 static void updateArithProfileForUnaryArithOp( Instruction*, JSValue, JSValue) { }425 static void updateArithProfileForUnaryArithOp(OpNegate::Metadata&, JSValue, JSValue) { } 408 426 #endif 409 427 … … 411 429 { 412 430 BEGIN(); 413 JSValue operand = OP_C(2).jsValue(); 431 auto bytecode = pc->as<OpNegate>(); 432 auto& metadata = bytecode.metadata(exec); 433 JSValue operand = GET_C(bytecode.operand).jsValue(); 414 434 JSValue primValue = operand.toPrimitive(exec, PreferNumber); 415 435 CHECK_EXCEPTION(); … … 418 438 JSBigInt* result = JSBigInt::unaryMinus(vm, asBigInt(primValue)); 419 439 RETURN_WITH_PROFILING(result, { 420 updateArithProfileForUnaryArithOp( pc, result, operand);440 updateArithProfileForUnaryArithOp(metadata, result, operand); 421 441 }); 422 442 } … … 425 445 CHECK_EXCEPTION(); 426 446 RETURN_WITH_PROFILING(result, { 427 updateArithProfileForUnaryArithOp( pc, result, operand);447 updateArithProfileForUnaryArithOp(metadata, result, operand); 428 448 }); 429 449 } 430 450 431 451 #if ENABLE(DFG_JIT) 432 static void updateArithProfileForBinaryArithOp(ExecState* exec, Instruction* pc, JSValue result, JSValue left, JSValue right)452 static void updateArithProfileForBinaryArithOp(ExecState* exec, const Instruction* pc, JSValue result, JSValue left, JSValue right) 433 453 { 434 454 CodeBlock* codeBlock = exec->codeBlock(); … … 459 479 } 460 480 #else 461 static void updateArithProfileForBinaryArithOp(ExecState*, Instruction*, JSValue, JSValue, JSValue) { }481 static void updateArithProfileForBinaryArithOp(ExecState*, const Instruction*, JSValue, JSValue, JSValue) { } 462 482 #endif 463 483 … … 465 485 { 466 486 BEGIN(); 467 JSValue argument = OP_C(2).jsValue(); 487 auto bytecode = pc->as<OpToNumber>(); 488 JSValue argument = GET_C(bytecode.operand).jsValue(); 468 489 JSValue result = jsNumber(argument.toNumber(exec)); 469 RETURN_PROFILED( op_to_number,result);490 RETURN_PROFILED(result); 470 491 } 471 492 … … 473 494 { 474 495 BEGIN(); 475 JSValue argument = OP_C(2).jsValue(); 496 auto bytecode = pc->as<OpToObject>(); 497 JSValue argument = GET_C(bytecode.operand).jsValue(); 476 498 if (UNLIKELY(argument.isUndefinedOrNull())) { 477 const Identifier& ident = exec->codeBlock()->identifier( pc[3].u.operand);499 const Identifier& ident = exec->codeBlock()->identifier(bytecode.message); 478 500 if (!ident.isEmpty()) 479 501 THROW(createTypeError(exec, ident.impl())); 480 502 } 481 503 JSObject* result = argument.toObject(exec); 482 RETURN_PROFILED( op_to_object,result);504 RETURN_PROFILED(result); 483 505 } 484 506 … … 486 508 { 487 509 BEGIN(); 488 JSValue v1 = OP_C(2).jsValue(); 489 JSValue v2 = OP_C(3).jsValue(); 510 auto bytecode = pc->as<OpAdd>(); 511 JSValue v1 = GET_C(bytecode.lhs).jsValue(); 512 JSValue v2 = GET_C(bytecode.rhs).jsValue(); 490 513 JSValue result; 491 514 … … 514 537 { 515 538 BEGIN(); 516 JSValue left = OP_C(2).jsValue(); 517 JSValue right = OP_C(3).jsValue(); 539 auto bytecode = pc->as<OpMul>(); 540 JSValue left = GET_C(bytecode.lhs).jsValue(); 541 JSValue right = GET_C(bytecode.rhs).jsValue(); 518 542 JSValue result = jsMul(exec, left, right); 519 543 CHECK_EXCEPTION(); … … 526 550 { 527 551 BEGIN(); 528 JSValue left = OP_C(2).jsValue(); 529 JSValue right = OP_C(3).jsValue(); 552 auto 
bytecode = pc->as<OpSub>(); 553 JSValue left = GET_C(bytecode.lhs).jsValue(); 554 JSValue right = GET_C(bytecode.rhs).jsValue(); 530 555 auto leftNumeric = left.toNumeric(exec); 531 556 CHECK_EXCEPTION(); … … 553 578 { 554 579 BEGIN(); 555 JSValue left = OP_C(2).jsValue(); 556 JSValue right = OP_C(3).jsValue(); 580 auto bytecode = pc->as<OpDiv>(); 581 JSValue left = GET_C(bytecode.lhs).jsValue(); 582 JSValue right = GET_C(bytecode.rhs).jsValue(); 557 583 auto leftNumeric = left.toNumeric(exec); 558 584 CHECK_EXCEPTION(); … … 583 609 { 584 610 BEGIN(); 585 JSValue left = OP_C(2).jsValue(); 586 JSValue right = OP_C(3).jsValue(); 611 auto bytecode = pc->as<OpMod>(); 612 JSValue left = GET_C(bytecode.lhs).jsValue(); 613 JSValue right = GET_C(bytecode.rhs).jsValue(); 587 614 auto leftNumeric = left.toNumeric(exec); 588 615 CHECK_EXCEPTION(); … … 608 635 { 609 636 BEGIN(); 610 double a = OP_C(2).jsValue().toNumber(exec); 637 auto bytecode = pc->as<OpPow>(); 638 double a = GET_C(bytecode.lhs).jsValue().toNumber(exec); 611 639 if (UNLIKELY(throwScope.exception())) 612 640 RETURN(JSValue()); 613 double b = OP_C(3).jsValue().toNumber(exec);641 double b = GET_C(bytecode.rhs).jsValue().toNumber(exec); 614 642 if (UNLIKELY(throwScope.exception())) 615 643 RETURN(JSValue()); … … 620 648 { 621 649 BEGIN(); 622 int32_t a = OP_C(2).jsValue().toInt32(exec); 650 auto bytecode = pc->as<OpLshift>(); 651 int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); 623 652 if (UNLIKELY(throwScope.exception())) 624 653 RETURN(JSValue()); 625 uint32_t b = OP_C(3).jsValue().toUInt32(exec);654 uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); 626 655 RETURN(jsNumber(a << (b & 31))); 627 656 } … … 630 659 { 631 660 BEGIN(); 632 int32_t a = OP_C(2).jsValue().toInt32(exec); 661 auto bytecode = pc->as<OpRshift>(); 662 int32_t a = GET_C(bytecode.lhs).jsValue().toInt32(exec); 633 663 if (UNLIKELY(throwScope.exception())) 634 664 RETURN(JSValue()); 635 uint32_t b = OP_C(3).jsValue().toUInt32(exec);665 uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); 636 666 RETURN(jsNumber(a >> (b & 31))); 637 667 } … … 640 670 { 641 671 BEGIN(); 642 uint32_t a = OP_C(2).jsValue().toUInt32(exec); 672 auto bytecode = pc->as<OpUrshift>(); 673 uint32_t a = GET_C(bytecode.lhs).jsValue().toUInt32(exec); 643 674 if (UNLIKELY(throwScope.exception())) 644 675 RETURN(JSValue()); 645 uint32_t b = OP_C(3).jsValue().toUInt32(exec);676 uint32_t b = GET_C(bytecode.rhs).jsValue().toUInt32(exec); 646 677 RETURN(jsNumber(static_cast<int32_t>(a >> (b & 31)))); 647 678 } … … 650 681 { 651 682 BEGIN(); 652 uint32_t a = OP_C(2).jsValue().toUInt32(exec); 683 auto bytecode = pc->as<OpUnsigned>(); 684 uint32_t a = GET_C(bytecode.operand).jsValue().toUInt32(exec); 653 685 RETURN(jsNumber(a)); 654 686 } … … 657 689 { 658 690 BEGIN(); 659 auto leftNumeric = OP_C(2).jsValue().toBigIntOrInt32(exec); 660 CHECK_EXCEPTION(); 661 auto rightNumeric = OP_C(3).jsValue().toBigIntOrInt32(exec); 691 auto bytecode = pc->as<OpBitand>(); 692 auto leftNumeric = GET_C(bytecode.lhs).jsValue().toBigIntOrInt32(exec); 693 CHECK_EXCEPTION(); 694 auto rightNumeric = GET_C(bytecode.rhs).jsValue().toBigIntOrInt32(exec); 662 695 CHECK_EXCEPTION(); 663 696 if (WTF::holds_alternative<JSBigInt*>(leftNumeric) || WTF::holds_alternative<JSBigInt*>(rightNumeric)) { … … 665 698 JSBigInt* result = JSBigInt::bitwiseAnd(vm, WTF::get<JSBigInt*>(leftNumeric), WTF::get<JSBigInt*>(rightNumeric)); 666 699 CHECK_EXCEPTION(); 667 RETURN_PROFILED( op_bitand,result);700 RETURN_PROFILED(result); 
668 701 } 669 702 … … 671 704 } 672 705 673 RETURN_PROFILED( op_bitand,jsNumber(WTF::get<int32_t>(leftNumeric) & WTF::get<int32_t>(rightNumeric)));706 RETURN_PROFILED(jsNumber(WTF::get<int32_t>(leftNumeric) & WTF::get<int32_t>(rightNumeric))); 674 707 } 675 708 … … 677 710 { 678 711 BEGIN(); 679 auto leftNumeric = OP_C(2).jsValue().toBigIntOrInt32(exec); 680 CHECK_EXCEPTION(); 681 auto rightNumeric = OP_C(3).jsValue().toBigIntOrInt32(exec); 712 auto bytecode = pc->as<OpBitor>(); 713 auto leftNumeric = GET_C(bytecode.lhs).jsValue().toBigIntOrInt32(exec); 714 CHECK_EXCEPTION(); 715 auto rightNumeric = GET_C(bytecode.rhs).jsValue().toBigIntOrInt32(exec); 682 716 CHECK_EXCEPTION(); 683 717 if (WTF::holds_alternative<JSBigInt*>(leftNumeric) || WTF::holds_alternative<JSBigInt*>(rightNumeric)) { … … 685 719 JSBigInt* result = JSBigInt::bitwiseOr(vm, WTF::get<JSBigInt*>(leftNumeric), WTF::get<JSBigInt*>(rightNumeric)); 686 720 CHECK_EXCEPTION(); 687 RETURN_PROFILED( op_bitor,result);721 RETURN_PROFILED(result); 688 722 } 689 723 … … 691 725 } 692 726 693 RETURN_PROFILED( op_bitor,jsNumber(WTF::get<int32_t>(leftNumeric) | WTF::get<int32_t>(rightNumeric)));727 RETURN_PROFILED(jsNumber(WTF::get<int32_t>(leftNumeric) | WTF::get<int32_t>(rightNumeric))); 694 728 } 695 729 … … 697 731 { 698 732 BEGIN(); 699 auto leftNumeric = OP_C(2).jsValue().toBigIntOrInt32(exec); 700 CHECK_EXCEPTION(); 701 auto rightNumeric = OP_C(3).jsValue().toBigIntOrInt32(exec); 733 auto bytecode = pc->as<OpBitxor>(); 734 auto leftNumeric = GET_C(bytecode.lhs).jsValue().toBigIntOrInt32(exec); 735 CHECK_EXCEPTION(); 736 auto rightNumeric = GET_C(bytecode.rhs).jsValue().toBigIntOrInt32(exec); 702 737 CHECK_EXCEPTION(); 703 738 if (WTF::holds_alternative<JSBigInt*>(leftNumeric) || WTF::holds_alternative<JSBigInt*>(rightNumeric)) { … … 717 752 { 718 753 BEGIN(); 719 RETURN(jsTypeStringForValue(exec, OP_C(2).jsValue())); 754 auto bytecode = pc->as<OpTypeof>(); 755 RETURN(jsTypeStringForValue(exec, GET_C(bytecode.value).jsValue())); 720 756 } 721 757 … … 723 759 { 724 760 BEGIN(); 725 RETURN(jsBoolean(jsIsObjectTypeOrNull(exec, OP_C(2).jsValue()))); 761 auto bytecode = pc->as<OpIsObjectOrNull>(); 762 RETURN(jsBoolean(jsIsObjectTypeOrNull(exec, GET_C(bytecode.operand).jsValue()))); 726 763 } 727 764 … … 729 766 { 730 767 BEGIN(); 731 RETURN(jsBoolean(OP_C(2).jsValue().isFunction(vm))); 768 auto bytecode = pc->as<OpIsFunction>(); 769 RETURN(jsBoolean(GET_C(bytecode.operand).jsValue().isFunction(vm))); 732 770 } 733 771 … … 735 773 { 736 774 BEGIN(); 737 RETURN(jsBoolean(CommonSlowPaths::opInByVal(exec, OP_C(2).jsValue(), OP_C(3).jsValue(), arrayProfileFor<OpInByValShape>(pc)))); 775 auto bytecode = pc->as<OpInByVal>(); 776 auto& metadata = bytecode.metadata(exec); 777 RETURN(jsBoolean(CommonSlowPaths::opInByVal(exec, GET_C(bytecode.base).jsValue(), GET_C(bytecode.property).jsValue(), &metadata.arrayProfile))); 738 778 } 739 779 … … 742 782 BEGIN(); 743 783 744 JSValue baseValue = OP_C(2).jsValue(); 784 auto bytecode = pc->as<OpInById>(); 785 JSValue baseValue = GET_C(bytecode.base).jsValue(); 745 786 if (!baseValue.isObject()) 746 787 THROW(createInvalidInParameterError(exec, baseValue)); 747 788 748 RETURN(jsBoolean(asObject(baseValue)->hasProperty(exec, exec->codeBlock()->identifier( pc[3].u.operand))));789 RETURN(jsBoolean(asObject(baseValue)->hasProperty(exec, exec->codeBlock()->identifier(bytecode.property)))); 749 790 } 750 791 … … 752 793 { 753 794 BEGIN(); 754 JSValue baseValue = OP_C(2).jsValue(); 795 auto bytecode = 
pc->as<OpDelByVal>(); 796 JSValue baseValue = GET_C(bytecode.base).jsValue(); 755 797 JSObject* baseObject = baseValue.toObject(exec); 756 798 CHECK_EXCEPTION(); 757 799 758 JSValue subscript = OP_C(3).jsValue();800 JSValue subscript = GET_C(bytecode.property).jsValue(); 759 801 760 802 bool couldDelete; … … 779 821 { 780 822 BEGIN(); 781 RETURN(jsStringFromRegisterArray(exec, &OP(2), pc[3].u.operand)); 823 auto bytecode = pc->as<OpStrcat>(); 824 RETURN(jsStringFromRegisterArray(exec, &GET(bytecode.src), bytecode.count)); 782 825 } 783 826 … … 785 828 { 786 829 BEGIN(); 787 RETURN(OP_C(2).jsValue().toPrimitive(exec)); 830 auto bytecode = pc->as<OpToPrimitive>(); 831 RETURN(GET_C(bytecode.src).jsValue().toPrimitive(exec)); 788 832 } 789 833 … … 799 843 { 800 844 BEGIN(); 801 JSValue enumeratorValue = OP(2).jsValue(); 845 auto bytecode = pc->as<OpGetEnumerableLength>(); 846 JSValue enumeratorValue = GET(bytecode.base).jsValue(); 802 847 if (enumeratorValue.isUndefinedOrNull()) 803 848 RETURN(jsNumber(0)); … … 811 856 { 812 857 BEGIN(); 813 JSObject* base = OP(2).jsValue().toObject(exec); 814 CHECK_EXCEPTION(); 815 JSValue property = OP(3).jsValue(); 816 arrayProfileFor<OpHasIndexedPropertyShape>(pc)->observeStructure(base->structure(vm)); 858 auto bytecode = pc->as<OpHasIndexedProperty>(); 859 auto& metadata = bytecode.metadata(exec); 860 JSObject* base = GET(bytecode.base).jsValue().toObject(exec); 861 CHECK_EXCEPTION(); 862 JSValue property = GET(bytecode.property).jsValue(); 863 metadata.arrayProfile.observeStructure(base->structure(vm)); 817 864 ASSERT(property.isUInt32()); 818 865 RETURN(jsBoolean(base->hasPropertyGeneric(exec, property.asUInt32(), PropertySlot::InternalMethodType::GetOwnProperty))); … … 822 869 { 823 870 BEGIN(); 824 JSObject* base = OP(2).jsValue().toObject(exec); 825 CHECK_EXCEPTION(); 826 JSValue property = OP(3).jsValue(); 871 auto bytecode = pc->as<OpHasStructureProperty>(); 872 JSObject* base = GET(bytecode.base).jsValue().toObject(exec); 873 CHECK_EXCEPTION(); 874 JSValue property = GET(bytecode.property).jsValue(); 827 875 ASSERT(property.isString()); 828 JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>( OP(4).jsValue().asCell());876 JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell()); 829 877 if (base->structure(vm)->id() == enumerator->cachedStructureID()) 830 878 RETURN(jsBoolean(true)); … … 838 886 { 839 887 BEGIN(); 840 JSObject* base = OP(2).jsValue().toObject(exec); 841 CHECK_EXCEPTION(); 842 JSValue property = OP(3).jsValue(); 888 auto bytecode = pc->as<OpHasGenericProperty>(); 889 JSObject* base = GET(bytecode.base).jsValue().toObject(exec); 890 CHECK_EXCEPTION(); 891 JSValue property = GET(bytecode.property).jsValue(); 843 892 ASSERT(property.isString()); 844 893 JSString* string = asString(property); … … 851 900 { 852 901 BEGIN(); 853 JSValue baseValue = OP_C(2).jsValue(); 854 JSValue property = OP(3).jsValue(); 902 auto bytecode = pc->as<OpGetDirectPname>(); 903 JSValue baseValue = GET_C(bytecode.base).jsValue(); 904 JSValue property = GET(bytecode.property).jsValue(); 855 905 ASSERT(property.isString()); 856 906 JSString* string = asString(property); … … 863 913 { 864 914 BEGIN(); 865 JSValue baseValue = OP(2).jsValue(); 915 auto bytecode = pc->as<OpGetPropertyEnumerator>(); 916 JSValue baseValue = GET(bytecode.base).jsValue(); 866 917 if (baseValue.isUndefinedOrNull()) 867 918 RETURN(JSPropertyNameEnumerator::create(vm)); … … 873 924 } 874 925 875 
-SLOW_PATH_DECL(slow_path_next_structure_enumerator_pname)
-{
-    BEGIN();
-    JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(OP(2).jsValue().asCell());
-    uint32_t index = OP(3).jsValue().asUInt32();
+SLOW_PATH_DECL(slow_path_enumerator_structure_pname)
+{
+    BEGIN();
+    auto bytecode = pc->as<OpEnumeratorStructurePname>();
+    JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell());
+    uint32_t index = GET(bytecode.index).jsValue().asUInt32();
 
     JSString* propertyName = nullptr;
…
 }
 
-SLOW_PATH_DECL(slow_path_next_generic_enumerator_pname)
-{
-    BEGIN();
-    JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(OP(2).jsValue().asCell());
-    uint32_t index = OP(3).jsValue().asUInt32();
+SLOW_PATH_DECL(slow_path_enumerator_generic_pname)
+{
+    BEGIN();
+    auto bytecode = pc->as<OpEnumeratorGenericPname>();
+    JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(GET(bytecode.enumerator).jsValue().asCell());
+    uint32_t index = GET(bytecode.index).jsValue().asUInt32();
 
     JSString* propertyName = nullptr;
…
 {
     BEGIN();
-    RETURN(jsString(exec, Identifier::from(exec, OP(2).jsValue().asUInt32()).string()));
+    auto bytecode = pc->as<OpToIndexString>();
+    RETURN(jsString(exec, Identifier::from(exec, GET(bytecode.index).jsValue().asUInt32()).string()));
 }
 
…
 {
     BEGIN();
-    int scopeReg = pc[2].u.operand;
+    auto bytecode = pc->as<OpCreateLexicalEnvironment>();
+    int scopeReg = bytecode.scope.offset();
     JSScope* currentScope = exec->uncheckedR(scopeReg).Register::scope();
-    SymbolTable* symbolTable = jsCast<SymbolTable*>(OP_C(3).jsValue());
-    JSValue initialValue = OP_C(4).jsValue();
+    SymbolTable* symbolTable = jsCast<SymbolTable*>(GET_C(bytecode.symbolTable).jsValue());
+    JSValue initialValue = GET_C(bytecode.initialValue).jsValue();
     ASSERT(initialValue == jsUndefined() || initialValue == jsTDZValue());
     JSScope* newScope = JSLexicalEnvironment::create(vm, exec->lexicalGlobalObject(), currentScope, symbolTable, initialValue);
…
 {
     BEGIN();
-    JSObject* newScope = OP_C(3).jsValue().toObject(exec);
-    CHECK_EXCEPTION();
-
-    int scopeReg = pc[2].u.operand;
+    auto bytecode = pc->as<OpPushWithScope>();
+    JSObject* newScope = GET_C(bytecode.newScope).jsValue().toObject(exec);
+    CHECK_EXCEPTION();
+
+    int scopeReg = bytecode.currentScope.offset();
     JSScope* currentScope = exec->uncheckedR(scopeReg).Register::scope();
     RETURN(JSWithScope::create(vm, exec->lexicalGlobalObject(), currentScope, newScope));
…
 {
     BEGIN();
-    const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
-    JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+    auto bytecode = pc->as<OpResolveScopeForHoistingFuncDeclInEval>();
+    const Identifier& ident = exec->codeBlock()->identifier(bytecode.property);
+    JSScope* scope = exec->uncheckedR(bytecode.scope.offset()).Register::scope();
     JSValue resolvedScope = JSScope::resolveScopeForHoistingFuncDeclInEval(exec, scope, ident);
…
 {
     BEGIN();
-    const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
-    JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+    auto bytecode = pc->as<OpResolveScope>();
+    auto& metadata = bytecode.metadata(exec);
+    const Identifier& ident = exec->codeBlock()->identifier(bytecode.var);
+    JSScope* scope = exec->uncheckedR(bytecode.scope.offset()).Register::scope();
     JSObject* resolvedScope = JSScope::resolve(exec, scope, ident);
     // Proxy can throw an error here, e.g. Proxy in with statement's @unscopables.
     CHECK_EXCEPTION();
 
-    ResolveType resolveType = static_cast<ResolveType>(pc[4].u.operand);
+    ResolveType resolveType = metadata.resolveType;
 
     // ModuleVar does not keep the scope register value alive in DFG.
…
             ConcurrentJSLocker locker(exec->codeBlock()->m_lock);
             if (resolveType == UnresolvedProperty)
-                pc[4].u.operand = GlobalProperty;
+                metadata.resolveType = GlobalProperty;
             else
-                pc[4].u.operand = GlobalPropertyWithVarInjectionChecks;
-
-            pc[6].u.pointer = globalObject;
+                metadata.resolveType = GlobalPropertyWithVarInjectionChecks;
+
+            metadata.globalObject = globalObject;
         }
     } else if (resolvedScope->isGlobalLexicalEnvironment()) {
…
             ConcurrentJSLocker locker(exec->codeBlock()->m_lock);
             if (resolveType == UnresolvedProperty)
-                pc[4].u.operand = GlobalLexicalVar;
+                metadata.resolveType = GlobalLexicalVar;
             else
-                pc[4].u.operand = GlobalLexicalVarWithVarInjectionChecks;
-            pc[6].u.pointer = globalLexicalEnvironment;
+                metadata.resolveType = GlobalLexicalVarWithVarInjectionChecks;
+            metadata.globalLexicalEnvironment = globalLexicalEnvironment;
         }
     }
…
 {
     BEGIN();
-    unsigned arraySize = OP_C(2).jsValue().asUInt32();
+    auto bytecode = pc->as<OpCreateRest>();
+    unsigned arraySize = GET_C(bytecode.arraySize).jsValue().asUInt32();
     JSGlobalObject* globalObject = exec->lexicalGlobalObject();
     Structure* structure = globalObject->restParameterStructure();
-    unsigned numParamsToSkip = pc[3].u.unsignedValue;
+    unsigned numParamsToSkip = bytecode.numParametersToSkip;
     JSValue* argumentsToCopyRegion = exec->addressOfArgumentsStart() + numParamsToSkip;
     RETURN(constructArray(exec, structure, argumentsToCopyRegion, arraySize));
…
 {
     BEGIN();
-    const Identifier& ident = exec->codeBlock()->identifier(pc[4].u.operand);
-    JSValue baseValue = OP_C(2).jsValue();
-    JSValue thisVal = OP_C(3).jsValue();
+    auto bytecode = pc->as<OpGetByIdWithThis>();
+    const Identifier& ident = exec->codeBlock()->identifier(bytecode.property);
+    JSValue baseValue = GET_C(bytecode.base).jsValue();
+    JSValue thisVal = GET_C(bytecode.thisValue).jsValue();
     PropertySlot slot(thisVal, PropertySlot::PropertySlot::InternalMethodType::Get);
     JSValue result = baseValue.get(exec, ident, slot);
-    RETURN_PROFILED(op_get_by_id_with_this, result);
+    RETURN_PROFILED(result);
 }
 
…
     BEGIN();
 
-    JSValue baseValue = OP_C(2).jsValue();
-    JSValue thisValue = OP_C(3).jsValue();
-    JSValue subscript = OP_C(4).jsValue();
+    auto bytecode = pc->as<OpGetByValWithThis>();
+    JSValue baseValue = GET_C(bytecode.base).jsValue();
+    JSValue thisValue = GET_C(bytecode.thisValue).jsValue();
+    JSValue subscript = GET_C(bytecode.property).jsValue();
 
     if (LIKELY(baseValue.isCell() && subscript.isString())) {
…
         if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
             if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
-                RETURN_PROFILED(op_get_by_val_with_this, result);
+                RETURN_PROFILED(result);
         }
     }
…
         uint32_t i = subscript.asUInt32();
         if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
-            RETURN_PROFILED(op_get_by_val_with_this, asString(baseValue)->getIndex(exec, i));
+            RETURN_PROFILED(asString(baseValue)->getIndex(exec, i));
 
-        RETURN_PROFILED(op_get_by_val_with_this, baseValue.get(exec, i, slot));
+        RETURN_PROFILED(baseValue.get(exec, i, slot));
     }
 
…
     auto property = subscript.toPropertyKey(exec);
     CHECK_EXCEPTION();
-    RETURN_PROFILED(op_get_by_val_with_this, baseValue.get(exec, property, slot));
+    RETURN_PROFILED(baseValue.get(exec, property, slot));
 }
 
…
 {
     BEGIN();
+    auto bytecode = pc->as<OpPutByIdWithThis>();
     CodeBlock* codeBlock = exec->codeBlock();
-    const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
-    JSValue baseValue = OP_C(1).jsValue();
-    JSValue thisVal = OP_C(2).jsValue();
-    JSValue putValue = OP_C(4).jsValue();
+    const Identifier& ident = codeBlock->identifier(bytecode.property);
+    JSValue baseValue = GET_C(bytecode.base).jsValue();
+    JSValue thisVal = GET_C(bytecode.thisValue).jsValue();
+    JSValue putValue = GET_C(bytecode.value).jsValue();
     PutPropertySlot slot(thisVal, codeBlock->isStrictMode(), codeBlock->putByIdContext());
     baseValue.putInline(exec, ident, putValue, slot);
…
 {
     BEGIN();
-    JSValue baseValue = OP_C(1).jsValue();
-    JSValue thisValue = OP_C(2).jsValue();
-    JSValue subscript = OP_C(3).jsValue();
-    JSValue value = OP_C(4).jsValue();
+    auto bytecode = pc->as<OpPutByValWithThis>();
+    JSValue baseValue = GET_C(bytecode.base).jsValue();
+    JSValue thisValue = GET_C(bytecode.thisValue).jsValue();
+    JSValue subscript = GET_C(bytecode.property).jsValue();
+    JSValue value = GET_C(bytecode.value).jsValue();
 
     auto property = subscript.toPropertyKey(exec);
…
 {
     BEGIN();
-    JSObject* base = asObject(OP_C(1).jsValue());
-    JSValue property = OP_C(2).jsValue();
-    JSValue value = OP_C(3).jsValue();
-    JSValue attributes = OP_C(4).jsValue();
+    auto bytecode = pc->as<OpDefineDataProperty>();
+    JSObject* base = asObject(GET_C(bytecode.base).jsValue());
+    JSValue property = GET_C(bytecode.property).jsValue();
+    JSValue value = GET_C(bytecode.value).jsValue();
+    JSValue attributes = GET_C(bytecode.attributes).jsValue();
     ASSERT(attributes.isInt32());
…
 {
     BEGIN();
-    JSObject* base = asObject(OP_C(1).jsValue());
-    JSValue property = OP_C(2).jsValue();
-    JSValue getter = OP_C(3).jsValue();
-    JSValue setter = OP_C(4).jsValue();
-    JSValue attributes = OP_C(5).jsValue();
+    auto bytecode = pc->as<OpDefineAccessorProperty>();
+    JSObject* base = asObject(GET_C(bytecode.base).jsValue());
+    JSValue property = GET_C(bytecode.property).jsValue();
+    JSValue getter = GET_C(bytecode.getter).jsValue();
+    JSValue setter = GET_C(bytecode.setter).jsValue();
+    JSValue attributes = GET_C(bytecode.attributes).jsValue();
     ASSERT(attributes.isInt32());
…
 {
     BEGIN();
-    JSValue errorMessageValue = OP_C(1).jsValue();
+    auto bytecode = pc->as<OpThrowStaticError>();
+    JSValue errorMessageValue = GET_C(bytecode.message).jsValue();
     RELEASE_ASSERT(errorMessageValue.isString());
     String errorMessage = asString(errorMessageValue)->value(exec);
-    ErrorType errorType = static_cast<ErrorType>(pc[2].u.unsignedValue);
+    ErrorType errorType = bytecode.errorType;
     THROW(createError(exec, errorType, errorMessage));
 }
…
 {
     BEGIN();
-    int numItems = pc[3].u.operand;
+    auto bytecode = pc->as<OpNewArrayWithSpread>();
+    int numItems = bytecode.argc;
     ASSERT(numItems >= 0);
-    const BitVector& bitVector = exec->codeBlock()->unlinkedCodeBlock()->bitVector(pc[4].u.unsignedValue);
+    const BitVector& bitVector = exec->codeBlock()->unlinkedCodeBlock()->bitVector(bytecode.bitVector);
 
-    JSValue* values = bitwise_cast<JSValue*>(&OP(2));
+    JSValue* values = bitwise_cast<JSValue*>(&GET(bytecode.argv));
 
     Checked<unsigned, RecordOverflow> checkedArraySize = 0;
…
 {
     BEGIN();
-    auto* newArrayBuffer = bitwise_cast<OpNewArrayBuffer*>(pc);
-    ASSERT(exec->codeBlock()->isConstantRegisterIndex(newArrayBuffer->immutableButterfly()));
-    JSImmutableButterfly* immutableButterfly = bitwise_cast<JSImmutableButterfly*>(GET_C(newArrayBuffer->immutableButterfly()).jsValue().asCell());
-    auto* profile = newArrayBuffer->profile();
-
-    IndexingType indexingMode = profile->selectIndexingType();
+    auto bytecode = pc->as<OpNewArrayBuffer>();
+    ASSERT(exec->codeBlock()->isConstantRegisterIndex(bytecode.immutableButterfly.offset()));
+    JSImmutableButterfly* immutableButterfly = bitwise_cast<JSImmutableButterfly*>(GET_C(bytecode.immutableButterfly).jsValue().asCell());
+    auto& profile = bytecode.metadata(exec).arrayAllocationProfile;
+
+    IndexingType indexingMode = profile.selectIndexingType();
     Structure* structure = exec->lexicalGlobalObject()->arrayStructureForIndexingTypeDuringAllocation(indexingMode);
     ASSERT(isCopyOnWrite(indexingMode));
…
         // a compilation thread.
         WTF::storeStoreFence();
-        codeBlock->constantRegister(newArrayBuffer->immutableButterfly()).set(vm, codeBlock, immutableButterfly);
+        codeBlock->constantRegister(bytecode.immutableButterfly.offset()).set(vm, codeBlock, immutableButterfly);
         WTF::storeStoreFence();
     }
 
     JSArray* result = CommonSlowPaths::allocateNewArrayBuffer(vm, structure, immutableButterfly);
     ASSERT(isCopyOnWrite(result->indexingMode()) || exec->lexicalGlobalObject()->isHavingABadTime());
-    ArrayAllocationProfile::updateLastAllocationFor(profile, result);
+    ArrayAllocationProfile::updateLastAllocationFor(&profile, result);
     RETURN(result);
 }
…
     BEGIN();
 
-    JSValue iterable = OP_C(2).jsValue();
+    auto bytecode = pc->as<OpSpread>();
+    JSValue iterable = GET_C(bytecode.argument).jsValue();
 
     if (iterable.isCell() && isJSArray(iterable.asCell())) {
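Every hunk above makes the same mechanical change: an untyped slot read such as OP(2) or pc[3].u.operand becomes a single decode into a typed operand struct (pc->as<OpFoo>()) followed by named-field access, so each slow path states which operand it means rather than which slot it happens to occupy. A minimal, self-contained sketch of that access pattern follows; Instruction, OpAdd and VirtualRegister here are stand-ins, not the real generated JSC structs:

    #include <cstdint>
    #include <iostream>

    // Stand-in for JSC's VirtualRegister: an index into the call frame.
    struct VirtualRegister {
        int m_offset;
        int offset() const { return m_offset; }
    };

    struct OpAdd {
        VirtualRegister dst, lhs, rhs;
        // Build the typed view from the raw operand bytes.
        static OpAdd decode(const uint8_t* stream)
        {
            return { { stream[0] }, { stream[1] }, { stream[2] } };
        }
    };

    // Stand-in instruction: one opcode byte plus narrow one-byte operands.
    struct Instruction {
        uint8_t opcodeID;
        uint8_t operands[3];

        // Decode the raw stream into a typed operand struct, once, up front.
        template<typename Op>
        Op as() const { return Op::decode(operands); }
    };

    int main()
    {
        Instruction pc { /* opcodeID */ 42, { 1, 2, 3 } };
        // Old style: int lhs = pc[2].u.operand; -- easy to index the wrong slot.
        // New style: decode once, then use named fields.
        auto bytecode = pc.as<OpAdd>();
        std::cout << bytecode.lhs.offset() << std::endl; // prints 2
    }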
trunk/Source/JavaScriptCore/runtime/CommonSlowPaths.h
r237486 r237547
 #pragma once
 
+#include "BytecodeStructs.h"
 #include "CodeBlock.h"
 #include "CodeSpecializationKind.h"
…
 
 inline void tryCachePutToScopeGlobal(
-    ExecState* exec, CodeBlock* codeBlock, Instruction* pc, JSObject* scope,
-    GetPutInfo getPutInfo, PutPropertySlot& slot, const Identifier& ident)
+    ExecState* exec, CodeBlock* codeBlock, OpPutToScope& bytecode, JSObject* scope,
+    PutPropertySlot& slot, const Identifier& ident)
 {
     // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
-    ResolveType resolveType = getPutInfo.resolveType();
+    auto& metadata = bytecode.metadata(exec);
+    ResolveType resolveType = metadata.getPutInfo.resolveType();
     if (resolveType != GlobalProperty && resolveType != GlobalPropertyWithVarInjectionChecks
         && resolveType != UnresolvedProperty && resolveType != UnresolvedPropertyWithVarInjectionChecks)
…
         ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalProperty : GlobalPropertyWithVarInjectionChecks;
         resolveType = newResolveType;
-        getPutInfo = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode());
         ConcurrentJSLocker locker(codeBlock->m_lock);
-        pc[4].u.operand = getPutInfo.operand();
+        metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode());
     } else if (scope->isGlobalLexicalEnvironment()) {
         JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope);
         ResolveType newResolveType = resolveType == UnresolvedProperty ? GlobalLexicalVar : GlobalLexicalVarWithVarInjectionChecks;
-        pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand();
+        metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode());
         SymbolTableEntry entry = globalLexicalEnvironment->symbolTable()->get(ident.impl());
         ASSERT(!entry.isNull());
         ConcurrentJSLocker locker(codeBlock->m_lock);
-        pc[5].u.watchpointSet = entry.watchpointSet();
-        pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot());
+        metadata.watchpointSet = entry.watchpointSet();
+        metadata.operand = reinterpret_cast<uintptr_t>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot());
     }
 }
…
 
         ConcurrentJSLocker locker(codeBlock->m_lock);
-        pc[5].u.structure.set(vm, codeBlock, scope->structure(vm));
-        pc[6].u.operand = slot.cachedOffset();
+        metadata.structure.set(vm, codeBlock, scope->structure(vm));
+        metadata.operand = slot.cachedOffset();
     }
 }
 
 inline void tryCacheGetFromScopeGlobal(
-    ExecState* exec, VM& vm, Instruction* pc, JSObject* scope, PropertySlot& slot, const Identifier& ident)
+    ExecState* exec, VM& vm, OpGetFromScope& bytecode, JSObject* scope, PropertySlot& slot, const Identifier& ident)
 {
-    GetPutInfo getPutInfo(pc[4].u.operand);
-    ResolveType resolveType = getPutInfo.resolveType();
+    auto& metadata = bytecode.metadata(exec);
+    ResolveType resolveType = metadata.getPutInfo.resolveType();
 
     if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
…
         resolveType = newResolveType; // Allow below caching mechanism to kick in.
         ConcurrentJSLocker locker(exec->codeBlock()->m_lock);
-        pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand();
+        metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode());
     } else if (scope->isGlobalLexicalEnvironment()) {
         JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(scope);
…
         ASSERT(!entry.isNull());
         ConcurrentJSLocker locker(exec->codeBlock()->m_lock);
-        pc[4].u.operand = GetPutInfo(getPutInfo.resolveMode(), newResolveType, getPutInfo.initializationMode()).operand();
-        pc[5].u.watchpointSet = entry.watchpointSet();
-        pc[6].u.pointer = static_cast<void*>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot());
+        metadata.getPutInfo = GetPutInfo(metadata.getPutInfo.resolveMode(), newResolveType, metadata.getPutInfo.initializationMode());
+        metadata.watchpointSet = entry.watchpointSet();
+        metadata.operand = reinterpret_cast<uintptr_t>(globalLexicalEnvironment->variableAt(entry.scopeOffset()).slot());
     }
 }
…
     {
         ConcurrentJSLocker locker(codeBlock->m_lock);
-        pc[5].u.structure.set(vm, codeBlock, structure);
-        pc[6].u.operand = slot.cachedOffset();
+        metadata.structure.set(vm, codeBlock, structure);
+        metadata.operand = slot.cachedOffset();
     }
     structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
…
 
 #define SLOW_PATH_DECL(name) \
-    extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, Instruction* pc)
+    extern "C" SlowPathReturnType SLOW_PATH name(ExecState* exec, const Instruction* pc)
 
 #define SLOW_PATH_HIDDEN_DECL(name) \
…
 SLOW_PATH_HIDDEN_DECL(slow_path_get_direct_pname);
 SLOW_PATH_HIDDEN_DECL(slow_path_get_property_enumerator);
-SLOW_PATH_HIDDEN_DECL(slow_path_next_structure_enumerator_pname);
-SLOW_PATH_HIDDEN_DECL(slow_path_next_generic_enumerator_pname);
+SLOW_PATH_HIDDEN_DECL(slow_path_enumerator_structure_pname);
+SLOW_PATH_HIDDEN_DECL(slow_path_enumerator_generic_pname);
 SLOW_PATH_HIDDEN_DECL(slow_path_to_index_string);
 SLOW_PATH_HIDDEN_DECL(slow_path_profile_type_clear_log);
…
 SLOW_PATH_HIDDEN_DECL(slow_path_spread);
 
-using SlowPathFunction = SlowPathReturnType(SLOW_PATH *)(ExecState*, Instruction*);
+using SlowPathFunction = SlowPathReturnType(SLOW_PATH *)(ExecState*, const Instruction*);
 
 } // namespace JSC
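The signature changes above (OpPutToScope& and OpGetFromScope& in place of Instruction*) go hand in hand with the const Instruction* in SLOW_PATH_DECL: once the instruction stream is read-only, the mutable caching state that used to be written into pc[4..6] has to live somewhere else, which is what bytecode.metadata(exec) provides. A hedged sketch of that split, with every type a stand-in for the real generated metadata tables:

    #include <vector>

    enum class ResolveType { UnresolvedProperty, GlobalProperty, GlobalLexicalVar };

    // Mutable per-instruction state, kept out of the (const) bytecode stream.
    struct OpResolveScopeMetadata {
        ResolveType resolveType { ResolveType::UnresolvedProperty };
        void* globalObject { nullptr };
    };

    struct CodeBlockSketch {
        // One entry per op_resolve_scope in this code block.
        std::vector<OpResolveScopeMetadata> resolveScopeMetadata;
    };

    // A slow path can now update its cache without touching the bytecode:
    void cacheResolveType(CodeBlockSketch& block, unsigned metadataIndex, ResolveType newType)
    {
        // was: pc[4].u.operand = GlobalProperty;
        block.resolveScopeMetadata[metadataIndex].resolveType = newType;
    }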
trunk/Source/JavaScriptCore/runtime/ExceptionFuzz.cpp
r237486 r237547
 
 // Call this only if you know that exception fuzzing is enabled.
-void doExceptionFuzzing(ExecState* exec, ThrowScope& scope, const char* where, void* returnPC)
+void doExceptionFuzzing(ExecState* exec, ThrowScope& scope, const char* where, const void* returnPC)
 {
     VM& vm = scope.vm();
trunk/Source/JavaScriptCore/runtime/ExceptionFuzz.h
r237486 r237547
 
 // Call this only if you know that exception fuzzing is enabled.
-void doExceptionFuzzing(ExecState*, ThrowScope&, const char* where, void* returnPC);
+void doExceptionFuzzing(ExecState*, ThrowScope&, const char* where, const void* returnPC);
 
 // This is what you should call if you don't know if fuzzing is enabled.
-ALWAYS_INLINE void doExceptionFuzzingIfEnabled(ExecState* exec, ThrowScope& scope, const char* where, void* returnPC)
+ALWAYS_INLINE void doExceptionFuzzingIfEnabled(ExecState* exec, ThrowScope& scope, const char* where, const void* returnPC)
 {
     if (LIKELY(!Options::useExceptionFuzz()))
trunk/Source/JavaScriptCore/runtime/GetPutInfo.cpp
r237546 r237547
 /*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
…
  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
 #include "config.h"
-#include "SpecialPointer.h"
+#include "GetPutInfo.h"
 
-#include "CodeBlock.h"
-#include "JSGlobalObject.h"
-#include "JSCInlines.h"
+#include <wtf/PrintStream.h>
 
 namespace JSC {
 
-void* actualPointerFor(JSGlobalObject* globalObject, Special::Pointer pointer)
+void GetPutInfo::dump(PrintStream& out) const
 {
-    return globalObject->actualPointerFor(pointer);
-}
-
-void* actualPointerFor(CodeBlock* codeBlock, Special::Pointer pointer)
-{
-    return actualPointerFor(codeBlock->globalObject(), pointer);
+    out.print(operand(), "<", resolveMode(), "|", resolveType(), "|", initializationMode(), ">");
 }
 
 } // namespace JSC
 
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::ResolveMode mode)
+{
+    out.print(resolveModeName(mode));
+}
+
+void printInternal(PrintStream& out, JSC::ResolveType type)
+{
+    out.print(resolveTypeName(type));
+}
+
+void printInternal(PrintStream& out, JSC::InitializationMode mode)
+{
+    out.print(initializationModeName(mode));
+}
+
+} // namespace WTF
trunk/Source/JavaScriptCore/runtime/GetPutInfo.h
r237486 r237547
 /*
- * Copyright (C) 2015 Apple Inc. All Rights Reserved.
+ * Copyright (C) 2015-2018 Apple Inc. All Rights Reserved.
  *
  * Redistribution and use in source and binary forms, with or without
…
     InitializationMode initializationMode() const { return static_cast<InitializationMode>((m_operand & initializationBits) >> initializationShift); }
     ResolveMode resolveMode() const { return static_cast<ResolveMode>((m_operand & modeBits) >> modeShift); }
-    unsigned operand() { return m_operand; }
+    unsigned operand() const { return m_operand; }
+
+    void dump(PrintStream&) const;
 
 private:
…
 
 } // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::ResolveMode);
+void printInternal(PrintStream&, JSC::ResolveType);
+void printInternal(PrintStream&, JSC::InitializationMode);
+
+} // namespace WTF
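With dump() and the printInternal overloads in place, a GetPutInfo operand can render as operand<resolveMode|resolveType|initializationMode> in bytecode dumps. The underlying pattern is small enough to show standalone: give the enum a name function, then hook it into the stream type so it prints by value. The iostream analogue below is a sketch; ResolveMode and its names are stand-ins for the real JSC enums:

    #include <iostream>

    enum class ResolveMode { Normal, NotThrowIfNotFound };

    const char* resolveModeName(ResolveMode mode)
    {
        return mode == ResolveMode::Normal ? "Normal" : "NotThrowIfNotFound";
    }

    // Analogue of WTF::printInternal: teach the stream about the enum once,
    // and every dump() can then print it by value.
    std::ostream& operator<<(std::ostream& out, ResolveMode mode)
    {
        return out << resolveModeName(mode);
    }

    int main()
    {
        std::cout << ResolveMode::Normal << '\n'; // prints "Normal"
    }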
trunk/Source/JavaScriptCore/runtime/JSCPoison.h
r237486 r237547
 
 using PoisonedClassInfoPtr = Poisoned<GlobalDataPoison, const ClassInfo*>;
-using PoisonedMasmPtr = Poisoned<JITCodePoison, void*>;
+using PoisonedMasmPtr = Poisoned<JITCodePoison, const void*>;
 
 void initializePoison();
trunk/Source/JavaScriptCore/runtime/JSType.h
r237486 r237547
 
 } // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+
+void printInternal(PrintStream&, JSC::JSType);
+
+} // namespace WTF
trunk/Source/JavaScriptCore/runtime/SamplingProfiler.cpp
r237486 r237547
 #if HAVE(DLADDR)
     if (frameType == FrameType::C) {
-        auto demangled = WTF::StackTrace::demangle(cCodePC);
+        auto demangled = WTF::StackTrace::demangle(const_cast<void*>(cCodePC));
         if (demangled)
             return String(demangled->demangledName() ? demangled->demangledName() : demangled->mangledName());
trunk/Source/JavaScriptCore/runtime/SamplingProfiler.h
r237486 r237547
         { }
 
-        UnprocessedStackFrame(void* pc)
+        UnprocessedStackFrame(const void* pc)
             : cCodePC(pc)
         { }
…
         UnprocessedStackFrame() = default;
 
-        void* cCodePC { nullptr };
+        const void* cCodePC { nullptr };
         CalleeBits unverifiedCallee;
         CodeBlock* verifiedCodeBlock { nullptr };
…
 
         FrameType frameType { FrameType::Unknown };
-        void* cCodePC { nullptr };
+        const void* cCodePC { nullptr };
         ExecutableBase* executable { nullptr };
         JSObject* callee { nullptr };
trunk/Source/JavaScriptCore/runtime/SlowPathReturnType.h
r237486 r237547
 static_assert(sizeof(SlowPathReturnType) >= sizeof(void*) * 2, "SlowPathReturnType should fit in two machine registers");
 
-inline SlowPathReturnType encodeResult(void* a, void* b)
+inline SlowPathReturnType encodeResult(const void* a, const void* b)
 {
     SlowPathReturnType result;
…
 }
 
-inline void decodeResult(SlowPathReturnType result, void*& a, void*& b)
+inline void decodeResult(SlowPathReturnType result, const void*& a, const void*& b)
 {
     a = reinterpret_cast<void*>(result.a);
…
 typedef union {
     struct {
-        void* a;
-        void* b;
+        const void* a;
+        const void* b;
     } pair;
     int64_t i;
 } SlowPathReturnTypeEncoding;
 
-inline SlowPathReturnType encodeResult(void* a, void* b)
+inline SlowPathReturnType encodeResult(const void* a, const void* b)
 {
     SlowPathReturnTypeEncoding u;
…
 }
 
-inline void decodeResult(SlowPathReturnType result, void*& a, void*& b)
+inline void decodeResult(SlowPathReturnType result, const void*& a, const void*& b)
 {
     SlowPathReturnTypeEncoding u;
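encodeResult/decodeResult are the slow-path return convention: two pointers travel back to the interpreter packed into a value that fits in two machine registers, and they now carry const to match the const Instruction* flowing through the slow paths. A self-contained sketch of the struct-based variant (the layout here is illustrative; the real header also keeps a union-based encoding for 32-bit targets):

    #include <cassert>

    struct SlowPathReturnType {
        const void* a;
        const void* b;
    };

    inline SlowPathReturnType encodeResult(const void* a, const void* b)
    {
        return { a, b };
    }

    inline void decodeResult(SlowPathReturnType result, const void*& a, const void*& b)
    {
        a = result.a;
        b = result.b;
    }

    int main()
    {
        int x = 1, y = 2;
        const void* a = nullptr;
        const void* b = nullptr;
        decodeResult(encodeResult(&x, &y), a, b);
        assert(a == &x && b == &y); // round-trips both pointers
    }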
trunk/Source/JavaScriptCore/runtime/VM.h
r237486 r237547
     ExecState* callFrameForCatch;
     void* targetMachinePCForThrow;
-    Instruction* targetInterpreterPCForThrow;
+    const Instruction* targetInterpreterPCForThrow;
     uint32_t osrExitIndex;
     void* osrExitJumpDestination;
trunk/Source/JavaScriptCore/runtime/Watchdog.h
r237486 r237547
 
 #include <wtf/Lock.h>
+#include <wtf/MonotonicTime.h>
 #include <wtf/Ref.h>
 #include <wtf/ThreadSafeRefCounted.h>
trunk/Source/JavaScriptCore/tools/HeapVerifier.cpp
r237486 r237547
 #include "HeapVerifier.h"
 
-#include "CodeBlock.h"
+#include "CodeBlockInlines.h"
 #include "HeapIterationScope.h"
 #include "JSCInlines.h"
trunk/Source/WTF/ChangeLog
r237522 r237547
+2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>
+
+        New bytecode format for JSC
+        https://bugs.webkit.org/show_bug.cgi?id=187373
+        <rdar://problem/44186758>
+
+        Reviewed by Filip Pizlo.
+
+        * wtf/Forward.h: Fix WTF_LAZY_FOR_EACH_TERM on MSVC and add WTF_LAZY_HAS_REST to check whether
+        a macro was passed multiple arguments.
+        * wtf/Platform.h: Force ENABLE_JIT=false on all 32-bit platforms.
+        * wtf/Vector.h:
+        (WTF::minCapacity>::insertVector): Allow vectors with different overflow handlers to be passed to insertVector.
+
 2018-10-28  Geoffrey Garen  <ggaren@apple.com>
trunk/Source/WTF/wtf/Forward.h
r237486 r237547
 #define WTF_LAZY_JOIN_UNLAZE(A, B) A##B
 #define WTF_LAZY_JOIN(A, B) WTF_LAZY_JOIN_UNLAZE(A, B)
-#define WTF_LAZY_ARGUMENT_NUMBER(_1, _2, _3, _4, _5, _6, _7, N, ...) N
-#define WTF_LAZY_REVERSE_SEQUENCE() 7, 6, 5, 4, 3, 2, 1, 0
-#define WTF_LAZY_NUM_ARGS_(...) WTF_LAZY_ARGUMENT_NUMBER(__VA_ARGS__)
-#define WTF_LAZY_NUM_ARGS(...) WTF_LAZY_NUM_ARGS_(__VA_ARGS__, WTF_LAZY_REVERSE_SEQUENCE())
+#define WTF_LAZY_ARGUMENT_NUMBER(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
+#define WTF_LAZY_AUGMENT(...) unused, __VA_ARGS__
+#define WTF_LAZY_EXPAND(x) x
+#define WTF_LAZY_NUM_ARGS_(...) WTF_LAZY_EXPAND(WTF_LAZY_ARGUMENT_NUMBER(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0))
+#define WTF_LAZY_NUM_ARGS(...) WTF_LAZY_NUM_ARGS_(WTF_LAZY_AUGMENT(__VA_ARGS__))
 #define WTF_LAZY_FOR_EACH_TERM(F, ...) \
     WTF_LAZY_JOIN(WTF_LAZY_FOR_EACH_TERM_, WTF_LAZY_NUM_ARGS(__VA_ARGS__))(F, (__VA_ARGS__))
 #define WTF_LAZY_FIRST(_1, ...) _1
 #define WTF_LAZY_REST(_1, ...) (__VA_ARGS__)
+#define WTF_LAZY_REST_(_1, ...) __VA_ARGS__
+#define WTF_LAZY_CALL(F, ARG) F(ARG)
 #define WTF_LAZY_FOR_EACH_TERM_0(...)
-#define WTF_LAZY_FOR_EACH_TERM_1(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_0(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_2(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_1(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_3(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_2(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_4(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_3(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_5(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_4(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_6(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_5(F, WTF_LAZY_REST ARGS)
-#define WTF_LAZY_FOR_EACH_TERM_7(F, ARGS) F(WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_6(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_1(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_0(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_2(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_1(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_3(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_2(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_4(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_3(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_5(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_4(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_6(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_5(F, WTF_LAZY_REST ARGS)
+#define WTF_LAZY_FOR_EACH_TERM_7(F, ARGS) WTF_LAZY_CALL(F, WTF_LAZY_FIRST ARGS) WTF_LAZY_FOR_EACH_TERM_6(F, WTF_LAZY_REST ARGS)
 #define WTF_LAZY_DECLARE_ALIAS_AND_TYPE(ALIAS_AND_TYPE) typename ALIAS_AND_TYPE,
 #define WTF_LAZY_INSTANTIATE(...) \
…
     WTF_LAZY_FOR_EACH_TERM(WTF_LAZY_DECLARE_ALIAS_AND_TYPE, __VA_ARGS__) \
     typename = void>
+
+#define WTF_LAZY_HAS_REST_0(...)
+#define WTF_LAZY_HAS_REST_1(...)
+#define WTF_LAZY_HAS_REST_2 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_3 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_4 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_5 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_6 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_7 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST_8 WTF_LAZY_EXPAND
+#define WTF_LAZY_HAS_REST(...) \
+    WTF_LAZY_JOIN(WTF_LAZY_HAS_REST_, WTF_LAZY_NUM_ARGS(__VA_ARGS__))
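The counting trick is easiest to verify in isolation. WTF_LAZY_EXPAND forces an extra rescan so that MSVC's preprocessor, which otherwise forwards __VA_ARGS__ as a single token, splits the arguments before WTF_LAZY_ARGUMENT_NUMBER picks out the count; WTF_LAZY_AUGMENT prepends a dummy argument so the shifted sequence still lines up. The macros below are copied from the diff; only the test driver is new:

    #include <iostream>

    #define WTF_LAZY_ARGUMENT_NUMBER(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
    #define WTF_LAZY_AUGMENT(...) unused, __VA_ARGS__
    #define WTF_LAZY_EXPAND(x) x
    #define WTF_LAZY_NUM_ARGS_(...) WTF_LAZY_EXPAND(WTF_LAZY_ARGUMENT_NUMBER(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0))
    #define WTF_LAZY_NUM_ARGS(...) WTF_LAZY_NUM_ARGS_(WTF_LAZY_AUGMENT(__VA_ARGS__))

    int main()
    {
        std::cout << WTF_LAZY_NUM_ARGS(a) << '\n';       // 1
        std::cout << WTF_LAZY_NUM_ARGS(a, b, c) << '\n'; // 3
    }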
trunk/Source/WTF/wtf/Platform.h
r237486 r237547
 #endif /* !defined(USE_JSVALUE64) && !defined(USE_JSVALUE32_64) */
 
-/* The JIT is enabled by default on all x86, x86-64, ARM & MIPS platforms except ARMv7k. */
+/* The JIT is enabled by default on all x86-64 & ARM64 platforms. */
 #if !defined(ENABLE_JIT) \
-    && (CPU(X86) || CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)) \
+    && (CPU(X86_64) || CPU(ARM64)) \
     && !CPU(APPLE_ARMV7K)
 #define ENABLE_JIT 1
 #endif
 
-/* Cocoa ports should not use the jit on 32-bit ARM CPUs. */
-#if PLATFORM(COCOA) && (CPU(ARM) || CPU(APPLE_ARMV7K))
+/* Force C_LOOP for 32-bit builds. */
+#if USE(JSVALUE32_64)
 #undef ENABLE_JIT
 #define ENABLE_JIT 0
-#endif
-
-/* Disable the JIT for 32-bit Windows builds. */
-#if USE(JSVALUE32_64) && OS(WINDOWS)
-#undef ENABLE_JIT
-#define ENABLE_JIT 0
+#undef ENABLE_C_LOOP
+#define ENABLE_C_LOOP 1
 #endif
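Taken together, the two hunks reduce to one rule: 64-bit (JSVALUE64) targets on x86-64 or ARM64 may default the JIT on, and every 32-bit (JSVALUE32_64) target is forced onto the C loop interpreter. A rough freestanding restatement, using generic compiler defines in place of WebKit's CPU()/USE() macros, illustrative only:

    #include <iostream>

    #if !defined(__x86_64__) && !defined(__aarch64__)
    #define SKETCH_ENABLE_JIT 0 // 32-bit (or other) target: C loop only
    #define SKETCH_ENABLE_C_LOOP 1
    #else
    #define SKETCH_ENABLE_JIT 1 // x86-64/ARM64: JIT on by default
    #define SKETCH_ENABLE_C_LOOP 0
    #endif

    int main()
    {
        std::cout << "JIT: " << SKETCH_ENABLE_JIT << ", C loop: " << SKETCH_ENABLE_C_LOOP << '\n';
    }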
trunk/Source/WTF/wtf/Vector.h
r237486 r237547
     template<typename U> void insert(size_t position, const U*, size_t);
     template<typename U> void insert(size_t position, U&&);
-    template<typename U, size_t c> void insertVector(size_t position, const Vector<U, c>&);
+    template<typename U, size_t c, typename OH> void insertVector(size_t position, const Vector<U, c, OH>&);
 
     void remove(size_t position);
…
 template<typename T, size_t inlineCapacity, typename OverflowHandler, size_t minCapacity>
-template<typename U, size_t c>
-inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c>& val)
+template<typename U, size_t c, typename OH>
+inline void Vector<T, inlineCapacity, OverflowHandler, minCapacity>::insertVector(size_t position, const Vector<U, c, OH>& val)
 {
     insert(position, val.begin(), val.size());
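What the widened insertVector signature buys: the source vector's overflow handler becomes a free template parameter (OH), so a Vector using one handler can splice in a Vector using another, which previously did not compile. The stand-ins below sketch just the template shape; storage and the real insert logic are elided:

    #include <cstddef>

    struct CrashOnOverflow { };
    struct UnsafeVectorOverflow { };

    template<typename T, std::size_t inlineCapacity = 0, typename OverflowHandler = CrashOnOverflow>
    struct Vector {
        // ... storage elided ...
        template<typename U, std::size_t c, typename OH>
        void insertVector(std::size_t position, const Vector<U, c, OH>&)
        {
            // real code: insert(position, val.begin(), val.size());
        }
    };

    int main()
    {
        Vector<int> a;                          // CrashOnOverflow
        Vector<int, 8, UnsafeVectorOverflow> b; // different handler
        a.insertVector(0, b);                   // only compiles with the OH parameter
    }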
trunk/Source/cmake/WebKitFeatures.cmake
r237491 r237547
 set(_SETTING_WEBKIT_OPTIONS TRUE)
 
-if (WTF_CPU_X86_64 OR WTF_CPU_ARM64)
+if (WTF_CPU_ARM64 OR WTF_CPU_X86_64)
+    set(ENABLE_JIT_DEFAULT ON)
     set(ENABLE_FTL_DEFAULT ON)
-else ()
-    set(ENABLE_FTL_DEFAULT OFF)
-endif ()
-
-if (WTF_CPU_ARM OR WTF_CPU_ARM64 OR WTF_CPU_MIPS OR WTF_CPU_X86_64 OR WTF_CPU_X86)
-    set(ENABLE_JIT_DEFAULT ON)
     set(USE_SYSTEM_MALLOC_DEFAULT OFF)
     set(ENABLE_C_LOOP_DEFAULT OFF)
…
 else ()
     set(ENABLE_JIT_DEFAULT OFF)
+    set(ENABLE_FTL_DEFAULT OFF)
     set(USE_SYSTEM_MALLOC_DEFAULT ON)
     set(ENABLE_C_LOOP_DEFAULT ON)
trunk/Tools/ChangeLog
r237543 r237547
+2018-10-29  Tadeu Zagallo  <tzagallo@apple.com>
+
+        New bytecode format for JSC
+        https://bugs.webkit.org/show_bug.cgi?id=187373
+        <rdar://problem/44186758>
+
+        Reviewed by Filip Pizlo.
+
+        Do not force ENABLE_JIT=true when $forceCLoop is false.
+
+        * Scripts/build-jsc:
+
 2018-10-29  Claudio Saavedra  <csaavedra@igalia.com>
trunk/Tools/Scripts/build-jsc
r237536 r237547
 
 if (isCMakeBuild()) {
-    push @cmakeArgs, $forceCLoop ? " -DENABLE_JIT=OFF" : " -DENABLE_JIT=ON";
+    if ($forceCLoop) {
+        push @cmakeArgs, " -DENABLE_JIT=OFF"
+    }
     push @cmakeArgs, $ftlJIT ? " -DENABLE_FTL_JIT=ON" : " -DENABLE_FTL_JIT=OFF";
     if ($buildDir) {