Changeset 279049 in webkit
- Timestamp: Jun 19, 2021, 1:25:14 AM
- Location: trunk/Source/JavaScriptCore
- Files: 13 edited
trunk/Source/JavaScriptCore/ChangeLog
(r279048 → r279049)

+2021-06-19  Mark Lam  <mark.lam@apple.com>
+
+        [Revert r278576] Put the Baseline JIT prologue and op_loop_hint code in JIT thunks.
+        https://bugs.webkit.org/show_bug.cgi?id=226375
+
+        Not reviewed.
+
+        Suspect regresses Speedometer2.
+
+        * assembler/AbstractMacroAssembler.h:
+        (JSC::AbstractMacroAssembler::untagReturnAddress):
+        (JSC::AbstractMacroAssembler::untagReturnAddressWithoutExtraValidation): Deleted.
+        * assembler/MacroAssemblerARM64E.h:
+        (JSC::MacroAssemblerARM64E::untagReturnAddress):
+        (JSC::MacroAssemblerARM64E::untagReturnAddressWithoutExtraValidation): Deleted.
+        * assembler/MacroAssemblerARMv7.h:
+        * assembler/MacroAssemblerMIPS.h:
+        * bytecode/CodeBlock.h:
+        (JSC::CodeBlock::addressOfNumParameters):
+        (JSC::CodeBlock::offsetOfNumParameters):
+        (JSC::CodeBlock::offsetOfInstructionsRawPointer):
+        (JSC::CodeBlock::offsetOfNumCalleeLocals): Deleted.
+        (JSC::CodeBlock::offsetOfNumVars): Deleted.
+        (JSC::CodeBlock::offsetOfArgumentValueProfiles): Deleted.
+        (JSC::CodeBlock::offsetOfShouldAlwaysBeInlined): Deleted.
+        * jit/AssemblyHelpers.h:
+        (JSC::AssemblyHelpers::emitSaveCalleeSavesFor):
+        (JSC::AssemblyHelpers::emitSaveCalleeSavesForBaselineJIT): Deleted.
+        (JSC::AssemblyHelpers::emitRestoreCalleeSavesForBaselineJIT): Deleted.
+        * jit/JIT.cpp:
+        (JSC::JIT::compileAndLinkWithoutFinalizing):
+        (JSC::JIT::privateCompileExceptionHandlers):
+        (JSC::prologueGeneratorSelector): Deleted.
+        (JSC::JIT::prologueGenerator): Deleted.
+        (JSC::JIT::arityFixupPrologueGenerator): Deleted.
+        * jit/JIT.h:
+        * jit/JITInlines.h:
+        (JSC::JIT::emitNakedNearCall):
+        * jit/JITOpcodes.cpp:
+        (JSC::JIT::op_ret_handlerGenerator):
+        (JSC::JIT::emit_op_enter):
+        (JSC::JIT::op_enter_handlerGenerator):
+        (JSC::JIT::emit_op_loop_hint):
+        (JSC::JIT::emitSlow_op_loop_hint):
+        (JSC::JIT::op_enter_Generator): Deleted.
+        (JSC::JIT::op_enter_canBeOptimized_Generator): Deleted.
+        (JSC::JIT::op_enter_cannotBeOptimized_Generator): Deleted.
+        (JSC::JIT::op_loop_hint_Generator): Deleted.
+        * jit/JITOpcodes32_64.cpp:
+        (JSC::JIT::emit_op_enter):
+        * jit/ThunkGenerators.cpp:
+        (JSC::popThunkStackPreservesAndHandleExceptionGenerator):
+
 2021-06-19  Commit Queue  <commit-queue@webkit.org>
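For orientation, the code removed from jit/JIT.cpp below selected one of eight specialized Baseline prologue thunks by packing three CodeBlock properties into a 3-bit index. The following is a minimal standalone sketch of that selection scheme, for illustration only (it is not WebKit source; the helper name and thunk names mirror the removed JSC code):

    #include <array>
    #include <cstdio>

    // Same bit packing as the removed JSC::prologueGeneratorSelector().
    static unsigned prologueGeneratorSelector(bool doesProfiling, bool isConstructor, bool hasHugeFrame)
    {
        return doesProfiling << 2 | isConstructor << 1 | hasHugeFrame << 0;
    }

    int main()
    {
        // One thunk per (doesProfiling, isConstructor, hasHugeFrame) combination.
        std::array<const char*, 8> generators {
            "prologueGenerator0", "prologueGenerator1", "prologueGenerator2", "prologueGenerator3",
            "prologueGenerator4", "prologueGenerator5", "prologueGenerator6", "prologueGenerator7",
        };
        unsigned index = prologueGeneratorSelector(true, false, true); // profiling, not a constructor, huge frame
        std::printf("selected %s\n", generators[index]);               // prints "prologueGenerator5"
    }

After this revert, r279049 goes back to emitting the prologue and arity-fixup code inline in JIT::compileAndLinkWithoutFinalizing() instead of calling a shared per-combination thunk.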
trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h
(r278576 → r279049)

     ALWAYS_INLINE void tagReturnAddress() { }
     ALWAYS_INLINE void untagReturnAddress(RegisterID = RegisterID::InvalidGPRReg) { }
-    ALWAYS_INLINE void untagReturnAddressWithoutExtraValidation() { }
 
     ALWAYS_INLINE void tagPtr(PtrTag, RegisterID) { }
trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h
(r279029 → r279049)

     ALWAYS_INLINE void untagReturnAddress(RegisterID scratch = InvalidGPR)
     {
-        untagReturnAddressWithoutExtraValidation();
+        untagPtr(ARM64Registers::sp, ARM64Registers::lr);
         validateUntaggedPtr(ARM64Registers::lr, scratch);
-    }
-
-    ALWAYS_INLINE void untagReturnAddressWithoutExtraValidation()
-    {
-        untagPtr(ARM64Registers::sp, ARM64Registers::lr);
     }
 
trunk/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
(r278576 → r279049)

     }
 
-    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address dest)
-    {
-        load32(dest, dataTempRegister);
-
-        // Do the add.
-        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
-        if (armImm.isValid())
-            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
-        else {
-            move(imm, addressTempRegister);
-            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
-        }
-
-        store32(dataTempRegister, dest);
-        return Jump(makeBranch(cond));
-    }
-
     Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
     {
trunk/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
(r278576 → r279049)

     }
 
-    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, ImplicitAddress destAddress)
-    {
-        bool useAddrTempRegister = !(destAddress.offset >= -32768 && destAddress.offset <= 32767
-            && !m_fixedWidth);
-
-        if (useAddrTempRegister) {
-            m_assembler.lui(addrTempRegister, (destAddress.offset + 0x8000) >> 16);
-            m_assembler.addu(addrTempRegister, addrTempRegister, destAddress.base);
-        }
-
-        auto loadDest = [&] (RegisterID dest) {
-            if (useAddrTempRegister)
-                m_assembler.lw(dest, addrTempRegister, destAddress.offset);
-            else
-                m_assembler.lw(dest, destAddress.base, destAddress.offset);
-        };
-
-        auto storeDest = [&] (RegisterID src) {
-            if (useAddrTempRegister)
-                m_assembler.sw(src, addrTempRegister, destAddress.offset);
-            else
-                m_assembler.sw(src, destAddress.base, destAddress.offset);
-        };
-
-        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
-        if (cond == Overflow) {
-            if (m_fixedWidth) {
-                /*
-                    load    dest, dataTemp
-                    move    imm, immTemp
-                    xor     cmpTemp, dataTemp, immTemp
-                    addu    dataTemp, dataTemp, immTemp
-                    store   dataTemp, dest
-                    bltz    cmpTemp, No_overflow    # diff sign bit -> no overflow
-                    xor     cmpTemp, dataTemp, immTemp
-                    bgez    cmpTemp, No_overflow    # same sign big -> no overflow
-                    nop
-                    b       Overflow
-                    nop
-                    b       No_overflow
-                    nop
-                    nop
-                    nop
-                No_overflow:
-                */
-                loadDest(dataTempRegister);
-                move(imm, immTempRegister);
-                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
-                m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
-                storeDest(dataTempRegister);
-                m_assembler.bltz(cmpTempRegister, 9);
-                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
-                m_assembler.bgez(cmpTempRegister, 7);
-                m_assembler.nop();
-            } else {
-                loadDest(dataTempRegister);
-                if (imm.m_value >= 0 && imm.m_value <= 32767) {
-                    move(dataTempRegister, cmpTempRegister);
-                    m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
-                    m_assembler.bltz(cmpTempRegister, 9);
-                    storeDest(dataTempRegister);
-                    m_assembler.bgez(dataTempRegister, 7);
-                    m_assembler.nop();
-                } else if (imm.m_value >= -32768 && imm.m_value < 0) {
-                    move(dataTempRegister, cmpTempRegister);
-                    m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
-                    m_assembler.bgez(cmpTempRegister, 9);
-                    storeDest(dataTempRegister);
-                    m_assembler.bltz(cmpTempRegister, 7);
-                    m_assembler.nop();
-                } else {
-                    move(imm, immTempRegister);
-                    m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
-                    m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
-                    m_assembler.bltz(cmpTempRegister, 10);
-                    storeDest(dataTempRegister);
-                    m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
-                    m_assembler.bgez(cmpTempRegister, 7);
-                    m_assembler.nop();
-                }
-            }
-            return jump();
-        }
-        move(imm, immTempRegister);
-        loadDest(dataTempRegister);
-        add32(immTempRegister, dataTempRegister);
-        storeDest(dataTempRegister);
-        if (cond == Signed) {
-            // Check if dest is negative.
-            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
-            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == PositiveOrZero) {
-            // Check if dest is not negative.
-            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
-            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
-        }
-        if (cond == Zero)
-            return branchEqual(dataTempRegister, MIPSRegisters::zero);
-        if (cond == NonZero)
-            return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
-        ASSERT(0);
-        return Jump();
-    }
-
     Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
     {
trunk/Source/JavaScriptCore/bytecode/CodeBlock.h
(r278656 → r279049)

     unsigned* addressOfNumParameters() { return &m_numParameters; }
-
-    static ptrdiff_t offsetOfNumCalleeLocals() { return OBJECT_OFFSETOF(CodeBlock, m_numCalleeLocals); }
     static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
-    static ptrdiff_t offsetOfNumVars() { return OBJECT_OFFSETOF(CodeBlock, m_numVars); }
 
     CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
…
         return result;
     }
-
-    static ptrdiff_t offsetOfArgumentValueProfiles() { return OBJECT_OFFSETOF(CodeBlock, m_argumentValueProfiles); }
 
     ValueProfile& valueProfileForBytecodeIndex(BytecodeIndex);
…
 
     bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
 
     // This is intentionally public; it's the responsibility of anyone doing any
     // of the following to hold the lock:
…
     static ptrdiff_t offsetOfMetadataTable() { return OBJECT_OFFSETOF(CodeBlock, m_metadata); }
     static ptrdiff_t offsetOfInstructionsRawPointer() { return OBJECT_OFFSETOF(CodeBlock, m_instructionsRawPointer); }
-    static ptrdiff_t offsetOfShouldAlwaysBeInlined() { return OBJECT_OFFSETOF(CodeBlock, m_shouldAlwaysBeInlined); }
 
     bool loopHintsAreEligibleForFuzzingEarlyReturn()
trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h
(r278937 → r279049)

 
         const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
-        emitSaveCalleeSavesFor(calleeSaves);
-    }
-
-    void emitSaveCalleeSavesFor(const RegisterAtOffsetList* calleeSaves)
-    {
         RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters());
         unsigned registerCount = calleeSaves->size();
…
     }
 
-    void emitSaveCalleeSavesForBaselineJIT()
-    {
-        emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
-    }
-
     void emitSaveThenMaterializeTagRegisters()
     {
…
     {
         emitRestoreCalleeSavesFor(codeBlock());
-    }
-
-    void emitRestoreCalleeSavesForBaselineJIT()
-    {
-        emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
     }
 
trunk/Source/JavaScriptCore/jit/JIT.cpp
(r278656 → r279049)

 }
 
-#if ENABLE(EXTRA_CTI_THUNKS)
-#if CPU(ARM64) || (CPU(X86_64) && !OS(WINDOWS))
-// These are supported ports.
-#else
-// This is a courtesy reminder (and warning) that the implementation of EXTRA_CTI_THUNKS can
-// use up to 6 argument registers and/or 6/7 temp registers, and make use of ARM64 like
-// features. Hence, it may not work for many other ports without significant work. If you
-// plan on adding EXTRA_CTI_THUNKS support for your port, please remember to search the
-// EXTRA_CTI_THUNKS code for CPU(ARM64) and CPU(X86_64) conditional code, and add support
-// for your port there as well.
-#error "unsupported architecture"
-#endif
-#endif // ENABLE(EXTRA_CTI_THUNKS)
-
 Seconds totalBaselineCompileTime;
 Seconds totalDFGCompileTime;
…
 }
 
-#if ENABLE(DFG_JIT) && !ENABLE(EXTRA_CTI_THUNKS)
+#if ENABLE(DFG_JIT)
 void JIT::emitEnterOptimizationCheck()
 {
…
     skipOptimize.link(this);
 }
-#endif // ENABLE(DFG_JIT) && !ENABLE(EXTRA_CTI_THUNKS)
+#endif
 
 void JIT::emitNotifyWrite(WatchpointSet* set)
…
 }
 
-static inline unsigned prologueGeneratorSelector(bool doesProfiling, bool isConstructor, bool hasHugeFrame)
-{
-    return doesProfiling << 2 | isConstructor << 1 | hasHugeFrame << 0;
-}
-
-#define FOR_EACH_NON_PROFILING_PROLOGUE_GENERATOR(v) \
-    v(!doesProfiling, !isConstructor, !hasHugeFrame, prologueGenerator0, arityFixup_prologueGenerator0) \
-    v(!doesProfiling, !isConstructor,  hasHugeFrame, prologueGenerator1, arityFixup_prologueGenerator1) \
-    v(!doesProfiling,  isConstructor, !hasHugeFrame, prologueGenerator2, arityFixup_prologueGenerator2) \
-    v(!doesProfiling,  isConstructor,  hasHugeFrame, prologueGenerator3, arityFixup_prologueGenerator3)
-
-#if ENABLE(DFG_JIT)
-#define FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v) \
-    v( doesProfiling, !isConstructor, !hasHugeFrame, prologueGenerator4, arityFixup_prologueGenerator4) \
-    v( doesProfiling, !isConstructor,  hasHugeFrame, prologueGenerator5, arityFixup_prologueGenerator5) \
-    v( doesProfiling,  isConstructor, !hasHugeFrame, prologueGenerator6, arityFixup_prologueGenerator6) \
-    v( doesProfiling,  isConstructor,  hasHugeFrame, prologueGenerator7, arityFixup_prologueGenerator7)
-
-#else // not ENABLE(DFG_JIT)
-#define FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v)
-#endif // ENABLE(DFG_JIT)
-
-#define FOR_EACH_PROLOGUE_GENERATOR(v) \
-    FOR_EACH_NON_PROFILING_PROLOGUE_GENERATOR(v) \
-    FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v)
-
 void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
 {
…
 
     emitFunctionPrologue();
-
-#if !ENABLE(EXTRA_CTI_THUNKS)
     emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
 
…
     ASSERT(!m_bytecodeIndex);
     if (shouldEmitProfiling()) {
-        // If this is a constructor, then we want to put in a dummy profiling site (to
-        // keep things consistent) but we don't actually want to record the dummy value.
-        unsigned startArgument = m_codeBlock->isConstructor() ? 1 : 0;
-        for (unsigned argument = startArgument; argument < m_codeBlock->numParameters(); ++argument) {
+        for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
+            // If this is a constructor, then we want to put in a dummy profiling site (to
+            // keep things consistent) but we don't actually want to record the dummy value.
+            if (m_codeBlock->isConstructor() && !argument)
+                continue;
             int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
 #if USE(JSVALUE64)
…
         }
     }
-#else // ENABLE(EXTRA_CTI_THUNKS)
-    constexpr GPRReg codeBlockGPR = regT7;
-    ASSERT(!m_bytecodeIndex);
-
-    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
-    unsigned maxFrameSize = -frameTopOffset;
-
-    bool doesProfiling = (m_codeBlock->codeType() == FunctionCode) && shouldEmitProfiling();
-    bool isConstructor = m_codeBlock->isConstructor();
-    bool hasHugeFrame = maxFrameSize > Options::reservedZoneSize();
-
-    static constexpr ThunkGenerator generators[] = {
-#define USE_PROLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) name,
-        FOR_EACH_PROLOGUE_GENERATOR(USE_PROLOGUE_GENERATOR)
-#undef USE_PROLOGUE_GENERATOR
-    };
-    static constexpr unsigned numberOfGenerators = sizeof(generators) / sizeof(generators[0]);
-
-    move(TrustedImmPtr(m_codeBlock), codeBlockGPR);
-
-    unsigned generatorSelector = prologueGeneratorSelector(doesProfiling, isConstructor, hasHugeFrame);
-    RELEASE_ASSERT(generatorSelector < numberOfGenerators);
-    auto generator = generators[generatorSelector];
-    emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
-
-    Label bodyLabel(this);
-#endif // !ENABLE(EXTRA_CTI_THUNKS)
 
     RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
…
     m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
 
-#if !ENABLE(EXTRA_CTI_THUNKS)
     stackOverflow.link(this);
     m_bytecodeIndex = BytecodeIndex(0);
 
     addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
     callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
-#endif
 
     // If the number of parameters is 1, we never require arity fixup.
…
     if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
         m_arityCheck = label();
-#if !ENABLE(EXTRA_CTI_THUNKS)
         store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
         emitFunctionPrologue();
…
         emitNakedNearCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>());
 
-        jump(beginLabel);
-
-#else // ENABLE(EXTRA_CTI_THUNKS)
-        emitFunctionPrologue();
-
-        static_assert(codeBlockGPR == regT7);
-        ASSERT(!m_bytecodeIndex);
-
-        static constexpr ThunkGenerator generators[] = {
-#define USE_PROLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) arityFixupName,
-            FOR_EACH_PROLOGUE_GENERATOR(USE_PROLOGUE_GENERATOR)
-#undef USE_PROLOGUE_GENERATOR
-        };
-        static constexpr unsigned numberOfGenerators = sizeof(generators) / sizeof(generators[0]);
-
-        move(TrustedImmPtr(m_codeBlock), codeBlockGPR);
-
-        RELEASE_ASSERT(generatorSelector < numberOfGenerators);
-        auto generator = generators[generatorSelector];
-        RELEASE_ASSERT(generator);
-        emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
-
-        jump(bodyLabel);
-#endif // !ENABLE(EXTRA_CTI_THUNKS)
-
 #if ASSERT_ENABLED
         m_bytecodeIndex = BytecodeIndex(); // Reset this, in order to guard its use with ASSERTs.
 #endif
+
+        jump(beginLabel);
     } else
         m_arityCheck = entryLabel; // Never require arity fixup.
…
     ASSERT(m_jmpTable.isEmpty());
 
-#if !ENABLE(EXTRA_CTI_THUNKS)
     privateCompileExceptionHandlers();
-#endif
 
     if (m_disassembler)
…
     link();
 }
 
-#if ENABLE(EXTRA_CTI_THUNKS)
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::prologueGenerator(VM& vm, bool doesProfiling, bool isConstructor, bool hasHugeFrame, const char* thunkName)
-{
-    // This function generates the Baseline JIT's prologue code. It is not useable by other tiers.
-    constexpr GPRReg codeBlockGPR = regT7; // incoming.
-
-    constexpr int virtualRegisterSize = static_cast<int>(sizeof(Register));
-    constexpr int virtualRegisterSizeShift = 3;
-    static_assert((1 << virtualRegisterSizeShift) == virtualRegisterSize);
-
-    tagReturnAddress();
-
-    storePtr(codeBlockGPR, addressFor(CallFrameSlot::codeBlock));
-
-    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumCalleeLocals()), regT1);
-    if constexpr (maxFrameExtentForSlowPathCallInRegisters)
-        add32(TrustedImm32(maxFrameExtentForSlowPathCallInRegisters), regT1);
-    lshift32(TrustedImm32(virtualRegisterSizeShift), regT1);
-    neg64(regT1);
-#if ASSERT_ENABLED
-    Probe::Function probeFunction = [] (Probe::Context& context) {
-        CodeBlock* codeBlock = context.fp<CallFrame*>()->codeBlock();
-        int64_t frameTopOffset = stackPointerOffsetFor(codeBlock) * sizeof(Register);
-        RELEASE_ASSERT(context.gpr<intptr_t>(regT1) == frameTopOffset);
-    };
-    probe(tagCFunctionPtr<JITProbePtrTag>(probeFunction), nullptr);
-#endif
-
-    addPtr(callFrameRegister, regT1);
-
-    JumpList stackOverflow;
-    if (hasHugeFrame)
-        stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
-    stackOverflow.append(branchPtr(Above, AbsoluteAddress(vm.addressOfSoftStackLimit()), regT1));
-
-    // We'll be imminently returning with a `retab` (ARM64E's return with authentication
-    // using the B key) in the normal path (see MacroAssemblerARM64E's implementation of
-    // ret()), which will do validation. So, extra validation here is redundant and unnecessary.
-    untagReturnAddressWithoutExtraValidation();
-#if CPU(X86_64)
-    pop(regT2); // Save the return address.
-#endif
-    move(regT1, stackPointerRegister);
-    tagReturnAddress();
-    checkStackPointerAlignment();
-#if CPU(X86_64)
-    push(regT2); // Restore the return address.
-#endif
-
-    emitSaveCalleeSavesForBaselineJIT();
-    emitMaterializeTagCheckRegisters();
-
-    if (doesProfiling) {
-        constexpr GPRReg argumentValueProfileGPR = regT6;
-        constexpr GPRReg numParametersGPR = regT5;
-        constexpr GPRReg argumentGPR = regT4;
-
-        load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
-        loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfArgumentValueProfiles()), argumentValueProfileGPR);
-        if (isConstructor)
-            addPtr(TrustedImm32(sizeof(ValueProfile)), argumentValueProfileGPR);
-
-        int startArgument = CallFrameSlot::thisArgument + (isConstructor ? 1 : 0);
-        int startArgumentOffset = startArgument * virtualRegisterSize;
-        move(TrustedImm64(startArgumentOffset), argumentGPR);
-
-        add32(TrustedImm32(static_cast<int>(CallFrameSlot::thisArgument)), numParametersGPR);
-        lshift32(TrustedImm32(virtualRegisterSizeShift), numParametersGPR);
-
-        addPtr(callFrameRegister, argumentGPR);
-        addPtr(callFrameRegister, numParametersGPR);
-
-        Label loopStart(this);
-        Jump done = branchPtr(AboveOrEqual, argumentGPR, numParametersGPR);
-        {
-            load64(Address(argumentGPR), regT0);
-            store64(regT0, Address(argumentValueProfileGPR, OBJECT_OFFSETOF(ValueProfile, m_buckets)));
-
-            // The argument ValueProfiles are stored in a FixedVector. Hence, the
-            // address of the next profile can be trivially computed with an increment.
-            addPtr(TrustedImm32(sizeof(ValueProfile)), argumentValueProfileGPR);
-            addPtr(TrustedImm32(virtualRegisterSize), argumentGPR);
-            jump().linkTo(loopStart, this);
-        }
-        done.link(this);
-    }
-    ret();
-
-    stackOverflow.link(this);
-#if CPU(X86_64)
-    addPtr(TrustedImm32(1 * sizeof(CPURegister)), stackPointerRegister); // discard return address.
-#endif
-
-    uint32_t locationBits = CallSiteIndex(0).bits();
-    store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCountIncludingThis));
-
-    if (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-
-    setupArguments<decltype(operationThrowStackOverflowError)>(codeBlockGPR);
-    prepareCallOperation(vm);
-    MacroAssembler::Call operationCall = call(OperationPtrTag);
-    Jump handleExceptionJump = jump();
-
-    auto handler = vm.getCTIStub(handleExceptionWithCallFrameRollbackGenerator);
-
-    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
-    patchBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
-    patchBuffer.link(handleExceptionJump, CodeLocationLabel(handler.retaggedCode<NoPtrTag>()));
-    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
-}
-
-static constexpr bool doesProfiling = true;
-static constexpr bool isConstructor = true;
-static constexpr bool hasHugeFrame = true;
-
-#define DEFINE_PROGLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) \
-    MacroAssemblerCodeRef<JITThunkPtrTag> JIT::name(VM& vm) \
-    { \
-        JIT jit(vm); \
-        return jit.prologueGenerator(vm, doesProfiling, isConstructor, hasHugeFrame, "Baseline: " #name); \
-    }
-
-FOR_EACH_PROLOGUE_GENERATOR(DEFINE_PROGLOGUE_GENERATOR)
-#undef DEFINE_PROGLOGUE_GENERATOR
-
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::arityFixupPrologueGenerator(VM& vm, bool isConstructor, ThunkGenerator normalPrologueGenerator, const char* thunkName)
-{
-    // This function generates the Baseline JIT's prologue code. It is not useable by other tiers.
-    constexpr GPRReg codeBlockGPR = regT7; // incoming.
-    constexpr GPRReg numParametersGPR = regT6;
-
-    tagReturnAddress();
-#if CPU(X86_64)
-    push(framePointerRegister);
-#elif CPU(ARM64)
-    pushPair(framePointerRegister, linkRegister);
-#endif
-
-    storePtr(codeBlockGPR, addressFor(CallFrameSlot::codeBlock));
-    store8(TrustedImm32(0), Address(codeBlockGPR, CodeBlock::offsetOfShouldAlwaysBeInlined()));
-
-    load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
-    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
-    Jump noFixupNeeded = branch32(AboveOrEqual, regT1, numParametersGPR);
-
-    if constexpr (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
-
-    loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
-
-    static_assert(std::is_same<decltype(operationConstructArityCheck), decltype(operationCallArityCheck)>::value);
-    setupArguments<decltype(operationCallArityCheck)>(argumentGPR0);
-    prepareCallOperation(vm);
-
-    MacroAssembler::Call arityCheckCall = call(OperationPtrTag);
-    Jump handleExceptionJump = emitNonPatchableExceptionCheck(vm);
-
-    if constexpr (maxFrameExtentForSlowPathCall)
-        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
-    Jump needFixup = branchTest32(NonZero, returnValueGPR);
-    noFixupNeeded.link(this);
-
-    // The normal prologue expects incoming codeBlockGPR.
-    load64(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
-
-#if CPU(X86_64)
-    pop(framePointerRegister);
-#elif CPU(ARM64)
-    popPair(framePointerRegister, linkRegister);
-#endif
-    untagReturnAddress();
-
-    JumpList normalPrologueJump;
-    normalPrologueJump.append(jump());
-
-    needFixup.link(this);
-
-    // Restore the stack for arity fixup, and preserve the return address.
-    // arityFixupGenerator will be shifting the stack. So, we can't use the stack to
-    // preserve the return address. We also can't use callee saved registers because
-    // they haven't been saved yet.
-    //
-    // arityFixupGenerator is carefully crafted to only use a0, a1, a2, t3, t4 and t5.
-    // So, the return address can be preserved in regT7.
-#if CPU(X86_64)
-    pop(argumentGPR2); // discard.
-    pop(regT7); // save return address.
-#elif CPU(ARM64)
-    popPair(framePointerRegister, linkRegister);
-    untagReturnAddress();
-    move(linkRegister, regT7);
-    auto randomReturnAddressTag = random();
-    move(TrustedImm32(randomReturnAddressTag), regT1);
-    tagPtr(regT1, regT7);
-#endif
-    move(returnValueGPR, GPRInfo::argumentGPR0);
-    Call arityFixupCall = nearCall();
-
-#if CPU(X86_64)
-    push(regT7); // restore return address.
-#elif CPU(ARM64)
-    move(TrustedImm32(randomReturnAddressTag), regT1);
-    untagPtr(regT1, regT7);
-    move(regT7, linkRegister);
-#endif
-
-    load64(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
-    normalPrologueJump.append(jump());
-
-    auto arityCheckOperation = isConstructor ? operationConstructArityCheck : operationCallArityCheck;
-    auto arityFixup = vm.getCTIStub(arityFixupGenerator);
-    auto normalPrologue = vm.getCTIStub(normalPrologueGenerator);
-    auto exceptionHandler = vm.getCTIStub(popThunkStackPreservesAndHandleExceptionGenerator);
-
-    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
-    patchBuffer.link(arityCheckCall, FunctionPtr<OperationPtrTag>(arityCheckOperation));
-    patchBuffer.link(arityFixupCall, FunctionPtr(arityFixup.retaggedCode<NoPtrTag>()));
-    patchBuffer.link(normalPrologueJump, CodeLocationLabel(normalPrologue.retaggedCode<NoPtrTag>()));
-    patchBuffer.link(handleExceptionJump, CodeLocationLabel(exceptionHandler.retaggedCode<NoPtrTag>()));
-    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
-}
-
-#define DEFINE_ARITY_PROGLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) \
-    MacroAssemblerCodeRef<JITThunkPtrTag> JIT::arityFixupName(VM& vm) \
-    { \
-        JIT jit(vm); \
-        return jit.arityFixupPrologueGenerator(vm, isConstructor, name, "Baseline: " #arityFixupName); \
-    }
-
-FOR_EACH_PROLOGUE_GENERATOR(DEFINE_ARITY_PROGLOGUE_GENERATOR)
-#undef DEFINE_ARITY_PROGLOGUE_GENERATOR
-
-#endif // ENABLE(EXTRA_CTI_THUNKS)
 
 void JIT::link()
…
 }
 
+void JIT::privateCompileExceptionHandlers()
+{
 #if !ENABLE(EXTRA_CTI_THUNKS)
-void JIT::privateCompileExceptionHandlers()
-{
     if (!m_exceptionChecksWithCallFrameRollback.empty()) {
         m_exceptionChecksWithCallFrameRollback.link(this);
…
         jumpToExceptionHandler(vm());
     }
-}
-#endif // !ENABLE(EXTRA_CTI_THUNKS)
+#endif // ENABLE(EXTRA_CTI_THUNKS)
+}
 
 void JIT::doMainThreadPreparationBeforeCompile()
trunk/Source/JavaScriptCore/jit/JIT.h
(r278687 → r279049)

     }
 
-#if !ENABLE(EXTRA_CTI_THUNKS)
     void privateCompileExceptionHandlers();
-#endif
 
     void advanceToNextCheckpoint();
…
 #if ENABLE(EXTRA_CTI_THUNKS)
     // Thunk generators.
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator0(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator1(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator2(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator3(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator4(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator5(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator6(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator7(VM&);
-    MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator(VM&, bool doesProfiling, bool isConstructor, bool hasHugeFrame, const char* name);
-
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator0(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator1(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator2(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator3(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator4(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator5(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator6(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator7(VM&);
-    MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupPrologueGenerator(VM&, bool isConstructor, ThunkGenerator normalPrologueGenerator, const char* name);
-
     static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_del_by_id_prepareCallGenerator(VM&);
     static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_del_by_val_prepareCallGenerator(VM&);
…
 
     static MacroAssemblerCodeRef<JITThunkPtrTag> op_check_traps_handlerGenerator(VM&);
-
-    static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_canBeOptimized_Generator(VM&);
-    static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_cannotBeOptimized_Generator(VM&);
-    MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_Generator(VM&, bool canBeOptimized, const char* thunkName);
-
-#if ENABLE(DFG_JIT)
-    static MacroAssemblerCodeRef<JITThunkPtrTag> op_loop_hint_Generator(VM&);
-#endif
+    static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_handlerGenerator(VM&);
     static MacroAssemblerCodeRef<JITThunkPtrTag> op_ret_handlerGenerator(VM&);
     static MacroAssemblerCodeRef<JITThunkPtrTag> op_throw_handlerGenerator(VM&);
trunk/Source/JavaScriptCore/jit/JITInlines.h
(r278656 → r279049)

 ALWAYS_INLINE JIT::Call JIT::emitNakedNearCall(CodePtr<NoPtrTag> target)
 {
+    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
     Call nakedCall = nearCall();
     m_nearCalls.append(NearCallRecord(nakedCall, FunctionPtr<JSInternalPtrTag>(target.retagged<JSInternalPtrTag>())));
trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp
(r278656 → r279049)

 
     jit.checkStackPointerAlignment();
-    jit.emitRestoreCalleeSavesForBaselineJIT();
+    jit.emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
     jit.emitFunctionEpilogue();
     jit.ret();
…
 #else
     ASSERT(m_bytecodeIndex.offset() == 0);
+    constexpr GPRReg localsToInitGPR = argumentGPR0;
+    constexpr GPRReg canBeOptimizedGPR = argumentGPR4;
+
     unsigned localsToInit = count - CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
     RELEASE_ASSERT(localsToInit < count);
-    ThunkGenerator generator = canBeOptimized() ? op_enter_canBeOptimized_Generator : op_enter_cannotBeOptimized_Generator;
-    emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
+    move(TrustedImm32(localsToInit * sizeof(Register)), localsToInitGPR);
+    move(TrustedImm32(canBeOptimized()), canBeOptimizedGPR);
+    emitNakedNearCall(vm().getCTIStub(op_enter_handlerGenerator).retaggedCode<NoPtrTag>());
 #endif // ENABLE(EXTRA_CTI_THUNKS)
 }
 
 #if ENABLE(EXTRA_CTI_THUNKS)
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_Generator(VM& vm, bool canBeOptimized, const char* thunkName)
-{
+MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_handlerGenerator(VM& vm)
+{
+    JIT jit(vm);
+
 #if CPU(X86_64)
-    push(X86Registers::ebp);
+    jit.push(X86Registers::ebp);
 #elif CPU(ARM64)
-    tagReturnAddress();
-    pushPair(framePointerRegister, linkRegister);
+    jit.tagReturnAddress();
+    jit.pushPair(framePointerRegister, linkRegister);
 #endif
     // op_enter is always at bytecodeOffset 0.
-    store32(TrustedImm32(0), tagFor(CallFrameSlot::argumentCountIncludingThis));
+    jit.store32(TrustedImm32(0), tagFor(CallFrameSlot::argumentCountIncludingThis));
 
     constexpr GPRReg localsToInitGPR = argumentGPR0;
…
     constexpr GPRReg endGPR = argumentGPR2;
     constexpr GPRReg undefinedGPR = argumentGPR3;
-    constexpr GPRReg codeBlockGPR = argumentGPR4;
-
-    constexpr int virtualRegisterSizeShift = 3;
-    static_assert((1 << virtualRegisterSizeShift) == sizeof(Register));
-
-    loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
-    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumVars()), localsToInitGPR);
-    sub32(TrustedImm32(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()), localsToInitGPR);
-    lshift32(TrustedImm32(virtualRegisterSizeShift), localsToInitGPR);
+    constexpr GPRReg canBeOptimizedGPR = argumentGPR4;
 
     size_t startLocal = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
     int startOffset = virtualRegisterForLocal(startLocal).offset();
-    move(TrustedImm64(startOffset * sizeof(Register)), iteratorGPR);
-    sub64(iteratorGPR, localsToInitGPR, endGPR);
-
-    move(TrustedImm64(JSValue::encode(jsUndefined())), undefinedGPR);
-    auto initLoop = label();
-    Jump initDone = branch32(LessThanOrEqual, iteratorGPR, endGPR);
+    jit.move(TrustedImm64(startOffset * sizeof(Register)), iteratorGPR);
+    jit.sub64(iteratorGPR, localsToInitGPR, endGPR);
+
+    jit.move(TrustedImm64(JSValue::encode(jsUndefined())), undefinedGPR);
+    auto initLoop = jit.label();
+    Jump initDone = jit.branch32(LessThanOrEqual, iteratorGPR, endGPR);
     {
-        store64(undefinedGPR, BaseIndex(GPRInfo::callFrameRegister, iteratorGPR, TimesOne));
-        sub64(TrustedImm32(sizeof(Register)), iteratorGPR);
-        jump(initLoop);
+        jit.store64(undefinedGPR, BaseIndex(GPRInfo::callFrameRegister, iteratorGPR, TimesOne));
+        jit.sub64(TrustedImm32(sizeof(Register)), iteratorGPR);
+        jit.jump(initLoop);
     }
-    initDone.link(this);
-
-    // Implementing emitWriteBarrier(m_codeBlock).
-    Jump ownerIsRememberedOrInEden = barrierBranch(vm, codeBlockGPR, argumentGPR2);
-
-    setupArguments<decltype(operationWriteBarrierSlowPath)>(&vm, codeBlockGPR);
-    prepareCallOperation(vm);
-    Call operationWriteBarrierCall = call(OperationPtrTag);
-
-    if (canBeOptimized)
-        loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
-
-    ownerIsRememberedOrInEden.link(this);
+    initDone.link(&jit);
+
+    // emitWriteBarrier(m_codeBlock).
+    jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
+    Jump ownerIsRememberedOrInEden = jit.barrierBranch(vm, argumentGPR1, argumentGPR2);
+
+    jit.move(canBeOptimizedGPR, GPRInfo::numberTagRegister); // save.
+    jit.setupArguments<decltype(operationWriteBarrierSlowPath)>(&vm, argumentGPR1);
+    jit.prepareCallOperation(vm);
+    Call operationWriteBarrierCall = jit.call(OperationPtrTag);
+
+    jit.move(GPRInfo::numberTagRegister, canBeOptimizedGPR); // restore.
+    jit.move(TrustedImm64(JSValue::NumberTag), GPRInfo::numberTagRegister);
+    ownerIsRememberedOrInEden.link(&jit);
 
 #if ENABLE(DFG_JIT)
-    // Implementing emitEnterOptimizationCheck().
     Call operationOptimizeCall;
-    if (canBeOptimized) {
+    if (Options::useDFGJIT()) {
+        // emitEnterOptimizationCheck().
         JumpList skipOptimize;
 
-        skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(codeBlockGPR, CodeBlock::offsetOfJITExecuteCounter())));
-
-        copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
-
-        setupArguments<decltype(operationOptimize)>(&vm, TrustedImm32(0));
-        prepareCallOperation(vm);
-        operationOptimizeCall = call(OperationPtrTag);
-
-        skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
-        farJump(returnValueGPR, GPRInfo::callFrameRegister);
-
-        skipOptimize.link(this);
+        skipOptimize.append(jit.branchTest32(Zero, canBeOptimizedGPR));
+
+        jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
+        skipOptimize.append(jit.branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(argumentGPR1, CodeBlock::offsetOfJITExecuteCounter())));
+
+        jit.copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
+
+        jit.setupArguments<decltype(operationOptimize)>(&vm, TrustedImm32(0));
+        jit.prepareCallOperation(vm);
+        operationOptimizeCall = jit.call(OperationPtrTag);
+
+        skipOptimize.append(jit.branchTestPtr(Zero, returnValueGPR));
+        jit.farJump(returnValueGPR, GPRInfo::callFrameRegister);
+
+        skipOptimize.link(&jit);
     }
 #endif // ENABLE(DFG_JIT)
 
 #if CPU(X86_64)
-    pop(X86Registers::ebp);
+    jit.pop(X86Registers::ebp);
 #elif CPU(ARM64)
-    popPair(framePointerRegister, linkRegister);
-#endif
-    ret();
-
-    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
+    jit.popPair(framePointerRegister, linkRegister);
+#endif
+    jit.ret();
+
+    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
     patchBuffer.link(operationWriteBarrierCall, FunctionPtr<OperationPtrTag>(operationWriteBarrierSlowPath));
 #if ENABLE(DFG_JIT)
-    if (canBeOptimized)
+    if (Options::useDFGJIT())
         patchBuffer.link(operationOptimizeCall, FunctionPtr<OperationPtrTag>(operationOptimize));
 #endif
-    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
-}
-
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_canBeOptimized_Generator(VM& vm)
-{
-    JIT jit(vm);
-    constexpr bool canBeOptimized = true;
-    return jit.op_enter_Generator(vm, canBeOptimized, "Baseline: op_enter_canBeOptimized");
-}
-
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_cannotBeOptimized_Generator(VM& vm)
-{
-    JIT jit(vm);
-    constexpr bool canBeOptimized = false;
-    return jit.op_enter_Generator(vm, canBeOptimized, "Baseline: op_enter_cannotBeOptimized");
+    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: op_enter_handler");
 }
 #endif // ENABLE(EXTRA_CTI_THUNKS)
…
         store64(regT0, ptr);
     }
-#else
-    UNUSED_PARAM(instruction);
-#endif
-
-    // Emit the JIT optimization check:
+#endif
+
+    // Emit the JIT optimization check:
     if (canBeOptimized()) {
-        constexpr GPRReg codeBlockGPR = regT0;
-        loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
         addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
-            Address(codeBlockGPR, CodeBlock::offsetOfJITExecuteCounter())));
+            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
     }
 }
 
-void JIT::emitSlow_op_loop_hint(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
 #if ENABLE(DFG_JIT)
…
         linkAllSlowCases(iter);
 
-#if !ENABLE(EXTRA_CTI_THUNKS)
         copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);
 
…
         noOptimizedEntry.link(this);
 
-#else // ENABLE(EXTRA_CTI_THUNKS)
-        uint32_t bytecodeOffset = m_bytecodeIndex.offset();
-        ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
-        ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex) == instruction);
-
-        constexpr GPRReg bytecodeOffsetGPR = regT7;
-
-        move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
-        emitNakedNearCall(vm().getCTIStub(op_loop_hint_Generator).retaggedCode<NoPtrTag>());
-#endif // !ENABLE(EXTRA_CTI_THUNKS)
+        emitJumpSlowToHot(jump(), currentInstruction->size());
     }
-#endif // ENABLE(DFG_JIT)
+#else
+    UNUSED_PARAM(currentInstruction);
     UNUSED_PARAM(iter);
-    UNUSED_PARAM(instruction);
-}
-
-#if ENABLE(EXTRA_CTI_THUNKS)
-
-#if ENABLE(DFG_JIT)
-MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_loop_hint_Generator(VM& vm)
-{
-    // The thunk generated by this function can only work with the LLInt / Baseline JIT because
-    // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
-    // DFG/FTL may inline functions belonging to other globalObjects, which may not match
-    // CallFrame::codeBlock().
-    JIT jit(vm);
-
-    jit.tagReturnAddress();
-
-    constexpr GPRReg bytecodeOffsetGPR = regT7; // incoming.
-
-#if CPU(X86_64)
-    jit.push(framePointerRegister);
-#elif CPU(ARM64)
-    jit.pushPair(framePointerRegister, linkRegister);
-#endif
-
-    auto usedRegisters = RegisterSet::stubUnavailableRegisters();
-    usedRegisters.add(bytecodeOffsetGPR);
-    jit.copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, usedRegisters);
-
-    jit.store32(bytecodeOffsetGPR, CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));
-    jit.lshift32(TrustedImm32(BytecodeIndex::checkpointShift), bytecodeOffsetGPR);
-    jit.setupArguments<decltype(operationOptimize)>(TrustedImmPtr(&vm), bytecodeOffsetGPR);
-    jit.prepareCallOperation(vm);
-    Call operationCall = jit.call(OperationPtrTag);
-    Jump hasOptimizedEntry = jit.branchTestPtr(NonZero, returnValueGPR);
-
-#if CPU(X86_64)
-    jit.pop(framePointerRegister);
-#elif CPU(ARM64)
-    jit.popPair(framePointerRegister, linkRegister);
-#endif
-    jit.ret();
-
-    hasOptimizedEntry.link(&jit);
-#if CPU(X86_64)
-    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), stackPointerRegister);
-#elif CPU(ARM64)
-    jit.popPair(framePointerRegister, linkRegister);
-#endif
-    if (ASSERT_ENABLED) {
-        Jump ok = jit.branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
-        jit.abortWithReason(JITUnreasonableLoopHintJumpTarget);
-        ok.link(&jit);
-    }
-
-    jit.farJump(returnValueGPR, GPRInfo::callFrameRegister);
-
-    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
-    patchBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(operationOptimize));
-    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: op_loop_hint");
-}
-#endif // ENABLE(DFG_JIT)
-#endif // !ENABLE(EXTRA_CTI_THUNKS)
+#endif
+}
 
 void JIT::emit_op_check_traps(const Instruction*)
trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
(r278656 → r279049)

     // registers to zap stale pointers, to avoid unnecessarily prolonging
     // object lifetime and increasing GC pressure.
-    for (unsigned i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
+    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
         emitStore(virtualRegisterForLocal(i), jsUndefined());
 
trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp
(r278576 → r279049)

     CCallHelpers jit;
 
-    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), CCallHelpers::stackPointerRegister);
+#if CPU(X86_64)
+    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), X86Registers::esp);
+#elif CPU(ARM64)
+    jit.popPair(CCallHelpers::framePointerRegister, CCallHelpers::linkRegister);
+#endif
 
     CCallHelpers::Jump continuation = jit.jump();