Changeset 279049 in WebKit

Timestamp: Jun 19, 2021, 1:25:14 AM
Author: mark.lam@apple.com
Message:

[Revert r278576] Put the Baseline JIT prologue and op_loop_hint code in JIT thunks.
https://bugs.webkit.org/show_bug.cgi?id=226375

Not reviewed.

Suspected of regressing Speedometer2.

  • assembler/AbstractMacroAssembler.h:
    (JSC::AbstractMacroAssembler::untagReturnAddress):
    (JSC::AbstractMacroAssembler::untagReturnAddressWithoutExtraValidation): Deleted.
  • assembler/MacroAssemblerARM64E.h:
    (JSC::MacroAssemblerARM64E::untagReturnAddress):
    (JSC::MacroAssemblerARM64E::untagReturnAddressWithoutExtraValidation): Deleted.
  • assembler/MacroAssemblerARMv7.h:
  • assembler/MacroAssemblerMIPS.h:
  • bytecode/CodeBlock.h:
    (JSC::CodeBlock::addressOfNumParameters):
    (JSC::CodeBlock::offsetOfNumParameters):
    (JSC::CodeBlock::offsetOfInstructionsRawPointer):
    (JSC::CodeBlock::offsetOfNumCalleeLocals): Deleted.
    (JSC::CodeBlock::offsetOfNumVars): Deleted.
    (JSC::CodeBlock::offsetOfArgumentValueProfiles): Deleted.
    (JSC::CodeBlock::offsetOfShouldAlwaysBeInlined): Deleted.
  • jit/AssemblyHelpers.h:
    (JSC::AssemblyHelpers::emitSaveCalleeSavesFor):
    (JSC::AssemblyHelpers::emitSaveCalleeSavesForBaselineJIT): Deleted.
    (JSC::AssemblyHelpers::emitRestoreCalleeSavesForBaselineJIT): Deleted.
  • jit/JIT.cpp:
    (JSC::JIT::compileAndLinkWithoutFinalizing):
    (JSC::JIT::privateCompileExceptionHandlers):
    (JSC::prologueGeneratorSelector): Deleted.
    (JSC::JIT::prologueGenerator): Deleted.
    (JSC::JIT::arityFixupPrologueGenerator): Deleted.
  • jit/JIT.h:
  • jit/JITInlines.h:
    (JSC::JIT::emitNakedNearCall):
  • jit/JITOpcodes.cpp:
    (JSC::JIT::op_ret_handlerGenerator):
    (JSC::JIT::emit_op_enter):
    (JSC::JIT::op_enter_handlerGenerator):
    (JSC::JIT::emit_op_loop_hint):
    (JSC::JIT::emitSlow_op_loop_hint):
    (JSC::JIT::op_enter_Generator): Deleted.
    (JSC::JIT::op_enter_canBeOptimized_Generator): Deleted.
    (JSC::JIT::op_enter_cannotBeOptimized_Generator): Deleted.
    (JSC::JIT::op_loop_hint_Generator): Deleted.
  • jit/JITOpcodes32_64.cpp:
    (JSC::JIT::emit_op_enter):
  • jit/ThunkGenerators.cpp:
    (JSC::popThunkStackPreservesAndHandleExceptionGenerator):
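
For orientation: the reverted scheme (see the jit/JIT.cpp hunk below) chose among eight shared prologue thunks by packing three booleans into a 3-bit selector. A minimal standalone C++ sketch of that dispatch follows; the stand-in generators here are hypothetical placeholders for JSC's MacroAssemblerCodeRef-returning thunk generators:

    #include <cassert>
    #include <cstdio>

    // Stand-in for JSC's ThunkGenerator function-pointer type.
    using ThunkGenerator = void (*)();

    static void prologueGenerator0() { std::puts("!profiling !constructor !hugeFrame"); }
    static void prologueGenerator1() { std::puts("!profiling !constructor  hugeFrame"); }
    static void prologueGenerator2() { std::puts("!profiling  constructor !hugeFrame"); }
    static void prologueGenerator3() { std::puts("!profiling  constructor  hugeFrame"); }
    static void prologueGenerator4() { std::puts(" profiling !constructor !hugeFrame"); }
    static void prologueGenerator5() { std::puts(" profiling !constructor  hugeFrame"); }
    static void prologueGenerator6() { std::puts(" profiling  constructor !hugeFrame"); }
    static void prologueGenerator7() { std::puts(" profiling  constructor  hugeFrame"); }

    // Same bit packing as the reverted JSC::prologueGeneratorSelector():
    // profiling in bit 2, constructor in bit 1, huge frame in bit 0.
    static unsigned prologueGeneratorSelector(bool doesProfiling, bool isConstructor, bool hasHugeFrame)
    {
        return doesProfiling << 2 | isConstructor << 1 | hasHugeFrame << 0;
    }

    int main()
    {
        // Table order matches the FOR_EACH_PROLOGUE_GENERATOR macro in the reverted code.
        static constexpr ThunkGenerator generators[] = {
            prologueGenerator0, prologueGenerator1, prologueGenerator2, prologueGenerator3,
            prologueGenerator4, prologueGenerator5, prologueGenerator6, prologueGenerator7,
        };
        unsigned selector = prologueGeneratorSelector(true, false, true); // 0b101 == 5
        assert(selector < sizeof(generators) / sizeof(generators[0]));
        generators[selector](); // prints the profiling, non-constructor, huge-frame variant
        return 0;
    }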

Location: trunk/Source/JavaScriptCore
Files: 13 edited

  • trunk/Source/JavaScriptCore/ChangeLog

    r279048 r279049
    +2021-06-19  Mark Lam  <mark.lam@apple.com>
    +
    +        [Revert r278576] Put the Baseline JIT prologue and op_loop_hint code in JIT thunks.
    +        https://bugs.webkit.org/show_bug.cgi?id=226375
    +
    +        Not reviewed.
    +
    +        Suspect regresses Speedometer2.
    +
    +        * assembler/AbstractMacroAssembler.h:
    +        (JSC::AbstractMacroAssembler::untagReturnAddress):
    +        (JSC::AbstractMacroAssembler::untagReturnAddressWithoutExtraValidation): Deleted.
    +        * assembler/MacroAssemblerARM64E.h:
    +        (JSC::MacroAssemblerARM64E::untagReturnAddress):
    +        (JSC::MacroAssemblerARM64E::untagReturnAddressWithoutExtraValidation): Deleted.
    +        * assembler/MacroAssemblerARMv7.h:
    +        * assembler/MacroAssemblerMIPS.h:
    +        * bytecode/CodeBlock.h:
    +        (JSC::CodeBlock::addressOfNumParameters):
    +        (JSC::CodeBlock::offsetOfNumParameters):
    +        (JSC::CodeBlock::offsetOfInstructionsRawPointer):
    +        (JSC::CodeBlock::offsetOfNumCalleeLocals): Deleted.
    +        (JSC::CodeBlock::offsetOfNumVars): Deleted.
    +        (JSC::CodeBlock::offsetOfArgumentValueProfiles): Deleted.
    +        (JSC::CodeBlock::offsetOfShouldAlwaysBeInlined): Deleted.
    +        * jit/AssemblyHelpers.h:
    +        (JSC::AssemblyHelpers::emitSaveCalleeSavesFor):
    +        (JSC::AssemblyHelpers::emitSaveCalleeSavesForBaselineJIT): Deleted.
    +        (JSC::AssemblyHelpers::emitRestoreCalleeSavesForBaselineJIT): Deleted.
    +        * jit/JIT.cpp:
    +        (JSC::JIT::compileAndLinkWithoutFinalizing):
    +        (JSC::JIT::privateCompileExceptionHandlers):
    +        (JSC::prologueGeneratorSelector): Deleted.
    +        (JSC::JIT::prologueGenerator): Deleted.
    +        (JSC::JIT::arityFixupPrologueGenerator): Deleted.
    +        * jit/JIT.h:
    +        * jit/JITInlines.h:
    +        (JSC::JIT::emitNakedNearCall):
    +        * jit/JITOpcodes.cpp:
    +        (JSC::JIT::op_ret_handlerGenerator):
    +        (JSC::JIT::emit_op_enter):
    +        (JSC::JIT::op_enter_handlerGenerator):
    +        (JSC::JIT::emit_op_loop_hint):
    +        (JSC::JIT::emitSlow_op_loop_hint):
    +        (JSC::JIT::op_enter_Generator): Deleted.
    +        (JSC::JIT::op_enter_canBeOptimized_Generator): Deleted.
    +        (JSC::JIT::op_enter_cannotBeOptimized_Generator): Deleted.
    +        (JSC::JIT::op_loop_hint_Generator): Deleted.
    +        * jit/JITOpcodes32_64.cpp:
    +        (JSC::JIT::emit_op_enter):
    +        * jit/ThunkGenerators.cpp:
    +        (JSC::popThunkStackPreservesAndHandleExceptionGenerator):
    +
     2021-06-19  Commit Queue  <commit-queue@webkit.org>
  • trunk/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h

    r278576 r279049
         ALWAYS_INLINE void tagReturnAddress() { }
         ALWAYS_INLINE void untagReturnAddress(RegisterID = RegisterID::InvalidGPRReg) { }
    -    ALWAYS_INLINE void untagReturnAddressWithoutExtraValidation() { }

         ALWAYS_INLINE void tagPtr(PtrTag, RegisterID) { }
  • trunk/Source/JavaScriptCore/assembler/MacroAssemblerARM64E.h

    r279029 r279049
         ALWAYS_INLINE void untagReturnAddress(RegisterID scratch = InvalidGPR)
         {
    -        untagReturnAddressWithoutExtraValidation();
    +        untagPtr(ARM64Registers::sp, ARM64Registers::lr);
             validateUntaggedPtr(ARM64Registers::lr, scratch);
    -    }
    -
    -    ALWAYS_INLINE void untagReturnAddressWithoutExtraValidation()
    -    {
    -        untagPtr(ARM64Registers::sp, ARM64Registers::lr);
         }

  • trunk/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h

    r278576 r279049
         }

    -    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, Address dest)
    -    {
    -        load32(dest, dataTempRegister);
    -
    -        // Do the add.
    -        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
    -        if (armImm.isValid())
    -            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
    -        else {
    -            move(imm, addressTempRegister);
    -            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
    -        }
    -
    -        store32(dataTempRegister, dest);
    -        return Jump(makeBranch(cond));
    -    }
    -
         Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
         {
  • trunk/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h

    r278576 r279049
         }

    -    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, ImplicitAddress destAddress)
    -    {
    -        bool useAddrTempRegister = !(destAddress.offset >= -32768 && destAddress.offset <= 32767
    -            && !m_fixedWidth);
    -
    -        if (useAddrTempRegister) {
    -            m_assembler.lui(addrTempRegister, (destAddress.offset + 0x8000) >> 16);
    -            m_assembler.addu(addrTempRegister, addrTempRegister, destAddress.base);
    -        }
    -
    -        auto loadDest = [&] (RegisterID dest) {
    -            if (useAddrTempRegister)
    -                m_assembler.lw(dest, addrTempRegister, destAddress.offset);
    -            else
    -                m_assembler.lw(dest, destAddress.base, destAddress.offset);
    -        };
    -
    -        auto storeDest = [&] (RegisterID src) {
    -            if (useAddrTempRegister)
    -                m_assembler.sw(src, addrTempRegister, destAddress.offset);
    -            else
    -                m_assembler.sw(src, destAddress.base, destAddress.offset);
    -        };
    -
    -        ASSERT((cond == Overflow) || (cond == Signed) || (cond == PositiveOrZero) || (cond == Zero) || (cond == NonZero));
    -        if (cond == Overflow) {
    -            if (m_fixedWidth) {
    -                /*
    -                    load    dest, dataTemp
    -                    move    imm, immTemp
    -                    xor     cmpTemp, dataTemp, immTemp
    -                    addu    dataTemp, dataTemp, immTemp
    -                    store   dataTemp, dest
    -                    bltz    cmpTemp, No_overflow    # diff sign bit -> no overflow
    -                    xor     cmpTemp, dataTemp, immTemp
    -                    bgez    cmpTemp, No_overflow    # same sign big -> no overflow
    -                    nop
    -                    b       Overflow
    -                    nop
    -                    b       No_overflow
    -                    nop
    -                    nop
    -                    nop
    -                No_overflow:
    -                */
    -                loadDest(dataTempRegister);
    -                move(imm, immTempRegister);
    -                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
    -                m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
    -                storeDest(dataTempRegister);
    -                m_assembler.bltz(cmpTempRegister, 9);
    -                m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
    -                m_assembler.bgez(cmpTempRegister, 7);
    -                m_assembler.nop();
    -            } else {
    -                loadDest(dataTempRegister);
    -                if (imm.m_value >= 0 && imm.m_value  <= 32767) {
    -                    move(dataTempRegister, cmpTempRegister);
    -                    m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
    -                    m_assembler.bltz(cmpTempRegister, 9);
    -                    storeDest(dataTempRegister);
    -                    m_assembler.bgez(dataTempRegister, 7);
    -                    m_assembler.nop();
    -                } else if (imm.m_value >= -32768 && imm.m_value < 0) {
    -                    move(dataTempRegister, cmpTempRegister);
    -                    m_assembler.addiu(dataTempRegister, dataTempRegister, imm.m_value);
    -                    m_assembler.bgez(cmpTempRegister, 9);
    -                    storeDest(dataTempRegister);
    -                    m_assembler.bltz(cmpTempRegister, 7);
    -                    m_assembler.nop();
    -                } else {
    -                    move(imm, immTempRegister);
    -                    m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
    -                    m_assembler.addu(dataTempRegister, dataTempRegister, immTempRegister);
    -                    m_assembler.bltz(cmpTempRegister, 10);
    -                    storeDest(dataTempRegister);
    -                    m_assembler.xorInsn(cmpTempRegister, dataTempRegister, immTempRegister);
    -                    m_assembler.bgez(cmpTempRegister, 7);
    -                    m_assembler.nop();
    -                }
    -            }
    -            return jump();
    -        }
    -        move(imm, immTempRegister);
    -        loadDest(dataTempRegister);
    -        add32(immTempRegister, dataTempRegister);
    -        storeDest(dataTempRegister);
    -        if (cond == Signed) {
    -            // Check if dest is negative.
    -            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
    -            return branchNotEqual(cmpTempRegister, MIPSRegisters::zero);
    -        }
    -        if (cond == PositiveOrZero) {
    -            // Check if dest is not negative.
    -            m_assembler.slt(cmpTempRegister, dataTempRegister, MIPSRegisters::zero);
    -            return branchEqual(cmpTempRegister, MIPSRegisters::zero);
    -        }
    -        if (cond == Zero)
    -            return branchEqual(dataTempRegister, MIPSRegisters::zero);
    -        if (cond == NonZero)
    -            return branchNotEqual(dataTempRegister, MIPSRegisters::zero);
    -        ASSERT(0);
    -        return Jump();
    -    }
    -
         Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
         {
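
The deleted MIPS branchAdd32 detects signed-add overflow exactly as its pseudocode comment spells out: operands with differing sign bits can never overflow, and operands with matching signs overflow precisely when the result's sign flips. A standalone C++ model of that test (addOverflows is an illustrative name, not JSC API):

    #include <cassert>
    #include <cstdint>

    // Mirrors the deleted Overflow path:
    //   xor cmpTemp, a, b    ; sign set -> signs differ -> no overflow possible
    //   addu sum, a, b       ; wrapping add
    //   xor cmpTemp, sum, b  ; sign set -> result sign flipped -> overflow
    static bool addOverflows(int32_t a, int32_t b, int32_t* sumOut)
    {
        uint32_t sum = static_cast<uint32_t>(a) + static_cast<uint32_t>(b); // wraps like addu
        *sumOut = static_cast<int32_t>(sum);
        bool signsDiffer = (a ^ b) < 0;
        bool resultSignFlipped = (static_cast<int32_t>(sum) ^ b) < 0;
        return !signsDiffer && resultSignFlipped;
    }

    int main()
    {
        int32_t sum;
        assert(!addOverflows(1, 2, &sum) && sum == 3);
        assert(addOverflows(INT32_MAX, 1, &sum));   // positive + positive wraps negative
        assert(addOverflows(INT32_MIN, -1, &sum));  // negative + negative wraps positive
        assert(!addOverflows(INT32_MAX, -1, &sum)); // differing signs never overflow
        return 0;
    }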
  • trunk/Source/JavaScriptCore/bytecode/CodeBlock.h

    r278656 r279049

         unsigned* addressOfNumParameters() { return &m_numParameters; }
    -
    -    static ptrdiff_t offsetOfNumCalleeLocals() { return OBJECT_OFFSETOF(CodeBlock, m_numCalleeLocals); }
         static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
    -    static ptrdiff_t offsetOfNumVars() { return OBJECT_OFFSETOF(CodeBlock, m_numVars); }

         CodeBlock* alternative() const { return static_cast<CodeBlock*>(m_alternative.get()); }
    …
             return result;
         }
    -
    -    static ptrdiff_t offsetOfArgumentValueProfiles() { return OBJECT_OFFSETOF(CodeBlock, m_argumentValueProfiles); }

         ValueProfile& valueProfileForBytecodeIndex(BytecodeIndex);
    …

         bool wasCompiledWithDebuggingOpcodes() const { return m_unlinkedCode->wasCompiledWithDebuggingOpcodes(); }
    -
    +
         // This is intentionally public; it's the responsibility of anyone doing any
         // of the following to hold the lock:
    …
         static ptrdiff_t offsetOfMetadataTable() { return OBJECT_OFFSETOF(CodeBlock, m_metadata); }
         static ptrdiff_t offsetOfInstructionsRawPointer() { return OBJECT_OFFSETOF(CodeBlock, m_instructionsRawPointer); }
    -    static ptrdiff_t offsetOfShouldAlwaysBeInlined() { return OBJECT_OFFSETOF(CodeBlock, m_shouldAlwaysBeInlined); }

         bool loopHintsAreEligibleForFuzzingEarlyReturn()
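
The offsetOf* accessors above (two kept, two deleted by this revert) all wrap OBJECT_OFFSETOF so that JIT-emitted loads can address CodeBlock fields as raw byte offsets from a base register. A small sketch of the idiom, using a hypothetical struct in place of the real CodeBlock:

    #include <cstddef>
    #include <cstdio>

    // Hypothetical stand-in for CodeBlock; only the access pattern matters.
    struct FakeCodeBlock {
        unsigned m_numParameters { 3 };

        // Same shape as CodeBlock::offsetOfNumParameters(), which uses
        // OBJECT_OFFSETOF(CodeBlock, m_numParameters) in JSC.
        static ptrdiff_t offsetOfNumParameters() { return offsetof(FakeCodeBlock, m_numParameters); }
    };

    int main()
    {
        FakeCodeBlock block;
        // A JIT emits roughly: load32(Address(codeBlockGPR, offsetOfNumParameters()), dst).
        // The equivalent untyped load in C++:
        const char* base = reinterpret_cast<const char*>(&block);
        unsigned numParameters = *reinterpret_cast<const unsigned*>(base + FakeCodeBlock::offsetOfNumParameters());
        std::printf("numParameters = %u\n", numParameters); // prints 3
        return 0;
    }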
  • trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h

    r278937 r279049

             const RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
    -        emitSaveCalleeSavesFor(calleeSaves);
    -    }
    -
    -    void emitSaveCalleeSavesFor(const RegisterAtOffsetList* calleeSaves)
    -    {
             RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters());
             unsigned registerCount = calleeSaves->size();
    …
         }

    -    void emitSaveCalleeSavesForBaselineJIT()
    -    {
    -        emitSaveCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
    -    }
    -
         void emitSaveThenMaterializeTagRegisters()
         {
    …
         {
             emitRestoreCalleeSavesFor(codeBlock());
    -    }
    -
    -    void emitRestoreCalleeSavesForBaselineJIT()
    -    {
    -        emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
         }

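
emitSaveCalleeSavesFor(), folded back into its caller by this revert, iterates a RegisterAtOffsetList and spills every callee-save register not in the don't-save set (the stack registers) to its assigned frame offset. A rough model under deliberately simplified, assumed types; JSC's real RegisterAtOffset and RegisterSet are richer than this:

    #include <cstdio>
    #include <vector>

    // Simplified stand-ins for JSC's RegisterAtOffsetList machinery.
    struct RegisterAtOffset {
        int reg;              // register index
        int offset;           // frame offset in bytes
        bool isStackRegister; // e.g. SP/FP, which belong to the don't-save set
    };

    // Mirrors the loop shape of emitSaveCalleeSavesFor(): skip don't-save
    // registers, emit one store per remaining callee-save register.
    static void emitSaveCalleeSavesFor(const std::vector<RegisterAtOffset>& calleeSaves)
    {
        for (const RegisterAtOffset& entry : calleeSaves) {
            if (entry.isStackRegister)
                continue;
            // Real code emits: storePtr(reg, Address(framePointerRegister, offset)).
            std::printf("store r%d -> [fp%+d]\n", entry.reg, entry.offset);
        }
    }

    int main()
    {
        emitSaveCalleeSavesFor({ { 19, -8, false }, { 20, -16, false }, { 31, 0, true } });
        return 0;
    }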
  • trunk/Source/JavaScriptCore/jit/JIT.cpp

    r278656 r279049
     }

    -#if ENABLE(EXTRA_CTI_THUNKS)
    -#if CPU(ARM64) || (CPU(X86_64) && !OS(WINDOWS))
    -// These are supported ports.
    -#else
    -// This is a courtesy reminder (and warning) that the implementation of EXTRA_CTI_THUNKS can
    -// use up to 6 argument registers and/or 6/7 temp registers, and make use of ARM64 like
    -// features. Hence, it may not work for many other ports without significant work. If you
    -// plan on adding EXTRA_CTI_THUNKS support for your port, please remember to search the
    -// EXTRA_CTI_THUNKS code for CPU(ARM64) and CPU(X86_64) conditional code, and add support
    -// for your port there as well.
    -#error "unsupported architecture"
    -#endif
    -#endif // ENABLE(EXTRA_CTI_THUNKS)
    -
     Seconds totalBaselineCompileTime;
     Seconds totalDFGCompileTime;
    …
     }

    -#if ENABLE(DFG_JIT) && !ENABLE(EXTRA_CTI_THUNKS)
    +#if ENABLE(DFG_JIT)
     void JIT::emitEnterOptimizationCheck()
     {
    …
         skipOptimize.link(this);
     }
    -#endif // ENABLE(DFG_JIT) && !ENABLE(EXTRA_CTI_THUNKS)(
    +#endif

     void JIT::emitNotifyWrite(WatchpointSet* set)
    …
     }

    -static inline unsigned prologueGeneratorSelector(bool doesProfiling, bool isConstructor, bool hasHugeFrame)
    -{
    -    return doesProfiling << 2 | isConstructor << 1 | hasHugeFrame << 0;
    -}
    -
    -#define FOR_EACH_NON_PROFILING_PROLOGUE_GENERATOR(v) \
    -    v(!doesProfiling, !isConstructor, !hasHugeFrame, prologueGenerator0, arityFixup_prologueGenerator0) \
    -    v(!doesProfiling, !isConstructor,  hasHugeFrame, prologueGenerator1, arityFixup_prologueGenerator1) \
    -    v(!doesProfiling,  isConstructor, !hasHugeFrame, prologueGenerator2, arityFixup_prologueGenerator2) \
    -    v(!doesProfiling,  isConstructor,  hasHugeFrame, prologueGenerator3, arityFixup_prologueGenerator3)
    -
    -#if ENABLE(DFG_JIT)
    -#define FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v) \
    -    v( doesProfiling, !isConstructor, !hasHugeFrame, prologueGenerator4, arityFixup_prologueGenerator4) \
    -    v( doesProfiling, !isConstructor,  hasHugeFrame, prologueGenerator5, arityFixup_prologueGenerator5) \
    -    v( doesProfiling,  isConstructor, !hasHugeFrame, prologueGenerator6, arityFixup_prologueGenerator6) \
    -    v( doesProfiling,  isConstructor,  hasHugeFrame, prologueGenerator7, arityFixup_prologueGenerator7)
    -
    -#else // not ENABLE(DFG_JIT)
    -#define FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v)
    -#endif // ENABLE(DFG_JIT)
    -
    -#define FOR_EACH_PROLOGUE_GENERATOR(v) \
    -    FOR_EACH_NON_PROFILING_PROLOGUE_GENERATOR(v) \
    -    FOR_EACH_PROFILING_PROLOGUE_GENERATOR(v)
    -
     void JIT::compileAndLinkWithoutFinalizing(JITCompilationEffort effort)
     {
    …

         emitFunctionPrologue();
    -
    -#if !ENABLE(EXTRA_CTI_THUNKS)
         emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);

    …
             ASSERT(!m_bytecodeIndex);
             if (shouldEmitProfiling()) {
    -            // If this is a constructor, then we want to put in a dummy profiling site (to
    -            // keep things consistent) but we don't actually want to record the dummy value.
    -            unsigned startArgument = m_codeBlock->isConstructor() ? 1 : 0;
    -            for (unsigned argument = startArgument; argument < m_codeBlock->numParameters(); ++argument) {
    +            for (unsigned argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
    +                // If this is a constructor, then we want to put in a dummy profiling site (to
    +                // keep things consistent) but we don't actually want to record the dummy value.
    +                if (m_codeBlock->isConstructor() && !argument)
    +                    continue;
                     int offset = CallFrame::argumentOffsetIncludingThis(argument) * static_cast<int>(sizeof(Register));
     #if USE(JSVALUE64)
    …
             }
         }
    -#else // ENABLE(EXTRA_CTI_THUNKS)
    -    constexpr GPRReg codeBlockGPR = regT7;
    -    ASSERT(!m_bytecodeIndex);
    -
    -    int frameTopOffset = stackPointerOffsetFor(m_codeBlock) * sizeof(Register);
    -    unsigned maxFrameSize = -frameTopOffset;
    -
    -    bool doesProfiling = (m_codeBlock->codeType() == FunctionCode) && shouldEmitProfiling();
    -    bool isConstructor = m_codeBlock->isConstructor();
    -    bool hasHugeFrame = maxFrameSize > Options::reservedZoneSize();
    -
    -    static constexpr ThunkGenerator generators[] = {
    -#define USE_PROLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) name,
    -        FOR_EACH_PROLOGUE_GENERATOR(USE_PROLOGUE_GENERATOR)
    -#undef USE_PROLOGUE_GENERATOR
    -    };
    -    static constexpr unsigned numberOfGenerators = sizeof(generators) / sizeof(generators[0]);
    -
    -    move(TrustedImmPtr(m_codeBlock), codeBlockGPR);
    -
    -    unsigned generatorSelector = prologueGeneratorSelector(doesProfiling, isConstructor, hasHugeFrame);
    -    RELEASE_ASSERT(generatorSelector < numberOfGenerators);
    -    auto generator = generators[generatorSelector];
    -    emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
    -
    -    Label bodyLabel(this);
    -#endif // !ENABLE(EXTRA_CTI_THUNKS)
    -
    +
         RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));

    …
         m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    -#if !ENABLE(EXTRA_CTI_THUNKS)
         stackOverflow.link(this);
         m_bytecodeIndex = BytecodeIndex(0);
    …
             addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
         callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
    -#endif

         // If the number of parameters is 1, we never require arity fixup.
    …
         if (m_codeBlock->codeType() == FunctionCode && requiresArityFixup) {
             m_arityCheck = label();
    -#if !ENABLE(EXTRA_CTI_THUNKS)
             store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
             emitFunctionPrologue();
    …
             emitNakedNearCall(m_vm->getCTIStub(arityFixupGenerator).retaggedCode<NoPtrTag>());

    -        jump(beginLabel);
    -
    -#else // ENABLE(EXTRA_CTI_THUNKS)
    -        emitFunctionPrologue();
    -
    -        static_assert(codeBlockGPR == regT7);
    -        ASSERT(!m_bytecodeIndex);
    -
    -        static constexpr ThunkGenerator generators[] = {
    -#define USE_PROLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) arityFixupName,
    -            FOR_EACH_PROLOGUE_GENERATOR(USE_PROLOGUE_GENERATOR)
    -#undef USE_PROLOGUE_GENERATOR
    -        };
    -        static constexpr unsigned numberOfGenerators = sizeof(generators) / sizeof(generators[0]);
    -
    -        move(TrustedImmPtr(m_codeBlock), codeBlockGPR);
    -
    -        RELEASE_ASSERT(generatorSelector < numberOfGenerators);
    -        auto generator = generators[generatorSelector];
    -        RELEASE_ASSERT(generator);
    -        emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
    -
    -        jump(bodyLabel);
    -#endif // !ENABLE(EXTRA_CTI_THUNKS)
    -
     #if ASSERT_ENABLED
             m_bytecodeIndex = BytecodeIndex(); // Reset this, in order to guard its use with ASSERTs.
     #endif
    +
    +        jump(beginLabel);
         } else
             m_arityCheck = entryLabel; // Never require arity fixup.
    …
         ASSERT(m_jmpTable.isEmpty());

    -#if !ENABLE(EXTRA_CTI_THUNKS)
         privateCompileExceptionHandlers();
    -#endif

         if (m_disassembler)
    …
         link();
     }
    -
    -#if ENABLE(EXTRA_CTI_THUNKS)
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::prologueGenerator(VM& vm, bool doesProfiling, bool isConstructor, bool hasHugeFrame, const char* thunkName)
    -{
    -    // This function generates the Baseline JIT's prologue code. It is not useable by other tiers.
    -    constexpr GPRReg codeBlockGPR = regT7; // incoming.
    -
    -    constexpr int virtualRegisterSize = static_cast<int>(sizeof(Register));
    -    constexpr int virtualRegisterSizeShift = 3;
    -    static_assert((1 << virtualRegisterSizeShift) == virtualRegisterSize);
    -
    -    tagReturnAddress();
    -
    -    storePtr(codeBlockGPR, addressFor(CallFrameSlot::codeBlock));
    -
    -    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumCalleeLocals()), regT1);
    -    if constexpr (maxFrameExtentForSlowPathCallInRegisters)
    -        add32(TrustedImm32(maxFrameExtentForSlowPathCallInRegisters), regT1);
    -    lshift32(TrustedImm32(virtualRegisterSizeShift), regT1);
    -    neg64(regT1);
    -#if ASSERT_ENABLED
    -    Probe::Function probeFunction = [] (Probe::Context& context) {
    -        CodeBlock* codeBlock = context.fp<CallFrame*>()->codeBlock();
    -        int64_t frameTopOffset = stackPointerOffsetFor(codeBlock) * sizeof(Register);
    -        RELEASE_ASSERT(context.gpr<intptr_t>(regT1) == frameTopOffset);
    -    };
    -    probe(tagCFunctionPtr<JITProbePtrTag>(probeFunction), nullptr);
    -#endif
    -
    -    addPtr(callFrameRegister, regT1);
    -
    -    JumpList stackOverflow;
    -    if (hasHugeFrame)
    -        stackOverflow.append(branchPtr(Above, regT1, callFrameRegister));
    -    stackOverflow.append(branchPtr(Above, AbsoluteAddress(vm.addressOfSoftStackLimit()), regT1));
    -
    -    // We'll be imminently returning with a `retab` (ARM64E's return with authentication
    -    // using the B key) in the normal path (see MacroAssemblerARM64E's implementation of
    -    // ret()), which will do validation. So, extra validation here is redundant and unnecessary.
    -    untagReturnAddressWithoutExtraValidation();
    -#if CPU(X86_64)
    -    pop(regT2); // Save the return address.
    -#endif
    -    move(regT1, stackPointerRegister);
    -    tagReturnAddress();
    -    checkStackPointerAlignment();
    -#if CPU(X86_64)
    -    push(regT2); // Restore the return address.
    -#endif
    -
    -    emitSaveCalleeSavesForBaselineJIT();
    -    emitMaterializeTagCheckRegisters();
    -
    -    if (doesProfiling) {
    -        constexpr GPRReg argumentValueProfileGPR = regT6;
    -        constexpr GPRReg numParametersGPR = regT5;
    -        constexpr GPRReg argumentGPR = regT4;
    -
    -        load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
    -        loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfArgumentValueProfiles()), argumentValueProfileGPR);
    -        if (isConstructor)
    -            addPtr(TrustedImm32(sizeof(ValueProfile)), argumentValueProfileGPR);
    -
    -        int startArgument = CallFrameSlot::thisArgument + (isConstructor ? 1 : 0);
    -        int startArgumentOffset = startArgument * virtualRegisterSize;
    -        move(TrustedImm64(startArgumentOffset), argumentGPR);
    -
    -        add32(TrustedImm32(static_cast<int>(CallFrameSlot::thisArgument)), numParametersGPR);
    -        lshift32(TrustedImm32(virtualRegisterSizeShift), numParametersGPR);
    -
    -        addPtr(callFrameRegister, argumentGPR);
    -        addPtr(callFrameRegister, numParametersGPR);
    -
    -        Label loopStart(this);
    -        Jump done = branchPtr(AboveOrEqual, argumentGPR, numParametersGPR);
    -        {
    -            load64(Address(argumentGPR), regT0);
    -            store64(regT0, Address(argumentValueProfileGPR, OBJECT_OFFSETOF(ValueProfile, m_buckets)));
    -
    -            // The argument ValueProfiles are stored in a FixedVector. Hence, the
    -            // address of the next profile can be trivially computed with an increment.
    -            addPtr(TrustedImm32(sizeof(ValueProfile)), argumentValueProfileGPR);
    -            addPtr(TrustedImm32(virtualRegisterSize), argumentGPR);
    -            jump().linkTo(loopStart, this);
    -        }
    -        done.link(this);
    -    }
    -    ret();
    -
    -    stackOverflow.link(this);
    -#if CPU(X86_64)
    -    addPtr(TrustedImm32(1 * sizeof(CPURegister)), stackPointerRegister); // discard return address.
    -#endif
    -
    -    uint32_t locationBits = CallSiteIndex(0).bits();
    -    store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCountIncludingThis));
    -
    -    if (maxFrameExtentForSlowPathCall)
    -        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
    -
    -    setupArguments<decltype(operationThrowStackOverflowError)>(codeBlockGPR);
    -    prepareCallOperation(vm);
    -    MacroAssembler::Call operationCall = call(OperationPtrTag);
    -    Jump handleExceptionJump = jump();
    -
    -    auto handler = vm.getCTIStub(handleExceptionWithCallFrameRollbackGenerator);
    -
    -    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    -    patchBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(operationThrowStackOverflowError));
    -    patchBuffer.link(handleExceptionJump, CodeLocationLabel(handler.retaggedCode<NoPtrTag>()));
    -    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
    -}
    -
    -static constexpr bool doesProfiling = true;
    -static constexpr bool isConstructor = true;
    -static constexpr bool hasHugeFrame = true;
    -
    -#define DEFINE_PROGLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) \
    -    MacroAssemblerCodeRef<JITThunkPtrTag> JIT::name(VM& vm) \
    -    { \
    -        JIT jit(vm); \
    -        return jit.prologueGenerator(vm, doesProfiling, isConstructor, hasHugeFrame, "Baseline: " #name); \
    -    }
    -
    -FOR_EACH_PROLOGUE_GENERATOR(DEFINE_PROGLOGUE_GENERATOR)
    -#undef DEFINE_PROGLOGUE_GENERATOR
    -
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::arityFixupPrologueGenerator(VM& vm, bool isConstructor, ThunkGenerator normalPrologueGenerator, const char* thunkName)
    -{
    -    // This function generates the Baseline JIT's prologue code. It is not useable by other tiers.
    -    constexpr GPRReg codeBlockGPR = regT7; // incoming.
    -    constexpr GPRReg numParametersGPR = regT6;
    -
    -    tagReturnAddress();
    -#if CPU(X86_64)
    -    push(framePointerRegister);
    -#elif CPU(ARM64)
    -    pushPair(framePointerRegister, linkRegister);
    -#endif
    -
    -    storePtr(codeBlockGPR, addressFor(CallFrameSlot::codeBlock));
    -    store8(TrustedImm32(0), Address(codeBlockGPR, CodeBlock::offsetOfShouldAlwaysBeInlined()));
    -
    -    load32(payloadFor(CallFrameSlot::argumentCountIncludingThis), regT1);
    -    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
    -    Jump noFixupNeeded = branch32(AboveOrEqual, regT1, numParametersGPR);
    -
    -    if constexpr (maxFrameExtentForSlowPathCall)
    -        addPtr(TrustedImm32(-static_cast<int32_t>(maxFrameExtentForSlowPathCall)), stackPointerRegister);
    -
    -    loadPtr(Address(codeBlockGPR, CodeBlock::offsetOfGlobalObject()), argumentGPR0);
    -
    -    static_assert(std::is_same<decltype(operationConstructArityCheck), decltype(operationCallArityCheck)>::value);
    -    setupArguments<decltype(operationCallArityCheck)>(argumentGPR0);
    -    prepareCallOperation(vm);
    -
    -    MacroAssembler::Call arityCheckCall = call(OperationPtrTag);
    -    Jump handleExceptionJump = emitNonPatchableExceptionCheck(vm);
    -
    -    if constexpr (maxFrameExtentForSlowPathCall)
    -        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
    -    Jump needFixup = branchTest32(NonZero, returnValueGPR);
    -    noFixupNeeded.link(this);
    -
    -    // The normal prologue expects incoming codeBlockGPR.
    -    load64(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
    -
    -#if CPU(X86_64)
    -    pop(framePointerRegister);
    -#elif CPU(ARM64)
    -    popPair(framePointerRegister, linkRegister);
    -#endif
    -    untagReturnAddress();
    -
    -    JumpList normalPrologueJump;
    -    normalPrologueJump.append(jump());
    -
    -    needFixup.link(this);
    -
    -    // Restore the stack for arity fixup, and preserve the return address.
    -    // arityFixupGenerator will be shifting the stack. So, we can't use the stack to
    -    // preserve the return address. We also can't use callee saved registers because
    -    // they haven't been saved yet.
    -    //
    -    // arityFixupGenerator is carefully crafted to only use a0, a1, a2, t3, t4 and t5.
    -    // So, the return address can be preserved in regT7.
    -#if CPU(X86_64)
    -    pop(argumentGPR2); // discard.
    -    pop(regT7); // save return address.
    -#elif CPU(ARM64)
    -    popPair(framePointerRegister, linkRegister);
    -    untagReturnAddress();
    -    move(linkRegister, regT7);
    -    auto randomReturnAddressTag = random();
    -    move(TrustedImm32(randomReturnAddressTag), regT1);
    -    tagPtr(regT1, regT7);
    -#endif
    -    move(returnValueGPR, GPRInfo::argumentGPR0);
    -    Call arityFixupCall = nearCall();
    -
    -#if CPU(X86_64)
    -    push(regT7); // restore return address.
    -#elif CPU(ARM64)
    -    move(TrustedImm32(randomReturnAddressTag), regT1);
    -    untagPtr(regT1, regT7);
    -    move(regT7, linkRegister);
    -#endif
    -
    -    load64(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
    -    normalPrologueJump.append(jump());
    -
    -    auto arityCheckOperation = isConstructor ? operationConstructArityCheck : operationCallArityCheck;
    -    auto arityFixup = vm.getCTIStub(arityFixupGenerator);
    -    auto normalPrologue = vm.getCTIStub(normalPrologueGenerator);
    -    auto exceptionHandler = vm.getCTIStub(popThunkStackPreservesAndHandleExceptionGenerator);
    -
    -    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    -    patchBuffer.link(arityCheckCall, FunctionPtr<OperationPtrTag>(arityCheckOperation));
    -    patchBuffer.link(arityFixupCall, FunctionPtr(arityFixup.retaggedCode<NoPtrTag>()));
    -    patchBuffer.link(normalPrologueJump, CodeLocationLabel(normalPrologue.retaggedCode<NoPtrTag>()));
    -    patchBuffer.link(handleExceptionJump, CodeLocationLabel(exceptionHandler.retaggedCode<NoPtrTag>()));
    -    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
    -}
    -
    -#define DEFINE_ARITY_PROGLOGUE_GENERATOR(doesProfiling, isConstructor, hasHugeFrame, name, arityFixupName) \
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::arityFixupName(VM& vm) \
    -    { \
    -        JIT jit(vm); \
    -        return jit.arityFixupPrologueGenerator(vm, isConstructor, name, "Baseline: " #arityFixupName); \
    -    }
    -
    -FOR_EACH_PROLOGUE_GENERATOR(DEFINE_ARITY_PROGLOGUE_GENERATOR)
    -#undef DEFINE_ARITY_PROGLOGUE_GENERATOR
    -
    -#endif // ENABLE(EXTRA_CTI_THUNKS)

     void JIT::link()
    …
     }

    +void JIT::privateCompileExceptionHandlers()
    +{
     #if !ENABLE(EXTRA_CTI_THUNKS)
    -void JIT::privateCompileExceptionHandlers()
    -{
         if (!m_exceptionChecksWithCallFrameRollback.empty()) {
             m_exceptionChecksWithCallFrameRollback.link(this);
    …
             jumpToExceptionHandler(vm());
         }
    -}
    -#endif // !ENABLE(EXTRA_CTI_THUNKS)
    +#endif // ENABLE(EXTRA_CTI_THUNKS)
    +}

     void JIT::doMainThreadPreparationBeforeCompile()
  • trunk/Source/JavaScriptCore/jit/JIT.h

    r278687 r279049
             }

    -#if !ENABLE(EXTRA_CTI_THUNKS)
             void privateCompileExceptionHandlers();
    -#endif

             void advanceToNextCheckpoint();
    …
     #if ENABLE(EXTRA_CTI_THUNKS)
             // Thunk generators.
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator0(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator1(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator2(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator3(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator4(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator5(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator6(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator7(VM&);
    -        MacroAssemblerCodeRef<JITThunkPtrTag> prologueGenerator(VM&, bool doesProfiling, bool isConstructor, bool hasHugeFrame, const char* name);
    -
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator0(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator1(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator2(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator3(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator4(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator5(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator6(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> arityFixup_prologueGenerator7(VM&);
    -        MacroAssemblerCodeRef<JITThunkPtrTag> arityFixupPrologueGenerator(VM&, bool isConstructor, ThunkGenerator normalPrologueGenerator, const char* name);
    -
             static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_del_by_id_prepareCallGenerator(VM&);
             static MacroAssemblerCodeRef<JITThunkPtrTag> slow_op_del_by_val_prepareCallGenerator(VM&);
    …

             static MacroAssemblerCodeRef<JITThunkPtrTag> op_check_traps_handlerGenerator(VM&);
    -
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_canBeOptimized_Generator(VM&);
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_cannotBeOptimized_Generator(VM&);
    -        MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_Generator(VM&, bool canBeOptimized, const char* thunkName);
    -
    -#if ENABLE(DFG_JIT)
    -        static MacroAssemblerCodeRef<JITThunkPtrTag> op_loop_hint_Generator(VM&);
    -#endif
    +        static MacroAssemblerCodeRef<JITThunkPtrTag> op_enter_handlerGenerator(VM&);
             static MacroAssemblerCodeRef<JITThunkPtrTag> op_ret_handlerGenerator(VM&);
             static MacroAssemblerCodeRef<JITThunkPtrTag> op_throw_handlerGenerator(VM&);
  • trunk/Source/JavaScriptCore/jit/JITInlines.h

    r278656 r279049
     ALWAYS_INLINE JIT::Call JIT::emitNakedNearCall(CodePtr<NoPtrTag> target)
     {
    +    ASSERT(m_bytecodeIndex); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
         Call nakedCall = nearCall();
         m_nearCalls.append(NearCallRecord(nakedCall, FunctionPtr<JSInternalPtrTag>(target.retagged<JSInternalPtrTag>())));
  • trunk/Source/JavaScriptCore/jit/JITOpcodes.cpp

    r278656 r279049

         jit.checkStackPointerAlignment();
    -    jit.emitRestoreCalleeSavesForBaselineJIT();
    +    jit.emitRestoreCalleeSavesFor(&RegisterAtOffsetList::llintBaselineCalleeSaveRegisters());
         jit.emitFunctionEpilogue();
         jit.ret();
    …
     #else
         ASSERT(m_bytecodeIndex.offset() == 0);
    +    constexpr GPRReg localsToInitGPR = argumentGPR0;
    +    constexpr GPRReg canBeOptimizedGPR = argumentGPR4;
    +
         unsigned localsToInit = count - CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
         RELEASE_ASSERT(localsToInit < count);
    -    ThunkGenerator generator = canBeOptimized() ? op_enter_canBeOptimized_Generator : op_enter_cannotBeOptimized_Generator;
    -    emitNakedNearCall(vm().getCTIStub(generator).retaggedCode<NoPtrTag>());
    +    move(TrustedImm32(localsToInit * sizeof(Register)), localsToInitGPR);
    +    move(TrustedImm32(canBeOptimized()), canBeOptimizedGPR);
    +    emitNakedNearCall(vm().getCTIStub(op_enter_handlerGenerator).retaggedCode<NoPtrTag>());
     #endif // ENABLE(EXTRA_CTI_THUNKS)
     }

     #if ENABLE(EXTRA_CTI_THUNKS)
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_Generator(VM& vm, bool canBeOptimized, const char* thunkName)
    -{
    +MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_handlerGenerator(VM& vm)
    +{
    +    JIT jit(vm);
    +
     #if CPU(X86_64)
    -    push(X86Registers::ebp);
    +    jit.push(X86Registers::ebp);
     #elif CPU(ARM64)
    -    tagReturnAddress();
    -    pushPair(framePointerRegister, linkRegister);
    +    jit.tagReturnAddress();
    +    jit.pushPair(framePointerRegister, linkRegister);
     #endif
         // op_enter is always at bytecodeOffset 0.
    -    store32(TrustedImm32(0), tagFor(CallFrameSlot::argumentCountIncludingThis));
    +    jit.store32(TrustedImm32(0), tagFor(CallFrameSlot::argumentCountIncludingThis));

         constexpr GPRReg localsToInitGPR = argumentGPR0;
    …
         constexpr GPRReg endGPR = argumentGPR2;
         constexpr GPRReg undefinedGPR = argumentGPR3;
    -    constexpr GPRReg codeBlockGPR = argumentGPR4;
    -
    -    constexpr int virtualRegisterSizeShift = 3;
    -    static_assert((1 << virtualRegisterSizeShift) == sizeof(Register));
    -
    -    loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
    -    load32(Address(codeBlockGPR, CodeBlock::offsetOfNumVars()), localsToInitGPR);
    -    sub32(TrustedImm32(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()), localsToInitGPR);
    -    lshift32(TrustedImm32(virtualRegisterSizeShift), localsToInitGPR);
    +    constexpr GPRReg canBeOptimizedGPR = argumentGPR4;

         size_t startLocal = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters();
         int startOffset = virtualRegisterForLocal(startLocal).offset();
    -    move(TrustedImm64(startOffset * sizeof(Register)), iteratorGPR);
    -    sub64(iteratorGPR, localsToInitGPR, endGPR);
    -
    -    move(TrustedImm64(JSValue::encode(jsUndefined())), undefinedGPR);
    -    auto initLoop = label();
    -    Jump initDone = branch32(LessThanOrEqual, iteratorGPR, endGPR);
    +    jit.move(TrustedImm64(startOffset * sizeof(Register)), iteratorGPR);
    +    jit.sub64(iteratorGPR, localsToInitGPR, endGPR);
    +
    +    jit.move(TrustedImm64(JSValue::encode(jsUndefined())), undefinedGPR);
    +    auto initLoop = jit.label();
    +    Jump initDone = jit.branch32(LessThanOrEqual, iteratorGPR, endGPR);
         {
    -        store64(undefinedGPR, BaseIndex(GPRInfo::callFrameRegister, iteratorGPR, TimesOne));
    -        sub64(TrustedImm32(sizeof(Register)), iteratorGPR);
    -        jump(initLoop);
    +        jit.store64(undefinedGPR, BaseIndex(GPRInfo::callFrameRegister, iteratorGPR, TimesOne));
    +        jit.sub64(TrustedImm32(sizeof(Register)), iteratorGPR);
    +        jit.jump(initLoop);
         }
    -    initDone.link(this);
    -
    -    // Implementing emitWriteBarrier(m_codeBlock).
    -    Jump ownerIsRememberedOrInEden = barrierBranch(vm, codeBlockGPR, argumentGPR2);
    -
    -    setupArguments<decltype(operationWriteBarrierSlowPath)>(&vm, codeBlockGPR);
    -    prepareCallOperation(vm);
    -    Call operationWriteBarrierCall = call(OperationPtrTag);
    -
    -    if (canBeOptimized)
    -        loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
    -
    -    ownerIsRememberedOrInEden.link(this);
    +    initDone.link(&jit);
    +
    +    // emitWriteBarrier(m_codeBlock).
    +    jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
    +    Jump ownerIsRememberedOrInEden = jit.barrierBranch(vm, argumentGPR1, argumentGPR2);
    +
    +    jit.move(canBeOptimizedGPR, GPRInfo::numberTagRegister); // save.
    +    jit.setupArguments<decltype(operationWriteBarrierSlowPath)>(&vm, argumentGPR1);
    +    jit.prepareCallOperation(vm);
    +    Call operationWriteBarrierCall = jit.call(OperationPtrTag);
    +
    +    jit.move(GPRInfo::numberTagRegister, canBeOptimizedGPR); // restore.
    +    jit.move(TrustedImm64(JSValue::NumberTag), GPRInfo::numberTagRegister);
    +    ownerIsRememberedOrInEden.link(&jit);

     #if ENABLE(DFG_JIT)
    -    // Implementing emitEnterOptimizationCheck().
         Call operationOptimizeCall;
    -    if (canBeOptimized) {
    +    if (Options::useDFGJIT()) {
    +        // emitEnterOptimizationCheck().
             JumpList skipOptimize;

    -        skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(codeBlockGPR, CodeBlock::offsetOfJITExecuteCounter())));
    -
    -        copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
    -
    -        setupArguments<decltype(operationOptimize)>(&vm, TrustedImm32(0));
    -        prepareCallOperation(vm);
    -        operationOptimizeCall = call(OperationPtrTag);
    -
    -        skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
    -        farJump(returnValueGPR, GPRInfo::callFrameRegister);
    -
    -        skipOptimize.link(this);
    +        skipOptimize.append(jit.branchTest32(Zero, canBeOptimizedGPR));
    +
    +        jit.loadPtr(addressFor(CallFrameSlot::codeBlock), argumentGPR1);
    +        skipOptimize.append(jit.branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), Address(argumentGPR1, CodeBlock::offsetOfJITExecuteCounter())));
    +
    +        jit.copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame);
    +
    +        jit.setupArguments<decltype(operationOptimize)>(&vm, TrustedImm32(0));
    +        jit.prepareCallOperation(vm);
    +        operationOptimizeCall = jit.call(OperationPtrTag);
    +
    +        skipOptimize.append(jit.branchTestPtr(Zero, returnValueGPR));
    +        jit.farJump(returnValueGPR, GPRInfo::callFrameRegister);
    +
    +        skipOptimize.link(&jit);
         }
     #endif // ENABLE(DFG_JIT)

     #if CPU(X86_64)
    -    pop(X86Registers::ebp);
    +    jit.pop(X86Registers::ebp);
     #elif CPU(ARM64)
    -    popPair(framePointerRegister, linkRegister);
    -#endif
    -    ret();
    -
    -    LinkBuffer patchBuffer(*this, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    +    jit.popPair(framePointerRegister, linkRegister);
    +#endif
    +    jit.ret();
    +
    +    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
         patchBuffer.link(operationWriteBarrierCall, FunctionPtr<OperationPtrTag>(operationWriteBarrierSlowPath));
     #if ENABLE(DFG_JIT)
    -    if (canBeOptimized)
    +    if (Options::useDFGJIT())
             patchBuffer.link(operationOptimizeCall, FunctionPtr<OperationPtrTag>(operationOptimize));
     #endif
    -    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, thunkName);
    -}
    -
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_canBeOptimized_Generator(VM& vm)
    -{
    -    JIT jit(vm);
    -    constexpr bool canBeOptimized = true;
    -    return jit.op_enter_Generator(vm, canBeOptimized, "Baseline: op_enter_canBeOptimized");
    -}
    -
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_enter_cannotBeOptimized_Generator(VM& vm)
    -{
    -    JIT jit(vm);
    -    constexpr bool canBeOptimized = false;
    -    return jit.op_enter_Generator(vm, canBeOptimized, "Baseline: op_enter_cannotBeOptimized");
    +    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: op_enter_handler");
     }
     #endif // ENABLE(EXTRA_CTI_THUNKS)
    …
             store64(regT0, ptr);
         }
    -#else
    -    UNUSED_PARAM(instruction);
    -#endif
    -
    -    // Emit the JIT optimization check:
    +#endif
    +
    +    // Emit the JIT optimization check:
         if (canBeOptimized()) {
    -        constexpr GPRReg codeBlockGPR = regT0;
    -        loadPtr(addressFor(CallFrameSlot::codeBlock), codeBlockGPR);
             addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
    -            Address(codeBlockGPR, CodeBlock::offsetOfJITExecuteCounter())));
    +            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
         }
     }

    -void JIT::emitSlow_op_loop_hint(const Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
    +void JIT::emitSlow_op_loop_hint(const Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
     {
     #if ENABLE(DFG_JIT)
    …
             linkAllSlowCases(iter);

    -#if !ENABLE(EXTRA_CTI_THUNKS)
             copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm().topEntryFrame);

    …
             noOptimizedEntry.link(this);

    -#else // ENABLE(EXTRA_CTI_THUNKS)
    -        uint32_t bytecodeOffset = m_bytecodeIndex.offset();
    -        ASSERT(BytecodeIndex(bytecodeOffset) == m_bytecodeIndex);
    -        ASSERT(m_codeBlock->instructionAt(m_bytecodeIndex) == instruction);
    -
    -        constexpr GPRReg bytecodeOffsetGPR = regT7;
    -
    -        move(TrustedImm32(bytecodeOffset), bytecodeOffsetGPR);
    -        emitNakedNearCall(vm().getCTIStub(op_loop_hint_Generator).retaggedCode<NoPtrTag>());
    -#endif // !ENABLE(EXTRA_CTI_THUNKS)
    +        emitJumpSlowToHot(jump(), currentInstruction->size());
         }
    -#endif // ENABLE(DFG_JIT)
    +#else
    +    UNUSED_PARAM(currentInstruction);
         UNUSED_PARAM(iter);
    -    UNUSED_PARAM(instruction);
    -}
    -
    -#if ENABLE(EXTRA_CTI_THUNKS)
    -
    -#if ENABLE(DFG_JIT)
    -MacroAssemblerCodeRef<JITThunkPtrTag> JIT::op_loop_hint_Generator(VM& vm)
    -{
    -    // The thunk generated by this function can only work with the LLInt / Baseline JIT because
    -    // it makes assumptions about the right globalObject being available from CallFrame::codeBlock().
    -    // DFG/FTL may inline functions belonging to other globalObjects, which may not match
    -    // CallFrame::codeBlock().
    -    JIT jit(vm);
    -
    -    jit.tagReturnAddress();
    -
    -    constexpr GPRReg bytecodeOffsetGPR = regT7; // incoming.
    -
    -#if CPU(X86_64)
    -    jit.push(framePointerRegister);
    -#elif CPU(ARM64)
    -    jit.pushPair(framePointerRegister, linkRegister);
    -#endif
    -
    -    auto usedRegisters = RegisterSet::stubUnavailableRegisters();
    -    usedRegisters.add(bytecodeOffsetGPR);
    -    jit.copyLLIntBaselineCalleeSavesFromFrameOrRegisterToEntryFrameCalleeSavesBuffer(vm.topEntryFrame, usedRegisters);
    -
    -    jit.store32(bytecodeOffsetGPR, CCallHelpers::tagFor(CallFrameSlot::argumentCountIncludingThis));
    -    jit.lshift32(TrustedImm32(BytecodeIndex::checkpointShift), bytecodeOffsetGPR);
    -    jit.setupArguments<decltype(operationOptimize)>(TrustedImmPtr(&vm), bytecodeOffsetGPR);
    -    jit.prepareCallOperation(vm);
    -    Call operationCall = jit.call(OperationPtrTag);
    -    Jump hasOptimizedEntry = jit.branchTestPtr(NonZero, returnValueGPR);
    -
    -#if CPU(X86_64)
    -    jit.pop(framePointerRegister);
    -#elif CPU(ARM64)
    -    jit.popPair(framePointerRegister, linkRegister);
    -#endif
    -    jit.ret();
    -
    -    hasOptimizedEntry.link(&jit);
    -#if CPU(X86_64)
    -    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), stackPointerRegister);
    -#elif CPU(ARM64)
    -    jit.popPair(framePointerRegister, linkRegister);
    -#endif
    -    if (ASSERT_ENABLED) {
    -        Jump ok = jit.branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
    -        jit.abortWithReason(JITUnreasonableLoopHintJumpTarget);
    -        ok.link(&jit);
    -    }
    -
    -    jit.farJump(returnValueGPR, GPRInfo::callFrameRegister);
    -
    -    LinkBuffer patchBuffer(jit, GLOBAL_THUNK_ID, LinkBuffer::Profile::ExtraCTIThunk);
    -    patchBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(operationOptimize));
    -    return FINALIZE_CODE(patchBuffer, JITThunkPtrTag, "Baseline: op_loop_hint");
    -}
    -#endif // ENABLE(DFG_JIT)
    -#endif // !ENABLE(EXTRA_CTI_THUNKS)
    +#endif
    +}

     void JIT::emit_op_check_traps(const Instruction*)
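
The op_enter_handlerGenerator thunk above initializes the frame's locals to the encoded undefined value by stepping a byte-offset iterator downward one register slot at a time until it passes the end offset (the branch32 LessThanOrEqual / sub64 pair). A sketch of the same loop over a plain array, with invented names and offsets:

    #include <cstdint>
    #include <cstdio>

    static constexpr int slotSize = 8; // sizeof(Register) in a 64-bit build

    // Models the init loop: start at the first local's byte offset (negative,
    // below the frame pointer) and step down one slot until we pass endOffset.
    static void initializeLocals(uint64_t* frameSlot0, int startOffset, int endOffset, uint64_t encodedUndefined)
    {
        for (int iterator = startOffset; iterator > endOffset; iterator -= slotSize)
            frameSlot0[iterator / slotSize] = encodedUndefined;
    }

    int main()
    {
        uint64_t buffer[8] = {};            // pretend frame memory
        uint64_t* frameSlot0 = buffer + 7;  // slot 0 near the top, locals below it
        initializeLocals(frameSlot0, -8, -40, 0xAull); // fills offsets -8..-32 (4 slots)
        for (int i = 0; i < 8; ++i)
            std::printf("slot[%d] = %llx\n", i, static_cast<unsigned long long>(buffer[i]));
        return 0;
    }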
  • trunk/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp

    r278656 r279049
         // registers to zap stale pointers, to avoid unnecessarily prolonging
         // object lifetime and increasing GC pressure.
    -    for (unsigned i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
    +    for (int i = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); i < m_codeBlock->numVars(); ++i)
             emitStore(virtualRegisterForLocal(i), jsUndefined());

  • trunk/Source/JavaScriptCore/jit/ThunkGenerators.cpp

    r278576 r279049
         CCallHelpers jit;

    -    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), CCallHelpers::stackPointerRegister);
    +#if CPU(X86_64)
    +    jit.addPtr(CCallHelpers::TrustedImm32(2 * sizeof(CPURegister)), X86Registers::esp);
    +#elif CPU(ARM64)
    +    jit.popPair(CCallHelpers::framePointerRegister, CCallHelpers::linkRegister);
    +#endif

         CCallHelpers::Jump continuation = jit.jump();