Changeset 187618 in webkit
- Timestamp:
- Jul 30, 2015 4:19:30 PM (9 years ago)
- Location:
- branches/jsc-tailcall/Source/JavaScriptCore
- Files:
-
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/jsc-tailcall/Source/JavaScriptCore/ChangeLog
r187616 r187618 1 2015-07-30 Basile Clement <basile_clement@apple.com> 2 3 jsc-tailcall: Don't waste stack space when arity fixup was performed 4 https://bugs.webkit.org/show_bug.cgi?id=147447 5 6 Reviewed by Michael Saboff. 7 8 When doing a tail call, we overwrite an amount of stack space based on 9 the number of arguments in the call frame. If we entered the tail 10 caller by performing an arity fixup, this is incorrect and leads to 11 wasted stack space - we must use the CodeBlock's number of parameters 12 instead in that case. 13 14 This patch is also moving the prepareForTailCall() function from 15 jit/ThunkGenerators.h to the place where it should have always been, 16 namely jit/CCallHelpers.h 17 18 * jit/CCallHelpers.h: 19 (JSC::CCallHelpers::prepareForTailCallSlow): 20 * jit/JITCall.cpp: 21 (JSC::JIT::compileOpCall): 22 * jit/Repatch.cpp: 23 (JSC::linkPolymorphicCall): 24 * jit/ThunkGenerators.cpp: 25 (JSC::slowPathFor): 26 (JSC::virtualThunkFor): 27 * jit/ThunkGenerators.h: 28 * tests/stress/tail-call-no-stack-overflow.js: 29 (strictLoopArityFixup): 30 1 31 2015-07-30 Basile Clement <basile_clement@apple.com> 2 32 -
branches/jsc-tailcall/Source/JavaScriptCore/jit/CCallHelpers.h
r185323 r187618 31 31 #include "AssemblyHelpers.h" 32 32 #include "GPRInfo.h" 33 #include "StackAlignment.h" 33 34 34 35 namespace JSC { … … 2021 2022 jump(GPRInfo::regT1); 2022 2023 } 2024 2025 void prepareForTailCallSlow(const TempRegisterSet& usedRegisters = { RegisterSet::specialRegisters() }) 2026 { 2027 GPRReg temp1 = usedRegisters.getFreeGPR(0); 2028 GPRReg temp2 = usedRegisters.getFreeGPR(1); 2029 ASSERT(temp2 != InvalidGPRReg); 2030 2031 subPtr(TrustedImm32(sizeof(CallerFrameAndPC)), stackPointerRegister); 2032 loadPtr(Address(GPRInfo::callFrameRegister), temp1); 2033 storePtr(temp1, Address(stackPointerRegister)); 2034 loadPtr(Address(GPRInfo::callFrameRegister, sizeof(void*)), temp1); 2035 storePtr(temp1, Address(stackPointerRegister, sizeof(void*))); 2036 2037 // Now stackPointerRegister points to a valid call frame for the callee 2038 // and callFrameRegister points to our own call frame. 2039 // We now slide the callee's call frame over our own call frame, 2040 // starting with the top to avoid unwanted overwrites 2041 2042 // Move the callFrameRegister to the top of our (trashed) call frame 2043 load32(Address(GPRInfo::callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), 2044 temp1); 2045 2046 // We need to use the number of parameters instead of the argument count in case of arity fixup 2047 loadPtr(Address(GPRInfo::callFrameRegister, JSStack::CodeBlock * static_cast<int>(sizeof(Register))), temp2); 2048 load32(Address(temp2, CodeBlock::offsetOfNumParameters()), temp2); 2049 MacroAssembler::Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, temp2, temp1); 2050 move(temp2, temp1); 2051 argumentCountWasNotFixedUp.link(this); 2052 2053 add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), temp1); 2054 and32(TrustedImm32(-stackAlignmentRegisters()), temp1); 2055 mul32(TrustedImm32(sizeof(Register)), temp1, temp1); 2056 addPtr(temp1, GPRInfo::callFrameRegister); 2057 2058 // Compute the call frame size of the callee's call frame 2059 load32(Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), 2060 temp2); 2061 add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), temp2); 2062 and32(TrustedImm32(-stackAlignmentRegisters()), temp2); 2063 2064 #if USE(JSVALUE32_64) 2065 COMPILE_ASSERT(sizeof(void*) * 2 == sizeof(Register), Register_is_two_pointers_sized); 2066 lshift32(TrustedImm32(1), temp2); 2067 #endif 2068 2069 // Do the sliding 2070 MacroAssembler::Label copyLoop(label()); 2071 2072 subPtr(TrustedImm32(sizeof(void*)), GPRInfo::callFrameRegister); 2073 sub32(TrustedImm32(1), temp2); 2074 loadPtr(BaseIndex(stackPointerRegister, temp2, MacroAssembler::timesPtr()), temp1); 2075 storePtr(temp1, Address(GPRInfo::callFrameRegister)); 2076 2077 branchTest32(MacroAssembler::NonZero, temp2).linkTo(copyLoop, this); 2078 2079 emitFunctionEpilogue(); 2080 } 2081 2082 void prepareForTailCallSlow(GPRReg calleeGPR) 2083 { 2084 TempRegisterSet usedRegisters { RegisterSet::specialRegisters() }; 2085 usedRegisters.set(calleeGPR); 2086 prepareForTailCallSlow(usedRegisters); 2087 } 2023 2088 }; 2024 2089 -
branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall.cpp
r187590 r187618 193 193 194 194 if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) { 195 prepareForTailCall (*this);195 prepareForTailCallSlow(); 196 196 m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); 197 197 // We must never come back here -
branches/jsc-tailcall/Source/JavaScriptCore/jit/JITCall32_64.cpp
r187590 r187618 277 277 checkStackPointerAlignment(); 278 278 if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) { 279 prepareForTailCall (*this);279 prepareForTailCallSlow(); 280 280 m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall(); 281 281 // We must never come back here -
branches/jsc-tailcall/Source/JavaScriptCore/jit/Repatch.cpp
r187590 r187618 1882 1882 } 1883 1883 if (CallLinkInfo::isTailCallType(callLinkInfo.callType())) { 1884 prepareForTailCall(stubJit);1884 stubJit.prepareForTailCallSlow(); 1885 1885 calls[caseIndex].call = stubJit.nearTailCall(); 1886 1886 } else -
branches/jsc-tailcall/Source/JavaScriptCore/jit/ThunkGenerators.cpp
r187590 r187618 103 103 104 104 jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR); 105 jit.move(GPRInfo::returnValueGPR, GPRInfo::regT4); // FIXME 106 prepareForTailCall(jit); 107 jit.jump(GPRInfo::regT4); 105 jit.prepareForTailCallSlow(GPRInfo::returnValueGPR); 108 106 109 107 doNotTrash.link(&jit); … … 196 194 if (CallLinkInfo::isTailCallType(callLinkInfo.callType())) { 197 195 jit.preserveReturnAddressAfterCall(GPRInfo::regT0); 198 prepareForTailCall(jit);196 jit.prepareForTailCallSlow(GPRInfo::regT4); 199 197 } 200 198 jit.jump(GPRInfo::regT4); … … 460 458 LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); 461 459 return FINALIZE_CODE(patchBuffer, ("unreachable thunk")); 462 }463 464 void prepareForTailCall(CCallHelpers& jit)465 {466 jit.subPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), JSInterfaceJIT::stackPointerRegister);467 jit.loadPtr(CCallHelpers::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT1);468 jit.storePtr(JSInterfaceJIT::regT1, CCallHelpers::Address(JSInterfaceJIT::stackPointerRegister));469 jit.loadPtr(CCallHelpers::Address(JSInterfaceJIT::callFrameRegister, sizeof(void*)), JSInterfaceJIT::regT1);470 jit.storePtr(JSInterfaceJIT::regT1, CCallHelpers::Address(JSInterfaceJIT::stackPointerRegister, sizeof(void*)));471 472 // Now stackPointerRegister points to a valid call frame for the callee473 // and callFrameRegister points to our own call frame.474 // We now slide the callee's call frame over our own call frame,475 // starting with the top to avoid unwanted overwrites476 477 // Move the callFrameRegister to the top of our (trashed) call frame478 jit.load32(CCallHelpers::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset),479 JSInterfaceJIT::regT1);480 jit.add32(CCallHelpers::TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), JSInterfaceJIT::regT1);481 jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::regT1);482 jit.mul32(CCallHelpers::TrustedImm32(sizeof(Register)), JSInterfaceJIT::regT1, JSInterfaceJIT::regT1);483 jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister);484 485 // Compute the call frame size of the callee's call frame486 jit.load32(CCallHelpers::Address(JSInterfaceJIT::stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset),487 JSInterfaceJIT::regT2);488 jit.add32(CCallHelpers::TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), JSInterfaceJIT::regT2);489 jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::regT2);490 491 #if USE(JSVALUE32_64)492 COMPILE_ASSERT(sizeof(void*) * 2 == sizeof(Register), Register_is_two_pointers_sized);493 jit.lshift32(CCallHelpers::TrustedImm32(1), JSInterfaceJIT::regT2);494 #endif495 496 // Do the sliding497 MacroAssembler::Label copyLoop(jit.label());498 499 jit.subPtr(CCallHelpers::TrustedImm32(sizeof(void*)), JSInterfaceJIT::callFrameRegister);500 jit.sub32(CCallHelpers::TrustedImm32(1), JSInterfaceJIT::regT2);501 jit.loadPtr(CCallHelpers::BaseIndex(JSInterfaceJIT::stackPointerRegister, JSInterfaceJIT::regT2, MacroAssembler::timesPtr()), JSInterfaceJIT::regT1);502 jit.storePtr(JSInterfaceJIT::regT1, CCallHelpers::Address(JSInterfaceJIT::callFrameRegister));503 504 jit.branchTest32(MacroAssembler::NonZero, JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);505 506 jit.emitFunctionEpilogue();507 460 } 508 461 -
branches/jsc-tailcall/Source/JavaScriptCore/jit/ThunkGenerators.h
r187590 r187618 50 50 MacroAssemblerCodeRef unreachableGenerator(VM*); 51 51 52 void prepareForTailCall(CCallHelpers& jit);53 54 52 MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm); 55 53 MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm); -
branches/jsc-tailcall/Source/JavaScriptCore/tests/stress/tail-call-no-stack-overflow.js
r187166 r187618 25 25 } 26 26 27 function strictLoopArityFixup(n, dummy) { 28 "use strict"; 29 if (n > 0) 30 return strictLoopArityFixup(n - 1); 31 } 32 27 33 shouldThrow(function () { sloppyLoop(100000); }, 'RangeError: Maximum call stack size exceeded.'); 34 35 // These should not throw 28 36 strictLoop(100000); 37 strictLoopArityFixup(100000);
Note: See TracChangeset for help on using the changeset viewer.