Changeset 220118 in WebKit
- Timestamp: Aug 1, 2017, 6:50:16 PM
- Location: trunk
- Files: 14 added, 84 edited
trunk/JSTests/wasm/stress/oom.js
r215662 r220118 1 // We don't need N versions of this simultaneously filling up RAM. 2 //@ runDefault 3 1 4 const verbose = false; 2 5 -
trunk/Source/JavaScriptCore/CMakeLists.txt
r219981 r220118 337 337 dfg/DFGFailedFinalizer.cpp 338 338 dfg/DFGFinalizer.cpp 339 dfg/DFGFixedButterflyAccessUncagingPhase.cpp 339 340 dfg/DFGFixupPhase.cpp 340 341 dfg/DFGFlowIndexing.cpp … … 505 506 heap/GCLogging.cpp 506 507 heap/GCRequest.cpp 508 heap/GigacageSubspace.cpp 507 509 heap/HandleSet.cpp 508 510 heap/HandleStack.cpp -
trunk/Source/JavaScriptCore/ChangeLog
r220081 r220118 1 2017-08-01 Filip Pizlo <fpizlo@apple.com> 2 3 Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in a gigacage (separate multi-GB VM region) 4 https://bugs.webkit.org/show_bug.cgi?id=174727 5 6 Reviewed by Mark Lam. 7 8 This adopts the Gigacage for the GigacageSubspace, which we use for Auxiliary allocations. Also, in 9 one place in the code - the FTL codegen for butterfly and typed array access - we "cage" the accesses 10 themselves. Basically, we do masking to ensure that the pointer points into the gigacage. 11 12 This is neutral on JetStream. 13 14 * CMakeLists.txt: 15 * JavaScriptCore.xcodeproj/project.pbxproj: 16 * b3/B3InsertionSet.cpp: 17 (JSC::B3::InsertionSet::execute): 18 * dfg/DFGAbstractInterpreterInlines.h: 19 (JSC::DFG::AbstractInterpreter<AbstractStateType>::executeEffects): 20 * dfg/DFGArgumentsEliminationPhase.cpp: 21 * dfg/DFGClobberize.cpp: 22 (JSC::DFG::readsOverlap): 23 * dfg/DFGClobberize.h: 24 (JSC::DFG::clobberize): 25 * dfg/DFGDoesGC.cpp: 26 (JSC::DFG::doesGC): 27 * dfg/DFGFixedButterflyAccessUncagingPhase.cpp: Added. 28 (JSC::DFG::performFixedButterflyAccessUncaging): 29 * dfg/DFGFixedButterflyAccessUncagingPhase.h: Added. 30 * dfg/DFGFixupPhase.cpp: 31 (JSC::DFG::FixupPhase::fixupNode): 32 * dfg/DFGHeapLocation.cpp: 33 (WTF::printInternal): 34 * dfg/DFGHeapLocation.h: 35 * dfg/DFGNodeType.h: 36 * dfg/DFGPlan.cpp: 37 (JSC::DFG::Plan::compileInThreadImpl): 38 * dfg/DFGPredictionPropagationPhase.cpp: 39 * dfg/DFGSafeToExecute.h: 40 (JSC::DFG::safeToExecute): 41 * dfg/DFGSpeculativeJIT.cpp: 42 (JSC::DFG::SpeculativeJIT::compileGetButterfly): 43 * dfg/DFGSpeculativeJIT32_64.cpp: 44 (JSC::DFG::SpeculativeJIT::compile): 45 * dfg/DFGSpeculativeJIT64.cpp: 46 (JSC::DFG::SpeculativeJIT::compile): 47 * dfg/DFGTypeCheckHoistingPhase.cpp: 48 (JSC::DFG::TypeCheckHoistingPhase::identifyRedundantStructureChecks): 49 (JSC::DFG::TypeCheckHoistingPhase::identifyRedundantArrayChecks): 50 * ftl/FTLCapabilities.cpp: 51 (JSC::FTL::canCompile): 52 * ftl/FTLLowerDFGToB3.cpp: 53 (JSC::FTL::DFG::LowerDFGToB3::compileNode): 54 (JSC::FTL::DFG::LowerDFGToB3::compileGetButterfly): 55 (JSC::FTL::DFG::LowerDFGToB3::compileGetIndexedPropertyStorage): 56 (JSC::FTL::DFG::LowerDFGToB3::compileGetByVal): 57 (JSC::FTL::DFG::LowerDFGToB3::compileStringCharAt): 58 (JSC::FTL::DFG::LowerDFGToB3::compileStringCharCodeAt): 59 (JSC::FTL::DFG::LowerDFGToB3::compileGetMapBucket): 60 (JSC::FTL::DFG::LowerDFGToB3::compileGetDirectPname): 61 (JSC::FTL::DFG::LowerDFGToB3::compileToLowerCase): 62 (JSC::FTL::DFG::LowerDFGToB3::caged): 63 * heap/GigacageSubspace.cpp: Added. 64 (JSC::GigacageSubspace::GigacageSubspace): 65 (JSC::GigacageSubspace::~GigacageSubspace): 66 (JSC::GigacageSubspace::tryAllocateAlignedMemory): 67 (JSC::GigacageSubspace::freeAlignedMemory): 68 (JSC::GigacageSubspace::canTradeBlocksWith): 69 * heap/GigacageSubspace.h: Added. 70 * heap/Heap.cpp: 71 (JSC::Heap::Heap): 72 (JSC::Heap::lastChanceToFinalize): 73 (JSC::Heap::finalize): 74 (JSC::Heap::sweepInFinalize): 75 (JSC::Heap::updateAllocationLimits): 76 (JSC::Heap::shouldDoFullCollection): 77 (JSC::Heap::collectIfNecessaryOrDefer): 78 (JSC::Heap::reportWebAssemblyFastMemoriesAllocated): Deleted. 79 (JSC::Heap::webAssemblyFastMemoriesThisCycleAtThreshold const): Deleted. 80 (JSC::Heap::sweepLargeAllocations): Deleted. 81 (JSC::Heap::didAllocateWebAssemblyFastMemories): Deleted. 
82 * heap/Heap.h: 83 * heap/LargeAllocation.cpp: 84 (JSC::LargeAllocation::tryCreate): 85 (JSC::LargeAllocation::destroy): 86 * heap/MarkedAllocator.cpp: 87 (JSC::MarkedAllocator::tryAllocateWithoutCollecting): 88 (JSC::MarkedAllocator::tryAllocateBlock): 89 * heap/MarkedBlock.cpp: 90 (JSC::MarkedBlock::tryCreate): 91 (JSC::MarkedBlock::Handle::Handle): 92 (JSC::MarkedBlock::Handle::~Handle): 93 (JSC::MarkedBlock::Handle::didAddToAllocator): 94 (JSC::MarkedBlock::Handle::subspace const): Deleted. 95 * heap/MarkedBlock.h: 96 (JSC::MarkedBlock::Handle::subspace const): 97 * heap/MarkedSpace.cpp: 98 (JSC::MarkedSpace::~MarkedSpace): 99 (JSC::MarkedSpace::freeMemory): 100 (JSC::MarkedSpace::prepareForAllocation): 101 (JSC::MarkedSpace::addMarkedAllocator): 102 (JSC::MarkedSpace::findEmptyBlockToSteal): Deleted. 103 * heap/MarkedSpace.h: 104 (JSC::MarkedSpace::firstAllocator const): 105 (JSC::MarkedSpace::allocatorForEmptyAllocation const): Deleted. 106 * heap/Subspace.cpp: 107 (JSC::Subspace::Subspace): 108 (JSC::Subspace::canTradeBlocksWith): 109 (JSC::Subspace::tryAllocateAlignedMemory): 110 (JSC::Subspace::freeAlignedMemory): 111 (JSC::Subspace::prepareForAllocation): 112 (JSC::Subspace::findEmptyBlockToSteal): 113 * heap/Subspace.h: 114 (JSC::Subspace::didCreateFirstAllocator): 115 * heap/SubspaceInlines.h: 116 (JSC::Subspace::forEachAllocator): 117 (JSC::Subspace::forEachMarkedBlock): 118 (JSC::Subspace::forEachNotEmptyMarkedBlock): 119 * jit/JITPropertyAccess.cpp: 120 (JSC::JIT::emitDoubleLoad): 121 (JSC::JIT::emitContiguousLoad): 122 (JSC::JIT::emitArrayStorageLoad): 123 (JSC::JIT::emitGenericContiguousPutByVal): 124 (JSC::JIT::emitArrayStoragePutByVal): 125 (JSC::JIT::emit_op_get_from_scope): 126 (JSC::JIT::emit_op_put_to_scope): 127 (JSC::JIT::emitIntTypedArrayGetByVal): 128 (JSC::JIT::emitFloatTypedArrayGetByVal): 129 (JSC::JIT::emitIntTypedArrayPutByVal): 130 (JSC::JIT::emitFloatTypedArrayPutByVal): 131 * jsc.cpp: 132 (fillBufferWithContentsOfFile): 133 (functionReadFile): 134 (gigacageDisabled): 135 (jscmain): 136 * llint/LowLevelInterpreter64.asm: 137 * runtime/ArrayBuffer.cpp: 138 (JSC::ArrayBufferContents::tryAllocate): 139 (JSC::ArrayBuffer::createAdopted): 140 (JSC::ArrayBuffer::createFromBytes): 141 (JSC::ArrayBuffer::tryCreate): 142 * runtime/IndexingHeader.h: 143 * runtime/InitializeThreading.cpp: 144 (JSC::initializeThreading): 145 * runtime/JSArrayBuffer.cpp: 146 * runtime/JSArrayBufferView.cpp: 147 (JSC::JSArrayBufferView::ConstructionContext::ConstructionContext): 148 (JSC::JSArrayBufferView::finalize): 149 * runtime/JSLock.cpp: 150 (JSC::JSLock::didAcquireLock): 151 * runtime/JSObject.h: 152 * runtime/Options.cpp: 153 (JSC::recomputeDependentOptions): 154 * runtime/Options.h: 155 * runtime/ScopedArgumentsTable.h: 156 * runtime/VM.cpp: 157 (JSC::VM::VM): 158 (JSC::VM::~VM): 159 (JSC::VM::gigacageDisabledCallback): 160 (JSC::VM::gigacageDisabled): 161 * runtime/VM.h: 162 (JSC::VM::fireGigacageEnabledIfNecessary): 163 (JSC::VM::gigacageEnabled): 164 * wasm/WasmB3IRGenerator.cpp: 165 (JSC::Wasm::B3IRGenerator::B3IRGenerator): 166 (JSC::Wasm::B3IRGenerator::emitCheckAndPreparePointer): 167 * wasm/WasmCodeBlock.cpp: 168 (JSC::Wasm::CodeBlock::isSafeToRun): 169 * wasm/WasmMemory.cpp: 170 (JSC::Wasm::makeString): 171 (JSC::Wasm::Memory::create): 172 (JSC::Wasm::Memory::~Memory): 173 (JSC::Wasm::Memory::addressIsInActiveFastMemory): 174 (JSC::Wasm::Memory::grow): 175 (JSC::Wasm::Memory::initializePreallocations): Deleted. 176 (JSC::Wasm::Memory::maxFastMemoryCount): Deleted. 
177 * wasm/WasmMemory.h: 178 * wasm/js/JSWebAssemblyInstance.cpp: 179 (JSC::JSWebAssemblyInstance::create): 180 * wasm/js/JSWebAssemblyMemory.cpp: 181 (JSC::JSWebAssemblyMemory::grow): 182 (JSC::JSWebAssemblyMemory::finishCreation): 183 * wasm/js/JSWebAssemblyMemory.h: 184 (JSC::JSWebAssemblyMemory::subspaceFor): 185 1 186 2017-07-31 Mark Lam <mark.lam@apple.com> 2 187 -
trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
r219981 r220118 465 465 0F5AE2C41DF4F2800066EFE1 /* VMInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = FE90BB3A1B7CF64E006B3F03 /* VMInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; 466 466 0F5B4A331C84F0D600F1B17E /* SlowPathReturnType.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5B4A321C84F0D600F1B17E /* SlowPathReturnType.h */; settings = {ATTRIBUTES = (Private, ); }; }; 467 0F5BF1561F22EB170029D91D /* GigacageSubspace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5BF1541F22EB170029D91D /* GigacageSubspace.cpp */; }; 468 0F5BF1571F22EB170029D91D /* GigacageSubspace.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1551F22EB170029D91D /* GigacageSubspace.h */; settings = {ATTRIBUTES = (Private, ); }; }; 467 469 0F5BF1631F2317120029D91D /* B3HoistLoopInvariantValues.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5BF1611F2317120029D91D /* B3HoistLoopInvariantValues.cpp */; }; 468 470 0F5BF1641F2317120029D91D /* B3HoistLoopInvariantValues.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1621F2317120029D91D /* B3HoistLoopInvariantValues.h */; }; … … 825 827 0FD8A32B17D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD8A32317D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.cpp */; }; 826 828 0FD8A32C17D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD8A32417D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.h */; }; 829 0FD9EA881F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD9EA861F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.cpp */; }; 830 0FD9EA891F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD9EA871F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.h */; }; 827 831 0FDB2CC9173DA520007B3C1B /* FTLAbbreviatedTypes.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FDB2CC7173DA51E007B3C1B /* FTLAbbreviatedTypes.h */; settings = {ATTRIBUTES = (Private, ); }; }; 828 832 0FDB2CCA173DA523007B3C1B /* FTLValueFromBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FDB2CC8173DA51E007B3C1B /* FTLValueFromBlock.h */; settings = {ATTRIBUTES = (Private, ); }; }; … … 3050 3054 0F5A6282188C98D40072C9DF /* FTLValueRange.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FTLValueRange.h; path = ftl/FTLValueRange.h; sourceTree = "<group>"; }; 3051 3055 0F5B4A321C84F0D600F1B17E /* SlowPathReturnType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SlowPathReturnType.h; sourceTree = "<group>"; }; 3056 0F5BF1541F22EB170029D91D /* GigacageSubspace.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = GigacageSubspace.cpp; sourceTree = "<group>"; }; 3057 0F5BF1551F22EB170029D91D /* GigacageSubspace.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GigacageSubspace.h; sourceTree = "<group>"; }; 3052 3058 0F5BF1611F2317120029D91D /* B3HoistLoopInvariantValues.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = B3HoistLoopInvariantValues.cpp; path = b3/B3HoistLoopInvariantValues.cpp; sourceTree = "<group>"; }; 3053 3059 0F5BF1621F2317120029D91D /* B3HoistLoopInvariantValues.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = B3HoistLoopInvariantValues.h; path = 
b3/B3HoistLoopInvariantValues.h; sourceTree = "<group>"; }; … … 3419 3425 0FD8A32317D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGToFTLForOSREntryDeferredCompilationCallback.cpp; path = dfg/DFGToFTLForOSREntryDeferredCompilationCallback.cpp; sourceTree = "<group>"; }; 3420 3426 0FD8A32417D51F5700CA2C40 /* DFGToFTLForOSREntryDeferredCompilationCallback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGToFTLForOSREntryDeferredCompilationCallback.h; path = dfg/DFGToFTLForOSREntryDeferredCompilationCallback.h; sourceTree = "<group>"; }; 3427 0FD9EA861F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = DFGFixedButterflyAccessUncagingPhase.cpp; path = dfg/DFGFixedButterflyAccessUncagingPhase.cpp; sourceTree = "<group>"; }; 3428 0FD9EA871F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = DFGFixedButterflyAccessUncagingPhase.h; path = dfg/DFGFixedButterflyAccessUncagingPhase.h; sourceTree = "<group>"; }; 3421 3429 0FDB2CC7173DA51E007B3C1B /* FTLAbbreviatedTypes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = FTLAbbreviatedTypes.h; path = ftl/FTLAbbreviatedTypes.h; sourceTree = "<group>"; }; 3422 3430 0FDB2CC8173DA51E007B3C1B /* FTLValueFromBlock.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = FTLValueFromBlock.h; path = ftl/FTLValueFromBlock.h; sourceTree = "<group>"; }; … … 6072 6080 2A343F7718A1749D0039B085 /* GCSegmentedArrayInlines.h */, 6073 6081 0F86A26E1D6F7B3100CB0C92 /* GCTypeMap.h */, 6082 0F5BF1541F22EB170029D91D /* GigacageSubspace.cpp */, 6083 0F5BF1551F22EB170029D91D /* GigacageSubspace.h */, 6074 6084 142E312B134FF0A600AFADB5 /* Handle.h */, 6075 6085 C28318FF16FE4B7D00157BFD /* HandleBlock.h */, … … 7356 7366 A78A976E179738B8009DF744 /* DFGFinalizer.cpp */, 7357 7367 A78A976F179738B8009DF744 /* DFGFinalizer.h */, 7368 0FD9EA861F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.cpp */, 7369 0FD9EA871F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.h */, 7358 7370 0F2BDC12151C5D4A00CD8910 /* DFGFixupPhase.cpp */, 7359 7371 0F2BDC13151C5D4A00CD8910 /* DFGFixupPhase.h */, … … 8491 8503 0FD0E5F21E46C8AF0006AB08 /* CollectingScope.h in Headers */, 8492 8504 0FA762051DB9242900B7A2FD /* CollectionScope.h in Headers */, 8505 0FD9EA891F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.h in Headers */, 8493 8506 0FD0E5E91E43D3490006AB08 /* CollectorPhase.h in Headers */, 8494 8507 A53243981856A489002ED692 /* CombinedDomains.json in Headers */, … … 9539 9552 FE3022D71E42857300BAC493 /* VMInspector.h in Headers */, 9540 9553 FE6F56DE1E64EAD600D17801 /* VMTraps.h in Headers */, 9554 0F5BF1571F22EB170029D91D /* GigacageSubspace.h in Headers */, 9541 9555 53F40E931D5A4AB30099A1B6 /* WasmB3IRGenerator.h in Headers */, 9542 9556 53CA730A1EA533D80076049D /* WasmBBQPlan.h in Headers */, … … 10448 10462 0F5D085D1B8CF99D001143B4 /* DFGNodeOrigin.cpp in Sources */, 10449 10463 0F2B9CE619D0BA7D00B1D1B5 /* DFGObjectAllocationSinkingPhase.cpp in Sources */, 10464 0FD9EA881F29162C00F32BEE /* DFGFixedButterflyAccessUncagingPhase.cpp in Sources */, 10450 10465 0F2B9CE819D0BA7D00B1D1B5 /* DFGObjectMaterializationData.cpp in Sources */, 10451 10466 86EC9DCF1328DF82002B2AD7 /* DFGOperations.cpp in Sources */, … … 
10975 10990 14469DEC107EC7E700650446 /* StringObject.cpp in Sources */, 10976 10991 14469DED107EC7E700650446 /* StringPrototype.cpp in Sources */, 10992 0F5BF1561F22EB170029D91D /* GigacageSubspace.cpp in Sources */, 10977 10993 9335F24D12E6765B002B5553 /* StringRecursionChecker.cpp in Sources */, 10978 10994 BCDE3B430E6C832D001453A7 /* Structure.cpp in Sources */, -
trunk/Source/JavaScriptCore/b3/B3InsertionSet.cpp
r213714 r220118 66 66 void InsertionSet::execute(BasicBlock* block) 67 67 { 68 for (Insertion& insertion : m_insertions) 69 insertion.element()->owner = block; 68 70 bubbleSort(m_insertions.begin(), m_insertions.end()); 69 71 executeInsertions(block->m_values, m_insertions); -
trunk/Source/JavaScriptCore/dfg/DFGAbstractInterpreterInlines.h
r219981 r220118 2385 2385 break; 2386 2386 case GetButterfly: 2387 case GetButterflyWithoutCaging: 2387 2388 case AllocatePropertyStorage: 2388 2389 case ReallocatePropertyStorage: -
trunk/Source/JavaScriptCore/dfg/DFGArgumentsEliminationPhase.cpp
r219997 r220118 359 359 360 360 case GetButterfly: 361 case GetButterflyWithoutCaging: 361 362 // This barely works. The danger is that the GetButterfly is used by something that 362 363 // does something escaping to a candidate. Fortunately, the only butterfly-using ops -
trunk/Source/JavaScriptCore/dfg/DFGClobberize.h
r219981 r220118 1012 1012 return; 1013 1013 1014 case GetButterflyWithoutCaging: 1015 read(JSObject_butterfly); 1016 def(HeapLocation(ButterflyWithoutCagingLoc, JSObject_butterfly, node->child1()), LazyNode(node)); 1017 return; 1018 1014 1019 case CheckSubClass: 1015 1020 def(PureValue(node, node->classInfo())); -
trunk/Source/JavaScriptCore/dfg/DFGDoesGC.cpp
r218084 r220118 116 116 case GetExecutable: 117 117 case GetButterfly: 118 case GetButterflyWithoutCaging: 118 119 case CheckSubClass: 119 120 case CheckArray: -
trunk/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
r219981 r220118 1399 1399 case CheckCell: 1400 1400 case CreateThis: 1401 case GetButterfly: { 1401 case GetButterfly: 1402 case GetButterflyWithoutCaging: { 1402 1403 fixEdge<CellUse>(node->child1()); 1403 1404 break; -
trunk/Source/JavaScriptCore/dfg/DFGHeapLocation.cpp
r217202 r220118 97 97 return; 98 98 99 case ButterflyWithoutCagingLoc: 100 out.print("ButterflyWithoutCagingLoc"); 101 return; 102 99 103 case CheckTypeInfoFlagsLoc: 100 104 out.print("CheckTypeInfoFlagsLoc"); -
trunk/Source/JavaScriptCore/dfg/DFGHeapLocation.h
r217202 r220118 40 40 VectorLengthLoc, 41 41 ButterflyLoc, 42 ButterflyWithoutCagingLoc, 42 43 CheckTypeInfoFlagsLoc, 43 44 OverridesHasInstanceLoc, -
trunk/Source/JavaScriptCore/dfg/DFGNodeType.h
r218084 r220118 206 206 macro(ReallocatePropertyStorage, NodeMustGenerate | NodeResultStorage) \ 207 207 macro(GetButterfly, NodeResultStorage) \ 208 macro(GetButterflyWithoutCaging, NodeResultStorage) \ 208 209 macro(NukeStructureAndSetButterfly, NodeMustGenerate) \ 209 210 macro(CheckArray, NodeMustGenerate) \ -
trunk/Source/JavaScriptCore/dfg/DFGPlan.cpp
r216815 r220118 42 42 #include "DFGDCEPhase.h" 43 43 #include "DFGFailedFinalizer.h" 44 #include "DFGFixedButterflyAccessUncagingPhase.h" 44 45 #include "DFGFixupPhase.h" 45 46 #include "DFGGraphSafepoint.h" … … 469 470 RUN_PHASE(performGlobalStoreBarrierInsertion); 470 471 RUN_PHASE(performStoreBarrierClustering); 472 RUN_PHASE(performFixedButterflyAccessUncaging); 471 473 if (Options::useMovHintRemoval()) 472 474 RUN_PHASE(performMovHintRemoval); -
trunk/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
r218084 r220118 840 840 } 841 841 case GetButterfly: 842 case GetButterflyWithoutCaging: 842 843 case GetIndexedPropertyStorage: 843 844 case AllocatePropertyStorage: -
trunk/Source/JavaScriptCore/dfg/DFGSafeToExecute.h
r218084 r220118 216 216 case GetExecutable: 217 217 case GetButterfly: 218 case GetButterflyWithoutCaging: 218 219 case CallDOMGetter: 219 220 case CallDOM: -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
r219981 r220118 7984 7984 7985 7985 m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); 7986 7987 // FIXME: Implement caging! 7988 // https://bugs.webkit.org/show_bug.cgi?id=174918 7986 7989 7987 7990 storageResult(resultGPR, node); -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
r218729 r220118 4471 4471 4472 4472 case GetButterfly: 4473 case GetButterflyWithoutCaging: 4473 4474 compileGetButterfly(node); 4474 4475 break; -
trunk/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
r218729 r220118 4657 4657 4658 4658 case GetButterfly: 4659 case GetButterflyWithoutCaging: 4659 4660 compileGetButterfly(node); 4660 4661 break; -
trunk/Source/JavaScriptCore/dfg/DFGTypeCheckHoistingPhase.cpp
r211237 r220118 249 249 case NukeStructureAndSetButterfly: 250 250 case GetButterfly: 251 case GetButterflyWithoutCaging: 251 252 case GetByVal: 252 253 case PutByValDirect: … … 325 326 case ReallocatePropertyStorage: 326 327 case GetButterfly: 328 case GetButterflyWithoutCaging: 327 329 case GetByVal: 328 330 case PutByValDirect: -
trunk/Source/JavaScriptCore/ftl/FTLCapabilities.cpp
r218084 r220118 70 70 case PutStructure: 71 71 case GetButterfly: 72 case GetButterflyWithoutCaging: 72 73 case NewObject: 73 74 case NewArray: -
trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
r219981 r220118 88 88 #include <unordered_set> 89 89 #include <wtf/Box.h> 90 #include <wtf/Gigacage.h> 90 91 91 92 namespace JSC { namespace FTL { … … 665 666 break; 666 667 case GetButterfly: 668 case GetButterflyWithoutCaging: 667 669 compileGetButterfly(); 668 670 break; … … 3232 3234 void compileGetButterfly() 3233 3235 { 3234 setStorage(m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly)); 3236 LValue butterfly = m_out.loadPtr(lowCell(m_node->child1()), m_heaps.JSObject_butterfly); 3237 if (m_node->op() != GetButterflyWithoutCaging) 3238 butterfly = caged(butterfly); 3239 setStorage(butterfly); 3235 3240 } 3236 3241 … … 3268 3273 3269 3274 DFG_ASSERT(m_graph, m_node, isTypedView(m_node->arrayMode().typedArrayType())); 3270 setStorage( m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector));3275 setStorage(caged(m_out.loadPtr(cell, m_heaps.JSArrayBufferView_vector))); 3271 3276 } 3272 3277 … … 3510 3515 m_out.load32NonNegative(base, m_heaps.DirectArguments_length))); 3511 3516 3517 // FIXME: I guess we need to cage DirectArguments? 3518 // https://bugs.webkit.org/show_bug.cgi?id=174920 3512 3519 TypedPointer address = m_out.baseIndex( 3513 3520 m_heaps.DirectArguments_storage, base, m_out.zeroExtPtr(index)); … … 3541 3548 LValue arguments = m_out.loadPtr(table, m_heaps.ScopedArgumentsTable_arguments); 3542 3549 3550 // FIXME: I guess we need to cage ScopedArguments? 3551 // https://bugs.webkit.org/show_bug.cgi?id=174921 3543 3552 TypedPointer address = m_out.baseIndex( 3544 3553 m_heaps.scopedArgumentsTableArguments, arguments, m_out.zeroExtPtr(index)); … … 3549 3558 m_out.equal(scopeOffset, m_out.constInt32(ScopeOffset::invalidOffset))); 3550 3559 3560 // FIXME: I guess we need to cage JSEnvironmentRecord? 3561 // https://bugs.webkit.org/show_bug.cgi?id=174922 3551 3562 address = m_out.baseIndex( 3552 3563 m_heaps.JSEnvironmentRecord_variables, scope, m_out.zeroExtPtr(scopeOffset)); … … 3556 3567 m_out.appendTo(overflowCase, continuation); 3557 3568 3569 // FIXME: I guess we need to cage overflow storage? 3570 // https://bugs.webkit.org/show_bug.cgi?id=174923 3558 3571 address = m_out.baseIndex( 3559 3572 m_heaps.ScopedArguments_overflowStorage, base, … … 5379 5392 m_out.appendTo(is8Bit, is16Bit); 5380 5393 5394 // FIXME: Need to cage strings! 5395 // https://bugs.webkit.org/show_bug.cgi?id=174924 5381 5396 ValueFromBlock char8Bit = m_out.anchor( 5382 5397 m_out.load8ZeroExt32(m_out.baseIndex( … … 5480 5495 LBasicBlock lastNext = m_out.appendTo(is8Bit, is16Bit); 5481 5496 5497 // FIXME: need to cage strings! 5498 // https://bugs.webkit.org/show_bug.cgi?id=174924 5482 5499 ValueFromBlock char8Bit = m_out.anchor( 5483 5500 m_out.load8ZeroExt32(m_out.baseIndex( … … 8076 8093 LValue unmaskedIndex = m_out.phi(Int32, indexStart); 8077 8094 LValue index = m_out.bitAnd(mask, unmaskedIndex); 8095 // FIXME: I think these buffers are caged? 
8096 // https://bugs.webkit.org/show_bug.cgi?id=174925 8078 8097 LValue hashMapBucket = m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), buffer, m_out.zeroExt(index, Int64), ScaleEight)); 8079 8098 ValueFromBlock bucketResult = m_out.anchor(hashMapBucket); … … 8851 8870 int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue); 8852 8871 ValueFromBlock outOfLineResult = m_out.anchor( 8853 m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), storage, realIndex, ScaleEight, offsetOfFirstProperty)));8872 m_out.load64(m_out.baseIndex(m_heaps.properties.atAnyNumber(), caged(storage), realIndex, ScaleEight, offsetOfFirstProperty))); 8854 8873 m_out.jump(continuation); 8855 8874 … … 10269 10288 m_out.appendTo(loopBody, slowPath); 10270 10289 10290 // FIXME: Strings needs to be caged. 10291 // https://bugs.webkit.org/show_bug.cgi?id=174924 10271 10292 LValue byte = m_out.load8ZeroExt32(m_out.baseIndex(m_heaps.characters8, buffer, m_out.zeroExtPtr(index))); 10272 10293 LValue isInvalidAsciiRange = m_out.bitAnd(byte, m_out.constInt32(~0x7F)); … … 11592 11613 m_out.appendTo(performStore, lastNext); 11593 11614 } 11615 } 11616 11617 LValue caged(LValue ptr) 11618 { 11619 if (vm().gigacageEnabled().isStillValid()) { 11620 m_graph.watchpoints().addLazily(vm().gigacageEnabled()); 11621 11622 LValue basePtr = m_out.constIntPtr(g_gigacageBasePtr); 11623 LValue mask = m_out.constIntPtr(GIGACAGE_MASK); 11624 11625 // We don't have to worry about B3 messing up the bitAnd. Also, we want to get B3's excellent 11626 // codegen for 2-operand andq on x86-64. 11627 LValue masked = m_out.bitAnd(ptr, mask); 11628 11629 // But B3 will currently mess up the code generation of this add. Basically, any offset from what we 11630 // compute here will get reassociated and folded with g_gigacageBasePtr. There's a world in which 11631 // moveConstants() observes that it needs to reassociate in order to hoist the big constants. But 11632 // it's much easier to just block B3's badness here. That's what we do for now. 11633 PatchpointValue* patchpoint = m_out.patchpoint(pointerType()); 11634 patchpoint->appendSomeRegister(basePtr); 11635 patchpoint->appendSomeRegister(masked); 11636 patchpoint->setGenerator( 11637 [] (CCallHelpers& jit, const StackmapGenerationParams& params) { 11638 jit.addPtr(params[1].gpr(), params[2].gpr(), params[0].gpr()); 11639 }); 11640 patchpoint->effects = Effects::none(); 11641 return patchpoint; 11642 } 11643 11644 return ptr; 11594 11645 } 11595 11646 -
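Note that compileGetIndexedPropertyStorage above now wraps the typed-array vector load in caged() as well, so indexed accesses go through a pointer that has been forced into the cage before use. A sketch of the resulting access pattern, reusing the caged() helper from the sketch after the ChangeLog; the function shape is illustrative, not the emitted JIT code.

```cpp
#include <cstdint>

// Sketch: a bounds-checked typed-array byte load through a caged vector
// pointer. The length check guards the index exactly as before; caging adds
// the guarantee that even a corrupted `vector` can only be dereferenced
// inside the gigacage.
static uint8_t loadTypedArrayByte(void* vector, uint32_t index, uint32_t length)
{
    if (index >= length)
        return 0; // the real JIT code would branch to a slow path / OSR exit here
    extern void* caged(void*); // see the sketch after the ChangeLog
    return static_cast<uint8_t*>(caged(vector))[index];
}
```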
trunk/Source/JavaScriptCore/heap/Heap.cpp
r220069 r220118 269 269 , m_sizeBeforeLastEdenCollect(0) 270 270 , m_bytesAllocatedThisCycle(0) 271 , m_webAssemblyFastMemoriesAllocatedThisCycle(0)272 271 , m_bytesAbandonedSinceLastFullCollect(0) 273 272 , m_maxEdenSize(m_minBytesPerCycle) … … 437 436 sweepAllLogicallyEmptyWeakBlocks(); 438 437 438 m_objectSpace.freeMemory(); 439 439 440 if (Options::logGC()) 440 441 dataLog((MonotonicTime::now() - before).milliseconds(), "ms]\n"); … … 485 486 m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); 486 487 reportExtraMemoryAllocatedSlowCase(size); 487 }488 489 void Heap::reportWebAssemblyFastMemoriesAllocated(size_t count)490 {491 didAllocateWebAssemblyFastMemories(count);492 collectIfNecessaryOrDefer();493 }494 495 bool Heap::webAssemblyFastMemoriesThisCycleAtThreshold() const496 {497 // WebAssembly fast memories use large amounts of virtual memory and we498 // don't know how many can exist in this process. We keep track of the most499 // fast memories that have existed at any point in time. The GC uses this500 // top watermark as an indication of whether recent allocations should cause501 // a collection: get too close and we may be close to the actual limit.502 size_t fastMemoryThreshold = std::max<size_t>(1, Wasm::Memory::maxFastMemoryCount() / 2);503 return m_webAssemblyFastMemoriesAllocatedThisCycle > fastMemoryThreshold;504 488 } 505 489 … … 1998 1982 1999 1983 { 2000 SweepingScope helpingGCScope(*this);1984 SweepingScope sweepingScope(*this); 2001 1985 deleteUnmarkedCompiledCode(); 2002 1986 deleteSourceProviderCaches(); 2003 sweep LargeAllocations();1987 sweepInFinalize(); 2004 1988 } 2005 1989 … … 2052 2036 } 2053 2037 2054 void Heap::sweep LargeAllocations()2038 void Heap::sweepInFinalize() 2055 2039 { 2056 2040 m_objectSpace.sweepLargeAllocations(); 2041 2042 auto sweepBlock = [&] (MarkedBlock::Handle* handle) { 2043 handle->sweep(nullptr); 2044 }; 2045 2046 vm()->eagerlySweptDestructibleObjectSpace.forEachMarkedBlock(sweepBlock); 2057 2047 } 2058 2048 … … 2161 2151 dataLog("\n"); 2162 2152 dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n"); 2163 dataLog("webAssemblyFastMemoriesAllocatedThisCycle = ", m_webAssemblyFastMemoriesAllocatedThisCycle, "\n");2164 2153 } 2165 2154 … … 2244 2233 dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n"); 2245 2234 m_bytesAllocatedThisCycle = 0; 2246 m_webAssemblyFastMemoriesAllocatedThisCycle = 0;2247 2235 2248 2236 if (Options::logGC()) … … 2318 2306 } 2319 2307 2320 void Heap::didAllocateWebAssemblyFastMemories(size_t count)2321 {2322 m_webAssemblyFastMemoriesAllocatedThisCycle += count;2323 }2324 2325 2308 bool Heap::isValidAllocation(size_t) 2326 2309 { … … 2375 2358 2376 2359 if (!m_currentRequest.scope) 2377 return m_shouldDoFullCollection || webAssemblyFastMemoriesThisCycleAtThreshold() ||overCriticalMemoryThreshold();2360 return m_shouldDoFullCollection || overCriticalMemoryThreshold(); 2378 2361 return *m_currentRequest.scope == CollectionScope::Full; 2379 2362 } … … 2533 2516 #endif 2534 2517 2535 if (!webAssemblyFastMemoriesThisCycleAtThreshold() 2536 && m_bytesAllocatedThisCycle <= bytesAllowedThisCycle) 2518 if (m_bytesAllocatedThisCycle <= bytesAllowedThisCycle) 2537 2519 return; 2538 2520 } -
trunk/Source/JavaScriptCore/heap/Heap.h
r218794 r220118 205 205 JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t); 206 206 207 // Same as above, but for uncommitted virtual memory allocations caused by208 // WebAssembly fast memories. This is counted separately because virtual209 // memory is logically a different type of resource than committed physical210 // memory. We can often allocate huge amounts of virtual memory (think211 // gigabytes) without adversely affecting regular GC'd memory. At some point212 // though, too much virtual memory becomes prohibitive and we want to213 // collect GC-able objects which keep this virtual memory alive.214 // This is counted in number of fast memories, not bytes.215 void reportWebAssemblyFastMemoriesAllocated(size_t);216 bool webAssemblyFastMemoriesThisCycleAtThreshold() const;217 218 207 #if ENABLE(RESOURCE_USAGE) 219 208 // Use this API to report the subset of extra memory that lives outside this process. … … 265 254 266 255 void didAllocate(size_t); 267 void didAllocateWebAssemblyFastMemories(size_t);268 256 bool isPagedOut(double deadline); 269 257 … … 502 490 void removeDeadHeapSnapshotNodes(HeapProfiler&); 503 491 void finalize(); 504 void sweep LargeAllocations();492 void sweepInFinalize(); 505 493 506 494 void sweepAllLogicallyEmptyWeakBlocks(); … … 549 537 550 538 size_t m_bytesAllocatedThisCycle; 551 size_t m_webAssemblyFastMemoriesAllocatedThisCycle;552 539 size_t m_bytesAbandonedSinceLastFullCollect; 553 540 size_t m_maxEdenSize; -
trunk/Source/JavaScriptCore/heap/LargeAllocation.cpp
r210844 r220118 35 35 LargeAllocation* LargeAllocation::tryCreate(Heap& heap, size_t size, Subspace* subspace) 36 36 { 37 void* space = tryFastAlignedMalloc(alignment, headerSize() + size);37 void* space = subspace->tryAllocateAlignedMemory(alignment, headerSize() + size); 38 38 if (!space) 39 39 return nullptr; … … 107 107 void LargeAllocation::destroy() 108 108 { 109 Subspace* subspace = m_subspace; 109 110 this->~LargeAllocation(); 110 fastAlignedFree(this);111 subspace->freeAlignedMemory(this); 111 112 } 112 113 -
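LargeAllocation (and MarkedBlock, below) now obtain and release their backing memory through the owning Subspace's virtual hooks instead of calling tryFastAlignedMalloc/fastAlignedFree directly. That seam is what lets GigacageSubspace place auxiliary memory inside the cage. The following is an assumed sketch of the override: GigacageSubspace.cpp is added by this patch but its body is not shown on this page, and Gigacage::tryAlignedMalloc/alignedFree are placeholder names for whatever cage-aware aligned allocator WTF exposes.

```cpp
// Hypothetical shape of the GigacageSubspace overrides. The default Subspace
// implementations, visible in Subspace.cpp below, fall back to the ordinary
// fastMalloc heap.
void* GigacageSubspace::tryAllocateAlignedMemory(size_t alignment, size_t size)
{
    return Gigacage::tryAlignedMalloc(alignment, size); // assumed WTF API
}

void GigacageSubspace::freeAlignedMemory(void* basePtr)
{
    Gigacage::alignedFree(basePtr); // assumed WTF API
}
```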
trunk/Source/JavaScriptCore/heap/MarkedAllocator.cpp
r219897 r220118 104 104 105 105 if (Options::stealEmptyBlocksFromOtherAllocators()) { 106 if (MarkedBlock::Handle* block = markedSpace().findEmptyBlockToSteal()) { 106 if (MarkedBlock::Handle* block = m_subspace->findEmptyBlockToSteal()) { 107 RELEASE_ASSERT(block->subspace()->canTradeBlocksWith(m_subspace)); 108 RELEASE_ASSERT(m_subspace->canTradeBlocksWith(block->subspace())); 109 107 110 block->sweep(nullptr); 108 111 … … 241 244 SuperSamplerScope superSamplerScope(false); 242 245 243 MarkedBlock::Handle* handle = MarkedBlock::tryCreate(*m_heap );246 MarkedBlock::Handle* handle = MarkedBlock::tryCreate(*m_heap, subspace()); 244 247 if (!handle) 245 248 return nullptr; -
trunk/Source/JavaScriptCore/heap/MarkedBlock.cpp
r219897 r220118 44 44 static size_t balance; 45 45 46 MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap )46 MarkedBlock::Handle* MarkedBlock::tryCreate(Heap& heap, Subspace* subspace) 47 47 { 48 48 if (computeBalance) { … … 51 51 dataLog("MarkedBlock Balance: ", balance, "\n"); 52 52 } 53 void* blockSpace = tryFastAlignedMalloc(blockSize, blockSize);53 void* blockSpace = subspace->tryAllocateAlignedMemory(blockSize, blockSize); 54 54 if (!blockSpace) 55 55 return nullptr; 56 56 if (scribbleFreeCells()) 57 57 scribble(blockSpace, blockSize); 58 return new Handle(heap, blockSpace); 59 } 60 61 MarkedBlock::Handle::Handle(Heap& heap, void* blockSpace) 62 : m_weakSet(heap.vm(), CellContainer()) 58 return new Handle(heap, subspace, blockSpace); 59 } 60 61 MarkedBlock::Handle::Handle(Heap& heap, Subspace* subspace, void* blockSpace) 62 : m_subspace(subspace) 63 , m_weakSet(heap.vm(), CellContainer()) 63 64 , m_newlyAllocatedVersion(MarkedSpace::nullVersion) 64 65 { … … 73 74 { 74 75 Heap& heap = *this->heap(); 76 Subspace* subspace = this->subspace(); 75 77 if (computeBalance) { 76 78 balance--; … … 80 82 removeFromAllocator(); 81 83 m_block->~MarkedBlock(); 82 fastAlignedFree(m_block);84 subspace->freeAlignedMemory(m_block); 83 85 heap.didFreeBlock(blockSize); 84 86 } … … 333 335 m_allocator = allocator; 334 336 337 RELEASE_ASSERT(m_subspace->canTradeBlocksWith(allocator->subspace())); 338 RELEASE_ASSERT(allocator->subspace()->canTradeBlocksWith(m_subspace)); 339 340 m_subspace = allocator->subspace(); 341 335 342 size_t cellSize = allocator->cellSize(); 336 343 m_atomsPerCell = (cellSize + atomSize - 1) / atomSize; … … 389 396 out.print(comma, name, ":", bitvector[index()] ? "YES" : "no"); 390 397 }); 391 }392 393 Subspace* MarkedBlock::Handle::subspace() const394 {395 return allocator()->subspace();396 398 } 397 399 -
trunk/Source/JavaScriptCore/heap/MarkedBlock.h
r218794 r220118 200 200 201 201 private: 202 Handle(Heap&, void*);202 Handle(Heap&, Subspace*, void*); 203 203 204 204 enum SweepDestructionMode { BlockHasNoDestructors, BlockHasDestructors, BlockHasDestructorsAndCollectorIsRunning }; … … 219 219 void setIsFreeListed(); 220 220 221 MarkedBlock::Handle* m_prev ;222 MarkedBlock::Handle* m_next ;221 MarkedBlock::Handle* m_prev { nullptr }; 222 MarkedBlock::Handle* m_next { nullptr }; 223 223 224 224 size_t m_atomsPerCell { std::numeric_limits<size_t>::max() }; … … 229 229 AllocatorAttributes m_attributes; 230 230 bool m_isFreeListed { false }; 231 231 232 Subspace* m_subspace { nullptr }; 232 233 MarkedAllocator* m_allocator { nullptr }; 233 234 size_t m_index { std::numeric_limits<size_t>::max() }; … … 239 240 }; 240 241 241 static MarkedBlock::Handle* tryCreate(Heap& );242 static MarkedBlock::Handle* tryCreate(Heap&, Subspace*); 242 243 243 244 Handle& handle(); … … 396 397 } 397 398 399 inline Subspace* MarkedBlock::Handle::subspace() const 400 { 401 return m_subspace; 402 } 403 398 404 inline Heap* MarkedBlock::Handle::heap() const 399 405 { -
trunk/Source/JavaScriptCore/heap/MarkedSpace.cpp
r219702 r220118 204 204 MarkedSpace::~MarkedSpace() 205 205 { 206 ASSERT(!m_blocks.set().size()); 207 } 208 209 void MarkedSpace::freeMemory() 210 { 206 211 forEachBlock( 207 212 [&] (MarkedBlock::Handle* block) { … … 210 215 for (LargeAllocation* allocation : m_largeAllocations) 211 216 allocation->destroy(); 212 ASSERT(!m_blocks.set().size());213 217 } 214 218 … … 255 259 void MarkedSpace::prepareForAllocation() 256 260 { 257 forEachAllocator( 258 [&] (MarkedAllocator& allocator) -> IterationStatus { 259 allocator.prepareForAllocation(); 260 return IterationStatus::Continue; 261 }); 261 for (Subspace* subspace : m_subspaces) 262 subspace->prepareForAllocation(); 262 263 263 264 m_activeWeakSets.takeFrom(m_newActiveWeakSets); … … 268 269 m_largeAllocationsNurseryOffsetForSweep = 0; 269 270 m_largeAllocationsNurseryOffset = m_largeAllocations.size(); 270 271 m_allocatorForEmptyAllocation = m_firstAllocator;272 271 } 273 272 … … 513 512 m_newActiveWeakSets.append(&block->weakSet()); 514 513 } 515 }516 517 MarkedBlock::Handle* MarkedSpace::findEmptyBlockToSteal()518 {519 for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) {520 if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal())521 return block;522 }523 return nullptr;524 514 } 525 515 … … 573 563 m_firstAllocator = allocator; 574 564 m_lastAllocator = allocator; 575 m_allocatorForEmptyAllocation = allocator; 565 for (Subspace* subspace : m_subspaces) 566 subspace->didCreateFirstAllocator(allocator); 576 567 } else { 577 568 m_lastAllocator->setNextAllocator(allocator); -
trunk/Source/JavaScriptCore/heap/MarkedSpace.h
r213883 r220118 94 94 95 95 void lastChanceToFinalize(); // You must call stopAllocating before you call this. 96 void freeMemory(); 96 97 97 98 static size_t optimalSizeFor(size_t); … … 156 157 157 158 MarkedAllocator* firstAllocator() const { return m_firstAllocator; } 158 MarkedAllocator* allocatorForEmptyAllocation() const { return m_allocatorForEmptyAllocation; }159 160 MarkedBlock::Handle* findEmptyBlockToSteal();161 159 162 160 Lock& allocatorLock() { return m_allocatorLock; } … … 216 214 MarkedAllocator* m_firstAllocator { nullptr }; 217 215 MarkedAllocator* m_lastAllocator { nullptr }; 218 MarkedAllocator* m_allocatorForEmptyAllocation { nullptr };219 216 220 217 friend class HeapVerifier; -
trunk/Source/JavaScriptCore/heap/Subspace.cpp
r217711 r220118 59 59 , m_name(name) 60 60 , m_attributes(attributes) 61 , m_allocatorForEmptyAllocation(m_space.firstAllocator()) 61 62 { 62 63 // It's remotely possible that we're GCing right now even if the client is careful to only … … 88 89 } 89 90 91 bool Subspace::canTradeBlocksWith(Subspace*) 92 { 93 return true; 94 } 95 96 void* Subspace::tryAllocateAlignedMemory(size_t alignment, size_t size) 97 { 98 void* result = tryFastAlignedMalloc(alignment, size); 99 return result; 100 } 101 102 void Subspace::freeAlignedMemory(void* basePtr) 103 { 104 fastAlignedFree(basePtr); 105 WTF::compilerFence(); 106 } 107 90 108 // The reason why we distinguish between allocate and tryAllocate is to minimize the number of 91 109 // checks on the allocation path in both cases. Likewise, the reason why we have overloads with and … … 134 152 didAllocate(result); 135 153 return result; 154 } 155 156 void Subspace::prepareForAllocation() 157 { 158 forEachAllocator( 159 [&] (MarkedAllocator& allocator) { 160 allocator.prepareForAllocation(); 161 }); 162 163 m_allocatorForEmptyAllocation = m_space.firstAllocator(); 164 } 165 166 MarkedBlock::Handle* Subspace::findEmptyBlockToSteal() 167 { 168 for (; m_allocatorForEmptyAllocation; m_allocatorForEmptyAllocation = m_allocatorForEmptyAllocation->nextAllocator()) { 169 Subspace* otherSubspace = m_allocatorForEmptyAllocation->subspace(); 170 if (!canTradeBlocksWith(otherSubspace)) 171 continue; 172 if (!otherSubspace->canTradeBlocksWith(this)) 173 continue; 174 175 if (MarkedBlock::Handle* block = m_allocatorForEmptyAllocation->findEmptyBlockToSteal()) 176 return block; 177 } 178 return nullptr; 136 179 } 137 180 -
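Block stealing is now a negotiated trade: findEmptyBlockToSteal only takes a block when both subspaces consent, mirroring the RELEASE_ASSERTs added in MarkedAllocator.cpp above. Here is a self-contained toy model of the handshake; the GigacageSubspace policy shown is a plausible guess, since that override's body does not appear in this diff.

```cpp
struct Subspace {
    virtual ~Subspace() = default;
    // Default policy from this patch: ordinary subspaces trade freely.
    virtual bool canTradeBlocksWith(Subspace*) { return true; }
};

// Hypothetical cage-backed policy: blocks backed by cage memory must not
// migrate into malloc-backed subspaces, and vice versa.
struct GigacageSubspace : Subspace {
    bool canTradeBlocksWith(Subspace* other) override { return other == this; }
};

// The steal path checks both directions, so either side can veto the trade.
inline bool canSteal(Subspace& thief, Subspace& victim)
{
    return thief.canTradeBlocksWith(&victim) && victim.canTradeBlocksWith(&thief);
}
```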
trunk/Source/JavaScriptCore/heap/Subspace.h
r217711 r220118 60 60 virtual void destroy(VM&, JSCell*); 61 61 62 virtual bool canTradeBlocksWith(Subspace* other); 63 virtual void* tryAllocateAlignedMemory(size_t alignment, size_t size); 64 virtual void freeAlignedMemory(void*); 65 62 66 MarkedAllocator* tryAllocatorFor(size_t); 63 67 MarkedAllocator* allocatorFor(size_t); … … 68 72 JS_EXPORT_PRIVATE void* tryAllocate(size_t); 69 73 JS_EXPORT_PRIVATE void* tryAllocate(GCDeferralContext*, size_t); 74 75 void prepareForAllocation(); 76 77 void didCreateFirstAllocator(MarkedAllocator* allocator) { m_allocatorForEmptyAllocation = allocator; } 78 79 // Finds an empty block from any Subspace that agrees to trade blocks with us. 80 MarkedBlock::Handle* findEmptyBlockToSteal(); 81 82 template<typename Func> 83 void forEachAllocator(const Func&); 70 84 71 85 template<typename Func> … … 104 118 std::array<MarkedAllocator*, MarkedSpace::numSizeClasses> m_allocatorForSizeStep; 105 119 MarkedAllocator* m_firstAllocator { nullptr }; 120 MarkedAllocator* m_allocatorForEmptyAllocation { nullptr }; // Uses the MarkedSpace linked list of blocks. 106 121 SentinelLinkedList<LargeAllocation, BasicRawSentinelNode<LargeAllocation>> m_largeAllocations; 107 122 }; -
trunk/Source/JavaScriptCore/heap/SubspaceInlines.h
r217711 r220118 35 35 36 36 template<typename Func> 37 void Subspace::forEachAllocator(const Func& func) 38 { 39 for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocatorInSubspace()) 40 func(*allocator); 41 } 42 43 template<typename Func> 37 44 void Subspace::forEachMarkedBlock(const Func& func) 38 45 { 39 for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocatorInSubspace()) 40 allocator->forEachBlock(func); 46 forEachAllocator( 47 [&] (MarkedAllocator& allocator) { 48 allocator.forEachBlock(func); 49 }); 41 50 } 42 51 … … 44 53 void Subspace::forEachNotEmptyMarkedBlock(const Func& func) 45 54 { 46 for (MarkedAllocator* allocator = m_firstAllocator; allocator; allocator = allocator->nextAllocatorInSubspace()) 47 allocator->forEachNotEmptyBlock(func); 55 forEachAllocator( 56 [&] (MarkedAllocator& allocator) { 57 allocator.forEachNotEmptyBlock(func); 58 }); 48 59 } 49 60 -
trunk/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
r218412 r220118 173 173 174 174 badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape)); 175 // FIXME: Should do caging. 176 // https://bugs.webkit.org/show_bug.cgi?id=175037 175 177 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); 176 178 slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()))); … … 186 188 187 189 badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape)); 190 // FIXME: Should do caging. 191 // https://bugs.webkit.org/show_bug.cgi?id=175037 188 192 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); 189 193 slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()))); … … 201 205 badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)); 202 206 207 // FIXME: Should do caging. 208 // https://bugs.webkit.org/show_bug.cgi?id=175037 203 209 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); 204 210 slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset()))); … … 348 354 badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape)); 349 355 356 // FIXME: Should do caging. 357 // https://bugs.webkit.org/show_bug.cgi?id=175037 350 358 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); 351 359 Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())); … … 403 411 404 412 badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape)); 413 // FIXME: Should do caging. 414 // https://bugs.webkit.org/show_bug.cgi?id=175037 405 415 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2); 406 416 slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset()))); … … 914 924 isOutOfLine.link(this); 915 925 } 926 // FIXME: Should do caging. 927 // https://bugs.webkit.org/show_bug.cgi?id=175037 916 928 loadPtr(Address(base, JSObject::butterflyOffset()), scratch); 917 929 neg32(offset); … … 1055 1067 emitGetVirtualRegister(value, regT2); 1056 1068 1069 // FIXME: Should do caging. 1070 // https://bugs.webkit.org/show_bug.cgi?id=175037 1057 1071 loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0); 1058 1072 loadPtr(operandSlot, regT1); … … 1576 1590 badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type))); 1577 1591 slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength()))); 1592 // FIXME: Should do caging. 1593 // https://bugs.webkit.org/show_bug.cgi?id=175037 1578 1594 loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch); 1579 1595 … … 1647 1663 badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type))); 1648 1664 slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength()))); 1665 // FIXME: Should do caging. 1666 // https://bugs.webkit.org/show_bug.cgi?id=175037 1649 1667 loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch); 1650 1668 … … 1714 1732 // We would be loading this into base as in get_by_val, except that the slow 1715 1733 // path expects the base to be unclobbered. 1734 // FIXME: Should do caging. 
1735 // https://bugs.webkit.org/show_bug.cgi?id=175037 1716 1736 loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch); 1717 1737 … … 1797 1817 // We would be loading this into base as in get_by_val, except that the slow 1798 1818 // path expects the base to be unclobbered. 1819 // FIXME: Should do caging. 1820 // https://bugs.webkit.org/show_bug.cgi?id=175037 1799 1821 loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch); 1800 1822 -
trunk/Source/JavaScriptCore/jsc.cpp
r219981 r220118 79 79 #include "TypeProfiler.h" 80 80 #include "TypeProfilerLog.h" 81 #include "TypedArrayInlines.h" 81 82 #include "WasmContext.h" 82 83 #include "WasmFaultSignalHandler.h" … … 985 986 986 987 static bool fillBufferWithContentsOfFile(const String& fileName, Vector<char>& buffer); 988 static RefPtr<Uint8Array> fillBufferWithContentsOfFile(const String& fileName); 987 989 988 990 class CommandLine; … … 1709 1711 } 1710 1712 1713 static RefPtr<Uint8Array> fillBufferWithContentsOfFile(FILE* file) 1714 { 1715 fseek(file, 0, SEEK_END); 1716 size_t bufferCapacity = ftell(file); 1717 fseek(file, 0, SEEK_SET); 1718 RefPtr<Uint8Array> result = Uint8Array::create(bufferCapacity); 1719 size_t readSize = fread(result->data(), 1, bufferCapacity, file); 1720 if (readSize != bufferCapacity) 1721 return nullptr; 1722 return result; 1723 } 1724 1725 static RefPtr<Uint8Array> fillBufferWithContentsOfFile(const String& fileName) 1726 { 1727 FILE* f = fopen(fileName.utf8().data(), "rb"); 1728 if (!f) { 1729 fprintf(stderr, "Could not open file: %s\n", fileName.utf8().data()); 1730 return nullptr; 1731 } 1732 1733 RefPtr<Uint8Array> result = fillBufferWithContentsOfFile(f); 1734 fclose(f); 1735 1736 return result; 1737 } 1738 1711 1739 static bool fillBufferWithContentsOfFile(FILE* file, Vector<char>& buffer) 1712 1740 { … … 2277 2305 } 2278 2306 2279 Vector<char> content;2280 if (! fillBufferWithContentsOfFile(fileName, content))2307 RefPtr<Uint8Array> content = fillBufferWithContentsOfFile(fileName); 2308 if (!content) 2281 2309 return throwVMError(exec, scope, "Could not open file."); 2282 2310 2283 2311 if (!isBinary) 2284 return JSValue::encode(jsString(exec, stringFromUTF(content)));2312 return JSValue::encode(jsString(exec, String::fromUTF8WithLatin1Fallback(content->data(), content->length()))); 2285 2313 2286 2314 Structure* structure = exec->lexicalGlobalObject()->typedArrayStructure(TypeUint8); 2287 auto length = content.size(); 2288 JSObject* result = createUint8TypedArray(exec, structure, ArrayBuffer::createFromBytes(content.releaseBuffer().leakPtr(), length, [] (void* p) { fastFree(p); }), 0, length); 2315 JSObject* result = JSUint8Array::create(vm, structure, WTFMove(content)); 2289 2316 RETURN_IF_EXCEPTION(scope, encodedJSValue()); 2290 2317 … … 3776 3803 } 3777 3804 3805 static void gigacageDisabled(void*) 3806 { 3807 dataLog("Gigacage disabled! Aborting.\n"); 3808 UNREACHABLE_FOR_PLATFORM(); 3809 } 3810 3778 3811 int jscmain(int argc, char** argv) 3779 3812 { … … 3794 3827 JSC::Wasm::enableFastMemory(); 3795 3828 #endif 3829 if (GIGACAGE_ENABLED) 3830 Gigacage::addDisableCallback(gigacageDisabled, nullptr); 3796 3831 3797 3832 int result; -
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
r220047 r220118 1199 1199 macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) 1200 1200 bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline 1201 # FIXME: Should do caging 1202 # https://bugs.webkit.org/show_bug.cgi?id=175036 1201 1203 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage 1202 1204 negi propertyOffsetAsInt … … 1212 1214 macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) 1213 1215 bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline 1216 # FIXME: Should do caging 1217 # https://bugs.webkit.org/show_bug.cgi?id=175036 1214 1218 loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage 1215 1219 negi propertyOffsetAsInt … … 1288 1292 btiz t2, IndexingShapeMask, .opGetArrayLengthSlow 1289 1293 loadisFromInstruction(1, t1) 1294 # FIXME: Should do caging 1295 # https://bugs.webkit.org/show_bug.cgi?id=175036 1290 1296 loadp JSObject::m_butterfly[t3], t0 1291 1297 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 … … 1471 1477 loadConstantOrVariableInt32(t3, t1, .opGetByValSlow) 1472 1478 sxi2q t1, t1 1479 # FIXME: Should do caging 1480 # https://bugs.webkit.org/show_bug.cgi?id=175036 1473 1481 loadp JSObject::m_butterfly[t0], t3 1474 1482 andi IndexingShapeMask, t2 … … 1518 1526 1519 1527 # Sweet, now we know that we have a typed array. Do some basic things now. 1528 # FIXME: Should do caging 1529 # https://bugs.webkit.org/show_bug.cgi?id=175036 1520 1530 loadp JSArrayBufferView::m_vector[t0], t3 1521 1531 biaeq t1, JSArrayBufferView::m_length[t0], .opGetByValSlow … … 1609 1619 loadConstantOrVariableInt32(t0, t3, .opPutByValSlow) 1610 1620 sxi2q t3, t3 1621 # FIXME: Should do caging 1622 # https://bugs.webkit.org/show_bug.cgi?id=175036 1611 1623 loadp JSObject::m_butterfly[t1], t0 1612 1624 andi IndexingShapeMask, t2 -
trunk/Source/JavaScriptCore/runtime/ArrayBuffer.cpp
r217052 r220118 1 1 /* 2 * Copyright (C) 2009, 2013, 2016 Apple Inc. All rights reserved. 2 * Copyright (C) 2009-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … 30 30 #include "JSArrayBufferView.h" 31 31 #include "JSCInlines.h" 32 #include <wtf/Gigacage.h> 32 33 33 34 namespace JSC { … 103 104 } 104 105 } 105 bool allocationSucceeded = false; 106 size_t size = static_cast<size_t>(numElements) * static_cast<size_t>(elementByteSize); 107 if (!size) 108 size = 1; // Make sure malloc actually allocates something, but not too much. We use null to mean that the buffer is neutered. 109 m_data = Gigacage::tryMalloc(size); 110 if (!m_data) { 111 reset(); 112 return; 113 } 114 106 115 if (policy == ZeroInitialize) 107 allocationSucceeded = WTF::tryFastCalloc(numElements, elementByteSize).getValue(m_data); 108 else { 109 ASSERT(policy == DontInitialize); 110 allocationSucceeded = WTF::tryFastMalloc(numElements * elementByteSize).getValue(m_data); 111 } 112 113 if (allocationSucceeded) { 114 m_sizeInBytes = numElements * elementByteSize; 115 m_destructor = [] (void* p) { fastFree(p); }; 116 return; 117 } 118 reset(); 116 memset(m_data, 0, size); 117 118 m_sizeInBytes = numElements * elementByteSize; 119 m_destructor = [] (void* p) { Gigacage::free(p); }; 119 120 } 120 121 … … 181 182 } 182 183 184 // FIXME: We cannot use this except if the memory comes from the cage. 185 // Currently this is only used from: 186 // - JSGenericTypedArrayView<>::slowDownAndWasteMemory. But in that case, the memory should have already come 187 // from the cage. 183 188 Ref<ArrayBuffer> ArrayBuffer::createAdopted(const void* data, unsigned byteLength) 184 189 { 185 return createFromBytes(data, byteLength, [] (void* p) { fastFree(p); }); 186 } 190 return createFromBytes(data, byteLength, [] (void* p) { Gigacage::free(p); }); 191 } 192 193 // FIXME: We cannot use this except if the memory comes from the cage. 194 // Currently this is only used from: 195 // - The C API. We could support that by either having the system switch to a mode where typed arrays are no 196 // longer caged, or we could introduce a new set of typed array types that are uncaged and get accessed 197 // differently. 198 // - WebAssembly. Wasm should allocate from the cage. 188 199 Ref<ArrayBuffer> ArrayBuffer::createFromBytes(const void* data, unsigned byteLength, ArrayBufferDestructorFunction&& destructor) 189 200 { 201 if (!Gigacage::isCaged(data) && data && byteLength) 202 Gigacage::disableGigacage(); 203 190 204 ArrayBufferContents contents(const_cast<void*>(data), byteLength, WTFMove(destructor)); 191 205 return create(WTFMove(contents)); … … 205 219 { 206 220 ArrayBufferContents contents; 207 contents.tryAllocate(byteLength, 1, ArrayBufferContents:: ZeroInitialize); 221 contents.tryAllocate(byteLength, 1, ArrayBufferContents::DontInitialize); 208 222 if (!contents.m_data) 209 223 return nullptr; -
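ArrayBuffer backing stores now come from the Gigacage allocator rather than fastMalloc, and createFromBytes falls back to disabling the cage entirely when handed memory it cannot verify. A minimal usage sketch of the primitives this hunk relies on; this is WTF-internal API as used in the patch, so it only compiles inside WebKit.

```cpp
#include <wtf/Gigacage.h>

// The allocation discipline this file now follows: buffers come from the cage
// allocator and must be released with it. Gigacage::isCaged() is the same
// check createFromBytes() applies before accepting foreign memory.
static void cagedAllocationExample()
{
    void* p = Gigacage::tryMalloc(1024); // may return nullptr, like tryFastMalloc
    if (!p)
        return;
    bool inCage = Gigacage::isCaged(p);  // true for any cage allocation
    (void)inCage;
    Gigacage::free(p);                   // fastFree() here would be a heap mismatch
}
```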
trunk/Source/JavaScriptCore/runtime/IndexingHeader.h
r206525 r220118 123 123 union { 124 124 struct { 125 // FIXME: vectorLength should be least significant, so that it's really hard to craft a pointer by 126 // mucking with the butterfly. 127 // https://bugs.webkit.org/show_bug.cgi?id=174927 125 128 uint32_t publicLength; // The meaning of this field depends on the array type, but for all JSArrays we rely on this being the publicly visible length (array.length). 126 129 uint32_t vectorLength; // The length of the indexed property storage. The actual size of the storage depends on this, and the type. -
trunk/Source/JavaScriptCore/runtime/InitializeThreading.cpp
r220069 r220118 41 41 #include "StructureIDTable.h" 42 42 #include "SuperSampler.h" 43 #include "WasmMemory.h"44 43 #include "WasmThunks.h" 45 44 #include "WriteBarrier.h" … … 61 60 WTF::initializeThreading(); 62 61 Options::initialize(); 63 #if ENABLE(WEBASSEMBLY)64 Wasm::Memory::initializePreallocations();65 #endif66 62 #if ENABLE(WRITE_BARRIER_PROFILING) 67 63 WriteBarrierCounters::initialize(); -
trunk/Source/JavaScriptCore/runtime/JSArrayBuffer.cpp
r217108 r220118 30 30 #include "TypeError.h" 31 31 #include "TypedArrayController.h" 32 #include <wtf/Gigacage.h> 32 33 33 34 namespace JSC { -
trunk/Source/JavaScriptCore/runtime/JSArrayBufferView.cpp
r217108 r220118 31 31 #include "TypeError.h" 32 32 #include "TypedArrayController.h" 33 #include <wtf/Gigacage.h> 33 34 34 35 namespace JSC { … … 89 90 return; 90 91 91 if (mode == ZeroFill) { 92 if (!tryFastCalloc(length, elementSize).getValue(m_vector)) 93 return; 94 } else { 95 if (!tryFastMalloc(length * elementSize).getValue(m_vector)) 96 return; 97 } 92 size_t size = static_cast<size_t>(length) * static_cast<size_t>(elementSize); 93 m_vector = Gigacage::tryMalloc(size); 94 if (!m_vector) 95 return; 96 if (mode == ZeroFill) 97 memset(m_vector, 0, size); 98 98 99 99 vm.heap.reportExtraMemoryAllocated(static_cast<size_t>(length) * elementSize); … … 193 193 ASSERT(thisObject->m_mode == OversizeTypedArray || thisObject->m_mode == WastefulTypedArray); 194 194 if (thisObject->m_mode == OversizeTypedArray) 195 fastFree(thisObject->m_vector.get());195 Gigacage::free(thisObject->m_vector.get()); 196 196 } 197 197 -
trunk/Source/JavaScriptCore/runtime/JSLock.cpp
r220069 r220118 157 157 // Note: everything below must come after addCurrentThread(). 158 158 m_vm->traps().notifyGrabAllLocks(); 159 160 m_vm->fireGigacageEnabledIfNecessary(); 159 161 160 162 #if ENABLE(SAMPLING_PROFILER) -
trunk/Source/JavaScriptCore/runtime/JSObject.h
r219981 r220118 1046 1046 1047 1047 protected: 1048 // FIXME: This should do caging. 1049 // https://bugs.webkit.org/show_bug.cgi?id=175039 1048 1050 AuxiliaryBarrier<Butterfly*> m_butterfly; 1049 1051 #if USE(JSVALUE32_64) -
trunk/Source/JavaScriptCore/runtime/Options.cpp
r219055 r220118 406 406 Options::useWebAssembly() = false; 407 407 408 if (!Options::useWebAssembly()) { 409 Options::webAssemblyFastMemoryPreallocateCount() = 0; 408 if (!Options::useWebAssembly()) 410 409 Options::useWebAssemblyFastTLS() = false; 411 }412 410 413 411 if (Options::dumpDisassembly() -
trunk/Source/JavaScriptCore/runtime/Options.h
r219611 r220118 461 461 /* FIXME: enable fast memories on iOS and pre-allocate them. https://bugs.webkit.org/show_bug.cgi?id=170774 */ \ 462 462 v(bool, useWebAssemblyFastMemory, !isIOS(), Normal, "If true, we will try to use a 32-bit address space with a signal handler to bounds check wasm memory.") \ 463 v(bool, logWebAssemblyMemory, false, Normal, nullptr) \ 463 464 v(unsigned, webAssemblyFastMemoryRedzonePages, 128, Normal, "WebAssembly fast memories use 4GiB virtual allocations, plus a redzone (counted as multiple of 64KiB WebAssembly pages) at the end to catch reg+imm accesses which exceed 32-bit, anything beyond the redzone is explicitly bounds-checked") \ 464 465 v(bool, crashIfWebAssemblyCantFastMemory, false, Normal, "If true, we will crash if we can't obtain fast memory for wasm.") \ 465 v(unsigned, webAssemblyFastMemoryPreallocateCount, 0, Normal, "WebAssembly fast memories can be pre-allocated at program startup and remain cached to avoid fragmentation leading to bounds-checked memory. This number is an upper bound on initial allocation as well as total count of fast memories. Zero means no pre-allocation, no caching, and no limit to the number of runtime allocations.") \466 v(unsigned, maxNumWebAssemblyFastMemories, 10, Normal, nullptr) \ 466 467 v(bool, useWebAssemblyFastTLS, true, Normal, "If true, we will try to use fast thread-local storage if available on the current platform.") \ 467 468 v(bool, useFastTLSForWasmContext, true, Normal, "If true (and fast TLS is enabled), we will store context in fast TLS. If false, we will pin it to a register.") \ -
trunk/Source/JavaScriptCore/runtime/ScopedArgumentsTable.h
r206525 r220118 87 87 uint32_t m_length; 88 88 bool m_locked; // Being locked means that there are multiple references to this object and none of them expect to see the others' modifications. This means that modifications need to make a copy first. 89 // FIXME: Allocate this in the primitive gigacage 90 // https://bugs.webkit.org/show_bug.cgi?id=174921 89 91 std::unique_ptr<ScopeOffset[]> m_arguments; 90 92 }; -
trunk/Source/JavaScriptCore/runtime/VM.cpp
r220069 r220118 168 168 , stringSpace("JSString", heap) 169 169 , destructibleObjectSpace("JSDestructibleObject", heap) 170 , eagerlySweptDestructibleObjectSpace("Eagerly Swept JSDestructibleObject", heap) 170 171 , segmentedVariableObjectSpace("JSSegmentedVariableObjectSpace", heap) 171 172 #if ENABLE(WEBASSEMBLY) … … 208 209 , m_builtinExecutables(std::make_unique<BuiltinExecutables>(*this)) 209 210 , m_typeProfilerEnabledCount(0) 211 , m_gigacageEnabled(IsWatched) 210 212 , m_controlFlowProfilerEnabledCount(0) 211 213 , m_shadowChicken(std::make_unique<ShadowChicken>()) … … 285 287 initializeHostCallReturnValue(); // This is needed to convince the linker not to drop host call return support. 286 288 #endif 289 290 Gigacage::addDisableCallback(gigacageDisabledCallback, this); 287 291 288 292 heap.notifyIsSafeToCollect(); … … 339 343 VM::~VM() 340 344 { 345 Gigacage::removeDisableCallback(gigacageDisabledCallback, this); 341 346 promiseDeferredTimer->stopRunningTasks(); 342 347 #if ENABLE(WEBASSEMBLY) … … 405 410 fastFree(scratchBuffers[i]); 406 411 #endif 412 } 413 414 void VM::gigacageDisabledCallback(void* argument) 415 { 416 static_cast<VM*>(argument)->gigacageDisabled(); 417 } 418 419 void VM::gigacageDisabled() 420 { 421 if (m_apiLock->currentThreadIsHoldingLock()) { 422 m_gigacageEnabled.fireAll(*this, "Gigacage disabled"); 423 return; 424 } 425 426 // This is totally racy, and that's OK. The point is, it's up to the user to ensure that they pass the 427 // uncaged buffer in a nicely synchronized manner. 428 m_needToFireGigacageEnabled = true; 407 429 } 408 430 -
trunk/Source/JavaScriptCore/runtime/VM.h
r220069 r220118 37 37 #include "ExecutableAllocator.h" 38 38 #include "FunctionHasExecutedCache.h" 39 #include "GigacageSubspace.h" 39 40 #include "Heap.h" 40 41 #include "Intrinsic.h" … … 287 288 Heap heap; 288 289 289 Subspace auxiliarySpace;290 GigacageSubspace auxiliarySpace; 290 291 291 292 // Whenever possible, use subspaceFor<CellType>(vm) to get one of these subspaces. … … 294 295 JSStringSubspace stringSpace; 295 296 JSDestructibleObjectSubspace destructibleObjectSpace; 297 JSDestructibleObjectSubspace eagerlySweptDestructibleObjectSpace; 296 298 JSSegmentedVariableObjectSubspace segmentedVariableObjectSpace; 297 299 #if ENABLE(WEBASSEMBLY) … … 524 526 void* lastStackTop() { return m_lastStackTop; } 525 527 void setLastStackTop(void*); 528 529 void fireGigacageEnabledIfNecessary() 530 { 531 if (m_needToFireGigacageEnabled) { 532 m_needToFireGigacageEnabled = false; 533 m_gigacageEnabled.fireAll(*this, "Gigacage disabled asynchronously"); 534 } 535 } 526 536 527 537 JSValue hostCallReturnValue; … … 625 635 // FIXME: Use AtomicString once it got merged with Identifier. 626 636 JS_EXPORT_PRIVATE void addImpureProperty(const String&); 637 638 InlineWatchpointSet& gigacageEnabled() { return m_gigacageEnabled; } 627 639 628 640 BuiltinExecutables* builtinExecutables() { return m_builtinExecutables.get(); } … … 731 743 void verifyExceptionCheckNeedIsSatisfied(unsigned depth, ExceptionEventLocation&); 732 744 #endif 745 746 static void gigacageDisabledCallback(void*); 747 void gigacageDisabled(); 733 748 734 749 #if ENABLE(ASSEMBLER) … … 775 790 std::unique_ptr<TypeProfilerLog> m_typeProfilerLog; 776 791 unsigned m_typeProfilerEnabledCount; 792 bool m_needToFireGigacageEnabled { false }; 793 InlineWatchpointSet m_gigacageEnabled; 777 794 FunctionHasExecutedCache m_functionHasExecutedCache; 778 795 std::unique_ptr<ControlFlowProfiler> m_controlFlowProfiler; -
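The VM.cpp and VM.h hunks above, together with the JSLock.cpp hunk earlier, form a deferred watchpoint fire: if the disable callback runs on a thread that already holds the API lock, the m_gigacageEnabled watchpoint set fires immediately; otherwise a flag is set and the next thread to take the lock fires it. A minimal sketch of that handoff, with simplified stand-in types rather than the real VM, JSLock, and InlineWatchpointSet:

    #include <cstdio>

    struct WatchpointSet {
        void fireAll(const char* reason) { std::printf("watchpoint fired: %s\n", reason); } // stand-in
    };

    struct MiniVM {
        WatchpointSet gigacageEnabled;
        bool apiLockHeldByThisThread { false };
        bool needToFireGigacageEnabled { false };

        // Runs from Gigacage's disable callback, possibly on a thread without the lock.
        void gigacageDisabled()
        {
            if (apiLockHeldByThisThread) {
                gigacageEnabled.fireAll("Gigacage disabled");
                return;
            }
            // Racy by design, as the comment in the hunk says; the flag is
            // rechecked once the lock is held.
            needToFireGigacageEnabled = true;
        }

        // Runs right after the API lock is acquired (see the JSLock.cpp hunk above).
        void fireGigacageEnabledIfNecessary()
        {
            if (needToFireGigacageEnabled) {
                needToFireGigacageEnabled = false;
                gigacageEnabled.fireAll("Gigacage disabled asynchronously");
            }
        }
    };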
trunk/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
r219899 r220118 359 359 ASSERT_UNUSED(pinnedGPR, InvalidGPRReg == pinnedGPR); 360 360 break; 361 case MemoryMode::NumberOfMemoryModes: 362 ASSERT_NOT_REACHED(); 363 361 } 364 362 this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); … … 638 636 } 639 637 break; 640 641 case MemoryMode::NumberOfMemoryModes: 642 RELEASE_ASSERT_NOT_REACHED(); 643 638 } 644 639 pointer = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), pointer); -
trunk/Source/JavaScriptCore/wasm/WasmCodeBlock.cpp
r217942 r220118 126 126 // because the page protection detects out-of-bounds accesses. 127 127 return memoryMode == Wasm::MemoryMode::Signaling; 128 case Wasm::MemoryMode::NumberOfMemoryModes: 129 break; 130 128 } 131 129 RELEASE_ASSERT_NOT_REACHED(); -
trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp
r219595 r220118 31 31 #include "VM.h" 32 32 #include "WasmThunks.h" 33 34 #include <atomic> 35 #include <wtf/MonotonicTime.h> 33 #include <wtf/Gigacage.h> 34 #include <wtf/Lock.h> 36 35 #include <wtf/Platform.h> 37 36 #include <wtf/PrintStream.h> 38 #include <wtf/ VMTags.h>37 #include <wtf/RAMSize.h> 39 38 40 39 namespace JSC { namespace Wasm { … … 45 44 46 45 namespace { 46 47 47 constexpr bool verbose = false; 48 48 49 49 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntGetFastMemory() { CRASH(); } 50 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnmapMemory() { CRASH(); } 51 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnprotectMemory() { CRASH(); } 52 53 void* mmapBytes(size_t bytes) 54 { 55 void* location = mmap(nullptr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_WEBASSEMBLY_MEMORY, 0); 56 return location == MAP_FAILED ? nullptr : location; 57 } 58 59 void munmapBytes(void* memory, size_t size) 60 { 61 if (UNLIKELY(munmap(memory, size))) 62 webAssemblyCouldntUnmapMemory(); 63 } 64 65 void zeroAndUnprotectBytes(void* start, size_t bytes) 66 { 67 if (bytes) { 68 dataLogLnIf(verbose, "Zeroing and unprotecting ", bytes, " from ", RawPointer(start)); 69 // FIXME: We could be smarter about memset / mmap / madvise. Here, we may not need to act synchronously, or maybe we can memset+unprotect smaller ranges of memory (which would pay off if not all the writable memory was actually physically backed: memset forces physical backing only to unprotect it right after). https://bugs.webkit.org/show_bug.cgi?id=170343 70 memset(start, 0, bytes); 71 if (UNLIKELY(mprotect(start, bytes, PROT_NONE))) 72 webAssemblyCouldntUnprotectMemory(); 73 } 74 } 75 76 // Allocate fast memories very early at program startup and cache them. The fast memories use significant amounts of virtual uncommitted address space, reducing the likelihood that we'll obtain any if we wait to allocate them. 77 // We still try to allocate fast memories at runtime, and will cache them when relinquished up to the preallocation limit. 78 // Note that this state is per-process, not per-VM. 79 // We use simple static globals which don't allocate to avoid early fragmentation and to keep management to the bare minimum. We avoid locking because fast memories use segfault signal handling to handle out-of-bounds accesses. This requires identifying if the faulting address is in a fast memory range, which should avoid acquiring a lock lest the actual signal was caused by this very code while it already held the lock. 80 // Speed and contention don't really matter here, but simplicity does. We therefore use straightforward FIFOs for our cache, and linear traversal for the list of currently active fast memories. 81 constexpr size_t fastMemoryCacheHardLimit { 16 }; 82 constexpr size_t fastMemoryAllocationSoftLimit { 32 }; // Prevents filling up the virtual address space. 
83 static_assert(fastMemoryAllocationSoftLimit >= fastMemoryCacheHardLimit, "The cache shouldn't be bigger than the total number we'll ever allocate"); 84 size_t fastMemoryPreallocateCount { 0 }; 85 std::atomic<void*> fastMemoryCache[fastMemoryCacheHardLimit] = { ATOMIC_VAR_INIT(nullptr) }; 86 std::atomic<void*> currentlyActiveFastMemories[fastMemoryAllocationSoftLimit] = { ATOMIC_VAR_INIT(nullptr) }; 87 std::atomic<size_t> currentlyAllocatedFastMemories = ATOMIC_VAR_INIT(0); 88 std::atomic<size_t> observedMaximumFastMemory = ATOMIC_VAR_INIT(0); 89 std::atomic<size_t> currentSlowMemoryCapacity = ATOMIC_VAR_INIT(0); 90 91 size_t fastMemoryAllocatedBytesSoftLimit() 92 { 93 return fastMemoryAllocationSoftLimit * Memory::fastMappedBytes(); 94 } 95 96 void* tryGetCachedFastMemory() 97 { 98 for (unsigned idx = 0; idx < fastMemoryPreallocateCount; ++idx) { 99 if (void* previous = fastMemoryCache[idx].exchange(nullptr, std::memory_order_acq_rel)) 100 return previous; 101 } 102 return nullptr; 103 } 104 105 bool tryAddToCachedFastMemory(void* memory) 106 { 107 for (unsigned i = 0; i < fastMemoryPreallocateCount; ++i) { 108 void* expected = nullptr; 109 if (fastMemoryCache[i].compare_exchange_strong(expected, memory, std::memory_order_acq_rel)) { 110 dataLogLnIf(verbose, "Cached fast memory ", RawPointer(memory)); 111 return true; 112 } 113 } 114 return false; 115 } 116 117 bool tryAddToCurrentlyActiveFastMemories(void* memory) 118 { 119 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 120 void* expected = nullptr; 121 if (currentlyActiveFastMemories[idx].compare_exchange_strong(expected, memory, std::memory_order_acq_rel)) 122 return true; 123 } 124 return false; 125 } 126 127 void removeFromCurrentlyActiveFastMemories(void* memory) 128 { 129 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 130 void* expected = memory; 131 if (currentlyActiveFastMemories[idx].compare_exchange_strong(expected, nullptr, std::memory_order_acq_rel)) 132 return; 133 } 134 RELEASE_ASSERT_NOT_REACHED(); 135 } 136 137 void* tryGetFastMemory(VM& vm) 138 { 139 void* memory = nullptr; 140 141 if (LIKELY(Options::useWebAssemblyFastMemory())) { 142 memory = tryGetCachedFastMemory(); 143 if (memory) 144 dataLogLnIf(verbose, "tryGetFastMemory re-using ", RawPointer(memory)); 145 else if (currentlyAllocatedFastMemories.load(std::memory_order_acquire) >= 1) { 146 // No memory was available in the cache, but we know there's at least one currently live. Maybe GC will find a free one. 147 // FIXME collectSync(Full) and custom eager destruction of wasm memories could be better. For now use collectNow. Also, nothing tells us the current VM is holding onto fast memories. https://bugs.webkit.org/show_bug.cgi?id=170748 148 dataLogLnIf(verbose, "tryGetFastMemory waiting on GC and retrying"); 149 vm.heap.collectNow(Sync, CollectionScope::Full); 150 memory = tryGetCachedFastMemory(); 151 dataLogLnIf(verbose, "tryGetFastMemory waited on GC and retried ", memory? "successfully" : "unseccessfully"); 152 } 153 154 // The soft limit is inherently racy because checking+allocation isn't atomic. Exceeding it slightly is fine. 
155 bool atAllocationSoftLimit = currentlyAllocatedFastMemories.load(std::memory_order_acquire) >= fastMemoryAllocationSoftLimit; 156 dataLogLnIf(verbose && atAllocationSoftLimit, "tryGetFastMemory reached allocation soft limit of ", fastMemoryAllocationSoftLimit); 157 158 if (!memory && !atAllocationSoftLimit) { 159 memory = mmapBytes(Memory::fastMappedBytes()); 160 if (memory) { 161 size_t currentlyAllocated = 1 + currentlyAllocatedFastMemories.fetch_add(1, std::memory_order_acq_rel); 162 size_t currentlyObservedMaximum = observedMaximumFastMemory.load(std::memory_order_acquire); 163 if (currentlyAllocated > currentlyObservedMaximum) { 164 size_t expected = currentlyObservedMaximum; 165 bool success = observedMaximumFastMemory.compare_exchange_strong(expected, currentlyAllocated, std::memory_order_acq_rel); 166 if (success) 167 dataLogLnIf(verbose, "tryGetFastMemory currently observed maximum is now ", currentlyAllocated); 168 else 169 // We lost the update race, but the counter is monotonic so the winner must have updated the value to what we were going to update it to, or multiple winners did so. 170 ASSERT(expected >= currentlyAllocated); 171 } 172 dataLogLnIf(verbose, "tryGetFastMemory allocated ", RawPointer(memory), ", currently allocated is ", currentlyAllocated); 173 } 174 } 175 } 176 177 if (memory) { 178 if (UNLIKELY(!tryAddToCurrentlyActiveFastMemories(memory))) { 179 // We got a memory, but reached the allocation soft limit *and* all of the allocated memories are active, none are cached. That's a bummer, we have to get rid of our memory. We can't just hold on to it because the list of active fast memories must be precise. 180 dataLogLnIf(verbose, "tryGetFastMemory found a fast memory but had to give it up"); 181 munmapBytes(memory, Memory::fastMappedBytes()); 182 currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel); 183 memory = nullptr; 184 } 185 } 186 187 if (!memory) { 188 dataLogLnIf(verbose, "tryGetFastMemory couldn't re-use or allocate a fast memory"); 189 if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory())) 190 webAssemblyCouldntGetFastMemory(); 191 } 192 193 return memory; 194 } 195 196 bool slowMemoryCapacitySoftMaximumExceeded() 197 { 198 // The limit on slow memory capacity is arbitrary. Its purpose is to limit 199 // virtual memory allocation. We choose to set the limit at the same virtual 200 // memory limit imposed on fast memories. 
201 size_t maximum = fastMemoryAllocatedBytesSoftLimit(); 202 size_t currentCapacity = currentSlowMemoryCapacity.load(std::memory_order_acquire); 203 if (UNLIKELY(currentCapacity > maximum)) { 204 dataLogLnIf(verbose, "Slow memory capacity limit reached"); 205 return true; 206 } 207 return false; 208 } 209 210 void* tryGetSlowMemory(size_t bytes) 211 { 212 if (slowMemoryCapacitySoftMaximumExceeded()) 50 51 struct MemoryResult { 52 enum Kind { 53 Success, 54 SuccessAndAsyncGC, 55 SyncGCAndRetry 56 }; 57 58 static const char* toString(Kind kind) 59 { 60 switch (kind) { 61 case Success: 62 return "Success"; 63 case SuccessAndAsyncGC: 64 return "SuccessAndAsyncGC"; 65 case SyncGCAndRetry: 66 return "SyncGCAndRetry"; 67 } 68 RELEASE_ASSERT_NOT_REACHED(); 213 69 return nullptr; 214 void* memory = mmapBytes(bytes); 215 if (memory) 216 currentSlowMemoryCapacity.fetch_add(bytes, std::memory_order_acq_rel); 217 dataLogLnIf(memory && verbose, "Obtained slow memory ", RawPointer(memory), " with capacity ", bytes); 218 dataLogLnIf(!memory && verbose, "Failed obtaining slow memory with capacity ", bytes); 219 return memory; 220 } 221 222 void relinquishMemory(void* memory, size_t writableSize, size_t mappedCapacity, MemoryMode mode) 223 { 224 switch (mode) { 225 case MemoryMode::Signaling: { 226 RELEASE_ASSERT(Options::useWebAssemblyFastMemory()); 227 RELEASE_ASSERT(mappedCapacity == Memory::fastMappedBytes()); 228 229 // This memory cannot cause a trap anymore. 230 removeFromCurrentlyActiveFastMemories(memory); 231 232 // We may cache fast memories. Assuming we will, we have to reset them before inserting them into the cache. 233 zeroAndUnprotectBytes(memory, writableSize); 234 235 if (tryAddToCachedFastMemory(memory)) 236 return; 237 238 dataLogLnIf(verbose, "relinquishMemory unable to cache fast memory, freeing instead ", RawPointer(memory)); 239 munmapBytes(memory, Memory::fastMappedBytes()); 240 currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel); 241 242 return; 243 } 244 245 case MemoryMode::BoundsChecking: 246 dataLogLnIf(verbose, "relinquishFastMemory freeing slow memory ", RawPointer(memory)); 247 munmapBytes(memory, mappedCapacity); 248 currentSlowMemoryCapacity.fetch_sub(mappedCapacity, std::memory_order_acq_rel); 249 return; 250 251 case MemoryMode::NumberOfMemoryModes: 252 break; 253 } 254 255 RELEASE_ASSERT_NOT_REACHED(); 256 } 257 258 bool makeNewMemoryReadWriteOrRelinquish(void* memory, size_t initialBytes, size_t mappedCapacityBytes, MemoryMode mode) 259 { 260 ASSERT(memory && initialBytes <= mappedCapacityBytes); 261 if (initialBytes) { 262 dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(memory), "'s initial ", initialBytes, " bytes as read+write"); 263 if (mprotect(memory, initialBytes, PROT_READ | PROT_WRITE)) { 264 const char* why = strerror(errno); 265 dataLogLnIf(verbose, "Failed making memory ", RawPointer(memory), " readable and writable: ", why); 266 relinquishMemory(memory, 0, mappedCapacityBytes, mode); 267 return false; 268 } 269 } 270 return true; 70 } 71 72 MemoryResult() { } 73 74 MemoryResult(void* basePtr, Kind kind) 75 : basePtr(basePtr) 76 , kind(kind) 77 { 78 } 79 80 void dump(PrintStream& out) const 81 { 82 out.print("{basePtr = ", RawPointer(basePtr), ", kind = ", toString(kind), "}"); 83 } 84 85 void* basePtr; 86 Kind kind; 87 }; 88 89 class MemoryManager { 90 public: 91 MemoryManager() 92 : m_maxCount(Options::maxNumWebAssemblyFastMemories()) 93 { 94 } 95 96 MemoryResult tryAllocateVirtualPages() 97 { 98 MemoryResult result = 
[&] { 99 auto holder = holdLock(m_lock); 100 if (m_memories.size() >= m_maxCount) 101 return MemoryResult(nullptr, MemoryResult::SyncGCAndRetry); 102 103 void* result = Gigacage::tryAllocateVirtualPages(Memory::fastMappedBytes()); 104 if (!result) 105 return MemoryResult(nullptr, MemoryResult::SyncGCAndRetry); 106 107 m_memories.append(result); 108 109 return MemoryResult( 110 result, 111 m_memories.size() >= m_maxCount / 2 ? MemoryResult::SuccessAndAsyncGC : MemoryResult::Success); 112 }(); 113 114 if (Options::logWebAssemblyMemory()) 115 dataLog("Allocated virtual: ", result, "; state: ", *this, "\n"); 116 117 return result; 118 } 119 120 void freeVirtualPages(void* basePtr) 121 { 122 { 123 auto holder = holdLock(m_lock); 124 Gigacage::freeVirtualPages(basePtr, Memory::fastMappedBytes()); 125 m_memories.removeFirst(basePtr); 126 } 127 128 if (Options::logWebAssemblyMemory()) 129 dataLog("Freed virtual; state: ", *this, "\n"); 130 } 131 132 bool containsAddress(void* address) 133 { 134 // NOTE: This can be called from a signal handler, but only after we proved that we're in JIT code. 135 auto holder = holdLock(m_lock); 136 for (void* memory : m_memories) { 137 char* start = static_cast<char*>(memory); 138 if (start <= address && address <= start + Memory::fastMappedBytes()) 139 return true; 140 } 141 return false; 142 } 143 144 // FIXME: Ideally, bmalloc would have this kind of mechanism. Then, we would just forward to that 145 // mechanism here. 146 MemoryResult::Kind tryAllocatePhysicalBytes(size_t bytes) 147 { 148 MemoryResult::Kind result = [&] { 149 auto holder = holdLock(m_lock); 150 if (m_physicalBytes + bytes > ramSize()) 151 return MemoryResult::SyncGCAndRetry; 152 153 m_physicalBytes += bytes; 154 155 if (m_physicalBytes >= ramSize() / 2) 156 return MemoryResult::SuccessAndAsyncGC; 157 158 return MemoryResult::Success; 159 }(); 160 161 if (Options::logWebAssemblyMemory()) 162 dataLog("Allocated physical: ", bytes, ", ", MemoryResult::toString(result), "; state: ", *this, "\n"); 163 164 return result; 165 } 166 167 void freePhysicalBytes(size_t bytes) 168 { 169 { 170 auto holder = holdLock(m_lock); 171 m_physicalBytes -= bytes; 172 } 173 174 if (Options::logWebAssemblyMemory()) 175 dataLog("Freed physical: ", bytes, "; state: ", *this, "\n"); 176 } 177 178 void dump(PrintStream& out) const 179 { 180 out.print("memories = ", m_memories.size(), "/", m_maxCount, ", bytes = ", m_physicalBytes, "/", ramSize()); 181 } 182 183 private: 184 Lock m_lock; 185 unsigned m_maxCount { 0 }; 186 Vector<void*> m_memories; 187 size_t m_physicalBytes { 0 }; 188 }; 189 190 static MemoryManager& memoryManager() 191 { 192 static std::once_flag onceFlag; 193 static MemoryManager* manager; 194 std::call_once( 195 onceFlag, 196 [] { 197 manager = new MemoryManager(); 198 }); 199 return *manager; 200 } 201 202 template<typename Func> 203 bool tryAndGC(VM& vm, const Func& allocate) 204 { 205 unsigned numTries = 2; 206 bool done = false; 207 for (unsigned i = 0; i < numTries && !done; ++i) { 208 switch (allocate()) { 209 case MemoryResult::Success: 210 done = true; 211 break; 212 case MemoryResult::SuccessAndAsyncGC: 213 vm.heap.collectAsync(CollectionScope::Full); 214 done = true; 215 break; 216 case MemoryResult::SyncGCAndRetry: 217 if (i + 1 == numTries) 218 break; 219 vm.heap.collectSync(CollectionScope::Full); 220 break; 221 } 222 } 223 return done; 271 224 } 272 225 273 226 } // anonymous namespace 274 275 227 276 228 const char* makeString(MemoryMode mode) … … 279 231 case MemoryMode::BoundsChecking: 
return "BoundsChecking"; 280 232 case MemoryMode::Signaling: return "Signaling"; 281 case MemoryMode::NumberOfMemoryModes: break;282 233 } 283 234 RELEASE_ASSERT_NOT_REACHED(); 284 235 return ""; 285 }286 287 void Memory::initializePreallocations()288 {289 if (UNLIKELY(!Options::useWebAssemblyFastMemory()))290 return;291 292 // Races cannot occur in this function: it is only called at program initialization, before WebAssembly can be invoked.293 294 MonotonicTime startTime;295 if (verbose)296 startTime = MonotonicTime::now();297 298 const size_t desiredFastMemories = std::min<size_t>(Options::webAssemblyFastMemoryPreallocateCount(), fastMemoryCacheHardLimit);299 300 // Start off trying to allocate fast memories contiguously so they don't fragment each other. This can fail if the address space is otherwise fragmented. In that case, go for smaller contiguous allocations. We'll eventually get individual non-contiguous fast memories allocated, or we'll just be unable to fit a single one at which point we give up.301 auto allocateContiguousFastMemories = [&] (size_t numContiguous) -> bool {302 if (void *memory = mmapBytes(Memory::fastMappedBytes() * numContiguous)) {303 for (size_t subMemory = 0; subMemory < numContiguous; ++subMemory) {304 void* startAddress = reinterpret_cast<char*>(memory) + Memory::fastMappedBytes() * subMemory;305 bool inserted = false;306 for (size_t cacheEntry = 0; cacheEntry < fastMemoryCacheHardLimit; ++cacheEntry) {307 if (fastMemoryCache[cacheEntry].load(std::memory_order_relaxed) == nullptr) {308 fastMemoryCache[cacheEntry].store(startAddress, std::memory_order_relaxed);309 inserted = true;310 break;311 }312 }313 RELEASE_ASSERT(inserted);314 }315 return true;316 }317 return false;318 };319 320 size_t fragments = 0;321 size_t numFastMemories = 0;322 size_t contiguousMemoryAllocationAttempt = desiredFastMemories;323 while (numFastMemories != desiredFastMemories && contiguousMemoryAllocationAttempt != 0) {324 if (allocateContiguousFastMemories(contiguousMemoryAllocationAttempt)) {325 numFastMemories += contiguousMemoryAllocationAttempt;326 contiguousMemoryAllocationAttempt = std::min(contiguousMemoryAllocationAttempt - 1, desiredFastMemories - numFastMemories);327 } else328 --contiguousMemoryAllocationAttempt;329 ++fragments;330 }331 332 fastMemoryPreallocateCount = numFastMemories;333 currentlyAllocatedFastMemories.store(fastMemoryPreallocateCount, std::memory_order_relaxed);334 observedMaximumFastMemory.store(fastMemoryPreallocateCount, std::memory_order_relaxed);335 336 if (verbose) {337 MonotonicTime endTime = MonotonicTime::now();338 339 for (size_t cacheEntry = 0; cacheEntry < fastMemoryPreallocateCount; ++cacheEntry) {340 void* startAddress = fastMemoryCache[cacheEntry].load(std::memory_order_relaxed);341 ASSERT(startAddress);342 dataLogLn("Pre-allocation of WebAssembly fast memory at ", RawPointer(startAddress));343 }344 345 dataLogLn("Pre-allocated ", fastMemoryPreallocateCount, " WebAssembly fast memories in ", fastMemoryPreallocateCount == 0 ? 0 : fragments, fragments == 1 ? " fragment, took " : " fragments, took ", endTime - startTime);346 }347 236 } 348 237 … … 374 263 const size_t initialBytes = initial.bytes(); 375 264 const size_t maximumBytes = maximum ? maximum.bytes() : 0; 376 size_t mappedCapacityBytes = 0;377 MemoryMode mode;378 265 379 266 // We need to be sure we have a stub prior to running code. 
… … 386 273 return adoptRef(new Memory(initial, maximum)); 387 274 } 388 389 void* memory = nullptr; 390 391 // First try fast memory, because they're fast. Fast memory is suitable for any initial / maximum. 392 memory = tryGetFastMemory(vm); 393 if (memory) { 394 mappedCapacityBytes = Memory::fastMappedBytes(); 395 mode = MemoryMode::Signaling; 396 } 397 398 // If we can't get a fast memory but the user expressed the intent to grow memory up to a certain maximum then we should try to honor that desire. It'll mean that grow is more likely to succeed, and won't require remapping. 399 if (!memory && maximum) { 400 memory = tryGetSlowMemory(maximumBytes); 401 if (memory) { 402 mappedCapacityBytes = maximumBytes; 403 mode = MemoryMode::BoundsChecking; 404 } 405 } 406 407 // We're stuck with a slow memory which may be slower or impossible to grow. 408 if (!memory) { 409 if (!initialBytes) 410 return adoptRef(new Memory(initial, maximum)); 411 memory = tryGetSlowMemory(initialBytes); 412 if (memory) { 413 mappedCapacityBytes = initialBytes; 414 mode = MemoryMode::BoundsChecking; 415 } 416 } 417 418 if (!memory) 275 276 bool done = tryAndGC( 277 vm, 278 [&] () -> MemoryResult::Kind { 279 return memoryManager().tryAllocatePhysicalBytes(initialBytes); 280 }); 281 if (!done) 419 282 return nullptr; 420 421 if (!makeNewMemoryReadWriteOrRelinquish(memory, initialBytes, mappedCapacityBytes, mode)) 283 284 char* fastMemory = nullptr; 285 if (Options::useWebAssemblyFastMemory()) { 286 tryAndGC( 287 vm, 288 [&] () -> MemoryResult::Kind { 289 auto result = memoryManager().tryAllocateVirtualPages(); 290 fastMemory = bitwise_cast<char*>(result.basePtr); 291 return result.kind; 292 }); 293 } 294 295 if (fastMemory) { 296 bool writable = true; 297 bool executable = false; 298 OSAllocator::commit(fastMemory, initialBytes, writable, executable); 299 300 if (mprotect(fastMemory + initialBytes, Memory::fastMappedBytes() - initialBytes, PROT_NONE)) { 301 dataLog("mprotect failed: ", strerror(errno), "\n"); 302 RELEASE_ASSERT_NOT_REACHED(); 303 } 304 305 memset(fastMemory, 0, initialBytes); 306 return adoptRef(new Memory(fastMemory, initial, maximum, Memory::fastMappedBytes(), MemoryMode::Signaling)); 307 } 308 309 if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory())) 310 webAssemblyCouldntGetFastMemory(); 311 312 if (!initialBytes) 313 return adoptRef(new Memory(initial, maximum)); 314 315 void* slowMemory = Gigacage::tryAlignedMalloc(WTF::pageSize(), initialBytes); 316 if (!slowMemory) { 317 memoryManager().freePhysicalBytes(initialBytes); 422 318 return nullptr; 423 424 return adoptRef(new Memory(memory, initial, maximum, mappedCapacityBytes, mode)); 319 } 320 memset(slowMemory, 0, initialBytes); 321 return adoptRef(new Memory(slowMemory, initial, maximum, initialBytes, MemoryMode::BoundsChecking)); 425 322 } 426 323 … … 428 325 { 429 326 if (m_memory) { 430 dataLogLnIf(verbose, "Memory::~Memory ", *this); 431 relinquishMemory(m_memory, m_size, m_mappedCapacity, m_mode); 327 memoryManager().freePhysicalBytes(m_size); 328 switch (m_mode) { 329 case MemoryMode::Signaling: 330 mprotect(m_memory, Memory::fastMappedBytes(), PROT_READ | PROT_WRITE); 331 memoryManager().freeVirtualPages(m_memory); 332 break; 333 case MemoryMode::BoundsChecking: 334 Gigacage::alignedFree(m_memory); 335 break; 336 } 432 337 } 433 338 } … … 444 349 } 445 350 446 size_t Memory::maxFastMemoryCount()447 {448 // The order can be relaxed here because we provide a monotonically-increasing estimate. 
A concurrent observer could see a slightly out-of-date value but can't tell that they did.449 return observedMaximumFastMemory.load(std::memory_order_relaxed);450 }451 452 351 bool Memory::addressIsInActiveFastMemory(void* address) 453 352 { 454 // This cannot race in any meaningful way: the thread which calls this function wants to know if a fault it received at a particular address is in a fast memory. That fast memory must therefore be active in that thread. It cannot be added or removed from the list of currently active fast memories. Other memories being added / removed concurrently are inconsequential. 455 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 456 char* start = static_cast<char*>(currentlyActiveFastMemories[idx].load(std::memory_order_acquire)); 457 if (start <= address && address <= start + fastMappedBytes()) 458 return true; 459 } 460 return false; 461 } 462 463 bool Memory::grow(PageCount newSize) 353 return memoryManager().containsAddress(address); 354 } 355 356 bool Memory::grow(VM& vm, PageCount newSize) 464 357 { 465 358 RELEASE_ASSERT(newSize > PageCount::fromBytes(m_size)); … … 471 364 472 365 size_t desiredSize = newSize.bytes(); 473 366 RELEASE_ASSERT(desiredSize > m_size); 367 size_t extraBytes = desiredSize - m_size; 368 RELEASE_ASSERT(extraBytes); 369 bool success = tryAndGC( 370 vm, 371 [&] () -> MemoryResult::Kind { 372 return memoryManager().tryAllocatePhysicalBytes(extraBytes); 373 }); 374 if (!success) 375 return false; 376 474 377 switch (mode()) { 475 case MemoryMode::BoundsChecking: 378 case MemoryMode::BoundsChecking: { 476 379 RELEASE_ASSERT(maximum().bytes() != 0); 477 break; 478 case MemoryMode::Signaling: 380 381 void* newMemory = Gigacage::tryAlignedMalloc(WTF::pageSize(), desiredSize); 382 if (!newMemory) 383 return false; 384 memcpy(newMemory, m_memory, m_size); 385 memset(static_cast<char*>(newMemory) + m_size, 0, desiredSize - m_size); 386 if (m_memory) 387 Gigacage::alignedFree(m_memory); 388 m_memory = newMemory; 389 m_mappedCapacity = desiredSize; 390 m_size = desiredSize; 391 return true; 392 } 393 case MemoryMode::Signaling: { 394 RELEASE_ASSERT(m_memory); 479 395 // Signaling memory must have been pre-allocated virtually. 480 RELEASE_ASSERT(m_memory);481 break;482 case MemoryMode::NumberOfMemoryModes:483 RELEASE_ASSERT_NOT_REACHED();484 }485 486 if (m_memory && desiredSize <= m_mappedCapacity) {487 396 uint8_t* startAddress = static_cast<uint8_t*>(m_memory) + m_size; 488 size_t extraBytes = desiredSize - m_size; 489 RELEASE_ASSERT(extraBytes); 397 490 398 dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(m_memory), " as read+write in range [", RawPointer(startAddress), ", ", RawPointer(startAddress + extraBytes), ")"); 491 399 if (mprotect(startAddress, extraBytes, PROT_READ | PROT_WRITE)) { … … 493 401 return false; 494 402 } 495 403 memset(startAddress, 0, extraBytes); 496 404 m_size = desiredSize; 497 dataLogLnIf(verbose, "Memory::grow in-place ", *this);498 405 return true; 499 } 500 501 // Signaling memory can't grow past its already-mapped size. 502 RELEASE_ASSERT(mode() != MemoryMode::Signaling); 503 504 // Otherwise, let's try to make some new memory. 505 // FIXME mremap would be nice https://bugs.webkit.org/show_bug.cgi?id=170557 506 // FIXME should we over-allocate here? 
https://bugs.webkit.org/show_bug.cgi?id=170826 507 void* newMemory = tryGetSlowMemory(desiredSize); 508 if (!newMemory) 509 return false; 510 511 if (!makeNewMemoryReadWriteOrRelinquish(newMemory, desiredSize, desiredSize, mode())) 512 return false; 513 514 if (m_memory) { 515 memcpy(newMemory, m_memory, m_size); 516 relinquishMemory(m_memory, m_size, m_size, m_mode); 517 } 518 519 m_memory = newMemory; 520 m_mappedCapacity = desiredSize; 521 m_size = desiredSize; 522 523 dataLogLnIf(verbose, "Memory::grow ", *this); 524 return true; 406 } } 407 408 RELEASE_ASSERT_NOT_REACHED(); 409 return false; 525 410 } 526 411 -
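The WasmMemory.cpp rewrite above centers on a small protocol: every MemoryManager operation reports one of three outcomes, and tryAndGC turns those into a retry loop. Success returns immediately; SuccessAndAsyncGC also schedules a concurrent full collection because the manager is at least half way to its limit; SyncGCAndRetry performs a synchronous full collection and tries exactly once more. A sketch of the loop in isolation, with JSC's heap calls stubbed out:

    // The three-outcome protocol from the hunk above, heap stubbed for clarity.
    enum class Outcome { Success, SuccessAndAsyncGC, SyncGCAndRetry };

    struct StubHeap {
        void collectAsyncFull() { /* kick off a concurrent full GC */ }
        void collectSyncFull() { /* run a full GC and wait for it */ }
    };

    template<typename Allocate>
    bool tryAndGC(StubHeap& heap, const Allocate& allocate)
    {
        const unsigned numTries = 2;
        for (unsigned i = 0; i < numTries; ++i) {
            switch (allocate()) {
            case Outcome::Success:
                return true;
            case Outcome::SuccessAndAsyncGC:
                heap.collectAsyncFull(); // pressure is building; collect, but don't wait
                return true;
            case Outcome::SyncGCAndRetry:
                if (i + 1 == numTries)
                    return false; // the last try already failed
                heap.collectSyncFull(); // free memories now, then retry once
                break;
            }
        }
        return false;
    }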
trunk/Source/JavaScriptCore/wasm/WasmMemory.h
r215340 r220118 46 46 enum class MemoryMode : uint8_t { 47 47 BoundsChecking, 48 Signaling, 49 NumberOfMemoryModes 48 Signaling 50 49 }; 51 static constexpr size_t NumberOfMemoryModes = static_cast<size_t>(MemoryMode::NumberOfMemoryModes); 50 static constexpr size_t NumberOfMemoryModes = 2; 52 51 JS_EXPORT_PRIVATE const char* makeString(MemoryMode); 53 52 … … 59 58 60 59 explicit operator bool() const { return !!m_memory; } 61 62 static void initializePreallocations(); 60 63 61 static RefPtr<Memory> create(VM&, PageCount initial, PageCount maximum); 64 62 65 Memory() = default; 66 63 ~Memory(); 67 64 68 65 static size_t fastMappedRedzoneBytes(); 69 66 static size_t fastMappedBytes(); // Includes redzone. 70 static size_t maxFastMemoryCount(); 71 67 static bool addressIsInActiveFastMemory(void*); 72 68 … … 82 78 // grow() should only be called from the JSWebAssemblyMemory object since that object needs to update internal 83 79 // pointers with the current base and size. 84 bool grow(PageCount); 80 bool grow(VM&, PageCount); 85 81 86 82 void check() { ASSERT(!deletionHasBegun()); } -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyInstance.cpp
r218951 r220118 349 349 if (!instance->memory()) { 350 350 // Make sure we have a dummy memory, so that wasm -> wasm thunks avoid checking for a nullptr Memory when trying to set pinned registers. 351 instance->m_memory.set(vm, instance, JSWebAssemblyMemory::create(exec, vm, exec->lexicalGlobalObject()->WebAssemblyMemoryStructure(), adoptRef(*(new Wasm::Memory())))); 351 instance->m_memory.set(vm, instance, JSWebAssemblyMemory::create(exec, vm, exec->lexicalGlobalObject()->WebAssemblyMemoryStructure(), Wasm::Memory::create(vm, 0, 0).releaseNonNull())); 352 352 RETURN_IF_EXCEPTION(throwScope, nullptr); 353 353 } -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyMemory.cpp
r218951 r220118 107 107 108 108 if (delta) { 109 bool success = memory().grow(newSize); 109 bool success = memory().grow(vm, newSize); 110 110 if (!success) { 111 111 ASSERT(m_memoryBase == memory().memory()); … … 139 139 ASSERT(inherits(vm, info())); 140 140 heap()->reportExtraMemoryAllocated(memory().size()); 141 vm.heap.reportWebAssemblyFastMemoriesAllocated(1); 142 141 } -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyMemory.h
r218951 r220118 42 42 typedef JSDestructibleObject Base; 43 43 44 template<typename CellType> 45 static Subspace* subspaceFor(VM& vm) 46 { 47 // We hold onto a lot of memory, so it makes a lot of sense to be swept eagerly. 48 return &vm.eagerlySweptDestructibleObjectSpace; 49 } 50 44 51 static JSWebAssemblyMemory* create(ExecState*, VM&, Structure*, Ref<Wasm::Memory>&&); 45 52 static Structure* createStructure(VM&, JSGlobalObject*, JSValue); -
trunk/Source/WTF/ChangeLog
r220069 r220118 1 2017-08-01 Filip Pizlo <fpizlo@apple.com> 2 3 Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in a gigacage (separate multi-GB VM region) 4 https://bugs.webkit.org/show_bug.cgi?id=174727 5 6 Reviewed by Mark Lam. 7 8 For the Gigacage project to have minimal impact, we need to have some abstraction that allows code to 9 avoid having to guard itself with #if's. This adds a Gigacage abstraction that overlays the Gigacage 10 namespace from bmalloc, which always lets you call things like Gigacage::caged and Gigacage::tryMalloc. 11 12 Because of how many places need to possibly allocate in a gigacage, or possibly perform caged accesses, 13 it's better to hide the question of whether or not it's enabled inside this API. 14 15 * WTF.xcodeproj/project.pbxproj: 16 * wtf/CMakeLists.txt: 17 * wtf/FastMalloc.cpp: 18 * wtf/Gigacage.cpp: Added. 19 (Gigacage::tryMalloc): 20 (Gigacage::tryAllocateVirtualPages): 21 (Gigacage::freeVirtualPages): 22 (Gigacage::tryAlignedMalloc): 23 (Gigacage::alignedFree): 24 (Gigacage::free): 25 * wtf/Gigacage.h: Added. 26 (Gigacage::ensureGigacage): 27 (Gigacage::disableGigacage): 28 (Gigacage::addDisableCallback): 29 (Gigacage::removeDisableCallback): 30 (Gigacage::caged): 31 (Gigacage::isCaged): 32 (Gigacage::tryAlignedMalloc): 33 (Gigacage::alignedFree): 34 (Gigacage::free): 35 1 36 2017-07-31 Matt Lewis <jlewis3@apple.com> 2 37 -
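The entry points listed above are the whole point of the overlay: in GIGACAGE_ENABLED builds they forward to bmalloc's Gigacage namespace, and in other builds the same names fall back to FastMalloc so call sites never need #if guards. A hedged sketch of what the fallback half of such a header could look like; this is illustrative, not the actual wtf/Gigacage.h:

    #if !GIGACAGE_ENABLED
    #include <wtf/FastMalloc.h>

    namespace Gigacage {

    inline void* tryMalloc(size_t size)
    {
        void* result;
        if (!WTF::tryFastMalloc(size).getValue(result))
            return nullptr;
        return result;
    }

    inline void free(void* p) { WTF::fastFree(p); }

    // With no cage there is nothing to mask, so caged() is the identity.
    template<typename T>
    inline T* caged(T* ptr) { return ptr; }

    } // namespace Gigacage
    #endif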
trunk/Source/WTF/WTF.xcodeproj/project.pbxproj
r220069 r220118 24 24 0F30BA901E78708E002CA847 /* GlobalVersion.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F30BA8A1E78708E002CA847 /* GlobalVersion.cpp */; }; 25 25 0F43D8F11DB5ADDC00108FB6 /* AutomaticThread.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F43D8EF1DB5ADDC00108FB6 /* AutomaticThread.cpp */; }; 26 0F5BF1761F23D49A0029D91D /* Gigacage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5BF1741F23D49A0029D91D /* Gigacage.cpp */; }; 26 27 0F60F32F1DFCBD1B00416D6C /* LockedPrintStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F60F32D1DFCBD1B00416D6C /* LockedPrintStream.cpp */; }; 27 28 0F66B28A1DC97BAB004A1D3F /* ClockType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F66B2801DC97BAB004A1D3F /* ClockType.cpp */; }; … … 181 182 0F4570421BE5B58F0062A629 /* Dominators.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dominators.h; sourceTree = "<group>"; }; 182 183 0F4570441BE834410062A629 /* BubbleSort.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BubbleSort.h; sourceTree = "<group>"; }; 184 0F5BF1741F23D49A0029D91D /* Gigacage.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = Gigacage.cpp; sourceTree = "<group>"; }; 185 0F5BF1751F23D49A0029D91D /* Gigacage.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = Gigacage.h; sourceTree = "<group>"; }; 183 186 0F5BF1651F2317830029D91D /* NaturalLoops.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = NaturalLoops.h; sourceTree = "<group>"; }; 184 187 0F60F32D1DFCBD1B00416D6C /* LockedPrintStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LockedPrintStream.cpp; sourceTree = "<group>"; }; … … 801 804 1A1D8B9B173186CE00141DA4 /* FunctionDispatcher.h */, 802 805 A8A472A8151A825A004123FF /* GetPtr.h */, 806 0F5BF1741F23D49A0029D91D /* Gigacage.cpp */, 807 0F5BF1751F23D49A0029D91D /* Gigacage.h */, 803 808 0F30BA8A1E78708E002CA847 /* GlobalVersion.cpp */, 804 809 0F30BA8B1E78708E002CA847 /* GlobalVersion.h */, … … 1384 1389 A5BA15FC182435A600A82E69 /* StringImplCF.cpp in Sources */, 1385 1390 A5BA15F51824348000A82E69 /* StringImplMac.mm in Sources */, 1391 0F5BF1761F23D49A0029D91D /* Gigacage.cpp in Sources */, 1386 1392 A5BA15F3182433A900A82E69 /* StringMac.mm in Sources */, 1387 1393 0FDDBFA71666DFA300C55FEF /* StringPrintStream.cpp in Sources */, -
trunk/Source/WTF/wtf/CMakeLists.txt
r220069 r220118 39 39 FunctionDispatcher.h 40 40 GetPtr.h 41 Gigacage.h 41 42 GlobalVersion.h 42 43 GraphNodeWorklist.h … … 220 221 FilePrintStream.cpp 221 222 FunctionDispatcher.cpp 223 Gigacage.cpp 222 224 GlobalVersion.cpp 223 225 GregorianDateTime.cpp -
trunk/Source/WTF/wtf/FastMalloc.cpp
r218800 r220118 1 1 /* 2 2 * Copyright (c) 2005, 2007, Google Inc. All rights reserved. 3 * Copyright (C) 2005-2009, 2011, 2015-2016 Apple Inc. All rights reserved. 3 * Copyright (C) 2005-2017 Apple Inc. All rights reserved. 4 4 * Redistribution and use in source and binary forms, with or without 5 5 * modification, are permitted provided that the following conditions -
trunk/Source/WebCore/ChangeLog
r220117 r220118 1 2017-08-01 Filip Pizlo <fpizlo@apple.com> 2 3 Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in a gigacage (separate multi-GB VM region) 4 https://bugs.webkit.org/show_bug.cgi?id=174727 5 6 Reviewed by Mark Lam. 7 8 No new tests because no change in behavior. 9 10 Needed to teach Metal how to allocate in the Gigacage. 11 12 * platform/graphics/cocoa/GPUBufferMetal.mm: 13 (WebCore::GPUBuffer::GPUBuffer): 14 (WebCore::GPUBuffer::contents): 15 1 16 2017-08-01 Fujii Hironori <Hironori.Fujii@sony.com> 2 17 -
trunk/Source/WebCore/platform/graphics/cocoa/GPUBufferMetal.mm
r213780 r220118 31 31 #import "GPUDevice.h" 32 32 #import "Logging.h" 33 34 33 #import <Metal/Metal.h> 34 #import <wtf/Gigacage.h> 35 #import <wtf/PageBlock.h> 35 36 36 37 namespace WebCore { … … 42 43 if (!device || !device->platformDevice() || !data) 43 44 return; 44 45 m_buffer = adoptNS((MTLBuffer *)[device->platformDevice() newBufferWithBytes:data->baseAddress() length:data->byteLength() options:MTLResourceOptionCPUCacheModeDefault]); 45 46 size_t pageSize = WTF::pageSize(); 47 size_t pageAlignedSize = roundUpToMultipleOf(pageSize, data->byteLength()); 48 void* pageAlignedCopy = Gigacage::tryAlignedMalloc(pageSize, pageAlignedSize); 49 if (!pageAlignedCopy) 50 return; 51 memcpy(pageAlignedCopy, data->baseAddress(), data->byteLength()); 52 m_contents = ArrayBuffer::createFromBytes(pageAlignedCopy, data->byteLength(), [] (void* ptr) { Gigacage::alignedFree(ptr); }); 53 m_contents->ref(); 54 ArrayBuffer* capturedContents = m_contents.get(); 55 m_buffer = adoptNS((MTLBuffer *)[device->platformDevice() newBufferWithBytesNoCopy:m_contents->data() length:pageAlignedSize options:MTLResourceOptionCPUCacheModeDefault deallocator:^(void*, NSUInteger) { capturedContents->deref(); }]); 56 if (!m_buffer) { 57 m_contents->deref(); 58 m_contents = nullptr; 59 } 46 60 } 47 61 … … 56 70 RefPtr<ArrayBuffer> GPUBuffer::contents() 57 71 { 58 if (m_contents)59 return m_contents;60 61 if (!m_buffer)62 return nullptr;63 64 m_contents = ArrayBuffer::createFromBytes([m_buffer contents], [m_buffer length], [] (void*) { });65 72 return m_contents; 66 73 } -
trunk/Source/WebKit/ChangeLog
r220115 r220118 1 2017-08-01 Filip Pizlo <fpizlo@apple.com> 2 3 Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in a gigacage (separate multi-GB VM region) 4 https://bugs.webkit.org/show_bug.cgi?id=174727 5 6 Reviewed by Mark Lam. 7 8 The WebProcess should never disable the Gigacage by allocating typed arrays outside the Gigacage. So, 9 we add a callback that crashes the process. 10 11 * WebProcess/WebProcess.cpp: 12 (WebKit::gigacageDisabled): 13 (WebKit::m_webSQLiteDatabaseTracker): 14 1 15 2017-08-01 Brian Burg <bburg@apple.com> 2 16 -
trunk/Source/WebKit/WebProcess/WebProcess.cpp
r220105 r220118 147 147 namespace WebKit { 148 148 149 static void gigacageDisabled(void*) 150 { 151 UNREACHABLE_FOR_PLATFORM(); 152 } 153 149 154 WebProcess& WebProcess::singleton() 150 155 { … … 197 202 parentProcessConnection()->send(Messages::WebResourceLoadStatisticsStore::ResourceLoadStatisticsUpdated(WTFMove(statistics)), 0); 198 203 }); 204 205 if (GIGACAGE_ENABLED) 206 Gigacage::addDisableCallback(gigacageDisabled, nullptr); 199 207 } 200 208 -
trunk/Source/bmalloc/CMakeLists.txt
r216763 r220118 12 12 bmalloc/DebugHeap.cpp 13 13 bmalloc/Environment.cpp 14 bmalloc/Gigacage.cpp 14 15 bmalloc/Heap.cpp 15 16 bmalloc/LargeMap.cpp 16 17 bmalloc/Logging.cpp 17 18 bmalloc/ObjectType.cpp 19 bmalloc/Scavenger.cpp 18 20 bmalloc/StaticMutex.cpp 19 21 bmalloc/VMHeap.cpp -
trunk/Source/bmalloc/ChangeLog
r220097 r220118 1 2017-08-01 Filip Pizlo <fpizlo@apple.com> 2 3 Bmalloc and GC should put auxiliaries (butterflies, typed array backing stores) in a gigacage (separate multi-GB VM region) 4 https://bugs.webkit.org/show_bug.cgi?id=174727 5 6 Reviewed by Mark Lam. 7 8 This adds a mechanism for managing multiple isolated heaps in bmalloc. For now, these isoheaps 9 (isolated heaps) have a very simple relationship with each other and with the rest of bmalloc: 10 11 - You have to choose how many isoheaps you will have statically. See numHeaps in HeapKind.h. 12 13 - Because numHeaps is static, each isoheap gets fast thread-local allocation. Basically, we have a 14 Cache for each heap kind. 15 16 - Each isoheap gets its own Heap. 17 18 - Each Heap gets a scavenger thread. 19 20 - Some things, like Zone/VMHeap/Scavenger, are per-process. 21 22 Most of the per-HeapKind functionality is handled by PerHeapKind<>. 23 24 This approach is ideal for supporting special per-HeapKind behaviors. For now we have two heaps: 25 the Primary heap for normal malloc and the Gigacage. The gigacage is a 64GB-aligned 64GB virtual 26 region that we now use for variable-length random-access allocations. No Primary allocations will 27 go into the Gigacage. 28 29 * CMakeLists.txt: 30 * bmalloc.xcodeproj/project.pbxproj: 31 * bmalloc/AllocationKind.h: Added. 32 * bmalloc/Allocator.cpp: 33 (bmalloc::Allocator::Allocator): 34 (bmalloc::Allocator::tryAllocate): 35 (bmalloc::Allocator::allocateImpl): 36 (bmalloc::Allocator::reallocate): 37 (bmalloc::Allocator::refillAllocatorSlowCase): 38 (bmalloc::Allocator::allocateLarge): 39 * bmalloc/Allocator.h: 40 * bmalloc/BExport.h: Added. 41 * bmalloc/Cache.cpp: 42 (bmalloc::Cache::scavenge): 43 (bmalloc::Cache::Cache): 44 (bmalloc::Cache::tryAllocateSlowCaseNullCache): 45 (bmalloc::Cache::allocateSlowCaseNullCache): 46 (bmalloc::Cache::deallocateSlowCaseNullCache): 47 (bmalloc::Cache::reallocateSlowCaseNullCache): 48 (bmalloc::Cache::operator new): Deleted. 49 (bmalloc::Cache::operator delete): Deleted. 50 * bmalloc/Cache.h: 51 (bmalloc::Cache::tryAllocate): 52 (bmalloc::Cache::allocate): 53 (bmalloc::Cache::deallocate): 54 (bmalloc::Cache::reallocate): 55 * bmalloc/Deallocator.cpp: 56 (bmalloc::Deallocator::Deallocator): 57 (bmalloc::Deallocator::scavenge): 58 (bmalloc::Deallocator::processObjectLog): 59 (bmalloc::Deallocator::deallocateSlowCase): 60 * bmalloc/Deallocator.h: 61 * bmalloc/Gigacage.cpp: Added. 62 (Gigacage::Callback::Callback): 63 (Gigacage::Callback::function): 64 (Gigacage::Callbacks::Callbacks): 65 (Gigacage::ensureGigacage): 66 (Gigacage::disableGigacage): 67 (Gigacage::addDisableCallback): 68 (Gigacage::removeDisableCallback): 69 * bmalloc/Gigacage.h: Added. 70 (Gigacage::caged): 71 (Gigacage::isCaged): 72 * bmalloc/Heap.cpp: 73 (bmalloc::Heap::Heap): 74 (bmalloc::Heap::usingGigacage): 75 (bmalloc::Heap::concurrentScavenge): 76 (bmalloc::Heap::splitAndAllocate): 77 (bmalloc::Heap::tryAllocateLarge): 78 (bmalloc::Heap::allocateLarge): 79 (bmalloc::Heap::shrinkLarge): 80 (bmalloc::Heap::deallocateLarge): 81 * bmalloc/Heap.h: 82 (bmalloc::Heap::mutex): 83 (bmalloc::Heap::kind const): 84 (bmalloc::Heap::setScavengerThreadQOSClass): Deleted. 85 * bmalloc/HeapKind.h: Added. 86 * bmalloc/ObjectType.cpp: 87 (bmalloc::objectType): 88 * bmalloc/ObjectType.h: 89 * bmalloc/PerHeapKind.h: Added. 
90 (bmalloc::PerHeapKindBase::PerHeapKindBase): 91 (bmalloc::PerHeapKindBase::size): 92 (bmalloc::PerHeapKindBase::at): 93 (bmalloc::PerHeapKindBase::at const): 94 (bmalloc::PerHeapKindBase::operator[]): 95 (bmalloc::PerHeapKindBase::operator[] const): 96 (bmalloc::StaticPerHeapKind::StaticPerHeapKind): 97 (bmalloc::PerHeapKind::PerHeapKind): 98 (bmalloc::PerHeapKind::~PerHeapKind): 99 * bmalloc/PerThread.h: 100 (bmalloc::PerThread<T>::destructor): 101 (bmalloc::PerThread<T>::getSlowCase): 102 (bmalloc::PerThreadStorage<Cache>::get): Deleted. 103 (bmalloc::PerThreadStorage<Cache>::init): Deleted. 104 * bmalloc/Scavenger.cpp: Added. 105 (bmalloc::Scavenger::Scavenger): 106 (bmalloc::Scavenger::scavenge): 107 * bmalloc/Scavenger.h: Added. 108 (bmalloc::Scavenger::setScavengerThreadQOSClass): 109 (bmalloc::Scavenger::requestedScavengerThreadQOSClass const): 110 * bmalloc/VMHeap.cpp: 111 (bmalloc::VMHeap::VMHeap): 112 (bmalloc::VMHeap::tryAllocateLargeChunk): 113 * bmalloc/VMHeap.h: 114 * bmalloc/Zone.cpp: 115 (bmalloc::Zone::Zone): 116 * bmalloc/Zone.h: 117 * bmalloc/bmalloc.h: 118 (bmalloc::api::tryMalloc): 119 (bmalloc::api::malloc): 120 (bmalloc::api::tryMemalign): 121 (bmalloc::api::memalign): 122 (bmalloc::api::realloc): 123 (bmalloc::api::tryLargeMemalignVirtual): 124 (bmalloc::api::free): 125 (bmalloc::api::freeLargeVirtual): 126 (bmalloc::api::scavengeThisThread): 127 (bmalloc::api::scavenge): 128 (bmalloc::api::isEnabled): 129 (bmalloc::api::setScavengerThreadQOSClass): 130 * bmalloc/mbmalloc.cpp: 131 1 132 2017-08-01 Daewoong Jang <daewoong.jang@navercorp.com> 2 133 -
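Most of the machinery listed above funnels through PerHeapKind<T>: a fixed-size block of storage holding one T per statically-declared HeapKind, each constructed with its kind, so per-process state (PerProcess<PerHeapKind<Heap>>) and per-thread state (PerThread<PerHeapKind<Cache>>) can both be indexed by kind with no dynamic registration. A minimal sketch of that shape, assuming the two kinds described above; this is not bmalloc's actual PerHeapKind:

    #include <cstddef>
    #include <new>
    #include <type_traits>

    enum class HeapKind { Primary, Gigacage };
    constexpr size_t numHeaps = 2; // chosen statically, as the ChangeLog notes

    template<typename T>
    class PerHeapKind {
    public:
        PerHeapKind()
        {
            for (size_t i = 0; i < numHeaps; ++i)
                new (&at(static_cast<HeapKind>(i))) T(static_cast<HeapKind>(i));
        }

        ~PerHeapKind()
        {
            for (size_t i = 0; i < numHeaps; ++i)
                at(static_cast<HeapKind>(i)).~T();
        }

        T& at(HeapKind kind) { return *reinterpret_cast<T*>(&m_storage[static_cast<size_t>(kind)]); }

    private:
        // Raw storage so each T can be constructed in place with its HeapKind.
        std::aligned_storage_t<sizeof(T), alignof(T)> m_storage[numHeaps];
    };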
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
r219009 r220118 8 8 9 9 /* Begin PBXBuildFile section */ 10 0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3DA0131F267AB800342C08 /* AllocationKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 11 0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1461F22A8B10029D91D /* HeapKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 12 0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 13 0F5BF14D1F22B0C30029D91D /* Gigacage.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF14C1F22B0C30029D91D /* Gigacage.h */; settings = {ATTRIBUTES = (Private, ); }; }; 14 0F5BF14F1F22DEAF0029D91D /* Gigacage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */; }; 15 0F5BF1521F22E1570029D91D /* Scavenger.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5BF1501F22E1570029D91D /* Scavenger.cpp */; }; 16 0F5BF1531F22E1570029D91D /* Scavenger.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1511F22E1570029D91D /* Scavenger.h */; settings = {ATTRIBUTES = (Private, ); }; }; 17 0F5BF1731F23C5710029D91D /* BExport.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1721F23C5710029D91D /* BExport.h */; settings = {ATTRIBUTES = (Private, ); }; }; 10 18 1400274918F89C1300115C97 /* Heap.h in Headers */ = {isa = PBXBuildFile; fileRef = 14DA320C18875B09007269E0 /* Heap.h */; settings = {ATTRIBUTES = (Private, ); }; }; 11 19 1400274A18F89C2300115C97 /* VMHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = 144F7BFC18BFC517003537F3 /* VMHeap.h */; settings = {ATTRIBUTES = (Private, ); }; }; … … 76 84 77 85 /* Begin PBXFileReference section */ 86 0F3DA0131F267AB800342C08 /* AllocationKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AllocationKind.h; path = bmalloc/AllocationKind.h; sourceTree = "<group>"; }; 87 0F5BF1461F22A8B10029D91D /* HeapKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = HeapKind.h; path = bmalloc/HeapKind.h; sourceTree = "<group>"; }; 88 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = PerHeapKind.h; path = bmalloc/PerHeapKind.h; sourceTree = "<group>"; }; 89 0F5BF14C1F22B0C30029D91D /* Gigacage.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = Gigacage.h; path = bmalloc/Gigacage.h; sourceTree = "<group>"; }; 90 0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = Gigacage.cpp; path = bmalloc/Gigacage.cpp; sourceTree = "<group>"; }; 91 0F5BF1501F22E1570029D91D /* Scavenger.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; name = Scavenger.cpp; path = bmalloc/Scavenger.cpp; sourceTree = "<group>"; }; 92 0F5BF1511F22E1570029D91D /* Scavenger.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = Scavenger.h; path = bmalloc/Scavenger.h; sourceTree = "<group>"; }; 93 0F5BF1721F23C5710029D91D /* BExport.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = BExport.h; path = bmalloc/BExport.h; sourceTree = "<group>"; }; 78 94 140FA00219CE429C00FFD3C8 /* BumpRange.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = BumpRange.h; path = bmalloc/BumpRange.h; sourceTree = "<group>"; }; 79 95 140FA00419CE4B6800FFD3C8 /* 
LineMetadata.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LineMetadata.h; path = bmalloc/LineMetadata.h; sourceTree = "<group>"; }; … … 236 252 isa = PBXGroup; 237 253 children = ( 254 0F3DA0131F267AB800342C08 /* AllocationKind.h */, 238 255 140FA00219CE429C00FFD3C8 /* BumpRange.h */, 239 256 147DC6E21CA5B70B00724E8D /* Chunk.h */, … … 242 259 14895D8F1A3A319C0006235D /* Environment.cpp */, 243 260 14895D901A3A319C0006235D /* Environment.h */, 261 0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */, 262 0F5BF14C1F22B0C30029D91D /* Gigacage.h */, 244 263 14DA320E18875D9F007269E0 /* Heap.cpp */, 245 264 14DA320C18875B09007269E0 /* Heap.h */, … … 248 267 14105E8318E14374003A106E /* ObjectType.cpp */, 249 268 1485656018A43DBA00ED6942 /* ObjectType.h */, 269 0F5BF1501F22E1570029D91D /* Scavenger.cpp */, 270 0F5BF1511F22E1570029D91D /* Scavenger.h */, 250 271 145F6874179DF84100D65598 /* Sizes.h */, 251 272 144F7BFB18BFC517003537F3 /* VMHeap.cpp */, … … 266 287 6599C5CB1EC3F15900A2F7BB /* AvailableMemory.h */, 267 288 1413E468189EEDE400546D68 /* BAssert.h */, 289 0F5BF1721F23C5710029D91D /* BExport.h */, 268 290 14C919C818FCC59F0028DB43 /* BPlatform.h */, 269 291 14D9DB4517F2447100EAAB79 /* FixedVector.h */, 292 0F5BF1461F22A8B10029D91D /* HeapKind.h */, 270 293 1413E460189DCE1E00546D68 /* Inline.h */, 271 294 141D9AFF1C8E51C0000ABBA0 /* List.h */, … … 274 297 14C8992A1CC485E70027A057 /* Map.h */, 275 298 144DCED617A649D90093B2F2 /* Mutex.h */, 299 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */, 276 300 14446A0717A61FA400F9EA1D /* PerProcess.h */, 277 301 144469FD17A61F1F00F9EA1D /* PerThread.h */, … … 311 335 14DD78C518F48D7500950702 /* Algorithm.h in Headers */, 312 336 14DD789818F48D4A00950702 /* Allocator.h in Headers */, 337 0F5BF1531F22E1570029D91D /* Scavenger.h in Headers */, 338 0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */, 313 339 14DD78C618F48D7500950702 /* AsyncTask.h in Headers */, 314 340 6599C5CD1EC3F15900A2F7BB /* AvailableMemory.h in Headers */, … … 321 347 14DD789918F48D4A00950702 /* Cache.h in Headers */, 322 348 147DC6E31CA5B70B00724E8D /* Chunk.h in Headers */, 349 0F5BF1731F23C5710029D91D /* BExport.h in Headers */, 323 350 14DD789A18F48D4A00950702 /* Deallocator.h in Headers */, 324 351 142B44371E2839E7001DA6E9 /* DebugHeap.h in Headers */, … … 326 353 14DD78C818F48D7500950702 /* FixedVector.h in Headers */, 327 354 1400274918F89C1300115C97 /* Heap.h in Headers */, 355 0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */, 328 356 14DD78C918F48D7500950702 /* Inline.h in Headers */, 329 357 144C07F51C7B70260051BB6A /* LargeMap.h in Headers */, … … 337 365 14DD789318F48D0F00950702 /* ObjectType.h in Headers */, 338 366 14DD78CB18F48D7500950702 /* PerProcess.h in Headers */, 367 0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */, 339 368 14DD78CC18F48D7500950702 /* PerThread.h in Headers */, 340 369 14DD78CD18F48D7500950702 /* Range.h in Headers */, … … 345 374 143CB81D19022BC900B16A45 /* StaticMutex.h in Headers */, 346 375 14DD78CE18F48D7500950702 /* Syscall.h in Headers */, 376 0F5BF14D1F22B0C30029D91D /* Gigacage.h in Headers */, 347 377 14DD78CF18F48D7500950702 /* Vector.h in Headers */, 348 378 14DD78D018F48D7500950702 /* VMAllocate.h in Headers */, … … 430 460 buildActionMask = 2147483647; 431 461 files = ( 462 0F5BF1521F22E1570029D91D /* Scavenger.cpp in Sources */, 432 463 14F271C318EA3978008C152F /* Allocator.cpp in Sources */, 433 464 6599C5CC1EC3F15900A2F7BB /* AvailableMemory.cpp in Sources */, … … 437 468 
14895D911A3A319C0006235D /* Environment.cpp in Sources */, 438 469 14F271C718EA3990008C152F /* Heap.cpp in Sources */, 470 0F5BF14F1F22DEAF0029D91D /* Gigacage.cpp in Sources */, 439 471 144C07F41C7B70260051BB6A /* LargeMap.cpp in Sources */, 440 472 4426E2801C838EE0008EB042 /* Logging.cpp in Sources */, -
trunk/Source/bmalloc/bmalloc/Allocator.cpp
r218788 r220118 39 39 namespace bmalloc { 40 40 41 Allocator::Allocator(Heap* heap, Deallocator& deallocator) 42 : m_debugHeap(heap->debugHeap()) 41 Allocator::Allocator(Heap& heap, Deallocator& deallocator) 42 : m_heap(heap) 43 , m_debugHeap(heap.debugHeap()) 43 44 , m_deallocator(deallocator) 44 45 { … … 60 61 return allocate(size); 61 62 62 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());63 return PerProcess<Heap>::getFastCase()->tryAllocateLarge(lock, alignment, size);63 std::lock_guard<StaticMutex> lock(Heap::mutex()); 64 return m_heap.tryAllocateLarge(lock, alignment, size); 64 65 } 65 66 … … 89 90 return allocate(roundUpToMultipleOf(alignment, size)); 90 91 91 std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); 92 Heap* heap = PerProcess<Heap>::getFastCase(); 92 std::lock_guard<StaticMutex> lock(Heap::mutex()); 93 93 if (crashOnFailure) 94 return heap->allocateLarge(lock, alignment, size);95 return heap->tryAllocateLarge(lock, alignment, size);94 return m_heap.allocateLarge(lock, alignment, size); 95 return m_heap.tryAllocateLarge(lock, alignment, size); 96 96 } 97 97 … … 102 102 103 103 size_t oldSize = 0; 104 switch (objectType( object)) {104 switch (objectType(m_heap.kind(), object)) { 105 105 case ObjectType::Small: { 106 BASSERT(objectType( nullptr) == ObjectType::Small);106 BASSERT(objectType(m_heap.kind(), nullptr) == ObjectType::Small); 107 107 if (!object) 108 108 break; … … 113 113 } 114 114 case ObjectType::Large: { 115 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());116 oldSize = PerProcess<Heap>::getFastCase()->largeSize(lock, object);115 std::lock_guard<StaticMutex> lock(Heap::mutex()); 116 oldSize = m_heap.largeSize(lock, object); 117 117 118 118 if (newSize < oldSize && newSize > smallMax) { 119 PerProcess<Heap>::getFastCase()->shrinkLarge(lock, Range(object, oldSize), newSize);119 m_heap.shrinkLarge(lock, Range(object, oldSize), newSize); 120 120 return object; 121 121 } … … 154 154 BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass]; 155 155 156 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());156 std::lock_guard<StaticMutex> lock(Heap::mutex()); 157 157 m_deallocator.processObjectLog(lock); 158 PerProcess<Heap>::getFastCase()->allocateSmallBumpRanges( 159 lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock)); 158 m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock)); 160 159 } 161 160 … … 170 169 NO_INLINE void* Allocator::allocateLarge(size_t size) 171 170 { 172 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());173 return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size);171 std::lock_guard<StaticMutex> lock(Heap::mutex()); 172 return m_heap.allocateLarge(lock, alignment, size); 174 173 } 175 174 -
trunk/Source/bmalloc/bmalloc/Allocator.h
r210746 r220118 27 27 #define Allocator_h 28 28 29 #include "BExport.h" 29 30 #include "BumpAllocator.h" 30 31 #include <array> … … 40 41 class Allocator { 41 42 public: 42 Allocator(Heap *, Deallocator&);43 Allocator(Heap&, Deallocator&); 43 44 ~Allocator(); 44 45 … … 55 56 56 57 bool allocateFastCase(size_t, void*&); 57 void* allocateSlowCase(size_t);58 BEXPORT void* allocateSlowCase(size_t); 58 59 59 60 void* allocateLogSizeClass(size_t); … … 66 67 std::array<BumpRangeCache, sizeClassCount> m_bumpRangeCaches; 67 68 69 Heap& m_heap; 68 70 DebugHeap* m_debugHeap; 69 71 Deallocator& m_deallocator; -
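The upshot of the two Allocator diffs above: an Allocator is now permanently bound to one Heap& handed in at construction, and its slow paths take the shared Heap::mutex() instead of looking up the PerProcess<Heap> singleton on every call. A minimal sketch of the new wiring, using the accessors shown in this changeset (the choice of HeapKind::Gigacage is just illustrative):

    // Bind an Allocator/Deallocator pair to one specific heap for their lifetime.
    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(HeapKind::Gigacage);
    Deallocator deallocator(heap);          // Deallocator now also takes Heap&
    Allocator allocator(heap, deallocator);

    void* p = allocator.tryAllocate(64);    // small-object fast path: no singleton lookup
    deallocator.deallocate(p);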
trunk/Source/bmalloc/bmalloc/Cache.cpp
r181329 r220118 1 1 /* 2 * Copyright (C) 2014 , 2015Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 31 31 namespace bmalloc { 32 32 33 void * Cache::operator new(size_t size)33 void Cache::scavenge(HeapKind heapKind) 34 34 { 35 return vmAllocate(vmSize(size)); 35 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 36 if (!caches) 37 return; 38 39 caches->at(heapKind).allocator().scavenge(); 40 caches->at(heapKind).deallocator().scavenge(); 36 41 } 37 42 38 void Cache::operator delete(void* p, size_t size) 39 { 40 vmDeallocate(p, vmSize(size)); 41 } 42 43 void Cache::scavenge() 44 { 45 Cache* cache = PerThread<Cache>::getFastCase(); 46 if (!cache) 47 return; 48 49 cache->allocator().scavenge(); 50 cache->deallocator().scavenge(); 51 } 52 53 Cache::Cache() 54 : m_deallocator(PerProcess<Heap>::get()) 55 , m_allocator(PerProcess<Heap>::get(), m_deallocator) 43 Cache::Cache(HeapKind heapKind) 44 : m_deallocator(PerProcess<PerHeapKind<Heap>>::get()->at(heapKind)) 45 , m_allocator(PerProcess<PerHeapKind<Heap>>::get()->at(heapKind), m_deallocator) 56 46 { 57 47 } 58 48 59 NO_INLINE void* Cache::tryAllocateSlowCaseNullCache( size_t size)49 NO_INLINE void* Cache::tryAllocateSlowCaseNullCache(HeapKind heapKind, size_t size) 60 50 { 61 return PerThread< Cache>::getSlowCase()->allocator().tryAllocate(size);51 return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(heapKind).allocator().tryAllocate(size); 62 52 } 63 53 64 NO_INLINE void* Cache::allocateSlowCaseNullCache( size_t size)54 NO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t size) 65 55 { 66 return PerThread< Cache>::getSlowCase()->allocator().allocate(size);56 return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(heapKind).allocator().allocate(size); 67 57 } 68 58 69 NO_INLINE void* Cache::allocateSlowCaseNullCache( size_t alignment, size_t size)59 NO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t alignment, size_t size) 70 60 { 71 return PerThread< Cache>::getSlowCase()->allocator().allocate(alignment, size);61 return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(heapKind).allocator().allocate(alignment, size); 72 62 } 73 63 74 NO_INLINE void Cache::deallocateSlowCaseNullCache( void* object)64 NO_INLINE void Cache::deallocateSlowCaseNullCache(HeapKind heapKind, void* object) 75 65 { 76 PerThread< Cache>::getSlowCase()->deallocator().deallocate(object);66 PerThread<PerHeapKind<Cache>>::getSlowCase()->at(heapKind).deallocator().deallocate(object); 77 67 } 78 68 79 NO_INLINE void* Cache::reallocateSlowCaseNullCache( void* object, size_t newSize)69 NO_INLINE void* Cache::reallocateSlowCaseNullCache(HeapKind heapKind, void* object, size_t newSize) 80 70 { 81 return PerThread< Cache>::getSlowCase()->allocator().reallocate(object, newSize);71 return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(heapKind).allocator().reallocate(object, newSize); 82 72 } 83 73 -
trunk/Source/bmalloc/bmalloc/Cache.h
r205462 r220118 1 1 /* 2 * Copyright (C) 2014 , 2016Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 28 28 29 29 #include "Allocator.h" 30 #include "BExport.h" 30 31 #include "Deallocator.h" 32 #include "HeapKind.h" 31 33 #include "PerThread.h" 32 34 … … 37 39 class Cache { 38 40 public: 39 void* operator new(size_t); 40 void operator delete(void*, size_t); 41 static void* tryAllocate(HeapKind, size_t); 42 static void* allocate(HeapKind, size_t); 43 static void* tryAllocate(HeapKind, size_t alignment, size_t); 44 static void* allocate(HeapKind, size_t alignment, size_t); 45 static void deallocate(HeapKind, void*); 46 static void* reallocate(HeapKind, void*, size_t); 41 47 42 static void* tryAllocate(size_t); 43 static void* allocate(size_t); 44 static void* tryAllocate(size_t alignment, size_t); 45 static void* allocate(size_t alignment, size_t); 46 static void deallocate(void*); 47 static void* reallocate(void*, size_t); 48 static void scavenge(HeapKind); 48 49 49 static void scavenge(); 50 51 Cache(); 50 Cache(HeapKind); 52 51 53 52 Allocator& allocator() { return m_allocator; } … … 55 54 56 55 private: 57 static void* tryAllocateSlowCaseNullCache(size_t);58 static void* allocateSlowCaseNullCache(size_t);59 static void* allocateSlowCaseNullCache(size_t alignment, size_t);60 static void deallocateSlowCaseNullCache(void*);61 static void* reallocateSlowCaseNullCache(void*, size_t);56 BEXPORT static void* tryAllocateSlowCaseNullCache(HeapKind, size_t); 57 BEXPORT static void* allocateSlowCaseNullCache(HeapKind, size_t); 58 BEXPORT static void* allocateSlowCaseNullCache(HeapKind, size_t alignment, size_t); 59 BEXPORT static void deallocateSlowCaseNullCache(HeapKind, void*); 60 BEXPORT static void* reallocateSlowCaseNullCache(HeapKind, void*, size_t); 62 61 63 62 Deallocator m_deallocator; … … 65 64 }; 66 65 67 inline void* Cache::tryAllocate( size_t size)66 inline void* Cache::tryAllocate(HeapKind heapKind, size_t size) 68 67 { 69 Cache* cache = PerThread<Cache>::getFastCase();70 if (!cache )71 return tryAllocateSlowCaseNullCache( size);72 return cache ->allocator().tryAllocate(size);68 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 69 if (!caches) 70 return tryAllocateSlowCaseNullCache(heapKind, size); 71 return caches->at(heapKind).allocator().tryAllocate(size); 73 72 } 74 73 75 inline void* Cache::allocate( size_t size)74 inline void* Cache::allocate(HeapKind heapKind, size_t size) 76 75 { 77 Cache* cache = PerThread<Cache>::getFastCase();78 if (!cache )79 return allocateSlowCaseNullCache( size);80 return cache ->allocator().allocate(size);76 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 77 if (!caches) 78 return allocateSlowCaseNullCache(heapKind, size); 79 return caches->at(heapKind).allocator().allocate(size); 81 80 } 82 81 83 inline void* Cache::tryAllocate( size_t alignment, size_t size)82 inline void* Cache::tryAllocate(HeapKind heapKind, size_t alignment, size_t size) 84 83 { 85 Cache* cache = PerThread<Cache>::getFastCase();86 if (!cache )87 return allocateSlowCaseNullCache( alignment, size);88 return cache ->allocator().tryAllocate(alignment, size);84 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 85 if (!caches) 86 return allocateSlowCaseNullCache(heapKind, alignment, size); 87 return caches->at(heapKind).allocator().tryAllocate(alignment, size); 89 88 } 90 89 91 inline 
void* Cache::allocate( size_t alignment, size_t size)90 inline void* Cache::allocate(HeapKind heapKind, size_t alignment, size_t size) 92 91 { 93 Cache* cache = PerThread<Cache>::getFastCase();94 if (!cache )95 return allocateSlowCaseNullCache( alignment, size);96 return cache ->allocator().allocate(alignment, size);92 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 93 if (!caches) 94 return allocateSlowCaseNullCache(heapKind, alignment, size); 95 return caches->at(heapKind).allocator().allocate(alignment, size); 97 96 } 98 97 99 inline void Cache::deallocate( void* object)98 inline void Cache::deallocate(HeapKind heapKind, void* object) 100 99 { 101 Cache* cache = PerThread<Cache>::getFastCase();102 if (!cache )103 return deallocateSlowCaseNullCache( object);104 return cache ->deallocator().deallocate(object);100 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 101 if (!caches) 102 return deallocateSlowCaseNullCache(heapKind, object); 103 return caches->at(heapKind).deallocator().deallocate(object); 105 104 } 106 105 107 inline void* Cache::reallocate( void* object, size_t newSize)106 inline void* Cache::reallocate(HeapKind heapKind, void* object, size_t newSize) 108 107 { 109 Cache* cache = PerThread<Cache>::getFastCase();110 if (!cache )111 return reallocateSlowCaseNullCache( object, newSize);112 return cache ->allocator().reallocate(object, newSize);108 PerHeapKind<Cache>* caches = PerThread<PerHeapKind<Cache>>::getFastCase(); 109 if (!caches) 110 return reallocateSlowCaseNullCache(heapKind, object, newSize); 111 return caches->at(heapKind).allocator().reallocate(object, newSize); 113 112 } 114 113 -
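Every static Cache entry point is now keyed by HeapKind, and the per-thread storage becomes a PerHeapKind<Cache> array rather than a single Cache, so each thread carries one cache per heap. What call sites look like after this change (heap kinds as used elsewhere in this changeset):

    // The HeapKind argument selects which per-thread cache (and ultimately
    // which Heap) services the request.
    void* object    = Cache::allocate(HeapKind::Primary, 128);
    void* auxiliary = Cache::allocate(HeapKind::Gigacage, 4096);
    Cache::deallocate(HeapKind::Gigacage, auxiliary);
    Cache::deallocate(HeapKind::Primary, object);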
trunk/Source/bmalloc/bmalloc/Deallocator.cpp
r218788 r220118 40 40 namespace bmalloc { 41 41 42 Deallocator::Deallocator(Heap* heap) 43 : m_debugHeap(heap->debugHeap()) 42 Deallocator::Deallocator(Heap& heap) 43 : m_heap(heap) 44 , m_debugHeap(heap.debugHeap()) 44 45 { 45 46 if (m_debugHeap) { … … 60 61 return; 61 62 62 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());63 std::lock_guard<StaticMutex> lock(Heap::mutex()); 63 64 64 65 processObjectLog(lock); 65 PerProcess<Heap>::getFastCase()->deallocateLineCache(lock, lineCache(lock));66 m_heap.deallocateLineCache(lock, lineCache(lock)); 66 67 } 67 68 68 69 void Deallocator::processObjectLog(std::lock_guard<StaticMutex>& lock) 69 70 { 70 Heap* heap = PerProcess<Heap>::getFastCase();71 72 71 for (Object object : m_objectLog) 73 heap->derefSmallLine(lock, object, lineCache(lock));72 m_heap.derefSmallLine(lock, object, lineCache(lock)); 74 73 m_objectLog.clear(); 75 74 } … … 83 82 return; 84 83 85 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());86 if ( PerProcess<Heap>::getFastCase()->isLarge(lock, object)) {87 PerProcess<Heap>::getFastCase()->deallocateLarge(lock, object);84 std::lock_guard<StaticMutex> lock(Heap::mutex()); 85 if (m_heap.isLarge(lock, object)) { 86 m_heap.deallocateLarge(lock, object); 88 87 return; 89 88 } -
trunk/Source/bmalloc/bmalloc/Deallocator.h
r218788 r220118 27 27 #define Deallocator_h 28 28 29 #include "BExport.h" 29 30 #include "FixedVector.h" 30 31 #include "SmallPage.h" … … 41 42 class Deallocator { 42 43 public: 43 Deallocator(Heap *);44 Deallocator(Heap&); 44 45 ~Deallocator(); 45 46 … … 53 54 private: 54 55 bool deallocateFastCase(void*); 55 void deallocateSlowCase(void*);56 BEXPORT void deallocateSlowCase(void*); 56 57 58 Heap& m_heap; 57 59 FixedVector<void*, deallocatorLogCapacity> m_objectLog; 58 60 LineCache m_lineCache; // The Heap removes items from this cache. -
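The Deallocator gets the same treatment as the Allocator: it carries a Heap& from construction, so a free never has to re-derive which heap owns a pointer. The large-object branch of the slow path now reads, per the diff above:

    // From Deallocator::deallocateSlowCase: large objects go straight to the
    // bound heap; small objects fall through to the per-thread object log.
    std::lock_guard<StaticMutex> lock(Heap::mutex());
    if (m_heap.isLarge(lock, object)) {
        m_heap.deallocateLarge(lock, object);
        return;
    }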
trunk/Source/bmalloc/bmalloc/Heap.cpp
r218788 r220118 1 1 /* 2 * Copyright (C) 2014-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 29 29 #include "BumpAllocator.h" 30 30 #include "Chunk.h" 31 #include "Gigacage.h" 31 32 #include "DebugHeap.h" 32 33 #include "PerProcess.h" 34 #include "Scavenger.h" 33 35 #include "SmallLine.h" 34 36 #include "SmallPage.h" 37 #include "VMHeap.h" 38 #include "bmalloc.h" 35 39 #include <thread> 36 40 37 41 namespace bmalloc { 38 42 39 Heap::Heap(std::lock_guard<StaticMutex>&) 40 : m_vmPageSizePhysical(vmPageSizePhysical()) 43 Heap::Heap(HeapKind kind, std::lock_guard<StaticMutex>&) 44 : m_kind(kind) 45 , m_vmPageSizePhysical(vmPageSizePhysical()) 41 46 , m_scavenger(*this, &Heap::concurrentScavenge) 42 47 , m_debugHeap(nullptr) … … 50 55 if (m_environment.isDebugHeapEnabled()) 51 56 m_debugHeap = PerProcess<DebugHeap>::get(); 52 53 #if BOS(DARWIN) 54 auto queue = dispatch_queue_create("WebKit Malloc Memory Pressure Handler", DISPATCH_QUEUE_SERIAL); 55 m_pressureHandlerDispatchSource = dispatch_source_create(DISPATCH_SOURCE_TYPE_MEMORYPRESSURE, 0, DISPATCH_MEMORYPRESSURE_CRITICAL, queue); 56 dispatch_source_set_event_handler(m_pressureHandlerDispatchSource, ^{ 57 std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); 58 scavenge(lock); 59 }); 60 dispatch_resume(m_pressureHandlerDispatchSource); 61 dispatch_release(queue); 57 else { 58 Gigacage::ensureGigacage(); 59 #if GIGACAGE_ENABLED 60 if (usingGigacage()) { 61 RELEASE_BASSERT(g_gigacageBasePtr); 62 m_largeFree.add(LargeRange(g_gigacageBasePtr, GIGACAGE_SIZE, 0)); 63 } 62 64 #endif 65 } 66 67 PerProcess<Scavenger>::get(); 68 } 69 70 bool Heap::usingGigacage() 71 { 72 return m_kind == HeapKind::Gigacage && g_gigacageBasePtr; 63 73 } 64 74 … … 121 131 void Heap::concurrentScavenge() 122 132 { 123 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());133 std::lock_guard<StaticMutex> lock(mutex()); 124 134 125 135 #if BOS(DARWIN) 126 pthread_set_qos_class_self_np( m_requestedScavengerThreadQOSClass, 0);136 pthread_set_qos_class_self_np(PerProcess<Scavenger>::getFastCase()->requestedScavengerThreadQOSClass(), 0); 127 137 #endif 128 138 … … 439 449 } 440 450 441 LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size )451 LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size, AllocationKind allocationKind) 442 452 { 443 453 LargeRange prev; … … 458 468 } 459 469 460 if (range.physicalSize() < range.size()) { 461 scheduleScavengerIfUnderMemoryPressure(range.size()); 470 switch (allocationKind) { 471 case AllocationKind::Virtual: 472 if (range.physicalSize()) 473 vmDeallocatePhysicalPagesSloppy(range.begin(), range.size()); 474 break; 462 475 463 vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize()); 464 range.setPhysicalSize(range.size()); 476 case AllocationKind::Physical: 477 if (range.physicalSize() < range.size()) { 478 scheduleScavengerIfUnderMemoryPressure(range.size()); 479 480 vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize()); 481 range.setPhysicalSize(range.size()); 482 } 483 break; 465 484 } 466 485 … … 477 496 } 478 497 479 void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size )498 void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, AllocationKind allocationKind) 
480 499 { 481 500 BASSERT(isPowerOfTwo(alignment)); … … 495 514 LargeRange range = m_largeFree.remove(alignment, size); 496 515 if (!range) { 497 range = m_vmHeap.tryAllocateLargeChunk(alignment, size); 516 if (usingGigacage()) 517 return nullptr; 518 519 range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size, allocationKind); 498 520 if (!range) 499 521 return nullptr; 500 522 501 523 m_largeFree.add(range); 502 524 … … 504 526 } 505 527 506 return splitAndAllocate(range, alignment, size ).begin();507 } 508 509 void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size )510 { 511 void* result = tryAllocateLarge(lock, alignment, size );528 return splitAndAllocate(range, alignment, size, allocationKind).begin(); 529 } 530 531 void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, AllocationKind allocationKind) 532 { 533 void* result = tryAllocateLarge(lock, alignment, size, allocationKind); 512 534 RELEASE_BASSERT(result); 513 535 return result; … … 530 552 size_t size = m_largeAllocated.remove(object.begin()); 531 553 LargeRange range = LargeRange(object, size); 532 splitAndAllocate(range, alignment, newSize );554 splitAndAllocate(range, alignment, newSize, AllocationKind::Physical); 533 555 534 556 scheduleScavenger(size); 535 557 } 536 558 537 void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object )559 void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object, AllocationKind allocationKind) 538 560 { 539 561 size_t size = m_largeAllocated.remove(object); 540 m_largeFree.add(LargeRange(object, size, size)); 541 562 m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0)); 542 563 scheduleScavenger(size); 543 564 } -
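Two things are worth calling out in the Heap.cpp diff. First, a Gigacage heap seeds m_largeFree with the entire cage up front and refuses to grow: when its free list is exhausted, tryAllocateLarge returns null instead of asking the VMHeap for a fresh chunk. Second, the new AllocationKind parameter threads through the whole large-object path: AllocationKind::Physical commits any uncommitted pages before returning a range, while AllocationKind::Virtual hands out pure address-space reservations and returns physical pages to the OS. A sketch of a virtual-only reservation through this API (the 16MB size is illustrative):

    // Reserve address space inside a heap without committing physical pages.
    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(HeapKind::Primary);
    std::lock_guard<StaticMutex> lock(Heap::mutex());
    void* reservation = heap.tryAllocateLarge(
        lock, vmPageSizePhysical(), 16 * 1024 * 1024, AllocationKind::Virtual);
    if (reservation)
        heap.deallocateLarge(lock, reservation, AllocationKind::Virtual);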
trunk/Source/bmalloc/bmalloc/Heap.h
r218788 r220118 1 1 /* 2 * Copyright (C) 2014-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 27 27 #define Heap_h 28 28 29 #include "AllocationKind.h" 29 30 #include "AsyncTask.h" 30 31 #include "BumpRange.h" 32 #include "Chunk.h" 31 33 #include "Environment.h" 34 #include "HeapKind.h" 32 35 #include "LargeMap.h" 33 36 #include "LineMetadata.h" … … 36 39 #include "Mutex.h" 37 40 #include "Object.h" 41 #include "PerHeapKind.h" 42 #include "PerProcess.h" 38 43 #include "SmallLine.h" 39 44 #include "SmallPage.h" 40 #include "VMHeap.h"41 45 #include "Vector.h" 42 46 #include <array> 43 47 #include <mutex> 44 45 #if BOS(DARWIN)46 #include <dispatch/dispatch.h>47 #endif48 48 49 49 namespace bmalloc { … … 56 56 class Heap { 57 57 public: 58 Heap(std::lock_guard<StaticMutex>&); 58 Heap(HeapKind, std::lock_guard<StaticMutex>&); 59 60 static StaticMutex& mutex() { return PerProcess<PerHeapKind<Heap>>::mutex(); } 61 62 HeapKind kind() const { return m_kind; } 59 63 60 64 DebugHeap* debugHeap() { return m_debugHeap; } … … 65 69 void deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache&); 66 70 67 void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t );68 void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t );69 void deallocateLarge(std::lock_guard<StaticMutex>&, void* );71 void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t, AllocationKind = AllocationKind::Physical); 72 void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t, AllocationKind = AllocationKind::Physical); 73 void deallocateLarge(std::lock_guard<StaticMutex>&, void*, AllocationKind = AllocationKind::Physical); 70 74 71 75 bool isLarge(std::lock_guard<StaticMutex>&, void*); … … 74 78 75 79 void scavenge(std::lock_guard<StaticMutex>&); 76 77 #if BOS(DARWIN)78 void setScavengerThreadQOSClass(qos_class_t overrideClass) { m_requestedScavengerThreadQOSClass = overrideClass; }79 #endif80 80 81 81 private: … … 89 89 90 90 ~Heap() = delete; 91 92 bool usingGigacage(); 91 93 92 94 void initializeLineMetadata(); … … 108 110 void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap); 109 111 110 LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t );112 LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t, AllocationKind); 111 113 112 114 void scheduleScavenger(size_t); … … 114 116 115 117 void concurrentScavenge(); 118 119 HeapKind m_kind; 116 120 117 121 size_t m_vmPageSizePhysical; … … 135 139 Environment m_environment; 136 140 DebugHeap* m_debugHeap; 137 138 VMHeap m_vmHeap;139 140 #if BOS(DARWIN)141 dispatch_source_t m_pressureHandlerDispatchSource;142 qos_class_t m_requestedScavengerThreadQOSClass { QOS_CLASS_USER_INITIATED };143 #endif144 141 }; 145 142 -
trunk/Source/bmalloc/bmalloc/ObjectType.cpp
r199746 r220118 1 1 /* 2 * Copyright (C) 2014 Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 33 33 namespace bmalloc { 34 34 35 ObjectType objectType( void* object)35 ObjectType objectType(HeapKind kind, void* object) 36 36 { 37 37 if (mightBeLarge(object)) { … … 39 39 return ObjectType::Small; 40 40 41 std::lock_guard<StaticMutex> lock( PerProcess<Heap>::mutex());42 if (PerProcess< Heap>::getFastCase()->isLarge(lock, object))41 std::lock_guard<StaticMutex> lock(Heap::mutex()); 42 if (PerProcess<PerHeapKind<Heap>>::getFastCase()->at(kind).isLarge(lock, object)) 43 43 return ObjectType::Large; 44 44 } -
trunk/Source/bmalloc/bmalloc/ObjectType.h
r199746 r220118 1 1 /* 2 * Copyright (C) 2014 Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 28 28 29 29 #include "BAssert.h" 30 #include "HeapKind.h" 30 31 #include "Sizes.h" 31 32 … … 34 35 enum class ObjectType : unsigned char { Small, Large }; 35 36 36 ObjectType objectType( void*);37 ObjectType objectType(HeapKind, void*); 37 38 38 39 inline bool mightBeLarge(void* object) -
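Since each heap now keeps its own large-object map, objectType() must be told which heap a pointer could belong to; only the structural mightBeLarge() check stays heap-agnostic. For example:

    // Classify a pointer against the heap kind it was allocated from.
    bool isLarge = objectType(HeapKind::Primary, object) == ObjectType::Large;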
trunk/Source/bmalloc/bmalloc/PerThread.h
r209590 r220118 1 1 /* 2 * Copyright (C) 2014 Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 29 29 #include "BPlatform.h" 30 30 #include "Inline.h" 31 #include "PerHeapKind.h" 32 #include "VMAllocate.h" 31 33 #include <mutex> 32 34 #include <pthread.h> … … 64 66 template<typename T> struct PerThreadStorage; 65 67 66 // For now, we only support PerThread< Cache>. We can expand to other types by68 // For now, we only support PerThread<PerHeapKind<Cache>>. We can expand to other types by 67 69 // using more keys. 68 template<> struct PerThreadStorage< Cache> {70 template<> struct PerThreadStorage<PerHeapKind<Cache>> { 69 71 static const pthread_key_t key = __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0; 70 72 … … 132 134 { 133 135 T* t = static_cast<T*>(p); 134 delete t; 136 t->~T(); 137 vmDeallocate(t, vmSize(sizeof(T))); 135 138 } 136 139 … … 139 142 { 140 143 BASSERT(!getFastCase()); 141 T* t = new T; 144 T* t = static_cast<T*>(vmAllocate(vmSize(sizeof(T)))); 145 new (t) T(); 142 146 PerThreadStorage<T>::init(t, destructor); 143 147 return t; -
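The switch from operator new/delete to explicit vmAllocate plus placement new is what lets the per-thread PerHeapKind<Cache> bootstrap without recursing into the allocator it backs. The pattern, isolated from the diff:

    // Construct T in page-granular VM directly, bypassing malloc entirely.
    T* t = static_cast<T*>(vmAllocate(vmSize(sizeof(T))));
    new (t) T();
    // ...and tear it down the same way from the pthread key destructor:
    t->~T();
    vmDeallocate(t, vmSize(sizeof(T)));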
trunk/Source/bmalloc/bmalloc/VMHeap.cpp
r217811 r220118 1 1 /* 2 * Copyright (C) 2014 , 2015Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 30 30 namespace bmalloc { 31 31 32 LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size) 32 VMHeap::VMHeap(std::lock_guard<StaticMutex>&) 33 { 34 } 35 36 LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size, AllocationKind allocationKind) 33 37 { 34 38 // We allocate VM in aligned multiples to increase the chances that … … 47 51 if (!memory) 48 52 return LargeRange(); 53 54 if (allocationKind == AllocationKind::Virtual) 55 vmDeallocatePhysicalPagesSloppy(memory, size); 49 56 50 57 Chunk* chunk = static_cast<Chunk*>(memory); 51 58 52 59 #if BOS(DARWIN) 53 m_zone.addRange(Range(chunk->bytes(), size));60 PerProcess<Zone>::get()->addRange(Range(chunk->bytes(), size)); 54 61 #endif 55 62 -
trunk/Source/bmalloc/bmalloc/VMHeap.h
r217811 r220118 1 1 /* 2 * Copyright (C) 2014-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 27 27 #define VMHeap_h 28 28 29 #include "AllocationKind.h" 29 30 #include "Chunk.h" 30 31 #include "FixedVector.h" 32 #include "HeapKind.h" 31 33 #include "LargeRange.h" 32 34 #include "Map.h" … … 46 48 class VMHeap { 47 49 public: 48 LargeRange tryAllocateLargeChunk(size_t alignment, size_t);50 VMHeap(std::lock_guard<StaticMutex>&); 49 51 50 private: 51 #if BOS(DARWIN) 52 Zone m_zone; 53 #endif 52 LargeRange tryAllocateLargeChunk(size_t alignment, size_t, AllocationKind); 54 53 }; 55 54 -
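VMHeap also stops being a per-Heap member: it becomes a process-wide singleton (hence the new lock_guard constructor) shared by all heap kinds, and tryAllocateLargeChunk learns about AllocationKind, so a Virtual chunk is reserved and then immediately has its physical pages dropped. The caller side, as Heap.cpp uses it above:

    // Grab a fresh chunk of address space; for AllocationKind::Virtual no
    // pages stay committed after the call.
    LargeRange range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(
        alignment, size, AllocationKind::Virtual);
    if (!range)
        return nullptr; // the Heap treats this as out of memory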
trunk/Source/bmalloc/bmalloc/Zone.cpp
r200983 r220118 116 116 }; 117 117 118 Zone::Zone( )118 Zone::Zone(std::lock_guard<StaticMutex>&) 119 119 { 120 120 malloc_zone_t::size = &bmalloc::zoneSize; -
trunk/Source/bmalloc/bmalloc/Zone.h
r200983 r220118 29 29 #include "FixedVector.h" 30 30 #include "Range.h" 31 #include "StaticMutex.h" 31 32 #include <malloc/malloc.h> 33 #include <mutex> 32 34 33 35 namespace bmalloc { … … 40 42 static const size_t capacity = 2048; 41 43 42 Zone( );44 Zone(std::lock_guard<StaticMutex>&); 43 45 Zone(task_t, memory_reader_t, vm_address_t); 44 46 -
trunk/Source/bmalloc/bmalloc/bmalloc.h
r217918 r220118 1 1 /* 2 * Copyright (C) 2014-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2014-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 26 26 #include "AvailableMemory.h" 27 27 #include "Cache.h" 28 #include "Gigacage.h" 28 29 #include "Heap.h" 30 #include "PerHeapKind.h" 29 31 #include "PerProcess.h" 32 #include "Scavenger.h" 30 33 #include "StaticMutex.h" 31 34 … … 34 37 35 38 // Returns null on failure. 36 inline void* tryMalloc(size_t size )39 inline void* tryMalloc(size_t size, HeapKind kind = HeapKind::Primary) 37 40 { 38 return Cache::tryAllocate( size);41 return Cache::tryAllocate(kind, size); 39 42 } 40 43 41 44 // Crashes on failure. 42 inline void* malloc(size_t size )45 inline void* malloc(size_t size, HeapKind kind = HeapKind::Primary) 43 46 { 44 return Cache::allocate( size);47 return Cache::allocate(kind, size); 45 48 } 46 49 47 50 // Returns null on failure. 48 inline void* tryMemalign(size_t alignment, size_t size )51 inline void* tryMemalign(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary) 49 52 { 50 return Cache::tryAllocate( alignment, size);53 return Cache::tryAllocate(kind, alignment, size); 51 54 } 52 55 53 56 // Crashes on failure. 54 inline void* memalign(size_t alignment, size_t size )57 inline void* memalign(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary) 55 58 { 56 return Cache::allocate( alignment, size);59 return Cache::allocate(kind, alignment, size); 57 60 } 58 61 59 62 // Crashes on failure. 60 inline void* realloc(void* object, size_t newSize )63 inline void* realloc(void* object, size_t newSize, HeapKind kind = HeapKind::Primary) 61 64 { 62 return Cache::reallocate( object, newSize);65 return Cache::reallocate(kind, object, newSize); 63 66 } 64 67 65 inline void free(void* object) 68 // Returns null for failure 69 inline void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary) 66 70 { 67 Cache::deallocate(object); 71 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 72 std::lock_guard<StaticMutex> lock(Heap::mutex()); 73 return heap.allocateLarge(lock, alignment, size, AllocationKind::Virtual); 74 } 75 76 inline void free(void* object, HeapKind kind = HeapKind::Primary) 77 { 78 Cache::deallocate(kind, object); 79 } 80 81 inline void freeLargeVirtual(void* object, HeapKind kind = HeapKind::Primary) 82 { 83 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 84 std::lock_guard<StaticMutex> lock(Heap::mutex()); 85 heap.deallocateLarge(lock, object, AllocationKind::Virtual); 68 86 } 69 87 70 88 inline void scavengeThisThread() 71 89 { 72 Cache::scavenge(); 90 for (unsigned i = numHeaps; i--;) 91 Cache::scavenge(static_cast<HeapKind>(i)); 73 92 } 74 93 … … 77 96 scavengeThisThread(); 78 97 79 std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex()); 80 PerProcess<Heap>::get()->scavenge(lock); 98 PerProcess<Scavenger>::get()->scavenge(); 81 99 } 82 100 83 inline bool isEnabled( )101 inline bool isEnabled(HeapKind kind = HeapKind::Primary) 84 102 { 85 std::unique_lock<StaticMutex> lock( PerProcess<Heap>::mutex());86 return !PerProcess< Heap>::getFastCase()->debugHeap();103 std::unique_lock<StaticMutex> lock(Heap::mutex()); 104 return !PerProcess<PerHeapKind<Heap>>::getFastCase()->at(kind).debugHeap(); 87 105 } 88 106 … … 107 125 inline void setScavengerThreadQOSClass(qos_class_t overrideClass) 108 126 { 109 std::unique_lock<StaticMutex> lock( PerProcess<Heap>::mutex());110 
PerProcess< Heap>::getFastCase()->setScavengerThreadQOSClass(overrideClass);127 std::unique_lock<StaticMutex> lock(Heap::mutex()); 128 PerProcess<Scavenger>::get()->setScavengerThreadQOSClass(overrideClass); 111 129 } 112 130 #endif -
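Taken together, the public bmalloc.h API now lets a client pick a heap per call and manage address-space-only reservations, which is what replaces the deleted WebAssembly fast-memory accounting in the GC Heap. A hedged usage sketch; the bmalloc::api namespace qualification is assumed from bmalloc's surrounding conventions, and the alignment and 4GB size are illustrative:

    namespace api = bmalloc::api; // assumed enclosing namespace

    // Auxiliaries go to the gigacage heap; everything else defaults to
    // HeapKind::Primary, so existing callers are unchanged.
    void* butterfly = api::malloc(1024, HeapKind::Gigacage);
    api::free(butterfly, HeapKind::Gigacage);

    // Reserve 4GB of virtual address space with no physical backing (e.g.
    // for a wasm fast memory), then release it through the matching API.
    void* reservation = api::tryLargeMemalignVirtual(64 * 1024, 4ull << 30);
    if (reservation)
        api::freeLargeVirtual(reservation);

Note also that scavengeThisThread() now loops over every HeapKind, and setScavengerThreadQOSClass() forwards to the new process-wide Scavenger rather than to a Heap.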
trunk/Source/bmalloc/bmalloc/mbmalloc.cpp
r178609 r220118 26 26 #include "bmalloc.h" 27 27 28 # define EXPORT __attribute__((visibility("default")))28 #include "BExport.h" 29 29 30 30 extern "C" { 31 31 32 EXPORT void* mbmalloc(size_t);33 EXPORT void* mbmemalign(size_t, size_t);34 EXPORT void mbfree(void*, size_t);35 EXPORT void* mbrealloc(void*, size_t, size_t);36 EXPORT void mbscavenge();32 BEXPORT void* mbmalloc(size_t); 33 BEXPORT void* mbmemalign(size_t, size_t); 34 BEXPORT void mbfree(void*, size_t); 35 BEXPORT void* mbrealloc(void*, size_t, size_t); 36 BEXPORT void mbscavenge(); 37 37 38 38 void* mbmalloc(size_t size) -
trunk/Tools/Scripts/run-jsc-stress-tests
r219187 r220118 1214 1214 run("wasm-no-call-ic", "-m", "--useCallICsForWebAssemblyToJSCalls=false", *FTL_OPTIONS) 1215 1215 run("wasm-no-tls-context", "-m", "--useFastTLSForWasmContext=false", *FTL_OPTIONS) 1216 run("wasm-slow-memory", "-m", "--useWebAssemblyFastMemory=false", *FTL_OPTIONS) 1216 1217 end 1217 1218 end