Changeset 241847 in WebKit
- Timestamp: Feb 20, 2019, 4:03:17 PM
- Location: trunk/Source/bmalloc
- Files: 15 edited
trunk/Source/bmalloc/ChangeLog
(r241841 → r241847) New entry added at the top of the ChangeLog, ahead of the existing 2019-02-20 Andy Estes <aestes@apple.com> entry:

2019-02-19  Yusuke Suzuki  <ysuzuki@apple.com>

        [bmalloc] bmalloc::Heap is allocated even though we use system malloc mode
        https://bugs.webkit.org/show_bug.cgi?id=194836

        Reviewed by Mark Lam.

        Previously, bmalloc::Heap held the DebugHeap and delegated allocation and deallocation to it.
        However, bmalloc::Heap is large, and we would like to avoid initializing it at all under the
        system malloc mode.

        This patch extracts DebugHeap out of bmalloc::Heap and logically places it at the boundary of
        bmalloc::api: bmalloc::api delegates allocation and deallocation to DebugHeap when the debug heap
        is enabled, and otherwise uses bmalloc's usual mechanism. The challenge is keeping bmalloc's fast
        path fast.

        1. For IsoHeaps, we use the same technique as Cache. If the debug heap is enabled, we always take
        the slow path of IsoHeap allocation and keep IsoTLS::get() returning nullptr. In the slow path we
        simply fall back to the usual bmalloc::api::tryMalloc implementation. This is efficient because
        bmalloc keeps using its fast path.

        2. For the other APIs, such as freeLargeVirtual, we just add a DebugHeap check, because these APIs
        themselves take a fair amount of time, so the cost of the check does not matter.

        * bmalloc/Allocator.cpp:
        (bmalloc::Allocator::reallocateImpl):
        * bmalloc/Cache.cpp:
        (bmalloc::Cache::tryAllocateSlowCaseNullCache):
        (bmalloc::Cache::allocateSlowCaseNullCache):
        (bmalloc::Cache::deallocateSlowCaseNullCache):
        (bmalloc::Cache::tryReallocateSlowCaseNullCache):
        (bmalloc::Cache::reallocateSlowCaseNullCache):
        (): Deleted.
        (bmalloc::debugHeap): Deleted.
        * bmalloc/DebugHeap.cpp:
        * bmalloc/DebugHeap.h:
        (bmalloc::DebugHeap::tryGet):
        * bmalloc/Heap.cpp:
        (bmalloc::Heap::Heap):
        (bmalloc::Heap::footprint):
        (bmalloc::Heap::tryAllocateLarge):
        (bmalloc::Heap::deallocateLarge):
        * bmalloc/Heap.h:
        (bmalloc::Heap::debugHeap): Deleted.
        * bmalloc/IsoTLS.cpp:
        (bmalloc::IsoTLS::IsoTLS):
        (bmalloc::IsoTLS::isUsingDebugHeap): Deleted.
        (bmalloc::IsoTLS::debugMalloc): Deleted.
        (bmalloc::IsoTLS::debugFree): Deleted.
        * bmalloc/IsoTLS.h:
        * bmalloc/IsoTLSInlines.h:
        (bmalloc::IsoTLS::allocateSlow):
        (bmalloc::IsoTLS::deallocateSlow):
        * bmalloc/ObjectType.cpp:
        (bmalloc::objectType):
        * bmalloc/ObjectType.h:
        * bmalloc/Scavenger.cpp:
        (bmalloc::Scavenger::Scavenger):
        * bmalloc/bmalloc.cpp:
        (bmalloc::api::tryLargeZeroedMemalignVirtual):
        (bmalloc::api::freeLargeVirtual):
        (bmalloc::api::scavenge):
        (bmalloc::api::isEnabled):
        (bmalloc::api::setScavengerThreadQOSClass):
        (bmalloc::api::commitAlignedPhysical):
        (bmalloc::api::decommitAlignedPhysical):
        (bmalloc::api::enableMiniMode):
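For readers skimming the diffs below, here is a compact, self-contained sketch of the pattern described in point 2: cache the answer to the debug-heap question in a process-global pointer so every bmalloc::api entry point can branch on it cheaply. This is an illustration only; DebugAllocator, environmentWantsDebugHeap(), and apiMalloc() are placeholder names invented for this sketch, not bmalloc identifiers (the real mechanism is DebugHeap::tryGet() in the DebugHeap.h hunk below).

    // Sketch of the cached debug-heap check (placeholder names; see the real
    // DebugHeap::tryGet() in the DebugHeap.h hunk below).
    #include <cstddef>
    #include <cstdlib>

    struct DebugAllocator {
        void* malloc(size_t size) { return ::malloc(size); } // delegate to the system allocator
    };

    static bool environmentWantsDebugHeap()
    {
        return std::getenv("MallocStackLogging"); // stand-in for Environment::isDebugHeapEnabled()
    }

    static void* bmallocFastPath(size_t size)
    {
        return ::malloc(size); // stand-in for the normal bmalloc allocation path
    }

    static DebugAllocator* debugAllocatorCache { nullptr };

    static inline DebugAllocator* tryGetDebugAllocator()
    {
        if (debugAllocatorCache)
            return debugAllocatorCache;       // steady state: one load and one branch
        if (environmentWantsDebugHeap()) {
            static DebugAllocator allocator;  // constructed only in system-malloc mode
            debugAllocatorCache = &allocator;
            return debugAllocatorCache;
        }
        return nullptr;                       // normal mode: no debug-heap state is created
    }

    void* apiMalloc(size_t size)
    {
        if (auto* debug = tryGetDebugAllocator())
            return debug->malloc(size);       // debug mode: heavy heap machinery never initialized
        return bmallocFastPath(size);         // normal mode
    }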
trunk/Source/bmalloc/bmalloc/Allocator.cpp
(r241832 → r241847)

     {
         size_t oldSize = 0;
-        switch (objectType(m_heap.kind(), object)) {
+        switch (objectType(m_heap, object)) {
         case ObjectType::Small: {
-            BASSERT(objectType(m_heap.kind(), nullptr) == ObjectType::Small);
+            BASSERT(objectType(m_heap, nullptr) == ObjectType::Small);
             if (!object)
                 break;
trunk/Source/bmalloc/bmalloc/Cache.cpp
(r241837 → r241847)

 namespace bmalloc {
 
-static DebugHeap* debugHeapCache { nullptr };
-
 void Cache::scavenge(HeapKind heapKind)
 {
…
 }
 
-static BINLINE DebugHeap* debugHeap()
-{
-    if (debugHeapCache)
-        return debugHeapCache;
-    if (PerProcess<Environment>::get()->isDebugHeapEnabled()) {
-        debugHeapCache = PerProcess<DebugHeap>::get();
-        return debugHeapCache;
-    }
-    return nullptr;
-}
-
 Cache::Cache(HeapKind heapKind)
     : m_deallocator(PerProcess<PerHeapKind<Heap>>::get()->at(heapKind))
…
 BNO_INLINE void* Cache::tryAllocateSlowCaseNullCache(HeapKind heapKind, size_t size)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = false;
-        return heap->malloc(size, crashOnFailure);
+        return debugHeap->malloc(size, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryAllocate(size);
…
 BNO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t size)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = true;
-        return heap->malloc(size, crashOnFailure);
+        return debugHeap->malloc(size, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().allocate(size);
…
 BNO_INLINE void* Cache::tryAllocateSlowCaseNullCache(HeapKind heapKind, size_t alignment, size_t size)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = false;
-        return heap->memalign(alignment, size, crashOnFailure);
+        return debugHeap->memalign(alignment, size, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryAllocate(alignment, size);
…
 BNO_INLINE void* Cache::allocateSlowCaseNullCache(HeapKind heapKind, size_t alignment, size_t size)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = true;
-        return heap->memalign(alignment, size, crashOnFailure);
+        return debugHeap->memalign(alignment, size, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().allocate(alignment, size);
…
 BNO_INLINE void Cache::deallocateSlowCaseNullCache(HeapKind heapKind, void* object)
 {
-    if (auto* heap = debugHeap()) {
-        heap->free(object);
+    if (auto* debugHeap = DebugHeap::tryGet()) {
+        debugHeap->free(object);
         return;
     }
…
 BNO_INLINE void* Cache::tryReallocateSlowCaseNullCache(HeapKind heapKind, void* object, size_t newSize)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = false;
-        return heap->realloc(object, newSize, crashOnFailure);
+        return debugHeap->realloc(object, newSize, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().tryReallocate(object, newSize);
…
 BNO_INLINE void* Cache::reallocateSlowCaseNullCache(HeapKind heapKind, void* object, size_t newSize)
 {
-    if (auto* heap = debugHeap()) {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
         constexpr bool crashOnFailure = true;
-        return heap->realloc(object, newSize, crashOnFailure);
+        return debugHeap->realloc(object, newSize, crashOnFailure);
     }
     return PerThread<PerHeapKind<Cache>>::getSlowCase()->at(mapToActiveHeapKind(heapKind)).allocator().reallocate(object, newSize);
trunk/Source/bmalloc/bmalloc/DebugHeap.cpp
(r241837 → r241847)

 namespace bmalloc {
 
+DebugHeap* debugHeapCache { nullptr };
+
 #if BOS(DARWIN)
trunk/Source/bmalloc/bmalloc/DebugHeap.h
(r241837 → r241847)

 #pragma once
 
+#include "Environment.h"
 #include "Mutex.h"
+#include "PerProcess.h"
 #include <mutex>
 #include <unordered_map>
…
     void freeLarge(void* base);
 
+    static DebugHeap* tryGet();
+
 private:
 #if BOS(DARWIN)
…
 };
 
+extern BEXPORT DebugHeap* debugHeapCache;
+BINLINE DebugHeap* DebugHeap::tryGet()
+{
+    if (debugHeapCache)
+        return debugHeapCache;
+    if (PerProcess<Environment>::get()->isDebugHeapEnabled()) {
+        debugHeapCache = PerProcess<DebugHeap>::get();
+        return debugHeapCache;
+    }
+    return nullptr;
+}
+
 } // namespace bmalloc
trunk/Source/bmalloc/bmalloc/Environment.h
(r230308 → r241847)

 class Environment {
 public:
-    Environment(std::lock_guard<Mutex>&);
+    BEXPORT Environment(std::lock_guard<Mutex>&);
 
     bool isDebugHeapEnabled() { return m_isDebugHeapEnabled; }
trunk/Source/bmalloc/bmalloc/Heap.cpp
(r241305 → r241847)

     : m_kind(kind)
     , m_vmPageSizePhysical(vmPageSizePhysical())
-    , m_debugHeap(nullptr)
 {
     RELEASE_BASSERT(vmPageSizePhysical() >= smallPageSize);
…
     initializePageMetadata();
 
-    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
-        m_debugHeap = PerProcess<DebugHeap>::get();
-    else {
-        Gigacage::ensureGigacage();
+    BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());
+
+    Gigacage::ensureGigacage();
 #if GIGACAGE_ENABLED
     if (usingGigacage()) {
         RELEASE_BASSERT(gigacageBasePtr());
         uint64_t random[2];
         cryptoRandom(reinterpret_cast<unsigned char*>(random), sizeof(random));
         size_t size = roundDownToMultipleOf(vmPageSize(), gigacageSize() - (random[0] % Gigacage::maximumCageSizeReductionForSlide));
         ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize() - size));
         void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset;
         m_largeFree.add(LargeRange(base, size, 0, 0));
     }
 #endif
-    }
 
     m_scavenger = PerProcess<Scavenger>::get();
…
 size_t Heap::footprint()
 {
-    BASSERT(!m_debugHeap);
     return m_footprint;
 }
…
     BASSERT(isPowerOfTwo(alignment));
 
-    if (m_debugHeap)
-        return m_debugHeap->memalignLarge(alignment, size);
-
     m_scavenger->didStartGrowing();
…
 void Heap::deallocateLarge(std::unique_lock<Mutex>&, void* object)
 {
-    if (m_debugHeap)
-        return m_debugHeap->freeLarge(object);
-
     size_t size = m_largeAllocated.remove(object);
     m_largeFree.add(LargeRange(object, size, size, size));
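The BASSERT added to the Heap constructor above is what actually delivers the win in the bug title: bmalloc's per-process singletons are built lazily on first use, so as long as every debug-mode entry point bails out before calling PerProcess<PerHeapKind<Heap>>::get(), the Heap constructor never runs. The sketch below shows only that construct-on-first-get property; LazySingleton is a simplified stand-in for bmalloc's PerProcess and ignores its locking and argument-passing details.

    // Minimal construct-on-first-get sketch (LazySingleton is a placeholder for
    // bmalloc's PerProcess<T>; the real class has different internals).
    #include <cstddef>
    #include <cstdio>
    #include <mutex>

    template<typename T>
    class LazySingleton {
    public:
        static T* get()
        {
            static std::once_flag flag;
            std::call_once(flag, [] { s_object = new T; }); // T constructed only on the first get()
            return s_object;
        }
    private:
        static inline T* s_object { nullptr };
    };

    struct ExpensiveHeap {
        ExpensiveHeap() { std::puts("heavy metadata setup we want to skip in debug mode"); }
    };

    void* allocate(bool debugHeapEnabled, std::size_t size)
    {
        if (debugHeapEnabled)
            return ::operator new(size);     // never touches the singleton, so it is never built
        LazySingleton<ExpensiveHeap>::get(); // first normal allocation pays the setup cost once
        return ::operator new(size);         // placeholder for the real heap allocation
    }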
trunk/Source/bmalloc/bmalloc/Heap.h
(r241305 → r241847)

     HeapKind kind() const { return m_kind; }
 
-    DebugHeap* debugHeap() { return m_debugHeap; }
-
     void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass,
         BumpAllocator&, BumpRangeCache&, LineCache&);
…
     Scavenger* m_scavenger { nullptr };
-    DebugHeap* m_debugHeap { nullptr };
 
     size_t m_footprint { 0 };
trunk/Source/bmalloc/bmalloc/IsoTLS.cpp
(r241837 → r241847)

 #include "IsoTLS.h"
 
-#include "DebugHeap.h"
 #include "Environment.h"
 #include "Gigacage.h"
…
 IsoTLS::IsoTLS()
 {
+    BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());
 }
…
-bool IsoTLS::isUsingDebugHeap()
-{
-    return PerProcess<Environment>::get()->isDebugHeapEnabled();
-}
-
-auto IsoTLS::debugMalloc(size_t size) -> DebugMallocResult
-{
-    DebugMallocResult result;
-    if ((result.usingDebugHeap = isUsingDebugHeap())) {
-        constexpr bool crashOnFailure = true;
-        result.ptr = PerProcess<DebugHeap>::get()->malloc(size, crashOnFailure);
-    }
-    return result;
-}
-
-bool IsoTLS::debugFree(void* p)
-{
-    if (isUsingDebugHeap()) {
-        PerProcess<DebugHeap>::get()->free(p);
-        return true;
-    }
-    return false;
-}
-
 void IsoTLS::determineMallocFallbackState()
trunk/Source/bmalloc/bmalloc/IsoTLS.h
(r229694 → r241847)

     BEXPORT static void determineMallocFallbackState();
 
-    static bool isUsingDebugHeap();
-
-    struct DebugMallocResult {
-        void* ptr { nullptr };
-        bool usingDebugHeap { false };
-    };
-
-    BEXPORT static DebugMallocResult debugMalloc(size_t);
-    BEXPORT static bool debugFree(void*);
-
     IsoTLSEntry* m_lastEntry { nullptr };
     unsigned m_extent { 0 };
trunk/Source/bmalloc/bmalloc/IsoTLSInlines.h
(r230308 → r241847)

 #pragma once
 
+#include "Environment.h"
 #include "IsoHeapImpl.h"
 #include "IsoTLS.h"
…
     }
 
-    auto debugMallocResult = debugMalloc(Config::objectSize);
-    if (debugMallocResult.usingDebugHeap)
-        return debugMallocResult.ptr;
+    // If debug heap is enabled, s_mallocFallbackState becomes MallocFallbackState::FallBackToMalloc.
+    BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());
 
     IsoTLS* tls = ensureHeapAndEntries(handle);
…
     }
 
-    if (debugFree(p))
-        return;
+    // If debug heap is enabled, s_mallocFallbackState becomes MallocFallbackState::FallBackToMalloc.
+    BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());
 
     RELEASE_BASSERT(handle.isInitialized());
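The comments added above refer to IsoTLS's malloc-fallback machinery, which point 1 of the ChangeLog relies on: once the debug heap is detected, s_mallocFallbackState becomes FallBackToMalloc, so IsoHeap allocations stay on the slow path and route to api::tryMalloc/api::free while IsoTLS::get() keeps returning nullptr. The sketch below shows that state machine in isolation; the enum and the general flow follow the ChangeLog's description, but the function names and bodies are simplified stand-ins, not the real IsoTLS code.

    // Illustrative fallback state machine (stand-in names; only the shape matches bmalloc).
    #include <atomic>
    #include <cstddef>
    #include <cstdlib>

    enum class MallocFallbackState { Undecided, FallBackToMalloc, DoNotFallBack };

    static std::atomic<MallocFallbackState> s_mallocFallbackState { MallocFallbackState::Undecided };

    static bool debugHeapEnabled()
    {
        return std::getenv("MallocStackLogging"); // stand-in for Environment::isDebugHeapEnabled()
    }

    static void* isoHeapFastPath(std::size_t size)
    {
        return std::malloc(size); // stand-in for the real per-type IsoHeap allocator
    }

    static void determineFallbackState()
    {
        // Decided once, on the first slow-path allocation; afterwards it is a single load.
        s_mallocFallbackState.store(debugHeapEnabled()
            ? MallocFallbackState::FallBackToMalloc
            : MallocFallbackState::DoNotFallBack);
    }

    void* isoAllocateSlow(std::size_t size)
    {
        if (s_mallocFallbackState.load() == MallocFallbackState::Undecided)
            determineFallbackState();
        if (s_mallocFallbackState.load() == MallocFallbackState::FallBackToMalloc)
            return std::malloc(size); // in bmalloc this would be api::tryMalloc, per the ChangeLog
        return isoHeapFastPath(size); // normal mode: use the IsoHeap path from here on
    }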
trunk/Source/bmalloc/bmalloc/ObjectType.cpp
(r230501 → r241847)

 namespace bmalloc {
 
-ObjectType objectType(HeapKind kind, void* object)
+ObjectType objectType(Heap& heap, void* object)
 {
     if (mightBeLarge(object)) {
…
 
         std::unique_lock<Mutex> lock(Heap::mutex());
-        if (PerProcess<PerHeapKind<Heap>>::getFastCase()->at(kind).isLarge(lock, object))
+        if (heap.isLarge(lock, object))
             return ObjectType::Large;
     }
trunk/Source/bmalloc/bmalloc/ObjectType.h
(r220118 → r241847)

 namespace bmalloc {
 
+class Heap;
+
 enum class ObjectType : unsigned char { Small, Large };
 
-ObjectType objectType(HeapKind, void*);
+ObjectType objectType(Heap&, void*);
 
 inline bool mightBeLarge(void* object)
trunk/Source/bmalloc/bmalloc/Scavenger.cpp
(r241580 → r241847)

 Scavenger::Scavenger(std::lock_guard<Mutex>&)
 {
-    if (PerProcess<Environment>::get()->isDebugHeapEnabled())
-        return;
+    BASSERT(!PerProcess<Environment>::get()->isDebugHeapEnabled());
 
 #if BOS(DARWIN)
trunk/Source/bmalloc/bmalloc/bmalloc.cpp
(r241832 → r241847)

 #include "bmalloc.h"
 
+#include "DebugHeap.h"
 #include "Environment.h"
 #include "PerProcess.h"
…
     RELEASE_BASSERT(size >= requestedSize);
 
-    kind = mapToActiveHeapKind(kind);
-    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
-
-    void* result;
-    {
+    void* result;
+    if (auto* debugHeap = DebugHeap::tryGet())
+        result = debugHeap->memalignLarge(alignment, size);
+    else {
+        kind = mapToActiveHeapKind(kind);
+        Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
+
         std::unique_lock<Mutex> lock(Heap::mutex());
         result = heap.tryAllocateLarge(lock, alignment, size);
…
 void freeLargeVirtual(void* object, size_t size, HeapKind kind)
 {
+    if (auto* debugHeap = DebugHeap::tryGet()) {
+        debugHeap->freeLarge(object);
+        return;
+    }
     kind = mapToActiveHeapKind(kind);
     Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
…
     scavengeThisThread();
 
-    PerProcess<Scavenger>::get()->scavenge();
+    if (!DebugHeap::tryGet())
+        PerProcess<Scavenger>::get()->scavenge();
…
 void setScavengerThreadQOSClass(qos_class_t overrideClass)
 {
+    if (DebugHeap::tryGet())
+        return;
     std::unique_lock<Mutex> lock(Heap::mutex());
     PerProcess<Scavenger>::get()->setScavengerThreadQOSClass(overrideClass);
…
     vmValidatePhysical(object, size);
     vmAllocatePhysicalPages(object, size);
-    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
-    heap.externalCommit(object, size);
+    if (!DebugHeap::tryGet())
+        PerProcess<PerHeapKind<Heap>>::get()->at(kind).externalCommit(object, size);
…
     vmValidatePhysical(object, size);
     vmDeallocatePhysicalPages(object, size);
-    Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind);
-    heap.externalDecommit(object, size);
+    if (!DebugHeap::tryGet())
+        PerProcess<PerHeapKind<Heap>>::get()->at(kind).externalDecommit(object, size);
…
 void enableMiniMode()
 {
-    PerProcess<Scavenger>::get()->enableMiniMode();
+    if (!DebugHeap::tryGet())
+        PerProcess<Scavenger>::get()->enableMiniMode();
 }