Changeset 225125 in WebKit
- Timestamp:
- Nov 23, 2017 4:47:58 PM (6 years ago)
- Location:
- trunk/Source/bmalloc
- Files:
-
- 1 added
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/bmalloc/CMakeLists.txt
r224537 r225125 19 19 bmalloc/HeapKind.cpp 20 20 bmalloc/IsoHeapImpl.cpp 21 bmalloc/IsoPage.cpp 21 22 bmalloc/IsoTLS.cpp 22 23 bmalloc/IsoTLSEntry.cpp -
trunk/Source/bmalloc/ChangeLog
r224811 r225125 1 2017-11-16 Filip Pizlo <fpizlo@apple.com> 2 3 Isolated Heaps caused an increase in reported leaks on the bots 4 https://bugs.webkit.org/show_bug.cgi?id=179463 5 6 Reviewed by Darin Adler. 7 8 This fixes the way isoheaps interact with system tools: 9 10 - Opts into the VMHeap API so that the leaks tool can find isoheap memory. 11 12 - Opts into the DebugHeap/Environment APIs so that we turn off isoheap allocation if memory 13 debugging options are in use. 14 15 * bmalloc.xcodeproj/project.pbxproj: 16 * bmalloc/DebugHeap.h: 17 * bmalloc/IsoHeap.h: 18 * bmalloc/IsoPage.cpp: Added. 19 (bmalloc::IsoPageBase::allocatePageMemory): 20 * bmalloc/IsoPage.h: 21 * bmalloc/IsoPageInlines.h: 22 (bmalloc::IsoPage<Config>::tryCreate): 23 * bmalloc/IsoTLS.cpp: 24 (bmalloc::IsoTLS::deallocateSlow): 25 (bmalloc::IsoTLS::ensureEntries): 26 (bmalloc::IsoTLS::isUsingDebugHeap): 27 (bmalloc::IsoTLS::debugMalloc): 28 * bmalloc/IsoTLS.h: 29 * bmalloc/IsoTLSInlines.h: 30 (bmalloc::IsoTLS::allocate): 31 (bmalloc::IsoTLS::deallocate): 32 (bmalloc::IsoTLS::allocateImpl): 33 (bmalloc::IsoTLS::allocateFast): 34 (bmalloc::IsoTLS::allocateSlow): 35 (bmalloc::IsoTLS::deallocateImpl): 36 (bmalloc::IsoTLS::deallocateFast): 37 (bmalloc::IsoTLS::ensureHeapAndEntries): 38 (bmalloc::IsoTLS::allocator): Deleted. 39 (bmalloc::IsoTLS::deallocator): Deleted. 40 * bmalloc/bmalloc.cpp: 41 (bmalloc::api::tryLargeMemalignVirtual): 42 (bmalloc::api::freeLargeVirtual): 43 (bmalloc::api::scavenge): 44 (bmalloc::api::isEnabled): 45 (bmalloc::api::setScavengerThreadQOSClass): 46 * bmalloc/bmalloc.h: 47 (bmalloc::api::tryLargeMemalignVirtual): Deleted. 48 (bmalloc::api::freeLargeVirtual): Deleted. 49 (bmalloc::api::scavenge): Deleted. 50 (bmalloc::api::isEnabled): Deleted. 51 (bmalloc::api::setScavengerThreadQOSClass): Deleted. 52 1 53 2017-11-14 Saam Barati <sbarati@apple.com> 2 54 -
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
r224537 r225125 24 24 0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3DA0131F267AB800342C08 /* AllocationKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 25 25 0F5167741FAD685C008236A8 /* bmalloc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5167731FAD6852008236A8 /* bmalloc.cpp */; }; 26 0F5549EF1FB54704007FF75A /* IsoPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5549EE1FB54701007FF75A /* IsoPage.cpp */; }; 26 27 0F5BF1471F22A8B10029D91D /* HeapKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1461F22A8B10029D91D /* HeapKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; 27 28 0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */; settings = {ATTRIBUTES = (Private, ); }; }; … … 174 175 0F3DA0131F267AB800342C08 /* AllocationKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AllocationKind.h; path = bmalloc/AllocationKind.h; sourceTree = "<group>"; }; 175 176 0F5167731FAD6852008236A8 /* bmalloc.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = bmalloc.cpp; path = bmalloc/bmalloc.cpp; sourceTree = "<group>"; }; 177 0F5549EE1FB54701007FF75A /* IsoPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoPage.cpp; path = bmalloc/IsoPage.cpp; sourceTree = "<group>"; }; 176 178 0F5BF1461F22A8B10029D91D /* HeapKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = HeapKind.h; path = bmalloc/HeapKind.h; sourceTree = "<group>"; }; 177 179 0F5BF1481F22A8D80029D91D /* PerHeapKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = PerHeapKind.h; path = bmalloc/PerHeapKind.h; sourceTree = "<group>"; }; … … 361 363 0F7EB7FD1F9541AD00F1ABCB /* IsoHeapImplInlines.h */, 362 364 0F7EB8091F9541AD00F1ABCB /* IsoHeapInlines.h */, 365 
0F5549EE1FB54701007FF75A /* IsoPage.cpp */, 363 366 0F7EB8071F9541AD00F1ABCB /* IsoPage.h */, 364 367 0F7EB80A1F9541AE00F1ABCB /* IsoPageInlines.h */, … … 755 758 0FD557331F7EDB7B00B1F0A3 /* HeapKind.cpp in Sources */, 756 759 0F7EB83B1F9541B000F1ABCB /* IsoHeapImpl.cpp in Sources */, 760 0F5549EF1FB54704007FF75A /* IsoPage.cpp in Sources */, 757 761 14F271C318EA3978008C152F /* Allocator.cpp in Sources */, 758 762 6599C5CC1EC3F15900A2F7BB /* AvailableMemory.cpp in Sources */, -
trunk/Source/bmalloc/bmalloc/IsoPage.h
r224537 r225125 39 39 public: 40 40 static constexpr size_t pageSize = 16384; 41 42 protected: 43 BEXPORT static void* allocatePageMemory(); 41 44 }; 42 45 -
trunk/Source/bmalloc/bmalloc/IsoPageInlines.h
r224537 r225125 36 36 IsoPage<Config>* IsoPage<Config>::tryCreate(IsoDirectoryBase<Config>& directory, unsigned index) 37 37 { 38 void* memory = tryVMAllocate(pageSize, pageSize);38 void* memory = allocatePageMemory(); 39 39 if (!memory) 40 40 return nullptr; -
trunk/Source/bmalloc/bmalloc/IsoTLS.cpp
r224537 r225125 26 26 #include "IsoTLS.h" 27 27 28 #include "DebugHeap.h" 29 #include "Environment.h" 28 30 #include "IsoTLSEntryInlines.h" 29 31 #include "IsoTLSInlines.h" … … 49 51 IsoTLS::IsoTLS() 50 52 { 53 } 54 55 void IsoTLS::deallocateSlow(void* p) 56 { 57 // If we go down this path and we aren't in debug heap mode, then this means we have some corruption. 58 // Think of this as really being an assertion about offset < tls->m_extent. 59 RELEASE_BASSERT(PerProcess<Environment>::get()->isDebugHeapEnabled()); 60 61 PerProcess<DebugHeap>::get()->free(p); 51 62 } 52 63 … … 77 88 78 89 IsoTLSEntry* targetEntry = startEntry; 79 for (;;) { 90 size_t requiredCapacity = 0; 91 if (startEntry) { 92 for (;;) { 93 RELEASE_BASSERT(targetEntry); 94 RELEASE_BASSERT(targetEntry->offset() <= offset); 95 if (targetEntry->offset() == offset) 96 break; 97 targetEntry = targetEntry->m_next; 98 } 80 99 RELEASE_BASSERT(targetEntry); 81 RELEASE_BASSERT(targetEntry->offset() <= offset); 82 if (targetEntry->offset() == offset) 83 break; 84 targetEntry = targetEntry->m_next; 100 requiredCapacity = targetEntry->extent(); 85 101 } 86 RELEASE_BASSERT(targetEntry); 87 88 size_t requiredCapacity = targetEntry->extent(); 102 89 103 if (!tls || requiredCapacity > tls->m_capacity) { 90 104 size_t requiredSize = sizeForCapacity(requiredCapacity); … … 111 125 } 112 126 113 startEntry->walkUpToInclusive( 114 targetEntry, 115 [&] (IsoTLSEntry* entry) { 116 entry->construct(tls->m_data + entry->offset()); 117 }); 118 119 tls->m_lastEntry = targetEntry; 120 tls->m_extent = targetEntry->extent(); 127 if (startEntry) { 128 startEntry->walkUpToInclusive( 129 targetEntry, 130 [&] (IsoTLSEntry* entry) { 131 entry->construct(tls->m_data + entry->offset()); 132 }); 133 134 tls->m_lastEntry = targetEntry; 135 tls->m_extent = targetEntry->extent(); 136 } 121 137 122 138 return tls; … … 160 176 } 161 177 178 bool IsoTLS::isUsingDebugHeap() 179 { 180 return PerProcess<Environment>::get()->isDebugHeapEnabled(); 
181 } 182 183 auto IsoTLS::debugMalloc(size_t size) -> DebugMallocResult 184 { 185 DebugMallocResult result; 186 if ((result.usingDebugHeap = isUsingDebugHeap())) 187 result.ptr = PerProcess<DebugHeap>::get()->malloc(size); 188 return result; 189 } 190 162 191 } // namespace bmalloc 163 192 -
trunk/Source/bmalloc/bmalloc/IsoTLS.h
r224537 r225125 59 59 60 60 template<typename Config, typename Type> 61 static IsoAllocator<Config>& allocator(api::IsoHeap<Type>&); 61 static void* allocateImpl(api::IsoHeap<Type>&, bool abortOnFailure); 62 63 template<typename Config> 64 void* allocateFast(unsigned offset, bool abortOnFailure); 62 65 63 66 template<typename Config, typename Type> 64 static IsoDeallocator<Config>& deallocator(api::IsoHeap<Type>&); 67 static void* allocateSlow(api::IsoHeap<Type>&, bool abortOnFailure); 68 69 template<typename Config, typename Type> 70 static void deallocateImpl(api::IsoHeap<Type>&, void* p); 71 72 template<typename Config> 73 void deallocateFast(unsigned offset, void* p); 74 75 BEXPORT static void deallocateSlow(void* p); 65 76 66 77 static IsoTLS* get(); … … 82 93 void forEachEntry(const Func&); 83 94 95 BEXPORT static bool isUsingDebugHeap(); 96 97 struct DebugMallocResult { 98 void* ptr { nullptr }; 99 bool usingDebugHeap { false }; 100 }; 101 102 BEXPORT static DebugMallocResult debugMalloc(size_t); 103 84 104 IsoTLSEntry* m_lastEntry { nullptr }; 85 105 unsigned m_extent { 0 }; -
trunk/Source/bmalloc/bmalloc/IsoTLSInlines.h
r224537 r225125 34 34 void* IsoTLS::allocate(api::IsoHeap<Type>& handle, bool abortOnFailure) 35 35 { 36 return allocat or<typename api::IsoHeap<Type>::Config>(handle).allocate(abortOnFailure);36 return allocateImpl<typename api::IsoHeap<Type>::Config>(handle, abortOnFailure); 37 37 } 38 38 … … 42 42 if (!p) 43 43 return; 44 deallocat or<typename api::IsoHeap<Type>::Config>(handle).deallocate(p);44 deallocateImpl<typename api::IsoHeap<Type>::Config>(handle, p); 45 45 } 46 46 … … 63 63 64 64 template<typename Config, typename Type> 65 IsoAllocator<Config>& IsoTLS::allocator(api::IsoHeap<Type>& handle)65 void* IsoTLS::allocateImpl(api::IsoHeap<Type>& handle, bool abortOnFailure) 66 66 { 67 67 unsigned offset = handle.allocatorOffset(); 68 68 IsoTLS* tls = get(); 69 if (!tls || offset >= tls->m_extent) { 70 tls = ensureHeapAndEntries(handle); 71 offset = handle.allocatorOffset(); 72 } 73 return *reinterpret_cast<IsoAllocator<Config>*>(tls->m_data + offset); 69 if (!tls || offset >= tls->m_extent) 70 return allocateSlow<typename api::IsoHeap<Type>::Config>(handle, abortOnFailure); 71 return tls->allocateFast<Config>(offset, abortOnFailure); 72 } 73 74 template<typename Config> 75 void* IsoTLS::allocateFast(unsigned offset, bool abortOnFailure) 76 { 77 return reinterpret_cast<IsoAllocator<Config>*>(m_data + offset)->allocate(abortOnFailure); 74 78 } 75 79 76 80 template<typename Config, typename Type> 77 IsoDeallocator<Config>& IsoTLS::deallocator(api::IsoHeap<Type>& handle) 81 BNO_INLINE void* IsoTLS::allocateSlow(api::IsoHeap<Type>& handle, bool abortOnFailure) 82 { 83 IsoTLS* tls = ensureHeapAndEntries(handle); 84 85 auto debugMallocResult = debugMalloc(Config::objectSize); 86 if (debugMallocResult.usingDebugHeap) 87 return debugMallocResult.ptr; 88 89 unsigned offset = handle.allocatorOffset(); 90 return tls->allocateFast<Config>(offset, abortOnFailure); 91 } 92 93 template<typename Config, typename Type> 94 void IsoTLS::deallocateImpl(api::IsoHeap<Type>& handle, 
void* p) 78 95 { 79 96 unsigned offset = handle.deallocatorOffset(); 80 97 IsoTLS* tls = get(); 81 RELEASE_BASSERT(offset < tls->m_extent); 82 return *reinterpret_cast<IsoDeallocator<Config>*>(tls->m_data + offset); 98 // Note that this bounds check would be here even if we didn't have to support DebugHeap, 99 // since we don't want unpredictable behavior if offset or m_extent ever got corrupted. 100 if (offset >= tls->m_extent) 101 deallocateSlow(p); 102 else 103 tls->deallocateFast<Config>(offset, p); 104 } 105 106 template<typename Config> 107 void IsoTLS::deallocateFast(unsigned offset, void* p) 108 { 109 reinterpret_cast<IsoDeallocator<Config>*>(m_data + offset)->deallocate(p); 83 110 } 84 111 … … 125 152 || handle.allocatorOffset() >= get()->m_extent 126 153 || handle.deallocatorOffset() >= get()->m_extent); 127 ensureHeap(handle); 128 return ensureEntries(std::max(handle.allocatorOffset(), handle.deallocatorOffset())); 154 unsigned offset; 155 if (isUsingDebugHeap()) { 156 if (IsoTLS* result = get()) 157 return result; 158 offset = 0; 159 } else { 160 ensureHeap(handle); 161 offset = std::max(handle.allocatorOffset(), handle.deallocatorOffset()); 162 } 163 return ensureEntries(offset); 129 164 } 130 165 -
trunk/Source/bmalloc/bmalloc/bmalloc.cpp
r224537 r225125 26 26 #include "bmalloc.h" 27 27 28 #include "PerProcess.h" 29 28 30 namespace bmalloc { namespace api { 29 31 … … 38 40 } 39 41 42 void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind) 43 { 44 kind = mapToActiveHeapKind(kind); 45 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 46 std::lock_guard<StaticMutex> lock(Heap::mutex()); 47 return heap.tryAllocateLarge(lock, alignment, size, AllocationKind::Virtual); 48 } 49 50 void freeLargeVirtual(void* object, HeapKind kind) 51 { 52 kind = mapToActiveHeapKind(kind); 53 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 54 std::lock_guard<StaticMutex> lock(Heap::mutex()); 55 heap.deallocateLarge(lock, object, AllocationKind::Virtual); 56 } 57 58 void scavenge() 59 { 60 scavengeThisThread(); 61 62 PerProcess<Scavenger>::get()->scavenge(); 63 } 64 65 bool isEnabled(HeapKind kind) 66 { 67 kind = mapToActiveHeapKind(kind); 68 std::unique_lock<StaticMutex> lock(Heap::mutex()); 69 return !PerProcess<PerHeapKind<Heap>>::getFastCase()->at(kind).debugHeap(); 70 } 71 72 #if BOS(DARWIN) 73 void setScavengerThreadQOSClass(qos_class_t overrideClass) 74 { 75 std::unique_lock<StaticMutex> lock(Heap::mutex()); 76 PerProcess<Scavenger>::get()->setScavengerThreadQOSClass(overrideClass); 77 } 78 #endif 79 40 80 } } // namespace bmalloc::api 41 81 -
trunk/Source/bmalloc/bmalloc/bmalloc.h
r224537 r225125 32 32 #include "IsoTLS.h" 33 33 #include "PerHeapKind.h" 34 #include "PerProcess.h"35 34 #include "Scavenger.h" 36 35 #include "StaticMutex.h" … … 72 71 73 72 // Returns null for failure 74 inline void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary) 75 { 76 kind = mapToActiveHeapKind(kind); 77 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 78 std::lock_guard<StaticMutex> lock(Heap::mutex()); 79 return heap.tryAllocateLarge(lock, alignment, size, AllocationKind::Virtual); 80 } 73 BEXPORT void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary); 81 74 82 75 inline void free(void* object, HeapKind kind = HeapKind::Primary) … … 87 80 BEXPORT void freeOutOfLine(void* object, HeapKind kind = HeapKind::Primary); 88 81 89 inline void freeLargeVirtual(void* object, HeapKind kind = HeapKind::Primary) 90 { 91 kind = mapToActiveHeapKind(kind); 92 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 93 std::lock_guard<StaticMutex> lock(Heap::mutex()); 94 heap.deallocateLarge(lock, object, AllocationKind::Virtual); 95 } 82 BEXPORT void freeLargeVirtual(void* object, HeapKind kind = HeapKind::Primary); 96 83 97 84 inline void scavengeThisThread() … … 102 89 } 103 90 104 inline void scavenge() 105 { 106 scavengeThisThread(); 91 BEXPORT void scavenge(); 107 92 108 PerProcess<Scavenger>::get()->scavenge(); 109 } 110 111 inline bool isEnabled(HeapKind kind = HeapKind::Primary) 112 { 113 kind = mapToActiveHeapKind(kind); 114 std::unique_lock<StaticMutex> lock(Heap::mutex()); 115 return !PerProcess<PerHeapKind<Heap>>::getFastCase()->at(kind).debugHeap(); 116 } 93 BEXPORT bool isEnabled(HeapKind kind = HeapKind::Primary); 117 94 118 95 inline size_t availableMemory() … … 134 111 135 112 #if BOS(DARWIN) 136 inline void setScavengerThreadQOSClass(qos_class_t overrideClass) 137 { 138 std::unique_lock<StaticMutex> lock(Heap::mutex()); 139 
PerProcess<Scavenger>::get()->setScavengerThreadQOSClass(overrideClass); 140 } 113 BEXPORT void setScavengerThreadQOSClass(qos_class_t overrideClass); 141 114 #endif 142 115
Note: See TracChangeset for help on using the changeset viewer.