Changeset 243144 in webkit
- Timestamp:
- Mar 19, 2019 10:31:01 AM (5 years ago)
- Location:
- trunk/Source/bmalloc
- Files:
-
- 13 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/bmalloc/ChangeLog
r242938 r243144 1 2019-03-18 Michael Saboff <msaboff@apple.com> 2 3 [BMalloc] Scavenger should react to recent memory activity 4 https://bugs.webkit.org/show_bug.cgi?id=195895 5 6 Reviewed by Geoffrey Garen. 7 8 This change adds a recently used bit to objects that are scavenged. When an object is allocated, that bit is set. 9 When we scavenge, if the bit is set, we clear it. If the bit was already clear, we decommit the object. The timing 10 of scavenging has been changed as well. We perform our first scavenge almost immediately after bmalloc is initialized 11 (10ms later). Subsequent scavenging is done as a multiple of the time it took to scavenge. We bound this computed 12 time between a minimum and maximum. Through empirical testing, the multiplier, minimum and maximum are 13 150x, 100ms and 10,000ms respectively. For mini-mode, when the JIT is disabled, we use much more aggressive values of 14 50x, 25ms and 500ms. 15 16 Eliminated partial scavenging since this change allows for any scavenge to be partial or full based on recent use of 17 the objects on the various free lists. 18 19 * bmalloc/Chunk.h: 20 (bmalloc::Chunk::usedSinceLastScavenge): 21 (bmalloc::Chunk::clearUsedSinceLastScavenge): 22 (bmalloc::Chunk::setUsedSinceLastScavenge): 23 * bmalloc/Heap.cpp: 24 (bmalloc::Heap::scavenge): 25 (bmalloc::Heap::allocateSmallChunk): 26 (bmalloc::Heap::allocateSmallPage): 27 (bmalloc::Heap::splitAndAllocate): 28 (bmalloc::Heap::tryAllocateLarge): 29 (bmalloc::Heap::scavengeToHighWatermark): Deleted. 30 * bmalloc/Heap.h: 31 * bmalloc/IsoDirectory.h: 32 * bmalloc/IsoDirectoryInlines.h: 33 (bmalloc::passedNumPages>::takeFirstEligible): 34 (bmalloc::passedNumPages>::scavenge): 35 (bmalloc::passedNumPages>::scavengeToHighWatermark): Deleted. 36 * bmalloc/IsoHeapImpl.h: 37 * bmalloc/IsoHeapImplInlines.h: 38 (bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark): Deleted. 
39 * bmalloc/LargeRange.h: 40 (bmalloc::LargeRange::LargeRange): 41 (bmalloc::LargeRange::usedSinceLastScavenge): 42 (bmalloc::LargeRange::clearUsedSinceLastScavenge): 43 (bmalloc::LargeRange::setUsedSinceLastScavenge): 44 (): Deleted. 45 * bmalloc/Scavenger.cpp: 46 (bmalloc::Scavenger::Scavenger): 47 (bmalloc::Scavenger::threadRunLoop): 48 (bmalloc::Scavenger::timeSinceLastPartialScavenge): Deleted. 49 (bmalloc::Scavenger::partialScavenge): Deleted. 50 * bmalloc/Scavenger.h: 51 * bmalloc/SmallPage.h: 52 (bmalloc::SmallPage::usedSinceLastScavenge): 53 (bmalloc::SmallPage::clearUsedSinceLastScavenge): 54 (bmalloc::SmallPage::setUsedSinceLastScavenge): 55 1 56 2019-03-14 Yusuke Suzuki <ysuzuki@apple.com> 2 57 -
trunk/Source/bmalloc/bmalloc/Chunk.h
r241305 r243144 46 46 unsigned refCount() { return m_refCount; } 47 47 48 bool usedSinceLastScavenge() { return m_usedSinceLastScavenge; } 49 void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; } 50 void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; } 51 48 52 size_t offset(void*); 49 53 … … 60 64 private: 61 65 size_t m_refCount { }; 66 bool m_usedSinceLastScavenge: 1; 62 67 List<SmallPage> m_freePages { }; 63 68 -
trunk/Source/bmalloc/bmalloc/Heap.cpp
r242938 r243144 176 176 } 177 177 178 void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter )178 void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits) 179 179 { 180 180 for (auto& list : m_freePages) { … … 183 183 if (!page->hasPhysicalPages()) 184 184 continue; 185 if (page->usedSinceLastScavenge()) { 186 page->clearUsedSinceLastScavenge(); 187 deferredDecommits++; 188 continue; 189 } 185 190 186 191 size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]); … … 190 195 decommitter.addEager(page->begin()->begin(), pageSize); 191 196 page->setHasPhysicalPages(false); 192 #if ENABLE_PHYSICAL_PAGE_MAP 197 #if ENABLE_PHYSICAL_PAGE_MAP 193 198 m_physicalPageMap.decommit(page->begin()->begin(), pageSize); 194 199 #endif … … 196 201 } 197 202 } 198 203 199 204 for (auto& list : m_chunkCache) { 200 while (!list.isEmpty()) 201 deallocateSmallChunk(list.pop(), &list - &m_chunkCache[0]); 205 for (auto iter = list.begin(); iter != list.end(); ) { 206 Chunk* chunk = *iter; 207 if (chunk->usedSinceLastScavenge()) { 208 chunk->clearUsedSinceLastScavenge(); 209 deferredDecommits++; 210 ++iter; 211 continue; 212 } 213 ++iter; 214 list.remove(chunk); 215 deallocateSmallChunk(chunk, &list - &m_chunkCache[0]); 216 } 202 217 } 203 218 204 219 for (LargeRange& range : m_largeFree) { 205 m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin())); 220 if (range.usedSinceLastScavenge()) { 221 range.clearUsedSinceLastScavenge(); 222 deferredDecommits++; 223 continue; 224 } 206 225 decommitLargeRange(lock, range, decommitter); 207 226 } 208 209 m_freeableMemory = 0;210 }211 212 void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)213 {214 void* newHighWaterMark = nullptr;215 for (LargeRange& range : m_largeFree) {216 if (range.begin() <= m_highWatermark)217 newHighWaterMark = std::min(newHighWaterMark, static_cast<void*>(range.begin()));218 else219 
decommitLargeRange(lock, range, decommitter);220 }221 m_highWatermark = newHighWaterMark;222 227 } 223 228 … … 250 255 forEachPage(chunk, pageSize, [&](SmallPage* page) { 251 256 page->setHasPhysicalPages(true); 257 page->setUsedSinceLastScavenge(); 252 258 page->setHasFreeLines(lock, true); 253 259 chunk->freePages().push(page); … … 311 317 312 318 chunk->ref(); 319 chunk->setUsedSinceLastScavenge(); 313 320 314 321 SmallPage* page = chunk->freePages().pop(); … … 325 332 vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize); 326 333 page->setHasPhysicalPages(true); 327 #if ENABLE_PHYSICAL_PAGE_MAP 334 #if ENABLE_PHYSICAL_PAGE_MAP 328 335 m_physicalPageMap.commit(page->begin()->begin(), pageSize); 329 336 #endif 330 337 } 338 page->setUsedSinceLastScavenge(); 331 339 332 340 return page; … … 586 594 587 595 void* result = splitAndAllocate(lock, range, alignment, size).begin(); 588 m_highWatermark = std::max(m_highWatermark, result);589 596 return result; 590 597 } -
trunk/Source/bmalloc/bmalloc/Heap.h
r242812 r243144 77 77 void shrinkLarge(std::unique_lock<Mutex>&, const Range&, size_t); 78 78 79 void scavenge(std::lock_guard<Mutex>&, BulkDecommit& );79 void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits); 80 80 void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal); 81 void scavengeToHighWatermark(std::lock_guard<Mutex>&, BulkDecommit&);82 81 83 82 size_t freeableMemory(std::lock_guard<Mutex>&); … … 154 153 PhysicalPageMap m_physicalPageMap; 155 154 #endif 156 157 void* m_highWatermark { nullptr };158 155 }; 159 156 -
trunk/Source/bmalloc/bmalloc/IsoDirectory.h
r230501 r243144 76 76 // pages as being decommitted. It's the caller's job to do the actual decommitting. 77 77 void scavenge(Vector<DeferredDecommit>&); 78 void scavengeToHighWatermark(Vector<DeferredDecommit>&);79 78 80 79 template<typename Func> … … 91 90 std::array<IsoPage<Config>*, numPages> m_pages; 92 91 unsigned m_firstEligible { 0 }; 93 unsigned m_highWatermark { 0 };94 92 }; 95 93 -
trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h
r242938 r243144 52 52 return EligibilityKind::Full; 53 53 54 m_highWatermark = std::max(pageIndex, m_highWatermark);55 56 54 Scavenger& scavenger = *Scavenger::get(); 57 55 scavenger.didStartGrowing(); … … 144 142 scavengePage(index, decommits); 145 143 }); 146 m_highWatermark = 0;147 }148 149 template<typename Config, unsigned passedNumPages>150 void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)151 {152 (m_empty & m_committed).forEachSetBit(153 [&] (size_t index) {154 if (index > m_highWatermark)155 scavengePage(index, decommits);156 });157 m_highWatermark = 0;158 144 } 159 145 -
trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h
r230501 r243144 41 41 42 42 virtual void scavenge(Vector<DeferredDecommit>&) = 0; 43 virtual void scavengeToHighWatermark(Vector<DeferredDecommit>&) = 0;44 43 virtual size_t freeableMemory() = 0; 45 44 virtual size_t footprint() = 0; … … 73 72 74 73 void scavenge(Vector<DeferredDecommit>&) override; 75 void scavengeToHighWatermark(Vector<DeferredDecommit>&) override;76 74 77 75 size_t freeableMemory() override; -
trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h
r230515 r243144 111 111 112 112 template<typename Config> 113 void IsoHeapImpl<Config>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)114 {115 std::lock_guard<Mutex> locker(this->lock);116 if (!m_directoryHighWatermark)117 m_inlineDirectory.scavengeToHighWatermark(decommits);118 for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next) {119 if (page->index() >= m_directoryHighWatermark)120 page->payload.scavengeToHighWatermark(decommits);121 }122 m_directoryHighWatermark = 0;123 }124 125 template<typename Config>126 113 size_t IsoHeapImpl<Config>::freeableMemory() 127 114 { -
trunk/Source/bmalloc/bmalloc/LargeMap.cpp
r230501 r243144 76 76 merged = merge(merged, m_free.pop(i--)); 77 77 } 78 78 79 merged.setUsedSinceLastScavenge(); 79 80 m_free.push(merged); 80 81 } -
trunk/Source/bmalloc/bmalloc/LargeRange.h
r230501 r243144 38 38 , m_startPhysicalSize(0) 39 39 , m_totalPhysicalSize(0) 40 , m_isEligible(true) 41 , m_usedSinceLastScavenge(false) 40 42 { 41 43 } … … 45 47 , m_startPhysicalSize(startPhysicalSize) 46 48 , m_totalPhysicalSize(totalPhysicalSize) 49 , m_isEligible(true) 50 , m_usedSinceLastScavenge(false) 47 51 { 48 52 BASSERT(this->size() >= this->totalPhysicalSize()); … … 50 54 } 51 55 52 LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize )56 LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize, bool usedSinceLastScavenge = false) 53 57 : Range(begin, size) 54 58 , m_startPhysicalSize(startPhysicalSize) 55 59 , m_totalPhysicalSize(totalPhysicalSize) 60 , m_isEligible(true) 61 , m_usedSinceLastScavenge(usedSinceLastScavenge) 56 62 { 57 63 BASSERT(this->size() >= this->totalPhysicalSize()); … … 84 90 bool isEligibile() const { return m_isEligible; } 85 91 92 bool usedSinceLastScavenge() const { return m_usedSinceLastScavenge; } 93 void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; } 94 void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; } 95 86 96 bool operator<(const void* other) const { return begin() < other; } 87 97 bool operator<(const LargeRange& other) const { return begin() < other.begin(); } … … 90 100 size_t m_startPhysicalSize; 91 101 size_t m_totalPhysicalSize; 92 bool m_isEligible { true }; 102 unsigned m_isEligible: 1; 103 unsigned m_usedSinceLastScavenge: 1; 93 104 }; 94 105 … … 113 124 { 114 125 const LargeRange& left = std::min(a, b); 126 bool mergedUsedSinceLastScavenge = a.usedSinceLastScavenge() || b.usedSinceLastScavenge(); 115 127 if (left.size() == left.startPhysicalSize()) { 116 128 return LargeRange( … … 118 130 a.size() + b.size(), 119 131 a.startPhysicalSize() + b.startPhysicalSize(), 120 a.totalPhysicalSize() + b.totalPhysicalSize()); 132 a.totalPhysicalSize() + b.totalPhysicalSize(), 133 mergedUsedSinceLastScavenge); 121 134 } 
122 135 … … 125 138 a.size() + b.size(), 126 139 left.startPhysicalSize(), 127 a.totalPhysicalSize() + b.totalPhysicalSize()); 140 a.totalPhysicalSize() + b.totalPhysicalSize(), 141 mergedUsedSinceLastScavenge); 128 142 } 129 143 -
trunk/Source/bmalloc/bmalloc/Scavenger.cpp
r242938 r243144 81 81 dispatch_release(queue); 82 82 #endif 83 83 m_waitTime = std::chrono::milliseconds(10); 84 84 85 m_thread = std::thread(&threadEntryPoint, this); 85 86 } … … 178 179 } 179 180 180 std::chrono::milliseconds Scavenger::timeSinceLastPartialScavenge()181 {182 std::unique_lock<Mutex> lock(m_mutex);183 return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastPartialScavengeTime);184 }185 186 181 void Scavenger::enableMiniMode() 187 182 { … … 206 201 { 207 202 PrintTime printTime("\nfull scavenge under lock time"); 203 size_t deferredDecommits = 0; 208 204 std::lock_guard<Mutex> lock(Heap::mutex()); 209 205 for (unsigned i = numHeaps; i--;) { 210 206 if (!isActiveHeapKind(static_cast<HeapKind>(i))) 211 207 continue; 212 PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter );208 PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter, deferredDecommits); 213 209 } 214 210 decommitter.processEager(); 211 212 if (deferredDecommits) 213 m_state = State::RunSoon; 215 214 } 216 215 … … 253 252 } 254 253 255 void Scavenger::partialScavenge()256 {257 std::unique_lock<Mutex> lock(m_scavengingMutex);258 259 if (verbose) {260 fprintf(stderr, "--------------------------------\n");261 fprintf(stderr, "--before partial scavenging--\n");262 dumpStats();263 }264 265 {266 BulkDecommit decommitter;267 {268 PrintTime printTime("\npartialScavenge under lock time");269 std::lock_guard<Mutex> lock(Heap::mutex());270 for (unsigned i = numHeaps; i--;) {271 if (!isActiveHeapKind(static_cast<HeapKind>(i)))272 continue;273 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);274 size_t freeableMemory = heap.freeableMemory(lock);275 if (freeableMemory < 4 * MB)276 continue;277 heap.scavengeToHighWatermark(lock, decommitter);278 }279 280 decommitter.processEager();281 }282 283 {284 PrintTime printTime("partialScavenge lazy decommit time");285 decommitter.processLazy();286 }287 288 {289 PrintTime 
printTime("partialScavenge mark all as eligible time");290 std::lock_guard<Mutex> lock(Heap::mutex());291 for (unsigned i = numHeaps; i--;) {292 if (!isActiveHeapKind(static_cast<HeapKind>(i)))293 continue;294 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);295 heap.markAllLargeAsEligibile(lock);296 }297 }298 }299 300 {301 RELEASE_BASSERT(!m_deferredDecommits.size());302 AllIsoHeaps::get()->forEach(303 [&] (IsoHeapImplBase& heap) {304 heap.scavengeToHighWatermark(m_deferredDecommits);305 });306 IsoHeapImplBase::finishScavenging(m_deferredDecommits);307 m_deferredDecommits.shrink(0);308 }309 310 if (verbose) {311 fprintf(stderr, "--after partial scavenging--\n");312 dumpStats();313 fprintf(stderr, "--------------------------------\n");314 }315 316 {317 std::unique_lock<Mutex> lock(m_mutex);318 m_lastPartialScavengeTime = std::chrono::steady_clock::now();319 }320 }321 322 254 size_t Scavenger::freeableMemory() 323 255 { … … 387 319 if (m_state == State::RunSoon) { 388 320 std::unique_lock<Mutex> lock(m_mutex); 389 m_condition.wait_for(lock, std::chrono::milliseconds(m_isInMiniMode ? 
200 : 2000), [&]() { return m_state != State::RunSoon; });321 m_condition.wait_for(lock, m_waitTime, [&]() { return m_state != State::RunSoon; }); 390 322 } 391 323 … … 401 333 } 402 334 403 enum class ScavengeMode { 404 None, 405 Partial, 406 Full 407 }; 408 409 size_t freeableMemory = this->freeableMemory(); 410 411 ScavengeMode scavengeMode = [&] { 412 auto timeSinceLastFullScavenge = this->timeSinceLastFullScavenge(); 413 auto timeSinceLastPartialScavenge = this->timeSinceLastPartialScavenge(); 414 auto timeSinceLastScavenge = std::min(timeSinceLastPartialScavenge, timeSinceLastFullScavenge); 415 416 if (isUnderMemoryPressure() && freeableMemory > 1 * MB && timeSinceLastScavenge > std::chrono::milliseconds(5)) 417 return ScavengeMode::Full; 418 419 if (!m_isProbablyGrowing) { 420 if (timeSinceLastFullScavenge < std::chrono::milliseconds(1000) && !m_isInMiniMode) 421 return ScavengeMode::Partial; 422 return ScavengeMode::Full; 423 } 424 425 if (m_isInMiniMode) { 426 if (timeSinceLastFullScavenge < std::chrono::milliseconds(200)) 427 return ScavengeMode::Partial; 428 return ScavengeMode::Full; 429 } 430 431 #if BCPU(X86_64) 432 auto partialScavengeInterval = std::chrono::milliseconds(12000); 433 #else 434 auto partialScavengeInterval = std::chrono::milliseconds(8000); 435 #endif 436 if (timeSinceLastScavenge < partialScavengeInterval) { 437 // Rate limit partial scavenges. 
438 return ScavengeMode::None; 439 } 440 if (freeableMemory < 25 * MB) 441 return ScavengeMode::None; 442 if (5 * freeableMemory < footprint()) 443 return ScavengeMode::None; 444 return ScavengeMode::Partial; 445 }(); 446 447 m_isProbablyGrowing = false; 448 449 switch (scavengeMode) { 450 case ScavengeMode::None: { 451 runSoon(); 452 break; 453 } 454 case ScavengeMode::Partial: { 455 partialScavenge(); 456 runSoon(); 457 break; 458 } 459 case ScavengeMode::Full: { 460 scavenge(); 461 break; 462 } 463 } 335 std::chrono::steady_clock::time_point start { std::chrono::steady_clock::now() }; 336 337 scavenge(); 338 339 auto timeSpentScavenging = std::chrono::steady_clock::now() - start; 340 341 if (verbose) { 342 fprintf(stderr, "time spent scavenging %lfms\n", 343 static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(timeSpentScavenging).count()) / 1000); 344 } 345 346 std::chrono::milliseconds newWaitTime; 347 348 if (m_isInMiniMode) { 349 timeSpentScavenging *= 50; 350 newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging); 351 newWaitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(25)), std::chrono::milliseconds(500)); 352 } else { 353 timeSpentScavenging *= 150; 354 newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging); 355 m_waitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(100)), std::chrono::milliseconds(10000)); 356 } 357 358 if (verbose) 359 fprintf(stderr, "new wait time %lldms\n", m_waitTime.count()); 464 360 } 465 361 } -
trunk/Source/bmalloc/bmalloc/Scavenger.h
r242938 r243144 90 90 91 91 std::chrono::milliseconds timeSinceLastFullScavenge(); 92 std::chrono::milliseconds timeSinceLastPartialScavenge();93 void partialScavenge();94 92 95 93 std::atomic<State> m_state { State::Sleep }; 96 94 size_t m_scavengerBytes { 0 }; 95 std::chrono::milliseconds m_waitTime; 97 96 bool m_isProbablyGrowing { false }; 98 97 bool m_isInMiniMode { false }; … … 104 103 std::thread m_thread; 105 104 std::chrono::steady_clock::time_point m_lastFullScavengeTime { std::chrono::steady_clock::now() }; 106 std::chrono::steady_clock::time_point m_lastPartialScavengeTime { std::chrono::steady_clock::now() };107 105 108 106 #if BOS(DARWIN) -
trunk/Source/bmalloc/bmalloc/SmallPage.h
r241305 r243144 52 52 void setHasPhysicalPages(bool hasPhysicalPages) { m_hasPhysicalPages = hasPhysicalPages; } 53 53 54 bool usedSinceLastScavenge() { return m_usedSinceLastScavenge; } 55 void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; } 56 void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; } 57 54 58 SmallLine* begin(); 55 59 … … 60 64 unsigned char m_hasFreeLines: 1; 61 65 unsigned char m_hasPhysicalPages: 1; 66 unsigned char m_usedSinceLastScavenge: 1; 62 67 unsigned char m_refCount: 7; 63 68 unsigned char m_sizeClass;
Note: See TracChangeset
for help on using the changeset viewer.