Changeset 249578 in webkit
- Timestamp:
- Sep 6, 2019 10:04:13 AM (5 years ago)
- Location:
- trunk
- Files:
-
- 2 added
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JSTests/ChangeLog
r249538 r249578 1 2019-09-06 Mark Lam <mark.lam@apple.com> 2 3 Fix bmalloc::Allocator:tryAllocate() to return null on failure to allocate. 4 https://bugs.webkit.org/show_bug.cgi?id=201529 5 <rdar://problem/53935772> 6 7 Reviewed by Yusuke Suzuki. 8 9 * stress/test-out-of-memory.js: Added. 10 1 11 2019-09-05 Tadeu Zagallo <tzagallo@apple.com> 2 12 -
trunk/Source/bmalloc/CMakeLists.txt
r249065 r249578 65 65 bmalloc/EligibilityResultInlines.h 66 66 bmalloc/Environment.h 67 bmalloc/FailureAction.h 67 68 bmalloc/FixedVector.h 68 69 bmalloc/FreeList.h -
trunk/Source/bmalloc/ChangeLog
r249556 r249578 1 2019-09-06 Mark Lam <mark.lam@apple.com> 2 3 Fix bmalloc::Allocator:tryAllocate() to return null on failure to allocate. 4 https://bugs.webkit.org/show_bug.cgi?id=201529 5 <rdar://problem/53935772> 6 7 Reviewed by Yusuke Suzuki. 8 9 In this implementation, we pass FailureAction in as a runtime option. If this 10 proves to be a perf issue, we can easily fix this by passing it as a template 11 argument. That will also automatically elide unneeded code paths. We'll defer 12 that exercise until we have evidence that it is warranted. 13 14 * CMakeLists.txt: 15 * bmalloc.xcodeproj/project.pbxproj: 16 * bmalloc/Allocator.cpp: 17 (bmalloc::Allocator::allocateImpl): 18 (bmalloc::Allocator::reallocateImpl): 19 (bmalloc::Allocator::refillAllocatorSlowCase): 20 (bmalloc::Allocator::refillAllocator): 21 (bmalloc::Allocator::allocateLarge): 22 (bmalloc::Allocator::allocateLogSizeClass): 23 (bmalloc::Allocator::allocateSlowCase): 24 (bmalloc::Allocator::tryAllocate): Deleted. 25 (bmalloc::Allocator::allocate): Deleted. 26 (bmalloc::Allocator::reallocate): Deleted. 27 (bmalloc::Allocator::tryReallocate): Deleted. 28 * bmalloc/Allocator.h: 29 (bmalloc::Allocator::tryAllocate): 30 (bmalloc::Allocator::allocate): 31 (bmalloc::Allocator::tryReallocate): 32 (bmalloc::Allocator::reallocate): 33 (bmalloc::Allocator::allocateImpl): 34 * bmalloc/BumpAllocator.h: 35 * bmalloc/FailureAction.h: Added. 36 * bmalloc/Heap.cpp: 37 (bmalloc::Heap::allocateSmallChunk): 38 (bmalloc::Heap::allocateSmallPage): 39 (bmalloc::Heap::allocateSmallBumpRangesByMetadata): 40 (bmalloc::Heap::allocateSmallBumpRangesByObject): 41 (bmalloc::Heap::allocateLarge): 42 (bmalloc::Heap::tryAllocateLarge): Deleted. 43 * bmalloc/Heap.h: 44 (bmalloc::Heap::allocateSmallBumpRanges): 45 * bmalloc/bmalloc.cpp: 46 (bmalloc::api::tryLargeZeroedMemalignVirtual): 47 1 48 2019-09-05 Mark Lam <mark.lam@apple.com> 2 49 -
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
r249065 r249578 148 148 E3FBB5A2225EADB000DB6FBD /* IsoSharedHeap.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */; settings = {ATTRIBUTES = (Private, ); }; }; 149 149 E3FBB5A4225ECAD200DB6FBD /* IsoSharedHeapInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */; settings = {ATTRIBUTES = (Private, ); }; }; 150 FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */ = {isa = PBXBuildFile; fileRef = FE48BD3A2321E8CC00F136D0 /* FailureAction.h */; settings = {ATTRIBUTES = (Private, ); }; }; 150 151 /* End PBXBuildFile section */ 151 152 … … 300 301 E3FBB59F225EADB000DB6FBD /* IsoSharedHeap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeap.h; path = bmalloc/IsoSharedHeap.h; sourceTree = "<group>"; }; 301 302 E3FBB5A3225ECAD200DB6FBD /* IsoSharedHeapInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedHeapInlines.h; path = bmalloc/IsoSharedHeapInlines.h; sourceTree = "<group>"; }; 303 FE48BD3A2321E8CC00F136D0 /* FailureAction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = FailureAction.h; path = bmalloc/FailureAction.h; sourceTree = "<group>"; }; 302 304 /* End PBXFileReference section */ 303 305 … … 476 478 14895D8F1A3A319C0006235D /* Environment.cpp */, 477 479 14895D901A3A319C0006235D /* Environment.h */, 480 FE48BD3A2321E8CC00F136D0 /* FailureAction.h */, 478 481 0F5BF14E1F22DEAF0029D91D /* Gigacage.cpp */, 479 482 0F5BF14C1F22B0C30029D91D /* Gigacage.h */, … … 614 617 0F7EB8311F9541B000F1ABCB /* IsoPageInlines.h in Headers */, 615 618 0F7EB82B1F9541B000F1ABCB /* IsoPageTrigger.h in Headers */, 619 FE48BD3B2321E8D700F136D0 /* FailureAction.h in Headers */, 616 620 E3FBB5A0225EADB000DB6FBD /* IsoSharedConfig.h in Headers */, 617 621 E3FBB5A2225EADB000DB6FBD /* IsoSharedHeap.h in Headers */, -
trunk/Source/bmalloc/bmalloc/Allocator.cpp
r242938 r249578 1 1 /* 2 * Copyright (C) 2014-201 8Apple Inc. All rights reserved.2 * Copyright (C) 2014-2019 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 51 51 } 52 52 53 void* Allocator::tryAllocate(size_t size) 54 { 55 if (size <= smallMax) 56 return allocate(size); 57 58 std::unique_lock<Mutex> lock(Heap::mutex()); 59 return m_heap.tryAllocateLarge(lock, alignment, size); 60 } 61 62 void* Allocator::allocate(size_t alignment, size_t size) 63 { 64 bool crashOnFailure = true; 65 return allocateImpl(alignment, size, crashOnFailure); 66 } 67 68 void* Allocator::tryAllocate(size_t alignment, size_t size) 69 { 70 bool crashOnFailure = false; 71 return allocateImpl(alignment, size, crashOnFailure); 72 } 73 74 void* Allocator::allocateImpl(size_t alignment, size_t size, bool crashOnFailure) 53 void* Allocator::allocateImpl(size_t alignment, size_t size, FailureAction action) 75 54 { 76 55 BASSERT(isPowerOfTwo(alignment)); … … 80 59 81 60 if (size <= smallMax && alignment <= smallMax) 82 return allocate (roundUpToMultipleOf(alignment, size));61 return allocateImpl(roundUpToMultipleOf(alignment, size), action); 83 62 84 std::unique_lock<Mutex> lock(Heap::mutex()); 85 if (crashOnFailure) 86 return m_heap.allocateLarge(lock, alignment, size); 87 return m_heap.tryAllocateLarge(lock, alignment, size); 63 return allocateLarge(size, action); 88 64 } 89 65 90 void* Allocator::reallocate(void* object, size_t newSize) 91 { 92 bool crashOnFailure = true; 93 return reallocateImpl(object, newSize, crashOnFailure); 94 } 95 96 void* Allocator::tryReallocate(void* object, size_t newSize) 97 { 98 bool crashOnFailure = false; 99 return reallocateImpl(object, newSize, crashOnFailure); 100 } 101 102 void* Allocator::reallocateImpl(void* object, size_t newSize, bool crashOnFailure) 66 void* Allocator::reallocateImpl(void* object, size_t newSize, FailureAction action) 103 67 { 104 68 size_t oldSize = 0; … … 126 90 127 91 
void* result = nullptr; 128 if (crashOnFailure) 129 result = allocate(newSize); 130 else { 131 result = tryAllocate(newSize); 132 if (!result) 133 return nullptr; 92 result = allocateImpl(newSize, action); 93 if (!result) { 94 BASSERT(action == FailureAction::ReturnNull); 95 return nullptr; 134 96 } 135 97 size_t copySize = std::min(oldSize, newSize); … … 158 120 } 159 121 160 BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass )122 BNO_INLINE void Allocator::refillAllocatorSlowCase(BumpAllocator& allocator, size_t sizeClass, FailureAction action) 161 123 { 162 124 BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass]; … … 164 126 std::unique_lock<Mutex> lock(Heap::mutex()); 165 127 m_deallocator.processObjectLog(lock); 166 m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock) );128 m_heap.allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache, m_deallocator.lineCache(lock), action); 167 129 } 168 130 169 BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass )131 BINLINE void Allocator::refillAllocator(BumpAllocator& allocator, size_t sizeClass, FailureAction action) 170 132 { 171 133 BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass]; 172 134 if (!bumpRangeCache.size()) 173 return refillAllocatorSlowCase(allocator, sizeClass );135 return refillAllocatorSlowCase(allocator, sizeClass, action); 174 136 return allocator.refill(bumpRangeCache.pop()); 175 137 } 176 138 177 BNO_INLINE void* Allocator::allocateLarge(size_t size )139 BNO_INLINE void* Allocator::allocateLarge(size_t size, FailureAction action) 178 140 { 179 141 std::unique_lock<Mutex> lock(Heap::mutex()); 180 return m_heap.allocateLarge(lock, alignment, size );142 return m_heap.allocateLarge(lock, alignment, size, action); 181 143 } 182 144 183 BNO_INLINE void* Allocator::allocateLogSizeClass(size_t size )145 BNO_INLINE void* 
Allocator::allocateLogSizeClass(size_t size, FailureAction action) 184 146 { 185 147 size_t sizeClass = bmalloc::sizeClass(size); 186 148 BumpAllocator& allocator = m_bumpAllocators[sizeClass]; 187 149 if (!allocator.canAllocate()) 188 refillAllocator(allocator, sizeClass); 150 refillAllocator(allocator, sizeClass, action); 151 if (action == FailureAction::ReturnNull && !allocator.canAllocate()) 152 return nullptr; 189 153 return allocator.allocate(); 190 154 } 191 155 192 void* Allocator::allocateSlowCase(size_t size )156 void* Allocator::allocateSlowCase(size_t size, FailureAction action) 193 157 { 194 158 if (size <= maskSizeClassMax) { 195 159 size_t sizeClass = bmalloc::maskSizeClass(size); 196 160 BumpAllocator& allocator = m_bumpAllocators[sizeClass]; 197 refillAllocator(allocator, sizeClass); 161 refillAllocator(allocator, sizeClass, action); 162 if (action == FailureAction::ReturnNull && !allocator.canAllocate()) 163 return nullptr; 198 164 return allocator.allocate(); 199 165 } 200 166 201 167 if (size <= smallMax) 202 return allocateLogSizeClass(size );168 return allocateLogSizeClass(size, action); 203 169 204 return allocateLarge(size );170 return allocateLarge(size, action); 205 171 } 206 172 -
trunk/Source/bmalloc/bmalloc/Allocator.h
r241832 r249578 1 1 /* 2 * Copyright (C) 2014-201 8Apple Inc. All rights reserved.2 * Copyright (C) 2014-2019 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 29 29 #include "BExport.h" 30 30 #include "BumpAllocator.h" 31 #include "FailureAction.h" 31 32 #include <array> 32 33 … … 43 44 ~Allocator(); 44 45 45 BEXPORT void* tryAllocate(size_t);46 void* allocate(size_t );47 void* tryAllocate(size_t alignment, size_t );48 void* allocate(size_t alignment, size_t );49 void* tryReallocate(void* , size_t);50 void* reallocate(void* , size_t);46 void* tryAllocate(size_t size) { return allocateImpl(size, FailureAction::ReturnNull); } 47 void* allocate(size_t size) { return allocateImpl(size, FailureAction::Crash); } 48 void* tryAllocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::ReturnNull); } 49 void* allocate(size_t alignment, size_t size) { return allocateImpl(alignment, size, FailureAction::Crash); } 50 void* tryReallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::ReturnNull); } 51 void* reallocate(void* object, size_t newSize) { return reallocateImpl(object, newSize, FailureAction::Crash); } 51 52 52 53 void scavenge(); 53 54 54 55 private: 55 void* allocateImpl(size_t alignment, size_t, bool crashOnFailure); 56 void* reallocateImpl(void*, size_t, bool crashOnFailure); 56 void* allocateImpl(size_t, FailureAction); 57 void* allocateImpl(size_t alignment, size_t, FailureAction); 58 void* reallocateImpl(void*, size_t, FailureAction); 57 59 58 60 bool allocateFastCase(size_t, void*&); 59 BEXPORT void* allocateSlowCase(size_t); 61 BEXPORT void* allocateSlowCase(size_t, FailureAction); 62 63 void* allocateLogSizeClass(size_t, FailureAction); 64 void* allocateLarge(size_t, FailureAction); 60 65 61 void* allocateLogSizeClass(size_t); 62 void* allocateLarge(size_t); 63 64 void refillAllocator(BumpAllocator&, size_t sizeClass); 65 
void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass); 66 inline void refillAllocator(BumpAllocator&, size_t sizeClass, FailureAction); 67 void refillAllocatorSlowCase(BumpAllocator&, size_t sizeClass, FailureAction); 66 68 67 69 std::array<BumpAllocator, sizeClassCount> m_bumpAllocators; … … 85 87 } 86 88 87 inline void* Allocator::allocate (size_t size)89 inline void* Allocator::allocateImpl(size_t size, FailureAction action) 88 90 { 89 91 void* object; 90 92 if (!allocateFastCase(size, object)) 91 return allocateSlowCase(size );93 return allocateSlowCase(size, action); 92 94 return object; 93 95 } -
trunk/Source/bmalloc/bmalloc/BumpAllocator.h
r198995 r249578 1 1 /* 2 * Copyright (C) 2014 Apple Inc. All rights reserved.2 * Copyright (C) 2014-2019 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without -
trunk/Source/bmalloc/bmalloc/Heap.cpp
r243389 r249578 1 1 /* 2 * Copyright (C) 2014-201 8Apple Inc. All rights reserved.2 * Copyright (C) 2014-2019 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 227 227 } 228 228 229 void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass )229 void Heap::allocateSmallChunk(std::unique_lock<Mutex>& lock, size_t pageClass, FailureAction action) 230 230 { 231 231 RELEASE_BASSERT(isActiveHeapKind(m_kind)); … … 233 233 size_t pageSize = bmalloc::pageSize(pageClass); 234 234 235 Chunk* chunk = [&]() {235 Chunk* chunk = [&]() -> Chunk* { 236 236 if (!m_chunkCache[pageClass].isEmpty()) 237 237 return m_chunkCache[pageClass].pop(); 238 238 239 void* memory = allocateLarge(lock, chunkSize, chunkSize); 239 void* memory = allocateLarge(lock, chunkSize, chunkSize, action); 240 if (!memory) { 241 BASSERT(action == FailureAction::ReturnNull); 242 return nullptr; 243 } 240 244 241 245 Chunk* chunk = new (memory) Chunk(pageSize); … … 257 261 }(); 258 262 259 m_freePages[pageClass].push(chunk); 263 if (chunk) 264 m_freePages[pageClass].push(chunk); 260 265 } 261 266 … … 286 291 } 287 292 288 SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache )293 SmallPage* Heap::allocateSmallPage(std::unique_lock<Mutex>& lock, size_t sizeClass, LineCache& lineCache, FailureAction action) 289 294 { 290 295 RELEASE_BASSERT(isActiveHeapKind(m_kind)); … … 298 303 m_scavenger->didStartGrowing(); 299 304 300 SmallPage* page = [&]() {305 SmallPage* page = [&]() -> SmallPage* { 301 306 size_t pageClass = m_pageClasses[sizeClass]; 302 307 303 308 if (m_freePages[pageClass].isEmpty()) 304 allocateSmallChunk(lock, pageClass); 309 allocateSmallChunk(lock, pageClass, action); 310 if (action == FailureAction::ReturnNull && m_freePages[pageClass].isEmpty()) 311 return nullptr; 305 312 306 313 Chunk* chunk = m_freePages[pageClass].tail(); … … 329 336 return page; 330 337 }(); 
338 if (!page) { 339 BASSERT(action == FailureAction::ReturnNull); 340 return nullptr; 341 } 331 342 332 343 page->setSizeClass(sizeClass); … … 377 388 std::unique_lock<Mutex>& lock, size_t sizeClass, 378 389 BumpAllocator& allocator, BumpRangeCache& rangeCache, 379 LineCache& lineCache) 380 { 390 LineCache& lineCache, FailureAction action) 391 { 392 BUNUSED(action); 381 393 RELEASE_BASSERT(isActiveHeapKind(m_kind)); 382 394 383 SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache); 395 SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action); 396 if (!page) { 397 BASSERT(action == FailureAction::ReturnNull); 398 return; 399 } 384 400 SmallLine* lines = page->begin(); 385 401 BASSERT(page->hasFreeLines(lock)); … … 419 435 if (!findSmallBumpRange(lineNumber)) { 420 436 page->setHasFreeLines(lock, false); 421 BASSERT(a llocator.canAllocate());437 BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate()); 422 438 return; 423 439 } … … 426 442 if (rangeCache.size() == rangeCache.capacity()) { 427 443 lineCache[sizeClass].push(page); 428 BASSERT(a llocator.canAllocate());444 BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate()); 429 445 return; 430 446 } … … 441 457 std::unique_lock<Mutex>& lock, size_t sizeClass, 442 458 BumpAllocator& allocator, BumpRangeCache& rangeCache, 443 LineCache& lineCache) 444 { 459 LineCache& lineCache, FailureAction action) 460 { 461 BUNUSED(action); 445 462 RELEASE_BASSERT(isActiveHeapKind(m_kind)); 446 463 447 464 size_t size = allocator.size(); 448 SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache); 465 SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache, action); 466 if (!page) { 467 BASSERT(action == FailureAction::ReturnNull); 468 return; 469 } 449 470 BASSERT(page->hasFreeLines(lock)); 450 471 … … 476 497 if (!findSmallBumpRange(it, end)) { 477 498 page->setHasFreeLines(lock, false); 478 BASSERT(a llocator.canAllocate());499 BASSERT(action == 
FailureAction::ReturnNull || allocator.canAllocate()); 479 500 return; 480 501 } … … 483 504 if (rangeCache.size() == rangeCache.capacity()) { 484 505 lineCache[sizeClass].push(page); 485 BASSERT(a llocator.canAllocate());506 BASSERT(action == FailureAction::ReturnNull || allocator.canAllocate()); 486 507 return; 487 508 } … … 543 564 } 544 565 545 void* Heap::tryAllocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size) 546 { 566 void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size, FailureAction action) 567 { 568 #define ASSERT_OR_RETURN_ON_FAILURE(cond) do { \ 569 if (action == FailureAction::Crash) \ 570 RELEASE_BASSERT(cond); \ 571 else if (!(cond)) \ 572 return nullptr; \ 573 } while (false) 574 575 547 576 RELEASE_BASSERT(isActiveHeapKind(m_kind)); 548 577 … … 552 581 553 582 size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment; 554 if (roundedSize < size) // Check for overflow 555 return nullptr; 583 ASSERT_OR_RETURN_ON_FAILURE(roundedSize >= size); // Check for overflow 556 584 size = roundedSize; 557 585 558 586 size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment); 559 if (roundedAlignment < alignment) // Check for overflow 560 return nullptr; 587 ASSERT_OR_RETURN_ON_FAILURE(roundedAlignment >= alignment); // Check for overflow 561 588 alignment = roundedAlignment; 562 589 … … 566 593 m_condition.wait(lock, [&]() { return !m_hasPendingDecommits; }); 567 594 // Now we're guaranteed we're looking at all available memory. 
568 return tryAllocateLarge(lock, alignment, size); 569 } 570 571 if (usingGigacage()) 572 return nullptr; 595 return allocateLarge(lock, alignment, size, action); 596 } 597 598 ASSERT_OR_RETURN_ON_FAILURE(!usingGigacage()); 573 599 574 600 range = VMHeap::get()->tryAllocateLargeChunk(alignment, size); 575 if (!range) 576 return nullptr; 601 ASSERT_OR_RETURN_ON_FAILURE(range); 577 602 578 603 m_largeFree.add(range); … … 583 608 584 609 void* result = splitAndAllocate(lock, range, alignment, size).begin(); 610 ASSERT_OR_RETURN_ON_FAILURE(result); 585 611 return result; 586 } 587 588 void* Heap::allocateLarge(std::unique_lock<Mutex>& lock, size_t alignment, size_t size) 589 { 590 void* result = tryAllocateLarge(lock, alignment, size); 591 RELEASE_BASSERT(result); 592 return result; 612 613 #undef ASSERT_OR_RETURN_ON_FAILURE 593 614 } 594 615 -
trunk/Source/bmalloc/bmalloc/Heap.h
r243144 r249578 1 1 /* 2 * Copyright (C) 2014-201 8Apple Inc. All rights reserved.2 * Copyright (C) 2014-2019 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 29 29 #include "BumpRange.h" 30 30 #include "Chunk.h" 31 #include "FailureAction.h" 31 32 #include "HeapKind.h" 32 33 #include "LargeMap.h" … … 65 66 66 67 void allocateSmallBumpRanges(std::unique_lock<Mutex>&, size_t sizeClass, 67 BumpAllocator&, BumpRangeCache&, LineCache& );68 BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction); 68 69 void derefSmallLine(std::unique_lock<Mutex>&, Object, LineCache&); 69 70 void deallocateLineCache(std::unique_lock<Mutex>&, LineCache&); 70 71 71 void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t); 72 void* tryAllocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t); 72 void* allocateLarge(std::unique_lock<Mutex>&, size_t alignment, size_t, FailureAction); 73 73 void deallocateLarge(std::unique_lock<Mutex>&, void*); 74 74 … … 111 111 112 112 void allocateSmallBumpRangesByMetadata(std::unique_lock<Mutex>&, 113 size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache& );113 size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction); 114 114 void allocateSmallBumpRangesByObject(std::unique_lock<Mutex>&, 115 size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache& );115 size_t sizeClass, BumpAllocator&, BumpRangeCache&, LineCache&, FailureAction); 116 116 117 SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache& );117 SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&, FailureAction); 118 118 void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&); 119 119 120 void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass );120 void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass, FailureAction); 121 121 void deallocateSmallChunk(Chunk*, size_t pageClass); 
122 122 … … 158 158 std::unique_lock<Mutex>& lock, size_t sizeClass, 159 159 BumpAllocator& allocator, BumpRangeCache& rangeCache, 160 LineCache& lineCache )160 LineCache& lineCache, FailureAction action) 161 161 { 162 162 if (sizeClass < bmalloc::sizeClass(smallLineSize)) 163 return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache );164 return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache );163 return allocateSmallBumpRangesByMetadata(lock, sizeClass, allocator, rangeCache, lineCache, action); 164 return allocateSmallBumpRangesByObject(lock, sizeClass, allocator, rangeCache, lineCache, action); 165 165 } 166 166 -
trunk/Source/bmalloc/bmalloc/bmalloc.cpp
r242938 r249578 60 60 61 61 std::unique_lock<Mutex> lock(Heap::mutex()); 62 result = heap. tryAllocateLarge(lock, alignment, size);62 result = heap.allocateLarge(lock, alignment, size, FailureAction::ReturnNull); 63 63 if (result) { 64 64 // Don't track this as dirty memory that dictates how we drive the scavenger.
Note: See TracChangeset for help on using the changeset viewer.