Changeset 196873 in webkit

Timestamp: Feb 21, 2016 10:43:22 AM (8 years ago)
Location: trunk/Source/bmalloc
Files: 11 edited
trunk/Source/bmalloc/ChangeLog
r196871 → r196873

+2016-02-21  Geoffrey Garen  <ggaren@apple.com>
+
+        bmalloc: Don't use a whole page for metadata
+        https://bugs.webkit.org/show_bug.cgi?id=154510
+
+        Reviewed by Andreas Kling.
+
+        (1) Don't round up metadata to a page boundary. This saves 1.5% dirty
+        memory on iOS and 0.2% on Mac. It also enables a future patch to allocate
+        smaller chunks without wasting memory.
+
+        (2) Initialize metadata lazily. This saves dirty memory when the program
+        allocates primarily small or large objects (but not both), leaving some
+        metadata uninitialized.
+
+        * bmalloc.xcodeproj/project.pbxproj: Medium objects are gone now.
+
+        * bmalloc/BumpAllocator.h:
+        (bmalloc::BumpAllocator::refill): Added an ASSERT to help debug a bug
+        I caused while working on this patch.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::allocateSmallBumpRanges): Ditto.
+
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::allocateLarge): Updated for interface change.
+
+        * bmalloc/LargeChunk.h: Changed the boundaryTagCount calculation to
+        a static_assert.
+
+        Don't round up to page boundary. (See above.)
+
+        (bmalloc::LargeChunk::LargeChunk): Moved code here from LargeChunk::init.
+        A constructor is a more natural / automatic way to do this initialization.
+
+        * bmalloc/LargeObject.h:
+        (bmalloc::LargeObject::init): Deleted. Moved to LargeChunk.
+
+        * bmalloc/Sizes.h: Changed largeChunkMetadataSize to a simpler constant
+        because metadata size no longer varies by page size.
+
+        * bmalloc/SmallChunk.h:
+        (bmalloc::SmallChunk::begin):
+        (bmalloc::SmallChunk::end):
+        (bmalloc::SmallChunk::lines):
+        (bmalloc::SmallChunk::pages): Use std::array to make begin/end
+        calculations easier.
+
+        (bmalloc::SmallChunk::SmallChunk): Treat our metadata like a series
+        of allocated objects. We used to avoid trampling our metadata by
+        starting object memory at the next page. Now we share the first page
+        between metadata and objects, and we account for metadata explicitly.
+
+        * bmalloc/SuperChunk.h:
+        (bmalloc::SuperChunk::SuperChunk):
+        (bmalloc::SuperChunk::smallChunk):
+        (bmalloc::SuperChunk::largeChunk):
+        (bmalloc::SuperChunk::create): Deleted. Don't eagerly run the SmallChunk
+        and LargeChunk constructors. We'll run them lazily as needed.
+
+        * bmalloc/VMHeap.cpp:
+        (bmalloc::VMHeap::VMHeap):
+        (bmalloc::VMHeap::allocateSmallChunk):
+        (bmalloc::VMHeap::allocateLargeChunk):
+        (bmalloc::VMHeap::allocateSuperChunk):
+        (bmalloc::VMHeap::grow): Deleted. Track small and large chunks explicitly
+        so we can initialize them lazily.
+
+        * bmalloc/VMHeap.h:
+        (bmalloc::VMHeap::allocateSmallPage):
+        (bmalloc::VMHeap::allocateLargeObject): Specify whether we're allocating
+        a small or large chunk since we don't allocate both at once anymore.
+
 2016-02-20  Mark Lam  <mark.lam@apple.com>
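To make the two ideas above concrete, here is a standalone C++ sketch (hypothetical names and sizes, not bmalloc code): it shows how much of a page is wasted when a small metadata block is rounded up to a page boundary, and how placement new defers dirtying a metadata page until a chunk is actually used.

    #include <cstddef>
    #include <cstdio>
    #include <new>

    // Stand-in for a chunk's bookkeeping; real bmalloc metadata is larger.
    struct ChunkMetadata {
        char boundaryTags[320];
    };

    int main()
    {
        const std::size_t pageSize = 4096;

        // (1) Rounding metadata up to a page boundary wastes the page's tail.
        std::size_t rounded = (sizeof(ChunkMetadata) + pageSize - 1) & ~(pageSize - 1);
        std::printf("wasted per chunk when rounded: %zu bytes\n",
            rounded - sizeof(ChunkMetadata));

        // (2) Lazy initialization: hold raw (virtual) memory now, and only run
        // the metadata constructor -- which dirties its page -- on first use.
        alignas(4096) static char reservation[sizeof(ChunkMetadata)];
        bool chunkNeeded = true; // imagine the first allocation arriving
        if (chunkNeeded) {
            ChunkMetadata* metadata = new (reservation) ChunkMetadata;
            std::printf("metadata constructed at %p\n", static_cast<void*>(metadata));
        }
        return 0;
    }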
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
r196847 → r196873

 		14D9DB4D17F2865C00EAAB79 /* cache */,
 		147AAA9C18CE6010002201E4 /* heap: large */,
-		147AAA9A18CE5FD3002201E4 /* heap: small | medium */,
+		147AAA9A18CE5FD3002201E4 /* heap: small */,
 		14D9DB4E17F2866E00EAAB79 /* heap */,
 		14D9DB4F17F2868900EAAB79 /* stdlib */,
…
 		sourceTree = "<group>";
 	};
-	147AAA9A18CE5FD3002201E4 /* heap: small | medium */ = {
+	147AAA9A18CE5FD3002201E4 /* heap: small */ = {
 		isa = PBXGroup;
 		children = (
…
 			143E29ED18CAE90500FE8A0F /* SmallPage.h */,
 		);
-		name = "heap: small | medium";
+		name = "heap: small";
 		sourceTree = "<group>";
 	};
trunk/Source/bmalloc/bmalloc/BumpAllocator.h
r196845 → r196873

     m_ptr = bumpRange.begin;
     m_remaining = bumpRange.objectCount;
+    BASSERT(canAllocate());
 }
trunk/Source/bmalloc/bmalloc/Heap.cpp
r196847 → r196873

     if (rangeCache.size() == rangeCache.capacity()) {
         m_smallPagesWithFreeLines[sizeClass].push(page);
+        BASSERT(allocator.canAllocate());
         return;
     }
…
     }

+    BASSERT(allocator.canAllocate());
     page->setHasFreeLines(lock, false);
 }
…
 }

-void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t size)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t size)
 {
     BASSERT(size <= largeMax);
…
     LargeObject largeObject = m_largeObjects.take(size);
     if (!largeObject)
-        largeObject = m_vmHeap.allocateLargeObject(size);
+        largeObject = m_vmHeap.allocateLargeObject(lock, size);

     if (largeObject.vmState().hasVirtual()) {
…
 }

-void* Heap::allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size, size_t unalignedSize)
+void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
 {
     BASSERT(size <= largeMax);
…
     LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
     if (!largeObject)
-        largeObject = m_vmHeap.allocateLargeObject(alignment, size, unalignedSize);
+        largeObject = m_vmHeap.allocateLargeObject(lock, alignment, size, unalignedSize);

     if (largeObject.vmState().hasVirtual()) {
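The new lock parameter threaded through allocateLarge() is a lock-witness: a callee that must run under the heap lock takes a std::lock_guard& so the type system, rather than convention, proves the caller holds it. A minimal sketch of the idiom, using a hypothetical ToyHeap rather than bmalloc's real classes:

    #include <cstddef>
    #include <mutex>
    #include <new>

    class ToyHeap {
    public:
        void* allocate(std::size_t size)
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            return allocateSlowCase(lock, size); // pass the witness down
        }

    private:
        // Can't be called without a live lock_guard in hand -- the same shape
        // as Heap::allocateLarge() forwarding its lock to
        // VMHeap::allocateLargeObject().
        void* allocateSlowCase(std::lock_guard<std::mutex>&, std::size_t size)
        {
            return ::operator new(size); // stand-in for the real allocation path
        }

        std::mutex m_mutex;
    };

    int main()
    {
        ToyHeap heap;
        void* p = heap.allocate(64);
        ::operator delete(p);
        return 0;
    }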
trunk/Source/bmalloc/bmalloc/LargeChunk.h
r196845 → r196873

 #include "Sizes.h"
 #include "VMAllocate.h"
+#include <array>

 namespace bmalloc {
…
 class LargeChunk {
 public:
-    static LargeChunk* create();
+    LargeChunk();
     static LargeChunk* get(void*);
…
 private:
-    // Round up to ensure 2 dummy boundary tags -- for the left and right sentinels.
-    static const size_t boundaryTagCount = max(2 * largeMin / sizeof(BoundaryTag), largeChunkSize / largeMin);
+    static const size_t boundaryTagCount = largeChunkSize / largeMin;
+    static_assert(boundaryTagCount > 2, "LargeChunk must have space for two sentinel boundary tags");

     // Our metadata layout includes a left and right edge sentinel.
…
     // We use the X's for boundary tags and the O's for edge sentinels.

-    BoundaryTag m_boundaryTags[boundaryTagCount];
-
-    // Align to vmPageSize to avoid sharing physical pages with metadata.
-    // Otherwise, we'll confuse the scavenger into trying to scavenge metadata.
-    // FIXME: Below #ifdef workaround fix should be removed after all linux based ports bump
-    // own gcc version. See https://bugs.webkit.org/show_bug.cgi?id=140162#c87
-#if BPLATFORM(IOS)
-    char m_memory[] __attribute__((aligned(16384)));
-    static_assert(vmPageSize == 16384, "vmPageSize and alignment must be same");
-#else
-    char m_memory[] __attribute__((aligned(4096)));
-    static_assert(vmPageSize == 4096, "vmPageSize and alignment must be same");
-#endif
+    std::array<BoundaryTag, boundaryTagCount> m_boundaryTags;
+    char m_memory[] __attribute__((aligned(largeAlignment)));
 };

-static_assert(largeChunkMetadataSize == sizeof(LargeChunk), "'largeChunkMetadataSize' should be the same number as sizeof(LargeChunk) or our computation in Sizes.h for 'largeMax' is wrong");
-static_assert(largeChunkMetadataSize + largeMax <= largeChunkSize, "We will think we can accommodate larger objects than we can in reality");
+static_assert(largeChunkMetadataSize == sizeof(LargeChunk), "Our largeChunkMetadataSize math in Sizes.h is wrong");
+static_assert(largeChunkMetadataSize + largeMax == largeChunkSize, "largeMax is too small or too big");
+
+inline LargeChunk::LargeChunk()
+{
+    Range range(begin(), end() - begin());
+    BASSERT(range.size() == largeMax);
+
+    BeginTag* beginTag = LargeChunk::beginTag(range.begin());
+    beginTag->setRange(range);
+    beginTag->setFree(true);
+    beginTag->setVMState(VMState::Virtual);
+
+    EndTag* endTag = LargeChunk::endTag(range.begin(), range.size());
+    endTag->init(beginTag);
+
+    // Mark the left and right edges of our range as allocated. This naturally
+    // prevents merging logic from overflowing left (into metadata) or right
+    // (beyond our chunk), without requiring special-case checks.
+
+    EndTag* leftSentinel = beginTag->prev();
+    BASSERT(leftSentinel >= m_boundaryTags.begin());
+    BASSERT(leftSentinel < m_boundaryTags.end());
+    leftSentinel->initSentinel();
+
+    BeginTag* rightSentinel = endTag->next();
+    BASSERT(rightSentinel >= m_boundaryTags.begin());
+    BASSERT(rightSentinel < m_boundaryTags.end());
+    rightSentinel->initSentinel();
+}

 inline LargeChunk* LargeChunk::get(void* object)
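The constructor's left and right sentinels are an instance of the guard-node pattern: slots just outside the valid range are marked permanently allocated, so coalescing can probe a neighbor tag unconditionally, with no bounds check. A toy free-list sketch of the same idea (hypothetical Tag type, not bmalloc's BoundaryTag):

    #include <cassert>
    #include <cstddef>

    // Guard-node sketch: tags[0] and tags[5] are sentinels that always read
    // as "allocated", so merge logic never walks off either end of the array.
    struct Tag {
        bool free;
        std::size_t size;
    };

    int main()
    {
        Tag tags[6] = {};
        tags[0] = { false, 0 }; // left sentinel: never free
        tags[5] = { false, 0 }; // right sentinel: never free
        for (int i = 1; i <= 4; ++i)
            tags[i] = { true, 64 };

        // Coalesce block 1 with its free right neighbors. No bounds check is
        // needed: the loop is guaranteed to stop at the right sentinel.
        int i = 1;
        std::size_t merged = tags[i].size;
        while (tags[i + 1].free) {
            merged += tags[i + 1].size;
            ++i;
        }
        assert(merged == 4 * 64);
        return 0;
    }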
trunk/Source/bmalloc/bmalloc/LargeObject.h
r196840 → r196873

 class LargeObject {
 public:
-    static Range init(LargeChunk*);
-
     LargeObject();
     LargeObject(void*);
…
 }

-inline Range LargeObject::init(LargeChunk* chunk)
-{
-    Range range(chunk->begin(), chunk->end() - chunk->begin());
-
-    BeginTag* beginTag = LargeChunk::beginTag(range.begin());
-    beginTag->setRange(range);
-    beginTag->setFree(true);
-    beginTag->setVMState(VMState::Virtual);
-
-    EndTag* endTag = LargeChunk::endTag(range.begin(), range.size());
-    endTag->init(beginTag);
-
-    // Mark the left and right edges of our chunk as allocated. This naturally
-    // prevents merging logic from overflowing beyond our chunk, without requiring
-    // special-case checks.
-
-    EndTag* leftSentinel = beginTag->prev();
-    BASSERT(leftSentinel >= static_cast<void*>(chunk));
-    leftSentinel->initSentinel();
-
-    BeginTag* rightSentinel = endTag->next();
-    BASSERT(rightSentinel < static_cast<void*>(range.begin()));
-    rightSentinel->initSentinel();
-
-    return range;
-}
-
 } // namespace bmalloc
trunk/Source/bmalloc/bmalloc/Sizes.h
r196847 → r196873

 static const size_t superChunkMask = ~(superChunkSize - 1);

-static const size_t smallMax = 1024;
-static const size_t smallLineSize = 256;
-static const size_t smallLineCount = vmPageSize / smallLineSize;
-static const size_t smallLineMask = ~(smallLineSize - 1ul);
-
 static const size_t smallChunkSize = superChunkSize / 2;
 static const size_t smallChunkOffset = superChunkSize / 2;
 static const size_t smallChunkMask = ~(smallChunkSize - 1ul);

+static const size_t smallMax = 1024;
+static const size_t smallLineSize = 256;
+static const size_t smallLineCount = vmPageSize / smallLineSize;
+
 static const size_t largeChunkSize = superChunkSize / 2;
-#if BPLATFORM(IOS)
-static const size_t largeChunkMetadataSize = 16 * kB;
-#else
-static const size_t largeChunkMetadataSize = 4 * kB;
-#endif
 static const size_t largeChunkOffset = 0;
 static const size_t largeChunkMask = ~(largeChunkSize - 1ul);

 static const size_t largeAlignment = 64;
+static const size_t largeMin = smallMax;
+static const size_t largeChunkMetadataSize = 4 * kB; // sizeof(LargeChunk)
 static const size_t largeMax = largeChunkSize - largeChunkMetadataSize;
-static const size_t largeMin = smallMax;

 static const size_t xLargeAlignment = vmPageSize;
 static const size_t xLargeMax = std::numeric_limits<size_t>::max() - xLargeAlignment; // Make sure that rounding up to xLargeAlignment does not overflow.
trunk/Source/bmalloc/bmalloc/SmallChunk.h
r196847 → r196873

 class SmallChunk {
 public:
+    SmallChunk(std::lock_guard<StaticMutex>&);
+
     static SmallChunk* get(void*);

     SmallPage* begin() { return SmallPage::get(SmallLine::get(m_memory)); }
-    SmallPage* end() { return &m_pages[pageCount]; }
+    SmallPage* end() { return m_pages.end(); }

-    SmallLine* lines() { return m_lines; }
-    SmallPage* pages() { return m_pages; }
+    SmallLine* lines() { return m_lines.begin(); }
+    SmallPage* pages() { return m_pages.begin(); }

 private:
-    static_assert(!(vmPageSize % smallLineSize), "vmPageSize must be an even multiple of line size");
-    static_assert(!(smallChunkSize % smallLineSize), "chunk size must be an even multiple of line size");
-
-    static const size_t lineCount = smallChunkSize / smallLineSize;
-    static const size_t pageCount = smallChunkSize / vmPageSize;
-
-    SmallLine m_lines[lineCount];
-    SmallPage m_pages[pageCount];
-
-    // Align to vmPageSize to avoid sharing physical pages with metadata.
-    // Otherwise, we'll confuse the scavenger into trying to scavenge metadata.
-    // FIXME: Below #ifdef workaround fix should be removed after all linux based ports bump
-    // own gcc version. See https://bugs.webkit.org/show_bug.cgi?id=140162#c87
-#if BPLATFORM(IOS)
-    char m_memory[] __attribute__((aligned(16384)));
-    static_assert(vmPageSize == 16384, "vmPageSize and alignment must be same");
-#else
-    char m_memory[] __attribute__((aligned(4096)));
-    static_assert(vmPageSize == 4096, "vmPageSize and alignment must be same");
-#endif
+    std::array<SmallLine, smallChunkSize / smallLineSize> m_lines;
+    std::array<SmallPage, smallChunkSize / vmPageSize> m_pages;
+    char m_memory[] __attribute__((aligned(smallLineSize)));
 };

+static_assert(!(vmPageSize % smallLineSize), "vmPageSize must be an even multiple of line size");
+static_assert(!(smallChunkSize % smallLineSize), "chunk size must be an even multiple of line size");
+static_assert(
+    sizeof(SmallChunk) - vmPageSize % sizeof(SmallChunk) < vmPageSize - 2 * smallMax,
+    "the first page of object memory in a small chunk can't allocate smallMax");
+
+inline SmallChunk::SmallChunk(std::lock_guard<StaticMutex>& lock)
+{
+    // Track the memory used for metadata by allocating imaginary objects.
+    for (SmallLine* line = m_lines.begin(); line < SmallLine::get(m_memory); ++line) {
+        line->ref(lock, 1);
+
+        SmallPage* page = SmallPage::get(line);
+        page->ref(lock);
+    }
+
+    for (SmallPage* page = begin(); page != end(); ++page)
+        page->setHasFreeLines(lock, true);
+}

 inline SmallChunk* SmallChunk::get(void* object)
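The "imaginary objects" in the new constructor can be shown in miniature: pre-refing the lines that overlap the metadata makes them look permanently in use, so the ordinary free-line scan skips them without any special case. A sketch with hypothetical names:

    #include <cassert>

    // Sketch: a chunk of 8 "lines" where the first two are occupied by the
    // chunk's own metadata. Pre-refing those lines lets the normal scanning
    // logic skip them with no special-case branch -- the same idea as
    // SmallChunk's constructor above.
    struct Line {
        unsigned refCount = 0;
    };

    int main()
    {
        const int lineCount = 8;
        const int metadataLines = 2; // lines overlapped by metadata
        Line lines[lineCount];

        // "Allocate" an imaginary object on each metadata line.
        for (int i = 0; i < metadataLines; ++i)
            ++lines[i].refCount;

        // The allocator's normal scan: hand out only unreferenced lines.
        int firstFree = 0;
        while (lines[firstFree].refCount)
            ++firstFree;
        assert(firstFree == metadataLines);
        return 0;
    }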
trunk/Source/bmalloc/bmalloc/SuperChunk.h
r196845 → r196873

 class SuperChunk {
 public:
-    static SuperChunk* create();
+    SuperChunk();

-    SmallChunk* smallChunk();
-    LargeChunk* largeChunk();
-
-private:
-    SuperChunk();
+    void* smallChunk();
+    void* largeChunk();
 };

-inline SuperChunk* SuperChunk::create()
-{
-    void* result = static_cast<char*>(vmAllocate(superChunkSize, superChunkSize));
-    return new (result) SuperChunk;
-}
-
 inline SuperChunk::SuperChunk()
 {
-    new (smallChunk()) SmallChunk;
-    new (largeChunk()) LargeChunk;
+    BASSERT(!test(this, ~superChunkMask));
+    BASSERT(!test(smallChunk(), ~smallChunkMask));
+    BASSERT(!test(largeChunk(), ~largeChunkMask));
 }

-inline SmallChunk* SuperChunk::smallChunk()
+inline void* SuperChunk::smallChunk()
 {
-    return reinterpret_cast<SmallChunk*>(
-        reinterpret_cast<char*>(this) + smallChunkOffset);
+    return reinterpret_cast<char*>(this) + smallChunkOffset;
 }

-inline LargeChunk* SuperChunk::largeChunk()
+inline void* SuperChunk::largeChunk()
 {
-    return reinterpret_cast<LargeChunk*>(
-        reinterpret_cast<char*>(this) + largeChunkOffset);
+    return reinterpret_cast<char*>(this) + largeChunkOffset;
 }
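The BASSERTs above depend on power-of-two chunk sizes: an address is chunk-aligned exactly when its low bits are zero under the chunk mask, and masking any interior pointer recovers the owning chunk (the same arithmetic LargeChunk::get() relies on). A quick sketch with illustrative sizes:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Illustrative values; bmalloc's actual constants live in Sizes.h.
        const std::uintptr_t superChunkSize = 4 * 1024 * 1024; // power of two
        const std::uintptr_t superChunkMask = ~(superChunkSize - 1);

        std::uintptr_t base = 7 * superChunkSize;  // any aligned address
        assert(!(base & ~superChunkMask));         // aligned: low bits are zero

        // Rounding an interior pointer down to its chunk is a single mask.
        std::uintptr_t object = base + 123456;
        assert((object & superChunkMask) == base);
        return 0;
    }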
trunk/Source/bmalloc/bmalloc/VMHeap.cpp
r196845 → r196873

 }

-void VMHeap::grow()
+void VMHeap::allocateSmallChunk(std::lock_guard<StaticMutex>& lock)
 {
-    SuperChunk* superChunk = SuperChunk::create();
+    if (!m_smallChunks.size())
+        allocateSuperChunk(lock);
+
+    // We initialize chunks lazily to avoid dirtying their metadata pages.
+    SmallChunk* smallChunk = new (m_smallChunks.pop()->smallChunk()) SmallChunk(lock);
+    for (auto* it = smallChunk->begin(); it < smallChunk->end(); ++it)
+        m_smallPages.push(it);
+}
+
+void VMHeap::allocateLargeChunk(std::lock_guard<StaticMutex>& lock)
+{
+    if (!m_largeChunks.size())
+        allocateSuperChunk(lock);
+
+    // We initialize chunks lazily to avoid dirtying their metadata pages.
+    LargeChunk* largeChunk = new (m_largeChunks.pop()->largeChunk()) LargeChunk;
+    LargeObject largeObject(largeChunk->begin());
+    m_largeObjects.insert(largeObject);
+}
+
+void VMHeap::allocateSuperChunk(std::lock_guard<StaticMutex>&)
+{
+    SuperChunk* superChunk =
+        new (vmAllocate(superChunkSize, superChunkSize)) SuperChunk;
+    m_smallChunks.push(superChunk);
+    m_largeChunks.push(superChunk);
 #if BOS(DARWIN)
     m_zone.addSuperChunk(superChunk);
 #endif
-
-    SmallChunk* smallChunk = superChunk->smallChunk();
-    for (auto* it = smallChunk->begin(); it != smallChunk->end(); ++it)
-        m_smallPages.push(it);
-
-    LargeChunk* largeChunk = superChunk->largeChunk();
-    LargeObject result(LargeObject::init(largeChunk).begin());
-    BASSERT(result.size() == largeMax);
-    m_largeObjects.insert(result);
 }
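Splitting grow() this way separates two costs: reserving a super chunk's address space (cheap, no dirty pages) from constructing a chunk's metadata (which dirties pages). The sketch below shows the pattern with a hypothetical POSIX mmap reservation; bmalloc's vmAllocate() plays this role above.

    #include <cstddef>
    #include <new>
    #include <vector>
    #include <sys/mman.h>

    // A chunk's bookkeeping; constructing it is what dirties its page.
    struct Metadata {
        unsigned liveObjects = 0;
    };

    int main()
    {
        const std::size_t chunkSize = 1 << 20;
        std::vector<void*> pendingChunks;

        // Eager step: reserve virtual address space. No metadata is
        // constructed, so no metadata pages are dirtied yet.
        void* reservation = mmap(nullptr, chunkSize, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANON, -1, 0);
        if (reservation == MAP_FAILED)
            return 1;
        pendingChunks.push_back(reservation);

        // Lazy step: the first allocation that needs this chunk constructs
        // its metadata in place, touching only the pages the constructor
        // actually writes.
        void* raw = pendingChunks.back();
        pendingChunks.pop_back();
        Metadata* metadata = new (raw) Metadata;
        ++metadata->liveObjects;

        munmap(reservation, chunkSize);
        return 0;
    }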
trunk/Source/bmalloc/bmalloc/VMHeap.h
r196845 → r196873

     SmallPage* allocateSmallPage(std::lock_guard<StaticMutex>&);
-    LargeObject allocateLargeObject(size_t);
-    LargeObject allocateLargeObject(size_t, size_t, size_t);
+    LargeObject allocateLargeObject(std::lock_guard<StaticMutex>&, size_t);
+    LargeObject allocateLargeObject(std::lock_guard<StaticMutex>&, size_t, size_t, size_t);

     void deallocateSmallPage(std::unique_lock<StaticMutex>&, SmallPage*);
     void deallocateLargeObject(std::unique_lock<StaticMutex>&, LargeObject);

 private:
-    void grow();
+    void allocateSmallChunk(std::lock_guard<StaticMutex>&);
+    void allocateLargeChunk(std::lock_guard<StaticMutex>&);
+    void allocateSuperChunk(std::lock_guard<StaticMutex>&);

     Vector<SmallPage*> m_smallPages;
     SegregatedFreeList m_largeObjects;
+
+    Vector<SuperChunk*> m_smallChunks;
+    Vector<SuperChunk*> m_largeChunks;
+
 #if BOS(DARWIN)
     Zone m_zone;
…
 {
     if (!m_smallPages.size())
-        grow();
+        allocateSmallChunk(lock);

     SmallPage* page = m_smallPages.pop();
-    page->setHasFreeLines(lock, true);
     vmAllocatePhysicalPages(page->begin()->begin(), vmPageSize);
     return page;
 }

-inline LargeObject VMHeap::allocateLargeObject(size_t size)
+inline LargeObject VMHeap::allocateLargeObject(std::lock_guard<StaticMutex>& lock, size_t size)
 {
     LargeObject largeObject = m_largeObjects.take(size);
     if (!largeObject) {
-        grow();
+        allocateLargeChunk(lock);
         largeObject = m_largeObjects.take(size);
         BASSERT(largeObject);
…
 }

-inline LargeObject VMHeap::allocateLargeObject(size_t alignment, size_t size, size_t unalignedSize)
+inline LargeObject VMHeap::allocateLargeObject(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size, size_t unalignedSize)
 {
     LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
     if (!largeObject) {
-        grow();
+        allocateLargeChunk(lock);
         largeObject = m_largeObjects.take(alignment, size, unalignedSize);
         BASSERT(largeObject);