Changeset 162017 in webkit
- Timestamp:
- Jan 14, 2014 3:03:01 PM (10 years ago)
- Location:
- trunk/Source/JavaScriptCore
- Files:
-
- 13 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r162006 r162017 1 2014-01-10 Mark Hahnenberg <mhahnenberg@apple.com> 2 3 Copying should be generational 4 https://bugs.webkit.org/show_bug.cgi?id=126555 5 6 Reviewed by Geoffrey Garen. 7 8 This patch adds support for copying to our generational collector. Eden collections 9 always trigger copying. Full collections use our normal fragmentation-based heuristics. 10 11 The way this works is that the CopiedSpace now has the notion of an old generation set of CopiedBlocks 12 and a new generation of CopiedBlocks. During each mutator cycle new CopiedSpace allocations reside 13 in the new generation. When a collection occurs, those blocks are moved to the old generation. 14 15 One key thing to remember is that both new and old generation objects in the MarkedSpace can 16 refer to old or new generation allocations in CopiedSpace. This is why we must fire write barriers 17 when assigning to an old (MarkedSpace) object's Butterfly. 18 19 * heap/CopiedAllocator.h: 20 (JSC::CopiedAllocator::tryAllocateDuringCopying): 21 * heap/CopiedBlock.h: 22 (JSC::CopiedBlock::CopiedBlock): 23 (JSC::CopiedBlock::didEvacuateBytes): 24 (JSC::CopiedBlock::isOld): 25 (JSC::CopiedBlock::didPromote): 26 * heap/CopiedBlockInlines.h: 27 (JSC::CopiedBlock::reportLiveBytes): 28 (JSC::CopiedBlock::reportLiveBytesDuringCopying): 29 * heap/CopiedSpace.cpp: 30 (JSC::CopiedSpace::CopiedSpace): 31 (JSC::CopiedSpace::~CopiedSpace): 32 (JSC::CopiedSpace::init): 33 (JSC::CopiedSpace::tryAllocateOversize): 34 (JSC::CopiedSpace::tryReallocateOversize): 35 (JSC::CopiedSpace::doneFillingBlock): 36 (JSC::CopiedSpace::didStartFullCollection): 37 (JSC::CopiedSpace::doneCopying): 38 (JSC::CopiedSpace::size): 39 (JSC::CopiedSpace::capacity): 40 (JSC::CopiedSpace::isPagedOut): 41 * heap/CopiedSpace.h: 42 (JSC::CopiedSpace::CopiedGeneration::CopiedGeneration): 43 * heap/CopiedSpaceInlines.h: 44 (JSC::CopiedSpace::contains): 45 (JSC::CopiedSpace::recycleEvacuatedBlock): 46 (JSC::CopiedSpace::allocateBlock): 47 
(JSC::CopiedSpace::startedCopying): 48 * heap/CopyVisitor.cpp: 49 (JSC::CopyVisitor::copyFromShared): 50 * heap/CopyVisitorInlines.h: 51 (JSC::CopyVisitor::allocateNewSpace): 52 (JSC::CopyVisitor::allocateNewSpaceSlow): 53 * heap/GCThreadSharedData.cpp: 54 (JSC::GCThreadSharedData::didStartCopying): 55 * heap/Heap.cpp: 56 (JSC::Heap::copyBackingStores): 57 * heap/SlotVisitorInlines.h: 58 (JSC::SlotVisitor::copyLater): 59 * heap/TinyBloomFilter.h: 60 (JSC::TinyBloomFilter::add): 61 1 62 2014-01-14 Mark Lam <mark.lam@apple.com> 2 63 -
trunk/Source/JavaScriptCore/heap/CopiedAllocator.h
r122768 r162017 39 39 bool fastPathShouldSucceed(size_t bytes) const; 40 40 CheckedBoolean tryAllocate(size_t bytes, void** outPtr); 41 CheckedBoolean tryAllocateDuringCopying(size_t bytes, void** outPtr); 41 42 CheckedBoolean tryReallocate(void *oldPtr, size_t oldBytes, size_t newBytes); 42 43 void* forceAllocate(size_t bytes); … … 91 92 ASSERT(is8ByteAligned(*outPtr)); 92 93 94 return true; 95 } 96 97 inline CheckedBoolean CopiedAllocator::tryAllocateDuringCopying(size_t bytes, void** outPtr) 98 { 99 if (!tryAllocate(bytes, outPtr)) 100 return false; 101 m_currentBlock->reportLiveBytesDuringCopying(bytes); 93 102 return true; 94 103 } -
trunk/Source/JavaScriptCore/heap/CopiedBlock.h
r155487 r162017 50 50 bool isPinned(); 51 51 52 bool isOld(); 52 53 bool isOversize(); 54 void didPromote(); 53 55 54 56 unsigned liveBytes(); 55 void reportLiveBytes(JSCell*, CopyToken, unsigned); 57 bool shouldReportLiveBytes(SpinLockHolder&, JSCell* owner); 58 void reportLiveBytes(SpinLockHolder&, JSCell*, CopyToken, unsigned); 59 void reportLiveBytesDuringCopying(unsigned); 56 60 void didSurviveGC(); 57 61 void didEvacuateBytes(unsigned); … … 82 86 bool hasWorkList(); 83 87 CopyWorkList& workList(); 88 SpinLock& workListLock() { return m_workListLock; } 84 89 85 90 private: … … 89 94 void checkConsistency(); 90 95 91 #if ENABLE(PARALLEL_GC)92 96 SpinLock m_workListLock; 93 #endif94 97 OwnPtr<CopyWorkList> m_workList; 95 98 96 99 size_t m_remaining; 97 uintptr_t m_isPinned; 100 bool m_isPinned : 1; 101 bool m_isOld : 1; 98 102 unsigned m_liveBytes; 99 103 #ifndef NDEBUG … … 131 135 , m_remaining(payloadCapacity()) 132 136 , m_isPinned(false) 137 , m_isOld(false) 133 138 , m_liveBytes(0) 134 139 #ifndef NDEBUG … … 136 141 #endif 137 142 { 138 #if ENABLE(PARALLEL_GC)139 143 m_workListLock.Init(); 140 #endif141 144 ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining))); 142 145 } … … 157 160 { 158 161 ASSERT(m_liveBytes >= bytes); 162 ASSERT(m_liveObjects); 159 163 checkConsistency(); 160 164 m_liveBytes -= bytes; … … 189 193 } 190 194 195 inline bool CopiedBlock::isOld() 196 { 197 return m_isOld; 198 } 199 200 inline void CopiedBlock::didPromote() 201 { 202 m_isOld = true; 203 } 204 191 205 inline bool CopiedBlock::isOversize() 192 206 { -
trunk/Source/JavaScriptCore/heap/CopiedBlockInlines.h
r161615 r162017 27 27 #define CopiedBlockInlines_h 28 28 29 #include "ClassInfo.h" 29 30 #include "CopiedBlock.h" 30 31 #include "Heap.h" 32 #include "MarkedBlock.h" 31 33 32 34 namespace JSC { 33 35 34 inline void CopiedBlock::reportLiveBytes(JSCell* owner, CopyToken token, unsigned bytes)36 inline bool CopiedBlock::shouldReportLiveBytes(SpinLockHolder&, JSCell* owner) 35 37 { 36 #if ENABLE(PARALLEL_GC) 37 SpinLockHolder locker(&m_workListLock); 38 #endif 38 // We want to add to live bytes if the owner isn't part of the remembered set or 39 // if this block was allocated during the last cycle. 40 // If we always added live bytes we would double count for elements in the remembered 41 // set across collections. 42 // If we didn't always add live bytes to new blocks, we'd get too few. 43 bool ownerIsRemembered = MarkedBlock::blockFor(owner)->isRemembered(owner); 44 return !ownerIsRemembered || !m_isOld; 45 } 46 47 inline void CopiedBlock::reportLiveBytes(SpinLockHolder&, JSCell* owner, CopyToken token, unsigned bytes) 48 { 49 checkConsistency(); 39 50 #ifndef NDEBUG 40 checkConsistency();41 51 m_liveObjects++; 42 52 #endif 43 53 m_liveBytes += bytes; 54 checkConsistency(); 55 ASSERT(m_liveBytes <= CopiedBlock::blockSize); 44 56 45 57 if (isPinned()) … … 57 69 } 58 70 71 inline void CopiedBlock::reportLiveBytesDuringCopying(unsigned bytes) 72 { 73 checkConsistency(); 74 // This doesn't need to be locked because the thread that calls this function owns the current block. 75 m_isOld = true; 76 #ifndef NDEBUG 77 m_liveObjects++; 78 #endif 79 m_liveBytes += bytes; 80 checkConsistency(); 81 ASSERT(m_liveBytes <= CopiedBlock::blockSize); 82 } 83 59 84 } // namespace JSC 60 85 -
trunk/Source/JavaScriptCore/heap/CopiedSpace.cpp
r161615 r162017 36 36 CopiedSpace::CopiedSpace(Heap* heap) 37 37 : m_heap(heap) 38 , m_toSpace(0)39 , m_fromSpace(0)40 38 , m_inCopyingPhase(false) 41 39 , m_shouldDoCopyPhase(false) … … 47 45 CopiedSpace::~CopiedSpace() 48 46 { 49 while (!m_toSpace->isEmpty()) 50 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_toSpace->removeHead())); 51 52 while (!m_fromSpace->isEmpty()) 53 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_fromSpace->removeHead())); 54 55 while (!m_oversizeBlocks.isEmpty()) 56 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oversizeBlocks.removeHead())); 47 while (!m_oldGen.toSpace->isEmpty()) 48 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.toSpace->removeHead())); 49 50 while (!m_oldGen.fromSpace->isEmpty()) 51 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_oldGen.fromSpace->removeHead())); 52 53 while (!m_oldGen.oversizeBlocks.isEmpty()) 54 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_oldGen.oversizeBlocks.removeHead())); 55 56 while (!m_newGen.toSpace->isEmpty()) 57 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.toSpace->removeHead())); 58 59 while (!m_newGen.fromSpace->isEmpty()) 60 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(m_newGen.fromSpace->removeHead())); 61 62 while (!m_newGen.oversizeBlocks.isEmpty()) 63 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(m_newGen.oversizeBlocks.removeHead())); 64 65 ASSERT(m_oldGen.toSpace->isEmpty()); 66 ASSERT(m_oldGen.fromSpace->isEmpty()); 67 ASSERT(m_oldGen.oversizeBlocks.isEmpty()); 68 ASSERT(m_newGen.toSpace->isEmpty()); 69 ASSERT(m_newGen.fromSpace->isEmpty()); 70 ASSERT(m_newGen.oversizeBlocks.isEmpty()); 57 71 } 58 72 59 73 void CopiedSpace::init() 60 74 { 61 m_toSpace = &m_blocks1; 62 m_fromSpace = &m_blocks2; 63 75 m_oldGen.toSpace = &m_oldGen.blocks1; 76 m_oldGen.fromSpace = &m_oldGen.blocks2; 77 78 m_newGen.toSpace = &m_newGen.blocks1; 79 
m_newGen.fromSpace = &m_newGen.blocks2; 80 64 81 allocateBlock(); 65 82 } … … 84 101 85 102 CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocateCustomSize(sizeof(CopiedBlock) + bytes, CopiedBlock::blockSize)); 86 m_ oversizeBlocks.push(block);87 m_ blockFilter.add(reinterpret_cast<Bits>(block));103 m_newGen.oversizeBlocks.push(block); 104 m_newGen.blockFilter.add(reinterpret_cast<Bits>(block)); 88 105 m_blockSet.add(block); 106 ASSERT(!block->isOld()); 89 107 90 108 CopiedAllocator allocator; … … 139 157 CopiedBlock* oldBlock = CopiedSpace::blockFor(oldPtr); 140 158 if (oldBlock->isOversize()) { 141 m_oversizeBlocks.remove(oldBlock); 159 if (oldBlock->isOld()) 160 m_oldGen.oversizeBlocks.remove(oldBlock); 161 else 162 m_newGen.oversizeBlocks.remove(oldBlock); 142 163 m_blockSet.remove(oldBlock); 143 164 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(oldBlock)); … … 166 187 167 188 { 189 // Always put the block into the old gen because it's being promoted! 
168 190 SpinLockHolder locker(&m_toSpaceLock); 169 m_ toSpace->push(block);191 m_oldGen.toSpace->push(block); 170 192 m_blockSet.add(block); 171 m_ blockFilter.add(reinterpret_cast<Bits>(block));193 m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block)); 172 194 } 173 195 … … 182 204 } 183 205 184 void CopiedSpace::startedCopying() 185 { 186 std::swap(m_fromSpace, m_toSpace); 187 188 m_blockFilter.reset(); 189 m_allocator.resetCurrentBlock(); 190 191 CopiedBlock* next = 0; 192 size_t totalLiveBytes = 0; 193 size_t totalUsableBytes = 0; 194 for (CopiedBlock* block = m_fromSpace->head(); block; block = next) { 195 next = block->next(); 196 if (!block->isPinned() && block->canBeRecycled()) { 197 recycleEvacuatedBlock(block); 198 continue; 199 } 200 totalLiveBytes += block->liveBytes(); 201 totalUsableBytes += block->payloadCapacity(); 202 } 203 204 CopiedBlock* block = m_oversizeBlocks.head(); 205 while (block) { 206 CopiedBlock* next = block->next(); 207 if (block->isPinned()) { 208 m_blockFilter.add(reinterpret_cast<Bits>(block)); 209 totalLiveBytes += block->payloadCapacity(); 210 totalUsableBytes += block->payloadCapacity(); 211 block->didSurviveGC(); 212 } else { 213 m_oversizeBlocks.remove(block); 214 m_blockSet.remove(block); 215 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block)); 216 } 217 block = next; 218 } 219 220 double markedSpaceBytes = m_heap->objectSpace().capacity(); 221 double totalFragmentation = ((double)totalLiveBytes + markedSpaceBytes) / ((double)totalUsableBytes + markedSpaceBytes); 222 m_shouldDoCopyPhase = totalFragmentation <= Options::minHeapUtilization(); 223 if (!m_shouldDoCopyPhase) 224 return; 225 226 ASSERT(m_shouldDoCopyPhase); 227 ASSERT(!m_inCopyingPhase); 228 ASSERT(!m_numberOfLoanedBlocks); 229 m_inCopyingPhase = true; 206 void CopiedSpace::didStartFullCollection() 207 { 208 ASSERT(heap()->operationInProgress() == FullCollection); 209 ASSERT(m_oldGen.fromSpace->isEmpty()); 210 
ASSERT(m_newGen.fromSpace->isEmpty()); 211 212 #ifndef NDEBUG 213 for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next()) 214 ASSERT(!block->liveBytes()); 215 216 for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next()) 217 ASSERT(!block->liveBytes()); 218 #endif 219 220 for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next()) 221 block->didSurviveGC(); 222 223 for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next()) 224 block->didSurviveGC(); 230 225 } 231 226 … … 241 236 m_inCopyingPhase = false; 242 237 243 while (!m_fromSpace->isEmpty()) { 244 CopiedBlock* block = m_fromSpace->removeHead(); 245 // All non-pinned blocks in from-space should have been reclaimed as they were evacuated. 246 ASSERT(block->isPinned() || !m_shouldDoCopyPhase); 247 block->didSurviveGC(); 238 DoublyLinkedList<CopiedBlock>* toSpace; 239 DoublyLinkedList<CopiedBlock>* fromSpace; 240 TinyBloomFilter* blockFilter; 241 if (heap()->operationInProgress() == FullCollection) { 242 toSpace = m_oldGen.toSpace; 243 fromSpace = m_oldGen.fromSpace; 244 blockFilter = &m_oldGen.blockFilter; 245 } else { 246 toSpace = m_newGen.toSpace; 247 fromSpace = m_newGen.fromSpace; 248 blockFilter = &m_newGen.blockFilter; 249 } 250 251 while (!fromSpace->isEmpty()) { 252 CopiedBlock* block = fromSpace->removeHead(); 248 253 // We don't add the block to the blockSet because it was never removed. 
249 254 ASSERT(m_blockSet.contains(block)); 250 m_blockFilter.add(reinterpret_cast<Bits>(block)); 251 m_toSpace->push(block); 252 } 253 254 if (!m_toSpace->head()) 255 allocateBlock(); 256 else 257 m_allocator.setCurrentBlock(m_toSpace->head()); 255 blockFilter->add(reinterpret_cast<Bits>(block)); 256 toSpace->push(block); 257 } 258 259 if (heap()->operationInProgress() == EdenCollection) { 260 m_oldGen.toSpace->append(*m_newGen.toSpace); 261 m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks); 262 m_oldGen.blockFilter.add(m_newGen.blockFilter); 263 m_newGen.blockFilter.reset(); 264 } 265 266 ASSERT(m_newGen.toSpace->isEmpty()); 267 ASSERT(m_newGen.fromSpace->isEmpty()); 268 ASSERT(m_newGen.oversizeBlocks.isEmpty()); 269 270 allocateBlock(); 258 271 259 272 m_shouldDoCopyPhase = false; … … 264 277 size_t calculatedSize = 0; 265 278 266 for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) 267 calculatedSize += block->size(); 268 269 for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next()) 270 calculatedSize += block->size(); 271 272 for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) 279 for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next()) 280 calculatedSize += block->size(); 281 282 for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next()) 283 calculatedSize += block->size(); 284 285 for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next()) 286 calculatedSize += block->size(); 287 288 for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next()) 289 calculatedSize += block->size(); 290 291 for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next()) 292 calculatedSize += block->size(); 293 294 for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next()) 273 295 calculatedSize += block->size(); 274 296 … … 280 302 size_t calculatedCapacity 
= 0; 281 303 282 for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) 283 calculatedCapacity += block->capacity(); 284 285 for (CopiedBlock* block = m_fromSpace->head(); block; block = block->next()) 286 calculatedCapacity += block->capacity(); 287 288 for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) 304 for (CopiedBlock* block = m_oldGen.toSpace->head(); block; block = block->next()) 305 calculatedCapacity += block->capacity(); 306 307 for (CopiedBlock* block = m_oldGen.fromSpace->head(); block; block = block->next()) 308 calculatedCapacity += block->capacity(); 309 310 for (CopiedBlock* block = m_oldGen.oversizeBlocks.head(); block; block = block->next()) 311 calculatedCapacity += block->capacity(); 312 313 for (CopiedBlock* block = m_newGen.toSpace->head(); block; block = block->next()) 314 calculatedCapacity += block->capacity(); 315 316 for (CopiedBlock* block = m_newGen.fromSpace->head(); block; block = block->next()) 317 calculatedCapacity += block->capacity(); 318 319 for (CopiedBlock* block = m_newGen.oversizeBlocks.head(); block; block = block->next()) 289 320 calculatedCapacity += block->capacity(); 290 321 … … 312 343 bool CopiedSpace::isPagedOut(double deadline) 313 344 { 314 return isBlockListPagedOut(deadline, m_toSpace) 315 || isBlockListPagedOut(deadline, m_fromSpace) 316 || isBlockListPagedOut(deadline, &m_oversizeBlocks); 317 } 318 319 void CopiedSpace::didStartFullCollection() 320 { 321 ASSERT(heap()->operationInProgress() == FullCollection); 322 323 ASSERT(m_fromSpace->isEmpty()); 324 325 for (CopiedBlock* block = m_toSpace->head(); block; block = block->next()) 326 block->didSurviveGC(); 327 328 for (CopiedBlock* block = m_oversizeBlocks.head(); block; block = block->next()) 329 block->didSurviveGC(); 345 return isBlockListPagedOut(deadline, m_oldGen.toSpace) 346 || isBlockListPagedOut(deadline, m_oldGen.fromSpace) 347 || isBlockListPagedOut(deadline, &m_oldGen.oversizeBlocks) 348 || 
isBlockListPagedOut(deadline, m_newGen.toSpace) 349 || isBlockListPagedOut(deadline, m_newGen.fromSpace) 350 || isBlockListPagedOut(deadline, &m_newGen.oversizeBlocks); 330 351 } 331 352 -
trunk/Source/JavaScriptCore/heap/CopiedSpace.h
r161615 r162017 29 29 #include "CopiedAllocator.h" 30 30 #include "HeapBlock.h" 31 #include "HeapOperation.h" 31 32 #include "TinyBloomFilter.h" 32 33 #include <wtf/Assertions.h> … … 63 64 void didStartFullCollection(); 64 65 66 template <HeapOperation collectionType> 65 67 void startedCopying(); 68 void startedEdenCopy(); 69 void startedFullCopy(); 66 70 void doneCopying(); 67 71 bool isInCopyPhase() { return m_inCopyingPhase; } … … 96 100 97 101 void doneFillingBlock(CopiedBlock*, CopiedBlock**); 98 void recycleEvacuatedBlock(CopiedBlock* );102 void recycleEvacuatedBlock(CopiedBlock*, HeapOperation collectionType); 99 103 void recycleBorrowedBlock(CopiedBlock*); 100 104 … … 103 107 CopiedAllocator m_allocator; 104 108 105 TinyBloomFilter m_blockFilter;106 109 HashSet<CopiedBlock*> m_blockSet; 107 110 108 111 SpinLock m_toSpaceLock; 109 112 110 DoublyLinkedList<CopiedBlock>* m_toSpace; 111 DoublyLinkedList<CopiedBlock>* m_fromSpace; 112 113 DoublyLinkedList<CopiedBlock> m_blocks1; 114 DoublyLinkedList<CopiedBlock> m_blocks2; 115 DoublyLinkedList<CopiedBlock> m_oversizeBlocks; 113 struct CopiedGeneration { 114 CopiedGeneration() 115 : toSpace(0) 116 , fromSpace(0) 117 { 118 } 119 120 DoublyLinkedList<CopiedBlock>* toSpace; 121 DoublyLinkedList<CopiedBlock>* fromSpace; 122 123 DoublyLinkedList<CopiedBlock> blocks1; 124 DoublyLinkedList<CopiedBlock> blocks2; 125 DoublyLinkedList<CopiedBlock> oversizeBlocks; 126 127 TinyBloomFilter blockFilter; 128 }; 129 130 CopiedGeneration m_oldGen; 131 CopiedGeneration m_newGen; 116 132 117 133 bool m_inCopyingPhase; -
trunk/Source/JavaScriptCore/heap/CopiedSpaceInlines.h
r158583 r162017 38 38 inline bool CopiedSpace::contains(CopiedBlock* block) 39 39 { 40 return !m_blockFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_blockSet.contains(block); 40 return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block))) 41 && m_blockSet.contains(block); 41 42 } 42 43 … … 93 94 } 94 95 95 inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block )96 inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType) 96 97 { 97 98 ASSERT(block); … … 101 102 SpinLockHolder locker(&m_toSpaceLock); 102 103 m_blockSet.remove(block); 103 m_fromSpace->remove(block); 104 if (collectionType == EdenCollection) 105 m_newGen.fromSpace->remove(block); 106 else 107 m_oldGen.fromSpace->remove(block); 104 108 } 105 109 m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block)); … … 142 146 CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>()); 143 147 144 m_ toSpace->push(block);145 m_ blockFilter.add(reinterpret_cast<Bits>(block));148 m_newGen.toSpace->push(block); 149 m_newGen.blockFilter.add(reinterpret_cast<Bits>(block)); 146 150 m_blockSet.add(block); 147 151 m_allocator.setCurrentBlock(block); … … 175 179 } 176 180 181 template <HeapOperation collectionType> 182 inline void CopiedSpace::startedCopying() 183 { 184 DoublyLinkedList<CopiedBlock>* fromSpace; 185 DoublyLinkedList<CopiedBlock>* oversizeBlocks; 186 TinyBloomFilter* blockFilter; 187 if (collectionType == FullCollection) { 188 ASSERT(m_oldGen.fromSpace->isEmpty()); 189 ASSERT(m_newGen.fromSpace->isEmpty()); 190 191 m_oldGen.toSpace->append(*m_newGen.toSpace); 192 m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks); 193 194 ASSERT(m_newGen.toSpace->isEmpty()); 195 ASSERT(m_newGen.fromSpace->isEmpty()); 196 ASSERT(m_newGen.oversizeBlocks.isEmpty()); 197 198 std::swap(m_oldGen.fromSpace, m_oldGen.toSpace); 199 fromSpace = m_oldGen.fromSpace; 
200 oversizeBlocks = &m_oldGen.oversizeBlocks; 201 blockFilter = &m_oldGen.blockFilter; 202 } else { 203 std::swap(m_newGen.fromSpace, m_newGen.toSpace); 204 fromSpace = m_newGen.fromSpace; 205 oversizeBlocks = &m_newGen.oversizeBlocks; 206 blockFilter = &m_newGen.blockFilter; 207 } 208 209 blockFilter->reset(); 210 m_allocator.resetCurrentBlock(); 211 212 CopiedBlock* next = 0; 213 size_t totalLiveBytes = 0; 214 size_t totalUsableBytes = 0; 215 for (CopiedBlock* block = fromSpace->head(); block; block = next) { 216 next = block->next(); 217 if (!block->isPinned() && block->canBeRecycled()) { 218 recycleEvacuatedBlock(block, collectionType); 219 continue; 220 } 221 ASSERT(block->liveBytes() <= CopiedBlock::blockSize); 222 totalLiveBytes += block->liveBytes(); 223 totalUsableBytes += block->payloadCapacity(); 224 block->didPromote(); 225 } 226 227 CopiedBlock* block = oversizeBlocks->head(); 228 while (block) { 229 CopiedBlock* next = block->next(); 230 if (block->isPinned()) { 231 blockFilter->add(reinterpret_cast<Bits>(block)); 232 totalLiveBytes += block->payloadCapacity(); 233 totalUsableBytes += block->payloadCapacity(); 234 block->didPromote(); 235 } else { 236 oversizeBlocks->remove(block); 237 m_blockSet.remove(block); 238 m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block)); 239 } 240 block = next; 241 } 242 243 double markedSpaceBytes = m_heap->objectSpace().capacity(); 244 double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes); 245 m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalFragmentation <= Options::minHeapUtilization(); 246 if (!m_shouldDoCopyPhase) { 247 if (Options::logGC()) 248 dataLog("Skipped copying, "); 249 return; 250 } 251 252 if (Options::logGC()) 253 dataLogF("Did copy, "); 254 ASSERT(m_shouldDoCopyPhase); 255 ASSERT(!m_numberOfLoanedBlocks); 256 ASSERT(!m_inCopyingPhase); 257 m_inCopyingPhase = true; 
258 } 259 177 260 } // namespace JSC 178 261 -
trunk/Source/JavaScriptCore/heap/CopyVisitor.cpp
r153720 r162017 58 58 59 59 ASSERT(!block->liveBytes()); 60 m_shared.m_copiedSpace->recycleEvacuatedBlock(block );60 m_shared.m_copiedSpace->recycleEvacuatedBlock(block, m_shared.m_vm->heap.operationInProgress()); 61 61 } 62 62 m_shared.getNextBlocksToCopy(next, end); -
trunk/Source/JavaScriptCore/heap/CopyVisitorInlines.h
r155487 r162017 56 56 { 57 57 void* result = 0; // Compilers don't realize that this will be assigned. 58 if (LIKELY(m_copiedAllocator.tryAllocate (bytes, &result)))58 if (LIKELY(m_copiedAllocator.tryAllocateDuringCopying(bytes, &result))) 59 59 return result; 60 60 … … 71 71 72 72 void* result = 0; 73 CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate (bytes, &result);73 CheckedBoolean didSucceed = m_copiedAllocator.tryAllocateDuringCopying(bytes, &result); 74 74 ASSERT(didSucceed); 75 75 return result; -
trunk/Source/JavaScriptCore/heap/GCThreadSharedData.cpp
r155317 r162017 182 182 { 183 183 SpinLockHolder locker(&m_copyLock); 184 WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy); 184 if (m_vm->heap.operationInProgress() == EdenCollection) { 185 // Reset the vector to be empty, but don't throw away the backing store. 186 m_blocksToCopy.shrink(0); 187 for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next()) 188 m_blocksToCopy.append(block); 189 } else { 190 ASSERT(m_vm->heap.operationInProgress() == FullCollection); 191 WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy); 192 } 185 193 m_copyIndex = 0; 186 194 } -
trunk/Source/JavaScriptCore/heap/Heap.cpp
r161914 r162017 642 642 void Heap::copyBackingStores() 643 643 { 644 if (collectionType == EdenCollection) 645 return; 646 647 m_storageSpace.startedCopying(); 644 m_storageSpace.startedCopying<collectionType>(); 648 645 if (m_storageSpace.shouldDoCopyPhase()) { 649 646 m_sharedData.didStartCopying(); -
trunk/Source/JavaScriptCore/heap/SlotVisitorInlines.h
r161914 r162017 226 226 { 227 227 ASSERT(bytes); 228 // We don't do any copying during EdenCollections.229 ASSERT(heap()->operationInProgress() != EdenCollection);230 231 m_bytesCopied += bytes;232 233 228 CopiedBlock* block = CopiedSpace::blockFor(ptr); 234 229 if (block->isOversize()) { … … 237 232 } 238 233 239 block->reportLiveBytes(owner, token, bytes); 234 SpinLockHolder locker(&block->workListLock()); 235 if (heap()->operationInProgress() == FullCollection || block->shouldReportLiveBytes(locker, owner)) { 236 m_bytesCopied += bytes; 237 block->reportLiveBytes(locker, owner, token, bytes); 238 } 240 239 } 241 240 -
trunk/Source/JavaScriptCore/heap/TinyBloomFilter.h
r105442 r162017 36 36 37 37 void add(Bits); 38 void add(TinyBloomFilter&); 38 39 bool ruleOut(Bits) const; // True for 0. 39 40 void reset(); … … 51 52 { 52 53 m_bits |= bits; 54 } 55 56 inline void TinyBloomFilter::add(TinyBloomFilter& other) 57 { 58 m_bits |= other.m_bits; 53 59 } 54 60
Note: See TracChangeset for help on using the changeset viewer.