Changeset 227951 in webkit
- Timestamp:
- Jan 31, 2018 9:36:40 PM (6 years ago)
- Location:
- trunk/Source
- Files:
-
- 1 deleted
- 18 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r227929 r227951 1 2018-01-31 Saam Barati <sbarati@apple.com> 2 3 Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm 4 https://bugs.webkit.org/show_bug.cgi?id=182064 5 <rdar://problem/36840132> 6 7 Reviewed by Geoffrey Garen. 8 9 This patch switches WebAssembly Memory to always use bmalloc's 10 zeroed virtual allocation API. This makes it so that we don't 11 dirty the memory to zero it. It's a huge compile time speedup 12 on WasmBench on iOS. 13 14 * wasm/WasmMemory.cpp: 15 (JSC::Wasm::Memory::create): 16 (JSC::Wasm::Memory::~Memory): 17 (JSC::Wasm::Memory::addressIsInActiveFastMemory): 18 (JSC::Wasm::Memory::grow): 19 (JSC::Wasm::commitZeroPages): Deleted. 20 1 21 2018-01-31 Mark Lam <mark.lam@apple.com> 2 22 -
trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp
r226461 r227951 96 96 public: 97 97 MemoryManager() 98 : m_max Count(Options::maxNumWebAssemblyFastMemories())99 { 100 } 101 102 MemoryResult tryAllocate VirtualPages()98 : m_maxFastMemoryCount(Options::maxNumWebAssemblyFastMemories()) 99 { 100 } 101 102 MemoryResult tryAllocateFastMemory() 103 103 { 104 104 MemoryResult result = [&] { 105 105 auto holder = holdLock(m_lock); 106 if (m_ memories.size() >= m_maxCount)106 if (m_fastMemories.size() >= m_maxFastMemoryCount) 107 107 return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory); 108 108 109 void* result = Gigacage::tryAllocate VirtualPages(Gigacage::Primitive, Memory::fastMappedBytes());109 void* result = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, Memory::fastMappedBytes()); 110 110 if (!result) 111 111 return MemoryResult(nullptr, MemoryResult::SyncTryToReclaimMemory); 112 112 113 m_ memories.append(result);113 m_fastMemories.append(result); 114 114 115 115 return MemoryResult( 116 116 result, 117 m_ memories.size() >= m_maxCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success);117 m_fastMemories.size() >= m_maxFastMemoryCount / 2 ? MemoryResult::SuccessAndNotifyMemoryPressure : MemoryResult::Success); 118 118 }(); 119 119 … … 124 124 } 125 125 126 void free VirtualPages(void* basePtr)126 void freeFastMemory(void* basePtr) 127 127 { 128 128 { 129 129 auto holder = holdLock(m_lock); 130 130 Gigacage::freeVirtualPages(Gigacage::Primitive, basePtr, Memory::fastMappedBytes()); 131 m_ memories.removeFirst(basePtr);131 m_fastMemories.removeFirst(basePtr); 132 132 } 133 133 … … 136 136 } 137 137 138 bool containsAddress(void* address)138 bool isAddressInFastMemory(void* address) 139 139 { 140 140 // NOTE: This can be called from a signal handler, but only after we proved that we're in JIT code. 
141 141 auto holder = holdLock(m_lock); 142 for (void* memory : m_ memories) {142 for (void* memory : m_fastMemories) { 143 143 char* start = static_cast<char*>(memory); 144 144 if (start <= address && address <= start + Memory::fastMappedBytes()) … … 189 189 void dump(PrintStream& out) const 190 190 { 191 out.print(" virtual memories = ", m_memories.size(), "/", m_maxCount, ", bytes = ", m_physicalBytes, "/", memoryLimit());191 out.print("fast memories = ", m_fastMemories.size(), "/", m_maxFastMemoryCount, ", bytes = ", m_physicalBytes, "/", memoryLimit()); 192 192 } 193 193 194 194 private: 195 195 Lock m_lock; 196 unsigned m_max Count { 0 };197 Vector<void*> m_ memories;196 unsigned m_maxFastMemoryCount { 0 }; 197 Vector<void*> m_fastMemories; 198 198 size_t m_physicalBytes { 0 }; 199 199 }; … … 270 270 } 271 271 272 static void commitZeroPages(void* startAddress, size_t sizeInBytes)273 {274 bool writable = true;275 bool executable = false;276 #if OS(LINUX)277 // In Linux, MADV_DONTNEED clears backing pages with zero. 
Be Careful that MADV_DONTNEED shows different semantics in different OSes.278 // For example, FreeBSD does not clear backing pages immediately.279 while (madvise(startAddress, sizeInBytes, MADV_DONTNEED) == -1 && errno == EAGAIN) { }280 OSAllocator::commit(startAddress, sizeInBytes, writable, executable);281 #else282 OSAllocator::commit(startAddress, sizeInBytes, writable, executable);283 memset(startAddress, 0, sizeInBytes);284 #endif285 }286 287 272 RefPtr<Memory> Memory::create() 288 273 { … … 315 300 tryAllocate( 316 301 [&] () -> MemoryResult::Kind { 317 auto result = memoryManager().tryAllocate VirtualPages();302 auto result = memoryManager().tryAllocateFastMemory(); 318 303 fastMemory = bitwise_cast<char*>(result.basePtr); 319 304 return result.kind; … … 328 313 } 329 314 330 commitZeroPages(fastMemory, initialBytes);331 332 315 return adoptRef(new Memory(fastMemory, initial, maximum, Memory::fastMappedBytes(), MemoryMode::Signaling, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); 333 316 } … … 339 322 return adoptRef(new Memory(initial, maximum, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); 340 323 341 void* slowMemory = Gigacage::tryAl ignedMalloc(Gigacage::Primitive, WTF::pageSize(), initialBytes);324 void* slowMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, initialBytes); 342 325 if (!slowMemory) { 343 326 memoryManager().freePhysicalBytes(initialBytes); 344 327 return nullptr; 345 328 } 346 memset(slowMemory, 0, initialBytes);347 329 return adoptRef(new Memory(slowMemory, initial, maximum, initialBytes, MemoryMode::BoundsChecking, WTFMove(notifyMemoryPressure), WTFMove(syncTryToReclaimMemory), WTFMove(growSuccessCallback))); 348 330 } … … 358 340 RELEASE_ASSERT_NOT_REACHED(); 359 341 } 360 memoryManager().free VirtualPages(m_memory);342 memoryManager().freeFastMemory(m_memory); 361 343 break; 362 344 case MemoryMode::BoundsChecking: 
363 Gigacage:: alignedFree(Gigacage::Primitive, m_memory);345 Gigacage::freeVirtualPages(Gigacage::Primitive, m_memory, m_size); 364 346 break; 365 347 } … … 380 362 bool Memory::addressIsInActiveFastMemory(void* address) 381 363 { 382 return memoryManager(). containsAddress(address);364 return memoryManager().isAddressInFastMemory(address); 383 365 } 384 366 … … 423 405 RELEASE_ASSERT(maximum().bytes() != 0); 424 406 425 void* newMemory = Gigacage::tryAl ignedMalloc(Gigacage::Primitive, WTF::pageSize(), desiredSize);407 void* newMemory = Gigacage::tryAllocateZeroedVirtualPages(Gigacage::Primitive, desiredSize); 426 408 if (!newMemory) 427 409 return makeUnexpected(GrowFailReason::OutOfMemory); 428 410 429 411 memcpy(newMemory, m_memory, m_size); 430 memset(static_cast<char*>(newMemory) + m_size, 0, desiredSize - m_size);431 412 if (m_memory) 432 Gigacage:: alignedFree(Gigacage::Primitive, m_memory);413 Gigacage::freeVirtualPages(Gigacage::Primitive, m_memory, m_size); 433 414 m_memory = newMemory; 434 415 m_mappedCapacity = desiredSize; … … 447 428 RELEASE_ASSERT_NOT_REACHED(); 448 429 } 449 commitZeroPages(startAddress, extraBytes);450 430 m_size = desiredSize; 451 431 m_indexingMask = WTF::computeIndexingMask(desiredSize); -
trunk/Source/WTF/ChangeLog
r227940 r227951 1 2018-01-31 Saam Barati <sbarati@apple.com> 2 3 Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm 4 https://bugs.webkit.org/show_bug.cgi?id=182064 5 <rdar://problem/36840132> 6 7 Reviewed by Geoffrey Garen. 8 9 * wtf/Gigacage.cpp: 10 (Gigacage::tryAllocateZeroedVirtualPages): 11 (Gigacage::freeVirtualPages): 12 (Gigacage::tryAllocateVirtualPages): Deleted. 13 * wtf/Gigacage.h: 14 * wtf/OSAllocator.h: 15 1 16 2018-01-31 Mark Lam <mark.lam@apple.com> 2 17 -
trunk/Source/WTF/wtf/Gigacage.cpp
r225471 r227951 42 42 } 43 43 44 void* tryAllocate VirtualPages(Kind, size_t size)44 void* tryAllocateZeroedVirtualPages(Kind, size_t size) 45 45 { 46 return OSAllocator::reserveUncommitted(size); 46 size = roundUpToMultipleOf(WTF::pageSize(), size); 47 void* result = OSAllocator::reserveAndCommit(size); 48 #if !ASSERT_DISABLED 49 if (result) { 50 for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) 51 ASSERT(static_cast<uintptr_t*>(result)[i] == 0); 52 } 53 #endif 54 return result; 47 55 } 48 56 49 57 void freeVirtualPages(Kind, void* basePtr, size_t size) 50 58 { 51 OSAllocator:: releaseDecommitted(basePtr, size);59 OSAllocator::decommitAndRelease(basePtr, size); 52 60 } 53 61 … … 94 102 } 95 103 96 void* tryAllocate VirtualPages(Kind kind, size_t size)104 void* tryAllocateZeroedVirtualPages(Kind kind, size_t size) 97 105 { 98 void* result = bmalloc::api::tryLarge MemalignVirtual(WTF::pageSize(), size, bmalloc::heapKind(kind));106 void* result = bmalloc::api::tryLargeZeroedMemalignVirtual(WTF::pageSize(), size, bmalloc::heapKind(kind)); 99 107 WTF::compilerFence(); 100 108 return result; -
trunk/Source/WTF/wtf/Gigacage.h
r225471 r227951 120 120 inline void free(Kind, void* p) { fastFree(p); } 121 121 122 WTF_EXPORT_PRIVATE void* tryAllocate VirtualPages(Kind, size_t size);122 WTF_EXPORT_PRIVATE void* tryAllocateZeroedVirtualPages(Kind, size_t size); 123 123 WTF_EXPORT_PRIVATE void freeVirtualPages(Kind, void* basePtr, size_t size); 124 124 … … 134 134 WTF_EXPORT_PRIVATE void free(Kind, void*); 135 135 136 WTF_EXPORT_PRIVATE void* tryAllocate VirtualPages(Kind, size_t size);136 WTF_EXPORT_PRIVATE void* tryAllocateZeroedVirtualPages(Kind, size_t size); 137 137 WTF_EXPORT_PRIVATE void freeVirtualPages(Kind, void* basePtr, size_t size); 138 138 -
trunk/Source/WTF/wtf/OSAllocator.h
r215340 r227951 43 43 // These methods are symmetric; reserveUncommitted allocates VM in an uncommitted state, 44 44 // releaseDecommitted should be called on a region of VM allocated by a single reservation, 45 // the memory must all currently be in a decommitted state. 45 // the memory must all currently be in a decommitted state. reserveUncommitted returns to 46 // you memory that is zeroed. 46 47 WTF_EXPORT_PRIVATE static void* reserveUncommitted(size_t, Usage = UnknownUsage, bool writable = true, bool executable = false, bool includesGuardPages = false); 47 48 WTF_EXPORT_PRIVATE static void releaseDecommitted(void*, size_t); -
trunk/Source/bmalloc/ChangeLog
r227215 r227951 1 2018-01-31 Saam Barati <sbarati@apple.com> 2 3 Replace tryLargeMemalignVirtual with tryLargeZeroedMemalignVirtual and use it to allocate large zeroed memory in Wasm 4 https://bugs.webkit.org/show_bug.cgi?id=182064 5 <rdar://problem/36840132> 6 7 Reviewed by Geoffrey Garen. 8 9 This patch replaces the tryLargeMemalignVirtual API with tryLargeZeroedMemalignVirtual. 10 By doing that, we're able to remove the AllocationKind enum. To zero the memory, 11 tryLargeZeroedMemalignVirtual uses mmap(... MAP_ANON ...) over previously mmapped 12 memory. This both purges any resident memory for the virtual range and ensures 13 that the pages in the range are zeroed. Most OSs should implement this by taking a 14 page fault and zero filling on first access. Therefore, this API is returning pages 15 that will result in page faults on first access. Hence, the name 'virtual' in the API. 16 This API differs from the old API in that users of it need not call madvise themselves. 17 The memory is ready to go. 18 19 * bmalloc.xcodeproj/project.pbxproj: 20 * bmalloc/AllocationKind.h: Removed. 21 * bmalloc/DebugHeap.cpp: 22 (bmalloc::DebugHeap::memalignLarge): 23 (bmalloc::DebugHeap::freeLarge): 24 * bmalloc/DebugHeap.h: 25 * bmalloc/Heap.cpp: 26 (bmalloc::Heap::splitAndAllocate): 27 (bmalloc::Heap::tryAllocateLarge): 28 (bmalloc::Heap::allocateLarge): 29 (bmalloc::Heap::shrinkLarge): 30 (bmalloc::Heap::deallocateLarge): 31 * bmalloc/Heap.h: 32 * bmalloc/IsoPage.cpp: 33 (bmalloc::IsoPageBase::allocatePageMemory): 34 * bmalloc/VMAllocate.h: 35 (bmalloc::vmZeroAndPurge): 36 * bmalloc/VMHeap.cpp: 37 (bmalloc::VMHeap::tryAllocateLargeChunk): 38 * bmalloc/VMHeap.h: 39 * bmalloc/bmalloc.cpp: 40 (bmalloc::api::tryLargeZeroedMemalignVirtual): 41 (bmalloc::api::freeLargeVirtual): 42 (bmalloc::api::tryLargeMemalignVirtual): Deleted. 43 * bmalloc/bmalloc.h: 44 1 45 2018-01-19 Keith Miller <keith_miller@apple.com> 2 46 -
trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj
r226067 r227951 22 22 23 23 /* Begin PBXBuildFile section */ 24 0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F3DA0131F267AB800342C08 /* AllocationKind.h */; settings = {ATTRIBUTES = (Private, ); }; };25 24 0F5167741FAD685C008236A8 /* bmalloc.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5167731FAD6852008236A8 /* bmalloc.cpp */; }; 26 25 0F5549EF1FB54704007FF75A /* IsoPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F5549EE1FB54701007FF75A /* IsoPage.cpp */; }; … … 174 173 175 174 /* Begin PBXFileReference section */ 176 0F3DA0131F267AB800342C08 /* AllocationKind.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = AllocationKind.h; path = bmalloc/AllocationKind.h; sourceTree = "<group>"; };177 175 0F5167731FAD6852008236A8 /* bmalloc.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = bmalloc.cpp; path = bmalloc/bmalloc.cpp; sourceTree = "<group>"; }; 178 176 0F5549EE1FB54701007FF75A /* IsoPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoPage.cpp; path = bmalloc/IsoPage.cpp; sourceTree = "<group>"; }; … … 469 467 isa = PBXGroup; 470 468 children = ( 471 0F3DA0131F267AB800342C08 /* AllocationKind.h */,472 469 140FA00219CE429C00FFD3C8 /* BumpRange.h */, 473 470 147DC6E21CA5B70B00724E8D /* Chunk.h */, … … 615 612 14DD78CB18F48D7500950702 /* PerProcess.h in Headers */, 616 613 0F7EB8261F9541B000F1ABCB /* IsoAllocatorInlines.h in Headers */, 617 0F3DA0141F267AB800342C08 /* AllocationKind.h in Headers */,618 614 14DD78CC18F48D7500950702 /* PerThread.h in Headers */, 619 615 14DD78CD18F48D7500950702 /* Range.h in Headers */, -
trunk/Source/bmalloc/bmalloc/DebugHeap.cpp
r220154 r227951 116 116 // https://bugs.webkit.org/show_bug.cgi?id=175086 117 117 118 void* DebugHeap::memalignLarge(size_t alignment, size_t size , AllocationKind allocationKind)118 void* DebugHeap::memalignLarge(size_t alignment, size_t size) 119 119 { 120 120 alignment = roundUpToMultipleOf(m_pageSize, alignment); … … 123 123 if (!result) 124 124 return nullptr; 125 if (allocationKind == AllocationKind::Virtual)126 vmDeallocatePhysicalPages(result, size);127 125 { 128 126 std::lock_guard<std::mutex> locker(m_lock); … … 132 130 } 133 131 134 void DebugHeap::freeLarge(void* base , AllocationKind)132 void DebugHeap::freeLarge(void* base) 135 133 { 136 134 if (!base) -
trunk/Source/bmalloc/bmalloc/DebugHeap.h
r220154 r227951 26 26 #pragma once 27 27 28 #include "AllocationKind.h"29 28 #include "StaticMutex.h" 30 29 #include <mutex> … … 46 45 void free(void*); 47 46 48 void* memalignLarge(size_t alignment, size_t , AllocationKind);49 void freeLarge(void* base , AllocationKind);47 void* memalignLarge(size_t alignment, size_t); 48 void freeLarge(void* base); 50 49 51 50 private: -
trunk/Source/bmalloc/bmalloc/Heap.cpp
r223239 r227951 421 421 } 422 422 423 LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size , AllocationKind allocationKind)423 LargeRange Heap::splitAndAllocate(LargeRange& range, size_t alignment, size_t size) 424 424 { 425 425 RELEASE_BASSERT(isActiveHeapKind(m_kind)); … … 442 442 } 443 443 444 switch (allocationKind) { 445 case AllocationKind::Virtual: 446 if (range.physicalSize()) 447 vmDeallocatePhysicalPagesSloppy(range.begin(), range.size()); 448 break; 449 450 case AllocationKind::Physical: 451 if (range.physicalSize() < range.size()) { 452 m_scavenger->scheduleIfUnderMemoryPressure(range.size()); 453 454 vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize()); 455 range.setPhysicalSize(range.size()); 456 } 457 break; 444 if (range.physicalSize() < range.size()) { 445 m_scavenger->scheduleIfUnderMemoryPressure(range.size()); 446 vmAllocatePhysicalPagesSloppy(range.begin() + range.physicalSize(), range.size() - range.physicalSize()); 447 range.setPhysicalSize(range.size()); 458 448 } 459 449 … … 470 460 } 471 461 472 void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size , AllocationKind allocationKind)462 void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t size) 473 463 { 474 464 RELEASE_BASSERT(isActiveHeapKind(m_kind)); … … 477 467 478 468 if (m_debugHeap) 479 return m_debugHeap->memalignLarge(alignment, size , allocationKind);469 return m_debugHeap->memalignLarge(alignment, size); 480 470 481 471 m_scavenger->didStartGrowing(); … … 496 486 return nullptr; 497 487 498 range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size , allocationKind);488 range = PerProcess<VMHeap>::get()->tryAllocateLargeChunk(alignment, size); 499 489 if (!range) 500 490 return nullptr; … … 505 495 } 506 496 507 return splitAndAllocate(range, alignment, size , allocationKind).begin();508 } 509 510 void* 
Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size , AllocationKind allocationKind)511 { 512 void* result = tryAllocateLarge(lock, alignment, size , allocationKind);497 return splitAndAllocate(range, alignment, size).begin(); 498 } 499 500 void* Heap::allocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size) 501 { 502 void* result = tryAllocateLarge(lock, alignment, size); 513 503 RELEASE_BASSERT(result); 514 504 return result; … … 531 521 size_t size = m_largeAllocated.remove(object.begin()); 532 522 LargeRange range = LargeRange(object, size); 533 splitAndAllocate(range, alignment, newSize , AllocationKind::Physical);523 splitAndAllocate(range, alignment, newSize); 534 524 535 525 m_scavenger->schedule(size); 536 526 } 537 527 538 void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object , AllocationKind allocationKind)528 void Heap::deallocateLarge(std::lock_guard<StaticMutex>&, void* object) 539 529 { 540 530 if (m_debugHeap) 541 return m_debugHeap->freeLarge(object , allocationKind);531 return m_debugHeap->freeLarge(object); 542 532 543 533 size_t size = m_largeAllocated.remove(object); 544 m_largeFree.add(LargeRange(object, size, allocationKind == AllocationKind::Physical ? size : 0));534 m_largeFree.add(LargeRange(object, size, size)); 545 535 m_scavenger->schedule(size); 546 536 } -
trunk/Source/bmalloc/bmalloc/Heap.h
r222982 r227951 27 27 #define Heap_h 28 28 29 #include "AllocationKind.h"30 29 #include "BumpRange.h" 31 30 #include "Chunk.h" … … 68 67 void deallocateLineCache(std::lock_guard<StaticMutex>&, LineCache&); 69 68 70 void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t , AllocationKind = AllocationKind::Physical);71 void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t , AllocationKind = AllocationKind::Physical);72 void deallocateLarge(std::lock_guard<StaticMutex>&, void* , AllocationKind = AllocationKind::Physical);69 void* allocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t); 70 void* tryAllocateLarge(std::lock_guard<StaticMutex>&, size_t alignment, size_t); 71 void deallocateLarge(std::lock_guard<StaticMutex>&, void*); 73 72 74 73 bool isLarge(std::lock_guard<StaticMutex>&, void*); … … 111 110 void mergeLargeRight(EndTag*&, BeginTag*&, Range&, bool& inVMHeap); 112 111 113 LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t , AllocationKind);112 LargeRange splitAndAllocate(LargeRange&, size_t alignment, size_t); 114 113 115 114 HeapKind m_kind; -
trunk/Source/bmalloc/bmalloc/IsoPage.cpp
r225125 r227951 33 33 void* IsoPageBase::allocatePageMemory() 34 34 { 35 return PerProcess<VMHeap>::get()->tryAllocateLargeChunk(pageSize, pageSize , AllocationKind::Physical).begin();35 return PerProcess<VMHeap>::get()->tryAllocateLargeChunk(pageSize, pageSize).begin(); 36 36 } 37 37 -
trunk/Source/bmalloc/bmalloc/VMAllocate.h
r225912 r227951 147 147 } 148 148 149 inline void vmZeroAndPurge(void* p, size_t vmSize) 150 { 151 vmValidate(p, vmSize); 152 // MAP_ANON guarantees the memory is zeroed. This will also cause 153 // page faults on accesses to this range following this call. 154 void* result = mmap(p, vmSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_FIXED | BMALLOC_NORESERVE, BMALLOC_VM_TAG, 0); 155 RELEASE_BASSERT(result == p); 156 } 157 149 158 // Allocates vmSize bytes at a specified power-of-two alignment. 150 159 // Use this function to create maskable memory regions. -
trunk/Source/bmalloc/bmalloc/VMHeap.cpp
r220118 r227951 34 34 } 35 35 36 LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size , AllocationKind allocationKind)36 LargeRange VMHeap::tryAllocateLargeChunk(size_t alignment, size_t size) 37 37 { 38 38 // We allocate VM in aligned multiples to increase the chances that … … 52 52 return LargeRange(); 53 53 54 if (allocationKind == AllocationKind::Virtual)55 vmDeallocatePhysicalPagesSloppy(memory, size);56 57 54 Chunk* chunk = static_cast<Chunk*>(memory); 58 55 -
trunk/Source/bmalloc/bmalloc/VMHeap.h
r220118 r227951 27 27 #define VMHeap_h 28 28 29 #include "AllocationKind.h"30 29 #include "Chunk.h" 31 30 #include "FixedVector.h" … … 50 49 VMHeap(std::lock_guard<StaticMutex>&); 51 50 52 LargeRange tryAllocateLargeChunk(size_t alignment, size_t , AllocationKind);51 LargeRange tryAllocateLargeChunk(size_t alignment, size_t); 53 52 }; 54 53 -
trunk/Source/bmalloc/bmalloc/bmalloc.cpp
r225125 r227951 40 40 } 41 41 42 void* tryLarge MemalignVirtual(size_t alignment, size_t size, HeapKind kind)42 void* tryLargeZeroedMemalignVirtual(size_t alignment, size_t size, HeapKind kind) 43 43 { 44 BASSERT(isPowerOfTwo(alignment)); 45 46 size_t pageSize = vmPageSize(); 47 alignment = roundUpToMultipleOf(pageSize, alignment); 48 size = roundUpToMultipleOf(pageSize, size); 49 44 50 kind = mapToActiveHeapKind(kind); 45 51 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 46 std::lock_guard<StaticMutex> lock(Heap::mutex()); 47 return heap.tryAllocateLarge(lock, alignment, size, AllocationKind::Virtual); 52 53 void* result; 54 { 55 std::lock_guard<StaticMutex> lock(Heap::mutex()); 56 result = heap.tryAllocateLarge(lock, alignment, size); 57 } 58 59 if (result) 60 vmZeroAndPurge(result, size); 61 return result; 48 62 } 49 63 … … 53 67 Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(kind); 54 68 std::lock_guard<StaticMutex> lock(Heap::mutex()); 55 heap.deallocateLarge(lock, object , AllocationKind::Virtual);69 heap.deallocateLarge(lock, object); 56 70 } 57 71 -
trunk/Source/bmalloc/bmalloc/bmalloc.h
r225125 r227951 70 70 } 71 71 72 // Returns null for failure 73 BEXPORT void* tryLargeMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary); 72 // Returns null on failure. 73 // This API will give you zeroed pages that are ready to be used. These pages 74 // will page fault on first access. It returns to you memory that initially only 75 // uses up virtual address space, not `size` bytes of physical memory. 76 BEXPORT void* tryLargeZeroedMemalignVirtual(size_t alignment, size_t size, HeapKind kind = HeapKind::Primary); 74 77 75 78 inline void free(void* object, HeapKind kind = HeapKind::Primary)
Note: See TracChangeset
for help on using the changeset viewer.