Changeset 199759 in WebKit
- Timestamp: Apr 19, 2016, 9:14:02 PM
- Location: trunk/Source/bmalloc
- Files: 7 edited
trunk/Source/bmalloc/ChangeLog (r199756 → r199759)

+2016-04-19  Geoffrey Garen  <ggaren@apple.com>
+
+        bmalloc: fix up overflow checks
+        https://bugs.webkit.org/show_bug.cgi?id=156780
+
+        Reviewed by Mark Lam.
+
+        We used to try to avoid overflow in large object math by setting a very
+        high limit on the largest large object. But that's a bit error-prone
+        since the check is far away from the math that might overflow -- and
+        we were missing some cases.
+
+        This patch removes the limit and instead checks at each math site.
+
+        * bmalloc/Allocator.cpp:
+        (bmalloc::Allocator::tryAllocate):
+        (bmalloc::Allocator::allocate):
+        (bmalloc::Allocator::reallocate):
+        (bmalloc::Allocator::allocateSlowCase): Remove the limit. tryAllocateLarge
+        will check for overflow for us.
+
+        * bmalloc/Chunk.h: This ASSERT was just totally wrong.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::tryAllocateLarge): Check for overflow when adding.
+
+        * bmalloc/Sizes.h:
+
+        * bmalloc/VMAllocate.h:
+        (bmalloc::tryVMAllocate): Check for overflow when adding.
+
+        * bmalloc/VMHeap.cpp:
+        (bmalloc::VMHeap::tryAllocateLargeChunk): Check for overflow when adding.
+
 2016-04-19  Geoffrey Garen  <ggaren@apple.com>
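The idiom the patch applies at each site relies on unsigned wraparound: rounding up adds at most the alignment minus one, and if that addition wraps past SIZE_MAX, the result is necessarily smaller than the input. A minimal standalone sketch of the idea; roundUpToMultipleOf here is a simplified stand-in for bmalloc's helper and, like it, assumes a power-of-two divisor:

    #include <cstddef>

    // Simplified stand-in for bmalloc's round-up helper; requires
    // divisor to be a power of two.
    static inline std::size_t roundUpToMultipleOf(std::size_t divisor, std::size_t x)
    {
        return (x + (divisor - 1)) & ~(divisor - 1);
    }

    // Returns 0 on overflow, standing in for the patch's nullptr /
    // XLargeRange() failure paths. (Callers are assumed to handle a
    // zero input separately, as Heap::tryAllocateLarge does.)
    static std::size_t tryRoundUp(std::size_t divisor, std::size_t x)
    {
        std::size_t rounded = roundUpToMultipleOf(divisor, x);
        // size_t arithmetic wraps modulo 2^N, so a rounded result smaller
        // than the input can only mean x + (divisor - 1) wrapped.
        if (rounded < x)
            return 0;
        return rounded;
    }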
trunk/Source/bmalloc/bmalloc/Allocator.cpp (r199746 → r199759)

     return allocate(size);
 
-    if (size <= largeMax) {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
-        return PerProcess<Heap>::getFastCase()->tryAllocateLarge(lock, alignment, size);
-    }
-
-    return nullptr;
+    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+    return PerProcess<Heap>::getFastCase()->tryAllocateLarge(lock, alignment, size);
 }

…

     return allocate(roundUpToMultipleOf(alignment, size));
 
-    if (size <= largeMax && alignment <= largeMax / 2) {
-        std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
-        return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size);
-    }
-
-    BCRASH();
-    return nullptr;
+    std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+    return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size);
 }

…

     return allocateLogSizeClass(size);
 
-    if (size <= largeMax)
-        return allocateLarge(size);
-
-    BCRASH();
-    return nullptr;
+    return allocateLarge(size);
 }
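With the largeMax guards gone, the failure policy is centralized in Heap: tryAllocate simply propagates the nullptr that tryAllocateLarge returns on overflow, and the BCRASH for the non-try paths presumably moves behind Heap::allocateLarge rather than sitting at every call site.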
trunk/Source/bmalloc/bmalloc/Chunk.h (r199746 → r199759)

     std::array<SmallPage, chunkSize / smallPageSize> m_pages;
 };
-
-static_assert(sizeof(Chunk) + largeMax <= chunkSize, "largeMax is too big");
 
 struct ChunkHash {
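For context on why this ASSERT was wrong: with the largeMax definition this patch deletes from Sizes.h (std::numeric_limits<size_t>::max() - largeAlignment), the sum sizeof(Chunk) + largeMax itself wraps around at compile time, so the static_assert compared a small wrapped value against chunkSize and passed vacuously instead of bounding anything.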
trunk/Source/bmalloc/bmalloc/Heap.cpp (r199756 → r199759)

 void* Heap::tryAllocateLarge(std::lock_guard<StaticMutex>& lock, size_t alignment, size_t size)
 {
-    BASSERT(size <= largeMax);
-    BASSERT(alignment <= largeMax / 2);
     BASSERT(isPowerOfTwo(alignment));
 
-    size = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
-    alignment = roundUpToMultipleOf<largeAlignment>(alignment);
+    size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
+    if (roundedSize < size) // Check for overflow
+        return nullptr;
+    size = roundedSize;
+
+    size_t roundedAlignment = roundUpToMultipleOf<largeAlignment>(alignment);
+    if (roundedAlignment < alignment) // Check for overflow
+        return nullptr;
+    alignment = roundedAlignment;
 
     XLargeRange range = m_largeFree.remove(alignment, size);
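Note why the patch introduces the roundedSize and roundedAlignment temporaries instead of updating size and alignment in place, as the old code did: the overflow comparison needs the pre-rounding value. A small standalone check of the wrap behavior, using an illustrative largeAlignment of 64 rather than the value Sizes.h derives:

    #include <cassert>
    #include <cstddef>
    #include <limits>

    static const std::size_t largeAlignment = 64; // Illustrative only.

    static std::size_t roundUpToMultipleOf(std::size_t divisor, std::size_t x)
    {
        return (x + (divisor - 1)) & ~(divisor - 1);
    }

    int main()
    {
        // A size this close to SIZE_MAX cannot be rounded up without wrapping.
        std::size_t size = std::numeric_limits<std::size_t>::max() - 16;
        std::size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size)
                                       : largeAlignment;
        assert(roundedSize < size); // This wrap is what the new check catches.
        return 0;
    }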
trunk/Source/bmalloc/bmalloc/Sizes.h (r199746 → r199759)

 static const size_t largeAlignment = smallMax / pageSizeWasteFactor;
 static const size_t largeAlignmentMask = largeAlignment - 1;
-static const size_t largeMax = std::numeric_limits<size_t>::max() - largeAlignment; // Make sure that rounding up to largeAlignment does not overflow.
 
 static const size_t deallocatorLogCapacity = 256;
trunk/Source/bmalloc/bmalloc/VMAllocate.h (r198829 → r199759)

     vmValidate(vmAlignment);
 
-    size_t mappedSize = vmAlignment - vmPageSize() + vmSize;
+    size_t mappedSize = vmAlignment + vmSize;
+    if (mappedSize < vmAlignment || mappedSize < vmSize) // Check for overflow
+        return nullptr;
+
     char* mapped = static_cast<char*>(tryVMAllocate(mappedSize));
     if (!mapped)
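Two things are going on above. First, the old mapped size, vmAlignment - vmPageSize() + vmSize, could also overflow but is awkward to test in a single comparison; the patch switches to a plain addition (reserving vmPageSize() more bytes than before) so that one post-add test suffices. Second, for unsigned a + b the sum wraps exactly when the result is smaller than either operand, so the two comparisons in the check are equivalent and testing both is defensive. A small demonstration with illustrative values:

    #include <cstddef>
    #include <cstdio>
    #include <limits>

    int main()
    {
        std::size_t vmSize = std::numeric_limits<std::size_t>::max() - 1024;
        std::size_t vmAlignment = std::size_t(1) << 16; // Hypothetical 64 KiB alignment.

        std::size_t mappedSize = vmAlignment + vmSize; // Wraps modulo 2^N.
        if (mappedSize < vmAlignment || mappedSize < vmSize)
            std::printf("overflow detected: mappedSize wrapped to %zu\n", mappedSize);
        return 0;
    }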
trunk/Source/bmalloc/bmalloc/VMHeap.cpp (r199746 → r199759)

     // We allocate VM in aligned multiples to increase the chances that
     // the OS will provide contiguous ranges that we can merge.
-    alignment = roundUpToMultipleOf<chunkSize>(alignment);
-    size = roundUpToMultipleOf<chunkSize>(size);
+    size_t roundedAlignment = roundUpToMultipleOf<chunkSize>(alignment);
+    if (roundedAlignment < alignment) // Check for overflow
+        return XLargeRange();
+    alignment = roundedAlignment;
+
+    size_t roundedSize = roundUpToMultipleOf<chunkSize>(size);
+    if (roundedSize < size) // Check for overflow
+        return XLargeRange();
+    size = roundedSize;
 
     void* memory = tryVMAllocate(alignment, size);
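This is the same round-then-compare idiom as Heap::tryAllocateLarge, with the failure value adapted to the return type: tryAllocateLargeChunk returns an XLargeRange, so overflow reports a default-constructed range (presumably the empty range callers already treat as failure) rather than nullptr.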