Changeset 173224 in webkit
- Timestamp:
- Sep 3, 2014 2:00:33 PM (10 years ago)
- Location:
- trunk/Source/bmalloc
- Files:
-
- 7 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/bmalloc/ChangeLog
r172608 r173224 1 2014-09-02 Geoffrey Garen <ggaren@apple.com> 2 3 bmalloc crashes on the EWS bots (due to bad large object allocation) 4 https://bugs.webkit.org/show_bug.cgi?id=136469 5 6 Reviewed by Andreas Kling. 7 8 It's possible to convince bmalloc to perform a bad large object allocation, 9 through these steps: 10 11 (1) Insert object A into freelist F0. 12 13 (2) Split, merge and split again A's neighbors such that object B is 14 inserted into freelist F0, with boundary tag and size equal to object A, 15 but pointer not completely equal to object A. Put object B at the head of F0. 16 17 (3) Allocate some other object from F0, swapping its position in the 18 freelist with object B, such that object A is now ahead of object B. 19 20 --> Now, the next allocation for size A/B will allocate object A, which 21 has a slightly wrong idea about where the object actually begins. 22 Immediately, you'll corrupt a little memory, and over time, you'll also 23 corrupt boundary tag metadata. 24 25 The solution is to store the begin pointer in the boundary tag. Luckily, 26 this doesn't make the tag any bigger, and it's not a noticeable slowdown 27 on MallocBench. 28 29 * bmalloc/Algorithm.h: 30 (bmalloc::rightShift): 31 * bmalloc/BeginTag.h: 32 (bmalloc::BeginTag::isInFreeList): This is the bug fix. Make sure to 33 validate the start pointer when popping off the free list. Through a 34 very uncommon set of steps, it is possible to have an item in the free 35 list that is valid by all accounts except for its start pointer. 36 37 * bmalloc/BoundaryTag.h: 38 (bmalloc::BoundaryTag::compactBegin): 39 (bmalloc::BoundaryTag::setRange): 40 (bmalloc::BoundaryTag::setSize): Deleted. Record a compact version of the 41 start pointer. We don't need the whole pointer -- just the offset, in 42 largeAlignment increments, into the relevant boundary tag bucket. 
43 44 * bmalloc/BoundaryTagInlines.h: 45 (bmalloc::validateNext): 46 (bmalloc::BoundaryTag::init): 47 (bmalloc::BoundaryTag::mergeLarge): 48 (bmalloc::BoundaryTag::splitLarge): 49 * bmalloc/SegregatedFreeList.cpp: 50 (bmalloc::SegregatedFreeList::insert): 51 (bmalloc::SegregatedFreeList::takeGreedy): 52 (bmalloc::SegregatedFreeList::take): Provide the whole range instead of 53 the size when establishing a boundary tag, as required by the new 54 interface. 55 56 * bmalloc/Sizes.h: 57 1 58 2014-08-14 Geoffrey Garen <ggaren@apple.com> 2 59 -
trunk/Source/bmalloc/bmalloc/Algorithm.h
r166893 r173224 47 47 return a < b ? a : b; 48 48 } 49 49 50 50 template<typename T> inline constexpr T mask(T value, uintptr_t mask) 51 51 { 52 52 return reinterpret_cast<T>(reinterpret_cast<uintptr_t>(value) & mask); 53 } 54 55 template<typename T> inline constexpr T rightShift(T value, uintptr_t shift) 56 { 57 return reinterpret_cast<T>(reinterpret_cast<uintptr_t>(value) >> shift); 53 58 } 54 59 -
trunk/Source/bmalloc/bmalloc/BeginTag.h
r166893 r173224 33 33 class BeginTag : public BoundaryTag { 34 34 public: 35 bool isInFreeList( size_t);35 bool isInFreeList(const Range&); 36 36 }; 37 37 38 inline bool BeginTag::isInFreeList( size_t size)38 inline bool BeginTag::isInFreeList(const Range& range) 39 39 { 40 return isFree() && !isEnd() && this->size() == size;40 return isFree() && !isEnd() && this->size() == range.size() && this->compactBegin() == compactBegin(range); 41 41 } 42 42 -
trunk/Source/bmalloc/bmalloc/BoundaryTag.h
r166956 r173224 28 28 29 29 #include "BAssert.h" 30 #include "Range.h" 30 31 #include "Sizes.h" 31 32 … … 42 43 static Range deallocate(void*); 43 44 static void allocate(size_t, Range&, Range& leftover, bool& hasPhysicalPages); 45 static unsigned compactBegin(const Range&); 44 46 45 47 bool isXLarge() { return m_size == xLargeMarker; } … … 59 61 60 62 size_t size() { return m_size; } 61 void setSize(size_t); 63 unsigned compactBegin() { return m_compactBegin; } 64 65 void setRange(const Range&); 62 66 63 67 EndTag* prev(); … … 66 70 private: 67 71 static const size_t flagBits = 3; 68 static const size_t sizeBits = bitCount<unsigned>() - flagBits; 72 static const size_t compactBeginBits = 5; 73 static const size_t sizeBits = bitCount<unsigned>() - flagBits - compactBeginBits; 69 74 static const size_t xLargeMarker = 1; // This size is unused because our minimum object size is greater than it. 70 75 71 76 static_assert(largeMin > xLargeMarker, "largeMin must provide enough umbrella to fit xLargeMarker."); 77 static_assert((1 << compactBeginBits) - 1 >= largeMin / largeAlignment, "compactBegin must be encodable in a BoundaryTag."); 72 78 static_assert((1 << sizeBits) - 1 >= largeMax, "largeMax must be encodable in a BoundaryTag."); 73 79 … … 80 86 bool m_isEnd: 1; 81 87 bool m_hasPhysicalPages: 1; 88 unsigned m_compactBegin: compactBeginBits; 82 89 unsigned m_size: sizeBits; 83 90 }; 84 91 85 inline void BoundaryTag::setSize(size_t size)92 inline unsigned BoundaryTag::compactBegin(const Range& range) 86 93 { 87 m_size = static_cast<unsigned>(size); 88 BASSERT(this->size() == size); 94 return static_cast<unsigned>( 95 reinterpret_cast<uintptr_t>( 96 rightShift( 97 mask(range.begin(), largeMin - 1), largeAlignmentShift))); 98 } 99 100 inline void BoundaryTag::setRange(const Range& range) 101 { 102 m_compactBegin = compactBegin(range); 103 m_size = static_cast<unsigned>(range.size()); 104 BASSERT(this->size() == range.size()); 89 105 BASSERT(!isXLarge()); 90 106 } -
trunk/Source/bmalloc/bmalloc/BoundaryTagInlines.h
r166956 r173224 65 65 static inline void validateNext(BeginTag* next, const Range& range) 66 66 { 67 if (next->size() == largeMin && !next-> isFree()) // Right sentinel tag.67 if (next->size() == largeMin && !next->compactBegin() && !next->isFree()) // Right sentinel tag. 68 68 return; 69 69 … … 85 85 86 86 BeginTag* beginTag = LargeChunk::beginTag(range.begin()); 87 beginTag->set Size(range.size());87 beginTag->setRange(range); 88 88 beginTag->setFree(true); 89 89 beginTag->setHasPhysicalPages(false); … … 98 98 EndTag* leftSentinel = beginTag->prev(); 99 99 BASSERT(leftSentinel >= static_cast<void*>(chunk)); 100 leftSentinel->set Size(largeMin);100 leftSentinel->setRange(Range(nullptr, largeMin)); 101 101 leftSentinel->setFree(false); 102 102 103 103 BeginTag* rightSentinel = endTag->next(); 104 104 BASSERT(rightSentinel < static_cast<void*>(range.begin())); 105 rightSentinel->set Size(largeMin);105 rightSentinel->setRange(Range(nullptr, largeMin)); 106 106 rightSentinel->setFree(false); 107 107 … … 151 151 mergeLargeRight(endTag, next, range, hasPhysicalPages); 152 152 153 beginTag->set Size(range.size());153 beginTag->setRange(range); 154 154 beginTag->setFree(true); 155 155 beginTag->setHasPhysicalPages(hasPhysicalPages); … … 176 176 INLINE void BoundaryTag::splitLarge(BeginTag* beginTag, size_t size, EndTag*& endTag, Range& range, Range& leftover) 177 177 { 178 beginTag->setSize(size); 178 leftover = Range(range.begin() + size, range.size() - size); 179 range = Range(range.begin(), size); 180 181 beginTag->setRange(range); 179 182 180 183 EndTag* splitEndTag = LargeChunk::endTag(range.begin(), size); … … 182 185 *splitEndTag = *beginTag; 183 186 184 leftover = Range(range.begin() + size, range.size() - size);185 187 BASSERT(leftover.size() >= largeMin); 186 188 BeginTag* leftoverBeginTag = LargeChunk::beginTag(leftover.begin()); 187 189 *leftoverBeginTag = *beginTag; 188 leftoverBeginTag->set Size(leftover.size());190 leftoverBeginTag->setRange(leftover); 189 
191 190 192 if (leftoverBeginTag != static_cast<BoundaryTag*>(endTag)) 191 193 *endTag = *leftoverBeginTag; 192 194 193 validate(beginTag->prev(), Range(range.begin(), size), leftoverBeginTag);195 validate(beginTag->prev(), range, leftoverBeginTag); 194 196 validate(leftoverBeginTag->prev(), leftover, endTag->next()); 195 197 196 range = Range(range.begin(), size);197 198 endTag = splitEndTag; 198 199 } -
trunk/Source/bmalloc/bmalloc/SegregatedFreeList.cpp
r166956 r173224 40 40 IF_DEBUG( 41 41 BeginTag* beginTag = LargeChunk::beginTag(range.begin()); 42 BASSERT(beginTag->isInFreeList(range .size()));42 BASSERT(beginTag->isInFreeList(range)); 43 43 ) 44 44 … … 67 67 // so we need to validate each free list entry before using it. 68 68 BeginTag* beginTag = LargeChunk::beginTag(range.begin()); 69 if (!beginTag->isInFreeList(range .size())) {69 if (!beginTag->isInFreeList(range)) { 70 70 list.pop(i); 71 71 continue; … … 115 115 // we need to validate each free list entry before using it. 116 116 BeginTag* beginTag = LargeChunk::beginTag(range.begin()); 117 if (!beginTag->isInFreeList(range .size())) {117 if (!beginTag->isInFreeList(range)) { 118 118 list.pop(i); 119 119 continue; -
trunk/Source/bmalloc/bmalloc/Sizes.h
r167570 r173224 69 69 70 70 static const size_t largeAlignment = 64; 71 static const size_t largeAlignmentShift = 6; 72 static_assert(1 << largeAlignmentShift == largeAlignment, "largeAlignmentShift must be log2(largeAlignment)."); 71 73 static const size_t largeMax = largeChunkSize * 99 / 100; // Plenty of room for metadata. 72 74 static const size_t largeMin = 1024;
Note: See TracChangeset for help on using the changeset viewer.