Changeset 142536 in webkit
- Timestamp: Feb 11, 2013, 3:53:22 PM
- Location: trunk/Source/WTF
- Files: 3 edited
trunk/Source/WTF/ChangeLog
r142533 → r142536

+2013-02-08  Oliver Hunt  <oliver@apple.com>
+
+        Harden FastMalloc (again)
+        https://bugs.webkit.org/show_bug.cgi?id=109334
+
+        Reviewed by Mark Hahnenberg.
+
+        Re-implement hardening of linked lists in TCMalloc.
+
+        In order to keep heap introspection working, we need to thread the
+        heap entropy manually as the introspection process can't use the
+        address of a global in determining the mask. Given we now have to
+        thread a value through anyway, I've stopped relying on ASLR for entropy
+        and am simply using arc4random() on darwin, and time + ASLR everywhere
+        else.
+
+        I've also made an explicit struct type for the FastMalloc singly linked
+        lists, as it seemed like the only way to reliably distinguish between
+        void*'s that were lists vs. void* that were not. This also made it
+        somewhat easier to reason about things across processes.
+
+        Verified that all the introspection tools work as expected.
+
+        * wtf/FastMalloc.cpp:
+        (WTF::internalEntropyValue):
+        (WTF):
+        (HardenedSLL):
+        (WTF::HardenedSLL::create):
+        (WTF::HardenedSLL::null):
+        (WTF::HardenedSLL::setValue):
+        (WTF::HardenedSLL::value):
+        (WTF::HardenedSLL::operator!):
+        (WTF::HardenedSLL::operator UnspecifiedBoolType):
+        (TCEntry):
+        (WTF::SLL_Next):
+        (WTF::SLL_SetNext):
+        (WTF::SLL_Push):
+        (WTF::SLL_Pop):
+        (WTF::SLL_PopRange):
+        (WTF::SLL_PushRange):
+        (WTF::SLL_Size):
+        (PageHeapAllocator):
+        (WTF::PageHeapAllocator::Init):
+        (WTF::PageHeapAllocator::New):
+        (WTF::PageHeapAllocator::Delete):
+        (WTF::PageHeapAllocator::recordAdministrativeRegions):
+        (WTF::Span::next):
+        (WTF::Span::remoteNext):
+        (WTF::Span::prev):
+        (WTF::Span::setNext):
+        (WTF::Span::setPrev):
+        (Span):
+        (WTF::DLL_Init):
+        (WTF::DLL_Remove):
+        (WTF::DLL_IsEmpty):
+        (WTF::DLL_Length):
+        (WTF::DLL_Prepend):
+        (TCMalloc_Central_FreeList):
+        (WTF::TCMalloc_Central_FreeList::enumerateFreeObjects):
+        (WTF::TCMalloc_Central_FreeList::entropy):
+        (TCMalloc_PageHeap):
+        (WTF::TCMalloc_PageHeap::init):
+        (WTF::TCMalloc_PageHeap::scavenge):
+        (WTF::TCMalloc_PageHeap::New):
+        (WTF::TCMalloc_PageHeap::AllocLarge):
+        (WTF::TCMalloc_PageHeap::Carve):
+        (WTF::TCMalloc_PageHeap::Delete):
+        (WTF::TCMalloc_PageHeap::ReturnedBytes):
+        (WTF::TCMalloc_PageHeap::Check):
+        (WTF::TCMalloc_PageHeap::CheckList):
+        (WTF::TCMalloc_PageHeap::ReleaseFreeList):
+        (TCMalloc_ThreadCache_FreeList):
+        (WTF::TCMalloc_ThreadCache_FreeList::Init):
+        (WTF::TCMalloc_ThreadCache_FreeList::empty):
+        (WTF::TCMalloc_ThreadCache_FreeList::Push):
+        (WTF::TCMalloc_ThreadCache_FreeList::PushRange):
+        (WTF::TCMalloc_ThreadCache_FreeList::PopRange):
+        (WTF::TCMalloc_ThreadCache_FreeList::Pop):
+        (WTF::TCMalloc_ThreadCache_FreeList::enumerateFreeObjects):
+        (TCMalloc_ThreadCache):
+        (WTF::TCMalloc_Central_FreeList::Init):
+        (WTF::TCMalloc_Central_FreeList::ReleaseListToSpans):
+        (WTF::TCMalloc_Central_FreeList::ReleaseToSpans):
+        (WTF::TCMalloc_Central_FreeList::InsertRange):
+        (WTF::TCMalloc_Central_FreeList::RemoveRange):
+        (WTF::TCMalloc_Central_FreeList::FetchFromSpansSafe):
+        (WTF::TCMalloc_Central_FreeList::FetchFromSpans):
+        (WTF::TCMalloc_Central_FreeList::Populate):
+        (WTF::TCMalloc_ThreadCache::Init):
+        (WTF::TCMalloc_ThreadCache::Deallocate):
+        (WTF::TCMalloc_ThreadCache::FetchFromCentralCache):
+        (WTF::TCMalloc_ThreadCache::ReleaseToCentralCache):
+        (WTF::TCMalloc_ThreadCache::InitModule):
+        (WTF::TCMalloc_ThreadCache::NewHeap):
+        (WTF::TCMalloc_ThreadCache::CreateCacheIfNecessary):
+        * wtf/MallocZoneSupport.h:
+        (RemoteMemoryReader):
+
 2013-02-11  Enrica Casucci  <enrica@apple.com>
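For readers unfamiliar with the technique the entry describes, here is a minimal stand-alone model of the hardened free list. It is an illustration, not part of the change: Node, push, pop, maskPointer, and the constant entropy value are invented for the example; only MaskKeyShift and the rotate-and-XOR shape come from the patch.

// Minimal sketch of a hardened singly linked list, assuming the masking
// scheme of XOR_MASK_PTR_WITH_KEY: a stored next pointer is XORed with
// the rotated slot address and a per-process entropy value. The mask is
// its own inverse, so one helper serves both store and load.
#include <cassert>
#include <cstdint>

static const int MaskKeyShift = 13;

static uintptr_t rotate(uintptr_t v, unsigned amount)
{
    return (v >> amount) | (v << (sizeof(v) * 8 - amount));
}

static void* maskPointer(void* ptr, const void* key, uintptr_t entropy)
{
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr)
        ^ (rotate(reinterpret_cast<uintptr_t>(key), MaskKeyShift) ^ entropy));
}

struct Node { void* maskedNext; };

static void push(Node*& head, Node* node, uintptr_t entropy)
{
    node->maskedNext = maskPointer(head, node, entropy); // cf. SLL_SetNext
    head = node;
}

static Node* pop(Node*& head, uintptr_t entropy)
{
    Node* result = head;
    head = static_cast<Node*>(maskPointer(result->maskedNext, result, entropy)); // cf. SLL_Next
    return result;
}

int main()
{
    uintptr_t entropy = 0x9e3779b9u; // illustrative; the patch derives this at runtime
    Node a, b;
    Node* freeList = 0;
    push(freeList, &a, entropy);
    push(freeList, &b, entropy);
    assert(pop(freeList, entropy) == &b);
    assert(pop(freeList, entropy) == &a);
    assert(freeList == 0);
    return 0;
}

The point of the construction: an attacker who overwrites maskedNext cannot choose where the next pop lands without knowing both the slot address and the entropy value.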
trunk/Source/WTF/wtf/FastMalloc.cpp
r141955 → r142536

 
 #include "Assertions.h"
+#include "CurrentTime.h"
+
 #include <limits>
 #if OS(WINDOWS)
…
 // Harden the pointers stored in the TCMalloc linked lists
 #if COMPILER(GCC)
-#define ENABLE_TCMALLOC_HARDENING 0
+#define ENABLE_TCMALLOC_HARDENING 1
 #endif
…
 static const char kLLHardeningMask = 0;
 enum {
-    MaskAddrShift = 8,
-    MaskKeyShift = 4
+    MaskKeyShift = 13
 };
+
+template <unsigned> struct EntropySource;
+template <> struct EntropySource<4> {
+    static uint32_t value()
+    {
+#if OS(DARWIN)
+        return arc4random();
+#else
+        return static_cast<uint32_t>(static_cast<uintptr_t>(currentTime() * 10000) ^ reinterpret_cast<uintptr_t>(&kLLHardeningMask));
+#endif
+    }
+};
+
+template <> struct EntropySource<8> {
+    static uint64_t value()
+    {
+        return EntropySource<4>::value() | (static_cast<uint64_t>(EntropySource<4>::value()) << 32);
+    }
+};
+
+static ALWAYS_INLINE uintptr_t internalEntropyValue() {
+    static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value();
+    ASSERT(value);
+    return value;
+}
+
+#define HARDENING_ENTROPY internalEntropyValue()
 #define ROTATE_VALUE(value, amount) (((value) >> (amount)) | ((value) << (sizeof(value) * 8 - (amount))))
-#define XOR_MASK_PTR_WITH_KEY(ptr, key) (reinterpret_cast<typeof(ptr)>(reinterpret_cast<uintptr_t>(ptr)^ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^ROTATE_VALUE(reinterpret_cast<uintptr_t>(&kLLHardeningMask), MaskAddrShift)))
-#else
-#define XOR_MASK_PTR_WITH_KEY(ptr, key) (ptr)
+#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (reinterpret_cast<typeof(ptr)>(reinterpret_cast<uintptr_t>(ptr)^(ROTATE_VALUE(reinterpret_cast<uintptr_t>(key), MaskKeyShift)^entropy)))
+
+#else
+#define XOR_MASK_PTR_WITH_KEY(ptr, key, entropy) (((void)entropy), ((void)key), ptr)
+#define HARDENING_ENTROPY 0
 #endif
…
 static size_t class_to_pages[kNumClasses];
 
+// Hardened singly linked list.  We make this a class to allow compiler to
+// statically prevent mismatching hardened and non-hardened list
+class HardenedSLL {
+public:
+    static ALWAYS_INLINE HardenedSLL create(void* value)
+    {
+        HardenedSLL result;
+        result.m_value = value;
+        return result;
+    }
+
+    static ALWAYS_INLINE HardenedSLL null()
+    {
+        HardenedSLL result;
+        result.m_value = 0;
+        return result;
+    }
+
+    ALWAYS_INLINE void setValue(void* value) { m_value = value; }
+    ALWAYS_INLINE void* value() const { return m_value; }
+    ALWAYS_INLINE bool operator!() const { return !m_value; }
+    typedef void* (HardenedSLL::*UnspecifiedBoolType);
+    ALWAYS_INLINE operator UnspecifiedBoolType() const { return m_value ? &HardenedSLL::m_value : 0; }
+
+private:
+    void* m_value;
+};
+
 // TransferCache is used to cache transfers of num_objects_to_move[size_class]
 // back and forth between thread caches and the central cache for a given size
 // class.
 struct TCEntry {
-  void *head;  // Head of chain of objects.
-  void *tail;  // Tail of chain of objects.
+  HardenedSLL head;  // Head of chain of objects.
+  HardenedSLL tail;  // Tail of chain of objects.
 };
 // A central cache freelist can have anywhere from 0 to kNumTransferEntries
…
 }
 
-// Some very basic linked list functions for dealing with using void * as
-// storage.
-
-static inline void *SLL_Next(void *t) {
-  return XOR_MASK_PTR_WITH_KEY(*(reinterpret_cast<void**>(t)), t);
-}
-
-static inline void SLL_SetNext(void *t, void *n) {
-  *(reinterpret_cast<void**>(t)) = XOR_MASK_PTR_WITH_KEY(n, t);
-}
-
-static inline void SLL_Push(void **list, void *element) {
-  SLL_SetNext(element, *list);
+// Functions for using our simple hardened singly linked list
+static ALWAYS_INLINE HardenedSLL SLL_Next(HardenedSLL t, uintptr_t entropy) {
+  return HardenedSLL::create(XOR_MASK_PTR_WITH_KEY(*(reinterpret_cast<void**>(t.value())), t.value(), entropy));
+}
+
+static ALWAYS_INLINE void SLL_SetNext(HardenedSLL t, HardenedSLL n, uintptr_t entropy) {
+  *(reinterpret_cast<void**>(t.value())) = XOR_MASK_PTR_WITH_KEY(n.value(), t.value(), entropy);
+}
+
+static ALWAYS_INLINE void SLL_Push(HardenedSLL* list, HardenedSLL element, uintptr_t entropy) {
+  SLL_SetNext(element, *list, entropy);
   *list = element;
 }
 
-static inline void *SLL_Pop(void **list) {
-  void *result = *list;
-  *list = SLL_Next(*list);
+static ALWAYS_INLINE HardenedSLL SLL_Pop(HardenedSLL *list, uintptr_t entropy) {
+  HardenedSLL result = *list;
+  *list = SLL_Next(*list, entropy);
   return result;
 }
-
 
 // Remove N elements from a linked list to which head points.  head will be
 // modified to point to the new head.  start and end will point to the first
 // and last nodes of the range.  Note that end will point to NULL after this
 // function is called.
-static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
+
+static ALWAYS_INLINE void SLL_PopRange(HardenedSLL* head, int N, HardenedSLL *start, HardenedSLL *end, uintptr_t entropy) {
   if (N == 0) {
-    *start = NULL;
-    *end = NULL;
+    *start = HardenedSLL::null();
+    *end = HardenedSLL::null();
     return;
   }
 
-  void *tmp = *head;
+  HardenedSLL tmp = *head;
   for (int i = 1; i < N; ++i) {
-    tmp = SLL_Next(tmp);
+    tmp = SLL_Next(tmp, entropy);
   }
 
   *start = *head;
   *end = tmp;
-  *head = SLL_Next(tmp);
+  *head = SLL_Next(tmp, entropy);
   // Unlink range from list.
-  SLL_SetNext(tmp, NULL);
-}
-
-static inline void SLL_PushRange(void **head, void *start, void *end) {
+  SLL_SetNext(tmp, HardenedSLL::null(), entropy);
+}
+
+static ALWAYS_INLINE void SLL_PushRange(HardenedSLL *head, HardenedSLL start, HardenedSLL end, uintptr_t entropy) {
   if (!start) return;
-  SLL_SetNext(end, *head);
+  SLL_SetNext(end, *head, entropy);
   *head = start;
 }
 
-static inline size_t SLL_Size(void *head) {
+static ALWAYS_INLINE size_t SLL_Size(HardenedSLL head, uintptr_t entropy) {
   int count = 0;
   while (head) {
     count++;
-    head = SLL_Next(head);
+    head = SLL_Next(head, entropy);
   }
   return count;
…
 
   // Linked list of all regions allocated by this allocator
-  void* allocated_regions_;
+  HardenedSLL allocated_regions_;
 
   // Free list of already carved objects
-  void* free_list_;
+  HardenedSLL free_list_;
 
   // Number of allocated but unfreed objects
   int inuse_;
+  uintptr_t entropy_;
 
  public:
-  void Init() {
+  void Init(uintptr_t entropy) {
     ASSERT(kAlignedSize <= kAllocIncrement);
     inuse_ = 0;
-    allocated_regions_ = 0;
+    allocated_regions_ = HardenedSLL::null();
     free_area_ = NULL;
     free_avail_ = 0;
-    free_list_ = NULL;
+    free_list_.setValue(NULL);
+    entropy_ = entropy;
   }
…
     // Consult free list
     void* result;
-    if (free_list_ != NULL) {
-      result = free_list_;
-      free_list_ = *(reinterpret_cast<void**>(result));
+    if (free_list_) {
+      result = free_list_.value();
+      free_list_ = SLL_Next(free_list_, entropy_);
     } else {
       if (free_avail_ < kAlignedSize) {
…
           CRASH();
 
-        *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
-        allocated_regions_ = new_allocation;
+        HardenedSLL new_head = HardenedSLL::create(new_allocation);
+        SLL_SetNext(new_head, allocated_regions_, entropy_);
+        allocated_regions_ = new_head;
         free_area_ = new_allocation + kAlignedSize;
         free_avail_ = kAllocIncrement - kAlignedSize;
…
 
   void Delete(T* p) {
-    *(reinterpret_cast<void**>(p)) = free_list_;
-    free_list_ = p;
+    HardenedSLL new_head = HardenedSLL::create(p);
+    SLL_SetNext(new_head, free_list_, entropy_);
+    free_list_ = new_head;
     inuse_--;
   }
…
   void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
   {
-    for (void* adminAllocation = allocated_regions_; adminAllocation; adminAllocation = reader.nextEntryInLinkedList(reinterpret_cast<void**>(adminAllocation)))
-      recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation), kAllocIncrement);
+    for (HardenedSLL adminAllocation = allocated_regions_; adminAllocation; adminAllocation.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(adminAllocation.value()), entropy_)))
+      recorder.recordRegion(reinterpret_cast<vm_address_t>(adminAllocation.value()), kAllocIncrement);
   }
 #endif
…
   PageID start;          // Starting page number
   Length length;         // Number of pages in span
-  Span* next() const { return XOR_MASK_PTR_WITH_KEY(m_next, this); }
-  Span* prev() const { return XOR_MASK_PTR_WITH_KEY(m_prev, this); }
-  void setNext(Span* next) { m_next = XOR_MASK_PTR_WITH_KEY(next, this); }
-  void setPrev(Span* prev) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this); }
+  Span* next(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, this, entropy); }
+  Span* remoteNext(const Span* remoteSpanPointer, uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_next, remoteSpanPointer, entropy); }
+  Span* prev(uintptr_t entropy) const { return XOR_MASK_PTR_WITH_KEY(m_prev, this, entropy); }
+  void setNext(Span* next, uintptr_t entropy) { m_next = XOR_MASK_PTR_WITH_KEY(next, this, entropy); }
+  void setPrev(Span* prev, uintptr_t entropy) { m_prev = XOR_MASK_PTR_WITH_KEY(prev, this, entropy); }
 
 private:
   Span* m_next;           // Used when in link list
   Span* m_prev;           // Used when in link list
 public:
-  void* objects;              // Linked list of free objects
+  HardenedSLL objects;        // Linked list of free objects
   unsigned int free : 1;      // Is the span free
 #ifndef NO_TCMALLOC_SAMPLES
…
 // -------------------------------------------------------------------------
 
-static inline void DLL_Init(Span* list) {
-  list->setNext(list);
-  list->setPrev(list);
-}
-
-static inline void DLL_Remove(Span* span) {
-  span->prev()->setNext(span->next());
-  span->next()->setPrev(span->prev());
-  span->setPrev(NULL);
-  span->setNext(NULL);
-}
-
-static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
-  return list->next() == list;
-}
-
-static int DLL_Length(const Span* list) {
+static inline void DLL_Init(Span* list, uintptr_t entropy) {
+  list->setNext(list, entropy);
+  list->setPrev(list, entropy);
+}
+
+static inline void DLL_Remove(Span* span, uintptr_t entropy) {
+  span->prev(entropy)->setNext(span->next(entropy), entropy);
+  span->next(entropy)->setPrev(span->prev(entropy), entropy);
+  span->setPrev(NULL, entropy);
+  span->setNext(NULL, entropy);
+}
+
+static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list, uintptr_t entropy) {
+  return list->next(entropy) == list;
+}
+
+static int DLL_Length(const Span* list, uintptr_t entropy) {
   int result = 0;
-  for (Span* s = list->next(); s != list; s = s->next()) {
+  for (Span* s = list->next(entropy); s != list; s = s->next(entropy)) {
     result++;
   }
…
 #endif
 
-static inline void DLL_Prepend(Span* list, Span* span) {
-  ASSERT(span->next() == NULL);
-  ASSERT(span->prev() == NULL);
-  span->setNext(list->next());
-  span->setPrev(list);
-  list->next()->setPrev(span);
-  list->setNext(span);
+static inline void DLL_Prepend(Span* list, Span* span, uintptr_t entropy) {
+  span->setNext(list->next(entropy), entropy);
+  span->setPrev(list, entropy);
+  list->next(entropy)->setPrev(span, entropy);
+  list->setNext(span, entropy);
 }
…
 class TCMalloc_Central_FreeList {
  public:
-  void Init(size_t cl);
+  void Init(size_t cl, uintptr_t entropy);
 
   // These methods all do internal locking.
 
   // Insert the specified range into the central freelist.  N is the number of
   // elements in the range.
-  void InsertRange(void *start, void *end, int N);
+  void InsertRange(HardenedSLL start, HardenedSLL end, int N);
 
   // Returns the actual number of fetched elements into N.
-  void RemoveRange(void **start, void **end, int *N);
+  void RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N);
 
   // Returns the number of free objects in cache.
…
   void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
   {
-    for (Span* span = &empty_; span && span != &empty_; span = (span->next() ? reader(span->next()) : 0))
-      ASSERT(!span->objects);
+    {
+      static const ptrdiff_t emptyOffset = reinterpret_cast<const char*>(&empty_) - reinterpret_cast<const char*>(this);
+      Span* remoteEmpty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + emptyOffset);
+      Span* remoteSpan = nonempty_.remoteNext(remoteEmpty, entropy_);
+      for (Span* span = reader(remoteEmpty); span && span != &empty_; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0))
+        ASSERT(!span->objects);
+    }
 
     ASSERT(!nonempty_.objects);
…
 
     Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
-    Span* remoteSpan = nonempty_.next();
-
-    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next(), span = (span->next() ? reader(span->next()) : 0)) {
-      for (void* nextObject = span->objects; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
-        finder.visit(nextObject);
-    }
-  }
-#endif
-
+    Span* remoteSpan = nonempty_.remoteNext(remoteNonempty, entropy_);
+
+    for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->remoteNext(remoteSpan, entropy_), span = (remoteSpan ? reader(remoteSpan) : 0)) {
+      for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_))) {
+        finder.visit(nextObject.value());
+      }
+    }
+  }
+#endif
+
+  uintptr_t entropy() const { return entropy_; }
  private:
   // REQUIRES: lock_ is held
   // Remove object from cache and return.
   // Return NULL if no free entries in cache.
-  void* FetchFromSpans();
+  HardenedSLL FetchFromSpans();
 
   // REQUIRES: lock_ is held
…
   // from pageheap if cache is empty.  Only returns
   // NULL on allocation failure.
-  void* FetchFromSpansSafe();
+  HardenedSLL FetchFromSpansSafe();
 
   // REQUIRES: lock_ is held
   // Release a linked list of objects to spans.
   // May temporarily release lock_.
-  void ReleaseListToSpans(void *start);
+  void ReleaseListToSpans(HardenedSLL start);
 
   // REQUIRES: lock_ is held
   // Release an object to spans.
   // May temporarily release lock_.
-  ALWAYS_INLINE void ReleaseToSpans(void* object);
+  ALWAYS_INLINE void ReleaseToSpans(HardenedSLL object);
 
   // REQUIRES: lock_ is held
…
   // on a given size class.
   int32_t cache_size_;
+  uintptr_t entropy_;
 };
…
   uintptr_t free_pages_;
 
+  // Used for hardening
+  uintptr_t entropy_;
+
   // Bytes allocated from system
   uint64_t system_bytes_;
…
   free_pages_ = 0;
   system_bytes_ = 0;
+  entropy_ = HARDENING_ENTROPY;
 
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
…
   scavenge_index_ = kMaxPages-1;
   COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
-  DLL_Init(&large_.normal);
-  DLL_Init(&large_.returned);
+  DLL_Init(&large_.normal, entropy_);
+  DLL_Init(&large_.returned, entropy_);
   for (size_t i = 0; i < kMaxPages; i++) {
-    DLL_Init(&free_[i].normal);
-    DLL_Init(&free_[i].returned);
+    DLL_Init(&free_[i].normal, entropy_);
+    DLL_Init(&free_[i].returned, entropy_);
   }
…
     // If the span size is bigger than kMinSpanListsWithSpans pages return all the spans in the list, else return all but 1 span.
     // Return only 50% of a spanlist at a time so spans of size 1 are not the only ones left.
-    size_t length = DLL_Length(&slist->normal);
+    size_t length = DLL_Length(&slist->normal, entropy_);
     size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
-    for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount; j++) {
-      Span* s = slist->normal.prev();
-      DLL_Remove(s);
+    for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal, entropy_) && free_committed_pages_ > targetPageCount; j++) {
+      Span* s = slist->normal.prev(entropy_);
+      DLL_Remove(s, entropy_);
       ASSERT(!s->decommitted);
       if (!s->decommitted) {
…
         s->decommitted = true;
       }
-      DLL_Prepend(&slist->returned, s);
+      DLL_Prepend(&slist->returned, s, entropy_);
     }
   }
…
     Span* ll = NULL;
     bool released = false;
-    if (!DLL_IsEmpty(&free_[s].normal)) {
+    if (!DLL_IsEmpty(&free_[s].normal, entropy_)) {
       // Found normal span
       ll = &free_[s].normal;
-    } else if (!DLL_IsEmpty(&free_[s].returned)) {
+    } else if (!DLL_IsEmpty(&free_[s].returned, entropy_)) {
       // Found returned span; reallocate it
       ll = &free_[s].returned;
…
     }
 
-    Span* result = ll->next();
+    Span* result = ll->next(entropy_);
     Carve(result, n, released);
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
…
 
   // Search through normal list
-  for (Span* span = large_.normal.next();
+  for (Span* span = large_.normal.next(entropy_);
        span != &large_.normal;
-       span = span->next()) {
+       span = span->next(entropy_)) {
     if (span->length >= n) {
       if ((best == NULL)
…
 
   // Search through released list in case it has a better fit
-  for (Span* span = large_.returned.next();
+  for (Span* span = large_.returned.next(entropy_);
        span != &large_.returned;
-       span = span->next()) {
+       span = span->next(entropy_)) {
     if (span->length >= n) {
       if ((best == NULL)
…
 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
   ASSERT(n > 0);
-  DLL_Remove(span);
+  DLL_Remove(span, entropy_);
   span->free = 0;
   Event(span, 'A', n);
…
     SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
     Span* dst = &listpair->normal;
-    DLL_Prepend(dst, leftover);
+    DLL_Prepend(dst, leftover, entropy_);
 
     span->length = n;
…
 #endif
     mergeDecommittedStates(span, prev);
-    DLL_Remove(prev);
+    DLL_Remove(prev, entropy_);
     DeleteSpan(prev);
     span->start -= len;
…
 #endif
     mergeDecommittedStates(span, next);
-    DLL_Remove(next);
+    DLL_Remove(next, entropy_);
     DeleteSpan(next);
     span->length += len;
…
   if (span->decommitted) {
     if (span->length < kMaxPages)
-      DLL_Prepend(&free_[span->length].returned, span);
+      DLL_Prepend(&free_[span->length].returned, span, entropy_);
     else
-      DLL_Prepend(&large_.returned, span);
+      DLL_Prepend(&large_.returned, span, entropy_);
   } else {
     if (span->length < kMaxPages)
-      DLL_Prepend(&free_[span->length].normal, span);
+      DLL_Prepend(&free_[span->length].normal, span, entropy_);
     else
-      DLL_Prepend(&large_.normal, span);
+      DLL_Prepend(&large_.normal, span, entropy_);
   }
   free_pages_ += n;
…
   size_t result = 0;
   for (unsigned s = 0; s < kMaxPages; s++) {
-    const int r_length = DLL_Length(&free_[s].returned);
+    const int r_length = DLL_Length(&free_[s].returned, entropy_);
     unsigned r_pages = s * r_length;
     result += r_pages << kPageShift;
   }
 
-  for (Span* s = large_.returned.next(); s != &large_.returned; s = s->next())
+  for (Span* s = large_.returned.next(entropy_); s != &large_.returned; s = s->next(entropy_))
     result += s->length << kPageShift;
   return result;
…
   size_t totalFreeCommitted = 0;
 #endif
-  ASSERT(free_[0].normal.next() == &free_[0].normal);
-  ASSERT(free_[0].returned.next() == &free_[0].returned);
+  ASSERT(free_[0].normal.next(entropy_) == &free_[0].normal);
+  ASSERT(free_[0].returned.next(entropy_) == &free_[0].returned);
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
   totalFreeCommitted = CheckList(&large_.normal, kMaxPages, 1000000000, false);
…
 size_t TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages, bool decommitted) {
   size_t freeCount = 0;
-  for (Span* s = list->next(); s != list; s = s->next()) {
+  for (Span* s = list->next(entropy_); s != list; s = s->next(entropy_)) {
     CHECK_CONDITION(s->free);
     CHECK_CONDITION(s->length >= min_pages);
…
 #endif
 
-  while (!DLL_IsEmpty(list)) {
-    Span* s = list->prev();
-
-    DLL_Remove(s);
+  while (!DLL_IsEmpty(list, entropy_)) {
+    Span* s = list->prev(entropy_);
+
+    DLL_Remove(s, entropy_);
     s->decommitted = true;
-    DLL_Prepend(returned, s);
+    DLL_Prepend(returned, s, entropy_);
     TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
                            static_cast<size_t>(s->length << kPageShift));
…
 class TCMalloc_ThreadCache_FreeList {
  private:
-  void*    list_;       // Linked list of nodes
+  HardenedSLL list_;    // Linked list of nodes
   uint16_t length_;     // Current length
   uint16_t lowater_;    // Low water mark for list length
+  uintptr_t entropy_;   // Entropy source for hardening
 
  public:
-  void Init() {
-    list_ = NULL;
+  void Init(uintptr_t entropy) {
+    list_.setValue(NULL);
     length_ = 0;
     lowater_ = 0;
+    entropy_ = entropy;
+#if ENABLE(TCMALLOC_HARDENING)
+    ASSERT(entropy_);
+#endif
   }
…
   // Is list empty?
   bool empty() const {
-    return list_ == NULL;
+    return !list_;
   }
…
   void clear_lowwatermark() { lowater_ = length_; }
 
-  ALWAYS_INLINE void Push(void* ptr) {
-    SLL_Push(&list_, ptr);
+  ALWAYS_INLINE void Push(HardenedSLL ptr) {
+    SLL_Push(&list_, ptr, entropy_);
     length_++;
   }
 
-  void PushRange(int N, void *start, void *end) {
-    SLL_PushRange(&list_, start, end);
+  void PushRange(int N, HardenedSLL start, HardenedSLL end) {
+    SLL_PushRange(&list_, start, end, entropy_);
     length_ = length_ + static_cast<uint16_t>(N);
   }
 
-  void PopRange(int N, void **start, void **end) {
-    SLL_PopRange(&list_, N, start, end);
+  void PopRange(int N, HardenedSLL* start, HardenedSLL* end) {
+    SLL_PopRange(&list_, N, start, end, entropy_);
     ASSERT(length_ >= N);
     length_ = length_ - static_cast<uint16_t>(N);
…
 
   ALWAYS_INLINE void* Pop() {
-    ASSERT(list_ != NULL);
+    ASSERT(list_);
     length_--;
     if (length_ < lowater_) lowater_ = length_;
-    return SLL_Pop(&list_);
+    return SLL_Pop(&list_, entropy_).value();
   }
…
   void enumerateFreeObjects(Finder& finder, const Reader& reader)
   {
-    for (void* nextObject = list_; nextObject; nextObject = reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
-      finder.visit(nextObject);
+    for (HardenedSLL nextObject = list_; nextObject; nextObject.setValue(reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), entropy_)))
+      finder.visit(nextObject.value());
   }
 #endif
…
   size_t bytes_until_sample_;          // Bytes until we sample next
 
+  uintptr_t entropy_;                  // Entropy value used for hardening
+
   // Allocate a new heap. REQUIRES: pageheap_lock is held.
-  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
+  static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid, uintptr_t entropy);
 
   // Use only as pthread thread-specific destructor function.
…
   TCMalloc_ThreadCache* prev_;
 
-  void Init(ThreadIdentifier tid);
+  void Init(ThreadIdentifier tid, uintptr_t entropy);
   void Cleanup();
…
 
   ALWAYS_INLINE void* Allocate(size_t size);
-  void Deallocate(void* ptr, size_t size_class);
+  void Deallocate(HardenedSLL ptr, size_t size_class);
 
   ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
…
 //-------------------------------------------------------------------
 
-void TCMalloc_Central_FreeList::Init(size_t cl) {
+void TCMalloc_Central_FreeList::Init(size_t cl, uintptr_t entropy) {
   lock_.Init();
   size_class_ = cl;
-  DLL_Init(&empty_);
-  DLL_Init(&nonempty_);
+  entropy_ = entropy;
+#if ENABLE(TCMALLOC_HARDENING)
+  ASSERT(entropy_);
+#endif
+  DLL_Init(&empty_, entropy_);
+  DLL_Init(&nonempty_, entropy_);
   counter_ = 0;
…
 }
 
-void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
+void TCMalloc_Central_FreeList::ReleaseListToSpans(HardenedSLL start) {
   while (start) {
-    void *next = SLL_Next(start);
+    HardenedSLL next = SLL_Next(start, entropy_);
     ReleaseToSpans(start);
     start = next;
…
 }
 
-ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
-  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
+ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(HardenedSLL object) {
+  const PageID p = reinterpret_cast<uintptr_t>(object.value()) >> kPageShift;
   Span* span = pageheap->GetDescriptor(p);
   ASSERT(span != NULL);
…
 
   // If span is empty, move it to non-empty list
-  if (span->objects == NULL) {
-    DLL_Remove(span);
-    DLL_Prepend(&nonempty_, span);
+  if (!span->objects) {
+    DLL_Remove(span, entropy_);
+    DLL_Prepend(&nonempty_, span, entropy_);
     Event(span, 'N', 0);
   }
…
     // Check that object does not occur in list
     unsigned got = 0;
-    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
-      ASSERT(p != object);
+    for (HardenedSLL p = span->objects; !p; SLL_Next(p, entropy_)) {
+      ASSERT(p.value() != object.value());
       got++;
     }
…
     Event(span, '#', 0);
     counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
-    DLL_Remove(span);
+    DLL_Remove(span, entropy_);
 
     // Release central list lock while operating on pageheap
…
     lock_.Lock();
   } else {
-    *(reinterpret_cast<void**>(object)) = span->objects;
-    span->objects = object;
+    SLL_SetNext(object, span->objects, entropy_);
+    span->objects.setValue(object.value());
   }
 }
…
 }
 
-void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
+void TCMalloc_Central_FreeList::InsertRange(HardenedSLL start, HardenedSLL end, int N) {
   SpinLockHolder h(&lock_);
   if (N == num_objects_to_move[size_class_] &&
…
 }
 
-void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
+void TCMalloc_Central_FreeList::RemoveRange(HardenedSLL* start, HardenedSLL* end, int *N) {
   int num = *N;
   ASSERT(num > 0);
…
 
   // TODO: Prefetch multiple TCEntries?
-  void *tail = FetchFromSpansSafe();
+  HardenedSLL tail = FetchFromSpansSafe();
   if (!tail) {
     // We are completely out of memory.
-    *start = *end = NULL;
+    *start = *end = HardenedSLL::null();
     *N = 0;
     return;
   }
 
-  SLL_SetNext(tail, NULL);
-  void *head = tail;
+  SLL_SetNext(tail, HardenedSLL::null(), entropy_);
+  HardenedSLL head = tail;
   int count = 1;
   while (count < num) {
-    void *t = FetchFromSpans();
+    HardenedSLL t = FetchFromSpans();
     if (!t) break;
-    SLL_Push(&head, t);
+    SLL_Push(&head, t, entropy_);
     count++;
   }
…
 
-void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
-  void *t = FetchFromSpans();
+HardenedSLL TCMalloc_Central_FreeList::FetchFromSpansSafe() {
+  HardenedSLL t = FetchFromSpans();
   if (!t) {
     Populate();
…
 }
 
-void* TCMalloc_Central_FreeList::FetchFromSpans() {
-  if (DLL_IsEmpty(&nonempty_)) return NULL;
-  Span* span = nonempty_.next();
-
-  ASSERT(span->objects != NULL);
+HardenedSLL TCMalloc_Central_FreeList::FetchFromSpans() {
+  if (DLL_IsEmpty(&nonempty_, entropy_)) return HardenedSLL::null();
+  Span* span = nonempty_.next(entropy_);
+
+  ASSERT(span->objects);
   ASSERT_SPAN_COMMITTED(span);
   span->refcount++;
-  void* result = span->objects;
-  span->objects = *(reinterpret_cast<void**>(result));
-  if (span->objects == NULL) {
+  HardenedSLL result = span->objects;
+  span->objects = SLL_Next(result, entropy_);
+  if (!span->objects) {
     // Move to empty list
-    DLL_Remove(span);
-    DLL_Prepend(&empty_, span);
+    DLL_Remove(span, entropy_);
+    DLL_Prepend(&empty_, span, entropy_);
     Event(span, 'E', 0);
   }
…
   // Split the block into pieces and add to the free-list
   // TODO: coloring of objects to avoid cache conflicts?
-  void** tail = &span->objects;
-  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
-  char* limit = ptr + (npages << kPageShift);
+  HardenedSLL head = HardenedSLL::null();
+  char* start = reinterpret_cast<char*>(span->start << kPageShift);
   const size_t size = ByteSizeForClass(size_class_);
+  char* ptr = start + (npages << kPageShift) - ((npages << kPageShift) % size);
   int num = 0;
-  char* nptr;
-  while ((nptr = ptr + size) <= limit) {
-    *tail = ptr;
-    tail = reinterpret_cast_ptr<void**>(ptr);
-    ptr = nptr;
+  while (ptr > start) {
+    ptr -= size;
+    HardenedSLL node = HardenedSLL::create(ptr);
+    SLL_SetNext(node, head, entropy_);
+    head = node;
     num++;
   }
-  ASSERT(ptr <= limit);
-  *tail = NULL;
+  ASSERT(ptr == start);
+  ASSERT(ptr == head.value());
+  span->objects = head;
+  ASSERT(span->objects.value() == head.value());
   span->refcount = 0; // No sub-object in use yet
 
   // Add span to list of non-empty spans
   lock_.Lock();
-  DLL_Prepend(&nonempty_, span);
+  DLL_Prepend(&nonempty_, span, entropy_);
   counter_ += num;
 }
…
 }
 
-void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
+void TCMalloc_ThreadCache::Init(ThreadIdentifier tid, uintptr_t entropy) {
   size_ = 0;
   next_ = NULL;
…
   tid_ = tid;
   in_setspecific_ = false;
+  entropy_ = entropy;
+#if ENABLE(TCMALLOC_HARDENING)
+  ASSERT(entropy_);
+#endif
   for (size_t cl = 0; cl < kNumClasses; ++cl) {
-    list_[cl].Init();
+    list_[cl].Init(entropy_);
   }
…
 }
 
-inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
+inline void TCMalloc_ThreadCache::Deallocate(HardenedSLL ptr, size_t cl) {
   size_ += ByteSizeForClass(cl);
   FreeList* list = &list_[cl];
…
 ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
   int fetch_count = num_objects_to_move[cl];
-  void *start, *end;
+  HardenedSLL start, end;
   central_cache[cl].RemoveRange(&start, &end, &fetch_count);
   list_[cl].PushRange(fetch_count, start, end);
…
   int batch_size = num_objects_to_move[cl];
   while (N > batch_size) {
-    void *tail, *head;
+    HardenedSLL tail, head;
     src->PopRange(batch_size, &head, &tail);
     central_cache[cl].InsertRange(head, tail, batch_size);
     N -= batch_size;
   }
-  void *tail, *head;
+  HardenedSLL tail, head;
   src->PopRange(N, &head, &tail);
   central_cache[cl].InsertRange(head, tail, N);
…
   SpinLockHolder h(&pageheap_lock);
   if (!phinited) {
+    uintptr_t entropy = HARDENING_ENTROPY;
 #ifdef WTF_CHANGES
     InitTSD();
 #endif
     InitSizeClasses();
-    threadheap_allocator.Init();
-    span_allocator.Init();
+    threadheap_allocator.Init(entropy);
+    span_allocator.Init(entropy);
     span_allocator.New(); // Reduce cache conflicts
     span_allocator.New(); // Reduce cache conflicts
-    stacktrace_allocator.Init();
-    DLL_Init(&sampled_objects);
+    stacktrace_allocator.Init(entropy);
+    DLL_Init(&sampled_objects, entropy);
     for (size_t i = 0; i < kNumClasses; ++i) {
-      central_cache[i].Init(i);
+      central_cache[i].Init(i, entropy);
     }
     pageheap->init();
…
 }
 
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
+inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid, uintptr_t entropy) {
   // Create the heap and add it to the linked list
   TCMalloc_ThreadCache *heap = threadheap_allocator.New();
-  heap->Init(tid);
+  heap->Init(tid, entropy);
   heap->next_ = thread_heaps;
   heap->prev_ = NULL;
…
   }
 
-  if (heap == NULL) heap = NewHeap(me);
+  if (heap == NULL) heap = NewHeap(me, HARDENING_ENTROPY);
 }
…
     TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
     if (heap != NULL) {
-      heap->Deallocate(ptr, cl);
+      heap->Deallocate(HardenedSLL::create(ptr), cl);
     } else {
       // Delete directly into central cache
-      SLL_SetNext(ptr, NULL);
-      central_cache[cl].InsertRange(ptr, ptr, 1);
+      SLL_SetNext(HardenedSLL::create(ptr), HardenedSLL::null(), central_cache[cl].entropy());
+      central_cache[cl].InsertRange(HardenedSLL::create(ptr), HardenedSLL::create(ptr), 1);
     }
   } else {
…
     return 0;
 
-  for (void* free = span->objects; free != NULL; free = *((void**) free)) {
-    if (ptr == free)
+  for (HardenedSLL free = span->objects; free; free = SLL_Next(free, HARDENING_ENTROPY)) {
+    if (ptr == free.value())
       return 0;
   }
…
 
 #if OS(DARWIN)
+
+template <typename T>
+T* RemoteMemoryReader::nextEntryInHardenedLinkedList(T** remoteAddress, uintptr_t entropy) const
+{
+    T** localAddress = (*this)(remoteAddress);
+    if (!localAddress)
+        return 0;
+    T* hardenedNext = *localAddress;
+    if (!hardenedNext || hardenedNext == (void*)entropy)
+        return 0;
+    return XOR_MASK_PTR_WITH_KEY(hardenedNext, remoteAddress, entropy);
+}
 
 class FreeObjectFinder {
…
   const RemoteMemoryReader& m_reader;
   FreeObjectFinder& m_freeObjectFinder;
+  uintptr_t m_entropy;
 
 public:
-  PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
+  PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder, uintptr_t entropy)
     : m_reader(reader)
     , m_freeObjectFinder(freeObjectFinder)
-  { }
+    , m_entropy(entropy)
+  {
+#if ENABLE(TCMALLOC_HARDENING)
+    ASSERT(m_entropy);
+#endif
+  }
 
   int visit(void* ptr) const
…
     } else if (span->sizeclass) {
       // Walk the free list of the small-object span, keeping track of each object seen
-      for (void* nextObject = span->objects; nextObject; nextObject = m_reader.nextEntryInLinkedList(reinterpret_cast<void**>(nextObject)))
-        m_freeObjectFinder.visit(nextObject);
+      for (HardenedSLL nextObject = span->objects; nextObject; nextObject.setValue(m_reader.nextEntryInHardenedLinkedList(reinterpret_cast<void**>(nextObject.value()), m_entropy)))
+        m_freeObjectFinder.visit(nextObject.value());
     }
     return span->length;
…
   void recordPendingRegions()
   {
-    Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
-    vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
-    ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
-
-    // Mark the memory region the spans represent as a candidate for containing pointers
-    if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
-      (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
-
-    if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
+    if (!(m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE))) {
       m_coalescedSpans.clear();
       return;
     }
…
     }
 
-    (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
+    (*m_recorder)(m_task, m_context, m_typeMask & (MALLOC_PTR_IN_USE_RANGE_TYPE | MALLOC_PTR_REGION_RANGE_TYPE), allocatedPointers.data(), allocatedPointers.size());
 
     m_coalescedSpans.clear();
…
 
   TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
-  PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
+  PageMapFreeObjectFinder pageMapFinder(memoryReader, finder, pageHeap->entropy_);
   pageMap->visitValues(pageMapFinder, memoryReader);
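A hedged sketch of the entropy plumbing this diff introduces follows. EntropySource and internalEntropyValue mirror the diff above; clock() and kAddressAnchor are stand-ins so the sketch compiles outside WebKit (the patch uses currentTime() * 10000 mixed with a global's ASLR'd address, or arc4random() on Darwin).

// Sketch of per-process entropy derived once at pointer width, assuming
// clock() as a stand-in time source. The value is then threaded by value
// into every list helper rather than recomputed from a global's address.
#include <cstdint>
#include <ctime>

static const char kAddressAnchor = 0; // address is ASLR-influenced

template <unsigned> struct EntropySource;

template <> struct EntropySource<4> {
    static uint32_t value()
    {
        return static_cast<uint32_t>(static_cast<uintptr_t>(clock())
            ^ reinterpret_cast<uintptr_t>(&kAddressAnchor));
    }
};

template <> struct EntropySource<8> {
    static uint64_t value()
    {
        // Two 32-bit draws widen the value to 64 bits.
        return EntropySource<4>::value()
            | (static_cast<uint64_t>(EntropySource<4>::value()) << 32);
    }
};

static uintptr_t internalEntropyValue()
{
    // Computed once per process, on first use.
    static uintptr_t value = EntropySource<sizeof(uintptr_t)>::value();
    return value;
}

Threading the value as an explicit parameter, instead of re-deriving it from the address of a global, is what lets an out-of-process inspector reuse the same mask, as the MallocZoneSupport.h change below shows.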
trunk/Source/WTF/wtf/MallocZoneSupport.h
r111778 → r142536

 
     template <typename T>
-    T* nextEntryInLinkedList(T** address) const
-    {
-        T** output = (*this)(address);
-        if (!output)
-            return 0;
-        return *output;
-    }
+    T* nextEntryInHardenedLinkedList(T** address, uintptr_t entropy) const;
 };
 
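nextEntryInHardenedLinkedList is now only declared here; its definition lives in FastMalloc.cpp, next to the masking macros. The sketch below shows the one thing that definition has to get right: the unmasking key is the entry's address in the inspected process, because that is the address the allocator used as the key when it stored the pointer. readRemote() is a hypothetical identity stand-in for the reader's task-memory operator(), and mask() abbreviates XOR_MASK_PTR_WITH_KEY.

// Sketch of introspection-side unmasking, assuming same-process "remote"
// reads so the example is runnable on its own.
#include <cassert>
#include <cstdint>

static const int MaskKeyShift = 13;

static uintptr_t rotate(uintptr_t v, unsigned amount)
{
    return (v >> amount) | (v << (sizeof(v) * 8 - amount));
}

static void* mask(void* ptr, const void* key, uintptr_t entropy)
{
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr)
        ^ (rotate(reinterpret_cast<uintptr_t>(key), MaskKeyShift) ^ entropy));
}

// Identity here; the real RemoteMemoryReader maps another task's pages.
static void** readRemote(void** remoteAddress) { return remoteAddress; }

static void* nextEntryInHardenedLinkedList(void** remoteAddress, uintptr_t entropy)
{
    void** localAddress = readRemote(remoteAddress);
    if (!localAddress)
        return 0;
    void* hardenedNext = *localAddress;
    // Mirrors the patch's end-of-list guards.
    if (!hardenedNext || hardenedNext == reinterpret_cast<void*>(entropy))
        return 0;
    // Unmask with the *remote* address, not the local copy's address.
    return mask(hardenedNext, remoteAddress, entropy);
}

int main()
{
    uintptr_t entropy = 0x243f6a88u;             // illustrative constant
    void* cellB;                                 // second list cell
    void* cellA = mask(&cellB, &cellA, entropy); // cellA's slot points at cellB
    cellB = mask(0, &cellB, entropy);            // cellB terminates the list
    assert(nextEntryInHardenedLinkedList(&cellA, entropy) == &cellB);
    assert(nextEntryInHardenedLinkedList(&cellB, entropy) == 0);
    return 0;
}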