Changeset 240175 in webkit
- Timestamp: Jan 18, 2019 2:48:22 PM (5 years ago)
- Location: trunk/Source
- Files: 11 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/Source/JavaScriptCore/ChangeLog
r240171 r240175 1 2019-01-18 Keith Miller <keith_miller@apple.com> 2 3 Gigacages should start allocations from a slide 4 https://bugs.webkit.org/show_bug.cgi?id=193523 5 6 Reviewed by Mark Lam. 7 8 This patch changes some macros into constants since macros are the 9 devil. 10 11 * ftl/FTLLowerDFGToB3.cpp: 12 (JSC::FTL::DFG::LowerDFGToB3::caged): 13 * llint/LowLevelInterpreter64.asm: 14 1 15 2019-01-18 Matt Lewis <jlewis3@apple.com> 2 16 -
trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp
r240114 r240175 13853 13853 LValue caged(Gigacage::Kind kind, LValue ptr) 13854 13854 { 13855 #if GIGACAGE_ENABLED 13855 13856 if (!Gigacage::isEnabled(kind)) 13856 13857 return ptr; … … 13881 13882 // https://bugs.webkit.org/show_bug.cgi?id=175493 13882 13883 return m_out.opaque(result); 13884 #else 13885 return ptr; 13886 #endif 13883 13887 } 13884 13888 -
trunk/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
r240171 r240175 1316 1316 btiz t0, IsArray, .opGetByIdSlow 1317 1317 btiz t0, IndexingShapeMask, .opGetByIdSlow 1318 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t3], t0, t1)1318 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t3], t0, t1) 1319 1319 loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 1320 1320 bilt t0, 0, .opGetByIdSlow … … 1439 1439 sxi2q t1, t1 1440 1440 1441 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t0], t3, tagTypeNumber)1441 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t0], t3, tagTypeNumber) 1442 1442 move TagTypeNumber, tagTypeNumber 1443 1443 … … 1505 1505 1506 1506 # We have Int8ArrayType. 1507 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1507 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1508 1508 loadbs [t3, t1], t0 1509 1509 finishIntGetByVal(t0, t1) … … 1513 1513 1514 1514 # We have Uint8ArrayType. 1515 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1515 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1516 1516 loadb [t3, t1], t0 1517 1517 finishIntGetByVal(t0, t1) … … 1519 1519 .opGetByValUint8ClampedArray: 1520 1520 # We have Uint8ClampedArrayType. 
1521 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1521 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1522 1522 loadb [t3, t1], t0 1523 1523 finishIntGetByVal(t0, t1) … … 1528 1528 1529 1529 # We have Int16ArrayType. 1530 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1530 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1531 1531 loadhs [t3, t1, 2], t0 1532 1532 finishIntGetByVal(t0, t1) … … 1534 1534 .opGetByValUint16Array: 1535 1535 # We have Uint16ArrayType. 1536 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1536 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1537 1537 loadh [t3, t1, 2], t0 1538 1538 finishIntGetByVal(t0, t1) … … 1546 1546 1547 1547 # We have Int32ArrayType. 1548 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1548 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1549 1549 loadi [t3, t1, 4], t0 1550 1550 finishIntGetByVal(t0, t1) … … 1552 1552 .opGetByValUint32Array: 1553 1553 # We have Uint32ArrayType. 
1554 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1554 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1555 1555 # This is the hardest part because of large unsigned values. 1556 1556 loadi [t3, t1, 4], t0 … … 1564 1564 1565 1565 # We have Float64ArrayType. 1566 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr PRIMITIVE_GIGACAGE_MASK, JSArrayBufferView::m_vector[t0], t3, t2)1566 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::primitive, constexpr Gigacage::primitiveGigacageMask, JSArrayBufferView::m_vector[t0], t3, t2) 1567 1567 loadd [t3, t1, 8], ft0 1568 1568 bdnequn ft0, ft0, .opGetByValSlow … … 1600 1600 loadConstantOrVariableInt32(size, t0, t3, .opPutByValSlow) 1601 1601 sxi2q t3, t3 1602 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr JSVALUE_GIGACAGE_MASK, JSObject::m_butterfly[t1], t0, tagTypeNumber)1602 loadCaged(_g_gigacageBasePtrs + Gigacage::BasePtrs::jsValue, constexpr Gigacage::jsValueGigacageMask, JSObject::m_butterfly[t1], t0, tagTypeNumber) 1603 1603 move TagTypeNumber, tagTypeNumber 1604 1604 btinz t2, CopyOnWrite, .opPutByValSlow -
trunk/Source/WTF/ChangeLog
r240171 r240175 1 2019-01-18 Keith Miller <keith_miller@apple.com> 2 3 Gigacages should start allocations from a slide 4 https://bugs.webkit.org/show_bug.cgi?id=193523 5 6 Reviewed by Mark Lam. 7 8 This patch changes some macros into constants since macros are the 9 devil. 10 11 * wtf/Gigacage.cpp: 12 * wtf/Gigacage.h: 13 1 14 2019-01-18 Matt Lewis <jlewis3@apple.com> 2 15 -
trunk/Source/WTF/wtf/Gigacage.cpp
r240171 r240175 33 33 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC 34 34 35 alignas(void*) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE]; 35 namespace Gigacage { 36 36 37 namespace Gigacage { 37 alignas(void*) char g_gigacageBasePtrs[gigacageBasePtrsSize]; 38 38 39 39 void* tryMalloc(Kind, size_t size) … … 62 62 63 63 } // namespace Gigacage 64 #else 64 #else // defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC 65 65 #include <bmalloc/bmalloc.h> 66 66 -
trunk/Source/WTF/wtf/Gigacage.h
r240171 r240175 27 27 28 28 #include <wtf/FastMalloc.h> 29 #include <wtf/StdLibExtras.h> 29 30 30 31 #if defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC 31 32 #define GIGACAGE_ENABLED 0 32 #define PRIMITIVE_GIGACAGE_MASK 033 #define JSVALUE_GIGACAGE_MASK 034 #define GIGACAGE_BASE_PTRS_SIZE 819235 36 extern "C" {37 alignas(void*) extern WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];38 }39 33 40 34 namespace Gigacage { 35 36 const size_t primitiveGigacageMask = 0; 37 const size_t jsValueGigacageMask = 0; 38 const size_t gigacageBasePtrsSize = 8 * KB; 39 40 extern "C" alignas(void*) WTF_EXPORT_PRIVATE char g_gigacageBasePtrs[gigacageBasePtrsSize]; 41 41 42 42 struct BasePtrs { -
trunk/Source/bmalloc/ChangeLog
r240171 r240175 1 2019-01-18 Keith Miller <keith_miller@apple.com> 2 3 Gigacages should start allocations from a slide 4 https://bugs.webkit.org/show_bug.cgi?id=193523 5 6 Reviewed by Mark Lam. 7 8 This patch makes it so that Gigacage Heaps slide the start of the 9 cage by some random amount. We still ensure that there is always 10 at least 4/2GB, on MacOS/iOS respectively, of VA space available 11 for allocation. 12 13 Also, this patch changes some macros into constants since macros 14 are the devil. 15 16 * bmalloc/Gigacage.cpp: 17 (Gigacage::bmalloc::protectGigacageBasePtrs): 18 (Gigacage::bmalloc::unprotectGigacageBasePtrs): 19 (Gigacage::bmalloc::runwaySize): 20 (Gigacage::ensureGigacage): 21 (Gigacage::shouldBeEnabled): 22 * bmalloc/Gigacage.h: 23 (Gigacage::name): 24 (Gigacage::gigacageSizeToMask): 25 (Gigacage::size): 26 (Gigacage::mask): 27 (Gigacage::basePtr): 28 (Gigacage::ensureGigacage): 29 (Gigacage::wasEnabled): 30 (Gigacage::isCaged): 31 (Gigacage::isEnabled): 32 (Gigacage::caged): 33 (Gigacage::disableDisablingPrimitiveGigacageIfShouldBeEnabled): 34 (Gigacage::canPrimitiveGigacageBeDisabled): 35 (Gigacage::disablePrimitiveGigacage): 36 (Gigacage::addPrimitiveDisableCallback): 37 (Gigacage::removePrimitiveDisableCallback): 38 * bmalloc/Heap.cpp: 39 (bmalloc::Heap::Heap): 40 * bmalloc/Sizes.h: 41 (bmalloc::Sizes::maskSizeClass): 42 (bmalloc::Sizes::maskObjectSize): 43 (bmalloc::Sizes::logSizeClass): 44 (bmalloc::Sizes::logObjectSize): 45 (bmalloc::Sizes::sizeClass): 46 (bmalloc::Sizes::objectSize): 47 (bmalloc::Sizes::pageSize): 48 1 49 2019-01-18 Matt Lewis <jlewis3@apple.com> 2 50 -
trunk/Source/bmalloc/bmalloc/Gigacage.cpp
r240171 r240175 36 36 #include <mutex> 37 37 38 #if GIGACAGE_ENABLED 39 40 namespace Gigacage { 41 38 42 // This is exactly 32GB because inside JSC, indexed accesses for arrays, typed arrays, etc, 39 43 // use unsigned 32-bit ints as indices. The items those indices access are 8 bytes or less … … 41 45 // bounds, the access is guaranteed to land somewhere else in the cage or inside the runway. 42 46 // If this were less than 32GB, those OOB accesses could reach outside of the cage. 43 #define GIGACAGE_RUNWAY (32llu * 1024 * 1024 * 1024) 47 constexpr size_t gigacageRunway = 32llu * 1024 * 1024 * 1024; 44 48 45 49 // Note: g_gigacageBasePtrs[0] is reserved for storing the wasEnabled flag. … … 47 51 // This is done so that the wasEnabled flag will also be protected along with the 48 52 // gigacageBasePtrs. 49 alignas( GIGACAGE_BASE_PTRS_SIZE) char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];53 alignas(gigacageBasePtrsSize) char g_gigacageBasePtrs[gigacageBasePtrsSize]; 50 54 51 55 using namespace bmalloc; 52 53 namespace Gigacage {54 56 55 57 namespace { … … 62 64 // We might only get page size alignment, but that's also the minimum we need. 
63 65 RELEASE_BASSERT(!(basePtrs & (vmPageSize() - 1))); 64 mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ);66 mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ); 65 67 } 66 68 67 69 void unprotectGigacageBasePtrs() 68 70 { 69 mprotect(g_gigacageBasePtrs, GIGACAGE_BASE_PTRS_SIZE, PROT_READ | PROT_WRITE);71 mprotect(g_gigacageBasePtrs, gigacageBasePtrsSize, PROT_READ | PROT_WRITE); 70 72 } 71 73 … … 102 104 }; 103 105 104 #if GIGACAGE_ENABLED105 106 size_t runwaySize(Kind kind) 106 107 { … … 109 110 RELEASE_BASSERT_NOT_REACHED(); 110 111 case Kind::Primitive: 111 return static_cast<size_t>(GIGACAGE_RUNWAY);112 return gigacageRunway; 112 113 case Kind::JSValue: 113 return static_cast<size_t>(0); 114 } 115 return static_cast<size_t>(0); 116 } 117 #endif 114 return 0; 115 } 116 return 0; 117 } 118 118 119 119 } // anonymous namespace … … 121 121 void ensureGigacage() 122 122 { 123 #if GIGACAGE_ENABLED124 123 static std::once_flag onceFlag; 125 124 std::call_once( … … 190 189 protectGigacageBasePtrs(); 191 190 }); 192 #endif // GIGACAGE_ENABLED193 191 } 194 192 … … 266 264 { 267 265 static bool cached = false; 268 269 #if GIGACAGE_ENABLED270 266 static std::once_flag onceFlag; 271 267 std::call_once( … … 289 285 cached = true; 290 286 }); 287 return cached; 288 } 289 290 } // namespace Gigacage 291 291 292 #endif // GIGACAGE_ENABLED 292 293 return cached; 294 } 295 296 } // namespace Gigacage 297 298 299 293 294 -
trunk/Source/bmalloc/bmalloc/Gigacage.h
r240171 r240175 31 31 #include "BInline.h" 32 32 #include "BPlatform.h" 33 #include "Sizes.h" 33 34 #include <cstddef> 34 35 #include <inttypes.h> 35 36 36 #if BCPU(ARM64)37 #define PRIMITIVE_GIGACAGE_SIZE 0x80000000llu38 #define JSVALUE_GIGACAGE_SIZE 0x40000000llu39 #define GIGACAGE_ALLOCATION_CAN_FAIL 140 #else41 #define PRIMITIVE_GIGACAGE_SIZE 0x800000000llu42 #define JSVALUE_GIGACAGE_SIZE 0x400000000llu43 #define GIGACAGE_ALLOCATION_CAN_FAIL 044 #endif45 46 // In Linux, if `vm.overcommit_memory = 2` is specified, mmap with large size can fail if it exceeds the size of RAM.47 // So we specify GIGACAGE_ALLOCATION_CAN_FAIL = 1.48 #if BOS(LINUX)49 #undef GIGACAGE_ALLOCATION_CAN_FAIL50 #define GIGACAGE_ALLOCATION_CAN_FAIL 151 #endif52 53 static_assert(bmalloc::isPowerOfTwo(PRIMITIVE_GIGACAGE_SIZE), "");54 static_assert(bmalloc::isPowerOfTwo(JSVALUE_GIGACAGE_SIZE), "");55 56 #define GIGACAGE_SIZE_TO_MASK(size) ((size) - 1)57 58 #define PRIMITIVE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(PRIMITIVE_GIGACAGE_SIZE)59 #define JSVALUE_GIGACAGE_MASK GIGACAGE_SIZE_TO_MASK(JSVALUE_GIGACAGE_SIZE)60 61 37 #if ((BOS(DARWIN) || BOS(LINUX)) && \ 62 38 (BCPU(X86_64) || (BCPU(ARM64) && !defined(__ILP32__) && (!BPLATFORM(IOS_FAMILY) || BPLATFORM(IOS))))) 63 39 #define GIGACAGE_ENABLED 1 64 40 #else … … 66 42 #endif 67 43 68 #if BCPU(ARM64)69 #define GIGACAGE_BASE_PTRS_SIZE 1638470 #else71 #define GIGACAGE_BASE_PTRS_SIZE 409672 #endif73 74 extern "C" alignas(GIGACAGE_BASE_PTRS_SIZE) BEXPORT char g_gigacageBasePtrs[GIGACAGE_BASE_PTRS_SIZE];75 44 76 45 namespace Gigacage { 77 78 BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; }79 BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; }80 81 struct BasePtrs {82 uintptr_t reservedForFlags;83 void* primitive;84 void* jsValue;85 };86 46 87 47 enum Kind { … … 90 50 JSValue, 91 51 }; 92 93 static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), "");94 static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue 
* sizeof(void*), "");95 96 static constexpr unsigned numKinds = 2;97 98 BEXPORT void ensureGigacage();99 100 BEXPORT void disablePrimitiveGigacage();101 102 // This will call the disable callback immediately if the Primitive Gigacage is currently disabled.103 BEXPORT void addPrimitiveDisableCallback(void (*)(void*), void*);104 BEXPORT void removePrimitiveDisableCallback(void (*)(void*), void*);105 106 BEXPORT void disableDisablingPrimitiveGigacageIfShouldBeEnabled();107 108 BEXPORT bool isDisablingPrimitiveGigacageDisabled();109 inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); }110 inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); }111 52 112 53 BINLINE const char* name(Kind kind) … … 124 65 } 125 66 67 #if GIGACAGE_ENABLED 68 69 #if BCPU(ARM64) 70 constexpr size_t primitiveGigacageSize = 2 * bmalloc::Sizes::GB; 71 constexpr size_t jsValueGigacageSize = 1 * bmalloc::Sizes::GB; 72 constexpr size_t gigacageBasePtrsSize = 16 * bmalloc::Sizes::kB; 73 constexpr size_t minimumCageSizeAfterSlide = bmalloc::Sizes::GB / 2; 74 #define GIGACAGE_ALLOCATION_CAN_FAIL 1 75 #else 76 constexpr size_t primitiveGigacageSize = 32 * bmalloc::Sizes::GB; 77 constexpr size_t jsValueGigacageSize = 16 * bmalloc::Sizes::GB; 78 constexpr size_t gigacageBasePtrsSize = 4 * bmalloc::Sizes::kB; 79 constexpr size_t minimumCageSizeAfterSlide = 4 * bmalloc::Sizes::GB; 80 #define GIGACAGE_ALLOCATION_CAN_FAIL 0 81 #endif 82 83 // In Linux, if `vm.overcommit_memory = 2` is specified, mmap with large size can fail if it exceeds the size of RAM. 84 // So we specify GIGACAGE_ALLOCATION_CAN_FAIL = 1. 
85 #if BOS(LINUX) 86 #undef GIGACAGE_ALLOCATION_CAN_FAIL 87 #define GIGACAGE_ALLOCATION_CAN_FAIL 1 88 #endif 89 90 91 static_assert(bmalloc::isPowerOfTwo(primitiveGigacageSize), ""); 92 static_assert(bmalloc::isPowerOfTwo(jsValueGigacageSize), ""); 93 static_assert(primitiveGigacageSize > minimumCageSizeAfterSlide, ""); 94 static_assert(jsValueGigacageSize > minimumCageSizeAfterSlide, ""); 95 96 constexpr size_t gigacageSizeToMask(size_t size) { return size - 1; } 97 98 constexpr size_t primitiveGigacageMask = gigacageSizeToMask(primitiveGigacageSize); 99 constexpr size_t jsValueGigacageMask = gigacageSizeToMask(jsValueGigacageSize); 100 101 extern "C" alignas(gigacageBasePtrsSize) BEXPORT char g_gigacageBasePtrs[gigacageBasePtrsSize]; 102 103 BINLINE bool wasEnabled() { return g_gigacageBasePtrs[0]; } 104 BINLINE void setWasEnabled() { g_gigacageBasePtrs[0] = true; } 105 106 struct BasePtrs { 107 uintptr_t reservedForFlags; 108 void* primitive; 109 void* jsValue; 110 }; 111 112 static_assert(offsetof(BasePtrs, primitive) == Kind::Primitive * sizeof(void*), ""); 113 static_assert(offsetof(BasePtrs, jsValue) == Kind::JSValue * sizeof(void*), ""); 114 115 constexpr unsigned numKinds = 2; 116 117 BEXPORT void ensureGigacage(); 118 119 BEXPORT void disablePrimitiveGigacage(); 120 121 // This will call the disable callback immediately if the Primitive Gigacage is currently disabled. 
122 BEXPORT void addPrimitiveDisableCallback(void (*)(void*), void*); 123 BEXPORT void removePrimitiveDisableCallback(void (*)(void*), void*); 124 125 BEXPORT void disableDisablingPrimitiveGigacageIfShouldBeEnabled(); 126 127 BEXPORT bool isDisablingPrimitiveGigacageDisabled(); 128 inline bool isPrimitiveGigacagePermanentlyEnabled() { return isDisablingPrimitiveGigacageDisabled(); } 129 inline bool canPrimitiveGigacageBeDisabled() { return !isDisablingPrimitiveGigacageDisabled(); } 130 126 131 BINLINE void*& basePtr(BasePtrs& basePtrs, Kind kind) 127 132 { … … 159 164 RELEASE_BASSERT_NOT_REACHED(); 160 165 case Primitive: 161 return static_cast<size_t>( PRIMITIVE_GIGACAGE_SIZE);166 return static_cast<size_t>(primitiveGigacageSize); 162 167 case JSValue: 163 return static_cast<size_t>( JSVALUE_GIGACAGE_SIZE);168 return static_cast<size_t>(jsValueGigacageSize); 164 169 } 165 170 BCRASH(); … … 174 179 BINLINE size_t mask(Kind kind) 175 180 { 176 return GIGACAGE_SIZE_TO_MASK(size(kind));181 return gigacageSizeToMask(size(kind)); 177 182 } 178 183 … … 203 208 BEXPORT bool shouldBeEnabled(); 204 209 210 #else // GIGACAGE_ENABLED 211 212 BINLINE void*& basePtr(Kind) 213 { 214 BCRASH(); 215 static void* unreachable; 216 return unreachable; 217 } 218 BINLINE size_t size(Kind) { BCRASH(); return 0; } 219 BINLINE void ensureGigacage() { } 220 BINLINE bool wasEnabled() { return false; } 221 BINLINE bool isCaged(Kind, const void*) { return true; } 222 BINLINE bool isEnabled() { return false; } 223 template<typename T> BINLINE T* caged(Kind, T* ptr) { return ptr; } 224 BINLINE void disableDisablingPrimitiveGigacageIfShouldBeEnabled() { } 225 BINLINE bool canPrimitiveGigacageBeDisabled() { return false; } 226 BINLINE void disablePrimitiveGigacage() { } 227 BINLINE void addPrimitiveDisableCallback(void (*)(void*), void*) { } 228 BINLINE void removePrimitiveDisableCallback(void (*)(void*), void*) { } 229 230 #endif // GIGACAGE_ENABLED 231 205 232 } // namespace Gigacage 206 233 207 
234 235 -
trunk/Source/bmalloc/bmalloc/Heap.cpp
r240171 r240175 30 30 #include "BumpAllocator.h" 31 31 #include "Chunk.h" 32 #include "CryptoRandom.h" 32 33 #include "Environment.h" 33 34 #include "Gigacage.h" … … 62 63 if (usingGigacage()) { 63 64 RELEASE_BASSERT(gigacageBasePtr()); 64 m_largeFree.add(LargeRange(gigacageBasePtr(), gigacageSize(), 0, 0)); 65 uint64_t random; 66 cryptoRandom(reinterpret_cast<unsigned char*>(&random), sizeof(random)); 67 ptrdiff_t offset = random % (gigacageSize() - Gigacage::minimumCageSizeAfterSlide); 68 offset = reinterpret_cast<ptrdiff_t>(roundDownToMultipleOf(vmPageSize(), reinterpret_cast<void*>(offset))); 69 void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr()) + offset; 70 m_largeFree.add(LargeRange(base, gigacageSize() - offset, 0, 0)); 65 71 } 66 72 #endif -
trunk/Source/bmalloc/bmalloc/Sizes.h
r240171 r240175 41 41 42 42 namespace Sizes { 43 static const size_t kB = 1024; 44 static const size_t MB = kB * kB; 43 static constexpr size_t kB = 1024; 44 static constexpr size_t MB = kB * kB; 45 static constexpr size_t GB = kB * kB * kB; 45 46 46 static constsize_t alignment = 8;47 static constsize_t alignmentMask = alignment - 1ul;47 static constexpr size_t alignment = 8; 48 static constexpr size_t alignmentMask = alignment - 1ul; 48 49 49 static constsize_t chunkSize = 1 * MB;50 static constsize_t chunkMask = ~(chunkSize - 1ul);50 static constexpr size_t chunkSize = 1 * MB; 51 static constexpr size_t chunkMask = ~(chunkSize - 1ul); 51 52 52 static constsize_t smallLineSize = 256;53 static constsize_t smallPageSize = 4 * kB;54 static constsize_t smallPageLineCount = smallPageSize / smallLineSize;53 static constexpr size_t smallLineSize = 256; 54 static constexpr size_t smallPageSize = 4 * kB; 55 static constexpr size_t smallPageLineCount = smallPageSize / smallLineSize; 55 56 56 static constsize_t maskSizeClassMax = 512;57 static constsize_t smallMax = 32 * kB;57 static constexpr size_t maskSizeClassMax = 512; 58 static constexpr size_t smallMax = 32 * kB; 58 59 59 static constsize_t pageSizeMax = smallMax * 2;60 static constsize_t pageClassCount = pageSizeMax / smallPageSize;60 static constexpr size_t pageSizeMax = smallMax * 2; 61 static constexpr size_t pageClassCount = pageSizeMax / smallPageSize; 61 62 62 static constsize_t pageSizeWasteFactor = 8;63 static constsize_t logWasteFactor = 8;63 static constexpr size_t pageSizeWasteFactor = 8; 64 static constexpr size_t logWasteFactor = 8; 64 65 65 static constsize_t largeAlignment = smallMax / pageSizeWasteFactor;66 static constsize_t largeAlignmentMask = largeAlignment - 1;66 static constexpr size_t largeAlignment = smallMax / pageSizeWasteFactor; 67 static constexpr size_t largeAlignmentMask = largeAlignment - 1; 67 68 68 static const size_t deallocatorLogCapacity = 512; 69 static const size_t 
bumpRangeCacheCapacity = 3; 70 71 static const size_t scavengerBytesPerMemoryPressureCheck = 16 * MB; 72 static const double memoryPressureThreshold = 0.75; 73 74 static const size_t maskSizeClassCount = maskSizeClassMax / alignment; 69 static constexpr size_t deallocatorLogCapacity = 512; 70 static constexpr size_t bumpRangeCacheCapacity = 3; 75 71 76 constexpr size_t maskSizeClass(size_t size) 77 { 78 // We mask to accommodate zero. 79 return mask((size - 1) / alignment, maskSizeClassCount - 1); 80 } 72 static constexpr size_t scavengerBytesPerMemoryPressureCheck = 16 * MB; 73 static constexpr double memoryPressureThreshold = 0.75; 81 74 82 inline size_t maskObjectSize(size_t maskSizeClass) 83 { 84 return (maskSizeClass + 1) * alignment; 85 } 75 static constexpr size_t maskSizeClassCount = maskSizeClassMax / alignment; 86 76 87 static const size_t logAlignmentMin = maskSizeClassMax / logWasteFactor; 77 constexpr size_t maskSizeClass(size_t size) 78 { 79 // We mask to accommodate zero. 
80 return mask((size - 1) / alignment, maskSizeClassCount - 1); 81 } 88 82 89 static const size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor; 83 inline size_t maskObjectSize(size_t maskSizeClass) 84 { 85 return (maskSizeClass + 1) * alignment; 86 } 90 87 91 inline size_t logSizeClass(size_t size) 92 { 93 size_t base = log2(size - 1) - log2(maskSizeClassMax); 94 size_t offset = (size - 1 - (maskSizeClassMax << base)); 95 return base * logWasteFactor + offset / (logAlignmentMin << base); 96 } 88 static constexpr size_t logAlignmentMin = maskSizeClassMax / logWasteFactor; 97 89 98 inline size_t logObjectSize(size_t logSizeClass) 99 { 100 size_t base = logSizeClass / logWasteFactor; 101 size_t offset = logSizeClass % logWasteFactor; 102 return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base); 103 } 90 static constexpr size_t logSizeClassCount = (log2(smallMax) - log2(maskSizeClassMax)) * logWasteFactor; 104 91 105 static const size_t sizeClassCount = maskSizeClassCount + logSizeClassCount; 92 inline size_t logSizeClass(size_t size) 93 { 94 size_t base = log2(size - 1) - log2(maskSizeClassMax); 95 size_t offset = (size - 1 - (maskSizeClassMax << base)); 96 return base * logWasteFactor + offset / (logAlignmentMin << base); 97 } 106 98 107 inline size_t sizeClass(size_t size)108 109 if (size <= maskSizeClassMax)110 return maskSizeClass(size);111 return maskSizeClassCount + logSizeClass(size);112 99 inline size_t logObjectSize(size_t logSizeClass) 100 { 101 size_t base = logSizeClass / logWasteFactor; 102 size_t offset = logSizeClass % logWasteFactor; 103 return (maskSizeClassMax << base) + (offset + 1) * (logAlignmentMin << base); 104 } 113 105 114 inline size_t objectSize(size_t sizeClass) 115 { 116 if (sizeClass < maskSizeClassCount) 117 return maskObjectSize(sizeClass); 118 return logObjectSize(sizeClass - maskSizeClassCount); 119 } 120 121 inline size_t pageSize(size_t pageClass) 122 { 123 return (pageClass + 1) 
* smallPageSize; 124 } 106 static constexpr size_t sizeClassCount = maskSizeClassCount + logSizeClassCount; 107 108 inline size_t sizeClass(size_t size) 109 { 110 if (size <= maskSizeClassMax) 111 return maskSizeClass(size); 112 return maskSizeClassCount + logSizeClass(size); 125 113 } 114 115 inline size_t objectSize(size_t sizeClass) 116 { 117 if (sizeClass < maskSizeClassCount) 118 return maskObjectSize(sizeClass); 119 return logObjectSize(sizeClass - maskSizeClassCount); 120 } 121 122 inline size_t pageSize(size_t pageClass) 123 { 124 return (pageClass + 1) * smallPageSize; 125 } 126 } // namespace Sizes 126 127 127 128 using namespace Sizes;
Note: See TracChangeset for help on using the changeset viewer.