Changeset 215340 in webkit
- Timestamp:
- Apr 13, 2017 2:48:42 PM (7 years ago)
- Location:
- trunk
- Files:
-
- 3 added
- 35 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/JSTests/ChangeLog
r215312 r215340 1 2017-04-13 JF Bastien <jfbastien@apple.com> 2 3 WebAssembly: manage memory better 4 https://bugs.webkit.org/show_bug.cgi?id=170628 5 6 Reviewed by Keith Miller, Michael Saboff. 7 8 * wasm/Builder.js: move a helper out so tests can use it 9 (export.default.Builder.prototype._registerSectionBuilders.const.section.in.WASM.description.section.switch.section.case.string_appeared_here.this.section): 10 * wasm/WASM.js: add utilities to classify opcodes 11 (export.opcodes): 12 (export.const.memoryAccessInfo.op.const.sign): 13 * wasm/function-tests/memory-access-past-4gib.js: Added. This test 14 fails before this patch. 15 (const.op.of.WASM.opcodes): 16 * wasm/function-tests/memory-many.js: Added. This simple tests 17 just shouldn't crash. In verbose mode it's useful at determining 18 if the GC falls behind or not. 19 * wasm/function-tests/memory-multiagent.js: Added. Emulate postMessage. 20 (const.startAgents.numAgentsToStart.a.agent.receiveBroadcast): 21 (const.startAgents.numAgentsToStart.a.write.const.idx.Math.random): 22 (const.broadcastToAgents): 23 * wasm/js-api/extension-MemoryMode.js: verbose logging. 24 (testMemoryNoMax): 25 (testMemory): 26 (testInstanceNoMemory): 27 (testInstanceNoMax): 28 (testInstance): 29 * wasm/utilities.js: move a utility here. 30 1 31 2017-04-12 Joseph Pecoraro <pecoraro@apple.com> 2 32 -
trunk/JSTests/wasm/Builder.js
r213745 r215340 28 28 import * as LLB from 'LowLevelBinary.js'; 29 29 import * as WASM from 'WASM.js'; 30 31 const _toJavaScriptName = name => { 32 const camelCase = name.replace(/([^a-z0-9].)/g, c => c[1].toUpperCase()); 33 const CamelCase = camelCase.charAt(0).toUpperCase() + camelCase.slice(1); 34 return CamelCase; 35 }; 30 import * as util from 'utilities.js'; 36 31 37 32 const _isValidValue = (value, type) => { … … 226 221 }; 227 222 for (let op of WASM.description.value_type) { 228 globalBuilder[ _toJavaScriptName(op)] = (module, field, mutability) => {223 globalBuilder[util.toJavaScriptName(op)] = (module, field, mutability) => { 229 224 assert.isString(module, `Import global module should be a string, got "${module}"`); 230 225 assert.isString(field, `Import global field should be a string, got "${field}"`); … … 319 314 let functionBuilder = {}; 320 315 for (const op in WASM.description.opcode) { 321 const name = _toJavaScriptName(op);316 const name = util.toJavaScriptName(op); 322 317 const value = WASM.description.opcode[op].value; 323 318 const ret = WASM.description.opcode[op]["return"]; … … 543 538 }; 544 539 for (let op of WASM.description.value_type) { 545 globalBuilder[ _toJavaScriptName(op)] = (initValue, mutability) => {540 globalBuilder[util.toJavaScriptName(op)] = (initValue, mutability) => { 546 541 s.data.push({ type: op, op: op + ".const", mutability: _normalizeMutability(mutability), initValue }); 547 542 return _errorHandlingProxyFor(globalBuilder); -
trunk/JSTests/wasm/WASM.js
r209306 r215340 45 45 export const sections = Object.keys(description.section); 46 46 export const sectionEncodingType = description.section[sections[0]].type; 47 48 export function* opcodes(category = undefined) { 49 for (let op in description.opcode) 50 if (category !== undefined && description.opcode[op].category === category) 51 yield { name: op, opcode: description.opcode[op] }; 52 }; 53 export const memoryAccessInfo = op => { 54 // <-----------valueType-----------> <-------type-------><---------width--------> <--sign--> 55 const classify = /((?:i32)|(?:i64)|(?:f32)|(?:f64))\.((?:load)|(?:store))((?:8)?|(?:16)?|(?:32)?)_?((?:s|u)?)/; 56 const found = op.name.match(classify); 57 const valueType = found[1]; 58 const type = found[2]; 59 const width = parseInt(found[3] ? found[3] : valueType.slice(1)); 60 const sign = (() => { 61 switch (found[4]) { 62 case "s": return "signed"; 63 case "u": return "unsigned"; 64 default: return "agnostic"; 65 } 66 })(); 67 return { valueType, type, width, sign }; 68 }; 69 70 export const constForValueType = valueType => { 71 for (let op in description.opcode) 72 if (op.endsWith(".const") && description.opcode[op]["return"] == valueType) 73 return op; 74 throw new Error(`Implementation problem: no const type for ${valueType}`); 75 }; 76 -
trunk/JSTests/wasm/js-api/extension-MemoryMode.js
r214547 r215340 3 3 4 4 const iterations = 32; 5 const verbose = false; 5 6 6 7 // This API isn't part of WebAssembly's official spec. It is use for testing within the shell. … … 22 23 const validateMode = what => { 23 24 const mode = WebAssemblyMemoryMode(what); 25 if (verbose) 26 print(` ${mode}`); 24 27 switch (mode) { 25 28 case "Signaling": … … 40 43 41 44 (function testMemoryNoMax() { 45 if (verbose) 46 print(`testMemoryNoMax`); 42 47 let memories = []; 43 48 for (let i = 0; i != iterations; ++i) … … 49 54 50 55 (function testMemory() { 56 if (verbose) 57 print(`testMemory`); 51 58 let memories = []; 52 59 for (let i = 0; i != iterations; ++i) … … 58 65 59 66 (function testInstanceNoMemory() { 67 if (verbose) 68 print(`testInstanceNoMemory`); 60 69 let instances = []; 61 70 for (let i = 0; i != iterations; ++i) { … … 67 76 // No-memory instances should never be Signaling: it would be wasteful. 68 77 assert.eq(WebAssemblyMemoryMode(instance), "BoundsChecking"); 78 if (verbose) 79 print(` ${WebAssemblyMemoryMode(instance)}`); 69 80 instances.push(instance); 70 81 } … … 75 86 76 87 (function testInstanceNoMax() { 88 if (verbose) 89 print(`testInstanceNoMax`); 77 90 let instances = []; 78 91 for (let i = 0; i != iterations; ++i) { … … 90 103 91 104 (function testInstance() { 105 if (verbose) 106 print(`testInstance`); 92 107 let instances = []; 93 108 for (let i = 0; i != iterations; ++i) { -
trunk/JSTests/wasm/utilities.js
r209123 r215340 80 80 }; 81 81 82 export const toJavaScriptName = name => { 83 const camelCase = name.replace(/([^a-z0-9].)/g, c => c[1].toUpperCase()); 84 const CamelCase = camelCase.charAt(0).toUpperCase() + camelCase.slice(1); 85 return CamelCase; 86 }; 87 82 88 // Use underscore names to avoid clashing with builtin names. 83 89 export { -
trunk/Source/JavaScriptCore/ChangeLog
r215318 r215340 1 2017-04-13 JF Bastien <jfbastien@apple.com> 2 3 WebAssembly: manage memory better 4 https://bugs.webkit.org/show_bug.cgi?id=170628 5 6 Reviewed by Keith Miller, Michael Saboff. 7 8 WebAssembly fast memories weren't managed very well. This patch 9 refactors it and puts us in a good position to further improve our 10 fast memory handling in the future. 11 12 We now cache fast memories at a process granularity, but make sure 13 that they don't consume dirty pages. We add a cap to the total 14 number of allocated fast memories to avoid ASLR degradation. 15 16 We teach the GC about memories as a kind of resource it should 17 care about because it didn't have visibility into the amount of 18 memory each represented. This allows benchmarks which allocate 19 memories back-to-back to reliably get fast memories 100% of the 20 time, even on a system under load, which wasn't the case 21 before. This reliability yields roughly 8% perf bump on x86-64 22 WasmBench. 23 24 The GC heuristic is as follows: each time we allocate a fast 25 memory we notify the GC, which then keeps track of the total 26 number of fast memories allocated since it last GC'd. We 27 separately keep track of the total number of fast memories which 28 have ever existed at any point in time (cached + allocated). This 29 is a monotonically-increasing high watermark. The GC will force a 30 full collection if, since it last ran, half or more of the high 31 watermark of fast memories was allocated. 32 33 At the same time, if we fail obtaining a fast memory from the 34 cache we do a GC to try to find one. If that fails we'll allocate 35 a new one (this can also fail, then we go to slow memory). This 36 can also be improved, but it's a good start. 37 38 This currently disables fast memories on iOS because getting fast 39 memories isn't a guaranteed thing. 
Rather, we get quite a few of 40 them and achieve significant speedups, but benchmarks which 41 allocate memories back-to-back end up falling behind because the 42 GC can conservatively hold onto memories, which then yields a perf 43 cliff. That cliff isn't reliable, WasmBench gets roughly 10 of 18 44 fast memories when in theory it should get all of them fast (as 45 MacOS does). The patch significantly improves the state of iOS 46 though, and in a follow-up we could re-enable fast memories. 47 48 Part of this good positioning is a facility to pre-allocate fast 49 memories very early at startup, before any fragmentation 50 occurs. This is currently disabled but worked extremely reliably 51 on iOS. Once we fix the above issues we'll want to re-visit and 52 turn on pre-allocation. 53 54 We also avoid locking for fast memory identification when 55 performing signal handling. I'm very nervous about acquiring locks 56 in a signal handler because in general signals can happen when 57 we've messed up. This isn't the case with fast memories: we're 58 raising a signal on purpose and handling it. However this doesn't 59 mean we won't mess up elsewhere! This will get more complicated 60 once we add support for multiple threads sharing memories and 61 being able to grow their memories. One example: the code calls 62 CRASH(), which executes the following code in release: 63 64 *(int *)(uintptr_t)0xbbadbeef = 0; 65 66 This is a segfault, which our fast memory signal handler tries to 67 handle. It does so by first figuring out whether 0xbbadbeef is in 68 a fast memory region, reqiring a lock. If we CRASH() while holding 69 the lock then our thread self-deadlocks, giving us no crash report 70 and a bad user experience. 71 72 Avoiding a lock therefore it's not about speed or reduced 73 contention. In fact, I'd use something else than a FIFO if these 74 were a concern. We're also doing syscalls, which dwarf any locking 75 cost. 
76 77 We now only allocate 4GiB + redzone of 64k * 128 for fast memories 78 instead of 8GiB. This patch reuses the logic from 79 B3::WasmBoundsCheck to perform bounds checks when accesses could 80 exceed the redzone. We'll therefore benefit from CSE goodness when 81 it reaches WasmBoundsCheck. See bug #163469. 82 83 * b3/B3LowerToAir.cpp: fix a baaaaddd bug where unsigned->signed 84 conversion allowed out-of-bounds reads by -2GiB. I'll follow-up in 85 bug #170692 to prevent this type of bug once and for all. 86 (JSC::B3::Air::LowerToAir::lower): 87 * b3/B3Validate.cpp: update WasmBoundsCheck validation. 88 * b3/B3Value.cpp: 89 (JSC::B3::Value::effects): update WasmBoundsCheck effects. 90 * b3/B3WasmBoundsCheckValue.cpp: 91 (JSC::B3::WasmBoundsCheckValue::WasmBoundsCheckValue): 92 (JSC::B3::WasmBoundsCheckValue::redzoneLimit): 93 (JSC::B3::WasmBoundsCheckValue::dumpMeta): 94 * b3/B3WasmBoundsCheckValue.h: 95 (JSC::B3::WasmBoundsCheckValue::maximum): 96 * b3/air/AirCustom.cpp: 97 (JSC::B3::Air::WasmBoundsCheckCustom::isValidForm): 98 * b3/testb3.cpp: 99 (JSC::B3::testWasmBoundsCheck): 100 * heap/Heap.cpp: 101 (JSC::Heap::Heap): 102 (JSC::Heap::reportWebAssemblyFastMemoriesAllocated): 103 (JSC::Heap::webAssemblyFastMemoriesThisCycleAtThreshold): 104 (JSC::Heap::updateAllocationLimits): 105 (JSC::Heap::didAllocateWebAssemblyFastMemories): 106 (JSC::Heap::shouldDoFullCollection): 107 (JSC::Heap::collectIfNecessaryOrDefer): 108 * heap/Heap.h: 109 * runtime/InitializeThreading.cpp: 110 (JSC::initializeThreading): 111 * runtime/Options.cpp: 112 * runtime/Options.h: 113 * wasm/WasmB3IRGenerator.cpp: 114 (JSC::Wasm::B3IRGenerator::fixupPointerPlusOffset): 115 (JSC::Wasm::B3IRGenerator::B3IRGenerator): 116 (JSC::Wasm::B3IRGenerator::emitCheckAndPreparePointer): 117 (JSC::Wasm::B3IRGenerator::emitLoadOp): 118 (JSC::Wasm::B3IRGenerator::emitStoreOp): 119 (JSC::Wasm::createJSToWasmWrapper): 120 * wasm/WasmFaultSignalHandler.cpp: 121 (JSC::Wasm::trapHandler): 122 * 
wasm/WasmMemory.cpp: Rewrite. 123 (JSC::Wasm::makeString): 124 (JSC::Wasm::Memory::initializePreallocations): 125 (JSC::Wasm::Memory::createImpl): 126 (JSC::Wasm::Memory::create): 127 (JSC::Wasm::Memory::~Memory): 128 (JSC::Wasm::Memory::fastMappedRedzoneBytes): 129 (JSC::Wasm::Memory::fastMappedBytes): 130 (JSC::Wasm::Memory::maxFastMemoryCount): 131 (JSC::Wasm::Memory::addressIsInActiveFastMemory): 132 (JSC::Wasm::Memory::grow): 133 * wasm/WasmMemory.h: 134 (Memory::maxFastMemoryCount): 135 (Memory::addressIsInActiveFastMemory): 136 * wasm/js/JSWebAssemblyInstance.cpp: 137 (JSC::JSWebAssemblyInstance::finishCreation): 138 (JSC::JSWebAssemblyInstance::visitChildren): 139 (JSC::JSWebAssemblyInstance::globalMemoryByteSize): 140 * wasm/js/JSWebAssemblyInstance.h: 141 * wasm/js/JSWebAssemblyMemory.cpp: 142 (JSC::JSWebAssemblyMemory::grow): 143 (JSC::JSWebAssemblyMemory::finishCreation): 144 (JSC::JSWebAssemblyMemory::visitChildren): 145 1 146 2017-04-13 Yusuke Suzuki <utatane.tea@gmail.com> 2 147 -
trunk/Source/JavaScriptCore/b3/B3LowerToAir.cpp
r214908 r215340 3145 3145 3146 3146 case B3::WasmBoundsCheck: { 3147 #if ENABLE(WEBASSEMBLY) 3147 3148 WasmBoundsCheckValue* value = m_value->as<WasmBoundsCheckValue>(); 3148 3149 3149 3150 Value* ptr = value->child(0); 3150 3151 3151 Arg temp= m_code.newTmp(GP);3152 append(Inst(Move32, value, tmp(ptr), temp));3152 Arg ptrPlusImm = m_code.newTmp(GP); 3153 append(Inst(Move32, value, tmp(ptr), ptrPlusImm)); 3153 3154 if (value->offset()) { 3154 3155 if (imm(value->offset())) 3155 append(Add64, imm(value->offset()), temp);3156 append(Add64, imm(value->offset()), ptrPlusImm); 3156 3157 else { 3157 3158 Arg bigImm = m_code.newTmp(GP); 3158 3159 append(Move, Arg::bigImm(value->offset()), bigImm); 3159 append(Add64, bigImm, temp); 3160 } 3161 } 3162 append(Inst(Air::WasmBoundsCheck, value, temp, Arg(value->pinnedGPR()))); 3160 append(Add64, bigImm, ptrPlusImm); 3161 } 3162 } 3163 3164 Arg limit; 3165 if (value->pinnedGPR() != InvalidGPRReg) 3166 limit = Arg(value->pinnedGPR()); 3167 else { 3168 // Signaling memories don't pin a register because only the accesses whose reg+imm could ever overflow 4GiB+redzone need to be checked, 3169 // and we don't think these will be frequent. All other accesses will trap due to PROT_NONE pages. 3170 // 3171 // If we got here it's because a memory access had a very large offset. We could check that it doesn't exceed 4GiB+redzone since that's 3172 // technically the limit we need to avoid overflowing, but it's better if we use a smaller immediate which codegens more easily. 3173 // We know that anything above the declared 'maximum' will trap, so we can compare against that number. If there was no declared 3174 // 'maximum' then we still know that any access above 4GiB will trap, no need to add the redzone. 3175 limit = m_code.newTmp(GP); 3176 size_t limitValue = value->maximum() ? 
value->maximum().bytes() : std::numeric_limits<uint32_t>::max(); 3177 ASSERT(limitValue <= value->redzoneLimit()); 3178 if (imm(limitValue)) 3179 append(Move, imm(limitValue), limit); 3180 else 3181 append(Move, Arg::bigImm(limitValue), limit); 3182 } 3183 append(Inst(Air::WasmBoundsCheck, value, ptrPlusImm, limit)); 3184 #else 3185 append(Air::Oops); 3186 #endif // ENABLE(WEBASSEMBLY) 3163 3187 return; 3164 3188 } -
trunk/Source/JavaScriptCore/b3/B3Validate.cpp
r213714 r215340 471 471 VALIDATE(value->numChildren() == 1, ("At ", *value)); 472 472 VALIDATE(value->child(0)->type() == Int32, ("At ", *value)); 473 VALIDATE(m_procedure.code().isPinned(value->as<WasmBoundsCheckValue>()->pinnedGPR()), ("At ", *value)); 473 if (value->as<WasmBoundsCheckValue>()->pinnedGPR() != InvalidGPRReg) 474 VALIDATE(m_procedure.code().isPinned(value->as<WasmBoundsCheckValue>()->pinnedGPR()), ("At ", *value)); 474 475 VALIDATE(m_procedure.code().wasmBoundsCheckGenerator(), ("At ", *value)); 475 476 break; -
trunk/Source/JavaScriptCore/b3/B3Value.cpp
r214529 r215340 44 44 #include "B3ValueKeyInlines.h" 45 45 #include "B3VariableValue.h" 46 #include "B3WasmBoundsCheckValue.h" 46 47 #include <wtf/CommaPrinter.h> 47 48 #include <wtf/ListDump.h> … … 665 666 break; 666 667 case WasmBoundsCheck: 667 result.readsPinned = true; 668 if (as<WasmBoundsCheckValue>()->pinnedGPR() != InvalidGPRReg) 669 result.readsPinned = true; 668 670 result.exitsSideways = true; 669 671 break; -
trunk/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.cpp
r207266 r215340 1 1 /* 2 * Copyright (C) 2016 Apple Inc. All rights reserved.2 * Copyright (C) 2016-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 26 26 #include "config.h" 27 27 #include "B3WasmBoundsCheckValue.h" 28 #include "WasmMemory.h" 28 29 29 30 #if ENABLE(B3_JIT) … … 35 36 } 36 37 37 WasmBoundsCheckValue::WasmBoundsCheckValue(Origin origin, Value* ptr, GPRReg pinnedGPR, unsigned offset )38 WasmBoundsCheckValue::WasmBoundsCheckValue(Origin origin, Value* ptr, GPRReg pinnedGPR, unsigned offset, PageCount maximum) 38 39 : Value(CheckedOpcode, WasmBoundsCheck, origin, ptr) 39 40 , m_pinnedGPR(pinnedGPR) 40 41 , m_offset(offset) 42 , m_maximum(maximum) 41 43 { 42 44 } … … 47 49 } 48 50 51 size_t WasmBoundsCheckValue::redzoneLimit() const 52 { 53 ASSERT(m_pinnedGPR == InvalidGPRReg); 54 #if ENABLE(WEBASSEMBLY) 55 return static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + Wasm::Memory::fastMappedRedzoneBytes(); 56 #else 57 RELEASE_ASSERT_NOT_REACHED(); 58 #endif 59 } 60 49 61 void WasmBoundsCheckValue::dumpMeta(CommaPrinter& comma, PrintStream& out) const 50 62 { 51 out.print(comma, "sizeRegister = ", m_pinnedGPR, ", offset = ", m_offset); 63 if (m_pinnedGPR == InvalidGPRReg) 64 out.print(comma, "redzoneLimit = ", redzoneLimit(), ", offset = ", m_offset); 65 else 66 out.print(comma, "sizeRegister = ", m_pinnedGPR, ", offset = ", m_offset); 52 67 } 53 68 -
trunk/Source/JavaScriptCore/b3/B3WasmBoundsCheckValue.h
r207266 r215340 1 1 /* 2 * Copyright (C) 2015-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2015-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 30 30 #include "B3Value.h" 31 31 #include "CCallHelpers.h" 32 #include "WasmPageCount.h" 32 33 33 34 namespace JSC { namespace B3 { … … 47 48 ~WasmBoundsCheckValue(); 48 49 50 #if ENABLE(WEBASSEMBLY) 51 typedef Wasm::PageCount PageCount; 52 #else 53 typedef char PageCount; 54 #endif 55 49 56 GPRReg pinnedGPR() const { return m_pinnedGPR; } 50 57 unsigned offset() const { return m_offset; } 58 size_t redzoneLimit() const; 59 PageCount maximum() const { return m_maximum; } 51 60 52 61 protected: … … 58 67 friend class Procedure; 59 68 60 JS_EXPORT_PRIVATE WasmBoundsCheckValue(Origin, Value* ptr, GPRReg pinnedGPR, unsigned offset );69 JS_EXPORT_PRIVATE WasmBoundsCheckValue(Origin, Value* ptr, GPRReg pinnedGPR, unsigned offset, PageCount maximum); 61 70 62 71 GPRReg m_pinnedGPR; 63 72 unsigned m_offset; 73 PageCount m_maximum; 64 74 }; 65 75 -
trunk/Source/JavaScriptCore/b3/air/AirCustom.cpp
r212970 r215340 186 186 return false; 187 187 188 return inst.args[1].isReg() ;188 return inst.args[1].isReg() || inst.args[1].isTmp() || inst.args[1].isSomeImm(); 189 189 } 190 190 -
trunk/Source/JavaScriptCore/b3/testb3.cpp
r215292 r215340 15186 15186 if (pointerType() != Int32) 15187 15187 left = root->appendNew<Value>(proc, Trunc, Origin(), left); 15188 root->appendNew<WasmBoundsCheckValue>(proc, Origin(), left, pinned, offset); 15188 Wasm::PageCount maximum; 15189 root->appendNew<WasmBoundsCheckValue>(proc, Origin(), left, pinned, offset, maximum); 15189 15190 Value* result = root->appendNew<Const32Value>(proc, Origin(), 0x42); 15190 15191 root->appendNewControlValue(proc, Return, Origin(), result); -
trunk/Source/JavaScriptCore/heap/Heap.cpp
r215265 r215340 63 63 #include "UnlinkedCodeBlock.h" 64 64 #include "VM.h" 65 #include "WasmMemory.h" 65 66 #include "WeakSetInlines.h" 66 67 #include <algorithm> … … 251 252 , m_sizeBeforeLastEdenCollect(0) 252 253 , m_bytesAllocatedThisCycle(0) 254 , m_webAssemblyFastMemoriesAllocatedThisCycle(0) 253 255 , m_bytesAbandonedSinceLastFullCollect(0) 254 256 , m_maxEdenSize(m_minBytesPerCycle) … … 465 467 m_deprecatedExtraMemorySize = UNLIKELY(checkedNewSize.hasOverflowed()) ? std::numeric_limits<size_t>::max() : checkedNewSize.unsafeGet(); 466 468 reportExtraMemoryAllocatedSlowCase(size); 469 } 470 471 void Heap::reportWebAssemblyFastMemoriesAllocated(size_t count) 472 { 473 didAllocateWebAssemblyFastMemories(count); 474 collectIfNecessaryOrDefer(); 475 } 476 477 bool Heap::webAssemblyFastMemoriesThisCycleAtThreshold() const 478 { 479 // WebAssembly fast memories use large amounts of virtual memory and we 480 // don't know how many can exist in this process. We keep track of the most 481 // fast memories that have existed at any point in time. The GC uses this 482 // top watermark as an indication of whether recent allocations should cause 483 // a collection: get too close and we may be close to the actual limit. 
484 size_t fastMemoryThreshold = std::max<size_t>(1, Wasm::Memory::maxFastMemoryCount() / 2); 485 return m_webAssemblyFastMemoriesAllocatedThisCycle > fastMemoryThreshold; 467 486 } 468 487 … … 2104 2123 dataLog("\n"); 2105 2124 dataLog("bytesAllocatedThisCycle = ", m_bytesAllocatedThisCycle, "\n"); 2125 dataLog("webAssemblyFastMemoriesAllocatedThisCycle = ", m_webAssemblyFastMemoriesAllocatedThisCycle, "\n"); 2106 2126 } 2107 2127 … … 2181 2201 dataLog("sizeAfterLastCollect = ", m_sizeAfterLastCollect, "\n"); 2182 2202 m_bytesAllocatedThisCycle = 0; 2203 m_webAssemblyFastMemoriesAllocatedThisCycle = 0; 2183 2204 2184 2205 if (Options::logGC()) … … 2254 2275 } 2255 2276 2277 void Heap::didAllocateWebAssemblyFastMemories(size_t count) 2278 { 2279 m_webAssemblyFastMemoriesAllocatedThisCycle += count; 2280 } 2281 2256 2282 bool Heap::isValidAllocation(size_t) 2257 2283 { … … 2306 2332 2307 2333 if (!scope) 2308 return m_shouldDoFullCollection ;2334 return m_shouldDoFullCollection || webAssemblyFastMemoriesThisCycleAtThreshold(); 2309 2335 return *scope == CollectionScope::Full; 2310 2336 } … … 2457 2483 return; 2458 2484 } else { 2459 if (m_bytesAllocatedThisCycle <= m_maxEdenSize) 2485 if (!webAssemblyFastMemoriesThisCycleAtThreshold() 2486 && m_bytesAllocatedThisCycle <= m_maxEdenSize) 2460 2487 return; 2461 2488 } -
trunk/Source/JavaScriptCore/heap/Heap.h
r215265 r215340 200 200 JS_EXPORT_PRIVATE void reportExtraMemoryVisited(size_t); 201 201 202 // Same as above, but for uncommitted virtual memory allocations caused by 203 // WebAssembly fast memories. This is counted separately because virtual 204 // memory is logically a different type of resource than committed physical 205 // memory. We can often allocate huge amounts of virtual memory (think 206 // gigabytes) without adversely affecting regular GC'd memory. At some point 207 // though, too much virtual memory becomes prohibitive and we want to 208 // collect GC-able objects which keep this virtual memory alive. 209 // This is counted in number of fast memories, not bytes. 210 void reportWebAssemblyFastMemoriesAllocated(size_t); 211 bool webAssemblyFastMemoriesThisCycleAtThreshold() const; 212 202 213 #if ENABLE(RESOURCE_USAGE) 203 214 // Use this API to report the subset of extra memory that lives outside this process. … … 249 260 250 261 void didAllocate(size_t); 262 void didAllocateWebAssemblyFastMemories(size_t); 251 263 bool isPagedOut(double deadline); 252 264 … … 527 539 528 540 size_t m_bytesAllocatedThisCycle; 541 size_t m_webAssemblyFastMemoriesAllocatedThisCycle; 529 542 size_t m_bytesAbandonedSinceLastFullCollect; 530 543 size_t m_maxEdenSize; -
trunk/Source/JavaScriptCore/runtime/InitializeThreading.cpp
r214645 r215340 1 1 /* 2 * Copyright (C) 2008, 2015-201 6Apple Inc. All rights reserved.2 * Copyright (C) 2008, 2015-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 40 40 #include "StructureIDTable.h" 41 41 #include "SuperSampler.h" 42 #include "WasmMemory.h" 42 43 #include "WasmThunks.h" 43 44 #include "WriteBarrier.h" … … 59 60 WTF::initializeThreading(); 60 61 Options::initialize(); 62 #if ENABLE(WEBASSEMBLY) 63 Wasm::Memory::initializePreallocations(); 64 #endif 61 65 #if ENABLE(WRITE_BARRIER_PROFILING) 62 66 WriteBarrierCounters::initialize(); -
trunk/Source/JavaScriptCore/runtime/Options.cpp
r215318 r215340 60 60 bool restrictedOptionsEnabled = true; 61 61 #endif 62 63 ALWAYS_INLINE bool isIOS() 64 { 65 #if PLATFORM(IOS) 66 return true; 67 #else 68 return false; 69 #endif 70 } 62 71 } 63 72 -
trunk/Source/JavaScriptCore/runtime/Options.h
r215292 r215340 437 437 v(unsigned, webAssemblyB3OptimizationLevel, Options::defaultB3OptLevel(), Normal, "B3 Optimization level for Web Assembly modules.") \ 438 438 \ 439 v(bool, simulateWebAssemblyLowMemory, false, Normal, "If true, the Memory object won't mmap the full 'maximum' range and instead will allocate the minimum required amount.") \ 440 v(bool, useWebAssemblyFastMemory, true, Normal, "If true, we will try to use a 32-bit address space with a signal handler to bounds check wasm memory.") \ 439 /* FIXME: enable fast memories on iOS and pre-allocate them. https://bugs.webkit.org/show_bug.cgi?id=170774 */ \ 440 v(bool, useWebAssemblyFastMemory, !isIOS(), Normal, "If true, we will try to use a 32-bit address space with a signal handler to bounds check wasm memory.") \ 441 v(unsigned, webAssemblyFastMemoryRedzonePages, 128, Normal, "WebAssembly fast memories use 4GiB virtual allocations, plus a redzone (counted as multiple of 64KiB WebAssembly pages) at the end to catch reg+imm accesses which exceed 32-bit, anything beyond the redzone is explicitly bounds-checked") \ 441 442 v(bool, crashIfWebAssemblyCantFastMemory, false, Normal, "If true, we will crash if we can't obtain fast memory for wasm.") \ 443 v(unsigned, webAssemblyFastMemoryPreallocateCount, 0, Normal, "WebAssembly fast memories can be pre-allocated at program startup and remain cached to avoid fragmentation leading to bounds-checked memory. This number is an upper bound on initial allocation as well as total count of fast memories. Zero means no pre-allocation, no caching, and no limit to the number of runtime allocations.") \ 442 444 v(bool, useWebAssemblyFastTLS, true, Normal, "If true, we will try to use fast thread-local storage if available on the current platform.") 443 445 -
trunk/Source/JavaScriptCore/wasm/WasmB3IRGenerator.cpp
r215292 r215340 237 237 void emitChecksForModOrDiv(B3::Opcode, ExpressionType left, ExpressionType right); 238 238 239 void fixupPointerPlusOffset(ExpressionType&, uint32_t&); 240 239 241 Value* materializeWasmContext(Procedure&, BasicBlock*); 240 242 void restoreWasmContext(Procedure&, BasicBlock*, Value*); … … 258 260 }; 259 261 262 // Memory accesses in WebAssembly have unsigned 32-bit offsets, whereas they have signed 32-bit offsets in B3. 263 void B3IRGenerator::fixupPointerPlusOffset(ExpressionType& ptr, uint32_t& offset) 264 { 265 if (static_cast<uint64_t>(offset) > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) { 266 ptr = m_currentBlock->appendNew<Value>(m_proc, Add, origin(), ptr, m_currentBlock->appendNew<Const64Value>(m_proc, origin(), offset)); 267 offset = 0; 268 } 269 } 270 260 271 Value* B3IRGenerator::materializeWasmContext(Procedure& proc, BasicBlock* block) 261 272 { … … 341 352 m_proc.setWasmBoundsCheckGenerator([=] (CCallHelpers& jit, GPRReg pinnedGPR, unsigned) { 342 353 AllowMacroScratchRegisterUsage allowScratch(jit); 343 ASSERT_UNUSED(pinnedGPR, m_memorySizeGPR == pinnedGPR); 354 switch (m_mode) { 355 case MemoryMode::BoundsChecking: 356 ASSERT_UNUSED(pinnedGPR, m_memorySizeGPR == pinnedGPR); 357 break; 358 case MemoryMode::Signaling: 359 ASSERT_UNUSED(pinnedGPR, InvalidGPRReg == pinnedGPR); 360 break; 361 case MemoryMode::NumberOfMemoryModes: 362 ASSERT_NOT_REACHED(); 363 } 344 364 this->emitExceptionCheck(jit, ExceptionType::OutOfBoundsMemoryAccess); 345 365 }); … … 527 547 { 528 548 ASSERT(m_memoryBaseGPR); 529 if (m_mode == MemoryMode::BoundsChecking) { 549 switch (m_mode) { 550 case MemoryMode::BoundsChecking: 551 // We're not using signal handling at all, we must therefore check that no memory access exceeds the current memory size. 
530 552 ASSERT(m_memorySizeGPR); 531 553 ASSERT(sizeOfOperation + offset > offset); 532 m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), pointer, m_memorySizeGPR, sizeOfOperation + offset - 1); 554 m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), pointer, m_memorySizeGPR, sizeOfOperation + offset - 1, m_info.memory.maximum()); 555 break; 556 case MemoryMode::Signaling: 557 // We've virtually mapped 4GiB+redzone fo this memory. Only the user-allocated pages are addressable, contiguously in range [0, current], and everything above is mapped PROT_NONE. We don't need to perform any explicit bounds check in the 4GiB range because WebAssembly register memory accesses are 32-bit. However WebAssembly register+immediate accesses perform the addition in 64-bit which can push an access above the 32-bit limit. The redzone will catch most small immediates, and we'll explicitly bounds check any register + large immediate access. 558 if (offset >= Memory::fastMappedRedzoneBytes()) 559 m_currentBlock->appendNew<WasmBoundsCheckValue>(m_proc, origin(), pointer, InvalidGPRReg, sizeOfOperation + offset - 1, m_info.memory.maximum()); 560 break; 561 case MemoryMode::NumberOfMemoryModes: 562 RELEASE_ASSERT_NOT_REACHED(); 533 563 } 534 564 pointer = m_currentBlock->appendNew<Value>(m_proc, ZExt32, origin(), pointer); … … 570 600 inline Value* B3IRGenerator::emitLoadOp(LoadOpType op, ExpressionType pointer, uint32_t offset) 571 601 { 602 fixupPointerPlusOffset(pointer, offset); 603 572 604 switch (op) { 573 605 case LoadOpType::I32Load8S: { … … 707 739 inline void B3IRGenerator::emitStoreOp(StoreOpType op, ExpressionType pointer, ExpressionType value, uint32_t offset) 708 740 { 741 fixupPointerPlusOffset(pointer, offset); 742 709 743 switch (op) { 710 744 case StoreOpType::I64Store8: … … 1262 1296 compilationContext.jsEntrypointToWasmEntrypointCall = jit.call(); 1263 1297 1298 if (!!info.memory) { 1299 // Resetting the register prevents the GC from 
mistakenly thinking that the context is still live. 1300 GPRReg baseMemory = pinnedRegs.baseMemoryPointer; 1301 jit.move(CCallHelpers::TrustedImm32(0), baseMemory); 1302 } 1303 1264 1304 for (const RegisterAtOffset& regAtOffset : registersToSpill) { 1265 1305 GPRReg reg = regAtOffset.reg().gpr(); -
trunk/Source/JavaScriptCore/wasm/WasmFaultSignalHandler.cpp
r215318 r215340 71 71 void* faultingAddress = sigInfo->si_addr; 72 72 dataLogLnIf(verbose, "checking faulting address: ", RawPointer(faultingAddress), " is in an active fast memory"); 73 LockHolder locker(memoryLock); 74 auto& activeFastMemories = viewActiveFastMemories(locker); 75 for (void* activeMemory : activeFastMemories) { 76 dataLogLnIf(verbose, "checking fast memory at: ", RawPointer(activeMemory)); 77 if (activeMemory <= faultingAddress && faultingAddress < static_cast<char*>(activeMemory) + fastMemoryMappedBytes) { 78 faultedInActiveFastMemory = true; 79 break; 80 } 81 } 73 faultedInActiveFastMemory = Wasm::Memory::addressIsInActiveFastMemory(faultingAddress); 82 74 } 83 75 if (faultedInActiveFastMemory) { -
trunk/Source/JavaScriptCore/wasm/WasmMemory.cpp
r214826 r215340 30 30 31 31 #include "VM.h" 32 #include "WasmFaultSignalHandler.h"33 32 #include "WasmThunks.h" 34 33 35 #include <wtf/HexNumber.h> 34 #include <atomic> 35 #include <wtf/MonotonicTime.h> 36 36 #include <wtf/NeverDestroyed.h> 37 #include <wtf/Platform.h> 37 38 #include <wtf/PrintStream.h> 38 #include <wtf/ text/WTFString.h>39 #include <wtf/VMTags.h> 39 40 40 41 namespace JSC { namespace Wasm { 41 42 43 // FIXME: We could be smarter about memset / mmap / madvise. https://bugs.webkit.org/show_bug.cgi?id=170343 44 // FIXME: Give up some of the cached fast memories if the GC determines it's easy to get them back, and they haven't been used in a while. https://bugs.webkit.org/show_bug.cgi?id=170773 45 // FIXME: Limit slow memory size. https://bugs.webkit.org/show_bug.cgi?id=170825 46 42 47 namespace { 43 const bool verbose = false; 44 } 45 46 static NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntGetFastMemory() 47 { 48 CRASH(); 49 } 50 51 inline bool mmapBytes(size_t bytes, void*& memory) 52 { 53 dataLogIf(verbose, "Attempting to mmap ", bytes, " bytes: "); 54 // FIXME: It would be nice if we had a VM tag for wasm memory. 
https://bugs.webkit.org/show_bug.cgi?id=163600 55 void* result = mmap(nullptr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0); 56 if (result == MAP_FAILED) { 57 dataLogLnIf(verbose, "failed"); 58 return false; 59 } 60 dataLogLnIf(verbose, "succeeded"); 61 memory = result; 48 constexpr bool verbose = false; 49 50 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntGetFastMemory() { CRASH(); } 51 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnmapMemoryBytes() { CRASH(); } 52 NEVER_INLINE NO_RETURN_DUE_TO_CRASH void webAssemblyCouldntUnprotectMemory() { CRASH(); } 53 54 void* mmapBytes(size_t bytes) 55 { 56 #if OS(DARWIN) 57 int fd = VM_TAG_FOR_WEBASSEMBLY_MEMORY; 58 #else 59 int fd = -1; 60 #endif 61 62 void* location = mmap(nullptr, bytes, PROT_NONE, MAP_PRIVATE | MAP_ANON, fd, 0); 63 return location == MAP_FAILED ? nullptr : location; 64 } 65 66 void munmapBytes(void* memory, size_t size) 67 { 68 if (UNLIKELY(munmap(memory, size))) 69 webAssemblyCouldntUnmapMemoryBytes(); 70 } 71 72 void zeroAndUnprotectBytes(void* start, size_t bytes) 73 { 74 if (bytes) { 75 dataLogLnIf(verbose, "Zeroing and unprotecting ", bytes, " from ", RawPointer(start)); 76 // FIXME: We could be smarter about memset / mmap / madvise. Here, we may not need to act synchronously, or maybe we can memset+unprotect smaller ranges of memory (which would pay off if not all the writable memory was actually physically backed: memset forces physical backing only to unprotect it right after). https://bugs.webkit.org/show_bug.cgi?id=170343 77 memset(start, 0, bytes); 78 if (UNLIKELY(mprotect(start, bytes, PROT_NONE))) 79 webAssemblyCouldntUnprotectMemory(); 80 } 81 } 82 83 // Allocate fast memories very early at program startup and cache them. The fast memories use significant amounts of virtual uncommitted address space, reducing the likelihood that we'll obtain any if we wait to allocate them. 
84 // We still try to allocate fast memories at runtime, and will cache them when relinquished up to the preallocation limit. 85 // Note that this state is per-process, not per-VM. 86 // We use simple static globals which don't allocate to avoid early fragmentation and to keep management to the bare minimum. We avoid locking because fast memories use segfault signal handling to handle out-of-bounds accesses. This requires identifying if the faulting address is in a fast memory range, which should avoid acquiring a lock lest the actual signal was caused by this very code while it already held the lock. 87 // Speed and contention don't really matter here, but simplicity does. We therefore use straightforward FIFOs for our cache, and linear traversal for the list of currently active fast memories. 88 constexpr size_t fastMemoryCacheHardLimit { 16 }; 89 constexpr size_t fastMemoryAllocationSoftLimit { 32 }; // Prevents filling up the virtual address space. 90 static_assert(fastMemoryAllocationSoftLimit >= fastMemoryCacheHardLimit, "The cache shouldn't be bigger than the total number we'll ever allocate"); 91 size_t fastMemoryPreallocateCount { 0 }; 92 std::atomic<void*> fastMemoryCache[fastMemoryCacheHardLimit] = { ATOMIC_VAR_INIT(nullptr) }; 93 std::atomic<void*> currentlyActiveFastMemories[fastMemoryAllocationSoftLimit] = { ATOMIC_VAR_INIT(nullptr) }; 94 std::atomic<size_t> currentlyAllocatedFastMemories = ATOMIC_VAR_INIT(0); 95 std::atomic<size_t> observedMaximumFastMemory = ATOMIC_VAR_INIT(0); 96 97 void* tryGetCachedFastMemory() 98 { 99 for (unsigned idx = 0; idx < fastMemoryPreallocateCount; ++idx) { 100 if (void* previous = fastMemoryCache[idx].exchange(nullptr, std::memory_order_acq_rel)) 101 return previous; 102 } 103 return nullptr; 104 } 105 106 bool tryAddToCachedFastMemory(void* memory) 107 { 108 for (unsigned i = 0; i < fastMemoryPreallocateCount; ++i) { 109 void* expected = nullptr; 110 if (fastMemoryCache[i].compare_exchange_strong(expected, memory, 
std::memory_order_acq_rel)) { 111 dataLogLnIf(verbose, "Cached fast memory ", RawPointer(memory)); 112 return true; 113 } 114 } 115 return false; 116 } 117 118 bool tryAddToCurrentlyActiveFastMemories(void* memory) 119 { 120 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 121 void* expected = nullptr; 122 if (currentlyActiveFastMemories[idx].compare_exchange_strong(expected, memory, std::memory_order_acq_rel)) 123 return true; 124 } 125 return false; 126 } 127 128 void removeFromCurrentlyActiveFastMemories(void* memory) 129 { 130 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 131 void* expected = memory; 132 if (currentlyActiveFastMemories[idx].compare_exchange_strong(expected, nullptr, std::memory_order_acq_rel)) 133 return; 134 } 135 RELEASE_ASSERT_NOT_REACHED(); 136 } 137 138 void* tryGetFastMemory(VM& vm) 139 { 140 void* memory = nullptr; 141 142 if (LIKELY(Options::useWebAssemblyFastMemory())) { 143 memory = tryGetCachedFastMemory(); 144 if (memory) 145 dataLogLnIf(verbose, "tryGetFastMemory re-using ", RawPointer(memory)); 146 else { 147 // No memory was available in the cache. Maybe waiting on GC will find a free one. 148 // FIXME collectSync(Full) and custom eager destruction of wasm memories could be better. For now use collectAllGarbage. Also, nothing tells us the current VM is holding onto fast memories. https://bugs.webkit.org/show_bug.cgi?id=170748 149 dataLogLnIf(verbose, "tryGetFastMemory waiting on GC and retrying"); 150 vm.heap.collectAllGarbage(); 151 memory = tryGetCachedFastMemory(); 152 dataLogLnIf(verbose, "tryGetFastMemory waited on GC and retried ", memory? "successfully" : "unseccessfully"); 153 } 154 155 // The soft limit is inherently racy because checking+allocation isn't atomic. Exceeding it slightly is fine. 
156 bool atAllocationSoftLimit = currentlyAllocatedFastMemories.load(std::memory_order_acquire) >= fastMemoryAllocationSoftLimit; 157 dataLogLnIf(verbose && atAllocationSoftLimit, "tryGetFastMemory reached allocation soft limit of ", fastMemoryAllocationSoftLimit); 158 159 if (!memory && !atAllocationSoftLimit) { 160 memory = mmapBytes(Memory::fastMappedBytes()); 161 if (memory) { 162 size_t currentlyAllocated = 1 + currentlyAllocatedFastMemories.fetch_add(1, std::memory_order_acq_rel); 163 size_t currentlyObservedMaximum = observedMaximumFastMemory.load(std::memory_order_acquire); 164 if (currentlyAllocated > currentlyObservedMaximum) { 165 size_t expected = currentlyObservedMaximum; 166 bool success = observedMaximumFastMemory.compare_exchange_strong(expected, currentlyAllocated, std::memory_order_acq_rel); 167 if (success) 168 dataLogLnIf(verbose, "tryGetFastMemory currently observed maximum is now ", currentlyAllocated); 169 else 170 // We lost the update race, but the counter is monotonic so the winner must have updated the value to what we were going to update it to, or multiple winners did so. 171 ASSERT(expected >= currentlyAllocated); 172 } 173 dataLogLnIf(verbose, "tryGetFastMemory allocated ", RawPointer(memory), ", currently allocated is ", currentlyAllocated); 174 } 175 } 176 } 177 178 if (memory) { 179 if (UNLIKELY(!tryAddToCurrentlyActiveFastMemories(memory))) { 180 // We got a memory, but reached the allocation soft limit *and* all of the allocated memories are active, none are cached. That's a bummer, we have to get rid of our memory. We can't just hold on to it because the list of active fast memories must be precise. 
181 dataLogLnIf(verbose, "tryGetFastMemory found a fast memory but had to give it up"); 182 munmapBytes(memory, Memory::fastMappedBytes()); 183 currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel); 184 memory = nullptr; 185 } 186 } 187 188 if (!memory) { 189 dataLogLnIf(verbose, "tryGetFastMemory couldn't re-use or allocate a fast memory"); 190 if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory())) 191 webAssemblyCouldntGetFastMemory(); 192 } 193 194 return memory; 195 } 196 197 void* tryGetSlowMemory(size_t bytes) 198 { 199 void* memory = mmapBytes(bytes); 200 dataLogLnIf(memory && verbose, "Obtained slow memory ", RawPointer(memory), " with capacity ", bytes); 201 dataLogLnIf(!memory && verbose, "Failed obtaining slow memory with capacity ", bytes); 202 return memory; 203 } 204 205 void relinquishMemory(void* memory, size_t writableSize, size_t mappedCapacity, MemoryMode mode) 206 { 207 switch (mode) { 208 case MemoryMode::Signaling: { 209 RELEASE_ASSERT(Options::useWebAssemblyFastMemory()); 210 RELEASE_ASSERT(mappedCapacity == Memory::fastMappedBytes()); 211 212 // This memory cannot cause a trap anymore. 213 removeFromCurrentlyActiveFastMemories(memory); 214 215 // We may cache fast memories. Assuming we will, we have to reset them before inserting them into the cache. 
216 zeroAndUnprotectBytes(memory, writableSize); 217 218 if (tryAddToCachedFastMemory(memory)) 219 return; 220 221 dataLogLnIf(verbose, "relinquishMemory unable to cache fast memory, freeing instead ", RawPointer(memory)); 222 munmapBytes(memory, Memory::fastMappedBytes()); 223 currentlyAllocatedFastMemories.fetch_sub(1, std::memory_order_acq_rel); 224 225 return; 226 } 227 228 case MemoryMode::BoundsChecking: 229 dataLogLnIf(verbose, "relinquishFastMemory freeing slow memory ", RawPointer(memory)); 230 munmapBytes(memory, mappedCapacity); 231 return; 232 233 case MemoryMode::NumberOfMemoryModes: 234 break; 235 } 236 237 RELEASE_ASSERT_NOT_REACHED(); 238 } 239 240 bool makeNewMemoryReadWriteOrRelinquish(void* memory, size_t initialBytes, size_t mappedCapacityBytes, MemoryMode mode) 241 { 242 ASSERT(memory && initialBytes <= mappedCapacityBytes); 243 if (initialBytes) { 244 dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(memory), "'s initial ", initialBytes, " bytes as read+write"); 245 if (mprotect(memory, initialBytes, PROT_READ | PROT_WRITE)) { 246 const char* why = strerror(errno); 247 dataLogLnIf(verbose, "Failed making memory ", RawPointer(memory), " readable and writable: ", why); 248 relinquishMemory(memory, 0, mappedCapacityBytes, mode); 249 return false; 250 } 251 } 62 252 return true; 63 253 } 254 255 } // anonymous namespace 256 64 257 65 258 const char* makeString(MemoryMode mode) … … 74 267 } 75 268 76 static const unsigned maxFastMemories = 4; 77 static unsigned allocatedFastMemories { 0 }; 78 StaticLock memoryLock; 79 inline Deque<void*, maxFastMemories>& availableFastMemories(const AbstractLocker&) 80 { 81 static NeverDestroyed<Deque<void*, maxFastMemories>> availableFastMemories; 82 return availableFastMemories; 83 } 84 85 inline HashSet<void*>& activeFastMemories(const AbstractLocker&) 86 { 87 static NeverDestroyed<HashSet<void*>> activeFastMemories; 88 return activeFastMemories; 89 } 90 91 const HashSet<void*>& 
viewActiveFastMemories(const AbstractLocker& locker) 92 { 93 return activeFastMemories(locker); 94 } 95 96 inline bool tryGetFastMemory(VM& vm, void*& memory, size_t& mappedCapacity, MemoryMode& mode) 97 { 98 auto dequeFastMemory = [&] () -> bool { 99 // FIXME: We should eventually return these to the OS if we go some number of GCs 100 // without using them. 101 LockHolder locker(memoryLock); 102 if (!availableFastMemories(locker).isEmpty()) { 103 memory = availableFastMemories(locker).takeFirst(); 104 auto result = activeFastMemories(locker).add(memory); 105 ASSERT_UNUSED(result, result.isNewEntry); 106 mappedCapacity = fastMemoryMappedBytes; 107 mode = MemoryMode::Signaling; 269 void Memory::initializePreallocations() 270 { 271 if (UNLIKELY(!Options::useWebAssemblyFastMemory())) 272 return; 273 274 // Races cannot occur in this function: it is only called at program initialization, before WebAssembly can be invoked. 275 276 const auto startTime = MonotonicTime::now(); 277 const size_t desiredFastMemories = std::min<size_t>(Options::webAssemblyFastMemoryPreallocateCount(), fastMemoryCacheHardLimit); 278 279 // Start off trying to allocate fast memories contiguously so they don't fragment each other. This can fail if the address space is otherwise fragmented. In that case, go for smaller contiguous allocations. We'll eventually get individual non-contiguous fast memories allocated, or we'll just be unable to fit a single one at which point we give up. 
280 auto allocateContiguousFastMemories = [&] (size_t numContiguous) -> bool { 281 if (void *memory = mmapBytes(Memory::fastMappedBytes() * numContiguous)) { 282 for (size_t subMemory = 0; subMemory < numContiguous; ++subMemory) { 283 void* startAddress = reinterpret_cast<char*>(memory) + Memory::fastMappedBytes() * subMemory + subMemory; 284 bool inserted = false; 285 for (size_t cacheEntry = 0; cacheEntry < fastMemoryCacheHardLimit; ++cacheEntry) { 286 if (fastMemoryCache[cacheEntry].load(std::memory_order_relaxed) == nullptr) { 287 fastMemoryCache[cacheEntry].store(startAddress, std::memory_order_relaxed); 288 inserted = true; 289 break; 290 } 291 } 292 RELEASE_ASSERT(inserted); 293 } 108 294 return true; 109 295 } … … 111 297 }; 112 298 113 auto fail = [] () -> bool { 114 if (UNLIKELY(Options::crashIfWebAssemblyCantFastMemory())) 115 webAssemblyCouldntGetFastMemory(); 116 return false; 117 }; 118 119 // We might GC here so we should be holding the API lock. 120 // FIXME: We should be able to syncronously trigger the GC from another thread. 121 ASSERT(vm.currentThreadIsHoldingAPILock()); 122 if (UNLIKELY(!fastMemoryEnabled())) 123 return fail(); 124 125 // We need to be sure we have a stub prior to running code. 126 if (UNLIKELY(!Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator))) 127 return fail(); 128 129 ASSERT(allocatedFastMemories <= maxFastMemories); 130 if (dequeFastMemory()) 131 return true; 132 133 // If we have allocated all the fast memories... too bad. 134 if (allocatedFastMemories == maxFastMemories) { 135 // There is a reasonable chance that another module has died but has not been collected yet. Don't lose hope yet! 
136 vm.heap.collectAllGarbage(); 137 if (dequeFastMemory()) 138 return true; 139 return fail(); 140 } 141 142 if (mmapBytes(fastMemoryMappedBytes, memory)) { 143 mappedCapacity = fastMemoryMappedBytes; 144 mode = MemoryMode::Signaling; 145 LockHolder locker(memoryLock); 146 allocatedFastMemories++; 147 auto result = activeFastMemories(locker).add(memory); 148 ASSERT_UNUSED(result, result.isNewEntry); 149 } 150 151 if (memory) 152 return true; 153 154 return fail(); 155 } 156 157 inline void releaseFastMemory(void*& memory, size_t writableSize, size_t mappedCapacity, MemoryMode mode) 158 { 159 if (mode != MemoryMode::Signaling || !memory) 160 return; 161 162 RELEASE_ASSERT(memory && mappedCapacity == fastMemoryMappedBytes); 163 ASSERT(fastMemoryEnabled()); 164 165 memset(memory, 0, writableSize); 166 if (mprotect(memory, writableSize, PROT_NONE)) 167 CRASH(); 168 169 LockHolder locker(memoryLock); 170 bool result = activeFastMemories(locker).remove(memory); 171 ASSERT_UNUSED(result, result); 172 ASSERT(availableFastMemories(locker).size() < allocatedFastMemories); 173 availableFastMemories(locker).append(memory); 174 memory = nullptr; 299 size_t fragments = 0; 300 size_t numFastMemories = 0; 301 size_t contiguousMemoryAllocationAttempt = desiredFastMemories; 302 while (numFastMemories != desiredFastMemories && contiguousMemoryAllocationAttempt != 0) { 303 if (allocateContiguousFastMemories(contiguousMemoryAllocationAttempt)) { 304 numFastMemories += contiguousMemoryAllocationAttempt; 305 contiguousMemoryAllocationAttempt = std::min(contiguousMemoryAllocationAttempt - 1, desiredFastMemories - numFastMemories); 306 } else 307 --contiguousMemoryAllocationAttempt; 308 ++fragments; 309 } 310 311 fastMemoryPreallocateCount = numFastMemories; 312 currentlyAllocatedFastMemories.store(fastMemoryPreallocateCount, std::memory_order_relaxed); 313 observedMaximumFastMemory.store(fastMemoryPreallocateCount, std::memory_order_relaxed); 314 315 const auto endTime = 
MonotonicTime::now(); 316 317 for (size_t cacheEntry = 0; cacheEntry < fastMemoryPreallocateCount; ++cacheEntry) { 318 void* startAddress = fastMemoryCache[cacheEntry].load(std::memory_order_relaxed); 319 ASSERT(startAddress); 320 dataLogLnIf(verbose, "Pre-allocation of WebAssembly fast memory at ", RawPointer(startAddress)); 321 } 322 dataLogLnIf(verbose, "Pre-allocated ", fastMemoryPreallocateCount, " WebAssembly fast memories in ", fastMemoryPreallocateCount == 0 ? 0 : fragments, fragments == 1 ? " fragment, took " : " fragments, took ", endTime - startTime); 175 323 } 176 324 … … 195 343 } 196 344 197 RefPtr<Memory> Memory::createImpl(VM& vm, PageCount initial, PageCount maximum, std::optional<MemoryMode> requiredMode) 198 { 345 RefPtr<Memory> Memory::create(VM& vm, PageCount initial, PageCount maximum) 346 { 347 ASSERT(initial); 199 348 RELEASE_ASSERT(!maximum || maximum >= initial); // This should be guaranteed by our caller. 200 349 201 MemoryMode mode = requiredMode ? *requiredMode : MemoryMode::BoundsChecking; 202 const size_t size = initial.bytes(); 203 size_t mappedCapacity = maximum ? maximum.bytes() : PageCount::max().bytes(); 350 const size_t initialBytes = initial.bytes(); 351 const size_t maximumBytes = maximum ? maximum.bytes() : 0; 352 size_t mappedCapacityBytes = 0; 353 MemoryMode mode; 354 355 // We need to be sure we have a stub prior to running code. 356 if (UNLIKELY(!Thunks::singleton().stub(throwExceptionFromWasmThunkGenerator))) 357 return nullptr; 358 359 if (maximum && !maximumBytes) { 360 // User specified a zero maximum, initial size must also be zero. 
361 RELEASE_ASSERT(!initialBytes); 362 return adoptRef(new Memory(initial, maximum)); 363 } 364 204 365 void* memory = nullptr; 205 366 206 auto makeEmptyMemory = [&] () -> RefPtr<Memory> { 207 if (mode == MemoryMode::Signaling) 208 return nullptr; 209 210 return adoptRef(new Memory(initial, maximum)); 211 }; 212 213 if (!mappedCapacity) { 214 // This means we specified a zero as maximum (which means we also have zero as initial size). 215 RELEASE_ASSERT(!size); 216 dataLogLnIf(verbose, "Memory::create allocating nothing"); 217 return makeEmptyMemory(); 218 } 219 220 bool canUseFastMemory = !requiredMode || requiredMode == MemoryMode::Signaling; 221 if (!canUseFastMemory || !tryGetFastMemory(vm, memory, mappedCapacity, mode)) { 222 if (mode == MemoryMode::Signaling) 223 return nullptr; 224 225 if (Options::simulateWebAssemblyLowMemory() ? true : !mmapBytes(mappedCapacity, memory)) { 226 // Try again with a different number. 227 dataLogLnIf(verbose, "Memory::create mmap failed once for capacity, trying again"); 228 mappedCapacity = size; 229 if (!mappedCapacity) { 230 dataLogLnIf(verbose, "Memory::create mmap not trying again because size is zero"); 231 return makeEmptyMemory(); 232 } 233 234 if (!mmapBytes(mappedCapacity, memory)) { 235 dataLogLnIf(verbose, "Memory::create mmap failed twice"); 236 return nullptr; 237 } 238 } 239 } 240 241 ASSERT(memory && size <= mappedCapacity); 242 if (mprotect(memory, size, PROT_READ | PROT_WRITE)) { 243 // FIXME: should this ever occur? https://bugs.webkit.org/show_bug.cgi?id=169890 244 dataLogLnIf(verbose, "Memory::create mprotect failed"); 245 releaseFastMemory(memory, 0, mappedCapacity, mode); 367 // First try fast memory, because they're fast. Fast memory is suitable for any initial / maximum. 
368 memory = tryGetFastMemory(vm); 369 if (memory) { 370 mappedCapacityBytes = Memory::fastMappedBytes(); 371 mode = MemoryMode::Signaling; 372 } 373 374 // If we can't get a fast memory but the user expressed the intent to grow memory up to a certain maximum then we should try to honor that desire. It'll mean that grow is more likely to succeed, and won't require remapping. 375 if (!memory && maximum) { 376 memory = tryGetSlowMemory(maximumBytes); 246 377 if (memory) { 247 if (munmap(memory, mappedCapacity)) 248 CRASH(); 249 } 378 mappedCapacityBytes = maximumBytes; 379 mode = MemoryMode::BoundsChecking; 380 } 381 } 382 383 // We're stuck with a slow memory which may be slower or impossible to grow. 384 if (!memory) { 385 memory = tryGetSlowMemory(initialBytes); 386 if (memory) { 387 mappedCapacityBytes = initialBytes; 388 mode = MemoryMode::BoundsChecking; 389 } 390 } 391 392 if (!memory) 250 393 return nullptr; 251 } 252 253 dataLogLnIf(verbose, "Memory::create mmap succeeded"); 254 return adoptRef(new Memory(memory, initial, maximum, mappedCapacity, mode)); 255 } 256 257 RefPtr<Memory> Memory::create(VM& vm, PageCount initial, PageCount maximum, std::optional<MemoryMode> mode) 258 { 259 RELEASE_ASSERT(!maximum || maximum >= initial); // This should be guaranteed by our caller. 
260 RefPtr<Memory> result = createImpl(vm, initial, maximum, mode); 261 if (result) { 262 if (result->mode() == MemoryMode::Signaling) 263 RELEASE_ASSERT(result->m_mappedCapacity == fastMemoryMappedBytes); 264 if (mode) 265 ASSERT(*mode == result->mode()); 266 } 267 return result; 394 395 if (!makeNewMemoryReadWriteOrRelinquish(memory, initialBytes, mappedCapacityBytes, mode)) 396 return nullptr; 397 398 return adoptRef(new Memory(memory, initial, maximum, mappedCapacityBytes, mode)); 268 399 } 269 400 270 401 Memory::~Memory() 271 402 { 272 dataLogLnIf(verbose, "Memory::~Memory ", *this);273 releaseFastMemory(m_memory, m_size, m_mappedCapacity, m_mode);274 403 if (m_memory) { 275 if (munmap(m_memory, m_mappedCapacity)) 276 CRASH(); 277 } 404 dataLogLnIf(verbose, "Memory::~Memory ", *this); 405 relinquishMemory(m_memory, m_size, m_mappedCapacity, m_mode); 406 } 407 } 408 409 size_t Memory::fastMappedRedzoneBytes() 410 { 411 return static_cast<size_t>(PageCount::pageSize) * Options::webAssemblyFastMemoryRedzonePages(); 412 } 413 414 size_t Memory::fastMappedBytes() 415 { 416 static_assert(sizeof(uint64_t) == sizeof(size_t), "We rely on allowing the maximum size of Memory we map to be 2^32 + redzone which is larger than fits in a 32-bit integer that we'd pass to mprotect if this didn't hold."); 417 return static_cast<size_t>(std::numeric_limits<uint32_t>::max()) + fastMappedRedzoneBytes(); 418 } 419 420 size_t Memory::maxFastMemoryCount() 421 { 422 // The order can be relaxed here because we provide a monotonically-increasing estimate. A concurrent observer could see a slightly out-of-date value but can't tell that they did. 423 return observedMaximumFastMemory.load(std::memory_order_relaxed); 424 } 425 426 bool Memory::addressIsInActiveFastMemory(void* address) 427 { 428 // This cannot race in any meaningful way: the thread which calls this function wants to know if a fault it received at a particular address is in a fast memory. 
That fast memory must therefore be active in that thread. It cannot be added or removed from the list of currently active fast memories. Other memories being added / removed concurrently are inconsequential. 429 for (size_t idx = 0; idx < fastMemoryAllocationSoftLimit; ++idx) { 430 char* start = static_cast<char*>(currentlyActiveFastMemories[idx].load(std::memory_order_acquire)); 431 if (start <= address && address <= start + fastMappedBytes()) 432 return true; 433 } 434 return false; 278 435 } 279 436 … … 302 459 303 460 if (m_memory && desiredSize <= m_mappedCapacity) { 304 if (mprotect(static_cast<uint8_t*>(m_memory) + m_size, static_cast<size_t>(desiredSize - m_size), PROT_READ | PROT_WRITE)) { 305 // FIXME: should this ever occur? https://bugs.webkit.org/show_bug.cgi?id=169890 461 uint8_t* startAddress = static_cast<uint8_t*>(m_memory) + m_size; 462 size_t extraBytes = desiredSize - m_size; 463 RELEASE_ASSERT(extraBytes); 464 dataLogLnIf(verbose, "Marking WebAssembly memory's ", RawPointer(m_memory), " as read+write in range [", RawPointer(startAddress), ", ", RawPointer(startAddress + extraBytes), ")"); 465 if (mprotect(startAddress, extraBytes, PROT_READ | PROT_WRITE)) { 306 466 dataLogLnIf(verbose, "Memory::grow in-place failed ", *this); 307 467 return false; … … 317 477 318 478 // Otherwise, let's try to make some new memory. 319 // FIXME: It would be nice if we had a VM tag for wasm memory. https://bugs.webkit.org/show_bug.cgi?id=163600 320 void* newMemory = mmap(nullptr, desiredSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); 321 if (newMemory == MAP_FAILED) 479 // FIXME mremap would be nice https://bugs.webkit.org/show_bug.cgi?id=170557 480 // FIXME should we over-allocate here? 
https://bugs.webkit.org/show_bug.cgi?id=170826 481 void* newMemory = tryGetSlowMemory(desiredSize); 482 if (!newMemory) 483 return false; 484 485 if (!makeNewMemoryReadWriteOrRelinquish(newMemory, desiredSize, desiredSize, mode())) 322 486 return false; 323 487 324 488 if (m_memory) { 325 489 memcpy(newMemory, m_memory, m_size); 326 bool success = !munmap(m_memory, m_mappedCapacity);327 RELEASE_ASSERT(success);328 } 490 relinquishMemory(m_memory, m_size, m_size, m_mode); 491 } 492 329 493 m_memory = newMemory; 330 494 m_mappedCapacity = desiredSize; -
trunk/Source/JavaScriptCore/wasm/WasmMemory.h
r214826 r215340 30 30 #include "WasmPageCount.h" 31 31 32 #include <wtf/HashSet.h>33 #include <wtf/Optional.h>34 32 #include <wtf/RefCounted.h> 35 33 #include <wtf/RefPtr.h> … … 62 60 explicit operator bool() const { return !!m_memory; } 63 61 64 static RefPtr<Memory> create(VM&, PageCount initial, PageCount maximum, std::optional<MemoryMode> requiredMode = std::nullopt); 62 static void initializePreallocations(); 63 static RefPtr<Memory> create(VM&, PageCount initial, PageCount maximum); 65 64 66 65 Memory() = default; 67 66 ~Memory(); 67 68 static size_t fastMappedRedzoneBytes(); 69 static size_t fastMappedBytes(); // Includes redzone. 70 static size_t maxFastMemoryCount(); 71 static bool addressIsInActiveFastMemory(void*); 68 72 69 73 void* memory() const { return m_memory; } … … 82 86 void check() { ASSERT(!deletionHasBegun()); } 83 87 private: 84 static RefPtr<Memory> createImpl(VM&, PageCount initial, PageCount maximum, std::optional<MemoryMode> requiredMode = std::nullopt);85 88 Memory(void* memory, PageCount initial, PageCount maximum, size_t mappedCapacity, MemoryMode); 86 89 Memory(PageCount initial, PageCount maximum); … … 95 98 }; 96 99 97 static_assert(sizeof(uint64_t) == sizeof(size_t), "We rely on allowing the maximum size of Memory we map to be 2^33 which is larger than fits in a 32-bit integer that we'd pass to mprotect if this didn't hold."); 100 } } // namespace JSC::Wasm 98 101 99 const size_t fastMemoryMappedBytes = (static_cast<size_t>(std::numeric_limits<uint32_t>::max()) + 1) * 2; // pointer max + offset max. This is all we need since a load straddling readable memory will trap. 100 extern StaticLock memoryLock; 101 const HashSet<void*>& viewActiveFastMemories(const AbstractLocker&); 102 #else 103 104 namespace JSC { namespace Wasm { 105 106 class Memory { 107 public: 108 static size_t maxFastMemoryCount() { return 0; } 109 static bool addressIsInActiveFastMemory(void*) { return false; } 110 }; 102 111 103 112 } } // namespace JSC::Wasm -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyInstance.cpp
r215103 r215340 64 64 65 65 m_module.set(vm, this, module); 66 const size_t extraMemorySize = module->moduleInformation().globals.size() * sizeof(Register);66 const size_t extraMemorySize = globalMemoryByteSize(); 67 67 m_globals = MallocPtr<uint64_t>::malloc(extraMemorySize); 68 68 heap()->reportExtraMemoryAllocated(extraMemorySize); … … 90 90 visitor.append(thisObject->m_table); 91 91 visitor.append(thisObject->m_callee); 92 visitor.reportExtraMemoryVisited(thisObject-> module()->moduleInformation().globals.size());92 visitor.reportExtraMemoryVisited(thisObject->globalMemoryByteSize()); 93 93 for (unsigned i = 0; i < thisObject->m_numImportFunctions; ++i) 94 94 visitor.append(thisObject->importFunctions()[i]); … … 351 351 } 352 352 353 size_t JSWebAssemblyInstance::globalMemoryByteSize() const 354 { 355 return m_module->moduleInformation().globals.size() * sizeof(Register); 356 } 353 357 354 358 } // namespace JSC -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyInstance.h
r215103 r215340 93 93 VM* m_vm; 94 94 WriteBarrier<JSObject>* importFunctions() { return bitwise_cast<WriteBarrier<JSObject>*>(bitwise_cast<char*>(this) + offsetOfImportFunctions()); } 95 size_t globalMemoryByteSize() const; 95 96 96 97 WriteBarrier<JSWebAssemblyModule> m_module; -
trunk/Source/JavaScriptCore/wasm/js/JSWebAssemblyMemory.cpp
r214645 r215340 118 118 119 119 memory().check(); 120 // FIXME Should we report extra memory to the GC on allocation / grow / visit? https://bugs.webkit.org/show_bug.cgi?id=170690 120 121 return oldPageCount; 121 122 } … … 125 126 Base::finishCreation(vm); 126 127 ASSERT(inherits(vm, info())); 128 // FIXME Should we report extra memory to the GC on allocation / grow / visit? https://bugs.webkit.org/show_bug.cgi?id=170690 129 vm.heap.reportWebAssemblyFastMemoriesAllocated(1); 127 130 } 128 131 … … 142 145 Base::visitChildren(thisObject, visitor); 143 146 visitor.append(thisObject->m_bufferWrapper); 147 // FIXME Should we report extra memory to the GC on allocation / grow / visit? https://bugs.webkit.org/show_bug.cgi?id=170690 144 148 } 145 149 -
trunk/Source/WTF/ChangeLog
r215318 r215340 1 2017-04-13 JF Bastien <jfbastien@apple.com> 2 3 WebAssembly: manage memory better 4 https://bugs.webkit.org/show_bug.cgi?id=170628 5 6 Reviewed by Keith Miller, Michael Saboff. 7 8 Re-use a VM tag which was intended for JavaScript core, was then 9 used by our GC, and is now unused. If I don't do this then 10 WebAssembly fast memories will make vmmap look super weird because 11 it'll look like multi-gigabyte of virtual memory are allocated as 12 part of our process' regular memory! 13 14 Separately I need to update vmmap and other tools to print the 15 right name. Right now this tag gets identified as "JS garbage 16 collector". 17 18 * wtf/OSAllocator.h: 19 * wtf/VMTags.h: 20 1 21 2017-04-13 Yusuke Suzuki <utatane.tea@gmail.com> 2 22 -
trunk/Source/WTF/wtf/OSAllocator.h
r194204 r215340 37 37 UnknownUsage = -1, 38 38 FastMallocPages = VM_TAG_FOR_TCMALLOC_MEMORY, 39 JSGCHeapPages = VM_TAG_FOR_COLLECTOR_MEMORY,40 39 JSVMStackPages = VM_TAG_FOR_REGISTERFILE_MEMORY, 41 40 JSJITCodePages = VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, -
trunk/Source/WTF/wtf/VMTags.h
r172790 r215340 1 1 /* 2 * Copyright (C) 2009 Apple Inc. All rights reserved.2 * Copyright (C) 2009, 2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 52 52 53 53 #if defined(VM_MEMORY_JAVASCRIPT_CORE) 54 #define VM_TAG_FOR_ COLLECTOR_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE)54 #define VM_TAG_FOR_WEBASSEMBLY_MEMORY VM_MAKE_TAG(VM_MEMORY_JAVASCRIPT_CORE) 55 55 #else 56 #define VM_TAG_FOR_ COLLECTOR_MEMORY VM_MAKE_TAG(63)56 #define VM_TAG_FOR_WEBASSEMBLY_MEMORY VM_MAKE_TAG(63) 57 57 #endif // defined(VM_MEMORY_JAVASCRIPT_CORE) 58 58 … … 60 60 61 61 #define VM_TAG_FOR_TCMALLOC_MEMORY -1 62 #define VM_TAG_FOR_ COLLECTOR_MEMORY -162 #define VM_TAG_FOR_WEBASSEMBLY_MEMORY -1 63 63 #define VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY -1 64 64 #define VM_TAG_FOR_REGISTERFILE_MEMORY -1 -
trunk/Source/WebCore/ChangeLog
r215337 r215340 1 2017-04-13 JF Bastien <jfbastien@apple.com> 2 3 WebAssembly: manage memory better 4 https://bugs.webkit.org/show_bug.cgi?id=170628 5 6 Reviewed by Keith Miller, Michael Saboff. 7 8 Re-use a VM tag which was intended for JavaScript core, was then 9 used by our GC, and is now unused. If I don't do this then 10 WebAssembly fast memories will make vmmap look super weird because 11 it'll look like multi-gigabyte of virtual memory are allocated as 12 part of our process' regular memory! 13 14 Separately I need to update vmmap and other tools to print the 15 right name. Right now this tag gets identified as "JS garbage 16 collector". 17 18 * page/ResourceUsageData.cpp: 19 (WebCore::ResourceUsageData::ResourceUsageData): 20 * page/ResourceUsageData.h: 21 * page/cocoa/ResourceUsageOverlayCocoa.mm: 22 (WebCore::HistoricResourceUsageData::HistoricResourceUsageData): 23 * page/cocoa/ResourceUsageThreadCocoa.mm: 24 (WebCore::displayNameForVMTag): 25 (WebCore::categoryForVMTag): 26 1 27 2017-04-13 Ryosuke Niwa <rniwa@webkit.org> 2 28 -
trunk/Source/WebCore/page/ResourceUsageData.cpp
r202394 r215340 1 1 /* 2 * Copyright (C) 2016 Apple Inc. All rights reserved.2 * Copyright (C) 2016-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 35 35 // VM tag categories. 36 36 categories[MemoryCategory::JSJIT] = MemoryCategoryInfo(MemoryCategory::JSJIT); 37 categories[MemoryCategory::WebAssembly] = MemoryCategoryInfo(MemoryCategory::WebAssembly); 37 38 categories[MemoryCategory::Images] = MemoryCategoryInfo(MemoryCategory::Images); 38 39 categories[MemoryCategory::Layers] = MemoryCategoryInfo(MemoryCategory::Layers); -
trunk/Source/WebCore/page/ResourceUsageData.h
r215241 r215340 1 1 /* 2 * Copyright (C) 2016 Apple Inc. All rights reserved.2 * Copyright (C) 2016-2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 37 37 static const unsigned LibcMalloc = 1; 38 38 static const unsigned JSJIT = 2; 39 static const unsigned Images = 3; 40 static const unsigned GCHeap = 4; 41 static const unsigned GCOwned = 5; 42 static const unsigned Other = 6; 43 static const unsigned Layers = 7; 44 static const unsigned NumberOfCategories = 8; 39 static const unsigned WebAssembly = 3; 40 static const unsigned Images = 4; 41 static const unsigned GCHeap = 5; 42 static const unsigned GCOwned = 6; 43 static const unsigned Other = 7; 44 static const unsigned Layers = 8; 45 static const unsigned NumberOfCategories = 9; 45 46 } 46 47 -
trunk/Source/WebCore/page/cocoa/ResourceUsageOverlayCocoa.mm
r215241 r215340 1 1 /* 2 * Copyright (C) 2015 Apple Inc. All rights reserved.2 * Copyright (C) 2015, 2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 163 163 // VM tag categories. 164 164 categories[MemoryCategory::JSJIT] = HistoricMemoryCategoryInfo(MemoryCategory::JSJIT, 0xFFFF60FF, "JS JIT"); 165 categories[MemoryCategory::WebAssembly] = HistoricMemoryCategoryInfo(MemoryCategory::WebAssembly, 0xFF654FF0, "WebAssembly"); 165 166 categories[MemoryCategory::Images] = HistoricMemoryCategoryInfo(MemoryCategory::Images, 0xFFFFFF00, "Images"); 166 167 categories[MemoryCategory::Layers] = HistoricMemoryCategoryInfo(MemoryCategory::Layers, 0xFF00FFFF, "Layers"); -
trunk/Source/WebCore/page/cocoa/ResourceUsageThreadCocoa.mm
r211622 r215340 1 1 /* 2 * Copyright (C) 2015 Apple Inc. All rights reserved.2 * Copyright (C) 2015, 2017 Apple Inc. All rights reserved. 3 3 * 4 4 * Redistribution and use in source and binary forms, with or without … … 81 81 case VM_MEMORY_CGIMAGE: return "CG image"; 82 82 case VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR: return "JSC JIT"; 83 case VM_MEMORY_JAVASCRIPT_CORE: return "WebAssembly"; 83 84 case VM_MEMORY_MALLOC: return "malloc"; 84 85 case VM_MEMORY_MALLOC_HUGE: return "malloc (huge)"; … … 182 183 case VM_MEMORY_JAVASCRIPT_JIT_EXECUTABLE_ALLOCATOR: 183 184 return MemoryCategory::JSJIT; 185 case VM_MEMORY_JAVASCRIPT_CORE: 186 return MemoryCategory::WebAssembly; 184 187 case VM_MEMORY_MALLOC: 185 188 case VM_MEMORY_MALLOC_HUGE: -
trunk/Websites/webkit.org/ChangeLog
r215154 r215340 1 2017-04-13 JF Bastien <jfbastien@apple.com> 2 3 WebAssembly: manage memory better 4 https://bugs.webkit.org/show_bug.cgi?id=170628 5 6 Reviewed by Keith Miller, Michael Saboff. 7 8 * docs/b3/intermediate-representation.html: typos 9 1 10 2017-04-08 Simon Fraser <simon.fraser@apple.com> 2 11 -
trunk/Websites/webkit.org/docs/b3/intermediate-representation.html
r213714 r215340 655 655 produces a Int64 less than to the pinnedGPR this falls through. Otherwise, it branches to 656 656 the exit path generated by the passed generator. Unlike the Patch/Check family, the 657 generator used by WasmBoundsCheck sould be set on the Procuder itself. The GRPReg passed in 658 pinnedGPR must also be marked as pinned by calling the Procedure's pinning API. B3 assumes 659 the WasmBoundsCheck will side-exit when the it branches, so the generator must do some kind 660 of termination. In Wasm this is used to trap and unwind back to JS. Must use the 661 WasmBoundsCheckValue class.</dd> 657 generator used by WasmBoundsCheck should be set on the Procedure itself. The GPRReg passed in 658 pinnedGPR must also be marked as pinned by calling the Procedure's pinning API, or it must be 659 InvalidGPR, in which case the out-of-bounds limit is 4GiB. In the latter case a maximum parameter 660 can be provided, to further constrain the out-of-bounds limit and help generate smaller 661 immediates. B3 assumes the WasmBoundsCheck will side-exit when it branches, so the generator 662 must do some kind of termination. In Wasm this is used to trap and unwind back to JS. Must use 663 the WasmBoundsCheckValue class.</dd> 662 664 663 665 <dt>Void Upsilon(T, ^phi)</dt>
Note: See TracChangeset
for help on using the changeset viewer.