Changeset 248606 in webkit
- Timestamp:
- Aug 13, 2019 12:49:40 PM (5 years ago)
- Location:
- trunk
- Files:
-
- 1 added
- 23 edited
- 1 copied
Legend:
- Unmodified
- Added
- Removed
-
trunk/LayoutTests/ChangeLog
r248604 r248606 1 2019-08-13 Justin Fan <justin_fan@apple.com> 2 3 [WebGPU] Improve GPUBindGroup performance using one device-shared argument MTLBuffer 4 https://bugs.webkit.org/show_bug.cgi?id=200606 5 6 Reviewed by Myles C. Maxfield. 7 8 Update bind-groups.html to better stress GPUBindGroup implementation. 9 10 * webgpu/bind-groups-expected.txt: 11 * webgpu/bind-groups.html: 12 1 13 2019-08-13 Antti Koivisto <antti@apple.com> 2 14 -
trunk/LayoutTests/webgpu/bind-groups-expected.txt
r244856 r248606 1 1 2 PASS Create a basic GPUBindGroup via GPUDevice. 2 PASS Create and use a basic GPUBindGroup. 3 PASS Create and use many GPUBindGroups in a single compute pass. 4 PASS Create and access a uniform-buffer in a GPUBindGroup. 5 PASS Create and access a sampled texture in a GPUBindGroup. 6 PASS Create and use multiple GPUBindGroups in a single dispatch. 7 PASS Bind a single GPUBuffer with different offsets in different GPUBindGroups 3 8 -
trunk/LayoutTests/webgpu/bind-groups.html
r247289 r248606 7 7 <script src="../resources/testharnessreport.js"></script> 8 8 <script> 9 promise_test(() => { 10 return getBasicDevice().then(function(device) { 11 // GPUBindGroupLayoutBindings 12 // FIXME: Also test sampled texture bindings. 13 const bufferLayoutBinding = { 9 let tests = {}; 10 11 const basicBufferShader = ` 12 [numthreads(1, 1, 1)] 13 compute void compute_main(device int[] buffer : register(u0)) 14 { 15 ++buffer[0]; 16 } 17 `; 18 19 let basicPipeline; 20 21 tests["Create and use a basic GPUBindGroup."] = async device => { 22 const bufferLayoutBinding = { 23 binding: 0, 24 visibility: GPUShaderStageBit.COMPUTE, 25 type: "storage-buffer" 26 }; 27 28 const bindGroupLayout = device.createBindGroupLayout({ bindings: [bufferLayoutBinding] }); 29 30 const basicBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.MAP_READ }); 31 const bufferBinding = { buffer: basicBuffer, size: 4 }; 32 const bindGroupBinding = { binding: 0, resource: bufferBinding }; 33 34 const bindGroup = device.createBindGroup({ layout: bindGroupLayout, bindings: [bindGroupBinding] }); 35 36 const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }); 37 38 const basicShaderModule = device.createShaderModule({ code: basicBufferShader, isWHLSL: true }); 39 basicPipeline = device.createComputePipeline({ 40 layout: pipelineLayout, 41 computeStage: { 42 module: basicShaderModule, 43 entryPoint: "compute_main" 44 } 45 }); 46 47 const commandEncoder = device.createCommandEncoder(); 48 const passEncoder = commandEncoder.beginComputePass(); 49 passEncoder.setPipeline(basicPipeline); 50 passEncoder.setBindGroup(0, bindGroup); 51 passEncoder.dispatch(1, 1, 1); 52 passEncoder.endPass(); 53 device.getQueue().submit([commandEncoder.finish()]); 54 55 const results = new Int32Array(await basicBuffer.mapReadAsync()); 56 basicBuffer.unmap(); 57 assert_equals(results[0], 1, "Storage buffer binding written to successfully."); 58 }; 
59 60 tests["Create and use many GPUBindGroups in a single compute pass."] = async device => { 61 const bufferLayoutBinding = { 62 binding: 0, 63 visibility: GPUShaderStageBit.COMPUTE, 64 type: "storage-buffer" 65 }; 66 67 const bindGroupLayout = device.createBindGroupLayout({ bindings: [bufferLayoutBinding] }); 68 69 const basicBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.MAP_READ }); 70 const bufferBinding = { buffer: basicBuffer, size: 4 }; 71 const bindGroupBinding = { binding: 0, resource: bufferBinding }; 72 73 const numGroups = 1000; 74 let bindGroups = new Array(numGroups); 75 for (let i = 0; i < numGroups; ++i) 76 bindGroups[i] = device.createBindGroup({ layout: bindGroupLayout, bindings: [bindGroupBinding] }); 77 78 const commandEncoder = device.createCommandEncoder(); 79 const passEncoder = commandEncoder.beginComputePass(); 80 81 let j = 0; 82 for (; j < numGroups; ++j) { 83 passEncoder.setPipeline(basicPipeline); 84 passEncoder.setBindGroup(0, bindGroups[j]); 85 passEncoder.dispatch(1, 1, 1); 86 } 87 88 passEncoder.endPass(); 89 device.getQueue().submit([commandEncoder.finish()]); 90 91 const results = new Int32Array(await basicBuffer.mapReadAsync()); 92 basicBuffer.unmap(); 93 assert_equals(results[0], j, "Storage buffer accessed successfully through multiple bind groups."); 94 }; 95 96 const uniformBufferShader = ` 97 [numthreads(1, 1, 1)] 98 compute void compute_main(constant int[] uniforms : register(b0), device int[] buffer : register(u1)) 99 { 100 buffer[0] += uniforms[0]; 101 } 102 `; 103 104 tests["Create and access a uniform-buffer in a GPUBindGroup."] = async device => { 105 const [uniformBuffer, writeArrayBuffer] = device.createBufferMapped({ size: 4, usage: GPUBufferUsage.UNIFORM }); 106 new Int32Array(writeArrayBuffer).set([42]); 107 uniformBuffer.unmap(); 108 109 const storageBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.MAP_READ }); 110 111 const 
bindGroupLayout = device.createBindGroupLayout({ 112 bindings: [{ 113 binding: 0, 114 visibility: GPUShaderStageBit.COMPUTE, 115 type: "uniform-buffer" 116 }, { 14 117 binding: 1, 15 visibility: GPUShaderStageBit. VERTEX,118 visibility: GPUShaderStageBit.COMPUTE, 16 119 type: "storage-buffer" 17 }; 18 19 const bindGroupLayout = device.createBindGroupLayout({ bindings: [bufferLayoutBinding] }); 20 21 const buffer = device.createBuffer({ size: 16, usage: GPUBufferUsage.STORAGE }); 22 const bufferBinding = { buffer: buffer, size: 16 }; 23 const bindGroupBinding = { binding: 1, resource: bufferBinding }; 24 25 const bindGroup = device.createBindGroup({ layout: bindGroupLayout, bindings: [bindGroupBinding]}); 26 assert_true(bindGroup instanceof GPUBindGroup, "GPUBindGroup successfully created."); 27 }, function() { 28 }); 29 }, "Create a basic GPUBindGroup via GPUDevice.") 120 }] 121 }); 122 123 const bindGroup = device.createBindGroup({ 124 layout: bindGroupLayout, 125 bindings: [{ 126 binding: 0, 127 resource: { 128 buffer: uniformBuffer, 129 size: 4 130 } 131 }, { 132 binding: 1, 133 resource: { 134 buffer: storageBuffer, 135 size: 4 136 } 137 }] 138 }); 139 140 const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }); 141 142 const shaderModule = device.createShaderModule({ code: uniformBufferShader, isWHLSL: true }); 143 144 const pipeline = device.createComputePipeline({ 145 layout: pipelineLayout, 146 computeStage: { 147 module: shaderModule, 148 entryPoint: "compute_main" 149 } 150 }); 151 152 const commandEncoder = device.createCommandEncoder(); 153 const passEncoder = commandEncoder.beginComputePass(); 154 passEncoder.setPipeline(pipeline); 155 passEncoder.setBindGroup(0, bindGroup); 156 passEncoder.dispatch(1, 1, 1); 157 passEncoder.endPass(); 158 device.getQueue().submit([commandEncoder.finish()]); 159 160 const results = new Int32Array(await storageBuffer.mapReadAsync()); 161 storageBuffer.unmap(); 162 
assert_equals(results[0], 42, "Storage buffer binding written to successfully."); 163 }; 164 165 const sampledTextureShader = ` 166 [numthreads(1, 1, 1)] 167 compute void compute_main(Texture2D<uint> inputTexture : register(t0), sampler inputSampler : register(s1), device uint[] output : register(u2)) 168 { 169 output[0] = Sample(inputTexture, inputSampler, float2(0, 0)); 170 } 171 `; 172 173 tests["Create and access a sampled texture in a GPUBindGroup."] = async device => { 174 const [textureDataBuffer, textureArrayBuffer] = device.createBufferMapped({ size: 4, usage: GPUBufferUsage.TRANSFER_SRC }); 175 new Uint32Array(textureArrayBuffer).set([42]); 176 textureDataBuffer.unmap(); 177 178 const textureSize = { width: 1, height: 1, depth: 1 }; 179 const texture = device.createTexture({ 180 size: textureSize, 181 format: "rgba8uint", 182 usage: GPUTextureUsage.SAMPLED | GPUTextureUsage.TRANSFER_DST 183 }); 184 185 const outputBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.MAP_READ }); 186 187 const bindGroupLayout = device.createBindGroupLayout({ 188 bindings: [{ 189 binding: 0, 190 visibility: GPUShaderStageBit.COMPUTE, 191 type: "sampled-texture" 192 }, { 193 binding: 1, 194 visibility: GPUShaderStageBit.COMPUTE, 195 type: "sampler" 196 }, { 197 binding: 2, 198 visibility: GPUShaderStageBit.COMPUTE, 199 type: "storage-buffer" 200 }] 201 }); 202 const bindGroup = device.createBindGroup({ 203 layout: bindGroupLayout, 204 bindings: [{ 205 binding: 0, 206 resource: texture.createDefaultView() 207 }, { 208 binding: 1, 209 resource: device.createSampler({}) 210 }, { 211 binding: 2, 212 resource: { 213 buffer: outputBuffer, 214 size: 4 215 } 216 }] 217 }); 218 219 const shaderModule = device.createShaderModule({ code: sampledTextureShader, isWHLSL: true }); 220 const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }); 221 222 const pipeline = device.createComputePipeline({ 223 layout: 
pipelineLayout, 224 computeStage: { 225 module: shaderModule, 226 entryPoint: "compute_main" 227 } 228 }); 229 230 const commandEncoder = device.createCommandEncoder(); 231 commandEncoder.copyBufferToTexture({ 232 buffer: textureDataBuffer, 233 rowPitch: 4, 234 imageHeight: 0 235 }, { texture: texture }, textureSize); 236 237 const passEncoder = commandEncoder.beginComputePass(); 238 passEncoder.setPipeline(pipeline); 239 passEncoder.setBindGroup(0, bindGroup); 240 passEncoder.dispatch(1, 1, 1); 241 passEncoder.endPass(); 242 243 device.getQueue().submit([commandEncoder.finish()]); 244 245 const results = new Uint32Array(await outputBuffer.mapReadAsync()); 246 outputBuffer.unmap(); 247 assert_equals(results[0], 42, "Correct value sampled from a bound 2D texture."); 248 }; 249 250 const comboShader = ` 251 [numthreads(1, 1, 1)] 252 compute void compute_main( 253 Texture2D<uint> inputTexture : register(t0, space0), 254 sampler inputSampler : register(s0, space1), 255 constant uint[] input : register(b0, space2), 256 device uint[] output : register(u0, space3)) 257 { 258 output[0] = input[0] + Sample(inputTexture, inputSampler, float2(0, 0)); 259 } 260 `; 261 262 tests["Create and use multiple GPUBindGroups in a single dispatch."] = async device => { 263 const [textureDataBuffer, textureArrayBuffer] = device.createBufferMapped({ size: 4, usage: GPUBufferUsage.TRANSFER_SRC }); 264 new Uint32Array(textureArrayBuffer).set([17]); 265 textureDataBuffer.unmap(); 266 267 const textureSize = { width: 1, height: 1, depth: 1 }; 268 const texture = device.createTexture({ 269 size: textureSize, 270 format: "rgba8uint", 271 usage: GPUTextureUsage.SAMPLED | GPUTextureUsage.TRANSFER_DST 272 }); 273 274 const [inputBuffer, inputArrayBuffer] = device.createBufferMapped({ size: 4, usage: GPUBufferUsage.UNIFORM }); 275 new Uint32Array(inputArrayBuffer).set([25]); 276 inputBuffer.unmap(); 277 278 const outputBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | 
GPUBufferUsage.MAP_READ }); 279 280 const bgl0 = device.createBindGroupLayout({ 281 bindings: [{ 282 binding: 0, 283 visibility: GPUShaderStageBit.COMPUTE, 284 type: "sampled-texture" 285 }] 286 }); 287 const bgl1 = device.createBindGroupLayout({ 288 bindings: [{ 289 binding: 0, 290 visibility: GPUShaderStageBit.COMPUTE, 291 type: "sampler" 292 }] 293 }); 294 const bgl2 = device.createBindGroupLayout({ 295 bindings: [{ 296 binding: 0, 297 visibility: GPUShaderStageBit.COMPUTE, 298 type: "uniform-buffer" 299 }] 300 }); 301 const bgl3 = device.createBindGroupLayout({ 302 bindings: [{ 303 binding: 0, 304 visibility: GPUShaderStageBit.COMPUTE, 305 type: "storage-buffer" 306 }] 307 }) 308 309 const bg0 = device.createBindGroup({ 310 layout: bgl0, 311 bindings: [{ 312 binding: 0, 313 resource: texture.createDefaultView() 314 }] 315 }); 316 const bg1 = device.createBindGroup({ 317 layout: bgl1, 318 bindings: [{ 319 binding: 0, 320 resource: device.createSampler({}) 321 }] 322 }); 323 const bg2 = device.createBindGroup({ 324 layout: bgl2, 325 bindings: [{ 326 binding: 0, 327 resource: { 328 buffer: inputBuffer, 329 size: 4 330 } 331 }] 332 }); 333 const bg3 = device.createBindGroup({ 334 layout: bgl3, 335 bindings: [{ 336 binding: 0, 337 resource: { 338 buffer: outputBuffer, 339 size: 4 340 } 341 }] 342 }); 343 344 const shaderModule = device.createShaderModule({ code: comboShader, isWHLSL: true }); 345 const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bgl0, bgl1, bgl2, bgl3] }); 346 347 const pipeline = device.createComputePipeline({ 348 layout: pipelineLayout, 349 computeStage: { 350 module: shaderModule, 351 entryPoint: "compute_main" 352 } 353 }); 354 355 const commandEncoder = device.createCommandEncoder(); 356 commandEncoder.copyBufferToTexture({ 357 buffer: textureDataBuffer, 358 rowPitch: 4, 359 imageHeight: 0 360 }, { texture: texture }, textureSize); 361 362 const passEncoder = commandEncoder.beginComputePass(); 363 
passEncoder.setPipeline(pipeline); 364 passEncoder.setBindGroup(0, bg0); 365 passEncoder.setBindGroup(1, bg1); 366 passEncoder.setBindGroup(2, bg2); 367 passEncoder.setBindGroup(3, bg3); 368 passEncoder.dispatch(1, 1, 1); 369 passEncoder.endPass(); 370 371 device.getQueue().submit([commandEncoder.finish()]); 372 373 const results = new Uint32Array(await outputBuffer.mapReadAsync()); 374 outputBuffer.unmap(); 375 assert_equals(results[0], 42, "Correct value sampled from a bound 2D texture."); 376 }; 377 378 tests["Bind a single GPUBuffer with different offsets in different GPUBindGroups"] = async device => { 379 const numInputs = 4; 380 const [uniformBuffer, writeArrayBuffer] = device.createBufferMapped({ size: 4 * numInputs, usage: GPUBufferUsage.UNIFORM }); 381 new Int32Array(writeArrayBuffer).set([1, 2, 3, 36]); 382 uniformBuffer.unmap(); 383 384 const storageBuffer = device.createBuffer({ size: 4, usage: GPUBufferUsage.STORAGE | GPUBufferUsage.MAP_READ }); 385 386 const bindGroupLayout = device.createBindGroupLayout({ 387 bindings: [{ 388 binding: 0, 389 visibility: GPUShaderStageBit.COMPUTE, 390 type: "uniform-buffer" 391 }, { 392 binding: 1, 393 visibility: GPUShaderStageBit.COMPUTE, 394 type: "storage-buffer" 395 }] 396 }); 397 398 let bindGroups = new Array(numInputs); 399 for (let i = 0; i < numInputs; ++i) { 400 bindGroups[i] = device.createBindGroup({ 401 layout: bindGroupLayout, 402 bindings: [{ 403 binding: 0, 404 resource: { 405 buffer: uniformBuffer, 406 offset: i * numInputs, 407 size: 4 408 } 409 }, { 410 binding: 1, 411 resource: { 412 buffer: storageBuffer, 413 size: 4 414 } 415 }] 416 }); 417 } 418 419 const pipelineLayout = device.createPipelineLayout({ bindGroupLayouts: [bindGroupLayout] }); 420 421 const shaderModule = device.createShaderModule({ code: uniformBufferShader, isWHLSL: true }); 422 423 const pipeline = device.createComputePipeline({ 424 layout: pipelineLayout, 425 computeStage: { 426 module: shaderModule, 427 entryPoint: 
"compute_main" 428 } 429 }); 430 431 const commandEncoder = device.createCommandEncoder(); 432 const passEncoder = commandEncoder.beginComputePass(); 433 passEncoder.setPipeline(pipeline); 434 for (let i = 0; i < numInputs; ++i) { 435 passEncoder.setBindGroup(0, bindGroups[i]); 436 passEncoder.dispatch(1, 1, 1); 437 } 438 passEncoder.endPass(); 439 device.getQueue().submit([commandEncoder.finish()]); 440 441 const results = new Int32Array(await storageBuffer.mapReadAsync()); 442 storageBuffer.unmap(); 443 assert_equals(results[0], 42, "Storage buffer binding written to successfully."); 444 }; 445 446 runTestsWithDevice(tests); 30 447 </script> 31 448 </body> -
trunk/Source/WebCore/ChangeLog
r248604 r248606 1 2019-08-13 Justin Fan <justin_fan@apple.com> 2 3 [WebGPU] Improve GPUBindGroup performance using one device-shared argument MTLBuffer 4 https://bugs.webkit.org/show_bug.cgi?id=200606 5 6 Reviewed by Myles C. Maxfield. 7 8 Manage all argument buffer storage for GPUBindGroups in one large MTLBuffer for a GPUDevice. 9 Vastly improves GPUProgrammablePassEncoder.setBindGroup performance; in alpha MotionMark WebGPU benchmark, 10 score improves from ~12000 to ~90000. 11 12 No expected change in WebGPU behavior, though bind-groups.html has been updated to cover more cases. 13 14 * Modules/webgpu/WebGPUDevice.cpp: 15 (WebCore::WebGPUDevice::createBindGroup const): 16 * SourcesCocoa.txt: 17 * WebCore.xcodeproj/project.pbxproj: 18 * platform/graphics/gpu/GPUBindGroup.h: No longer manages one unique MTLBuffer per MTLArgumentEncoder. 19 (WebCore::GPUBindGroup::argumentBuffer const): Delegates to GPUBindGroupAllocator for current argument buffer. 20 (WebCore::GPUBindGroup::vertexArgsBuffer const): Deleted. 21 (WebCore::GPUBindGroup::fragmentArgsBuffer const): Deleted. 22 (WebCore::GPUBindGroup::computeArgsBuffer const): Deleted. 23 * platform/graphics/gpu/GPUBindGroupAllocator.h: Added. Allocates MTLBuffer for and assigns offsets for argument buffers. 24 (WebCore::GPUBindGroupAllocator::argumentBuffer const): 25 * platform/graphics/gpu/GPUBindGroupLayout.h: 26 * platform/graphics/gpu/GPUBuffer.h: Move MTLResourceUsage calculation to GPUBuffer construction. 27 (WebCore::GPUBuffer::platformUsage const): 28 * platform/graphics/gpu/GPUComputePassEncoder.h: Prevent any potential narrowing issues, as offset can be large. 29 * platform/graphics/gpu/GPUDevice.cpp: Now owns a GPUBindGroupAllocator for owning all its argument buffer storage. 
30 (WebCore::GPUDevice::tryCreateBindGroup const): 31 * platform/graphics/gpu/GPUDevice.h: 32 * platform/graphics/gpu/GPUProgrammablePassEncoder.h: 33 (WebCore::GPUProgrammablePassEncoder::setVertexBuffer): 34 (WebCore::GPUProgrammablePassEncoder::setFragmentBuffer): 35 (WebCore::GPUProgrammablePassEncoder::setComputeBuffer): 36 * platform/graphics/gpu/GPURenderPassEncoder.h: 37 * platform/graphics/gpu/GPUTexture.h: Move MTLResourceUsage calculation to GPUTexture construction. 38 (WebCore::GPUTexture::platformUsage const): 39 * platform/graphics/gpu/cocoa/GPUBindGroupAllocatorMetal.mm: Added. 40 (WebCore::GPUBindGroupAllocator::create): 41 (WebCore::GPUBindGroupAllocator::GPUBindGroupAllocator): 42 (WebCore::GPUBindGroupAllocator::allocateAndSetEncoders): Ensures that MTLArgumentEncoders have appropriate allocation for encoding. 43 (WebCore::GPUBindGroupAllocator::reallocate): Create new MTLBuffer large enough for new encoder requirement, and copy over old argument buffer data. 44 (WebCore::GPUBindGroupAllocator::tryReset): For now, resets argument buffer if all GPUBindGroups created with this allocator are destroyed. 45 * platform/graphics/gpu/cocoa/GPUBindGroupMetal.mm: 46 (WebCore::tryGetResourceAsBufferBinding): Add size check. 47 (WebCore::GPUBindGroup::tryCreate): No longer owns new MTLBuffers. Requests argument buffer space from GPUBindGroupAllocator. 48 (WebCore::GPUBindGroup::GPUBindGroup): 49 (WebCore::GPUBindGroup::~GPUBindGroup): Remind allocator to check for possible reset. 50 (WebCore::tryCreateArgumentBuffer): Deleted. 51 * platform/graphics/gpu/cocoa/GPUBufferMetal.mm: 52 (WebCore::GPUBuffer::GPUBuffer): 53 * platform/graphics/gpu/cocoa/GPUComputePassEncoderMetal.mm: 54 (WebCore::GPUComputePassEncoder::setComputeBuffer): 55 * platform/graphics/gpu/cocoa/GPUDeviceMetal.mm: 56 * platform/graphics/gpu/cocoa/GPUProgrammablePassEncoderMetal.mm: 57 (WebCore::GPUProgrammablePassEncoder::setBindGroup): No need to recalculate usage every time. 
Set appropriate argument buffer and offsets for new bind group model. 58 * platform/graphics/gpu/cocoa/GPURenderPassEncoderMetal.mm: 59 (WebCore::GPURenderPassEncoder::setVertexBuffer): 60 (WebCore::GPURenderPassEncoder::setFragmentBuffer): 61 * platform/graphics/gpu/cocoa/GPUTextureMetal.mm: 62 (WebCore::GPUTexture::GPUTexture): 63 1 64 2019-08-13 Antti Koivisto <antti@apple.com> 2 65 -
trunk/Source/WebCore/Modules/webgpu/WebGPUDevice.cpp
r247892 r248606 146 146 return WebGPUBindGroup::create(nullptr); 147 147 148 auto bindGroup = GPUBindGroup::tryCreate(*gpuDescriptor);148 auto bindGroup = m_device->tryCreateBindGroup(*gpuDescriptor, m_errorScopes); 149 149 return WebGPUBindGroup::create(WTFMove(bindGroup)); 150 150 } -
trunk/Source/WebCore/SourcesCocoa.txt
r247530 r248606 327 327 platform/graphics/cv/VideoTextureCopierCV.cpp 328 328 329 platform/graphics/gpu/cocoa/GPUBindGroupAllocatorMetal.mm 329 330 platform/graphics/gpu/cocoa/GPUBindGroupMetal.mm 330 331 platform/graphics/gpu/cocoa/GPUBindGroupLayoutMetal.mm -
trunk/Source/WebCore/WebCore.xcodeproj/project.pbxproj
r248596 r248606 13935 13935 D0615FCD217FE5C6008A48A8 /* WebGPUShaderModule.cpp */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.cpp; path = WebGPUShaderModule.cpp; sourceTree = "<group>"; }; 13936 13936 D0615FCE217FE5C6008A48A8 /* WebGPUShaderModule.idl */ = {isa = PBXFileReference; lastKnownFileType = text; path = WebGPUShaderModule.idl; sourceTree = "<group>"; }; 13937 D065BE5722FB616D0076DD60 /* GPUBindGroupAllocator.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GPUBindGroupAllocator.h; sourceTree = "<group>"; }; 13938 D065BE5822FB616D0076DD60 /* GPUBindGroupAllocatorMetal.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = GPUBindGroupAllocatorMetal.mm; sourceTree = "<group>"; }; 13937 13939 D06A9A2122026C7A0083C662 /* GPURequestAdapterOptions.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = GPURequestAdapterOptions.h; sourceTree = "<group>"; }; 13938 13940 D06C0D8D0CFD11460065F43F /* RemoveFormatCommand.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RemoveFormatCommand.h; sourceTree = "<group>"; }; … … 18274 18276 D087CE3721ACA94200BDE174 /* cocoa */, 18275 18277 D0BE105E21E6BAD300E42A89 /* GPUBindGroup.h */, 18278 D065BE5722FB616D0076DD60 /* GPUBindGroupAllocator.h */, 18276 18279 D0BE104E21E695E200E42A89 /* GPUBindGroupBinding.h */, 18277 18280 D0BE105121E6A70E00E42A89 /* GPUBindGroupDescriptor.h */, … … 26064 26067 isa = PBXGroup; 26065 26068 children = ( 26069 D065BE5822FB616D0076DD60 /* GPUBindGroupAllocatorMetal.mm */, 26066 26070 D0232B5821CB49B7009483B9 /* GPUBindGroupLayoutMetal.mm */, 26067 26071 D085E64A2236DEAE00C3E1E2 /* GPUBindGroupMetal.mm */, … … 28606 28610 0F60F32B1DFBB10700416D6C /* CommonVM.h in Headers */, 28607 28611 7C93F34A1AA6BA5E00A98BAB /* CompiledContentExtension.h in Headers */, 28612 E4E94D6122FF158A00DD191F /* ComplexLineLayout.h in Headers */, 28608 28613 C2F4E78C1E45C3EF006D7105 /* 
ComplexTextController.h in Headers */, 28609 28614 E4BA50901BCFBD9500E34EF7 /* ComposedTreeAncestorIterator.h in Headers */, … … 30820 30825 E13EF3441684ECF40034C83F /* NetworkStorageSession.h in Headers */, 30821 30826 269397241A4A5B6400E8349D /* NFA.h in Headers */, 30822 E4E94D6122FF158A00DD191F /* ComplexLineLayout.h in Headers */,30823 30827 269397221A4A412F00E8349D /* NFANode.h in Headers */, 30824 30828 267726011A5B3AD9003C24DD /* NFAToDFA.h in Headers */, -
trunk/Source/WebCore/platform/graphics/gpu/GPUBindGroup.h
r247930 r248606 28 28 #if ENABLE(WEBGPU) 29 29 30 #include "GPUBindGroupAllocator.h" 30 31 #include "GPUBuffer.h" 31 32 #include "GPUTexture.h" 33 #include <objc/NSObjCRuntime.h> 34 #include <utility> 32 35 #include <wtf/HashSet.h> 33 36 #include <wtf/RefCounted.h> … … 35 38 #include <wtf/RetainPtr.h> 36 39 40 #if USE(METAL) 37 41 OBJC_PROTOCOL(MTLBuffer); 42 #endif 38 43 39 44 namespace WebCore { … … 41 46 struct GPUBindGroupDescriptor; 42 47 48 #if USE(METAL) 49 using ArgumentBuffer = std::pair<const MTLBuffer *, const GPUBindGroupAllocator::ArgumentBufferOffsets&>; 50 #endif 51 43 52 class GPUBindGroup : public RefCounted<GPUBindGroup> { 44 53 public: 45 static RefPtr<GPUBindGroup> tryCreate(const GPUBindGroupDescriptor&); 54 static RefPtr<GPUBindGroup> tryCreate(const GPUBindGroupDescriptor&, GPUBindGroupAllocator&); 55 56 ~GPUBindGroup(); 46 57 47 58 #if USE(METAL) 48 const MTLBuffer *vertexArgsBuffer() const { return m_vertexArgsBuffer.get(); } 49 const MTLBuffer *fragmentArgsBuffer() const { return m_fragmentArgsBuffer.get(); } 50 const MTLBuffer *computeArgsBuffer() const { return m_computeArgsBuffer.get(); } 59 const ArgumentBuffer argumentBuffer() const { return { m_allocator->argumentBuffer(), m_argumentBufferOffsets }; } 51 60 #endif 52 61 const HashSet<Ref<GPUBuffer>>& boundBuffers() const { return m_boundBuffers; } … … 55 64 private: 56 65 #if USE(METAL) 57 GPUBindGroup( RetainPtr<MTLBuffer>&& vertexBuffer, RetainPtr<MTLBuffer>&& fragmentBuffer, RetainPtr<MTLBuffer>&& computeArgsBuffer, HashSet<Ref<GPUBuffer>>&&, HashSet<Ref<GPUTexture>>&&);66 GPUBindGroup(GPUBindGroupAllocator::ArgumentBufferOffsets&&, GPUBindGroupAllocator&, HashSet<Ref<GPUBuffer>>&&, HashSet<Ref<GPUTexture>>&&); 58 67 59 RetainPtr<MTLBuffer> m_vertexArgsBuffer; 60 RetainPtr<MTLBuffer> m_fragmentArgsBuffer; 61 RetainPtr<MTLBuffer> m_computeArgsBuffer; 68 GPUBindGroupAllocator::ArgumentBufferOffsets m_argumentBufferOffsets; 69 Ref<GPUBindGroupAllocator> m_allocator; 62 70 #endif 63 
71 HashSet<Ref<GPUBuffer>> m_boundBuffers; -
trunk/Source/WebCore/platform/graphics/gpu/GPUBindGroupAllocator.h
r248605 r248606 28 28 #if ENABLE(WEBGPU) 29 29 30 #include "GPUBuffer.h"31 #include "GPUTexture.h"32 #include <wtf/ HashSet.h>30 #include <objc/NSObjCRuntime.h> 31 #include <wtf/Optional.h> 32 #include <wtf/Ref.h> 33 33 #include <wtf/RefCounted.h> 34 #include <wtf/RefPtr.h>35 34 #include <wtf/RetainPtr.h> 36 35 36 OBJC_PROTOCOL(MTLArgumentEncoder); 37 37 OBJC_PROTOCOL(MTLBuffer); 38 38 39 39 namespace WebCore { 40 40 41 struct GPUBindGroupDescriptor;41 class GPUErrorScopes; 42 42 43 class GPUBindGroup : public RefCounted<GPUBindGroup> {43 class GPUBindGroupAllocator : public RefCounted<GPUBindGroupAllocator> { 44 44 public: 45 static Ref Ptr<GPUBindGroup> tryCreate(const GPUBindGroupDescriptor&);46 45 static Ref<GPUBindGroupAllocator> create(GPUErrorScopes&); 46 47 47 #if USE(METAL) 48 const MTLBuffer *vertexArgsBuffer() const { return m_vertexArgsBuffer.get(); } 49 const MTLBuffer *fragmentArgsBuffer() const { return m_fragmentArgsBuffer.get(); } 50 const MTLBuffer *computeArgsBuffer() const { return m_computeArgsBuffer.get(); } 48 struct ArgumentBufferOffsets { 49 Optional<NSUInteger> vertex; 50 Optional<NSUInteger> fragment; 51 Optional<NSUInteger> compute; 52 }; 53 54 Optional<ArgumentBufferOffsets> allocateAndSetEncoders(MTLArgumentEncoder *vertex, MTLArgumentEncoder *fragment, MTLArgumentEncoder *compute); 55 56 void tryReset(); 57 58 const MTLBuffer *argumentBuffer() const { return m_argumentBuffer.get(); } 51 59 #endif 52 const HashSet<Ref<GPUBuffer>>& boundBuffers() const { return m_boundBuffers; }53 const HashSet<Ref<GPUTexture>>& boundTextures() const { return m_boundTextures; }54 60 55 61 private: 62 explicit GPUBindGroupAllocator(GPUErrorScopes&); 63 56 64 #if USE(METAL) 57 GPUBindGroup(RetainPtr<MTLBuffer>&& vertexBuffer, RetainPtr<MTLBuffer>&& fragmentBuffer, RetainPtr<MTLBuffer>&& computeArgsBuffer, HashSet<Ref<GPUBuffer>>&&, HashSet<Ref<GPUTexture>>&&); 58 59 RetainPtr<MTLBuffer> m_vertexArgsBuffer; 60 RetainPtr<MTLBuffer> m_fragmentArgsBuffer; 61 
RetainPtr<MTLBuffer> m_computeArgsBuffer; 65 bool reallocate(NSUInteger); 66 67 RetainPtr<MTLBuffer> m_argumentBuffer; 68 NSUInteger m_lastOffset { 0 }; 62 69 #endif 63 HashSet<Ref<GPUBuffer>> m_boundBuffers; 64 HashSet<Ref<GPUTexture>> m_boundTextures;70 71 Ref<GPUErrorScopes> m_errorScopes; 65 72 }; 66 73 -
trunk/Source/WebCore/platform/graphics/gpu/GPUBindGroupLayout.h
r246394 r248606 29 29 30 30 #include "GPUBindGroupLayoutDescriptor.h" 31 32 31 #include <wtf/HashMap.h> 33 32 #include <wtf/RefCounted.h> -
trunk/Source/WebCore/platform/graphics/gpu/GPUBuffer.h
r247892 r248606 84 84 bool isReadOnly() const; 85 85 bool isMappable() const { return m_usage.containsAny({ GPUBufferUsage::Flags::MapWrite, GPUBufferUsage::Flags::MapRead }); } 86 unsigned platformUsage() const { return m_platformUsage; } 86 87 State state() const; 87 88 … … 133 134 size_t m_byteLength; 134 135 OptionSet<GPUBufferUsage::Flags> m_usage; 136 unsigned m_platformUsage; 135 137 unsigned m_numScheduledCommandBuffers { 0 }; 136 138 bool m_isMappedFromCreation { false }; -
trunk/Source/WebCore/platform/graphics/gpu/GPUComputePassEncoder.h
r243627 r248606 57 57 #if USE(METAL) 58 58 void useResource(const MTLResource *, unsigned usage) final; 59 void setComputeBuffer(const MTLBuffer *, unsignedoffset, unsigned index) final;59 void setComputeBuffer(const MTLBuffer *, NSUInteger offset, unsigned index) final; 60 60 #endif 61 61 -
trunk/Source/WebCore/platform/graphics/gpu/GPUDevice.cpp
r247892 r248606 29 29 #if ENABLE(WEBGPU) 30 30 31 #include "GPUBindGroup.h" 32 #include "GPUBindGroupAllocator.h" 33 #include "GPUBindGroupDescriptor.h" 31 34 #include "GPUBindGroupLayout.h" 32 35 #include "GPUBindGroupLayoutDescriptor.h" … … 93 96 } 94 97 98 RefPtr<GPUBindGroup> GPUDevice::tryCreateBindGroup(const GPUBindGroupDescriptor& descriptor, GPUErrorScopes& errorScopes) const 99 { 100 if (!m_bindGroupAllocator) 101 m_bindGroupAllocator = GPUBindGroupAllocator::create(errorScopes); 102 103 return GPUBindGroup::tryCreate(descriptor, *m_bindGroupAllocator); 104 } 105 95 106 RefPtr<GPUCommandBuffer> GPUDevice::tryCreateCommandBuffer() const 96 107 { -
trunk/Source/WebCore/platform/graphics/gpu/GPUDevice.h
r247892 r248606 28 28 #if ENABLE(WEBGPU) 29 29 30 #include "GPUBindGroupAllocator.h" 30 31 #include "GPUQueue.h" 31 32 #include "GPUSwapChain.h" 32 33 #include <wtf/Function.h> 33 34 #include <wtf/Optional.h> 35 #include <wtf/Ref.h> 34 36 #include <wtf/RefCounted.h> 37 #include <wtf/RefPtr.h> 35 38 #include <wtf/RetainPtr.h> 36 39 #include <wtf/WeakPtr.h> … … 40 43 namespace WebCore { 41 44 45 class GPUBindGroup; 42 46 class GPUBindGroupLayout; 43 47 class GPUBuffer; … … 51 55 class GPUTexture; 52 56 57 struct GPUBindGroupDescriptor; 53 58 struct GPUBindGroupLayoutDescriptor; 54 59 struct GPUBufferDescriptor; … … 77 82 RefPtr<GPUBindGroupLayout> tryCreateBindGroupLayout(const GPUBindGroupLayoutDescriptor&) const; 78 83 Ref<GPUPipelineLayout> createPipelineLayout(GPUPipelineLayoutDescriptor&&) const; 84 RefPtr<GPUBindGroup> tryCreateBindGroup(const GPUBindGroupDescriptor&, GPUErrorScopes&) const; 79 85 80 86 RefPtr<GPUShaderModule> tryCreateShaderModule(const GPUShaderModuleDescriptor&) const; … … 96 102 mutable RefPtr<GPUQueue> m_queue; 97 103 RefPtr<GPUSwapChain> m_swapChain; 104 mutable RefPtr<GPUBindGroupAllocator> m_bindGroupAllocator; 98 105 }; 99 106 -
trunk/Source/WebCore/platform/graphics/gpu/GPUProgrammablePassEncoder.h
r243627 r248606 30 30 #include "GPUBindGroupBinding.h" 31 31 #include "GPUCommandBuffer.h" 32 #include <objc/NSObjCRuntime.h> 32 33 #include <wtf/RefCounted.h> 33 34 … … 64 65 65 66 // Render command encoder methods. 66 virtual void setVertexBuffer(const MTLBuffer *, unsigned, unsigned) { }67 virtual void setFragmentBuffer(const MTLBuffer *, unsigned, unsigned) { }67 virtual void setVertexBuffer(const MTLBuffer *, NSUInteger, unsigned) { } 68 virtual void setFragmentBuffer(const MTLBuffer *, NSUInteger, unsigned) { } 68 69 // Compute. 69 virtual void setComputeBuffer(const MTLBuffer *, unsigned, unsigned) { }70 virtual void setComputeBuffer(const MTLBuffer *, NSUInteger, unsigned) { } 70 71 #endif // USE(METAL) 71 72 -
trunk/Source/WebCore/platform/graphics/gpu/GPURenderPassEncoder.h
r244147 r248606 71 71 #if USE(METAL) 72 72 void useResource(const MTLResource *, unsigned usage) final; 73 void setVertexBuffer(const MTLBuffer *, unsignedoffset, unsigned index) final;74 void setFragmentBuffer(const MTLBuffer *, unsignedoffset, unsigned index) final;73 void setVertexBuffer(const MTLBuffer *, NSUInteger offset, unsigned index) final; 74 void setFragmentBuffer(const MTLBuffer *, NSUInteger offset, unsigned index) final; 75 75 76 76 RefPtr<GPUBuffer> m_indexBuffer; -
trunk/Source/WebCore/platform/graphics/gpu/GPUTexture.h
r246631 r248606 57 57 bool isSampled() const { return m_usage.contains(GPUTextureUsage::Flags::Sampled); } 58 58 bool isStorage() const { return m_usage.contains(GPUTextureUsage::Flags::Storage); } 59 unsigned platformUsage() const { return m_platformUsage; } 59 60 60 61 RefPtr<GPUTexture> tryCreateDefaultTextureView(); … … 67 68 68 69 OptionSet<GPUTextureUsage::Flags> m_usage; 70 unsigned m_platformUsage; 69 71 }; 70 72 -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUBindGroupMetal.mm
r247930 r248606 29 29 #if ENABLE(WEBGPU) 30 30 31 #import "GPUBindGroupAllocator.h" 31 32 #import "GPUBindGroupBinding.h" 32 33 #import "GPUBindGroupDescriptor.h" … … 40 41 41 42 namespace WebCore { 42 43 static RetainPtr<MTLBuffer> tryCreateArgumentBuffer(MTLArgumentEncoder *encoder)44 {45 RetainPtr<MTLBuffer> buffer;46 BEGIN_BLOCK_OBJC_EXCEPTIONS;47 buffer = adoptNS([encoder.device newBufferWithLength:encoder.encodedLength options:0]);48 [encoder setArgumentBuffer:buffer.get() offset:0];49 END_BLOCK_OBJC_EXCEPTIONS;50 return buffer;51 }52 43 53 44 static Optional<GPUBufferBinding> tryGetResourceAsBufferBinding(const GPUBindingResource& resource, const char* const functionName) … … 63 54 if (!bufferBinding.buffer->platformBuffer()) { 64 55 LOG(WebGPU, "%s: Invalid MTLBuffer in GPUBufferBinding!", functionName); 56 return WTF::nullopt; 57 } 58 if (!WTF::isInBounds<NSUInteger>(bufferBinding.size) || bufferBinding.size > bufferBinding.buffer->byteLength()) { 59 LOG(WebGPU, "%s: GPUBufferBinding size is too large!", functionName); 65 60 return WTF::nullopt; 66 61 } … … 136 131 END_BLOCK_OBJC_EXCEPTIONS; 137 132 } 138 139 RefPtr<GPUBindGroup> GPUBindGroup::tryCreate(const GPUBindGroupDescriptor& descriptor )133 134 RefPtr<GPUBindGroup> GPUBindGroup::tryCreate(const GPUBindGroupDescriptor& descriptor, GPUBindGroupAllocator& allocator) 140 135 { 141 136 const char* const functionName = "GPUBindGroup::tryCreate()"; … … 144 139 MTLArgumentEncoder *fragmentEncoder = descriptor.layout->fragmentEncoder(); 145 140 MTLArgumentEncoder *computeEncoder = descriptor.layout->computeEncoder(); 146 147 RetainPtr<MTLBuffer> vertexArgsBuffer; 148 if (vertexEncoder && !(vertexArgsBuffer = tryCreateArgumentBuffer(vertexEncoder))) { 149 LOG(WebGPU, "%s: Unable to create MTLBuffer for vertex argument buffer!", functionName); 150 return nullptr; 151 } 152 RetainPtr<MTLBuffer> fragmentArgsBuffer; 153 if (fragmentEncoder && !(fragmentArgsBuffer = tryCreateArgumentBuffer(fragmentEncoder))) { 
154 LOG(WebGPU, "%s: Unable to create MTLBuffer for fragment argument buffer!", functionName); 155 return nullptr; 156 } 157 RetainPtr<MTLBuffer> computeArgsBuffer; 158 if (computeEncoder && !(computeArgsBuffer = tryCreateArgumentBuffer(computeEncoder))) { 159 LOG(WebGPU, "%s: Unable to create MTLBuffer for compute argument buffer!", functionName); 160 return nullptr; 161 } 141 142 auto offsets = allocator.allocateAndSetEncoders(vertexEncoder, fragmentEncoder, computeEncoder); 143 if (!offsets) 144 return nullptr; 162 145 163 146 HashSet<Ref<GPUBuffer>> boundBuffers; … … 204 187 if (isForCompute) 205 188 setBufferOnEncoder(computeEncoder, *bufferResource, layoutBinding.internalName, internalLengthName); 206 boundBuffers.addVoid( bufferResource->buffer.copyRef());189 boundBuffers.addVoid(WTFMove(bufferResource->buffer)); 207 190 return true; 208 191 }; … … 244 227 } 245 228 246 return adoptRef(new GPUBindGroup(WTFMove(vertexArgsBuffer), WTFMove(fragmentArgsBuffer), WTFMove(computeArgsBuffer), WTFMove(boundBuffers), WTFMove(boundTextures))); 247 } 248 249 GPUBindGroup::GPUBindGroup(RetainPtr<MTLBuffer>&& vertexBuffer, RetainPtr<MTLBuffer>&& fragmentBuffer, RetainPtr<MTLBuffer>&& computeBuffer, HashSet<Ref<GPUBuffer>>&& buffers, HashSet<Ref<GPUTexture>>&& textures) 250 : m_vertexArgsBuffer(WTFMove(vertexBuffer)) 251 , m_fragmentArgsBuffer(WTFMove(fragmentBuffer)) 252 , m_computeArgsBuffer(WTFMove(computeBuffer)) 229 return adoptRef(new GPUBindGroup(WTFMove(*offsets), allocator, WTFMove(boundBuffers), WTFMove(boundTextures))); 230 } 231 232 GPUBindGroup::GPUBindGroup(GPUBindGroupAllocator::ArgumentBufferOffsets&& offsets, GPUBindGroupAllocator& allocator, HashSet<Ref<GPUBuffer>>&& buffers, HashSet<Ref<GPUTexture>>&& textures) 233 : m_argumentBufferOffsets(WTFMove(offsets)) 234 , m_allocator(makeRef(allocator)) 253 235 , m_boundBuffers(WTFMove(buffers)) 254 236 , m_boundTextures(WTFMove(textures)) 255 237 { 256 238 } 239 240 GPUBindGroup::~GPUBindGroup() 241 { 242 
GPUBindGroupAllocator& rawAllocator = m_allocator.leakRef(); 243 rawAllocator.deref(); 244 rawAllocator.tryReset(); 245 } 257 246 258 247 } // namespace WebCore -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUBufferMetal.mm
r248532 r248606 119 119 , m_isMappedFromCreation(isMapped == GPUBufferMappedOption::IsMapped) 120 120 { 121 m_platformUsage = MTLResourceUsageRead; 122 if (isStorage()) 123 m_platformUsage |= MTLResourceUsageWrite; 121 124 } 122 125 -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUComputePassEncoderMetal.mm
r246427 r248606 122 122 } 123 123 124 void GPUComputePassEncoder::setComputeBuffer(const MTLBuffer * buffer, unsignedoffset, unsigned index)124 void GPUComputePassEncoder::setComputeBuffer(const MTLBuffer *buffer, NSUInteger offset, unsigned index) 125 125 { 126 126 ASSERT(m_platformComputePassEncoder); -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUDeviceMetal.mm
r246846 r248606 31 31 #import "GPURequestAdapterOptions.h" 32 32 #import "Logging.h" 33 34 33 #import <Metal/Metal.h> 35 34 #import <pal/spi/cocoa/MetalSPI.h> -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUProgrammablePassEncoderMetal.mm
r246631 r248606 55 55 return; 56 56 } 57 58 if (bindGroup.vertexArgsBuffer())59 setVertexBuffer(bindGroup.vertexArgsBuffer(), 0, index);60 if (bindGroup.fragmentArgsBuffer())61 setFragmentBuffer(bindGroup.fragmentArgsBuffer(), 0, index);62 if (bindGroup.computeArgsBuffer())63 setComputeBuffer(bindGroup.computeArgsBuffer(), 0, index);64 57 65 for (auto& bufferRef : bindGroup.boundBuffers()) { 66 MTLResourceUsage usage = 0; 67 if (bufferRef->isUniform()) { 68 ASSERT(!bufferRef->isStorage()); 69 usage = MTLResourceUsageRead; 70 } else if (bufferRef->isStorage()) { 71 ASSERT(!bufferRef->isUniform()); 72 usage = MTLResourceUsageRead | MTLResourceUsageWrite; 73 } 74 useResource(bufferRef->platformBuffer(), usage); 75 m_commandBuffer->useBuffer(bufferRef.copyRef()); 58 auto argumentBuffer = bindGroup.argumentBuffer(); 59 if (!argumentBuffer.first) 60 return; 61 62 if (argumentBuffer.second.vertex) 63 setVertexBuffer(argumentBuffer.first, *argumentBuffer.second.vertex, index); 64 if (argumentBuffer.second.fragment) 65 setFragmentBuffer(argumentBuffer.first, *argumentBuffer.second.fragment, index); 66 if (argumentBuffer.second.compute) 67 setComputeBuffer(argumentBuffer.first, *argumentBuffer.second.compute, index); 68 69 for (auto& buffer : bindGroup.boundBuffers()) { 70 useResource(buffer->platformBuffer(), static_cast<MTLResourceUsage>(buffer->platformUsage())); 71 m_commandBuffer->useBuffer(buffer.copyRef()); 76 72 } 77 for (auto& textureRef : bindGroup.boundTextures()) { 78 MTLResourceUsage usage = 0; 79 if (textureRef->isSampled()) { 80 ASSERT(!textureRef->isStorage()); 81 usage = MTLResourceUsageRead | MTLResourceUsageSample; 82 } else if (textureRef->isStorage()) { 83 ASSERT(!textureRef->isSampled()); 84 usage = MTLResourceUsageRead | MTLResourceUsageWrite; 85 } 86 useResource(textureRef->platformTexture(), usage); 87 m_commandBuffer->useTexture(textureRef.copyRef()); 73 for (auto& texture : bindGroup.boundTextures()) { 74 useResource(texture->platformTexture(), 
static_cast<MTLResourceUsage>(texture->platformUsage())); 75 m_commandBuffer->useTexture(texture.copyRef()); 88 76 } 89 77 } -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPURenderPassEncoderMetal.mm
r244235 r248606 394 394 } 395 395 396 void GPURenderPassEncoder::setVertexBuffer(const MTLBuffer *buffer, unsignedoffset, unsigned index)396 void GPURenderPassEncoder::setVertexBuffer(const MTLBuffer *buffer, NSUInteger offset, unsigned index) 397 397 { 398 398 ASSERT(m_platformRenderPassEncoder); … … 403 403 } 404 404 405 void GPURenderPassEncoder::setFragmentBuffer(const MTLBuffer *buffer, unsignedoffset, unsigned index)405 void GPURenderPassEncoder::setFragmentBuffer(const MTLBuffer *buffer, NSUInteger offset, unsigned index) 406 406 { 407 407 ASSERT(m_platformRenderPassEncoder); -
trunk/Source/WebCore/platform/graphics/gpu/cocoa/GPUTextureMetal.mm
r246631 r248606 179 179 , m_usage(usage) 180 180 { 181 m_platformUsage = MTLResourceUsageRead; 182 if (isSampled()) 183 m_platformUsage |= MTLResourceUsageSample; 184 else if (isStorage()) 185 m_platformUsage |= MTLResourceUsageWrite; 181 186 } 182 187
Note: See TracChangeset
for help on using the changeset viewer.