Bug 1622846 - Update WebGPU API with wgpu r=jgilbert,webidl,smaug

This is another WebGPU API update; it picks up a number of changes that were made recently:
  - new bind group layout
  - new render pipeline descriptor
  - new vertex formats
  - limits
  - compressed texture formats
  - index format
  - query sets
  - and more small ones!

It also brings in the updated `gfx/wgpu` to support these API changes.
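
To give a rough flavor of these changes, here is a standalone sketch (simplified stand-in types only, not the actual Gecko WebIDL/FFI definitions) of two of the renames picked up in this patch: the GPUExtent3D `depth` member becoming `depthOrArrayLayers`, and the index format moving onto setIndexBuffer (see CommandEncoder::ConvertExtent3DToFFI and RenderPassEncoder::SetIndexBuffer in the diff below):

#include <cstdint>

// Simplified stand-ins for the WebIDL dictionary and FFI struct in the patch.
enum class GPUIndexFormat { Uint16, Uint32 };
enum class FfiIndexFormat { Uint16, Uint32 };

struct GPUExtent3DDict {
  uint32_t width = 1;
  uint32_t height = 1;
  uint32_t depthOrArrayLayers = 1;  // previously named `depth`
};

struct FfiExtent3d {
  uint32_t width;
  uint32_t height;
  uint32_t depth_or_array_layers;  // previously named `depth`
};

// Extent conversion now copies depthOrArrayLayers straight through.
FfiExtent3d ToFfiExtent(const GPUExtent3DDict& aDict) {
  return {aDict.width, aDict.height, aDict.depthOrArrayLayers};
}

// The index format is now an explicit argument on setIndexBuffer() rather
// than part of the render pipeline descriptor.
FfiIndexFormat ToFfiIndexFormat(GPUIndexFormat aFormat) {
  return aFormat == GPUIndexFormat::Uint32 ? FfiIndexFormat::Uint32
                                           : FfiIndexFormat::Uint16;
}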

Differential Revision: https://phabricator.services.mozilla.com/D107013
Dzmitry Malyshau 2021-03-04 17:48:28 +00:00
Parent 210cd0a19d
Commit 65199ec16a
486 changed files: 55887 additions and 29551 deletions


@ -2,16 +2,6 @@
# It was generated by `mach vendor rust`. # It was generated by `mach vendor rust`.
# Please do not edit. # Please do not edit.
[source."https://github.com/zakarumych/gpu-descriptor"]
git = "https://github.com/zakarumych/gpu-descriptor"
replace-with = "vendored-sources"
rev = "831460c4b5120d9a74744d542f39a95b9816b5ab"
[source."https://github.com/zakarumych/gpu-alloc"]
git = "https://github.com/zakarumych/gpu-alloc"
replace-with = "vendored-sources"
rev = "d07be73f9439a37c89f5b72f2500cbf0eb4ff613"
[source."https://github.com/shravanrn/nix/"] [source."https://github.com/shravanrn/nix/"]
git = "https://github.com/shravanrn/nix/" git = "https://github.com/shravanrn/nix/"
replace-with = "vendored-sources" replace-with = "vendored-sources"
@ -73,7 +63,7 @@ replace-with = "vendored-sources"
rev = "d5d8c00ebd3281d12e0be5dfddbb69f791f836f1" rev = "d5d8c00ebd3281d12e0be5dfddbb69f791f836f1"
[source."https://github.com/kvark/spirv_cross"] [source."https://github.com/kvark/spirv_cross"]
branch = "wgpu4" branch = "wgpu5"
git = "https://github.com/kvark/spirv_cross" git = "https://github.com/kvark/spirv_cross"
replace-with = "vendored-sources" replace-with = "vendored-sources"
@ -105,12 +95,22 @@ rev = "fd4ed671ef495af4dcda4c4cba3ef8d426db8af1"
[source."https://github.com/gfx-rs/naga"] [source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga" git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources" replace-with = "vendored-sources"
rev = "96c80738650822de35f77ab6a589f309460c8f39" tag = "gfx-12"
[source."https://github.com/gfx-rs/metal-rs"]
git = "https://github.com/gfx-rs/metal-rs"
replace-with = "vendored-sources"
rev = "439c986eb7a9b91e88b61def2daa66e4043fcbef"
[source."https://github.com/gfx-rs/gfx"] [source."https://github.com/gfx-rs/gfx"]
git = "https://github.com/gfx-rs/gfx" git = "https://github.com/gfx-rs/gfx"
replace-with = "vendored-sources" replace-with = "vendored-sources"
rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8"
[source."https://github.com/gfx-rs/d3d12-rs"]
git = "https://github.com/gfx-rs/d3d12-rs"
replace-with = "vendored-sources"
rev = "be19a243b86e0bafb9937d661fc8eabb3e42b44e"
[source."https://github.com/badboy/failure"] [source."https://github.com/badboy/failure"]
git = "https://github.com/badboy/failure" git = "https://github.com/badboy/failure"

Cargo.lock (generated)

@ -1061,12 +1061,11 @@ dependencies = [
[[package]] [[package]]
name = "d3d12" name = "d3d12"
version = "0.3.0" version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/gfx-rs/d3d12-rs?rev=be19a243b86e0bafb9937d661fc8eabb3e42b44e#be19a243b86e0bafb9937d661fc8eabb3e42b44e"
checksum = "bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"libloading 0.5.2", "libloading 0.7.0",
"winapi 0.3.9", "winapi 0.3.9",
] ]
@ -1852,8 +1851,8 @@ dependencies = [
[[package]] [[package]]
name = "gfx-auxil" name = "gfx-auxil"
version = "0.5.0" version = "0.8.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"fxhash", "fxhash",
"gfx-hal", "gfx-hal",
@ -1862,14 +1861,14 @@ dependencies = [
[[package]] [[package]]
name = "gfx-backend-dx11" name = "gfx-backend-dx11"
version = "0.6.0" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"bitflags", "bitflags",
"gfx-auxil", "gfx-auxil",
"gfx-hal", "gfx-hal",
"libloading 0.6.2", "libloading 0.7.0",
"log", "log",
"parking_lot", "parking_lot",
"range-alloc", "range-alloc",
@ -1883,8 +1882,8 @@ dependencies = [
[[package]] [[package]]
name = "gfx-backend-dx12" name = "gfx-backend-dx12"
version = "0.6.2" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"bit-set", "bit-set",
@ -1903,8 +1902,8 @@ dependencies = [
[[package]] [[package]]
name = "gfx-backend-empty" name = "gfx-backend-empty"
version = "0.6.0" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"gfx-hal", "gfx-hal",
"log", "log",
@ -1913,8 +1912,8 @@ dependencies = [
[[package]] [[package]]
name = "gfx-backend-metal" name = "gfx-backend-metal"
version = "0.6.0" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"bitflags", "bitflags",
@ -1922,23 +1921,22 @@ dependencies = [
"cocoa-foundation", "cocoa-foundation",
"copyless", "copyless",
"foreign-types", "foreign-types",
"gfx-auxil", "fxhash",
"gfx-hal", "gfx-hal",
"lazy_static",
"log", "log",
"metal", "metal",
"naga",
"objc", "objc",
"parking_lot", "parking_lot",
"range-alloc", "range-alloc",
"raw-window-handle", "raw-window-handle",
"spirv_cross",
"storage-map", "storage-map",
] ]
[[package]] [[package]]
name = "gfx-backend-vulkan" name = "gfx-backend-vulkan"
version = "0.6.5" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"ash", "ash",
@ -1946,9 +1944,10 @@ dependencies = [
"core-graphics-types", "core-graphics-types",
"gfx-hal", "gfx-hal",
"inplace_it", "inplace_it",
"lazy_static",
"log", "log",
"naga",
"objc", "objc",
"parking_lot",
"raw-window-handle", "raw-window-handle",
"smallvec", "smallvec",
"winapi 0.3.9", "winapi 0.3.9",
@ -1956,11 +1955,13 @@ dependencies = [
[[package]] [[package]]
name = "gfx-hal" name = "gfx-hal"
version = "0.6.0" version = "0.7.0"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"naga",
"raw-window-handle", "raw-window-handle",
"thiserror",
] ]
[[package]] [[package]]
@ -2184,35 +2185,41 @@ dependencies = [
[[package]] [[package]]
name = "gpu-alloc" name = "gpu-alloc"
version = "0.2.1" version = "0.3.0"
source = "git+https://github.com/zakarumych/gpu-alloc?rev=d07be73f9439a37c89f5b72f2500cbf0eb4ff613#d07be73f9439a37c89f5b72f2500cbf0eb4ff613" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7724b9aef57ea36d70faf54e0ee6265f86e41de16bed8333efdeab5b00e16b"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"gpu-alloc-types", "gpu-alloc-types",
"tracing",
] ]
[[package]] [[package]]
name = "gpu-alloc-types" name = "gpu-alloc-types"
version = "0.1.0" version = "0.2.0"
source = "git+https://github.com/zakarumych/gpu-alloc?rev=d07be73f9439a37c89f5b72f2500cbf0eb4ff613#d07be73f9439a37c89f5b72f2500cbf0eb4ff613" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54804d0d6bc9d7f26db4eaec1ad10def69b599315f487d32c334a80d1efe67a5"
dependencies = [ dependencies = [
"bitflags", "bitflags",
] ]
[[package]] [[package]]
name = "gpu-descriptor" name = "gpu-descriptor"
version = "0.1.0" version = "0.1.1"
source = "git+https://github.com/zakarumych/gpu-descriptor?rev=831460c4b5120d9a74744d542f39a95b9816b5ab#831460c4b5120d9a74744d542f39a95b9816b5ab" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a70f1e87a3840ed6a3e99e02c2b861e4dbdf26f0d07e38f42ea5aff46cfce2"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"gpu-descriptor-types", "gpu-descriptor-types",
"hashbrown", "hashbrown",
"tracing",
] ]
[[package]] [[package]]
name = "gpu-descriptor-types" name = "gpu-descriptor-types"
version = "0.1.0" version = "0.1.1"
source = "git+https://github.com/zakarumych/gpu-descriptor?rev=831460c4b5120d9a74744d542f39a95b9816b5ab#831460c4b5120d9a74744d542f39a95b9816b5ab" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "363e3677e55ad168fef68cf9de3a4a310b53124c5e784c53a1d70e92d23f2126"
dependencies = [ dependencies = [
"bitflags", "bitflags",
] ]
@ -2477,9 +2484,9 @@ dependencies = [
[[package]] [[package]]
name = "inplace_it" name = "inplace_it"
version = "0.3.2" version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd01a2a73f2f399df96b22dc88ea687ef4d76226284e7531ae3c7ee1dc5cb534" checksum = "90953f308a79fe6d62a4643e51f848fbfddcd05975a38e69fdf4ab86a7baf7ca"
[[package]] [[package]]
name = "instant" name = "instant"
@ -2780,6 +2787,16 @@ dependencies = [
"winapi 0.3.9", "winapi 0.3.9",
] ]
[[package]]
name = "libloading"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6f84d96438c15fcd6c3f244c8fce01d1e2b9c6b5623e9c711dc9286d8fc92d6a"
dependencies = [
"cfg-if 1.0.0",
"winapi 0.3.9",
]
[[package]] [[package]]
name = "libsqlite3-sys" name = "libsqlite3-sys"
version = "0.20.1" version = "0.20.1"
@ -3050,9 +3067,8 @@ dependencies = [
[[package]] [[package]]
name = "metal" name = "metal"
version = "0.20.0" version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/gfx-rs/metal-rs?rev=439c986eb7a9b91e88b61def2daa66e4043fcbef#439c986eb7a9b91e88b61def2daa66e4043fcbef"
checksum = "5c4e8a431536529327e28c9ba6992f2cb0c15d4222f0602a16e6d7695ff3bccf"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"block", "block",
@ -3325,9 +3341,10 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]] [[package]]
name = "naga" name = "naga"
version = "0.2.0" version = "0.3.1"
source = "git+https://github.com/gfx-rs/naga?rev=96c80738650822de35f77ab6a589f309460c8f39#96c80738650822de35f77ab6a589f309460c8f39" source = "git+https://github.com/gfx-rs/naga?tag=gfx-12#fa7d4d8b51d4eeffe9f648d285466637f733a4a1"
dependencies = [ dependencies = [
"bit-set",
"bitflags", "bitflags",
"fxhash", "fxhash",
"log", "log",
@ -4155,8 +4172,8 @@ dependencies = [
[[package]] [[package]]
name = "range-alloc" name = "range-alloc"
version = "0.1.1" version = "0.1.2"
source = "git+https://github.com/gfx-rs/gfx?rev=1d14789011cb892f4c1a205d3f8a87d479c2e354#1d14789011cb892f4c1a205d3f8a87d479c2e354" source = "git+https://github.com/gfx-rs/gfx?rev=0a201d1c406b5119ec11068293a40e50ec0be4c8#0a201d1c406b5119ec11068293a40e50ec0be4c8"
[[package]] [[package]]
name = "raw-cpuid" name = "raw-cpuid"
@ -4780,24 +4797,24 @@ dependencies = [
[[package]] [[package]]
name = "spirv-cross-internal" name = "spirv-cross-internal"
version = "0.1.0" version = "0.1.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu4#e51babbf00427984fe343e48493d8a9339fec473" source = "git+https://github.com/kvark/spirv_cross?branch=wgpu5#a5a90d38ab1f82ad8327b48e161dbfe556ef6c6e"
dependencies = [ dependencies = [
"cc", "cc",
] ]
[[package]] [[package]]
name = "spirv_cross" name = "spirv_cross"
version = "0.22.0" version = "0.23.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu4#e51babbf00427984fe343e48493d8a9339fec473" source = "git+https://github.com/kvark/spirv_cross?branch=wgpu5#a5a90d38ab1f82ad8327b48e161dbfe556ef6c6e"
dependencies = [ dependencies = [
"spirv-cross-internal", "spirv-cross-internal",
] ]
[[package]] [[package]]
name = "spirv_headers" name = "spirv_headers"
version = "1.4.2" version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f1418983d16481227ffa3ab3cf44ef92eebc9a76c092fbcd4c51a64ff032622" checksum = "1f5b132530b1ac069df335577e3581765995cba5a13995cdbbdbc8fb057c532c"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"num-traits", "num-traits",
@ -5441,9 +5458,21 @@ checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27"
dependencies = [ dependencies = [
"cfg-if 0.1.10", "cfg-if 0.1.10",
"pin-project-lite 0.1.4", "pin-project-lite 0.1.4",
"tracing-attributes",
"tracing-core", "tracing-core",
] ]
[[package]]
name = "tracing-attributes"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8a9bd1db7706f2373a190b0d067146caa39350c486f3d455b0e33b431f94c07"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "tracing-core" name = "tracing-core"
version = "0.1.17" version = "0.1.17"
@ -5897,7 +5926,7 @@ dependencies = [
[[package]] [[package]]
name = "wgpu-core" name = "wgpu-core"
version = "0.6.0" version = "0.7.0"
dependencies = [ dependencies = [
"arrayvec", "arrayvec",
"bitflags", "bitflags",
@ -5924,7 +5953,7 @@ dependencies = [
[[package]] [[package]]
name = "wgpu-types" name = "wgpu-types"
version = "0.6.0" version = "0.7.0"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"serde", "serde",


@ -73,7 +73,7 @@ libudev-sys = { path = "dom/webauthn/libudev-sys" }
packed_simd = { git = "https://github.com/hsivonen/packed_simd", rev="0917fe780032a6bbb23d71be545f9c1834128d75" } packed_simd = { git = "https://github.com/hsivonen/packed_simd", rev="0917fe780032a6bbb23d71be545f9c1834128d75" }
rlbox_lucet_sandbox = { git = "https://github.com/PLSysSec/rlbox_lucet_sandbox/", rev="f3cace4fb8b53db0849c62af4fa62bade5a620f7" } rlbox_lucet_sandbox = { git = "https://github.com/PLSysSec/rlbox_lucet_sandbox/", rev="f3cace4fb8b53db0849c62af4fa62bade5a620f7" }
nix = { git = "https://github.com/shravanrn/nix/", rev="4af6c367603869a30fddb5ffb0aba2b9477ba92e" } nix = { git = "https://github.com/shravanrn/nix/", rev="4af6c367603869a30fddb5ffb0aba2b9477ba92e" }
spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu4" } spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu5" }
# failure's backtrace feature might break our builds, see bug 1608157. # failure's backtrace feature might break our builds, see bug 1608157.
failure = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" } failure = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" }
failure_derive = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" } failure_derive = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" }


@ -1326,6 +1326,12 @@ DOMInterfaces = {
'GPUAdapter': { 'GPUAdapter': {
'nativeType': 'mozilla::webgpu::Adapter', 'nativeType': 'mozilla::webgpu::Adapter',
}, },
'GPUAdapterFeatures': {
'nativeType': 'mozilla::webgpu::AdapterFeatures',
},
'GPUAdapterLimits': {
'nativeType': 'mozilla::webgpu::AdapterLimits',
},
'GPUBindGroup': { 'GPUBindGroup': {
'nativeType': 'mozilla::webgpu::BindGroup', 'nativeType': 'mozilla::webgpu::BindGroup',
}, },
@ -1345,6 +1351,12 @@ DOMInterfaces = {
'GPUCommandEncoder': { 'GPUCommandEncoder': {
'nativeType': 'mozilla::webgpu::CommandEncoder', 'nativeType': 'mozilla::webgpu::CommandEncoder',
}, },
'GPUCompilationInfo': {
'nativeType': 'mozilla::webgpu::CompilationInfo',
},
'GPUCompilationMessage': {
'nativeType': 'mozilla::webgpu::CompilationMessage',
},
'GPUComputePassEncoder': { 'GPUComputePassEncoder': {
'nativeType': 'mozilla::webgpu::ComputePassEncoder', 'nativeType': 'mozilla::webgpu::ComputePassEncoder',
}, },
@ -1357,15 +1369,15 @@ DOMInterfaces = {
'GPUDeviceLostInfo': { 'GPUDeviceLostInfo': {
'nativeType': 'mozilla::webgpu::DeviceLostInfo', 'nativeType': 'mozilla::webgpu::DeviceLostInfo',
}, },
'GPUFence': {
'nativeType': 'mozilla::webgpu::Fence',
},
'GPUOutOfMemoryError': { 'GPUOutOfMemoryError': {
'nativeType': 'mozilla::webgpu::OutOfMemoryError', 'nativeType': 'mozilla::webgpu::OutOfMemoryError',
}, },
'GPUPipelineLayout': { 'GPUPipelineLayout': {
'nativeType': 'mozilla::webgpu::PipelineLayout', 'nativeType': 'mozilla::webgpu::PipelineLayout',
}, },
'GPUQuerySet': {
'nativeType': 'mozilla::webgpu::QuerySet',
},
'GPUQueue': { 'GPUQueue': {
'nativeType': 'mozilla::webgpu::Queue', 'nativeType': 'mozilla::webgpu::Queue',
}, },


@ -6,6 +6,8 @@
#include "mozilla/dom/WebGPUBinding.h" #include "mozilla/dom/WebGPUBinding.h"
#include "Adapter.h" #include "Adapter.h"
#include "AdapterFeatures.h"
#include "AdapterLimits.h"
#include "Device.h" #include "Device.h"
#include "Instance.h" #include "Instance.h"
#include "ipc/WebGPUChild.h" #include "ipc/WebGPUChild.h"
@ -29,6 +31,9 @@ void Adapter::Cleanup() {
} }
} }
const RefPtr<AdapterFeatures>& Adapter::Features() const { return mFeatures; }
const RefPtr<AdapterLimits>& Adapter::Limits() const { return mLimits; }
already_AddRefed<dom::Promise> Adapter::RequestDevice( already_AddRefed<dom::Promise> Adapter::RequestDevice(
const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv) { const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv) {
RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv); RefPtr<dom::Promise> promise = dom::Promise::Create(GetParentObject(), aRv);


@ -21,6 +21,8 @@ struct GPUFeatures;
} // namespace dom } // namespace dom
namespace webgpu { namespace webgpu {
class AdapterFeatures;
class AdapterLimits;
class Device; class Device;
class Instance; class Instance;
class WebGPUChild; class WebGPUChild;
@ -33,16 +35,19 @@ class Adapter final : public ObjectBase, public ChildOf<Instance> {
RefPtr<WebGPUChild> mBridge; RefPtr<WebGPUChild> mBridge;
private: private:
Adapter() = delete;
~Adapter(); ~Adapter();
void Cleanup(); void Cleanup();
const RawId mId; const RawId mId;
const nsString mName; const nsString mName;
RefPtr<AdapterFeatures> mFeatures;
RefPtr<AdapterLimits> mLimits;
public: public:
explicit Adapter(Instance* const aParent, RawId aId); Adapter(Instance* const aParent, RawId aId);
void GetName(nsString& out) const { out = mName; } void GetName(nsString& out) const { out = mName; }
const RefPtr<AdapterFeatures>& Features() const;
const RefPtr<AdapterLimits>& Limits() const;
already_AddRefed<dom::Promise> RequestDevice( already_AddRefed<dom::Promise> RequestDevice(
const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv); const dom::GPUDeviceDescriptor& aDesc, ErrorResult& aRv);


@ -0,0 +1,19 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AdapterFeatures.h"
#include "Adapter.h"
#include "mozilla/dom/WebGPUBinding.h"
namespace mozilla {
namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(AdapterFeatures, mParent)
GPU_IMPL_JS_WRAP(AdapterFeatures)
AdapterFeatures::AdapterFeatures(Adapter* const aParent) : ChildOf(aParent) {}
} // namespace webgpu
} // namespace mozilla


@ -0,0 +1,30 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GPU_AdapterFeatures_H_
#define GPU_AdapterFeatures_H_
#include "nsWrapperCache.h"
#include "ObjectModel.h"
namespace mozilla {
namespace webgpu {
class Adapter;
class AdapterFeatures final : public nsWrapperCache, public ChildOf<Adapter> {
public:
GPU_DECL_CYCLE_COLLECTION(AdapterFeatures)
GPU_DECL_JS_WRAP(AdapterFeatures)
private:
explicit AdapterFeatures(Adapter* const aParent);
~AdapterFeatures() = default;
void Cleanup() {}
};
} // namespace webgpu
} // namespace mozilla
#endif // GPU_AdapterFeatures_H_


@ -0,0 +1,76 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "AdapterLimits.h"
#include "Adapter.h"
#include "mozilla/dom/WebGPUBinding.h"
#include "mozilla/webgpu/ffi/wgpu.h"
namespace mozilla {
namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(AdapterLimits, mParent)
GPU_IMPL_JS_WRAP(AdapterLimits)
AdapterLimits::AdapterLimits(Adapter* const aParent,
const ffi::WGPULimits& aLimits)
: ChildOf(aParent), mLimits(new ffi::WGPULimits(aLimits)) {}
AdapterLimits::~AdapterLimits() = default;
uint32_t AdapterLimits::MaxTextureDimension1D() const {
return mLimits->max_texture_dimension_1d;
}
uint32_t AdapterLimits::MaxTextureDimension2D() const {
return mLimits->max_texture_dimension_2d;
}
uint32_t AdapterLimits::MaxTextureDimension3D() const {
return mLimits->max_texture_dimension_3d;
}
uint32_t AdapterLimits::MaxTextureArrayLayers() const {
return mLimits->max_texture_array_layers;
}
uint32_t AdapterLimits::MaxBindGroups() const {
return mLimits->max_bind_groups;
}
uint32_t AdapterLimits::MaxDynamicUniformBuffersPerPipelineLayout() const {
return mLimits->max_dynamic_uniform_buffers_per_pipeline_layout;
}
uint32_t AdapterLimits::MaxDynamicStorageBuffersPerPipelineLayout() const {
return mLimits->max_dynamic_storage_buffers_per_pipeline_layout;
}
uint32_t AdapterLimits::MaxSampledTexturesPerShaderStage() const {
return mLimits->max_sampled_textures_per_shader_stage;
}
uint32_t AdapterLimits::MaxSamplersPerShaderStage() const {
return mLimits->max_samplers_per_shader_stage;
}
uint32_t AdapterLimits::MaxStorageBuffersPerShaderStage() const {
return mLimits->max_storage_buffers_per_shader_stage;
}
uint32_t AdapterLimits::MaxStorageTexturesPerShaderStage() const {
return mLimits->max_storage_textures_per_shader_stage;
}
uint32_t AdapterLimits::MaxUniformBuffersPerShaderStage() const {
return mLimits->max_uniform_buffers_per_shader_stage;
}
uint32_t AdapterLimits::MaxUniformBufferBindingSize() const {
return mLimits->max_uniform_buffer_binding_size;
}
uint32_t AdapterLimits::MaxStorageBufferBindingSize() const {
return mLimits->max_storage_buffer_binding_size;
}
uint32_t AdapterLimits::MaxVertexBuffers() const {
return mLimits->max_vertex_buffers;
}
uint32_t AdapterLimits::MaxVertexAttributes() const {
return mLimits->max_vertex_attributes;
}
uint32_t AdapterLimits::MaxVertexBufferArrayStride() const {
return mLimits->max_vertex_buffer_array_stride;
}
} // namespace webgpu
} // namespace mozilla


@ -0,0 +1,53 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GPU_AdapterLimitss_H_
#define GPU_AdapterLimitss_H_
#include "nsWrapperCache.h"
#include "ObjectModel.h"
namespace mozilla {
namespace webgpu {
namespace ffi {
struct WGPULimits;
}
class Adapter;
class AdapterLimits final : public nsWrapperCache, public ChildOf<Adapter> {
const UniquePtr<ffi::WGPULimits> mLimits;
public:
GPU_DECL_CYCLE_COLLECTION(AdapterLimits)
GPU_DECL_JS_WRAP(AdapterLimits)
uint32_t MaxTextureDimension1D() const;
uint32_t MaxTextureDimension2D() const;
uint32_t MaxTextureDimension3D() const;
uint32_t MaxTextureArrayLayers() const;
uint32_t MaxBindGroups() const;
uint32_t MaxDynamicUniformBuffersPerPipelineLayout() const;
uint32_t MaxDynamicStorageBuffersPerPipelineLayout() const;
uint32_t MaxSampledTexturesPerShaderStage() const;
uint32_t MaxSamplersPerShaderStage() const;
uint32_t MaxStorageBuffersPerShaderStage() const;
uint32_t MaxStorageTexturesPerShaderStage() const;
uint32_t MaxUniformBuffersPerShaderStage() const;
uint32_t MaxUniformBufferBindingSize() const;
uint32_t MaxStorageBufferBindingSize() const;
uint32_t MaxVertexBuffers() const;
uint32_t MaxVertexAttributes() const;
uint32_t MaxVertexBufferArrayStride() const;
private:
AdapterLimits(Adapter* const aParent, const ffi::WGPULimits& aLimits);
~AdapterLimits();
void Cleanup() {}
};
} // namespace webgpu
} // namespace mozilla
#endif // GPU_AdapterLimitss_H_


@ -66,6 +66,11 @@ bool CanvasContext::UpdateWebRenderCanvasData(
return true; return true;
} }
dom::GPUTextureFormat CanvasContext::GetSwapChainPreferredFormat(
Adapter&) const {
return dom::GPUTextureFormat::Bgra8unorm;
}
RefPtr<SwapChain> CanvasContext::ConfigureSwapChain( RefPtr<SwapChain> CanvasContext::ConfigureSwapChain(
const dom::GPUSwapChainDescriptor& aDesc, ErrorResult& aRv) { const dom::GPUSwapChainDescriptor& aDesc, ErrorResult& aRv) {
Cleanup(); Cleanup();
@ -86,7 +91,7 @@ RefPtr<SwapChain> CanvasContext::ConfigureSwapChain(
dom::GPUExtent3DDict extent; dom::GPUExtent3DDict extent;
extent.mWidth = mWidth; extent.mWidth = mWidth;
extent.mHeight = mHeight; extent.mHeight = mHeight;
extent.mDepth = 1; extent.mDepthOrArrayLayers = 1;
mSwapChain = new SwapChain(aDesc, extent, mExternalImageId, format); mSwapChain = new SwapChain(aDesc, extent, mExternalImageId, format);
// Force a new frame to be built, which will execute the // Force a new frame to be built, which will execute the


@ -15,12 +15,13 @@
namespace mozilla { namespace mozilla {
namespace dom { namespace dom {
class Promise; class Promise;
enum class GPUTextureFormat : uint8_t;
} // namespace dom } // namespace dom
namespace layers { namespace layers {
class WebRenderLocalCanvasData; class WebRenderLocalCanvasData;
}; };
namespace webgpu { namespace webgpu {
class Device; class Adapter;
class SwapChain; class SwapChain;
class Texture; class Texture;
@ -97,6 +98,7 @@ class CanvasContext final : public nsICanvasRenderingContextInternal,
bool IsContextCleanForFrameCapture() override { return false; } bool IsContextCleanForFrameCapture() override { return false; }
public: public:
dom::GPUTextureFormat GetSwapChainPreferredFormat(Adapter& aAdapter) const;
RefPtr<SwapChain> ConfigureSwapChain(const dom::GPUSwapChainDescriptor& aDesc, RefPtr<SwapChain> ConfigureSwapChain(const dom::GPUSwapChainDescriptor& aDesc,
ErrorResult& aRv); ErrorResult& aRv);


@ -60,12 +60,12 @@ void CommandEncoder::ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
const auto& seq = aExtent.GetAsRangeEnforcedUnsignedLongSequence(); const auto& seq = aExtent.GetAsRangeEnforcedUnsignedLongSequence();
aExtentFFI->width = seq.Length() > 0 ? seq[0] : 0; aExtentFFI->width = seq.Length() > 0 ? seq[0] : 0;
aExtentFFI->height = seq.Length() > 1 ? seq[1] : 0; aExtentFFI->height = seq.Length() > 1 ? seq[1] : 0;
aExtentFFI->depth = seq.Length() > 2 ? seq[2] : 0; aExtentFFI->depth_or_array_layers = seq.Length() > 2 ? seq[2] : 0;
} else if (aExtent.IsGPUExtent3DDict()) { } else if (aExtent.IsGPUExtent3DDict()) {
const auto& dict = aExtent.GetAsGPUExtent3DDict(); const auto& dict = aExtent.GetAsGPUExtent3DDict();
aExtentFFI->width = dict.mWidth; aExtentFFI->width = dict.mWidth;
aExtentFFI->height = dict.mHeight; aExtentFFI->height = dict.mHeight;
aExtentFFI->depth = dict.mDepth; aExtentFFI->depth_or_array_layers = dict.mDepthOrArrayLayers;
} else { } else {
MOZ_CRASH("Unexptected extent type"); MOZ_CRASH("Unexptected extent type");
} }


@ -0,0 +1,21 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "CompilationInfo.h"
#include "CompilationMessage.h"
#include "ShaderModule.h"
#include "mozilla/dom/WebGPUBinding.h"
namespace mozilla {
namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(CompilationInfo, mParent)
GPU_IMPL_JS_WRAP(CompilationInfo)
CompilationInfo::CompilationInfo(ShaderModule* const aParent)
: ChildOf(aParent) {}
} // namespace webgpu
} // namespace mozilla


@ -0,0 +1,31 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GPU_CompilationInfo_H_
#define GPU_CompilationInfo_H_
#include "nsWrapperCache.h"
#include "ObjectModel.h"
namespace mozilla {
namespace webgpu {
class ShaderModule;
class CompilationInfo final : public nsWrapperCache,
public ChildOf<ShaderModule> {
public:
GPU_DECL_CYCLE_COLLECTION(CompilationInfo)
GPU_DECL_JS_WRAP(CompilationInfo)
private:
explicit CompilationInfo(ShaderModule* const aParent);
~CompilationInfo() = default;
void Cleanup() {}
};
} // namespace webgpu
} // namespace mozilla
#endif // GPU_CompilationInfo_H_


@ -0,0 +1,20 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "CompilationMessage.h"
#include "CompilationInfo.h"
#include "mozilla/dom/WebGPUBinding.h"
namespace mozilla {
namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(CompilationMessage, mParent)
GPU_IMPL_JS_WRAP(CompilationMessage)
CompilationMessage::CompilationMessage(CompilationInfo* const aParent)
: ChildOf(aParent) {}
} // namespace webgpu
} // namespace mozilla


@ -0,0 +1,44 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GPU_CompilationMessage_H_
#define GPU_CompilationMessage_H_
#include "nsWrapperCache.h"
#include "ObjectModel.h"
#include "mozilla/dom/WebGPUBinding.h"
namespace mozilla {
namespace dom {
class DOMString;
} // namespace dom
namespace webgpu {
class CompilationInfo;
class CompilationMessage final : public nsWrapperCache,
public ChildOf<CompilationInfo> {
dom::GPUCompilationMessageType mType = dom::GPUCompilationMessageType::Error;
uint64_t mLineNum = 0;
uint64_t mLinePos = 0;
public:
GPU_DECL_CYCLE_COLLECTION(CompilationMessage)
GPU_DECL_JS_WRAP(CompilationMessage)
void GetMessage(dom::DOMString& aMessage) {}
dom::GPUCompilationMessageType Type() const { return mType; }
uint64_t LineNum() const { return mLineNum; }
uint64_t LinePos() const { return mLinePos; }
private:
explicit CompilationMessage(CompilationInfo* const aParent);
~CompilationMessage() = default;
void Cleanup() {}
};
} // namespace webgpu
} // namespace mozilla
#endif // GPU_CompilationMessage_H_


@ -52,7 +52,7 @@ Device::Device(Adapter* const aParent, RawId aId)
: DOMEventTargetHelper(aParent->GetParentObject()), : DOMEventTargetHelper(aParent->GetParentObject()),
mId(aId), mId(aId),
mBridge(aParent->mBridge), mBridge(aParent->mBridge),
mQueue(new Queue(this, aParent->mBridge, aId)) { mQueue(new class Queue(this, aParent->mBridge, aId)) {
mBridge->RegisterDevice(mId, this); mBridge->RegisterDevice(mId, this);
} }
@ -68,7 +68,7 @@ void Device::Cleanup() {
void Device::GetLabel(nsAString& aValue) const { aValue = mLabel; } void Device::GetLabel(nsAString& aValue) const { aValue = mLabel; }
void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; } void Device::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
Queue* Device::DefaultQueue() const { return mQueue; } Queue* Device::Queue() const { return mQueue; }
already_AddRefed<Buffer> Device::CreateBuffer( already_AddRefed<Buffer> Device::CreateBuffer(
const dom::GPUBufferDescriptor& aDesc, ErrorResult& aRv) { const dom::GPUBufferDescriptor& aDesc, ErrorResult& aRv) {
@ -98,14 +98,12 @@ already_AddRefed<Buffer> Device::CreateBuffer(
// If the buffer is not mapped at creation, and it has Shmem, we send it // If the buffer is not mapped at creation, and it has Shmem, we send it
// to the GPU process. Otherwise, we keep it. // to the GPU process. Otherwise, we keep it.
RawId id = mBridge->DeviceCreateBuffer(mId, aDesc); RawId id = mBridge->DeviceCreateBuffer(mId, aDesc);
if (hasMapFlags && !aDesc.mMappedAtCreation) {
mBridge->SendBufferReturnShmem(id, std::move(shmem));
}
RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize); RefPtr<Buffer> buffer = new Buffer(this, id, aDesc.mSize);
if (aDesc.mMappedAtCreation) { if (aDesc.mMappedAtCreation) {
buffer->SetMapped(std::move(shmem), buffer->SetMapped(std::move(shmem),
!(aDesc.mUsage & dom::GPUBufferUsage_Binding::MAP_READ)); !(aDesc.mUsage & dom::GPUBufferUsage_Binding::MAP_READ));
} else if (hasMapFlags) {
mBridge->SendBufferReturnShmem(id, std::move(shmem));
} }
return buffer.forget(); return buffer.forget();
@ -187,11 +185,8 @@ already_AddRefed<BindGroup> Device::CreateBindGroup(
} }
already_AddRefed<ShaderModule> Device::CreateShaderModule( already_AddRefed<ShaderModule> Device::CreateShaderModule(
const dom::GPUShaderModuleDescriptor& aDesc) { JSContext* aCx, const dom::GPUShaderModuleDescriptor& aDesc) {
if (aDesc.mCode.IsString()) { Unused << aCx;
// we don't yet support WGSL
return nullptr;
}
RawId id = mBridge->DeviceCreateShaderModule(mId, aDesc); RawId id = mBridge->DeviceCreateShaderModule(mId, aDesc);
RefPtr<ShaderModule> object = new ShaderModule(this, id); RefPtr<ShaderModule> object = new ShaderModule(this, id);
return object.forget(); return object.forget();
@ -240,5 +235,9 @@ already_AddRefed<Texture> Device::InitSwapChain(
return CreateTexture(desc); return CreateTexture(desc);
} }
void Device::Destroy() {
// TODO
}
} // namespace webgpu } // namespace webgpu
} // namespace mozilla } // namespace mozilla


@ -105,7 +105,7 @@ class Device final : public DOMEventTargetHelper {
void GetLabel(nsAString& aValue) const; void GetLabel(nsAString& aValue) const;
void SetLabel(const nsAString& aLabel); void SetLabel(const nsAString& aLabel);
Queue* DefaultQueue() const; Queue* Queue() const;
already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc, already_AddRefed<Buffer> CreateBuffer(const dom::GPUBufferDescriptor& aDesc,
ErrorResult& aRv); ErrorResult& aRv);
@ -126,12 +126,14 @@ class Device final : public DOMEventTargetHelper {
const dom::GPUBindGroupDescriptor& aDesc); const dom::GPUBindGroupDescriptor& aDesc);
already_AddRefed<ShaderModule> CreateShaderModule( already_AddRefed<ShaderModule> CreateShaderModule(
const dom::GPUShaderModuleDescriptor& aDesc); JSContext* aCx, const dom::GPUShaderModuleDescriptor& aDesc);
already_AddRefed<ComputePipeline> CreateComputePipeline( already_AddRefed<ComputePipeline> CreateComputePipeline(
const dom::GPUComputePipelineDescriptor& aDesc); const dom::GPUComputePipelineDescriptor& aDesc);
already_AddRefed<RenderPipeline> CreateRenderPipeline( already_AddRefed<RenderPipeline> CreateRenderPipeline(
const dom::GPURenderPipelineDescriptor& aDesc); const dom::GPURenderPipelineDescriptor& aDesc);
void Destroy();
IMPL_EVENT_HANDLER(uncapturederror) IMPL_EVENT_HANDLER(uncapturederror)
}; };


@ -6,6 +6,8 @@
#include "ObjectModel.h" #include "ObjectModel.h"
#include "Adapter.h" #include "Adapter.h"
#include "ShaderModule.h"
#include "CompilationInfo.h"
#include "Device.h" #include "Device.h"
#include "CommandEncoder.h" #include "CommandEncoder.h"
#include "Instance.h" #include "Instance.h"
@ -29,6 +31,8 @@ void ObjectBase::GetLabel(nsAString& aValue) const { aValue = mLabel; }
void ObjectBase::SetLabel(const nsAString& aLabel) { mLabel = aLabel; } void ObjectBase::SetLabel(const nsAString& aLabel) { mLabel = aLabel; }
template class ChildOf<Adapter>; template class ChildOf<Adapter>;
template class ChildOf<ShaderModule>;
template class ChildOf<CompilationInfo>;
template class ChildOf<CommandEncoder>; template class ChildOf<CommandEncoder>;
template class ChildOf<Device>; template class ChildOf<Device>;
template class ChildOf<Instance>; template class ChildOf<Instance>;


@ -3,15 +3,22 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "Fence.h"
#include "mozilla/dom/WebGPUBinding.h" #include "mozilla/dom/WebGPUBinding.h"
#include "QuerySet.h"
#include "Device.h"
namespace mozilla { namespace mozilla {
namespace webgpu { namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(Fence, mParent) QuerySet::~QuerySet() = default;
GPU_IMPL_JS_WRAP(Fence)
GPU_IMPL_CYCLE_COLLECTION(QuerySet, mParent)
GPU_IMPL_JS_WRAP(QuerySet)
void QuerySet::Destroy() {
// TODO
}
} // namespace webgpu } // namespace webgpu
} // namespace mozilla } // namespace mozilla


@ -3,34 +3,31 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef GPU_Fence_H_ #ifndef GPU_QuerySet_H_
#define GPU_Fence_H_ #define GPU_QuerySet_H_
#include "nsWrapperCache.h" #include "nsWrapperCache.h"
#include "ObjectModel.h" #include "ObjectModel.h"
namespace mozilla { namespace mozilla {
namespace dom {
class Promise;
} // namespace dom
namespace webgpu { namespace webgpu {
class Device; class Device;
class Fence final : public ObjectBase, public ChildOf<Device> { class QuerySet final : public ObjectBase, public ChildOf<Device> {
public: public:
GPU_DECL_CYCLE_COLLECTION(Fence) GPU_DECL_CYCLE_COLLECTION(QuerySet)
GPU_DECL_JS_WRAP(Fence) GPU_DECL_JS_WRAP(QuerySet)
QuerySet() = delete;
void Destroy();
private: private:
Fence() = delete; virtual ~QuerySet();
~Fence() = default;
void Cleanup() {} void Cleanup() {}
public:
}; };
} // namespace webgpu } // namespace webgpu
} // namespace mozilla } // namespace mozilla
#endif // GPU_Fence_H_ #endif // GPU_QuerySet_H_


@ -4,6 +4,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mozilla/dom/WebGPUBinding.h" #include "mozilla/dom/WebGPUBinding.h"
#include "mozilla/dom/UnionTypes.h"
#include "Queue.h" #include "Queue.h"
#include "CommandBuffer.h" #include "CommandBuffer.h"
@ -36,20 +37,36 @@ void Queue::Submit(
} }
void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset, void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
const dom::ArrayBuffer& aData, uint64_t aDataOffset, const dom::ArrayBufferViewOrArrayBuffer& aData,
uint64_t aDataOffset,
const dom::Optional<uint64_t>& aSize, const dom::Optional<uint64_t>& aSize,
ErrorResult& aRv) { ErrorResult& aRv) {
aData.ComputeState(); uint64_t length = 0;
const auto checkedSize = uint8_t* data = nullptr;
aSize.WasPassed() ? CheckedInt<size_t>(aSize.Value()) if (aData.IsArrayBufferView()) {
: CheckedInt<size_t>(aData.Length()) - aDataOffset; const auto& view = aData.GetAsArrayBufferView();
view.ComputeState();
length = view.Length();
data = view.Data();
}
if (aData.IsArrayBuffer()) {
const auto& ab = aData.GetAsArrayBuffer();
ab.ComputeState();
length = ab.Length();
data = ab.Data();
}
MOZ_ASSERT(data != nullptr);
const auto checkedSize = aSize.WasPassed()
? CheckedInt<size_t>(aSize.Value())
: CheckedInt<size_t>(length) - aDataOffset;
if (!checkedSize.isValid()) { if (!checkedSize.isValid()) {
aRv.ThrowRangeError("Mapped size is too large"); aRv.ThrowRangeError("Mapped size is too large");
return; return;
} }
const auto& size = checkedSize.value(); const auto& size = checkedSize.value();
if (aDataOffset + size > aData.Length()) { if (aDataOffset + size > length) {
aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size)); aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
return; return;
} }
@ -62,13 +79,13 @@ void Queue::WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
return; return;
} }
memcpy(shmem.get<uint8_t>(), aData.Data() + aDataOffset, size); memcpy(shmem.get<uint8_t>(), data + aDataOffset, size);
mBridge->SendQueueWriteBuffer(mId, aBuffer.mId, aBufferOffset, mBridge->SendQueueWriteBuffer(mId, aBuffer.mId, aBufferOffset,
std::move(shmem)); std::move(shmem));
} }
void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination, void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
const dom::ArrayBuffer& aData, const dom::ArrayBufferViewOrArrayBuffer& aData,
const dom::GPUTextureDataLayout& aDataLayout, const dom::GPUTextureDataLayout& aDataLayout,
const dom::GPUExtent3D& aSize, ErrorResult& aRv) { const dom::GPUExtent3D& aSize, ErrorResult& aRv) {
ffi::WGPUTextureCopyView copyView = {}; ffi::WGPUTextureCopyView copyView = {};
@ -79,21 +96,37 @@ void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
ffi::WGPUExtent3d extent = {}; ffi::WGPUExtent3d extent = {};
CommandEncoder::ConvertExtent3DToFFI(aSize, &extent); CommandEncoder::ConvertExtent3DToFFI(aSize, &extent);
uint64_t availableSize = 0;
uint8_t* data = nullptr;
if (aData.IsArrayBufferView()) {
const auto& view = aData.GetAsArrayBufferView();
view.ComputeState();
availableSize = view.Length();
data = view.Data();
}
if (aData.IsArrayBuffer()) {
const auto& ab = aData.GetAsArrayBuffer();
ab.ComputeState();
availableSize = ab.Length();
data = ab.Data();
}
MOZ_ASSERT(data != nullptr);
const auto bpb = aDestination.mTexture->mBytesPerBlock; const auto bpb = aDestination.mTexture->mBytesPerBlock;
if (!bpb) { if (!bpb) {
aRv.ThrowAbortError(nsPrintfCString("Invalid texture format")); aRv.ThrowAbortError(nsPrintfCString("Invalid texture format"));
return; return;
} }
if (extent.width == 0 || extent.height == 0 || extent.depth == 0) { if (extent.width == 0 || extent.height == 0 ||
extent.depth_or_array_layers == 0) {
aRv.ThrowAbortError(nsPrintfCString("Invalid copy size")); aRv.ThrowAbortError(nsPrintfCString("Invalid copy size"));
return; return;
} }
// TODO: support block-compressed formats // TODO: support block-compressed formats
aData.ComputeState(); const auto fullRows = (CheckedInt<size_t>(extent.depth_or_array_layers - 1) *
const auto fullRows = aDataLayout.mRowsPerImage +
(CheckedInt<size_t>(extent.depth - 1) * aDataLayout.mRowsPerImage + extent.height - 1);
extent.height - 1);
const auto checkedSize = fullRows * aDataLayout.mBytesPerRow + const auto checkedSize = fullRows * aDataLayout.mBytesPerRow +
CheckedInt<size_t>(extent.width) * bpb.value(); CheckedInt<size_t>(extent.width) * bpb.value();
if (!checkedSize.isValid()) { if (!checkedSize.isValid()) {
@ -102,7 +135,6 @@ void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
} }
const auto& size = checkedSize.value(); const auto& size = checkedSize.value();
auto availableSize = aData.Length();
if (availableSize < aDataLayout.mOffset || if (availableSize < aDataLayout.mOffset ||
size > (availableSize - aDataLayout.mOffset)) { size > (availableSize - aDataLayout.mOffset)) {
aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size)); aRv.ThrowAbortError(nsPrintfCString("Wrong data size %" PRIuPTR, size));
@ -117,7 +149,7 @@ void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
return; return;
} }
memcpy(shmem.get<uint8_t>(), aData.Data() + aDataLayout.mOffset, size); memcpy(shmem.get<uint8_t>(), data + aDataLayout.mOffset, size);
mBridge->SendQueueWriteTexture(mId, copyView, std::move(shmem), dataLayout, mBridge->SendQueueWriteTexture(mId, copyView, std::move(shmem), dataLayout,
extent); extent);
} }
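
For reference, a self-contained sketch of the size check performed by the updated Queue::WriteTexture above, written as a hypothetical standalone helper (not part of the patch); it assumes a non-zero extent, which the real code enforces before this point, and omits the CheckedInt overflow guards:

#include <cstddef>
#include <cstdint>

// Only the final row of the final image is partial: the copy needs
// (depthOrArrayLayers - 1) * rowsPerImage + (height - 1) full rows of
// bytesPerRow, plus width * bytesPerBlock bytes for the last row.
static size_t RequiredUploadSize(uint32_t aWidth, uint32_t aHeight,
                                 uint32_t aDepthOrArrayLayers,
                                 uint32_t aBytesPerRow, uint32_t aRowsPerImage,
                                 uint8_t aBytesPerBlock) {
  const size_t fullRows =
      size_t(aDepthOrArrayLayers - 1) * aRowsPerImage + (aHeight - 1);
  return fullRows * aBytesPerRow + size_t(aWidth) * aBytesPerBlock;
}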


@ -14,6 +14,7 @@ namespace mozilla {
class ErrorResult; class ErrorResult;
namespace dom { namespace dom {
class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict; class RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict;
class ArrayBufferViewOrArrayBuffer;
template <typename T> template <typename T>
class Optional; class Optional;
template <typename T> template <typename T>
@ -40,16 +41,16 @@ class Queue final : public ObjectBase, public ChildOf<Device> {
const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers); const dom::Sequence<OwningNonNull<CommandBuffer>>& aCommandBuffers);
void WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset, void WriteBuffer(const Buffer& aBuffer, uint64_t aBufferOffset,
const dom::ArrayBuffer& adata, uint64_t aDataOffset, const dom::ArrayBufferViewOrArrayBuffer& aData,
const dom::Optional<uint64_t>& aSize, ErrorResult& aRv); uint64_t aDataOffset, const dom::Optional<uint64_t>& aSize,
ErrorResult& aRv);
void WriteTexture(const dom::GPUTextureCopyView& aDestination, void WriteTexture(const dom::GPUTextureCopyView& aDestination,
const dom::ArrayBuffer& aData, const dom::ArrayBufferViewOrArrayBuffer& aData,
const dom::GPUTextureDataLayout& aDataLayout, const dom::GPUTextureDataLayout& aDataLayout,
const dom::GPUExtent3D& aSize, ErrorResult& aRv); const dom::GPUExtent3D& aSize, ErrorResult& aRv);
private: private:
Queue() = delete;
virtual ~Queue(); virtual ~Queue();
void Cleanup() {} void Cleanup() {}


@ -162,11 +162,16 @@ void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
} }
} }
void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset, void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer,
uint64_t aSize) { const dom::GPUIndexFormat& aIndexFormat,
uint64_t aOffset, uint64_t aSize) {
if (mValid) { if (mValid) {
mUsedBuffers.AppendElement(&aBuffer); mUsedBuffers.AppendElement(&aBuffer);
ffi::wgpu_render_pass_set_index_buffer(mPass, aBuffer.mId, aOffset, aSize); const auto iformat = aIndexFormat == dom::GPUIndexFormat::Uint32
? ffi::WGPUIndexFormat_Uint32
: ffi::WGPUIndexFormat_Uint16;
ffi::wgpu_render_pass_set_index_buffer(mPass, aBuffer.mId, iformat, aOffset,
aSize);
} }
} }


@ -59,7 +59,9 @@ class RenderPassEncoder final : public ObjectBase,
void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup, void SetBindGroup(uint32_t aSlot, const BindGroup& aBindGroup,
const dom::Sequence<uint32_t>& aDynamicOffsets); const dom::Sequence<uint32_t>& aDynamicOffsets);
void SetPipeline(const RenderPipeline& aPipeline); void SetPipeline(const RenderPipeline& aPipeline);
void SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset, uint64_t aSize); void SetIndexBuffer(const Buffer& aBuffer,
const dom::GPUIndexFormat& aIndexFormat, uint64_t aOffset,
uint64_t aSize);
void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset, void SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer, uint64_t aOffset,
uint64_t aSize); uint64_t aSize);
void Draw(uint32_t aVertexCount, uint32_t aInstanceCount, void Draw(uint32_t aVertexCount, uint32_t aInstanceCount,


@ -59,10 +59,32 @@ static Maybe<uint8_t> GetBytesPerBlock(dom::GPUTextureFormat format) {
case dom::GPUTextureFormat::Rgba32sint: case dom::GPUTextureFormat::Rgba32sint:
case dom::GPUTextureFormat::Rgba32float: case dom::GPUTextureFormat::Rgba32float:
return Some<uint8_t>(16u); return Some<uint8_t>(16u);
case dom::GPUTextureFormat::Stencil8:
return Some<uint8_t>(1u);
case dom::GPUTextureFormat::Depth16unorm:
return Some<uint8_t>(2u);
case dom::GPUTextureFormat::Depth32float: case dom::GPUTextureFormat::Depth32float:
return Some<uint8_t>(4u); return Some<uint8_t>(4u);
case dom::GPUTextureFormat::Bc1_rgba_unorm:
case dom::GPUTextureFormat::Bc1_rgba_unorm_srgb:
case dom::GPUTextureFormat::Bc4_r_unorm:
case dom::GPUTextureFormat::Bc4_r_snorm:
return Some<uint8_t>(8u);
case dom::GPUTextureFormat::Bc2_rgba_unorm:
case dom::GPUTextureFormat::Bc2_rgba_unorm_srgb:
case dom::GPUTextureFormat::Bc3_rgba_unorm:
case dom::GPUTextureFormat::Bc3_rgba_unorm_srgb:
case dom::GPUTextureFormat::Bc5_rg_unorm:
case dom::GPUTextureFormat::Bc5_rg_snorm:
case dom::GPUTextureFormat::Bc6h_rgb_ufloat:
case dom::GPUTextureFormat::Bc6h_rgb_float:
case dom::GPUTextureFormat::Bc7_rgba_unorm:
case dom::GPUTextureFormat::Bc7_rgba_unorm_srgb:
return Some<uint8_t>(16u);
case dom::GPUTextureFormat::Depth24plus: case dom::GPUTextureFormat::Depth24plus:
case dom::GPUTextureFormat::Depth24plus_stencil8: case dom::GPUTextureFormat::Depth24plus_stencil8:
case dom::GPUTextureFormat::Depth24unorm_stencil8:
case dom::GPUTextureFormat::Depth32float_stencil8:
case dom::GPUTextureFormat::EndGuard_: case dom::GPUTextureFormat::EndGuard_:
return Nothing(); return Nothing();
} }
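
The BC formats added above are block-compressed: the value GetBytesPerBlock() returns for them is per 4x4 texel block (8 bytes for BC1/BC4, 16 for the others), not per texel, which is why the WriteTexture path still carries a TODO for block-compressed copies. A hypothetical sketch (not part of the patch) of the row math such support would need:

#include <cstddef>
#include <cstdint>

// For a block-compressed format, one row of a copy covers ceil(width / 4)
// blocks rather than `width` texels.
static size_t BytesPerBlockRow(uint32_t aWidthTexels, uint8_t aBytesPerBlock,
                               uint32_t aBlockDim = 4) {
  const uint32_t blocksPerRow = (aWidthTexels + aBlockDim - 1) / aBlockDim;
  return size_t(blocksPerRow) * aBytesPerBlock;
}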


@ -10,8 +10,8 @@ using wr::ExternalImageId from "mozilla/webrender/WebRenderAPI.h";
using RawId from "mozilla/webgpu/WebGPUTypes.h"; using RawId from "mozilla/webgpu/WebGPUTypes.h";
using BufferAddress from "mozilla/webgpu/WebGPUTypes.h"; using BufferAddress from "mozilla/webgpu/WebGPUTypes.h";
using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h"; using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h"; using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
using webgpu::ffi::WGPUDeviceDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h"; using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h"; using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h"; using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h";
@ -41,7 +41,7 @@ parent:
async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId); async BumpImplicitBindGroupLayout(RawId pipelineId, bool isCompute, uint32_t index, RawId assignId);
async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId); async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId);
async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId); async AdapterRequestDevice(RawId selfId, ByteBuf buf, RawId newId);
async AdapterDestroy(RawId selfId); async AdapterDestroy(RawId selfId);
async BufferReturnShmem(RawId selfId, Shmem shmem); async BufferReturnShmem(RawId selfId, Shmem shmem);
async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm); async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);


@ -64,7 +64,35 @@ RefPtr<RawIdPromise> WebGPUChild::InstanceRequestAdapter(
Maybe<RawId> WebGPUChild::AdapterRequestDevice( Maybe<RawId> WebGPUChild::AdapterRequestDevice(
RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc) { RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId); RawId id = ffi::wgpu_client_make_device_id(mClient, aSelfId);
if (SendAdapterRequestDevice(aSelfId, aDesc, id)) {
ffi::WGPUDeviceDescriptor desc = {};
ffi::wgpu_client_fill_default_limits(&desc.limits);
if (aDesc.mNonGuaranteedLimits.WasPassed()) {
for (const auto& entry : aDesc.mNonGuaranteedLimits.Value().Entries()) {
Unused << entry; // TODO
}
/*desc.limits.max_bind_groups = lim.mMaxBindGroups;
desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout =
lim.mMaxDynamicUniformBuffersPerPipelineLayout;
desc.limits.max_dynamic_storage_buffers_per_pipeline_layout =
lim.mMaxDynamicStorageBuffersPerPipelineLayout;
desc.limits.max_sampled_textures_per_shader_stage =
lim.mMaxSampledTexturesPerShaderStage;
desc.limits.max_samplers_per_shader_stage = lim.mMaxSamplersPerShaderStage;
desc.limits.max_storage_buffers_per_shader_stage =
lim.mMaxStorageBuffersPerShaderStage;
desc.limits.max_storage_textures_per_shader_stage =
lim.mMaxStorageTexturesPerShaderStage;
desc.limits.max_uniform_buffers_per_shader_stage =
lim.mMaxUniformBuffersPerShaderStage;
desc.limits.max_uniform_buffer_binding_size =
lim.mMaxUniformBufferBindingSize;*/
}
ByteBuf bb;
ffi::wgpu_client_serialize_device_descriptor(&desc, ToFFI(&bb));
if (SendAdapterRequestDevice(aSelfId, std::move(bb), id)) {
return Some(id); return Some(id);
} }
ffi::wgpu_client_kill_device_id(mClient, id); ffi::wgpu_client_kill_device_id(mClient, id);
@ -104,12 +132,12 @@ RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence(); const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
desc.size.width = seq.Length() > 0 ? seq[0] : 1; desc.size.width = seq.Length() > 0 ? seq[0] : 1;
desc.size.height = seq.Length() > 1 ? seq[1] : 1; desc.size.height = seq.Length() > 1 ? seq[1] : 1;
desc.size.depth = seq.Length() > 2 ? seq[2] : 1; desc.size.depth_or_array_layers = seq.Length() > 2 ? seq[2] : 1;
} else if (aDesc.mSize.IsGPUExtent3DDict()) { } else if (aDesc.mSize.IsGPUExtent3DDict()) {
const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict(); const auto& dict = aDesc.mSize.GetAsGPUExtent3DDict();
desc.size.width = dict.mWidth; desc.size.width = dict.mWidth;
desc.size.height = dict.mHeight; desc.size.height = dict.mHeight;
desc.size.depth = dict.mDepth; desc.size.depth_or_array_layers = dict.mDepthOrArrayLayers;
} else { } else {
MOZ_CRASH("Unexpected union"); MOZ_CRASH("Unexpected union");
} }
@ -241,30 +269,33 @@ RawId WebGPUChild::DeviceCreateBindGroupLayout(
nsTArray<OptionalData> optional(aDesc.mEntries.Length()); nsTArray<OptionalData> optional(aDesc.mEntries.Length());
for (const auto& entry : aDesc.mEntries) { for (const auto& entry : aDesc.mEntries) {
OptionalData data = {}; OptionalData data = {};
if (entry.mViewDimension.WasPassed()) { if (entry.mTexture.WasPassed()) {
data.dim = ffi::WGPUTextureViewDimension(entry.mViewDimension.Value()); const auto& texture = entry.mTexture.Value();
} data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
if (entry.mTextureComponentType.WasPassed()) { switch (texture.mSampleType) {
switch (entry.mTextureComponentType.Value()) { case dom::GPUTextureSampleType::Float:
case dom::GPUTextureComponentType::Float:
data.type = ffi::WGPURawTextureSampleType_Float; data.type = ffi::WGPURawTextureSampleType_Float;
break; break;
case dom::GPUTextureComponentType::Uint: case dom::GPUTextureSampleType::Unfilterable_float:
data.type = ffi::WGPURawTextureSampleType_UnfilterableFloat;
break;
case dom::GPUTextureSampleType::Uint:
data.type = ffi::WGPURawTextureSampleType_Uint; data.type = ffi::WGPURawTextureSampleType_Uint;
break; break;
case dom::GPUTextureComponentType::Sint: case dom::GPUTextureSampleType::Sint:
data.type = ffi::WGPURawTextureSampleType_Sint; data.type = ffi::WGPURawTextureSampleType_Sint;
break; break;
case dom::GPUTextureComponentType::Depth_comparison: case dom::GPUTextureSampleType::Depth:
data.type = ffi::WGPURawTextureSampleType_Depth; data.type = ffi::WGPURawTextureSampleType_Depth;
break; break;
default: case dom::GPUTextureSampleType::EndGuard_:
MOZ_ASSERT_UNREACHABLE(); MOZ_ASSERT_UNREACHABLE();
break;
} }
} }
if (entry.mStorageTextureFormat.WasPassed()) { if (entry.mStorageTexture.WasPassed()) {
data.format = ffi::WGPUTextureFormat(entry.mStorageTextureFormat.Value()); const auto& texture = entry.mStorageTexture.Value();
data.dim = ffi::WGPUTextureViewDimension(texture.mViewDimension);
data.format = ffi::WGPUTextureFormat(texture.mFormat);
} }
optional.AppendElement(data); optional.AppendElement(data);
} }
@ -275,18 +306,51 @@ RawId WebGPUChild::DeviceCreateBindGroupLayout(
ffi::WGPUBindGroupLayoutEntry e = {}; ffi::WGPUBindGroupLayoutEntry e = {};
e.binding = entry.mBinding; e.binding = entry.mBinding;
e.visibility = entry.mVisibility; e.visibility = entry.mVisibility;
e.ty = ffi::WGPURawBindingType(entry.mType); if (entry.mBuffer.WasPassed()) {
e.multisampled = entry.mMultisampled; switch (entry.mBuffer.Value().mType) {
e.has_dynamic_offset = entry.mHasDynamicOffset; case dom::GPUBufferBindingType::Uniform:
if (entry.mViewDimension.WasPassed()) { e.ty = ffi::WGPURawBindingType_UniformBuffer;
break;
case dom::GPUBufferBindingType::Storage:
e.ty = ffi::WGPURawBindingType_StorageBuffer;
break;
case dom::GPUBufferBindingType::Read_only_storage:
e.ty = ffi::WGPURawBindingType_ReadonlyStorageBuffer;
break;
case dom::GPUBufferBindingType::EndGuard_:
MOZ_ASSERT_UNREACHABLE();
}
e.has_dynamic_offset = entry.mBuffer.Value().mHasDynamicOffset;
}
if (entry.mTexture.WasPassed()) {
e.ty = ffi::WGPURawBindingType_SampledTexture;
e.view_dimension = &optional[i].dim; e.view_dimension = &optional[i].dim;
}
if (entry.mTextureComponentType.WasPassed()) {
e.texture_sample_type = &optional[i].type; e.texture_sample_type = &optional[i].type;
e.multisampled = entry.mTexture.Value().mMultisampled;
} }
if (entry.mStorageTextureFormat.WasPassed()) { if (entry.mStorageTexture.WasPassed()) {
e.ty = entry.mStorageTexture.Value().mAccess ==
dom::GPUStorageTextureAccess::Write_only
? ffi::WGPURawBindingType_WriteonlyStorageTexture
: ffi::WGPURawBindingType_ReadonlyStorageTexture;
e.view_dimension = &optional[i].dim;
e.storage_texture_format = &optional[i].format; e.storage_texture_format = &optional[i].format;
} }
if (entry.mSampler.WasPassed()) {
e.ty = ffi::WGPURawBindingType_Sampler;
switch (entry.mSampler.Value().mType) {
case dom::GPUSamplerBindingType::Filtering:
e.sampler_filter = true;
break;
case dom::GPUSamplerBindingType::Non_filtering:
break;
case dom::GPUSamplerBindingType::Comparison:
e.sampler_compare = true;
break;
case dom::GPUSamplerBindingType::EndGuard_:
MOZ_ASSERT_UNREACHABLE();
}
}
entries.AppendElement(e); entries.AppendElement(e);
} }
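
For reference, a minimal sketch of the content-process JS that this conversion now consumes; the bindings, visibility flags, and formats below are invented for illustration and are not taken from this patch:

```js
// New-style GPUBindGroupLayoutEntry: each entry carries exactly one of
// `buffer`, `sampler`, `texture`, or `storageTexture` instead of the old
// `type` / `viewDimension` / `textureComponentType` fields.
const bgl = device.createBindGroupLayout({
  entries: [
    { binding: 0, visibility: GPUShaderStage.VERTEX | GPUShaderStage.FRAGMENT,
      buffer: { type: "uniform", hasDynamicOffset: false } },
    { binding: 1, visibility: GPUShaderStage.FRAGMENT,
      sampler: { type: "filtering" } },
    { binding: 2, visibility: GPUShaderStage.FRAGMENT,
      texture: { sampleType: "float", viewDimension: "2d" } },
    { binding: 3, visibility: GPUShaderStage.COMPUTE,
      storageTexture: { access: "write-only", format: "rgba8unorm" } },
  ],
});
```
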
@ -379,8 +443,8 @@ RawId WebGPUChild::DeviceCreateShaderModule(
ffi::WGPUShaderModuleDescriptor desc = {}; ffi::WGPUShaderModuleDescriptor desc = {};
nsCString wgsl; nsCString wgsl;
if (aDesc.mCode.IsString()) { if (aDesc.mCode.IsUSVString()) {
LossyCopyUTF16toASCII(aDesc.mCode.GetAsString(), wgsl); LossyCopyUTF16toASCII(aDesc.mCode.GetAsUSVString(), wgsl);
desc.wgsl_chars = wgsl.get(); desc.wgsl_chars = wgsl.get();
} else { } else {
const auto& code = aDesc.mCode.GetAsUint32Array(); const auto& code = aDesc.mCode.GetAsUint32Array();
@ -410,9 +474,9 @@ RawId WebGPUChild::DeviceCreateComputePipeline(
if (aDesc.mLayout.WasPassed()) { if (aDesc.mLayout.WasPassed()) {
desc.layout = aDesc.mLayout.Value().mId; desc.layout = aDesc.mLayout.Value().mId;
} }
desc.compute_stage.module = aDesc.mComputeStage.mModule->mId; desc.stage.module = aDesc.mCompute.mModule->mId;
LossyCopyUTF16toASCII(aDesc.mComputeStage.mEntryPoint, entryPoint); LossyCopyUTF16toASCII(aDesc.mCompute.mEntryPoint, entryPoint);
desc.compute_stage.entry_point = entryPoint.get(); desc.stage.entry_point = entryPoint.get();
ByteBuf bb; ByteBuf bb;
RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {}; RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
@ -429,39 +493,27 @@ RawId WebGPUChild::DeviceCreateComputePipeline(
return id; return id;
} }
static ffi::WGPURasterizationStateDescriptor ConvertRasterizationDescriptor( static ffi::WGPUMultisampleState ConvertMultisampleState(
const dom::GPURasterizationStateDescriptor& aDesc) { const dom::GPUMultisampleState& aDesc) {
ffi::WGPURasterizationStateDescriptor desc = {}; ffi::WGPUMultisampleState desc = {};
desc.front_face = ffi::WGPUFrontFace(aDesc.mFrontFace); desc.count = aDesc.mCount;
desc.cull_mode = ffi::WGPUCullMode(aDesc.mCullMode); desc.mask = aDesc.mMask;
desc.depth_bias = aDesc.mDepthBias; desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
desc.depth_bias_slope_scale = aDesc.mDepthBiasSlopeScale;
desc.depth_bias_clamp = aDesc.mDepthBiasClamp;
return desc; return desc;
} }
static ffi::WGPUBlendDescriptor ConvertBlendDescriptor( static ffi::WGPUBlendComponent ConvertBlendComponent(
const dom::GPUBlendDescriptor& aDesc) { const dom::GPUBlendComponent& aDesc) {
ffi::WGPUBlendDescriptor desc = {}; ffi::WGPUBlendComponent desc = {};
desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor); desc.src_factor = ffi::WGPUBlendFactor(aDesc.mSrcFactor);
desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor); desc.dst_factor = ffi::WGPUBlendFactor(aDesc.mDstFactor);
desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation); desc.operation = ffi::WGPUBlendOperation(aDesc.mOperation);
return desc; return desc;
} }
static ffi::WGPUColorStateDescriptor ConvertColorDescriptor( static ffi::WGPUStencilFaceState ConvertStencilFaceState(
const dom::GPUColorStateDescriptor& aDesc) { const dom::GPUStencilFaceState& aDesc) {
ffi::WGPUColorStateDescriptor desc = {}; ffi::WGPUStencilFaceState desc = {};
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.alpha_blend = ConvertBlendDescriptor(aDesc.mAlphaBlend);
desc.color_blend = ConvertBlendDescriptor(aDesc.mColorBlend);
desc.write_mask = aDesc.mWriteMask;
return desc;
}
static ffi::WGPUStencilStateFaceDescriptor ConvertStencilFaceDescriptor(
const dom::GPUStencilStateFaceDescriptor& aDesc) {
ffi::WGPUStencilStateFaceDescriptor desc = {};
desc.compare = ConvertCompareFunction(aDesc.mCompare); desc.compare = ConvertCompareFunction(aDesc.mCompare);
desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp); desc.fail_op = ffi::WGPUStencilOperation(aDesc.mFailOp);
desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp); desc.depth_fail_op = ffi::WGPUStencilOperation(aDesc.mDepthFailOp);
@ -469,26 +521,36 @@ static ffi::WGPUStencilStateFaceDescriptor ConvertStencilFaceDescriptor(
return desc; return desc;
} }
static ffi::WGPUDepthStencilStateDescriptor ConvertDepthStencilDescriptor( static ffi::WGPUDepthStencilState ConvertDepthStencilState(
const dom::GPUDepthStencilStateDescriptor& aDesc) { const dom::GPUDepthStencilState& aDesc) {
ffi::WGPUDepthStencilStateDescriptor desc = {}; ffi::WGPUDepthStencilState desc = {};
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat); desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.depth_write_enabled = aDesc.mDepthWriteEnabled; desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare); desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
desc.stencil.front = ConvertStencilFaceDescriptor(aDesc.mStencilFront); desc.stencil.front = ConvertStencilFaceState(aDesc.mStencilFront);
desc.stencil.back = ConvertStencilFaceDescriptor(aDesc.mStencilBack); desc.stencil.back = ConvertStencilFaceState(aDesc.mStencilBack);
desc.stencil.read_mask = aDesc.mStencilReadMask; desc.stencil.read_mask = aDesc.mStencilReadMask;
desc.stencil.write_mask = aDesc.mStencilWriteMask; desc.stencil.write_mask = aDesc.mStencilWriteMask;
desc.bias.constant = aDesc.mDepthBias;
desc.bias.slope_scale = aDesc.mDepthBiasSlopeScale;
desc.bias.clamp = aDesc.mDepthBiasClamp;
return desc; return desc;
} }
RawId WebGPUChild::DeviceCreateRenderPipeline( RawId WebGPUChild::DeviceCreateRenderPipeline(
RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc, RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc,
nsTArray<RawId>* const aImplicitBindGroupLayoutIds) { nsTArray<RawId>* const aImplicitBindGroupLayoutIds) {
// A bunch of stack locals that we can have pointers into
nsTArray<ffi::WGPUVertexBufferLayout> vertexBuffers;
nsTArray<ffi::WGPUVertexAttribute> vertexAttributes;
ffi::WGPURenderPipelineDescriptor desc = {}; ffi::WGPURenderPipelineDescriptor desc = {};
nsCString label, vsEntry, fsEntry; nsCString label, vsEntry, fsEntry;
ffi::WGPUProgrammableStageDescriptor vertexStage = {}; ffi::WGPUIndexFormat stripIndexFormat = ffi::WGPUIndexFormat_Uint16;
ffi::WGPUProgrammableStageDescriptor fragmentStage = {}; ffi::WGPUFace cullFace = ffi::WGPUFace_Front;
ffi::WGPUVertexState vertexState = {};
ffi::WGPUFragmentState fragmentState = {};
nsTArray<ffi::WGPUColorTargetState> colorStates;
nsTArray<ffi::WGPUBlendState> blendStates;
if (aDesc.mLabel.WasPassed()) { if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label); LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
@ -498,73 +560,95 @@ RawId WebGPUChild::DeviceCreateRenderPipeline(
desc.layout = aDesc.mLayout.Value().mId; desc.layout = aDesc.mLayout.Value().mId;
} }
vertexStage.module = aDesc.mVertexStage.mModule->mId; {
LossyCopyUTF16toASCII(aDesc.mVertexStage.mEntryPoint, vsEntry); const auto& stage = aDesc.mVertex;
vertexStage.entry_point = vsEntry.get(); vertexState.stage.module = stage.mModule->mId;
desc.vertex_stage = &vertexStage; LossyCopyUTF16toASCII(stage.mEntryPoint, vsEntry);
vertexState.stage.entry_point = vsEntry.get();
if (aDesc.mFragmentStage.WasPassed()) { for (const auto& vertex_desc : stage.mBuffers) {
const auto& stage = aDesc.mFragmentStage.Value(); ffi::WGPUVertexBufferLayout vb_desc = {};
fragmentStage.module = stage.mModule->mId; if (!vertex_desc.IsNull()) {
const auto& vd = vertex_desc.Value();
vb_desc.array_stride = vd.mArrayStride;
vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
// Note: we are setting the length but not the pointer
vb_desc.attributes_length = vd.mAttributes.Length();
for (const auto& vat : vd.mAttributes) {
ffi::WGPUVertexAttribute ad = {};
ad.offset = vat.mOffset;
ad.format = ffi::WGPUVertexFormat(vat.mFormat);
ad.shader_location = vat.mShaderLocation;
vertexAttributes.AppendElement(ad);
}
}
vertexBuffers.AppendElement(vb_desc);
}
// Now patch up all the pointers to attribute lists.
size_t numAttributes = 0;
for (auto& vb_desc : vertexBuffers) {
vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
numAttributes += vb_desc.attributes_length;
}
vertexState.buffers = vertexBuffers.Elements();
vertexState.buffers_length = vertexBuffers.Length();
desc.vertex = &vertexState;
}
if (aDesc.mFragment.WasPassed()) {
const auto& stage = aDesc.mFragment.Value();
fragmentState.stage.module = stage.mModule->mId;
LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry); LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry);
fragmentStage.entry_point = fsEntry.get(); fragmentState.stage.entry_point = fsEntry.get();
desc.fragment_stage = &fragmentStage;
}
desc.primitive_topology = // Note: we pre-collect the blend states into a different array
ffi::WGPUPrimitiveTopology(aDesc.mPrimitiveTopology); // so that we can have non-stale pointers into it.
const auto rasterization = for (const auto& colorState : stage.mTargets) {
ConvertRasterizationDescriptor(aDesc.mRasterizationState); ffi::WGPUColorTargetState desc = {};
desc.rasterization_state = &rasterization; desc.format = ffi::WGPUTextureFormat(colorState.mFormat);
desc.write_mask = colorState.mWriteMask;
nsTArray<ffi::WGPUColorStateDescriptor> colorStates; colorStates.AppendElement(desc);
for (const auto& colorState : aDesc.mColorStates) { ffi::WGPUBlendState bs = {};
colorStates.AppendElement(ConvertColorDescriptor(colorState)); if (colorState.mBlend.WasPassed()) {
} const auto& blend = colorState.mBlend.Value();
desc.color_states = colorStates.Elements(); bs.alpha = ConvertBlendComponent(blend.mAlpha);
desc.color_states_length = colorStates.Length(); bs.color = ConvertBlendComponent(blend.mColor);
}
ffi::WGPUDepthStencilStateDescriptor depthStencilState = {}; blendStates.AppendElement(bs);
if (aDesc.mDepthStencilState.WasPassed()) { }
depthStencilState = for (size_t i = 0; i < colorStates.Length(); ++i) {
ConvertDepthStencilDescriptor(aDesc.mDepthStencilState.Value()); if (stage.mTargets[i].mBlend.WasPassed()) {
desc.depth_stencil_state = &depthStencilState; colorStates[i].blend = &blendStates[i];
}
desc.vertex_state.index_format =
ffi::WGPUIndexFormat(aDesc.mVertexState.mIndexFormat);
nsTArray<ffi::WGPUVertexBufferDescriptor> vertexBuffers;
nsTArray<ffi::WGPUVertexAttributeDescriptor> vertexAttributes;
for (const auto& vertex_desc : aDesc.mVertexState.mVertexBuffers) {
ffi::WGPUVertexBufferDescriptor vb_desc = {};
if (!vertex_desc.IsNull()) {
const auto& vd = vertex_desc.Value();
vb_desc.stride = vd.mArrayStride;
vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
// Note: we are setting the length but not the pointer
vb_desc.attributes_length = vd.mAttributes.Length();
for (const auto& vat : vd.mAttributes) {
ffi::WGPUVertexAttributeDescriptor ad = {};
ad.offset = vat.mOffset;
ad.format = ffi::WGPUVertexFormat(vat.mFormat);
ad.shader_location = vat.mShaderLocation;
vertexAttributes.AppendElement(ad);
} }
} }
vertexBuffers.AppendElement(vb_desc);
} fragmentState.targets = colorStates.Elements();
// Now patch up all the pointers to attribute lists. fragmentState.targets_length = colorStates.Length();
size_t numAttributes = 0; desc.fragment = &fragmentState;
for (auto& vb_desc : vertexBuffers) {
vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
numAttributes += vb_desc.attributes_length;
} }
desc.vertex_state.vertex_buffers = vertexBuffers.Elements(); {
desc.vertex_state.vertex_buffers_length = vertexBuffers.Length(); const auto& prim = aDesc.mPrimitive;
desc.sample_count = aDesc.mSampleCount; desc.primitive.topology = ffi::WGPUPrimitiveTopology(prim.mTopology);
desc.sample_mask = aDesc.mSampleMask; if (prim.mStripIndexFormat.WasPassed()) {
desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled; stripIndexFormat = ffi::WGPUIndexFormat(prim.mStripIndexFormat.Value());
desc.primitive.strip_index_format = &stripIndexFormat;
}
desc.primitive.front_face = ffi::WGPUFrontFace(prim.mFrontFace);
if (prim.mCullMode != dom::GPUCullMode::None) {
cullFace = prim.mCullMode == dom::GPUCullMode::Front ? ffi::WGPUFace_Front
: ffi::WGPUFace_Back;
desc.primitive.cull_mode = &cullFace;
}
}
desc.multisample = ConvertMultisampleState(aDesc.mMultisample);
ffi::WGPUDepthStencilState depthStencilState = {};
if (aDesc.mDepthStencil.WasPassed()) {
depthStencilState = ConvertDepthStencilState(aDesc.mDepthStencil.Value());
desc.depth_stencil = &depthStencilState;
}
ByteBuf bb; ByteBuf bb;
RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {}; RawId implicit_bgl_ids[WGPUMAX_BIND_GROUPS] = {};
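
A sketch of the kind of descriptor the rewritten DeviceCreateRenderPipeline accepts; the shader module, entry points, and formats are assumptions for the example, not part of this patch:

```js
// New-style GPURenderPipelineDescriptor: the old vertexStage / fragmentStage /
// rasterizationState / colorStates fields are folded into vertex, fragment,
// primitive, depthStencil, and multisample.
const pipeline = device.createRenderPipeline({
  layout: pipelineLayout,        // assumed to exist; may also be left implicit
  vertex: {
    module: shaderModule,        // assumed GPUShaderModule
    entryPoint: "vs_main",
    buffers: [{
      arrayStride: 16,
      stepMode: "vertex",
      attributes: [{ format: "float32x4", offset: 0, shaderLocation: 0 }],
    }],
  },
  primitive: { topology: "triangle-strip", stripIndexFormat: "uint16", cullMode: "back" },
  depthStencil: { format: "depth24plus", depthWriteEnabled: true, depthCompare: "less" },
  multisample: { count: 1 },
  fragment: {
    module: shaderModule,
    entryPoint: "fs_main",
    // blend components left at their defaults (src=one, dst=zero, op=add)
    targets: [{ format: "rgba8unorm", blend: { color: {}, alpha: {} } }],
  },
});
```
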


@ -245,35 +245,10 @@ ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
} }
ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice( ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) { RawId aSelfId, const ipc::ByteBuf& aByteBuf, RawId aNewId) {
ffi::WGPUDeviceDescriptor desc = {};
desc.shader_validation = true; // required for implicit pipeline layouts
if (aDesc.mLimits.WasPassed()) {
const auto& lim = aDesc.mLimits.Value();
desc.limits.max_bind_groups = lim.mMaxBindGroups;
desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout =
lim.mMaxDynamicUniformBuffersPerPipelineLayout;
desc.limits.max_dynamic_storage_buffers_per_pipeline_layout =
lim.mMaxDynamicStorageBuffersPerPipelineLayout;
desc.limits.max_sampled_textures_per_shader_stage =
lim.mMaxSampledTexturesPerShaderStage;
desc.limits.max_samplers_per_shader_stage = lim.mMaxSamplersPerShaderStage;
desc.limits.max_storage_buffers_per_shader_stage =
lim.mMaxStorageBuffersPerShaderStage;
desc.limits.max_storage_textures_per_shader_stage =
lim.mMaxStorageTexturesPerShaderStage;
desc.limits.max_uniform_buffers_per_shader_stage =
lim.mMaxUniformBuffersPerShaderStage;
desc.limits.max_uniform_buffer_binding_size =
lim.mMaxUniformBufferBindingSize;
} else {
ffi::wgpu_server_fill_default_limits(&desc.limits);
}
ErrorBuffer error; ErrorBuffer error;
ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId, ffi::wgpu_server_adapter_request_device(mContext, aSelfId, ToFFI(&aByteBuf),
error.ToFFI()); aNewId, error.ToFFI());
error.CheckAndForward(this, 0); error.CheckAndForward(this, 0);
return IPC_OK(); return IPC_OK();
} }


@ -26,7 +26,7 @@ class WebGPUParent final : public PWebGPUParent {
const nsTArray<RawId>& aTargetIds, const nsTArray<RawId>& aTargetIds,
InstanceRequestAdapterResolver&& resolver); InstanceRequestAdapterResolver&& resolver);
ipc::IPCResult RecvAdapterRequestDevice(RawId aSelfId, ipc::IPCResult RecvAdapterRequestDevice(RawId aSelfId,
const dom::GPUDeviceDescriptor& aDesc, const ipc::ByteBuf& aByteBuf,
RawId aNewId); RawId aNewId);
ipc::IPCResult RecvAdapterDestroy(RawId aSelfId); ipc::IPCResult RecvAdapterDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceDestroy(RawId aSelfId); ipc::IPCResult RecvDeviceDestroy(RawId aSelfId);


@ -32,12 +32,8 @@ DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions, DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
mPowerPreference); mPowerPreference);
DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUExtensions);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor,
mExtensions, mLimits);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width, DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width,
height, depth); height, depth_or_array_layers);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z); DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDataLayout, DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDataLayout,


@ -16,9 +16,9 @@ const func = async function() {
const buffer = device.createBuffer({size:16, usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC | GPUBufferUsage.VERTEX}); const buffer = device.createBuffer({size:16, usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC | GPUBufferUsage.VERTEX});
const arrayBuf = new ArrayBuffer(16); const arrayBuf = new ArrayBuffer(16);
(new Int32Array(arrayBuf)).fill(5) (new Int32Array(arrayBuf)).fill(5)
device.defaultQueue.writeBuffer(buffer, 0, arrayBuf, 0); device.queue.writeBuffer(buffer, 0, arrayBuf, 0);
const texture = device.createTexture({size: [2,2,1], dimension: "2d", format: "rgba8unorm", usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC }); const texture = device.createTexture({size: [2,2,1], dimension: "2d", format: "rgba8unorm", usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC });
device.defaultQueue.writeTexture({ texture }, arrayBuf, { bytesPerRow:8 }, [2,2,1]); device.queue.writeTexture({ texture }, arrayBuf, { bytesPerRow:8 }, [2,2,1]);
// this isn't a process check, we need to read back the contents and verify the writes happened // this isn't a process check, we need to read back the contents and verify the writes happened
ok(device !== undefined, ''); ok(device !== undefined, '');
}; };


@ -17,7 +17,7 @@ const func = async function() {
const pass = encoder.beginComputePass(); const pass = encoder.beginComputePass();
pass.endPass(); pass.endPass();
const command_buffer = encoder.finish(); const command_buffer = encoder.finish();
device.defaultQueue.submit([command_buffer]); device.queue.submit([command_buffer]);
ok(command_buffer !== undefined, 'command_buffer !== undefined'); ok(command_buffer !== undefined, 'command_buffer !== undefined');
}; };


@ -31,7 +31,7 @@ const func = async function() {
}); });
pass.endPass(); pass.endPass();
const command_buffer = encoder.finish(); const command_buffer = encoder.finish();
device.defaultQueue.submit([command_buffer]); device.queue.submit([command_buffer]);
ok(command_buffer !== undefined, 'command_buffer !== undefined'); ok(command_buffer !== undefined, 'command_buffer !== undefined');
}; };


@ -16,21 +16,25 @@ DIRS += []
h_and_cpp = [ h_and_cpp = [
"Adapter", "Adapter",
"AdapterFeatures",
"AdapterLimits",
"BindGroup", "BindGroup",
"BindGroupLayout", "BindGroupLayout",
"Buffer", "Buffer",
"CanvasContext", "CanvasContext",
"CommandBuffer", "CommandBuffer",
"CommandEncoder", "CommandEncoder",
"CompilationInfo",
"CompilationMessage",
"ComputePassEncoder", "ComputePassEncoder",
"ComputePipeline", "ComputePipeline",
"Device", "Device",
"DeviceLostInfo", "DeviceLostInfo",
"Fence",
"Instance", "Instance",
"ObjectModel", "ObjectModel",
"OutOfMemoryError", "OutOfMemoryError",
"PipelineLayout", "PipelineLayout",
"QuerySet",
"Queue", "Queue",
"RenderBundle", "RenderBundle",
"RenderBundleEncoder", "RenderBundleEncoder",


@ -10,7 +10,6 @@
typedef [EnforceRange] unsigned long GPUBufferDynamicOffset; typedef [EnforceRange] unsigned long GPUBufferDynamicOffset;
typedef [EnforceRange] unsigned long long GPUFenceValue;
typedef [EnforceRange] unsigned long GPUStencilValue; typedef [EnforceRange] unsigned long GPUStencilValue;
typedef [EnforceRange] unsigned long GPUSampleMask; typedef [EnforceRange] unsigned long GPUSampleMask;
typedef [EnforceRange] long GPUDepthBias; typedef [EnforceRange] long GPUDepthBias;
@ -41,8 +40,8 @@ dictionary GPUOrigin3DDict {
dictionary GPUExtent3DDict { dictionary GPUExtent3DDict {
required GPUIntegerCoordinate width; required GPUIntegerCoordinate width;
required GPUIntegerCoordinate height; GPUIntegerCoordinate height = 1;
required GPUIntegerCoordinate depth; GPUIntegerCoordinate depthOrArrayLayers = 1;
}; };
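
With height and depthOrArrayLayers now optional, a 2D extent can be written in either of the following equivalent forms; a sketch only, with texture parameters borrowed loosely from the mochitests:

```js
// Both calls describe the same 256x256 texture; the old required `depth`
// member is now `depthOrArrayLayers` and defaults to 1, as does `height`.
const a = device.createTexture({ size: [256, 256],
  format: "rgba8unorm", usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC });
const b = device.createTexture({ size: { width: 256, height: 256, depthOrArrayLayers: 1 },
  format: "rgba8unorm", usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.COPY_SRC });
```
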
typedef (sequence<double> or GPUColorDict) GPUColor; typedef (sequence<double> or GPUColorDict) GPUColor;
@ -51,11 +50,11 @@ typedef (sequence<GPUIntegerCoordinate> or GPUOrigin3DDict) GPUOrigin3D;
typedef (sequence<GPUIntegerCoordinate> or GPUExtent3DDict) GPUExtent3D; typedef (sequence<GPUIntegerCoordinate> or GPUExtent3DDict) GPUExtent3D;
interface mixin GPUObjectBase { interface mixin GPUObjectBase {
attribute DOMString? label; attribute USVString? label;
}; };
dictionary GPUObjectDescriptorBase { dictionary GPUObjectDescriptorBase {
DOMString? label; USVString label;
}; };
// **************************************************************************** // ****************************************************************************
@ -69,7 +68,7 @@ dictionary GPUObjectDescriptorBase {
interface GPU { interface GPU {
// May reject with DOMException // May reject with DOMException
[NewObject] [NewObject]
Promise<GPUAdapter> requestAdapter(optional GPURequestAdapterOptions options = {}); Promise<GPUAdapter?> requestAdapter(optional GPURequestAdapterOptions options = {});
}; };
// Add a "webgpu" member to Navigator/Worker that contains the global instance of a "WebGPU" // Add a "webgpu" member to Navigator/Worker that contains the global instance of a "WebGPU"
@ -86,43 +85,70 @@ dictionary GPURequestAdapterOptions {
GPUPowerPreference powerPreference; GPUPowerPreference powerPreference;
}; };
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUAdapterFeatures {
readonly setlike<GPUFeatureName>;
};
dictionary GPUDeviceDescriptor {
sequence<GPUFeatureName> nonGuaranteedFeatures = [];
record<DOMString, GPUSize32> nonGuaranteedLimits;
};
enum GPUFeatureName {
"depth-clamping",
"depth24unorm-stencil8",
"depth32float-stencil8",
"pipeline-statistics-query",
"texture-compression-bc",
"timestamp-query",
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUAdapterLimits {
readonly attribute unsigned long maxTextureDimension1D;
readonly attribute unsigned long maxTextureDimension2D;
readonly attribute unsigned long maxTextureDimension3D;
readonly attribute unsigned long maxTextureArrayLayers;
readonly attribute unsigned long maxBindGroups;
readonly attribute unsigned long maxDynamicUniformBuffersPerPipelineLayout;
readonly attribute unsigned long maxDynamicStorageBuffersPerPipelineLayout;
readonly attribute unsigned long maxSampledTexturesPerShaderStage;
readonly attribute unsigned long maxSamplersPerShaderStage;
readonly attribute unsigned long maxStorageBuffersPerShaderStage;
readonly attribute unsigned long maxStorageTexturesPerShaderStage;
readonly attribute unsigned long maxUniformBuffersPerShaderStage;
readonly attribute unsigned long maxUniformBufferBindingSize;
readonly attribute unsigned long maxStorageBufferBindingSize;
readonly attribute unsigned long maxVertexBuffers;
readonly attribute unsigned long maxVertexAttributes;
readonly attribute unsigned long maxVertexBufferArrayStride;
};
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUAdapter { interface GPUAdapter {
readonly attribute DOMString name; readonly attribute DOMString name;
//GPUExtensions getExtensions(); [SameObject] readonly attribute GPUAdapterFeatures features;
//readonly attribute GPULimits limits; Don't expose higher limits for now. [SameObject] readonly attribute GPUAdapterLimits limits;
// May reject with DOMException
[NewObject] [NewObject]
Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {}); Promise<GPUDevice> requestDevice(optional GPUDeviceDescriptor descriptor = {});
}; };
GPUAdapter includes GPUObjectBase;
dictionary GPUExtensions {
};
dictionary GPULimits {
GPUSize32 maxBindGroups = 4;
GPUSize32 maxDynamicUniformBuffersPerPipelineLayout = 8;
GPUSize32 maxDynamicStorageBuffersPerPipelineLayout = 4;
GPUSize32 maxSampledTexturesPerShaderStage = 16;
GPUSize32 maxSamplersPerShaderStage = 16;
GPUSize32 maxStorageBuffersPerShaderStage = 4;
GPUSize32 maxStorageTexturesPerShaderStage = 4;
GPUSize32 maxUniformBuffersPerShaderStage = 12;
GPUSize32 maxUniformBufferBindingSize = 16384;
};
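
Illustrative use of the new adapter surface, assuming a page with WebGPU enabled; the feature queried here is just an example:

```js
const adapter = await navigator.gpu.requestAdapter({ powerPreference: "high-performance" });
if (!adapter) throw new Error("no adapter");   // requestAdapter may now resolve to null
console.log(adapter.limits.maxBindGroups);     // limits are plain readonly attributes
const wantBC = adapter.features.has("texture-compression-bc");
const device = await adapter.requestDevice({
  nonGuaranteedFeatures: wantBC ? ["texture-compression-bc"] : [],
});
```
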
// Device // Device
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUDevice { interface GPUDevice: EventTarget {
//GPUExtensions getExtensions(); //[SameObject] readonly attribute GPUAdapter adapter;
//GPULimits getLimits(); //readonly attribute FrozenArray<GPUFeatureName> features;
//readonly attribute GPUAdapter adapter; //readonly attribute object limits;
[SameObject] readonly attribute GPUQueue defaultQueue; [SameObject] readonly attribute GPUQueue queue;
void destroy();
[NewObject, Throws] [NewObject, Throws]
GPUBuffer createBuffer(GPUBufferDescriptor descriptor); GPUBuffer createBuffer(GPUBufferDescriptor descriptor);
@ -139,19 +165,18 @@ interface GPUDevice {
GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor); GPUComputePipeline createComputePipeline(GPUComputePipelineDescriptor descriptor);
GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor); GPURenderPipeline createRenderPipeline(GPURenderPipelineDescriptor descriptor);
//Promise<GPUComputePipeline> createComputePipelineAsync(GPUComputePipelineDescriptor descriptor);
//Promise<GPURenderPipeline> createRenderPipelineAsync(GPURenderPipelineDescriptor descriptor);
[NewObject] [NewObject]
GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {}); GPUCommandEncoder createCommandEncoder(optional GPUCommandEncoderDescriptor descriptor = {});
//[NewObject]
//GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor); //GPURenderBundleEncoder createRenderBundleEncoder(GPURenderBundleEncoderDescriptor descriptor);
//[NewObject]
//GPUQuerySet createQuerySet(GPUQuerySetDescriptor descriptor);
}; };
GPUDevice includes GPUObjectBase; GPUDevice includes GPUObjectBase;
dictionary GPUDeviceDescriptor {
GPUExtensions extensions;
GPULimits limits;
// TODO are other things configurable like queues?
};
// **************************************************************************** // ****************************************************************************
// ERROR HANDLING // ERROR HANDLING
@ -197,7 +222,7 @@ partial interface GPUDevice {
// **************************************************************************** // ****************************************************************************
// Buffer // Buffer
typedef unsigned long GPUBufferUsageFlags; typedef [EnforceRange] unsigned long GPUBufferUsageFlags;
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUBufferUsage { interface GPUBufferUsage {
@ -219,7 +244,7 @@ dictionary GPUBufferDescriptor : GPUObjectDescriptorBase {
boolean mappedAtCreation = false; boolean mappedAtCreation = false;
}; };
typedef unsigned long GPUMapModeFlags; typedef [EnforceRange] unsigned long GPUMapModeFlags;
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
@ -300,12 +325,37 @@ enum GPUTextureFormat {
"rgba32float", "rgba32float",
// Depth and stencil formats // Depth and stencil formats
"depth32float", "stencil8",
"depth16unorm",
"depth24plus", "depth24plus",
"depth24plus-stencil8" "depth24plus-stencil8",
"depth32float",
// BC compressed formats usable if "texture-compression-bc" is both
// supported by the device/user agent and enabled in requestDevice.
"bc1-rgba-unorm",
"bc1-rgba-unorm-srgb",
"bc2-rgba-unorm",
"bc2-rgba-unorm-srgb",
"bc3-rgba-unorm",
"bc3-rgba-unorm-srgb",
"bc4-r-unorm",
"bc4-r-snorm",
"bc5-rg-unorm",
"bc5-rg-snorm",
"bc6h-rgb-ufloat",
"bc6h-rgb-float",
"bc7-rgba-unorm",
"bc7-rgba-unorm-srgb",
// "depth24unorm-stencil8" feature
"depth24unorm-stencil8",
// "depth32float-stencil8" feature
"depth32float-stencil8",
}; };
typedef unsigned long GPUTextureUsageFlags; typedef [EnforceRange] unsigned long GPUTextureUsageFlags;
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUTextureUsage { interface GPUTextureUsage {
@ -398,8 +448,9 @@ dictionary GPUSamplerDescriptor : GPUObjectDescriptorBase {
GPUFilterMode minFilter = "nearest"; GPUFilterMode minFilter = "nearest";
GPUFilterMode mipmapFilter = "nearest"; GPUFilterMode mipmapFilter = "nearest";
float lodMinClamp = 0; float lodMinClamp = 0;
float lodMaxClamp = 1000.0; //TODO? float lodMaxClamp = 1000.0; // TODO: What should this be?
GPUCompareFunction compare; GPUCompareFunction compare;
[Clamp] unsigned short maxAnisotropy = 1;
}; };
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
@ -431,7 +482,7 @@ interface GPUPipelineLayout {
GPUPipelineLayout includes GPUObjectBase; GPUPipelineLayout includes GPUObjectBase;
// BindGroupLayout // BindGroupLayout
typedef unsigned long GPUShaderStageFlags; typedef [EnforceRange] unsigned long GPUShaderStageFlags;
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUShaderStage { interface GPUShaderStage {
@ -440,26 +491,60 @@ interface GPUShaderStage {
const GPUShaderStageFlags COMPUTE = 4; const GPUShaderStageFlags COMPUTE = 4;
}; };
enum GPUBindingType { enum GPUBufferBindingType {
"uniform-buffer", "uniform",
"storage-buffer", "storage",
"readonly-storage-buffer", "read-only-storage",
"sampler", };
"comparison-sampler",
"sampled-texture", dictionary GPUBufferBindingLayout {
"readonly-storage-texture", GPUBufferBindingType type = "uniform";
"writeonly-storage-texture", boolean hasDynamicOffset = false;
GPUSize64 minBindingSize = 0;
};
enum GPUSamplerBindingType {
"filtering",
"non-filtering",
"comparison",
};
dictionary GPUSamplerBindingLayout {
GPUSamplerBindingType type = "filtering";
};
enum GPUTextureSampleType {
"float",
"unfilterable-float",
"depth",
"sint",
"uint",
};
dictionary GPUTextureBindingLayout {
GPUTextureSampleType sampleType = "float";
GPUTextureViewDimension viewDimension = "2d";
boolean multisampled = false;
};
enum GPUStorageTextureAccess {
"read-only",
"write-only",
};
dictionary GPUStorageTextureBindingLayout {
required GPUStorageTextureAccess access;
required GPUTextureFormat format;
GPUTextureViewDimension viewDimension = "2d";
}; };
dictionary GPUBindGroupLayoutEntry { dictionary GPUBindGroupLayoutEntry {
required GPUIndex32 binding; required GPUIndex32 binding;
required GPUShaderStageFlags visibility; required GPUShaderStageFlags visibility;
required GPUBindingType type; GPUBufferBindingLayout buffer;
GPUTextureViewDimension viewDimension; GPUSamplerBindingLayout sampler;
GPUTextureComponentType textureComponentType; GPUTextureBindingLayout texture;
boolean multisampled = false; GPUStorageTextureBindingLayout storageTexture;
boolean hasDynamicOffset = false;
GPUTextureFormat storageTextureFormat;
}; };
dictionary GPUBindGroupLayoutDescriptor : GPUObjectDescriptorBase { dictionary GPUBindGroupLayoutDescriptor : GPUObjectDescriptorBase {
@ -501,6 +586,168 @@ GPUBindGroup includes GPUObjectBase;
// PIPELINE CREATION (blend state, DS state, ..., pipelines) // PIPELINE CREATION (blend state, DS state, ..., pipelines)
// **************************************************************************** // ****************************************************************************
enum GPUCompilationMessageType {
"error",
"warning",
"info"
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUCompilationMessage {
readonly attribute DOMString message;
readonly attribute GPUCompilationMessageType type;
readonly attribute unsigned long long lineNum;
readonly attribute unsigned long long linePos;
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUCompilationInfo {
//TODO:
//[Cached, Frozen, Pure]
//readonly attribute sequence<GPUCompilationMessage> messages;
};
// ShaderModule
//TODO: remove the `Uint32Array` variant, it's used for SPIR-V
typedef (Uint32Array or USVString) GPUShaderCode;
dictionary GPUShaderModuleDescriptor : GPUObjectDescriptorBase {
required GPUShaderCode code;
object sourceMap;
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUShaderModule {
//TODO:
//Promise<GPUCompilationInfo> compilationInfo();
};
GPUShaderModule includes GPUObjectBase;
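
A minimal sketch of the updated createShaderModule entry point; `wgslSource` is a placeholder for WGSL text, and the Uint32Array (SPIR-V) variant noted in the TODO above still works for now:

```js
// `code` may now be a USVString holding WGSL; SPIR-V via Uint32Array is the
// legacy path that is slated for removal.
const module = device.createShaderModule({ code: wgslSource }); // wgslSource: assumed WGSL text
```
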
// Common stuff for ComputePipeline and RenderPipeline
dictionary GPUPipelineDescriptorBase : GPUObjectDescriptorBase {
GPUPipelineLayout layout;
};
interface mixin GPUPipelineBase {
GPUBindGroupLayout getBindGroupLayout(unsigned long index);
};
dictionary GPUProgrammableStage {
required GPUShaderModule module;
required USVString entryPoint;
};
// ComputePipeline
dictionary GPUComputePipelineDescriptor : GPUPipelineDescriptorBase {
required GPUProgrammableStage compute;
};
//TODO: Serializable
// https://bugzilla.mozilla.org/show_bug.cgi?id=1696219
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUComputePipeline {
};
GPUComputePipeline includes GPUObjectBase;
GPUComputePipeline includes GPUPipelineBase;
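
A compute pipeline descriptor matching the `desc.stage` change in WebGPUChild above, as a sketch; the module and entry point are assumptions:

```js
// `computeStage` is renamed to `compute`; `layout` may be omitted, in which
// case an implicit bind group layout is derived for the pipeline.
const pipeline = device.createComputePipeline({
  compute: { module: computeModule, entryPoint: "main" },
});
const bgl0 = pipeline.getBindGroupLayout(0);
```
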
// InputState
enum GPUIndexFormat {
"uint16",
"uint32",
};
enum GPUVertexFormat {
"uint8x2",
"uint8x4",
"sint8x2",
"sint8x4",
"unorm8x2",
"unorm8x4",
"snorm8x2",
"snorm8x4",
"uint16x2",
"uint16x4",
"sint16x2",
"sint16x4",
"unorm16x2",
"unorm16x4",
"snorm16x2",
"snorm16x4",
"float16x2",
"float16x4",
"float32",
"float32x2",
"float32x3",
"float32x4",
"uint32",
"uint32x2",
"uint32x3",
"uint32x4",
"sint32",
"sint32x2",
"sint32x3",
"sint32x4",
};
enum GPUInputStepMode {
"vertex",
"instance",
};
dictionary GPUVertexAttribute {
required GPUVertexFormat format;
required GPUSize64 offset;
required GPUIndex32 shaderLocation;
};
dictionary GPUVertexBufferLayout {
required GPUSize64 arrayStride;
GPUInputStepMode stepMode = "vertex";
required sequence<GPUVertexAttribute> attributes;
};
dictionary GPUVertexState: GPUProgrammableStage {
sequence<GPUVertexBufferLayout?> buffers = [];
};
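
The vertex formats move to an explicit width-by-count naming scheme; a sketch of a buffer layout using the new names, with stride and locations invented for the example:

```js
// Old "float3" / "uchar4norm" become "float32x3" / "unorm8x4", and so on.
const buffers = [{
  arrayStride: 16,
  stepMode: "vertex",
  attributes: [
    { shaderLocation: 0, offset: 0,  format: "float32x3" },
    { shaderLocation: 1, offset: 12, format: "unorm8x4" },
  ],
}];
```
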
// GPURenderPipeline
enum GPUPrimitiveTopology {
"point-list",
"line-list",
"line-strip",
"triangle-list",
"triangle-strip"
};
enum GPUFrontFace {
"ccw",
"cw"
};
enum GPUCullMode {
"none",
"front",
"back"
};
dictionary GPUPrimitiveState {
GPUPrimitiveTopology topology = "triangle-list";
GPUIndexFormat stripIndexFormat;
GPUFrontFace frontFace = "ccw";
GPUCullMode cullMode = "none";
};
dictionary GPUMultisampleState {
GPUSize32 count = 1;
GPUSampleMask mask = 0xFFFFFFFF;
boolean alphaToCoverageEnabled = false;
};
// BlendState // BlendState
enum GPUBlendFactor { enum GPUBlendFactor {
"zero", "zero",
@ -526,13 +773,18 @@ enum GPUBlendOperation {
"max" "max"
}; };
dictionary GPUBlendDescriptor { dictionary GPUBlendComponent {
GPUBlendFactor srcFactor = "one"; GPUBlendFactor srcFactor = "one";
GPUBlendFactor dstFactor = "zero"; GPUBlendFactor dstFactor = "zero";
GPUBlendOperation operation = "add"; GPUBlendOperation operation = "add";
}; };
typedef unsigned long GPUColorWriteFlags; dictionary GPUBlendState {
required GPUBlendComponent color;
required GPUBlendComponent alpha;
};
typedef [EnforceRange] unsigned long GPUColorWriteFlags;
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUColorWrite { interface GPUColorWrite {
@ -543,12 +795,14 @@ interface GPUColorWrite {
const GPUColorWriteFlags ALL = 0xF; const GPUColorWriteFlags ALL = 0xF;
}; };
dictionary GPUColorStateDescriptor { dictionary GPUColorTargetState {
required GPUTextureFormat format; required GPUTextureFormat format;
GPUBlendState blend;
GPUColorWriteFlags writeMask = 0xF; // GPUColorWrite.ALL
};
GPUBlendDescriptor alphaBlend = {}; dictionary GPUFragmentState: GPUProgrammableStage {
GPUBlendDescriptor colorBlend = {}; required sequence<GPUColorTargetState> targets;
GPUColorWriteFlags writeMask = 0xF;
}; };
// DepthStencilState // DepthStencilState
@ -563,170 +817,43 @@ enum GPUStencilOperation {
"decrement-wrap" "decrement-wrap"
}; };
dictionary GPUStencilStateFaceDescriptor { dictionary GPUStencilFaceState {
GPUCompareFunction compare = "always"; GPUCompareFunction compare = "always";
GPUStencilOperation failOp = "keep"; GPUStencilOperation failOp = "keep";
GPUStencilOperation depthFailOp = "keep"; GPUStencilOperation depthFailOp = "keep";
GPUStencilOperation passOp = "keep"; GPUStencilOperation passOp = "keep";
}; };
dictionary GPUDepthStencilStateDescriptor { dictionary GPUDepthStencilState {
required GPUTextureFormat format; required GPUTextureFormat format;
boolean depthWriteEnabled = false; boolean depthWriteEnabled = false;
GPUCompareFunction depthCompare = "always"; GPUCompareFunction depthCompare = "always";
GPUStencilStateFaceDescriptor stencilFront = {}; GPUStencilFaceState stencilFront = {};
GPUStencilStateFaceDescriptor stencilBack = {}; GPUStencilFaceState stencilBack = {};
GPUStencilValue stencilReadMask = 0xFFFFFFFF; GPUStencilValue stencilReadMask = 0xFFFFFFFF;
GPUStencilValue stencilWriteMask = 0xFFFFFFFF; GPUStencilValue stencilWriteMask = 0xFFFFFFFF;
};
// InputState
enum GPUIndexFormat {
"uint16",
"uint32",
};
enum GPUVertexFormat {
"uchar2",
"uchar4",
"char2",
"char4",
"uchar2norm",
"uchar4norm",
"char2norm",
"char4norm",
"ushort2",
"ushort4",
"short2",
"short4",
"ushort2norm",
"ushort4norm",
"short2norm",
"short4norm",
"half2",
"half4",
"float",
"float2",
"float3",
"float4",
"uint",
"uint2",
"uint3",
"uint4",
"int",
"int2",
"int3",
"int4",
};
enum GPUInputStepMode {
"vertex",
"instance",
};
dictionary GPUVertexAttributeDescriptor {
required GPUVertexFormat format;
required GPUSize64 offset;
required GPUIndex32 shaderLocation;
};
dictionary GPUVertexBufferLayoutDescriptor {
required GPUSize64 arrayStride;
GPUInputStepMode stepMode = "vertex";
required sequence<GPUVertexAttributeDescriptor> attributes;
};
dictionary GPUVertexStateDescriptor {
GPUIndexFormat indexFormat = "uint32";
sequence<GPUVertexBufferLayoutDescriptor?> vertexBuffers = [];
};
// ShaderModule
typedef (Uint32Array or DOMString) GPUShaderCode;
dictionary GPUShaderModuleDescriptor : GPUObjectDescriptorBase {
required GPUShaderCode code;
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUShaderModule {
};
GPUShaderModule includes GPUObjectBase;
// Common stuff for ComputePipeline and RenderPipeline
dictionary GPUPipelineDescriptorBase : GPUObjectDescriptorBase {
GPUPipelineLayout layout;
};
interface mixin GPUPipelineBase {
GPUBindGroupLayout getBindGroupLayout(unsigned long index);
};
dictionary GPUProgrammableStageDescriptor {
required GPUShaderModule module;
required DOMString entryPoint;
};
// ComputePipeline
dictionary GPUComputePipelineDescriptor : GPUPipelineDescriptorBase {
required GPUProgrammableStageDescriptor computeStage;
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUComputePipeline {
};
GPUComputePipeline includes GPUObjectBase;
GPUComputePipeline includes GPUPipelineBase;
// GPURenderPipeline
enum GPUPrimitiveTopology {
"point-list",
"line-list",
"line-strip",
"triangle-list",
"triangle-strip"
};
dictionary GPURasterizationStateDescriptor {
GPUFrontFace frontFace = "ccw";
GPUCullMode cullMode = "none";
GPUDepthBias depthBias = 0; GPUDepthBias depthBias = 0;
float depthBiasSlopeScale = 0; float depthBiasSlopeScale = 0;
float depthBiasClamp = 0; float depthBiasClamp = 0;
};
enum GPUFrontFace { // Enable depth clamping (requires "depth-clamping" feature)
"ccw", boolean clampDepth = false;
"cw"
};
enum GPUCullMode {
"none",
"front",
"back"
}; };
dictionary GPURenderPipelineDescriptor : GPUPipelineDescriptorBase { dictionary GPURenderPipelineDescriptor : GPUPipelineDescriptorBase {
required GPUProgrammableStageDescriptor vertexStage; required GPUVertexState vertex;
GPUProgrammableStageDescriptor fragmentStage; GPUPrimitiveState primitive = {};
GPUDepthStencilState depthStencil;
required GPUPrimitiveTopology primitiveTopology; GPUMultisampleState multisample = {};
GPURasterizationStateDescriptor rasterizationState = {}; GPUFragmentState fragment;
required sequence<GPUColorStateDescriptor> colorStates;
GPUDepthStencilStateDescriptor depthStencilState;
GPUVertexStateDescriptor vertexState = {};
GPUSize32 sampleCount = 1;
GPUSampleMask sampleMask = 0xFFFFFFFF;
boolean alphaToCoverageEnabled = false;
}; };
//TODO: Serializable
// https://bugzilla.mozilla.org/show_bug.cgi?id=1696219
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPURenderPipeline { interface GPURenderPipeline {
@ -768,6 +895,7 @@ dictionary GPURenderPassDepthStencilAttachmentDescriptor {
dictionary GPURenderPassDescriptor : GPUObjectDescriptorBase { dictionary GPURenderPassDescriptor : GPUObjectDescriptorBase {
required sequence<GPURenderPassColorAttachmentDescriptor> colorAttachments; required sequence<GPURenderPassColorAttachmentDescriptor> colorAttachments;
GPURenderPassDepthStencilAttachmentDescriptor depthStencilAttachment; GPURenderPassDepthStencilAttachmentDescriptor depthStencilAttachment;
GPUQuerySet occlusionQuerySet;
}; };
dictionary GPUTextureDataLayout { dictionary GPUTextureDataLayout {
@ -831,9 +959,9 @@ interface GPUCommandEncoder {
GPUExtent3D copySize); GPUExtent3D copySize);
*/ */
//void pushDebugGroup(DOMString groupLabel); //void pushDebugGroup(USVString groupLabel);
//void popDebugGroup(); //void popDebugGroup();
//void insertDebugMarker(DOMString markerLabel); //void insertDebugMarker(USVString markerLabel);
[NewObject] [NewObject]
GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {}); GPUCommandBuffer finish(optional GPUCommandBufferDescriptor descriptor = {});
@ -844,16 +972,16 @@ interface mixin GPUProgrammablePassEncoder {
void setBindGroup(GPUIndex32 index, GPUBindGroup bindGroup, void setBindGroup(GPUIndex32 index, GPUBindGroup bindGroup,
optional sequence<GPUBufferDynamicOffset> dynamicOffsets = []); optional sequence<GPUBufferDynamicOffset> dynamicOffsets = []);
//void pushDebugGroup(DOMString groupLabel); //void pushDebugGroup(USVString groupLabel);
//void popDebugGroup(); //void popDebugGroup();
//void insertDebugMarker(DOMString markerLabel); //void insertDebugMarker(USVString markerLabel);
}; };
// Render Pass // Render Pass
interface mixin GPURenderEncoderBase { interface mixin GPURenderEncoderBase {
void setPipeline(GPURenderPipeline pipeline); void setPipeline(GPURenderPipeline pipeline);
void setIndexBuffer(GPUBuffer buffer, optional GPUSize64 offset = 0, optional GPUSize64 size = 0); void setIndexBuffer(GPUBuffer buffer, GPUIndexFormat indexFormat, optional GPUSize64 offset = 0, optional GPUSize64 size = 0);
void setVertexBuffer(GPUIndex32 slot, GPUBuffer buffer, optional GPUSize64 offset = 0, optional GPUSize64 size = 0); void setVertexBuffer(GPUIndex32 slot, GPUBuffer buffer, optional GPUSize64 offset = 0, optional GPUSize64 size = 0);
void draw(GPUSize32 vertexCount, void draw(GPUSize32 vertexCount,
@ -927,46 +1055,67 @@ interface GPUCommandBuffer {
}; };
GPUCommandBuffer includes GPUObjectBase; GPUCommandBuffer includes GPUObjectBase;
dictionary GPURenderBundleEncoderDescriptor : GPUObjectDescriptorBase {
required sequence<GPUTextureFormat> colorFormats;
GPUTextureFormat depthStencilFormat;
GPUSize32 sampleCount = 1;
};
// Render Bundle // Render Bundle
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPURenderBundleEncoder {
//GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {});
};
GPURenderBundleEncoder includes GPUObjectBase;
//GPURenderBundleEncoder includes GPURenderEncoderBase;
dictionary GPURenderBundleDescriptor : GPUObjectDescriptorBase {
};
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPURenderBundle { interface GPURenderBundle {
}; };
GPURenderBundle includes GPUObjectBase; GPURenderBundle includes GPUObjectBase;
// **************************************************************************** dictionary GPURenderBundleDescriptor : GPUObjectDescriptorBase {
// OTHER (Fence, Queue SwapChain, Device) };
// ****************************************************************************
// Fence dictionary GPURenderBundleEncoderDescriptor : GPUObjectDescriptorBase {
dictionary GPUFenceDescriptor : GPUObjectDescriptorBase { required sequence<GPUTextureFormat> colorFormats;
GPUFenceValue initialValue = 0; GPUTextureFormat depthStencilFormat;
GPUSize32 sampleCount = 1;
}; };
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
Exposed=Window] Exposed=Window]
interface GPUFence { interface GPURenderBundleEncoder {
//GPUFenceValue getCompletedValue(); //GPURenderBundle finish(optional GPURenderBundleDescriptor descriptor = {});
//Promise<void> onCompletion(GPUFenceValue completionValue);
}; };
GPUFence includes GPUObjectBase; GPURenderBundleEncoder includes GPUObjectBase;
//TODO
//GPURenderBundleEncoder includes GPUProgrammablePassEncoder;
//GPURenderBundleEncoder includes GPURenderEncoderBase;
// ****************************************************************************
// OTHER (Query, Queue, SwapChain, Device)
// ****************************************************************************
// Query set
enum GPUQueryType {
"occlusion",
"pipeline-statistics",
"timestamp"
};
enum GPUPipelineStatisticName {
"vertex-shader-invocations",
"clipper-invocations",
"clipper-primitives-out",
"fragment-shader-invocations",
"compute-shader-invocations"
};
dictionary GPUQuerySetDescriptor : GPUObjectDescriptorBase {
required GPUQueryType type;
required GPUSize32 count;
sequence<GPUPipelineStatisticName> pipelineStatistics = [];
};
[Pref="dom.webgpu.enabled",
Exposed=Window]
interface GPUQuerySet {
void destroy();
};
GPUQuerySet includes GPUObjectBase;
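
For orientation, the shape a query-set descriptor would take; note that GPUDevice.createQuerySet is still commented out in this revision, so this is only the dictionary form:

```js
// Hypothetical descriptor; pipelineStatistics is only meaningful for the
// "pipeline-statistics" query type.
const occlusionQueryDesc = { type: "occlusion", count: 32 };
```
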
//TODO: use [AllowShared] on BufferSource
// https://bugzilla.mozilla.org/show_bug.cgi?id=1696216
// https://github.com/heycam/webidl/issues/961
// Queue // Queue
[Pref="dom.webgpu.enabled", [Pref="dom.webgpu.enabled",
@ -974,21 +1123,21 @@ GPUFence includes GPUObjectBase;
interface GPUQueue { interface GPUQueue {
void submit(sequence<GPUCommandBuffer> buffers); void submit(sequence<GPUCommandBuffer> buffers);
//GPUFence createFence(optional GPUFenceDescriptor descriptor = {}); //TODO:
//void signal(GPUFence fence, GPUFenceValue signalValue); //Promise<void> onSubmittedWorkDone();
[Throws] [Throws]
void writeBuffer( void writeBuffer(
GPUBuffer buffer, GPUBuffer buffer,
GPUSize64 bufferOffset, GPUSize64 bufferOffset,
[AllowShared] ArrayBuffer data, BufferSource data,
optional GPUSize64 dataOffset = 0, optional GPUSize64 dataOffset = 0,
optional GPUSize64 size); optional GPUSize64 size);
[Throws] [Throws]
void writeTexture( void writeTexture(
GPUTextureCopyView destination, GPUTextureCopyView destination,
[AllowShared] ArrayBuffer data, BufferSource data,
GPUTextureDataLayout dataLayout, GPUTextureDataLayout dataLayout,
GPUExtent3D size); GPUExtent3D size);
}; };
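
A small sketch of the renamed queue and the BufferSource-typed write methods; the buffer and texture objects are assumed to exist and the sizes are illustrative:

```js
// Typed-array views are now accepted directly, not just ArrayBuffer.
const bytes = new Uint8Array(16).fill(5);
device.queue.writeBuffer(vertexBuffer, 0, bytes);
device.queue.writeTexture({ texture }, bytes, { bytesPerRow: 8 }, [2, 2, 1]);
```
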
@ -1015,5 +1164,5 @@ interface GPUCanvasContext {
[Throws] [Throws]
GPUSwapChain configureSwapChain(GPUSwapChainDescriptor descriptor); GPUSwapChain configureSwapChain(GPUSwapChainDescriptor descriptor);
//Promise<GPUTextureFormat> getSwapChainPreferredFormat(GPUDevice device); GPUTextureFormat getSwapChainPreferredFormat(GPUAdapter adapter);
}; };


@ -7,7 +7,7 @@ assignees: ''
--- ---
<!-- Thank you for filing this! Please read the [debugging tips](https://github.com/gfx-rs/wgpu/wiki/Debbugging-wgpu-Applications). <!-- Thank you for filing this! Please read the [debugging tips](https://github.com/gfx-rs/wgpu/wiki/Debugging-wgpu-Applications).
That may let you investigate on your own, or provide additional information that helps us to assist.--> That may let you investigate on your own, or provide additional information that helps us to assist.-->
**Description** **Description**

gfx/wgpu/.github/workflows/ci.yml

@ -26,17 +26,21 @@ jobs:
PKG_CONFIG_ALLOW_CROSS: 1 PKG_CONFIG_ALLOW_CROSS: 1
steps: steps:
- uses: actions/checkout@v2 - uses: actions/checkout@v2
- name: Prepare - run: echo "$ANDROID_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
run: |
sudo apt-get update -y -qq
sudo apt-get install -y -qq libegl1-mesa-dev
echo "$ANDROID_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
- run: rustup component add clippy - run: rustup component add clippy
- run: rustup target add ${{ env.TARGET }} - run: rustup target add ${{ env.TARGET }}
- run: cargo clippy --target ${{ env.TARGET }} - run: cargo clippy --target ${{ env.TARGET }}
- name: Additional core features - name: Additional core features
run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }} run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }}
webgl_build:
name: Web Assembly
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
- run: rustup target add wasm32-unknown-unknown
- run: cargo build --manifest-path wgpu-core/Cargo.toml --target wasm32-unknown-unknown
build: build:
name: ${{ matrix.name }} name: ${{ matrix.name }}
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
@ -61,9 +65,7 @@ jobs:
- name: Ubuntu Stable - name: Ubuntu Stable
os: ubuntu-18.04 os: ubuntu-18.04
channel: stable channel: stable
prepare_command: | prepare_command:
sudo apt-get update -y -qq
sudo apt-get install -y -qq libegl1-mesa-dev
additional_core_features: trace,replay additional_core_features: trace,replay
additional_player_features: additional_player_features:
- name: Ubuntu Nightly - name: Ubuntu Nightly
@ -71,8 +73,6 @@ jobs:
channel: nightly channel: nightly
prepare_command: | prepare_command: |
sudo apt-get update -y -qq sudo apt-get update -y -qq
echo "Installing EGL"
sudo apt-get install -y -qq libegl1-mesa-dev
echo "Installing Vulkan" echo "Installing Vulkan"
sudo apt-get install -y -qq mesa-vulkan-drivers sudo apt-get install -y -qq mesa-vulkan-drivers
additional_core_features: serial-pass additional_core_features: serial-pass

gfx/wgpu/.gitignore

@ -1,6 +1,7 @@
/target /target
**/*.rs.bk **/*.rs.bk
#Cargo.lock #Cargo.lock
.fuse_hidden*
.DS_Store .DS_Store
.vscode .vscode
.vs .vs


@ -1,5 +1,26 @@
# Change Log # Change Log
## v0.7 (2021-01-31)
- Major API changes:
- `RenderPipelineDescriptor`
- `BindingType`
- Features:
- (beta) WGSL support, including the ability to bypass SPIR-V entirely
- (beta) implicit bind group layout support
- timestamp and pipeline statistics queries
- ETC2 and ASTC compressed textures
- (beta) targeting WASM with WebGL backend
- reduced dependencies
- Native-only:
- clamp-to-border addressing
- polygon fill modes
- query a format for extra capabilities
- `f64` support in shaders
- Validation:
- shader interface
- render pipeline descriptor
- vertex buffers
## v0.6 (2020-08-17) ## v0.6 (2020-08-17)
- Crates: - Crates:
- C API is moved to [another repository](https://github.com/gfx-rs/wgpu-native) - C API is moved to [another repository](https://github.com/gfx-rs/wgpu-native)

gfx/wgpu/Cargo.lock (generated): diff not shown because of its size.

@ -5,3 +5,15 @@ members = [
"wgpu-core", "wgpu-core",
"wgpu-types", "wgpu-types",
] ]
[patch."https://github.com/gfx-rs/gfx"]
#hal = { package = "gfx-hal", path = "../gfx/src/hal" }
#gfx-backend-vulkan = { path = "../gfx/src/backend/vulkan", features = ["naga"] }
#gfx-backend-metal = { path = "../gfx/src/backend/metal", features = ["naga"] }
#gfx-backend-gl = { path = "../gfx/src/backend/gl", features = ["naga"] }
#gfx-backend-dx12 = { path = "../gfx/src/backend/dx12" }
#gfx-backend-dx11 = { path = "../gfx/src/backend/dx11" }
#gfx-backend-empty = { path = "../gfx/src/backend/empty" }
[patch."https://github.com/gfx-rs/naga"]
#naga = { path = "../naga" }


@ -12,7 +12,7 @@ This is the core logic of an experimental [WebGPU](https://www.w3.org/community/
The implementation consists of the following parts: The implementation consists of the following parts:
- [![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core) [![docs.rs](https://docs.rs/wgpu-core/badge.svg)](https://docs.rs/wgpu-core/) - internal Rust API for WebGPU implementations to use - [![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core) [![docs.rs](https://docs.rs/wgpu-core/badge.svg)](https://docs.rs/wgpu-core/) - internal Rust API for WebGPU implementations to use
- [![Crates.io](https://img.shields.io/crates/v/wgpu-types.svg?label=wgpu-types)](https://crates.io/crates/wgpu-types) [![docs.rs](https://docs.rs/wgpu-types/badge.svg)](https://docs.rs/wgpu-types/) - Rust types shared between `wgpu-core`, `wgpu-native`, and `wgpu-rs` - [![Crates.io](https://img.shields.io/crates/v/wgpu-types.svg?label=wgpu-types)](https://crates.io/crates/wgpu-types) [![docs.rs](https://docs.rs/wgpu-types/badge.svg)](https://docs.rs/wgpu-types/) - Rust types shared between `wgpu-core` and `wgpu-rs`
- `player` - standalone application for replaying the API traces, uses `winit` - `player` - standalone application for replaying the API traces, uses `winit`
This repository contains the core of `wgpu`, and is not usable directly by applications. This repository contains the core of `wgpu`, and is not usable directly by applications.
@ -23,10 +23,10 @@ If you are looking for the native implementation or bindings to the API in other
API | Windows 7/10 | Linux & Android | macOS & iOS | API | Windows 7/10 | Linux & Android | macOS & iOS |
----- | ------------------ | ------------------ | ------------------ | ----- | ------------------ | ------------------ | ------------------ |
DX11 | :white_check_mark: | | | DX11 | :ok: | | |
DX12 | :heavy_check_mark: | | | DX12 | :white_check_mark: | | |
Vulkan | :heavy_check_mark: | :heavy_check_mark: | | Vulkan | :white_check_mark: | :white_check_mark: | |
Metal | | | :heavy_check_mark: | Metal | | | :white_check_mark: |
OpenGL | | :construction: | :construction: | GL ES3 | | :construction: | |
:heavy_check_mark: = Primary support — :white_check_mark: = Secondary support — :construction: = Unsupported, but support in progress :white_check_mark: = Primary support — :ok: = Secondary support — :construction: = Unsupported, but support in progress


@ -12,5 +12,4 @@ publish = false
[dependencies.wgc] [dependencies.wgc]
path = "../wgpu-core" path = "../wgpu-core"
package = "wgpu-core" package = "wgpu-core"
version = "0.6"
features = ["serial-pass", "trace"] features = ["serial-pass", "trace"]


@ -13,27 +13,31 @@ license = "MPL-2.0"
publish = false publish = false
[features] [features]
cross = ["wgc/cross"]
[dependencies] [dependencies]
env_logger = "0.7" env_logger = "0.8"
log = "0.4" log = "0.4"
raw-window-handle = "0.3" raw-window-handle = "0.3"
renderdoc = { version = "0.8", optional = true, default_features = false } renderdoc = { version = "0.10", optional = true, default_features = false }
ron = "0.6" ron = "0.6"
winit = { version = "0.22", optional = true } winit = { version = "0.24", optional = true }
[dependencies.wgt] [dependencies.wgt]
path = "../wgpu-types" path = "../wgpu-types"
package = "wgpu-types" package = "wgpu-types"
version = "0.6"
features = ["replay"] features = ["replay"]
[dependencies.wgc] [dependencies.wgc]
path = "../wgpu-core" path = "../wgpu-core"
package = "wgpu-core" package = "wgpu-core"
version = "0.6"
features = ["replay", "raw-window-handle"] features = ["replay", "raw-window-handle"]
#[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies.gfx-backend-metal]
#git = "https://github.com/gfx-rs/gfx"
#rev = "" # insert revision here
#features = ["auto-capture"]
[dependencies.wgpu-subscriber] [dependencies.wgpu-subscriber]
git = "https://github.com/gfx-rs/subscriber.git" git = "https://github.com/gfx-rs/subscriber.git"
rev = "cdc9feb53f152f9c41905ed9efeff2c1ed214361" rev = "cdc9feb53f152f9c41905ed9efeff2c1ed214361"

View file

@ -51,7 +51,7 @@ fn main() {
#[cfg(feature = "winit")] #[cfg(feature = "winit")]
let window = WindowBuilder::new() let window = WindowBuilder::new()
.with_title("wgpu player") .with_title("wgpu player")
.with_resizable(false) .with_resizable(true)
.build(&event_loop) .build(&event_loop)
.unwrap(); .unwrap();
@ -123,6 +123,7 @@ fn main() {
event_loop::ControlFlow, event_loop::ControlFlow,
}; };
let mut resize_desc = None;
let mut frame_count = 0; let mut frame_count = 0;
event_loop.run(move |event, _, control_flow| { event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll; *control_flow = ControlFlow::Poll;
@ -130,21 +131,27 @@ fn main() {
Event::MainEventsCleared => { Event::MainEventsCleared => {
window.request_redraw(); window.request_redraw();
} }
Event::RedrawRequested(_) => loop { Event::RedrawRequested(_) if resize_desc.is_none() => loop {
match actions.pop() { match actions.pop() {
Some(trace::Action::CreateSwapChain(id, desc)) => { Some(trace::Action::CreateSwapChain(id, desc)) => {
log::info!("Initializing the swapchain"); log::info!("Initializing the swapchain");
assert_eq!(id.to_surface_id(), surface); assert_eq!(id.to_surface_id(), surface);
window.set_inner_size(winit::dpi::PhysicalSize::new( let current_size: (u32, u32) = window.inner_size().into();
desc.width, let size = (desc.width, desc.height);
desc.height, if current_size != size {
)); window.set_inner_size(winit::dpi::PhysicalSize::new(
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc)); desc.width,
desc.height,
));
resize_desc = Some(desc);
} else {
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc)).unwrap();
}
} }
Some(trace::Action::PresentSwapChain(id)) => { Some(trace::Action::PresentSwapChain(id)) => {
frame_count += 1; frame_count += 1;
log::debug!("Presenting frame {}", frame_count); log::debug!("Presenting frame {}", frame_count);
gfx_select!(device => global.swap_chain_present(id)); gfx_select!(device => global.swap_chain_present(id)).unwrap();
break; break;
} }
Some(action) => { Some(action) => {
@ -154,6 +161,11 @@ fn main() {
} }
}, },
Event::WindowEvent { event, .. } => match event { Event::WindowEvent { event, .. } => match event {
WindowEvent::Resized(_) => {
if let Some(desc) = resize_desc.take() {
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc)).unwrap();
}
}
WindowEvent::KeyboardInput { WindowEvent::KeyboardInput {
input: input:
KeyboardInput { KeyboardInput {
@ -170,7 +182,7 @@ fn main() {
}, },
Event::LoopDestroyed => { Event::LoopDestroyed => {
log::info!("Closing"); log::info!("Closing");
gfx_select!(device => global.device_poll(device, true)); gfx_select!(device => global.device_poll(device, true)).unwrap();
} }
_ => {} _ => {}
} }
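For reference, the resize handling above can be summarized by the following minimal sketch; `SwapChainDescriptor`, `Event`, and `recreate_swap_chain` are simplified stand-ins for the real wgpu/winit types, not the actual API.

// Sketch of the deferred swap chain recreation used by the player above:
// if the trace asks for a size the window does not have yet, remember the
// descriptor and only create the swap chain once the Resized event arrives.
#[derive(Clone, Copy, PartialEq, Debug)]
struct SwapChainDescriptor { width: u32, height: u32 }

enum Event { RedrawRequested, Resized(u32, u32) }

struct Player {
    window_size: (u32, u32),
    pending: Option<SwapChainDescriptor>, // set while waiting for the OS resize
}

impl Player {
    fn request_swap_chain(&mut self, desc: SwapChainDescriptor) {
        if self.window_size == (desc.width, desc.height) {
            recreate_swap_chain(&desc); // sizes already match: create immediately
        } else {
            self.pending = Some(desc); // defer until Event::Resized
        }
    }

    fn handle(&mut self, event: Event) {
        match event {
            Event::Resized(w, h) => {
                self.window_size = (w, h);
                if let Some(desc) = self.pending.take() {
                    recreate_swap_chain(&desc); // surface now matches the trace
                }
            }
            Event::RedrawRequested if self.pending.is_some() => {
                // skip replaying further actions until the resize has landed
            }
            Event::RedrawRequested => { /* pop and replay the next trace actions */ }
        }
    }
}

fn recreate_swap_chain(desc: &SwapChainDescriptor) {
    println!("creating swap chain {}x{}", desc.width, desc.height);
}

fn main() {
    let mut p = Player { window_size: (800, 600), pending: None };
    p.request_swap_chain(SwapChainDescriptor { width: 64, height: 64 });
    p.handle(Event::Resized(64, 64));
    p.handle(Event::RedrawRequested);
}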

View file

@ -81,6 +81,28 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
trace::Command::CopyTextureToTexture { src, dst, size } => self trace::Command::CopyTextureToTexture { src, dst, size } => self
.command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size) .command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size)
.unwrap(), .unwrap(),
trace::Command::WriteTimestamp {
query_set_id,
query_index,
} => self
.command_encoder_write_timestamp::<B>(encoder, query_set_id, query_index)
.unwrap(),
trace::Command::ResolveQuerySet {
query_set_id,
start_query,
query_count,
destination,
destination_offset,
} => self
.command_encoder_resolve_query_set::<B>(
encoder,
query_set_id,
start_query,
query_count,
destination,
destination_offset,
)
.unwrap(),
trace::Command::RunComputePass { base } => { trace::Command::RunComputePass { base } => {
self.command_encoder_run_compute_pass_impl::<B>(encoder, base.as_ref()) self.command_encoder_run_compute_pass_impl::<B>(encoder, base.as_ref())
.unwrap(); .unwrap();
@ -117,6 +139,7 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
) { ) {
use wgc::device::trace::Action as A; use wgc::device::trace::Action as A;
log::info!("action {:?}", action); log::info!("action {:?}", action);
//TODO: find a way to force ID perishing without excessive `maintain()` calls.
match action { match action {
A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"), A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"),
A::CreateSwapChain { .. } | A::PresentSwapChain(_) => { A::CreateSwapChain { .. } | A::PresentSwapChain(_) => {
@ -160,7 +183,7 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
} }
} }
A::DestroyTextureView(id) => { A::DestroyTextureView(id) => {
self.texture_view_drop::<B>(id).unwrap(); self.texture_view_drop::<B>(id, true).unwrap();
} }
A::CreateSampler(id, desc) => { A::CreateSampler(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap(); self.device_maintain_ids::<B>(device).unwrap();
@ -173,12 +196,11 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
self.sampler_drop::<B>(id); self.sampler_drop::<B>(id);
} }
A::GetSwapChainTexture { id, parent_id } => { A::GetSwapChainTexture { id, parent_id } => {
if let Some(id) = id { self.device_maintain_ids::<B>(device).unwrap();
self.swap_chain_get_current_texture_view::<B>(parent_id, id) self.swap_chain_get_current_texture_view::<B>(parent_id, id)
.unwrap() .unwrap()
.view_id .view_id
.unwrap(); .unwrap();
}
} }
A::CreateBindGroupLayout(id, desc) => { A::CreateBindGroupLayout(id, desc) => {
let (_, error) = self.device_create_bind_group_layout::<B>(device, &desc, id); let (_, error) = self.device_create_bind_group_layout::<B>(device, &desc, id);
@ -209,22 +231,20 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
A::DestroyBindGroup(id) => { A::DestroyBindGroup(id) => {
self.bind_group_drop::<B>(id); self.bind_group_drop::<B>(id);
} }
A::CreateShaderModule { id, data, label } => { A::CreateShaderModule { id, desc, data } => {
let desc = wgc::pipeline::ShaderModuleDescriptor { let source = if data.ends_with(".wgsl") {
source: if data.ends_with(".wgsl") { let code = fs::read_to_string(dir.join(data)).unwrap();
let code = fs::read_to_string(dir.join(data)).unwrap(); wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code))
wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code)) } else {
} else { let byte_vec = fs::read(dir.join(&data))
let byte_vec = fs::read(dir.join(data)).unwrap(); .unwrap_or_else(|e| panic!("Unable to open '{}': {:?}", data, e));
let spv = byte_vec let spv = byte_vec
.chunks(4) .chunks(4)
.map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]])) .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
wgc::pipeline::ShaderModuleSource::SpirV(Cow::Owned(spv)) wgc::pipeline::ShaderModuleSource::SpirV(Cow::Owned(spv))
},
label,
}; };
let (_, error) = self.device_create_shader_module::<B>(device, &desc, id); let (_, error) = self.device_create_shader_module::<B>(device, &desc, source, id);
if let Some(e) = error { if let Some(e) = error {
panic!("{:?}", e); panic!("{:?}", e);
} }
@ -232,10 +252,21 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
A::DestroyShaderModule(id) => { A::DestroyShaderModule(id) => {
self.shader_module_drop::<B>(id); self.shader_module_drop::<B>(id);
} }
A::CreateComputePipeline(id, desc) => { A::CreateComputePipeline {
id,
desc,
implicit_context,
} => {
self.device_maintain_ids::<B>(device).unwrap(); self.device_maintain_ids::<B>(device).unwrap();
let implicit_ids =
implicit_context
.as_ref()
.map(|ic| wgc::device::ImplicitPipelineIds {
root_id: ic.root_id,
group_ids: &ic.group_ids,
});
let (_, _, error) = let (_, _, error) =
self.device_create_compute_pipeline::<B>(device, &desc, id, None); self.device_create_compute_pipeline::<B>(device, &desc, id, implicit_ids);
if let Some(e) = error { if let Some(e) = error {
panic!("{:?}", e); panic!("{:?}", e);
} }
@ -243,10 +274,21 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
A::DestroyComputePipeline(id) => { A::DestroyComputePipeline(id) => {
self.compute_pipeline_drop::<B>(id); self.compute_pipeline_drop::<B>(id);
} }
A::CreateRenderPipeline(id, desc) => { A::CreateRenderPipeline {
id,
desc,
implicit_context,
} => {
self.device_maintain_ids::<B>(device).unwrap(); self.device_maintain_ids::<B>(device).unwrap();
let implicit_ids =
implicit_context
.as_ref()
.map(|ic| wgc::device::ImplicitPipelineIds {
root_id: ic.root_id,
group_ids: &ic.group_ids,
});
let (_, _, error) = let (_, _, error) =
self.device_create_render_pipeline::<B>(device, &desc, id, None); self.device_create_render_pipeline::<B>(device, &desc, id, implicit_ids);
if let Some(e) = error { if let Some(e) = error {
panic!("{:?}", e); panic!("{:?}", e);
} }
@ -269,6 +311,16 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
A::DestroyRenderBundle(id) => { A::DestroyRenderBundle(id) => {
self.render_bundle_drop::<B>(id); self.render_bundle_drop::<B>(id);
} }
A::CreateQuerySet { id, desc } => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_query_set::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyQuerySet(id) => {
self.query_set_drop::<B>(id);
}
A::WriteBuffer { A::WriteBuffer {
id, id,
data, data,
@ -296,6 +348,9 @@ impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
self.queue_write_texture::<B>(device, &to, &bin, &layout, &size) self.queue_write_texture::<B>(device, &to, &bin, &layout, &size)
.unwrap(); .unwrap();
} }
A::Submit(_index, ref commands) if commands.is_empty() => {
self.queue_submit::<B>(device, &[]).unwrap();
}
A::Submit(_index, commands) => { A::Submit(_index, commands) => {
let (encoder, error) = self.device_create_command_encoder::<B>( let (encoder, error) = self.device_create_command_encoder::<B>(
device, device,

View file

@ -2,6 +2,7 @@
backends: (bits: 0xF), backends: (bits: 0xF),
tests: [ tests: [
"buffer-copy.ron", "buffer-copy.ron",
"buffer-zero-init.ron",
"bind-group.ron", "bind-group.ron",
"quad.ron", "quad.ron",
], ],

View file

@ -9,17 +9,23 @@
)), )),
CreateShaderModule( CreateShaderModule(
id: Id(0, 1, Empty), id: Id(0, 1, Empty),
label: None, desc: (
data: "empty.comp.spv", label: None,
), flags: (bits: 3),
CreateComputePipeline(Id(0, 1, Empty), (
label: None,
layout: Some(Id(0, 1, Empty)),
compute_stage: (
module: Id(0, 1, Empty),
entry_point: "main",
), ),
)), data: "empty.wgsl",
),
CreateComputePipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
),
),
CreateBuffer(Id(0, 1, Empty), ( CreateBuffer(Id(0, 1, Empty), (
label: None, label: None,
size: 16, size: 16,

View file

@ -0,0 +1,15 @@
[[builtin(global_invocation_id)]]
var global_id: vec3<u32>;
[[block]]
struct InOutBuffer {
data: [[stride(4)]] array<u32>;
};
[[group(0), binding(0)]]
var<storage> buffer: [[access(read_write)]] InOutBuffer;
[[stage(compute), workgroup_size(1)]]
fn main() {
buffer.data[global_id.x] = buffer.data[global_id.x] + global_id.x;
}
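The new test relies on wgpu zero-initializing the storage buffer before the dispatch; below is a small host-side sketch of the values the `buffer-zero-init.ron` expectations encode (the constants come from that file, everything else is illustrative).

// Each invocation adds its global id to a zero-initialized element, so after
// Dispatch((4, 1, 1)) with workgroup_size(1) element i holds the value i.
fn main() {
    let mut buffer = vec![0u32; 4]; // zero-initialized by wgpu before the dispatch
    for global_id_x in 0..4u32 {
        let i = global_id_x as usize;
        buffer[i] = buffer[i] + global_id_x; // mirrors the WGSL statement above
    }
    assert_eq!(buffer, vec![0, 1, 2, 3]);
    // The little-endian bytes are what the Raw([...]) expectation lists.
    let bytes: Vec<u8> = buffer.iter().flat_map(|v| v.to_le_bytes().to_vec()).collect();
    assert_eq!(&bytes[..8], &[0, 0, 0, 0, 1, 0, 0, 0]);
}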

View file

@ -0,0 +1,170 @@
(
features: (bits: 0x0),
expectations: [
// Ensuring that mapping zero-inits buffers.
(
name: "mapped_at_creation: false, with MAP_WRITE",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00]),
),
(
name: "mapped_at_creation: false, without MAP_WRITE",
buffer: (index: 1, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00]),
),
(
name: "partially written buffer",
buffer: (index: 2, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x80, 0xBF,
0x00, 0x00, 0x80, 0xBF,
0x00, 0x00, 0x80, 0x3F,
0x00, 0x00, 0x80, 0x3F,
0x00, 0x00, 0x00, 0x00]),
),
// Ensuring that binding zero-inits buffers
// (by observing correct side effects of compute shader reading & writing values)
(
name: "buffer has correct values",
buffer: (index: 3, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00]),
)
],
actions: [
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("mapped_at_creation: false, with MAP_WRITE"),
size: 16,
usage: (
bits: 131, // STORAGE + MAP_READ + MAP_WRITE
),
mapped_at_creation: false,
),
),
CreateBuffer(
Id(1, 1, Empty),
(
label: Some("mapped_at_creation: false, without MAP_WRITE"),
size: 16,
usage: (
bits: 129, // STORAGE + MAP_READ
),
mapped_at_creation: false,
),
),
CreateBuffer(
Id(2, 1, Empty),
(
label: Some("partially written"),
size: 24,
usage: (
bits: 9, // MAP_READ + COPY_DST
),
mapped_at_creation: false,
),
),
WriteBuffer(
id: Id(2, 1, Empty),
data: "data1.bin",
range: (
start: 4,
end: 20,
),
queued: true,
),
CreateShaderModule(
id: Id(0, 1, Empty),
desc: (
label: None,
flags: (bits: 3),
),
data: "buffer-zero-init-for-binding.wgsl",
),
CreateBuffer(Id(3, 1, Empty), (
label: Some("used in binding"),
size: 16,
usage: (
bits: 129, // STORAGE + MAP_READ
),
mapped_at_creation: false,
)),
CreateBindGroupLayout(Id(0, 1, Empty), (
label: None,
entries: [
(
binding: 0,
visibility: (
bits: 4,
),
ty: Buffer(
ty: Storage(
read_only: false,
),
has_dynamic_offset: false,
min_binding_size: Some(16),
),
count: None,
),
],
)),
CreateBindGroup(Id(0, 1, Empty), (
label: None,
layout: Id(0, 1, Empty),
entries: [
(
binding: 0,
resource: Buffer((
buffer_id: Id(3, 1, Empty),
offset: 0,
size: Some(16),
)),
),
],
)),
CreatePipelineLayout(Id(0, 1, Empty), (
label: None,
bind_group_layouts: [
Id(0, 1, Empty),
],
push_constant_ranges: [],
)),
CreateComputePipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
),
),
Submit(1, [
RunComputePass(
base: (
label: None,
commands: [
SetPipeline(Id(0, 1, Empty)),
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
),
Dispatch((4, 1, 1)),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
)
]),
]
)
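The raw `bits` values used for buffer usages in this test decode as shown in the sketch below; the constants mirror the flags named in the comments above and are listed for illustration only (`wgpu-types` holds the authoritative definitions).

// BufferUsage bits as referenced by the comments in buffer-zero-init.ron.
const MAP_READ: u32 = 1 << 0;  // 1
const MAP_WRITE: u32 = 1 << 1; // 2
const COPY_DST: u32 = 1 << 3;  // 8
const STORAGE: u32 = 1 << 7;   // 128

fn main() {
    assert_eq!(STORAGE | MAP_READ | MAP_WRITE, 131); // "with MAP_WRITE" buffer
    assert_eq!(STORAGE | MAP_READ, 129);             // "without MAP_WRITE" buffer
    assert_eq!(MAP_READ | COPY_DST, 9);              // "partially written" buffer
}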

View file

@ -1,5 +0,0 @@
#version 450
layout(local_size_x = 1) in;
void main() {
}

Binary data
gfx/wgpu/player/tests/data/empty.comp.spv

Binary file not shown.

View file

@ -0,0 +1,3 @@
[[stage(compute), workgroup_size(1)]]
fn main() {
}

View file

@ -1,7 +0,0 @@
#version 450
layout(location = 0) out vec4 outColor;
void main() {
outColor = vec4(1.0, 1.0, 1.0, 1.0);
}

Binary data
gfx/wgpu/player/tests/data/quad.frag.spv

Binary file not shown.

View file

@ -11,18 +11,18 @@
actions: [ actions: [
CreateShaderModule( CreateShaderModule(
id: Id(0, 1, Empty), id: Id(0, 1, Empty),
data: "quad.vert.spv", desc: (
), label: None,
CreateShaderModule( flags: (bits: 3),
id: Id(1, 1, Empty), ),
data: "quad.frag.spv", data: "quad.wgsl",
), ),
CreateTexture(Id(0, 1, Empty), ( CreateTexture(Id(0, 1, Empty), (
label: Some("Output Texture"), label: Some("Output Texture"),
size: ( size: (
width: 64, width: 64,
height: 64, height: 64,
depth: 1, depth_or_array_layers: 1,
), ),
mip_level_count: 1, mip_level_count: 1,
sample_count: 1, sample_count: 1,
@ -53,46 +53,31 @@
bind_group_layouts: [], bind_group_layouts: [],
push_constant_ranges: [], push_constant_ranges: [],
)), )),
CreateRenderPipeline(Id(0, 1, Empty), ( CreateRenderPipeline(
label: None, id: Id(0, 1, Empty),
layout: Some(Id(0, 1, Empty)), desc: (
vertex_stage: ( label: None,
module: Id(0, 1, Empty), layout: Some(Id(0, 1, Empty)),
entry_point: "main", vertex: (
), stage: (
fragment_stage: Some(( module: Id(0, 1, Empty),
module: Id(1, 1, Empty), entry_point: "vs_main",
entry_point: "main",
)),
rasterization_state: None,
primitive_topology: TriangleList,
color_states: [
(
format: Rgba8Unorm,
alpha_blend: (
src_factor: One,
dst_factor: Zero,
operation: Add,
),
color_blend: (
src_factor: One,
dst_factor: Zero,
operation: Add,
),
write_mask: (
bits: 15,
), ),
buffers: [],
), ),
], fragment: Some((
depth_stencil_state: None, stage: (
vertex_state: ( module: Id(0, 1, Empty),
index_format: Uint16, entry_point: "fs_main",
vertex_buffers: [], ),
targets: [
(
format: Rgba8Unorm,
),
],
)),
), ),
sample_count: 1, ),
sample_mask: 4294967295,
alpha_to_coverage_enabled: false,
)),
Submit(1, [ Submit(1, [
RunRenderPass( RunRenderPass(
base: ( base: (
@ -145,7 +130,7 @@
size: ( size: (
width: 64, width: 64,
height: 64, height: 64,
depth: 1, depth_or_array_layers: 1,
), ),
), ),
]), ]),

View file

@ -1,10 +0,0 @@
#version 450
out gl_PerVertex {
vec4 gl_Position;
};
void main() {
vec2 pos = vec2(gl_VertexIndex == 2 ? 3.0 : -1.0, gl_VertexIndex == 1 ? 3.0 : -1.0);
gl_Position = vec4(pos, 0.0, 1.0);
}

Binary data
gfx/wgpu/player/tests/data/quad.vert.spv

Binary file not shown.

View file

@ -0,0 +1,24 @@
[[builtin(vertex_index)]]
var<in> in_vertex_index: u32;
[[builtin(position)]]
var<out> out_pos: vec4<f32>;
[[stage(vertex)]]
fn vs_main() {
// hacky way to draw a large triangle
var tmp1: i32 = i32(in_vertex_index) / 2;
var tmp2: i32 = i32(in_vertex_index) & 1;
var pos: vec2<f32> = vec2<f32>(
f32(tmp1) * 4.0 - 1.0,
f32(tmp2) * 4.0 - 1.0
);
out_pos = vec4<f32>(pos, 0.0, 1.0);
}
[[location(0)]]
var<out> out_color: vec4<f32>;
[[stage(fragment)]]
fn fs_main() {
out_color = vec4<f32>(1.0, 1.0, 1.0, 1.0);
}
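The replacement quad.wgsl draws one oversized triangle instead of a quad; the sketch below evaluates the clip-space positions the vertex shader above produces for indices 0..3.

// Evaluating vs_main by hand:
//   tmp1 = i / 2, tmp2 = i & 1, pos = (tmp1 * 4 - 1, tmp2 * 4 - 1)
fn main() {
    for i in 0u32..3 {
        let tmp1 = (i / 2) as f32;
        let tmp2 = (i & 1) as f32;
        let pos = (tmp1 * 4.0 - 1.0, tmp2 * 4.0 - 1.0);
        println!("vertex {} -> {:?}", i, pos);
    }
    // vertex 0 -> (-1, -1), vertex 1 -> (-1, 3), vertex 2 -> (3, -1);
    // after clipping this covers exactly the [-1, 1] x [-1, 1] viewport.
}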

View file

@ -93,7 +93,6 @@ impl Test<'_> {
label: None, label: None,
features: self.features | wgt::Features::MAPPABLE_PRIMARY_BUFFERS, features: self.features | wgt::Features::MAPPABLE_PRIMARY_BUFFERS,
limits: wgt::Limits::default(), limits: wgt::Limits::default(),
shader_validation: true,
}, },
None, None,
device device
@ -144,7 +143,12 @@ impl Test<'_> {
} }
}; };
assert_eq!(&expected_data[..], contents); if &expected_data[..] != contents {
panic!(
"Test expectation is not met!\nBuffer content was:\n{:?}\nbut expected:\n{:?}",
contents, expected_data
);
}
} }
wgc::gfx_select!(device => global.clear_backend(())); wgc::gfx_select!(device => global.clear_backend(()));
@ -213,5 +217,12 @@ impl Corpus {
#[test] #[test]
fn test_api() { fn test_api() {
wgpu_subscriber::initialize_default_subscriber(
std::env::var("WGPU_CHROME_TRACE")
.as_ref()
.map(Path::new)
.ok(),
);
Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron")) Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron"))
} }

View file

@ -1,6 +1,6 @@
[package] [package]
name = "wgpu-core" name = "wgpu-core"
version = "0.6.0" version = "0.7.0"
authors = ["wgpu developers"] authors = ["wgpu developers"]
edition = "2018" edition = "2018"
description = "WebGPU core logic on gfx-hal" description = "WebGPU core logic on gfx-hal"
@ -13,10 +13,12 @@ license = "MPL-2.0"
[features] [features]
default = [] default = []
# Enable SPIRV-Cross
cross = ["gfx-backend-metal/cross"]
# Enable API tracing # Enable API tracing
trace = ["ron", "serde", "wgt/trace"] trace = ["ron", "serde", "wgt/trace", "arrayvec/serde"]
# Enable API replaying # Enable API replaying
replay = ["serde", "wgt/replay"] replay = ["serde", "wgt/replay", "arrayvec/serde"]
# Enable serializable compute/render passes, and bundle encoders. # Enable serializable compute/render passes, and bundle encoders.
serial-pass = ["serde", "wgt/serde", "arrayvec/serde"] serial-pass = ["serde", "wgt/serde", "arrayvec/serde"]
@ -25,8 +27,6 @@ arrayvec = "0.5"
bitflags = "1.0" bitflags = "1.0"
copyless = "0.1" copyless = "0.1"
fxhash = "0.2" fxhash = "0.2"
hal = { package = "gfx-hal", git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
gfx-backend-empty = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
parking_lot = "0.11" parking_lot = "0.11"
raw-window-handle = { version = "0.3", optional = true } raw-window-handle = { version = "0.3", optional = true }
ron = { version = "0.6", optional = true } ron = { version = "0.6", optional = true }
@ -34,32 +34,38 @@ serde = { version = "1.0", features = ["serde_derive"], optional = true }
smallvec = "1" smallvec = "1"
tracing = { version = "0.1", default-features = false, features = ["std"] } tracing = { version = "0.1", default-features = false, features = ["std"] }
thiserror = "1" thiserror = "1"
gpu-alloc = { git = "https://github.com/zakarumych/gpu-alloc", rev = "d07be73f9439a37c89f5b72f2500cbf0eb4ff613" }
gpu-descriptor = { git = "https://github.com/zakarumych/gpu-descriptor", rev = "831460c4b5120d9a74744d542f39a95b9816b5ab"} gpu-alloc = { version = "0.3", features = ["tracing"] }
gpu-descriptor = { version = "0.1", features = ["tracing"] }
hal = { package = "gfx-hal", git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-empty = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
[target.'cfg(all(not(target_arch = "wasm32"), all(unix, not(target_os = "ios"), not(target_os = "macos"))))'.dependencies]
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", features = ["naga"] }
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
[target.'cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))'.dependencies]
gfx-backend-metal = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", optional = true }
[target.'cfg(all(not(target_arch = "wasm32"), windows))'.dependencies]
gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8", features = ["naga"] }
[target.'cfg(target_arch = "wasm32")'.dependencies]
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "0a201d1c406b5119ec11068293a40e50ec0be4c8" }
[dependencies.naga] [dependencies.naga]
version = "0.2"
git = "https://github.com/gfx-rs/naga" git = "https://github.com/gfx-rs/naga"
rev = "96c80738650822de35f77ab6a589f309460c8f39" tag = "gfx-12"
features = ["spv-in", "spv-out", "wgsl-in"] features = ["spv-in", "spv-out", "wgsl-in"]
[dependencies.wgt] [dependencies.wgt]
path = "../wgpu-types" path = "../wgpu-types"
package = "wgpu-types" package = "wgpu-types"
version = "0.6" version = "0.7"
[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
#gfx-backend-gl = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
gfx-backend-metal = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354", optional = true }
[target.'cfg(windows)'.dependencies]
gfx-backend-dx12 = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
gfx-backend-dx11 = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
gfx-backend-vulkan = { git = "https://github.com/gfx-rs/gfx", rev = "1d14789011cb892f4c1a205d3f8a87d479c2e354" }
[dev-dependencies] [dev-dependencies]
loom = "0.3" loom = "0.3"

View file

@ -6,15 +6,15 @@ fn main() {
// Setup cfg aliases // Setup cfg aliases
cfg_aliases::cfg_aliases! { cfg_aliases::cfg_aliases! {
// Vendors/systems // Vendors/systems
ios: { target_os = "ios" }, wasm: { target_arch = "wasm32" },
macos: { target_os = "macos" }, apple: { any(target_os = "ios", target_os = "macos") },
apple: { any(ios, macos) }, unix_wo_apple: {all(unix, not(apple))},
// Backends // Backends
vulkan: { any(windows, all(unix, not(apple)), feature = "gfx-backend-vulkan") }, vulkan: { all(not(wasm), any(windows, unix_wo_apple, feature = "gfx-backend-vulkan")) },
metal: { apple }, metal: { all(not(wasm), apple) },
dx12: { windows }, dx12: { all(not(wasm), windows) },
dx11: { windows }, dx11: { all(not(wasm), windows) },
gl: { all(not(unix), not(apple), not(windows)) }, gl: { false },
} }
} }
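The aliases emitted by `cfg_aliases!` are consumed elsewhere with plain `cfg` attributes; a hypothetical sketch follows (the module and function names are placeholders, not the actual wgpu-core layout).

// build.rs emits e.g. `cargo:rustc-cfg=vulkan`, so code can gate on the alias.
#[cfg(vulkan)]
mod vulkan_backend {
    pub fn init() { println!("Vulkan backend compiled in"); }
}

#[cfg(metal)]
mod metal_backend {
    pub fn init() { println!("Metal backend compiled in"); }
}

fn main() {
    #[cfg(vulkan)]
    vulkan_backend::init();
    #[cfg(metal)]
    metal_backend::init();
}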

View file

@ -9,6 +9,7 @@ use crate::{
}, },
hub::Resource, hub::Resource,
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid}, id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
memory_init_tracker::MemoryInitTrackerAction,
track::{TrackerSet, DUMMY_SELECTOR}, track::{TrackerSet, DUMMY_SELECTOR},
validation::{MissingBufferUsageError, MissingTextureUsageError}, validation::{MissingBufferUsageError, MissingTextureUsageError},
FastHashMap, Label, LifeGuard, MultiRefCount, Stored, MAX_BIND_GROUPS, FastHashMap, Label, LifeGuard, MultiRefCount, Stored, MAX_BIND_GROUPS,
@ -58,11 +59,18 @@ pub enum CreateBindGroupError {
BindingArrayLengthMismatch { actual: usize, expected: usize }, BindingArrayLengthMismatch { actual: usize, expected: usize },
#[error("bound buffer range {range:?} does not fit in buffer of size {size}")] #[error("bound buffer range {range:?} does not fit in buffer of size {size}")]
BindingRangeTooLarge { BindingRangeTooLarge {
buffer: BufferId,
range: Range<wgt::BufferAddress>, range: Range<wgt::BufferAddress>,
size: u64, size: u64,
}, },
#[error("buffer binding size {actual} is less than minimum {min}")] #[error("buffer binding size {actual} is less than minimum {min}")]
BindingSizeTooSmall { actual: u64, min: u64 }, BindingSizeTooSmall {
buffer: BufferId,
actual: u64,
min: u64,
},
#[error("buffer binding size is zero")]
BindingZeroSize(BufferId),
#[error("number of bindings in bind group descriptor ({actual}) does not match the number of bindings defined in the bind group layout ({expected})")] #[error("number of bindings in bind group descriptor ({actual}) does not match the number of bindings defined in the bind group layout ({expected})")]
BindingsNumMismatch { actual: usize, expected: usize }, BindingsNumMismatch { actual: usize, expected: usize },
#[error("binding {0} is used at least twice in the descriptor")] #[error("binding {0} is used at least twice in the descriptor")]
@ -81,8 +89,14 @@ pub enum CreateBindGroupError {
SwapChainImage, SwapChainImage,
#[error("buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")] #[error("buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(wgt::BufferAddress), UnalignedBufferOffset(wgt::BufferAddress),
#[error("uniform buffer binding range exceeds `max_uniform_buffer_binding_size` limit")] #[error(
UniformBufferRangeTooLarge, "buffer binding {binding} range {given} exceeds `max_*_buffer_binding_size` limit {limit}"
)]
BufferRangeTooLarge {
binding: u32,
given: u32,
limit: u32,
},
#[error("binding {binding} has a different type ({actual:?}) than the one in the layout ({expected:?})")] #[error("binding {binding} has a different type ({actual:?}) than the one in the layout ({expected:?})")]
WrongBindingType { WrongBindingType {
// Index of the binding // Index of the binding
@ -92,10 +106,36 @@ pub enum CreateBindGroupError {
// Human-readable description of expected types // Human-readable description of expected types
expected: &'static str, expected: &'static str,
}, },
#[error("texture binding {binding} expects multisampled = {layout_multisampled}, but given a view with samples = {view_samples}")]
InvalidTextureMultisample {
binding: u32,
layout_multisampled: bool,
view_samples: u32,
},
#[error("texture binding {binding} expects sample type = {layout_sample_type:?}, but given a view with format = {view_format:?}")]
InvalidTextureSampleType {
binding: u32,
layout_sample_type: wgt::TextureSampleType,
view_format: wgt::TextureFormat,
},
#[error("texture binding {binding} expects dimension = {layout_dimension:?}, but given a view with dimension = {view_dimension:?}")]
InvalidTextureDimension {
binding: u32,
layout_dimension: wgt::TextureViewDimension,
view_dimension: wgt::TextureViewDimension,
},
#[error("storage texture binding {binding} expects format = {layout_format:?}, but given a view with format = {view_format:?}")]
InvalidStorageTextureFormat {
binding: u32,
layout_format: wgt::TextureFormat,
view_format: wgt::TextureFormat,
},
#[error("the given sampler is/is not a comparison sampler, while the layout type indicates otherwise")] #[error("the given sampler is/is not a comparison sampler, while the layout type indicates otherwise")]
WrongSamplerComparison, WrongSamplerComparison,
#[error("bound texture views can not have both depth and stencil aspects enabled")] #[error("bound texture views can not have both depth and stencil aspects enabled")]
DepthStencilAspect, DepthStencilAspect,
#[error("the adapter does not support simultaneous read + write storage texture access for the format {0:?}")]
StorageReadWriteNotSupported(wgt::TextureFormat),
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -107,10 +147,11 @@ pub enum BindingZone {
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
#[error("too many bindings of type {kind:?} in {zone}, limit is {count}")] #[error("too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}")]
pub struct BindingTypeMaxCountError { pub struct BindingTypeMaxCountError {
pub kind: BindingTypeMaxCountErrorKind, pub kind: BindingTypeMaxCountErrorKind,
pub zone: BindingZone, pub zone: BindingZone,
pub limit: u32,
pub count: u32, pub count: u32,
} }
@ -173,7 +214,12 @@ impl PerStageBindingTypeCounter {
) -> Result<(), BindingTypeMaxCountError> { ) -> Result<(), BindingTypeMaxCountError> {
let (zone, count) = self.max(); let (zone, count) = self.max();
if limit < count { if limit < count {
Err(BindingTypeMaxCountError { kind, zone, count }) Err(BindingTypeMaxCountError {
kind,
zone,
limit,
count,
})
} else { } else {
Ok(()) Ok(())
} }
@ -242,6 +288,7 @@ impl BindingTypeMaxCountValidator {
return Err(BindingTypeMaxCountError { return Err(BindingTypeMaxCountError {
kind: BindingTypeMaxCountErrorKind::DynamicUniformBuffers, kind: BindingTypeMaxCountErrorKind::DynamicUniformBuffers,
zone: BindingZone::Pipeline, zone: BindingZone::Pipeline,
limit: limits.max_dynamic_uniform_buffers_per_pipeline_layout,
count: self.dynamic_uniform_buffers, count: self.dynamic_uniform_buffers,
}); });
} }
@ -249,6 +296,7 @@ impl BindingTypeMaxCountValidator {
return Err(BindingTypeMaxCountError { return Err(BindingTypeMaxCountError {
kind: BindingTypeMaxCountErrorKind::DynamicStorageBuffers, kind: BindingTypeMaxCountErrorKind::DynamicStorageBuffers,
zone: BindingZone::Pipeline, zone: BindingZone::Pipeline,
limit: limits.max_dynamic_storage_buffers_per_pipeline_layout,
count: self.dynamic_storage_buffers, count: self.dynamic_storage_buffers,
}); });
} }
@ -423,7 +471,7 @@ pub struct PipelineLayoutDescriptor<'a> {
/// must define the range in push constant memory that corresponds to its single `layout(push_constant)` /// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
/// uniform block. /// uniform block.
/// ///
/// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] must be enabled. /// If this array is non-empty, the [`Features::PUSH_CONSTANTS`](wgt::Features::PUSH_CONSTANTS) must be enabled.
pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>, pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>,
} }
@ -571,6 +619,7 @@ pub struct BindGroup<B: hal::Backend> {
pub(crate) layout_id: Valid<BindGroupLayoutId>, pub(crate) layout_id: Valid<BindGroupLayoutId>,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
pub(crate) used: TrackerSet, pub(crate) used: TrackerSet,
pub(crate) used_buffer_ranges: Vec<MemoryInitTrackerAction<BufferId>>,
pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>, pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,
} }

View file

@ -62,6 +62,13 @@ impl<B: hal::Backend> CommandPool<B> {
} }
self.available.pop().unwrap() self.available.pop().unwrap()
} }
fn destroy(mut self, device: &B::Device) {
unsafe {
self.raw.free(self.available.into_iter());
device.destroy_command_pool(self.raw);
}
}
} }
#[derive(Debug)] #[derive(Debug)]
@ -87,13 +94,11 @@ impl<B: GfxBackend> CommandAllocator<B> {
#[cfg(feature = "trace")] enable_tracing: bool, #[cfg(feature = "trace")] enable_tracing: bool,
) -> Result<CommandBuffer<B>, CommandAllocatorError> { ) -> Result<CommandBuffer<B>, CommandAllocatorError> {
//debug_assert_eq!(device_id.backend(), B::VARIANT); //debug_assert_eq!(device_id.backend(), B::VARIANT);
let _ = label; // silence warning on release
let thread_id = thread::current().id(); let thread_id = thread::current().id();
let mut inner = self.inner.lock(); let mut inner = self.inner.lock();
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
let pool = match inner.pools.entry(thread_id) { let pool = match inner.pools.entry(thread_id) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => { Entry::Vacant(e) => {
tracing::info!("Starting on thread {:?}", thread_id); tracing::info!("Starting on thread {:?}", thread_id);
let raw = unsafe { let raw = unsafe {
@ -104,27 +109,30 @@ impl<B: GfxBackend> CommandAllocator<B> {
) )
.or(Err(DeviceError::OutOfMemory))? .or(Err(DeviceError::OutOfMemory))?
}; };
let pool = CommandPool { e.insert(CommandPool {
raw, raw,
total: 0, total: 0,
available: Vec::new(), available: Vec::new(),
pending: Vec::new(), pending: Vec::new(),
}; })
e.insert(pool)
} }
Entry::Occupied(e) => e.into_mut(),
}; };
let init = pool.allocate(); //Note: we have to allocate the first buffer right here, or otherwise
// the pool may be cleaned up by maintenance called from another thread.
Ok(CommandBuffer { Ok(CommandBuffer {
raw: vec![init], raw: vec![pool.allocate()],
is_recording: true, is_recording: true,
recorded_thread_id: thread_id, recorded_thread_id: thread_id,
device_id, device_id,
trackers: TrackerSet::new(B::VARIANT), trackers: TrackerSet::new(B::VARIANT),
used_swap_chain: None, used_swap_chains: Default::default(),
buffer_memory_init_actions: Default::default(),
limits, limits,
private_features, private_features,
has_labels: label.is_some(),
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
commands: if enable_tracing { commands: if enable_tracing {
Some(Vec::new()) Some(Vec::new())
@ -209,15 +217,26 @@ impl<B: hal::Backend> CommandAllocator<B> {
.push((raw, submit_index)); .push((raw, submit_index));
} }
pub fn after_submit(&self, cmd_buf: CommandBuffer<B>, submit_index: SubmissionIndex) { pub fn after_submit(
&self,
cmd_buf: CommandBuffer<B>,
device: &B::Device,
submit_index: SubmissionIndex,
) {
// Record this command buffer as pending // Record this command buffer as pending
let mut inner = self.inner.lock(); let mut inner = self.inner.lock();
let clear_label = cmd_buf.has_labels;
inner inner
.pools .pools
.get_mut(&cmd_buf.recorded_thread_id) .get_mut(&cmd_buf.recorded_thread_id)
.unwrap() .unwrap()
.pending .pending
.extend(cmd_buf.raw.into_iter().map(|raw| (raw, submit_index))); .extend(cmd_buf.raw.into_iter().map(|mut raw| {
if clear_label {
unsafe { device.set_command_buffer_name(&mut raw, "") };
}
(raw, submit_index)
}));
} }
pub fn maintain(&self, device: &B::Device, last_done_index: SubmissionIndex) { pub fn maintain(&self, device: &B::Device, last_done_index: SubmissionIndex) {
@ -232,11 +251,8 @@ impl<B: hal::Backend> CommandAllocator<B> {
} }
for thread_id in remove_threads { for thread_id in remove_threads {
tracing::info!("Removing from thread {:?}", thread_id); tracing::info!("Removing from thread {:?}", thread_id);
let mut pool = inner.pools.remove(&thread_id).unwrap(); let pool = inner.pools.remove(&thread_id).unwrap();
unsafe { pool.destroy(device);
pool.raw.free(pool.available);
device.destroy_command_pool(pool.raw);
}
} }
} }
@ -253,10 +269,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.total pool.total
); );
} }
unsafe { pool.destroy(device);
pool.raw.free(pool.available);
device.destroy_command_pool(pool.raw);
}
} }
} }
} }
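The allocator above keeps one command pool per recording thread, created lazily through the `HashMap` entry API; a simplified stand-alone sketch of that lookup is shown below (the `Pool` type is a placeholder, not the real CommandPool).

use std::collections::{hash_map::Entry, HashMap};
use std::thread;

#[derive(Debug)]
struct Pool { total: usize }

fn main() {
    let mut pools: HashMap<thread::ThreadId, Pool> = HashMap::new();
    let thread_id = thread::current().id();
    // Vacant: create the pool for this thread; Occupied: reuse it.
    let pool = match pools.entry(thread_id) {
        Entry::Vacant(e) => e.insert(Pool { total: 0 }),
        Entry::Occupied(e) => e.into_mut(),
    };
    pool.total += 1; // e.g. allocate the first command buffer right away
    println!("{:?} -> {:?}", thread_id, pools);
}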

Просмотреть файл

@ -11,236 +11,213 @@ use crate::{
}; };
use arrayvec::ArrayVec; use arrayvec::ArrayVec;
use std::slice;
use wgt::DynamicOffset;
type BindGroupMask = u8; type BindGroupMask = u8;
#[derive(Clone, Debug)] mod compat {
pub(super) struct BindGroupPair { use std::ops::Range;
layout_id: Valid<BindGroupLayoutId>,
group_id: Stored<BindGroupId>,
}
#[derive(Debug)] #[derive(Debug)]
pub(super) enum LayoutChange<'a> { struct Entry<T> {
Unchanged, assigned: Option<T>,
Match(Valid<BindGroupId>, &'a [DynamicOffset]), expected: Option<T>,
Mismatch,
}
#[derive(Debug)]
pub enum Provision {
Unchanged,
Changed { was_compatible: bool },
}
#[derive(Clone)]
pub(super) struct FollowUpIter<'a> {
iter: slice::Iter<'a, BindGroupEntry>,
}
impl<'a> Iterator for FollowUpIter<'a> {
type Item = (Valid<BindGroupId>, &'a [DynamicOffset]);
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
.and_then(|entry| Some((entry.actual_value()?, entry.dynamic_offsets.as_slice())))
} }
} impl<T> Default for Entry<T> {
fn default() -> Self {
#[derive(Clone, Default, Debug)] Entry {
pub(super) struct BindGroupEntry { assigned: None,
expected_layout_id: Option<Valid<BindGroupLayoutId>>, expected: None,
provided: Option<BindGroupPair>,
dynamic_offsets: Vec<DynamicOffset>,
}
impl BindGroupEntry {
fn provide<B: GfxBackend>(
&mut self,
bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>,
offsets: &[DynamicOffset],
) -> Provision {
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
let was_compatible = match self.provided {
Some(BindGroupPair {
layout_id,
ref group_id,
}) => {
if group_id.value == bind_group_id && offsets == self.dynamic_offsets.as_slice() {
assert_eq!(layout_id, bind_group.layout_id);
return Provision::Unchanged;
}
self.expected_layout_id == Some(layout_id)
} }
None => false, }
};
self.provided = Some(BindGroupPair {
layout_id: bind_group.layout_id,
group_id: Stored {
value: bind_group_id,
ref_count: bind_group.life_guard.add_ref(),
},
});
self.dynamic_offsets.clear();
self.dynamic_offsets.extend_from_slice(offsets);
Provision::Changed { was_compatible }
} }
impl<T: Copy + PartialEq> Entry<T> {
fn is_active(&self) -> bool {
self.assigned.is_some() && self.expected.is_some()
}
pub fn expect_layout( fn is_valid(&self) -> bool {
&mut self, self.expected.is_none() || self.expected == self.assigned
bind_group_layout_id: Valid<BindGroupLayoutId>,
) -> LayoutChange {
let some = Some(bind_group_layout_id);
if self.expected_layout_id != some {
self.expected_layout_id = some;
match self.provided {
Some(BindGroupPair {
layout_id,
ref group_id,
}) if layout_id == bind_group_layout_id => {
LayoutChange::Match(group_id.value, &self.dynamic_offsets)
}
Some(_) | None => LayoutChange::Mismatch,
}
} else {
LayoutChange::Unchanged
} }
} }
fn is_valid(&self) -> Option<bool> { #[derive(Debug)]
match (self.expected_layout_id, self.provided.as_ref()) { pub struct Manager<T> {
(None, None) => Some(true), entries: [Entry<T>; crate::MAX_BIND_GROUPS],
(None, Some(_)) => None,
(Some(_), None) => Some(false),
(Some(layout), Some(pair)) => Some(layout == pair.layout_id),
}
} }
fn actual_value(&self) -> Option<Valid<BindGroupId>> { impl<T: Copy + PartialEq> Manager<T> {
self.expected_layout_id.and_then(|layout_id| { pub fn new() -> Self {
self.provided.as_ref().and_then(|pair| { Manager {
if pair.layout_id == layout_id { entries: Default::default(),
Some(pair.group_id.value) }
}
fn make_range(&self, start_index: usize) -> Range<usize> {
// find first incompatible entry
let end = self
.entries
.iter()
.position(|e| e.expected.is_none() || e.assigned != e.expected)
.unwrap_or(self.entries.len());
start_index..end.max(start_index)
}
pub fn update_expectations(&mut self, expectations: &[T]) -> Range<usize> {
let start_index = self
.entries
.iter()
.zip(expectations)
.position(|(e, &expect)| e.expected != Some(expect))
.unwrap_or(expectations.len());
for (e, &expect) in self.entries[start_index..]
.iter_mut()
.zip(expectations[start_index..].iter())
{
e.expected = Some(expect);
}
for e in self.entries[expectations.len()..].iter_mut() {
e.expected = None;
}
self.make_range(start_index)
}
pub fn assign(&mut self, index: usize, value: T) -> Range<usize> {
self.entries[index].assigned = Some(value);
self.make_range(index)
}
pub fn list_active(&self) -> impl Iterator<Item = usize> + '_ {
self.entries
.iter()
.enumerate()
.filter_map(|(i, e)| if e.is_active() { Some(i) } else { None })
}
pub fn invalid_mask(&self) -> super::BindGroupMask {
self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
if entry.is_valid() {
mask
} else { } else {
None mask | 1u8 << i
} }
}) })
}) }
}
#[test]
fn test_compatibility() {
let mut man = Manager::<i32>::new();
man.entries[0] = Entry {
expected: Some(3),
assigned: Some(2),
};
man.entries[1] = Entry {
expected: Some(1),
assigned: Some(1),
};
man.entries[2] = Entry {
expected: Some(4),
assigned: Some(5),
};
// check that we rebind [1] after [0] became compatible
assert_eq!(man.assign(0, 3), 0..2);
// check that nothing is rebound
assert_eq!(man.update_expectations(&[3, 2]), 1..1);
// check that [1] and [2] are rebound on expectations change
assert_eq!(man.update_expectations(&[3, 1, 5]), 1..3);
// reset the first two bindings
assert_eq!(man.update_expectations(&[4, 6, 5]), 0..0);
// check that nothing is rebound, even if there is a match,
// since earlier binding is incompatible.
assert_eq!(man.assign(1, 6), 1..1);
// finally, bind everything
assert_eq!(man.assign(0, 4), 0..3);
} }
} }
#[derive(Debug, Default)]
pub(super) struct EntryPayload {
pub(super) group_id: Option<Stored<BindGroupId>>,
pub(super) dynamic_offsets: Vec<wgt::DynamicOffset>,
}
#[derive(Debug)] #[derive(Debug)]
pub struct Binder { pub(super) struct Binder {
pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored` pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
pub(super) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>, manager: compat::Manager<Valid<BindGroupLayoutId>>,
payloads: [EntryPayload; MAX_BIND_GROUPS],
} }
impl Binder { impl Binder {
pub(super) fn new(max_bind_groups: u32) -> Self { pub(super) fn new() -> Self {
Self { Binder {
pipeline_layout_id: None, pipeline_layout_id: None,
entries: (0..max_bind_groups) manager: compat::Manager::new(),
.map(|_| BindGroupEntry::default()) payloads: Default::default(),
.collect(),
} }
} }
pub(super) fn reset(&mut self) { pub(super) fn reset(&mut self) {
self.pipeline_layout_id = None; self.pipeline_layout_id = None;
self.entries.clear(); self.manager = compat::Manager::new();
} for payload in self.payloads.iter_mut() {
payload.group_id = None;
pub(super) fn change_pipeline_layout<B: GfxBackend>( payload.dynamic_offsets.clear();
&mut self,
guard: &Storage<PipelineLayout<B>, PipelineLayoutId>,
new_id: Valid<PipelineLayoutId>,
) {
let old_id_opt = self.pipeline_layout_id.replace(new_id);
let new = &guard[new_id];
let length = if let Some(old_id) = old_id_opt {
let old = &guard[old_id];
if old.push_constant_ranges == new.push_constant_ranges {
new.bind_group_layout_ids.len()
} else {
0
}
} else {
0
};
for entry in self.entries[length..].iter_mut() {
entry.expected_layout_id = None;
} }
} }
/// Attempt to set the value of the specified bind group index. pub(super) fn change_pipeline_layout<'a, B: GfxBackend>(
/// Returns Some() when the new bind group is ready to be actually bound &'a mut self,
/// (i.e. compatible with current expectations). Also returns an iterator guard: &Storage<PipelineLayout<B>, PipelineLayoutId>,
/// of bind group IDs to be bound with it: those are compatible bind groups new_id: Valid<PipelineLayoutId>,
/// that were previously blocked because the current one was incompatible. ) -> (usize, &'a [EntryPayload]) {
pub(super) fn provide_entry<'a, B: GfxBackend>( let old_id_opt = self.pipeline_layout_id.replace(new_id);
let new = &guard[new_id];
let mut bind_range = self.manager.update_expectations(&new.bind_group_layout_ids);
if let Some(old_id) = old_id_opt {
let old = &guard[old_id];
// root constants are the base compatibility property
if old.push_constant_ranges != new.push_constant_ranges {
bind_range.start = 0;
}
}
(bind_range.start, &self.payloads[bind_range])
}
pub(super) fn assign_group<'a, B: GfxBackend>(
&'a mut self, &'a mut self,
index: usize, index: usize,
bind_group_id: Valid<BindGroupId>, bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>, bind_group: &BindGroup<B>,
offsets: &[DynamicOffset], offsets: &[wgt::DynamicOffset],
) -> Option<(Valid<PipelineLayoutId>, FollowUpIter<'a>)> { ) -> &'a [EntryPayload] {
tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id); tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend()); debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
match self.entries[index].provide(bind_group_id, bind_group, offsets) { let payload = &mut self.payloads[index];
Provision::Unchanged => None, payload.group_id = Some(Stored {
Provision::Changed { was_compatible, .. } => { value: bind_group_id,
let compatible_count = self.compatible_count(); ref_count: bind_group.life_guard.add_ref(),
if index < compatible_count { });
let end = compatible_count.min(if was_compatible { payload.dynamic_offsets.clear();
index + 1 payload.dynamic_offsets.extend_from_slice(offsets);
} else {
self.entries.len() let bind_range = self.manager.assign(index, bind_group.layout_id);
}); &self.payloads[bind_range]
tracing::trace!("\t\tbinding up to {}", end);
Some((
self.pipeline_layout_id?,
FollowUpIter {
iter: self.entries[index + 1..end].iter(),
},
))
} else {
tracing::trace!("\t\tskipping above compatible {}", compatible_count);
None
}
}
}
} }
pub(super) fn list_active(&self) -> impl Iterator<Item = Valid<BindGroupId>> + '_ { pub(super) fn list_active(&self) -> impl Iterator<Item = Valid<BindGroupId>> + '_ {
self.entries.iter().filter_map(|e| match e.provided { let payloads = &self.payloads;
Some(ref pair) if e.expected_layout_id.is_some() => Some(pair.group_id.value), self.manager
_ => None, .list_active()
}) .map(move |index| payloads[index].group_id.as_ref().unwrap().value)
} }
pub(super) fn invalid_mask(&self) -> BindGroupMask { pub(super) fn invalid_mask(&self) -> BindGroupMask {
self.entries.iter().enumerate().fold(0, |mask, (i, entry)| { self.manager.invalid_mask()
if entry.is_valid().unwrap_or(true) {
mask
} else {
mask | 1u8 << i
}
})
}
fn compatible_count(&self) -> usize {
self.entries
.iter()
.position(|entry| !entry.is_valid().unwrap_or(false))
.unwrap_or_else(|| self.entries.len())
} }
} }

View file

@ -49,6 +49,7 @@ use crate::{
}, },
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Resource, Storage, Token}, hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Resource, Storage, Token},
id, id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::BufferUse, resource::BufferUse,
span, span,
track::{TrackerSet, UsageConflict}, track::{TrackerSet, UsageConflict},
@ -56,7 +57,7 @@ use crate::{
Label, LabelHelpers, LifeGuard, Stored, MAX_BIND_GROUPS, Label, LabelHelpers, LifeGuard, Stored, MAX_BIND_GROUPS,
}; };
use arrayvec::ArrayVec; use arrayvec::ArrayVec;
use std::{borrow::Cow, iter, ops::Range}; use std::{borrow::Cow, iter, mem, ops::Range};
use thiserror::Error; use thiserror::Error;
/// Describes a [`RenderBundleEncoder`]. /// Describes a [`RenderBundleEncoder`].
@ -93,7 +94,7 @@ impl RenderBundleEncoder {
) -> Result<Self, CreateRenderBundleError> { ) -> Result<Self, CreateRenderBundleError> {
span!(_guard, INFO, "RenderBundleEncoder::new"); span!(_guard, INFO, "RenderBundleEncoder::new");
Ok(Self { Ok(Self {
base: base.unwrap_or_else(BasePass::new), base: base.unwrap_or_else(|| BasePass::new(&desc.label)),
parent_id, parent_id,
context: RenderPassContext { context: RenderPassContext {
attachments: AttachmentData { attachments: AttachmentData {
@ -114,7 +115,7 @@ impl RenderBundleEncoder {
pub fn dummy(parent_id: id::DeviceId) -> Self { pub fn dummy(parent_id: id::DeviceId) -> Self {
Self { Self {
base: BasePass::new(), base: BasePass::new(&None),
parent_id, parent_id,
context: RenderPassContext { context: RenderPassContext {
attachments: AttachmentData { attachments: AttachmentData {
@ -127,6 +128,11 @@ impl RenderBundleEncoder {
} }
} }
#[cfg(feature = "trace")]
pub(crate) fn to_base_pass(&self) -> BasePass<RenderCommand> {
BasePass::from_ref(self.base.as_ref())
}
pub fn parent(&self) -> id::DeviceId { pub fn parent(&self) -> id::DeviceId {
self.parent_id self.parent_id
} }
@ -159,6 +165,7 @@ impl RenderBundleEncoder {
let mut commands = Vec::new(); let mut commands = Vec::new();
let mut base = self.base.as_ref(); let mut base = self.base.as_ref();
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>; let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
let mut buffer_memory_init_actions = Vec::new();
for &command in base.commands { for &command in base.commands {
match command { match command {
@ -204,6 +211,8 @@ impl RenderBundleEncoder {
.map_pass_err(scope); .map_pass_err(scope);
} }
buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges);
state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets); state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets);
state state
.trackers .trackers
@ -233,7 +242,7 @@ impl RenderBundleEncoder {
pipeline_layout_id = Some(pipeline.layout_id.value); pipeline_layout_id = Some(pipeline.layout_id.value);
state.set_pipeline( state.set_pipeline(
pipeline.index_format, pipeline.strip_index_format,
&pipeline.vertex_strides, &pipeline.vertex_strides,
&layout.bind_group_layout_ids, &layout.bind_group_layout_ids,
&layout.push_constant_ranges, &layout.push_constant_ranges,
@ -245,6 +254,7 @@ impl RenderBundleEncoder {
} }
RenderCommand::SetIndexBuffer { RenderCommand::SetIndexBuffer {
buffer_id, buffer_id,
index_format,
offset, offset,
size, size,
} => { } => {
@ -261,6 +271,12 @@ impl RenderBundleEncoder {
Some(s) => offset + s.get(), Some(s) => offset + s.get(),
None => buffer.size, None => buffer.size,
}; };
buffer_memory_init_actions.push(MemoryInitTrackerAction {
id: buffer_id,
range: offset..end,
kind: MemoryInitKind::NeedsInitializedMemory,
});
state.index.set_format(index_format);
state.index.set_buffer(buffer_id, offset..end); state.index.set_buffer(buffer_id, offset..end);
} }
RenderCommand::SetVertexBuffer { RenderCommand::SetVertexBuffer {
@ -282,6 +298,11 @@ impl RenderBundleEncoder {
Some(s) => offset + s.get(), Some(s) => offset + s.get(),
None => buffer.size, None => buffer.size,
}; };
buffer_memory_init_actions.push(MemoryInitTrackerAction {
id: buffer_id,
range: offset..end,
kind: MemoryInitKind::NeedsInitializedMemory,
});
state.vertex[slot as usize].set_buffer(buffer_id, offset..end); state.vertex[slot as usize].set_buffer(buffer_id, offset..end);
} }
RenderCommand::SetPushConstant { RenderCommand::SetPushConstant {
@ -310,21 +331,27 @@ impl RenderBundleEncoder {
first_vertex, first_vertex,
first_instance, first_instance,
} => { } => {
let scope = PassErrorScope::Draw; let scope = PassErrorScope::Draw {
let (vertex_limit, instance_limit) = state.vertex_limits(); indexed: false,
indirect: false,
pipeline: state.pipeline.last_state,
};
let vertex_limits = state.vertex_limits();
let last_vertex = first_vertex + vertex_count; let last_vertex = first_vertex + vertex_count;
if last_vertex > vertex_limit { if last_vertex > vertex_limits.vertex_limit {
return Err(DrawError::VertexBeyondLimit { return Err(DrawError::VertexBeyondLimit {
last_vertex, last_vertex,
vertex_limit, vertex_limit: vertex_limits.vertex_limit,
slot: vertex_limits.vertex_limit_slot,
}) })
.map_pass_err(scope); .map_pass_err(scope);
} }
let last_instance = first_instance + instance_count; let last_instance = first_instance + instance_count;
if last_instance > instance_limit { if last_instance > vertex_limits.instance_limit {
return Err(DrawError::InstanceBeyondLimit { return Err(DrawError::InstanceBeyondLimit {
last_instance, last_instance,
instance_limit, instance_limit: vertex_limits.instance_limit,
slot: vertex_limits.instance_limit_slot,
}) })
.map_pass_err(scope); .map_pass_err(scope);
} }
@ -339,9 +366,13 @@ impl RenderBundleEncoder {
base_vertex: _, base_vertex: _,
first_instance, first_instance,
} => { } => {
let scope = PassErrorScope::DrawIndexed; let scope = PassErrorScope::Draw {
indexed: true,
indirect: false,
pipeline: state.pipeline.last_state,
};
//TODO: validate that base_vertex + max_index() is within the provided range //TODO: validate that base_vertex + max_index() is within the provided range
let (_, instance_limit) = state.vertex_limits(); let vertex_limits = state.vertex_limits();
let index_limit = state.index.limit(); let index_limit = state.index.limit();
let last_index = first_index + index_count; let last_index = first_index + index_count;
if last_index > index_limit { if last_index > index_limit {
@ -352,10 +383,11 @@ impl RenderBundleEncoder {
.map_pass_err(scope); .map_pass_err(scope);
} }
let last_instance = first_instance + instance_count; let last_instance = first_instance + instance_count;
if last_instance > instance_limit { if last_instance > vertex_limits.instance_limit {
return Err(DrawError::InstanceBeyondLimit { return Err(DrawError::InstanceBeyondLimit {
last_instance, last_instance,
instance_limit, instance_limit: vertex_limits.instance_limit,
slot: vertex_limits.instance_limit_slot,
}) })
.map_pass_err(scope); .map_pass_err(scope);
} }
@ -366,11 +398,15 @@ impl RenderBundleEncoder {
} }
RenderCommand::MultiDrawIndirect { RenderCommand::MultiDrawIndirect {
buffer_id, buffer_id,
offset: _, offset,
count: None, count: None,
indexed: false, indexed: false,
} => { } => {
let scope = PassErrorScope::DrawIndirect; let scope = PassErrorScope::Draw {
indexed: false,
indirect: true,
pipeline: state.pipeline.last_state,
};
let buffer = state let buffer = state
.trackers .trackers
.buffers .buffers
@ -379,17 +415,34 @@ impl RenderBundleEncoder {
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT) check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
.map_pass_err(scope)?; .map_pass_err(scope)?;
buffer_memory_init_actions.extend(
buffer
.initialization_status
.check(
offset..(offset + mem::size_of::<wgt::DrawIndirectArgs>() as u64),
)
.map(|range| MemoryInitTrackerAction {
id: buffer_id,
range,
kind: MemoryInitKind::NeedsInitializedMemory,
}),
);
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds()); commands.extend(state.flush_binds());
commands.push(command); commands.push(command);
} }
RenderCommand::MultiDrawIndirect { RenderCommand::MultiDrawIndirect {
buffer_id, buffer_id,
offset: _, offset,
count: None, count: None,
indexed: true, indexed: true,
} => { } => {
let scope = PassErrorScope::DrawIndexedIndirect; let scope = PassErrorScope::Draw {
indexed: true,
indirect: true,
pipeline: state.pipeline.last_state,
};
let buffer = state let buffer = state
.trackers .trackers
.buffers .buffers
@ -399,6 +452,19 @@ impl RenderBundleEncoder {
check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT) check_buffer_usage(buffer.usage, wgt::BufferUsage::INDIRECT)
.map_pass_err(scope)?; .map_pass_err(scope)?;
buffer_memory_init_actions.extend(
buffer
.initialization_status
.check(
offset..(offset + mem::size_of::<wgt::DrawIndirectArgs>() as u64),
)
.map(|range| MemoryInitTrackerAction {
id: buffer_id,
range,
kind: MemoryInitKind::NeedsInitializedMemory,
}),
);
commands.extend(state.index.flush()); commands.extend(state.index.flush());
commands.extend(state.flush_vertices()); commands.extend(state.flush_vertices());
commands.extend(state.flush_binds()); commands.extend(state.flush_binds());
@ -409,6 +475,9 @@ impl RenderBundleEncoder {
RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(), RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(),
RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(), RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(),
RenderCommand::PopDebugGroup => unimplemented!(), RenderCommand::PopDebugGroup => unimplemented!(),
RenderCommand::WriteTimestamp { .. }
| RenderCommand::BeginPipelineStatisticsQuery { .. }
| RenderCommand::EndPipelineStatisticsQuery => unimplemented!(),
RenderCommand::ExecuteBundle(_) RenderCommand::ExecuteBundle(_)
| RenderCommand::SetBlendColor(_) | RenderCommand::SetBlendColor(_)
| RenderCommand::SetStencilReference(_) | RenderCommand::SetStencilReference(_)
@ -417,9 +486,9 @@ impl RenderBundleEncoder {
} }
} }
let _ = desc.label; //TODO: actually use
Ok(RenderBundle { Ok(RenderBundle {
base: BasePass { base: BasePass {
label: desc.label.as_ref().map(|cow| cow.to_string()),
commands, commands,
dynamic_offsets: state.flat_dynamic_offsets, dynamic_offsets: state.flat_dynamic_offsets,
string_data: Vec::new(), string_data: Vec::new(),
@ -430,10 +499,27 @@ impl RenderBundleEncoder {
ref_count: device.life_guard.add_ref(), ref_count: device.life_guard.add_ref(),
}, },
used: state.trackers, used: state.trackers,
buffer_memory_init_actions,
context: self.context, context: self.context,
life_guard: LifeGuard::new(desc.label.borrow_or_default()), life_guard: LifeGuard::new(desc.label.borrow_or_default()),
}) })
} }
pub fn set_index_buffer(
&mut self,
buffer_id: id::BufferId,
index_format: wgt::IndexFormat,
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) {
span!(_guard, DEBUG, "RenderBundle::set_index_buffer");
self.base.commands.push(RenderCommand::SetIndexBuffer {
buffer_id,
index_format,
offset,
size,
});
}
} }
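// Illustration, not part of the patch: with this change the index format travels
// with the set_index_buffer call instead of being baked into the pipeline. A
// minimal caller-side sketch; `encoder` and `buffer_id` are assumed to come from
// the embedder, and the sizes are made up.
#[allow(dead_code)]
fn set_index_buffer_example(encoder: &mut RenderBundleEncoder, buffer_id: id::BufferId) {
    encoder.set_index_buffer(
        buffer_id,
        wgt::IndexFormat::Uint16,
        0,                           // byte offset into the buffer
        wgt::BufferSize::new(6 * 2), // six u16 indices
    );
    // The format recorded here is later checked against the format declared by
    // the pipeline (see `UnmatchedIndexFormats` in draw.rs below).
}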
/// Error type returned from `RenderBundleEncoder::new` if the sample count is invalid. /// Error type returned from `RenderBundleEncoder::new` if the sample count is invalid.
@ -462,6 +548,7 @@ pub struct RenderBundle {
base: BasePass<RenderCommand>, base: BasePass<RenderCommand>,
pub(crate) device_id: Stored<id::DeviceId>, pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) used: TrackerSet, pub(crate) used: TrackerSet,
pub(crate) buffer_memory_init_actions: Vec<MemoryInitTrackerAction<id::BufferId>>,
pub(crate) context: RenderPassContext, pub(crate) context: RenderPassContext,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }
@ -470,11 +557,6 @@ unsafe impl Send for RenderBundle {}
unsafe impl Sync for RenderBundle {} unsafe impl Sync for RenderBundle {}
impl RenderBundle { impl RenderBundle {
#[cfg(feature = "trace")]
pub(crate) fn to_base_pass(&self) -> BasePass<RenderCommand> {
BasePass::from_ref(self.base.as_ref())
}
/// Actually encode the contents into a native command buffer. /// Actually encode the contents into a native command buffer.
/// ///
/// This is partially duplicating the logic of `command_encoder_run_render_pass`. /// This is partially duplicating the logic of `command_encoder_run_render_pass`.
@ -498,8 +580,10 @@ impl RenderBundle {
use hal::command::CommandBuffer as _; use hal::command::CommandBuffer as _;
let mut offsets = self.base.dynamic_offsets.as_slice(); let mut offsets = self.base.dynamic_offsets.as_slice();
let mut index_type = hal::IndexType::U16;
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>; let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
if let Some(ref label) = self.base.label {
cmd_buf.begin_debug_marker(label, 0);
}
for command in self.base.commands.iter() { for command in self.base.commands.iter() {
match *command { match *command {
@ -513,21 +597,24 @@ impl RenderBundle {
&pipeline_layout_guard[pipeline_layout_id.unwrap()].raw, &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw,
index as usize, index as usize,
iter::once(bind_group.raw.raw()), iter::once(bind_group.raw.raw()),
&offsets[..num_dynamic_offsets as usize], offsets.iter().take(num_dynamic_offsets as usize).cloned(),
); );
offsets = &offsets[num_dynamic_offsets as usize..]; offsets = &offsets[num_dynamic_offsets as usize..];
} }
RenderCommand::SetPipeline(pipeline_id) => { RenderCommand::SetPipeline(pipeline_id) => {
let pipeline = pipeline_guard.get(pipeline_id).unwrap(); let pipeline = pipeline_guard.get(pipeline_id).unwrap();
cmd_buf.bind_graphics_pipeline(&pipeline.raw); cmd_buf.bind_graphics_pipeline(&pipeline.raw);
index_type = conv::map_index_format(pipeline.index_format);
pipeline_layout_id = Some(pipeline.layout_id.value); pipeline_layout_id = Some(pipeline.layout_id.value);
} }
RenderCommand::SetIndexBuffer { RenderCommand::SetIndexBuffer {
buffer_id, buffer_id,
index_format,
offset, offset,
size, size,
} => { } => {
let index_type = conv::map_index_format(index_format);
let &(ref buffer, _) = buffer_guard let &(ref buffer, _) = buffer_guard
.get(buffer_id) .get(buffer_id)
.unwrap() .unwrap()
@ -651,6 +738,9 @@ impl RenderBundle {
RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(), RenderCommand::PushDebugGroup { color: _, len: _ } => unimplemented!(),
RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(), RenderCommand::InsertDebugMarker { color: _, len: _ } => unimplemented!(),
RenderCommand::PopDebugGroup => unimplemented!(), RenderCommand::PopDebugGroup => unimplemented!(),
RenderCommand::WriteTimestamp { .. }
| RenderCommand::BeginPipelineStatisticsQuery { .. }
| RenderCommand::EndPipelineStatisticsQuery => unimplemented!(),
RenderCommand::ExecuteBundle(_) RenderCommand::ExecuteBundle(_)
| RenderCommand::SetBlendColor(_) | RenderCommand::SetBlendColor(_)
| RenderCommand::SetStencilReference(_) | RenderCommand::SetStencilReference(_)
@ -659,6 +749,10 @@ impl RenderBundle {
} }
} }
if let Some(_) = self.base.label {
cmd_buf.end_debug_marker();
}
Ok(()) Ok(())
} }
} }
@ -675,6 +769,7 @@ impl Resource for RenderBundle {
struct IndexState { struct IndexState {
buffer: Option<id::BufferId>, buffer: Option<id::BufferId>,
format: wgt::IndexFormat, format: wgt::IndexFormat,
pipeline_format: Option<wgt::IndexFormat>,
range: Range<wgt::BufferAddress>, range: Range<wgt::BufferAddress>,
is_dirty: bool, is_dirty: bool,
} }
@ -684,6 +779,7 @@ impl IndexState {
Self { Self {
buffer: None, buffer: None,
format: wgt::IndexFormat::default(), format: wgt::IndexFormat::default(),
pipeline_format: None,
range: 0..0, range: 0..0,
is_dirty: false, is_dirty: false,
} }
@ -703,6 +799,7 @@ impl IndexState {
self.is_dirty = false; self.is_dirty = false;
Some(RenderCommand::SetIndexBuffer { Some(RenderCommand::SetIndexBuffer {
buffer_id: self.buffer.unwrap(), buffer_id: self.buffer.unwrap(),
index_format: self.format,
offset: self.range.start, offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start), size: wgt::BufferSize::new(self.range.end - self.range.start),
}) })
@ -825,6 +922,18 @@ impl PushConstantState {
} }
} }
#[derive(Debug)]
struct VertexLimitState {
/// Length of the shortest vertex rate vertex buffer
vertex_limit: u32,
/// Buffer slot which the shortest vertex rate vertex buffer is bound to
vertex_limit_slot: u32,
/// Length of the shortest instance rate vertex buffer
instance_limit: u32,
/// Buffer slot which the shortest instance rate vertex buffer is bound to
instance_limit_slot: u32,
}
#[derive(Debug)] #[derive(Debug)]
struct State { struct State {
trackers: TrackerSet, trackers: TrackerSet,
@ -839,20 +948,34 @@ struct State {
} }
impl State { impl State {
fn vertex_limits(&self) -> (u32, u32) { fn vertex_limits(&self) -> VertexLimitState {
let mut vertex_limit = !0; let mut vert_state = VertexLimitState {
let mut instance_limit = !0; vertex_limit: u32::MAX,
for vbs in &self.vertex { vertex_limit_slot: 0,
instance_limit: u32::MAX,
instance_limit_slot: 0,
};
for (idx, vbs) in self.vertex.iter().enumerate() {
if vbs.stride == 0 { if vbs.stride == 0 {
continue; continue;
} }
let limit = ((vbs.range.end - vbs.range.start) / vbs.stride) as u32; let limit = ((vbs.range.end - vbs.range.start) / vbs.stride) as u32;
match vbs.rate { match vbs.rate {
wgt::InputStepMode::Vertex => vertex_limit = vertex_limit.min(limit), wgt::InputStepMode::Vertex => {
wgt::InputStepMode::Instance => instance_limit = instance_limit.min(limit), if limit < vert_state.vertex_limit {
vert_state.vertex_limit = limit;
vert_state.vertex_limit_slot = idx as _;
}
}
wgt::InputStepMode::Instance => {
if limit < vert_state.instance_limit {
vert_state.instance_limit = limit;
vert_state.instance_limit_slot = idx as _;
}
}
} }
} }
(vertex_limit, instance_limit) vert_state
} }
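// Illustration, not part of the patch: how a single slot feeds the limits above.
// A buffer bound with a 256-byte range and a 16-byte stride can supply
// 256 / 16 = 16 vertices (or instances, for Instance step mode); the minimum
// across all slots, together with the slot index, is what VertexLimitState
// reports so the draw errors can name the offending buffer. Numbers are made up.
#[allow(dead_code)]
fn slot_limit_example() -> u32 {
    let range = 0u64..256u64; // bytes made visible by set_vertex_buffer
    let stride = 16u64;       // array stride declared by the pipeline
    ((range.end - range.start) / stride) as u32 // == 16
}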
fn invalidate_group_from(&mut self, slot: usize) { fn invalidate_group_from(&mut self, slot: usize) {
@ -883,12 +1006,13 @@ impl State {
fn set_pipeline( fn set_pipeline(
&mut self, &mut self,
index_format: wgt::IndexFormat, index_format: Option<wgt::IndexFormat>,
vertex_strides: &[(wgt::BufferAddress, wgt::InputStepMode)], vertex_strides: &[(wgt::BufferAddress, wgt::InputStepMode)],
layout_ids: &[id::Valid<id::BindGroupLayoutId>], layout_ids: &[id::Valid<id::BindGroupLayoutId>],
push_constant_layouts: &[wgt::PushConstantRange], push_constant_layouts: &[wgt::PushConstantRange],
) { ) {
self.index.set_format(index_format); self.index.pipeline_format = index_format;
for (vs, &(stride, step_mode)) in self.vertex.iter_mut().zip(vertex_strides) { for (vs, &(stride, step_mode)) in self.vertex.iter_mut().zip(vertex_strides) {
if vs.stride != stride || vs.rate != step_mode { if vs.stride != stride || vs.rate != step_mode {
vs.stride = stride; vs.stride = stride;
@ -1050,10 +1174,12 @@ pub mod bundle_ffi {
num_dynamic_offsets: offset_length.try_into().unwrap(), num_dynamic_offsets: offset_length.try_into().unwrap(),
bind_group_id, bind_group_id,
}); });
bundle if offset_length != 0 {
.base bundle
.dynamic_offsets .base
.extend_from_slice(slice::from_raw_parts(offsets, offset_length)); .dynamic_offsets
.extend_from_slice(slice::from_raw_parts(offsets, offset_length));
}
} }
#[no_mangle] #[no_mangle]
@ -1068,21 +1194,6 @@ pub mod bundle_ffi {
.push(RenderCommand::SetPipeline(pipeline_id)); .push(RenderCommand::SetPipeline(pipeline_id));
} }
#[no_mangle]
pub extern "C" fn wgpu_render_bundle_set_index_buffer(
bundle: &mut RenderBundleEncoder,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferSize>,
) {
span!(_guard, DEBUG, "RenderBundle::set_index_buffer");
bundle.base.commands.push(RenderCommand::SetIndexBuffer {
buffer_id,
offset,
size,
});
}
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_render_bundle_set_vertex_buffer( pub extern "C" fn wgpu_render_bundle_set_vertex_buffer(
bundle: &mut RenderBundleEncoder, bundle: &mut RenderBundleEncoder,


@ -5,25 +5,25 @@
use crate::{ use crate::{
binding_model::{BindError, BindGroup, PushConstantUploadError}, binding_model::{BindError, BindGroup, PushConstantUploadError},
command::{ command::{
bind::{Binder, LayoutChange}, bind::Binder, end_pipeline_statistics_query, BasePass, BasePassRef, CommandBuffer,
BasePass, BasePassRef, CommandBuffer, CommandEncoderError, MapPassErr, PassErrorScope, CommandEncoderError, MapPassErr, PassErrorScope, QueryUseError, StateChange,
StateChange,
}, },
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id, id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{Buffer, BufferUse, Texture}, resource::{Buffer, BufferUse, Texture},
span, span,
track::{TrackerSet, UsageConflict}, track::{TrackerSet, UsageConflict},
validation::{check_buffer_usage, MissingBufferUsageError}, validation::{check_buffer_usage, MissingBufferUsageError},
MAX_BIND_GROUPS, Label,
}; };
use arrayvec::ArrayVec;
use hal::command::CommandBuffer as _; use hal::command::CommandBuffer as _;
use thiserror::Error; use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, ShaderStage}; use wgt::{BufferAddress, BufferUsage, ShaderStage};
use std::{fmt, iter, str}; use crate::track::UseExtendError;
use std::{fmt, mem, str};
#[doc(hidden)] #[doc(hidden)]
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
@ -61,6 +61,15 @@ pub enum ComputeCommand {
color: u32, color: u32,
len: usize, len: usize,
}, },
WriteTimestamp {
query_set_id: id::QuerySetId,
query_index: u32,
},
BeginPipelineStatisticsQuery {
query_set_id: id::QuerySetId,
query_index: u32,
},
EndPipelineStatisticsQuery,
} }
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))] #[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
@ -70,9 +79,9 @@ pub struct ComputePass {
} }
impl ComputePass { impl ComputePass {
pub fn new(parent_id: id::CommandEncoderId) -> Self { pub fn new(parent_id: id::CommandEncoderId, desc: &ComputePassDescriptor) -> Self {
Self { Self {
base: BasePass::new(), base: BasePass::new(&desc.label),
parent_id, parent_id,
} }
} }
@ -99,10 +108,9 @@ impl fmt::Debug for ComputePass {
} }
} }
#[repr(C)]
#[derive(Clone, Debug, Default)] #[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor { pub struct ComputePassDescriptor<'a> {
pub todo: u32, pub label: Label<'a>,
} }
#[derive(Clone, Debug, Error, PartialEq)] #[derive(Clone, Debug, Error, PartialEq)]
@ -128,8 +136,18 @@ pub enum ComputePassErrorInner {
BindGroupIndexOutOfRange { index: u8, max: u32 }, BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("compute pipeline {0:?} is invalid")] #[error("compute pipeline {0:?} is invalid")]
InvalidPipeline(id::ComputePipelineId), InvalidPipeline(id::ComputePipelineId),
#[error("QuerySet {0:?} is invalid")]
InvalidQuerySet(id::QuerySetId),
#[error("indirect buffer {0:?} is invalid or destroyed")] #[error("indirect buffer {0:?} is invalid or destroyed")]
InvalidIndirectBuffer(id::BufferId), InvalidIndirectBuffer(id::BufferId),
#[error("indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")]
IndirectBufferOverrun {
offset: u64,
end_offset: u64,
buffer_size: u64,
},
#[error("buffer {0:?} is invalid or destroyed")]
InvalidBuffer(id::BufferId),
#[error(transparent)] #[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict), ResourceUsageConflict(#[from] UsageConflict),
#[error(transparent)] #[error(transparent)]
@ -142,6 +160,8 @@ pub enum ComputePassErrorInner {
Bind(#[from] BindError), Bind(#[from] BindError),
#[error(transparent)] #[error(transparent)]
PushConstants(#[from] PushConstantUploadError), PushConstants(#[from] PushConstantUploadError),
#[error(transparent)]
QueryUse(#[from] QueryUseError),
} }
/// Error encountered when performing a compute pass. /// Error encountered when performing a compute pass.
@ -231,7 +251,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_run_compute_pass_impl<B: GfxBackend>( pub fn command_encoder_run_compute_pass_impl<B: GfxBackend>(
&self, &self,
encoder_id: id::CommandEncoderId, encoder_id: id::CommandEncoderId,
mut base: BasePassRef<ComputeCommand>, base: BasePassRef<ComputeCommand>,
) -> Result<(), ComputePassError> { ) -> Result<(), ComputePassError> {
span!(_guard, INFO, "CommandEncoder::run_compute_pass"); span!(_guard, INFO, "CommandEncoder::run_compute_pass");
let scope = PassErrorScope::Pass(encoder_id); let scope = PassErrorScope::Pass(encoder_id);
@ -241,7 +261,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = let cmd_buf =
CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id).map_pass_err(scope)?; CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id).map_pass_err(scope)?;
let raw = cmd_buf.raw.last_mut().unwrap(); let raw = cmd_buf.raw.last_mut().unwrap();
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
@ -251,20 +271,30 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}); });
} }
if let Some(ref label) = base.label {
unsafe {
raw.begin_debug_marker(label, 0);
}
}
let (_, mut token) = hub.render_bundles.read(&mut token); let (_, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token); let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token); let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token);
let mut state = State { let mut state = State {
binder: Binder::new(cmd_buf.limits.max_bind_groups), binder: Binder::new(),
pipeline: StateChange::new(), pipeline: StateChange::new(),
trackers: TrackerSet::new(B::VARIANT), trackers: TrackerSet::new(B::VARIANT),
debug_scope_depth: 0, debug_scope_depth: 0,
}; };
let mut temp_offsets = Vec::new(); let mut temp_offsets = Vec::new();
let mut dynamic_offset_count = 0;
let mut string_offset = 0;
let mut active_query = None;
for command in base.commands { for command in base.commands {
match *command { match *command {
@ -285,9 +315,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
temp_offsets.clear(); temp_offsets.clear();
temp_offsets temp_offsets.extend_from_slice(
.extend_from_slice(&base.dynamic_offsets[..num_dynamic_offsets as usize]); &base.dynamic_offsets[dynamic_offset_count
base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..]; ..dynamic_offset_count + (num_dynamic_offsets as usize)],
);
dynamic_offset_count += num_dynamic_offsets as usize;
let bind_group = cmd_buf let bind_group = cmd_buf
.trackers .trackers
@ -299,26 +331,44 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.validate_dynamic_bindings(&temp_offsets) .validate_dynamic_bindings(&temp_offsets)
.map_pass_err(scope)?; .map_pass_err(scope)?;
if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry( cmd_buf.buffer_memory_init_actions.extend(
bind_group.used_buffer_ranges.iter().filter_map(
|action| match buffer_guard.get(action.id) {
Ok(buffer) => buffer
.initialization_status
.check(action.range.clone())
.map(|range| MemoryInitTrackerAction {
id: action.id,
range,
kind: action.kind,
}),
Err(_) => None,
},
),
);
let pipeline_layout_id = state.binder.pipeline_layout_id;
let entries = state.binder.assign_group(
index as usize, index as usize,
id::Valid(bind_group_id), id::Valid(bind_group_id),
bind_group, bind_group,
&temp_offsets, &temp_offsets,
) { );
let bind_groups = iter::once(bind_group.raw.raw()) if !entries.is_empty() {
.chain( let pipeline_layout =
follow_ups &pipeline_layout_guard[pipeline_layout_id.unwrap()].raw;
.clone() let desc_sets = entries.iter().map(|e| {
.map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()), bind_group_guard[e.group_id.as_ref().unwrap().value]
) .raw
.collect::<ArrayVec<[_; MAX_BIND_GROUPS]>>(); .raw()
temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets)); });
let offsets = entries.iter().flat_map(|e| &e.dynamic_offsets).cloned();
unsafe { unsafe {
raw.bind_compute_descriptor_sets( raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw, pipeline_layout,
index as usize, index as usize,
bind_groups, desc_sets,
&temp_offsets, offsets,
); );
} }
} }
@ -345,36 +395,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) { if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value]; let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value];
state.binder.change_pipeline_layout( let (start_index, entries) = state.binder.change_pipeline_layout(
&*pipeline_layout_guard, &*pipeline_layout_guard,
pipeline.layout_id.value, pipeline.layout_id.value,
); );
if !entries.is_empty() {
let mut is_compatible = true; let desc_sets = entries.iter().map(|e| {
bind_group_guard[e.group_id.as_ref().unwrap().value]
for (index, (entry, &bgl_id)) in state .raw
.binder .raw()
.entries });
.iter_mut() let offsets = entries.iter().flat_map(|e| &e.dynamic_offsets).cloned();
.zip(&pipeline_layout.bind_group_layout_ids) unsafe {
.enumerate() raw.bind_compute_descriptor_sets(
{ &pipeline_layout.raw,
match entry.expect_layout(bgl_id) { start_index,
LayoutChange::Match(bg_id, offsets) if is_compatible => { desc_sets,
let desc_set = bind_group_guard[bg_id].raw.raw(); offsets,
unsafe { );
raw.bind_compute_descriptor_sets(
&pipeline_layout.raw,
index,
iter::once(desc_set),
offsets.iter().cloned(),
);
}
}
LayoutChange::Match(..) | LayoutChange::Unchanged => {}
LayoutChange::Mismatch => {
is_compatible = false;
}
} }
} }
@ -433,7 +471,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) } unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) }
} }
ComputeCommand::Dispatch(groups) => { ComputeCommand::Dispatch(groups) => {
let scope = PassErrorScope::Dispatch; let scope = PassErrorScope::Dispatch {
indirect: false,
pipeline: state.pipeline.last_state,
};
state.is_ready().map_pass_err(scope)?; state.is_ready().map_pass_err(scope)?;
state state
@ -450,7 +491,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
} }
ComputeCommand::DispatchIndirect { buffer_id, offset } => { ComputeCommand::DispatchIndirect { buffer_id, offset } => {
let scope = PassErrorScope::DispatchIndirect; let scope = PassErrorScope::Dispatch {
indirect: true,
pipeline: state.pipeline.last_state,
};
state.is_ready().map_pass_err(scope)?; state.is_ready().map_pass_err(scope)?;
@ -462,12 +506,36 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_pass_err(scope)?; .map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT) check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)
.map_pass_err(scope)?; .map_pass_err(scope)?;
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > indirect_buffer.size {
return Err(ComputePassErrorInner::IndirectBufferOverrun {
offset,
end_offset,
buffer_size: indirect_buffer.size,
})
.map_pass_err(scope);
}
let &(ref buf_raw, _) = indirect_buffer let &(ref buf_raw, _) = indirect_buffer
.raw .raw
.as_ref() .as_ref()
.ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id)) .ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
.map_pass_err(scope)?; .map_pass_err(scope)?;
let stride = 3 * 4; // 3 integers, x/y/z group size
cmd_buf.buffer_memory_init_actions.extend(
indirect_buffer
.initialization_status
.check(offset..(offset + stride))
.map(|range| MemoryInitTrackerAction {
id: buffer_id,
range,
kind: MemoryInitKind::NeedsInitializedMemory,
}),
);
state state
.flush_states( .flush_states(
raw, raw,
@ -483,12 +551,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
ComputeCommand::PushDebugGroup { color, len } => { ComputeCommand::PushDebugGroup { color, len } => {
state.debug_scope_depth += 1; state.debug_scope_depth += 1;
let label =
let label = str::from_utf8(&base.string_data[..len]).unwrap(); str::from_utf8(&base.string_data[string_offset..string_offset + len])
.unwrap();
string_offset += len;
unsafe { unsafe {
raw.begin_debug_marker(label, color); raw.begin_debug_marker(label, color);
} }
base.string_data = &base.string_data[len..];
} }
ComputeCommand::PopDebugGroup => { ComputeCommand::PopDebugGroup => {
let scope = PassErrorScope::PopDebugGroup; let scope = PassErrorScope::PopDebugGroup;
@ -503,10 +572,74 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
} }
ComputeCommand::InsertDebugMarker { color, len } => { ComputeCommand::InsertDebugMarker { color, len } => {
let label = str::from_utf8(&base.string_data[..len]).unwrap(); let label =
str::from_utf8(&base.string_data[string_offset..string_offset + len])
.unwrap();
string_offset += len;
unsafe { raw.insert_debug_marker(label, color) } unsafe { raw.insert_debug_marker(label, color) }
base.string_data = &base.string_data[len..];
} }
ComputeCommand::WriteTimestamp {
query_set_id,
query_index,
} => {
let scope = PassErrorScope::WriteTimestamp;
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
ComputePassErrorInner::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.map_pass_err(scope)?;
query_set
.validate_and_write_timestamp(raw, query_set_id, query_index, None)
.map_pass_err(scope)?;
}
ComputeCommand::BeginPipelineStatisticsQuery {
query_set_id,
query_index,
} => {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
ComputePassErrorInner::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.map_pass_err(scope)?;
query_set
.validate_and_begin_pipeline_statistics_query(
raw,
query_set_id,
query_index,
None,
&mut active_query,
)
.map_pass_err(scope)?;
}
ComputeCommand::EndPipelineStatisticsQuery => {
let scope = PassErrorScope::EndPipelineStatisticsQuery;
end_pipeline_statistics_query(raw, &*query_set_guard, &mut active_query)
.map_pass_err(scope)?;
}
}
}
if let Some(_) = base.label {
unsafe {
raw.end_debug_marker();
} }
} }
@ -540,9 +673,11 @@ pub mod compute_ffi {
num_dynamic_offsets: offset_length.try_into().unwrap(), num_dynamic_offsets: offset_length.try_into().unwrap(),
bind_group_id, bind_group_id,
}); });
pass.base if offset_length != 0 {
.dynamic_offsets pass.base
.extend_from_slice(slice::from_raw_parts(offsets, offset_length)); .dynamic_offsets
.extend_from_slice(slice::from_raw_parts(offsets, offset_length));
}
} }
#[no_mangle] #[no_mangle]
@ -654,4 +789,49 @@ pub mod compute_ffi {
len: bytes.len(), len: bytes.len(),
}); });
} }
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_write_timestamp(
pass: &mut ComputePass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(_guard, DEBUG, "ComputePass::write_timestamp");
pass.base.commands.push(ComputeCommand::WriteTimestamp {
query_set_id,
query_index,
});
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_begin_pipeline_statistics_query(
pass: &mut ComputePass,
query_set_id: id::QuerySetId,
query_index: u32,
) {
span!(
_guard,
DEBUG,
"ComputePass::begin_pipeline_statistics query"
);
pass.base
.commands
.push(ComputeCommand::BeginPipelineStatisticsQuery {
query_set_id,
query_index,
});
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_end_pipeline_statistics_query(
pass: &mut ComputePass,
) {
span!(_guard, DEBUG, "ComputePass::end_pipeline_statistics_query");
pass.base
.commands
.push(ComputeCommand::EndPipelineStatisticsQuery);
}
} }
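// Illustration, not part of the patch: recording a timestamp through the FFI
// surface added above. `pass` and `query_set_id` are assumed to be handed in by
// the embedder (the Firefox WebGPU bindings in this case); the call only
// enqueues a ComputeCommand::WriteTimestamp, which is validated later when the
// pass is replayed by command_encoder_run_compute_pass_impl.
#[allow(dead_code)]
unsafe fn write_timestamp_example(pass: &mut ComputePass, query_set_id: id::QuerySetId) {
    compute_ffi::wgpu_compute_pass_write_timestamp(pass, query_set_id, /* query_index */ 0);
}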


@ -26,21 +26,37 @@ pub enum DrawError {
MissingBlendColor, MissingBlendColor,
#[error("render pipeline must be set")] #[error("render pipeline must be set")]
MissingPipeline, MissingPipeline,
#[error("vertex buffer {index} must be set")]
MissingVertexBuffer { index: u32 },
#[error("index buffer must be set")]
MissingIndexBuffer,
#[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")] #[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup { IncompatibleBindGroup {
index: u32, index: u32,
//expected: BindGroupLayoutId, //expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>, //provided: Option<(BindGroupLayoutId, BindGroupId)>,
}, },
#[error("vertex {last_vertex} extends beyond limit {vertex_limit}")] #[error("vertex {last_vertex} extends beyond limit {vertex_limit} imposed by the buffer in slot {slot}. Did you bind the correct `Vertex` step-rate vertex buffer?")]
VertexBeyondLimit { last_vertex: u32, vertex_limit: u32 }, VertexBeyondLimit {
#[error("instance {last_instance} extends beyond limit {instance_limit}")] last_vertex: u32,
vertex_limit: u32,
slot: u32,
},
#[error("instance {last_instance} extends beyond limit {instance_limit} imposed by the buffer in slot {slot}. Did you bind the correct `Instance` step-rate vertex buffer?")]
InstanceBeyondLimit { InstanceBeyondLimit {
last_instance: u32, last_instance: u32,
instance_limit: u32, instance_limit: u32,
slot: u32,
}, },
#[error("index {last_index} extends beyond limit {index_limit}")] #[error("index {last_index} extends beyond limit {index_limit}. Did you bind the correct index buffer?")]
IndexBeyondLimit { last_index: u32, index_limit: u32 }, IndexBeyondLimit { last_index: u32, index_limit: u32 },
#[error(
"pipeline index format ({pipeline:?}) and buffer index format ({buffer:?}) do not match"
)]
UnmatchedIndexFormats {
pipeline: wgt::IndexFormat,
buffer: wgt::IndexFormat,
},
} }
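// Illustration, not part of the patch: the shape of the check that the new
// UnmatchedIndexFormats variant supports. The pipeline may declare an index
// format (for stripped topologies) while set_index_buffer now records its own;
// a hypothetical helper sketching the comparison (the real check lives in the
// render pass / bundle state code, parts of which are omitted below for size):
#[allow(dead_code)]
fn check_index_format(
    pipeline: Option<wgt::IndexFormat>,
    buffer: wgt::IndexFormat,
) -> Result<(), DrawError> {
    match pipeline {
        Some(expected) if expected != buffer => Err(DrawError::UnmatchedIndexFormats {
            pipeline: expected,
            buffer,
        }),
        _ => Ok(()),
    }
}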
/// Error encountered when encoding a render command. /// Error encountered when encoding a render command.
@ -49,6 +65,8 @@ pub enum DrawError {
pub enum RenderCommandError { pub enum RenderCommandError {
#[error("bind group {0:?} is invalid")] #[error("bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId), InvalidBindGroup(id::BindGroupId),
#[error("render bundle {0:?} is invalid")]
InvalidRenderBundle(id::RenderBundleId),
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")] #[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 }, BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")] #[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
@ -57,6 +75,8 @@ pub enum RenderCommandError {
InvalidDynamicOffsetCount { actual: usize, expected: usize }, InvalidDynamicOffsetCount { actual: usize, expected: usize },
#[error("render pipeline {0:?} is invalid")] #[error("render pipeline {0:?} is invalid")]
InvalidPipeline(id::RenderPipelineId), InvalidPipeline(id::RenderPipelineId),
#[error("QuerySet {0:?} is invalid")]
InvalidQuerySet(id::QuerySetId),
#[error("Render pipeline is incompatible with render pass")] #[error("Render pipeline is incompatible with render pass")]
IncompatiblePipeline(#[from] crate::device::RenderPassCompatibilityError), IncompatiblePipeline(#[from] crate::device::RenderPassCompatibilityError),
#[error("pipeline is not compatible with the depth-stencil read-only render pass")] #[error("pipeline is not compatible with the depth-stencil read-only render pass")]
@ -112,6 +132,7 @@ pub enum RenderCommand {
SetPipeline(id::RenderPipelineId), SetPipeline(id::RenderPipelineId),
SetIndexBuffer { SetIndexBuffer {
buffer_id: id::BufferId, buffer_id: id::BufferId,
index_format: wgt::IndexFormat,
offset: BufferAddress, offset: BufferAddress,
size: Option<BufferSize>, size: Option<BufferSize>,
}, },
@ -176,5 +197,14 @@ pub enum RenderCommand {
color: u32, color: u32,
len: usize, len: usize,
}, },
WriteTimestamp {
query_set_id: id::QuerySetId,
query_index: u32,
},
BeginPipelineStatisticsQuery {
query_set_id: id::QuerySetId,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId), ExecuteBundle(id::RenderBundleId),
} }


@ -7,6 +7,7 @@ mod bind;
mod bundle; mod bundle;
mod compute; mod compute;
mod draw; mod draw;
mod query;
mod render; mod render;
mod transfer; mod transfer;
@ -15,6 +16,7 @@ pub use self::allocator::CommandAllocatorError;
pub use self::bundle::*; pub use self::bundle::*;
pub use self::compute::*; pub use self::compute::*;
pub use self::draw::*; pub use self::draw::*;
pub use self::query::*;
pub use self::render::*; pub use self::render::*;
pub use self::transfer::*; pub use self::transfer::*;
@ -22,6 +24,7 @@ use crate::{
device::{all_buffer_stages, all_image_stages}, device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id, id,
memory_init_tracker::MemoryInitTrackerAction,
resource::{Buffer, Texture}, resource::{Buffer, Texture},
span, span,
track::TrackerSet, track::TrackerSet,
@ -29,6 +32,7 @@ use crate::{
}; };
use hal::command::CommandBuffer as _; use hal::command::CommandBuffer as _;
use smallvec::SmallVec;
use thiserror::Error; use thiserror::Error;
use std::thread::ThreadId; use std::thread::ThreadId;
@ -42,9 +46,11 @@ pub struct CommandBuffer<B: hal::Backend> {
recorded_thread_id: ThreadId, recorded_thread_id: ThreadId,
pub(crate) device_id: Stored<id::DeviceId>, pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) trackers: TrackerSet, pub(crate) trackers: TrackerSet,
pub(crate) used_swap_chain: Option<(Stored<id::SwapChainId>, B::Framebuffer)>, pub(crate) used_swap_chains: SmallVec<[Stored<id::SwapChainId>; 1]>,
pub(crate) buffer_memory_init_actions: Vec<MemoryInitTrackerAction<id::BufferId>>,
limits: wgt::Limits, limits: wgt::Limits,
private_features: PrivateFeatures, private_features: PrivateFeatures,
has_labels: bool,
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
pub(crate) commands: Option<Vec<crate::device::trace::Command>>, pub(crate) commands: Option<Vec<crate::device::trace::Command>>,
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
@ -52,7 +58,7 @@ pub struct CommandBuffer<B: hal::Backend> {
} }
impl<B: GfxBackend> CommandBuffer<B> { impl<B: GfxBackend> CommandBuffer<B> {
fn get_encoder( fn get_encoder_mut(
storage: &mut Storage<Self, id::CommandEncoderId>, storage: &mut Storage<Self, id::CommandEncoderId>,
id: id::CommandEncoderId, id: id::CommandEncoderId,
) -> Result<&mut Self, CommandEncoderError> { ) -> Result<&mut Self, CommandEncoderError> {
@ -120,6 +126,7 @@ impl<B: hal::Backend> crate::hub::Resource for CommandBuffer<B> {
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
pub struct BasePassRef<'a, C> { pub struct BasePassRef<'a, C> {
pub label: Option<&'a str>,
pub commands: &'a [C], pub commands: &'a [C],
pub dynamic_offsets: &'a [wgt::DynamicOffset], pub dynamic_offsets: &'a [wgt::DynamicOffset],
pub string_data: &'a [u8], pub string_data: &'a [u8],
@ -137,6 +144,7 @@ pub struct BasePassRef<'a, C> {
derive(serde::Deserialize) derive(serde::Deserialize)
)] )]
pub struct BasePass<C> { pub struct BasePass<C> {
pub label: Option<String>,
pub commands: Vec<C>, pub commands: Vec<C>,
pub dynamic_offsets: Vec<wgt::DynamicOffset>, pub dynamic_offsets: Vec<wgt::DynamicOffset>,
pub string_data: Vec<u8>, pub string_data: Vec<u8>,
@ -144,8 +152,9 @@ pub struct BasePass<C> {
} }
impl<C: Clone> BasePass<C> { impl<C: Clone> BasePass<C> {
fn new() -> Self { fn new(label: &Label) -> Self {
Self { Self {
label: label.as_ref().map(|cow| cow.to_string()),
commands: Vec::new(), commands: Vec::new(),
dynamic_offsets: Vec::new(), dynamic_offsets: Vec::new(),
string_data: Vec::new(), string_data: Vec::new(),
@ -156,6 +165,7 @@ impl<C: Clone> BasePass<C> {
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
fn from_ref(base: BasePassRef<C>) -> Self { fn from_ref(base: BasePassRef<C>) -> Self {
Self { Self {
label: base.label.map(str::to_string),
commands: base.commands.to_vec(), commands: base.commands.to_vec(),
dynamic_offsets: base.dynamic_offsets.to_vec(), dynamic_offsets: base.dynamic_offsets.to_vec(),
string_data: base.string_data.to_vec(), string_data: base.string_data.to_vec(),
@ -165,6 +175,7 @@ impl<C: Clone> BasePass<C> {
pub fn as_ref(&self) -> BasePassRef<C> { pub fn as_ref(&self) -> BasePassRef<C> {
BasePassRef { BasePassRef {
label: self.label.as_ref().map(String::as_str),
commands: &self.commands, commands: &self.commands,
dynamic_offsets: &self.dynamic_offsets, dynamic_offsets: &self.dynamic_offsets,
string_data: &self.string_data, string_data: &self.string_data,
@ -195,11 +206,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//TODO: actually close the last recorded command buffer //TODO: actually close the last recorded command buffer
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let error = match CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id) { let error = match CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id) {
Ok(cmd_buf) => { Ok(cmd_buf) => {
cmd_buf.is_recording = false; cmd_buf.is_recording = false;
// stop tracking the swapchain image, if used // stop tracking the swapchain image, if used
if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain { for sc_id in cmd_buf.used_swap_chains.iter() {
let view_id = swap_chain_guard[sc_id.value] let view_id = swap_chain_guard[sc_id.value]
.acquired_view_id .acquired_view_id
.as_ref() .as_ref()
@ -226,11 +237,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.begin_debug_marker(label, 0); cmd_buf_raw.begin_debug_marker(label, 0);
} }
Ok(()) Ok(())
} }
@ -246,11 +257,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.insert_debug_marker(label, 0); cmd_buf_raw.insert_debug_marker(label, 0);
} }
Ok(()) Ok(())
} }
@ -265,11 +276,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, encoder_id)?;
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.end_debug_marker(); cmd_buf_raw.end_debug_marker();
} }
Ok(()) Ok(())
} }
@ -343,20 +354,27 @@ pub enum PassErrorScope {
SetViewport, SetViewport,
#[error("In a set_scissor_rect command")] #[error("In a set_scissor_rect command")]
SetScissorRect, SetScissorRect,
#[error("In a draw command")] #[error("In a draw command, indexed:{indexed} indirect:{indirect}")]
Draw, Draw {
#[error("In a draw_indexed command")] indexed: bool,
DrawIndexed, indirect: bool,
#[error("In a draw_indirect command")] pipeline: Option<id::RenderPipelineId>,
DrawIndirect, },
#[error("In a draw_indexed_indirect command")] #[error("While resetting queries after the renderpass was ran")]
DrawIndexedIndirect, QueryReset,
#[error("In a write_timestamp command")]
WriteTimestamp,
#[error("In a begin_pipeline_statistics_query command")]
BeginPipelineStatisticsQuery,
#[error("In a end_pipeline_statistics_query command")]
EndPipelineStatisticsQuery,
#[error("In a execute_bundle command")] #[error("In a execute_bundle command")]
ExecuteBundle, ExecuteBundle,
#[error("In a dispatch command")] #[error("In a dispatch command, indirect:{indirect}")]
Dispatch, Dispatch {
#[error("In a dispatch_indirect command")] indirect: bool,
DispatchIndirect, pipeline: Option<id::ComputePipelineId>,
},
#[error("In a pop_debug_group command")] #[error("In a pop_debug_group command")]
PopDebugGroup, PopDebugGroup,
} }


@ -0,0 +1,424 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use hal::command::CommandBuffer as _;
#[cfg(feature = "trace")]
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
device::all_buffer_stages,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id::{self, Id, TypedId},
resource::{BufferUse, QuerySet},
track::UseExtendError,
Epoch, FastHashMap, Index,
};
use std::{iter, marker::PhantomData};
use thiserror::Error;
use wgt::BufferAddress;
#[derive(Debug)]
pub(super) struct QueryResetMap<B: hal::Backend> {
map: FastHashMap<Index, (Vec<bool>, Epoch)>,
_phantom: PhantomData<B>,
}
impl<B: hal::Backend> QueryResetMap<B> {
pub fn new() -> Self {
Self {
map: FastHashMap::default(),
_phantom: PhantomData,
}
}
pub fn use_query_set(
&mut self,
id: id::QuerySetId,
query_set: &QuerySet<B>,
query: u32,
) -> bool {
let (index, epoch, _) = id.unzip();
let (vec, _) = self
.map
.entry(index)
.or_insert_with(|| (vec![false; query_set.desc.count as usize], epoch));
std::mem::replace(&mut vec[query as usize], true)
}
pub fn reset_queries(
self,
cmd_buf_raw: &mut B::CommandBuffer,
query_set_storage: &Storage<QuerySet<B>, id::QuerySetId>,
backend: wgt::Backend,
) -> Result<(), id::QuerySetId> {
for (query_set_id, (state, epoch)) in self.map.into_iter() {
let id = Id::zip(query_set_id, epoch, backend);
let query_set = query_set_storage.get(id).map_err(|_| id)?;
debug_assert_eq!(state.len(), query_set.desc.count as usize);
// Need to find all "runs" of values which need resets. If the state vector is:
// [false, true, true, false, true], we want to reset [1..3, 4..5]. This minimizes
// the amount of resets needed.
let mut state_iter = state.into_iter().chain(iter::once(false)).enumerate();
let mut run_start: Option<u32> = None;
while let Some((idx, value)) = state_iter.next() {
match (run_start, value) {
// We're inside of a run, do nothing
(Some(..), true) => {}
// We've hit the end of a run, dispatch a reset
(Some(start), false) => {
run_start = None;
unsafe { cmd_buf_raw.reset_query_pool(&query_set.raw, start..idx as u32) };
}
// We're starting a run
(None, true) => {
run_start = Some(idx as u32);
}
// We're in a run of falses, do nothing.
(None, false) => {}
}
}
}
Ok(())
}
}
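// Illustration, not part of the patch: the run detection used by reset_queries
// above, pulled out into a standalone form. Given per-query "needs reset" flags
// it yields half-open ranges covering each run of `true` values, so
// [false, true, true, false, true] becomes [1..3, 4..5].
#[allow(dead_code)]
fn reset_runs(state: &[bool]) -> Vec<std::ops::Range<u32>> {
    let mut runs = Vec::new();
    let mut run_start: Option<u32> = None;
    for (idx, &needs_reset) in state.iter().chain(std::iter::once(&false)).enumerate() {
        match (run_start, needs_reset) {
            // end of a run: emit it
            (Some(start), false) => {
                runs.push(start..idx as u32);
                run_start = None;
            }
            // start of a run
            (None, true) => run_start = Some(idx as u32),
            // inside a run, or inside a gap: nothing to do
            _ => {}
        }
    }
    runs
}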
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SimplifiedQueryType {
Timestamp,
PipelineStatistics,
}
impl From<wgt::QueryType> for SimplifiedQueryType {
fn from(q: wgt::QueryType) -> Self {
match q {
wgt::QueryType::Timestamp => SimplifiedQueryType::Timestamp,
wgt::QueryType::PipelineStatistics(..) => SimplifiedQueryType::PipelineStatistics,
}
}
}
/// Error encountered when dealing with queries
#[derive(Clone, Debug, Error)]
pub enum QueryError {
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error("Error encountered while trying to use queries")]
Use(#[from] QueryUseError),
#[error("Error encountered while trying to resolve a query")]
Resolve(#[from] ResolveError),
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(id::BufferId),
#[error("QuerySet {0:?} is invalid or destroyed")]
InvalidQuerySet(id::QuerySetId),
}
/// Error encountered while trying to use queries
#[derive(Clone, Debug, Error)]
pub enum QueryUseError {
#[error("Query {query_index} is out of bounds for a query set of size {query_set_size}")]
OutOfBounds {
query_index: u32,
query_set_size: u32,
},
#[error("Query {query_index} has already been used within the same renderpass. Queries must only be used once per renderpass")]
UsedTwiceInsideRenderpass { query_index: u32 },
#[error("Query {new_query_index} was started while query {active_query_index} was already active. No more than one statistic or occlusion query may be active at once")]
AlreadyStarted {
active_query_index: u32,
new_query_index: u32,
},
#[error("Query was stopped while there was no active query")]
AlreadyStopped,
#[error("A query of type {query_type:?} was started using a query set of type {set_type:?}")]
IncompatibleType {
set_type: SimplifiedQueryType,
query_type: SimplifiedQueryType,
},
}
/// Error encountered while trying to resolve a query.
#[derive(Clone, Debug, Error)]
pub enum ResolveError {
#[error("Queries can only be resolved to buffers that contain the COPY_DST usage")]
MissingBufferUsage,
#[error("Resolving queries {start_query}..{end_query} would overrun the query set of size {query_set_size}")]
QueryOverrun {
start_query: u32,
end_query: u32,
query_set_size: u32,
},
#[error("Resolving queries {start_query}..{end_query} ({stride} byte queries) will end up overruning the bounds of the destination buffer of size {buffer_size} using offsets {buffer_start_offset}..{buffer_end_offset}")]
BufferOverrun {
start_query: u32,
end_query: u32,
stride: u32,
buffer_size: BufferAddress,
buffer_start_offset: BufferAddress,
buffer_end_offset: BufferAddress,
},
}
impl<B: GfxBackend> QuerySet<B> {
fn validate_query(
&self,
query_set_id: id::QuerySetId,
query_type: SimplifiedQueryType,
query_index: u32,
reset_state: Option<&mut QueryResetMap<B>>,
) -> Result<hal::query::Query<'_, B>, QueryUseError> {
// We need to defer our resets because we are in a renderpass, add the usage to the reset map.
if let Some(reset) = reset_state {
let used = reset.use_query_set(query_set_id, self, query_index);
if used {
return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index }.into());
}
}
let simple_set_type = SimplifiedQueryType::from(self.desc.ty);
if simple_set_type != query_type {
return Err(QueryUseError::IncompatibleType {
query_type,
set_type: simple_set_type,
}
.into());
}
if query_index >= self.desc.count {
return Err(QueryUseError::OutOfBounds {
query_index,
query_set_size: self.desc.count,
}
.into());
}
let hal_query = hal::query::Query::<B> {
pool: &self.raw,
id: query_index,
};
Ok(hal_query)
}
pub(super) fn validate_and_write_timestamp(
&self,
cmd_buf_raw: &mut B::CommandBuffer,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<B>>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let hal_query = self.validate_query(
query_set_id,
SimplifiedQueryType::Timestamp,
query_index,
reset_state,
)?;
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
cmd_buf_raw.reset_query_pool(&self.raw, query_index..(query_index + 1));
}
cmd_buf_raw.write_timestamp(hal::pso::PipelineStage::BOTTOM_OF_PIPE, hal_query);
}
Ok(())
}
pub(super) fn validate_and_begin_pipeline_statistics_query(
&self,
cmd_buf_raw: &mut B::CommandBuffer,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<B>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let hal_query = self.validate_query(
query_set_id,
SimplifiedQueryType::PipelineStatistics,
query_index,
reset_state,
)?;
if let Some((_old_id, old_idx)) = active_query.replace((query_set_id, query_index)) {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
}
.into());
}
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
cmd_buf_raw.reset_query_pool(&self.raw, query_index..(query_index + 1));
}
cmd_buf_raw.begin_query(hal_query, hal::query::ControlFlags::empty());
}
Ok(())
}
}
pub(super) fn end_pipeline_statistics_query<B: GfxBackend>(
cmd_buf_raw: &mut B::CommandBuffer,
storage: &Storage<QuerySet<B>, id::QuerySetId>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
if let Some((query_set_id, query_index)) = active_query.take() {
// We can unwrap here as the validity was validated when the active query was set
let query_set = storage.get(query_set_id).unwrap();
let hal_query = hal::query::Query::<B> {
pool: &query_set.raw,
id: query_index,
};
unsafe { cmd_buf_raw.end_query(hal_query) }
Ok(())
} else {
Err(QueryUseError::AlreadyStopped)
}
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_write_timestamp<B: GfxBackend>(
&self,
command_encoder_id: id::CommandEncoderId,
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), QueryError> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let (query_set_guard, _) = hub.query_sets.read(&mut token);
let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?;
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf.commands {
list.push(TraceCommand::WriteTimestamp {
query_set_id,
query_index,
});
}
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => QueryError::InvalidQuerySet(query_set_id),
_ => unreachable!(),
})?;
query_set.validate_and_write_timestamp(cmd_buf_raw, query_set_id, query_index, None)?;
Ok(())
}
pub fn command_encoder_resolve_query_set<B: GfxBackend>(
&self,
command_encoder_id: id::CommandEncoderId,
query_set_id: id::QuerySetId,
start_query: u32,
query_count: u32,
destination: id::BufferId,
destination_offset: BufferAddress,
) -> Result<(), QueryError> {
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let cmd_buf = CommandBuffer::get_encoder_mut(&mut cmd_buf_guard, command_encoder_id)?;
let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf.commands {
list.push(TraceCommand::ResolveQuerySet {
query_set_id,
start_query,
query_count,
destination,
destination_offset,
});
}
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => QueryError::InvalidQuerySet(query_set_id),
_ => unreachable!(),
})?;
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), BufferUse::COPY_DST)
.map_err(QueryError::InvalidBuffer)?;
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
if !dst_buffer.usage.contains(wgt::BufferUsage::COPY_DST) {
return Err(ResolveError::MissingBufferUsage.into());
}
let end_query = start_query + query_count;
if end_query > query_set.desc.count {
return Err(ResolveError::QueryOverrun {
start_query,
end_query,
query_set_size: query_set.desc.count,
}
.into());
}
let stride = query_set.elements * wgt::QUERY_SIZE;
let bytes_used = (stride * query_count) as BufferAddress;
let buffer_start_offset = destination_offset;
let buffer_end_offset = buffer_start_offset + bytes_used;
if buffer_end_offset > dst_buffer.size {
return Err(ResolveError::BufferOverrun {
start_query,
end_query,
stride,
buffer_size: dst_buffer.size,
buffer_start_offset,
buffer_end_offset,
}
.into());
}
unsafe {
cmd_buf_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
dst_barrier,
);
cmd_buf_raw.copy_query_pool_results(
&query_set.raw,
start_query..end_query,
&dst_buffer.raw.as_ref().unwrap().0,
destination_offset,
stride,
hal::query::ResultFlags::WAIT | hal::query::ResultFlags::BITS_64,
);
}
Ok(())
}
}
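// Illustration, not part of the patch: the destination-size arithmetic behind
// ResolveError::BufferOverrun above, with made-up numbers. A pipeline-statistics
// query set tracking 2 counters resolves 8 bytes (wgt::QUERY_SIZE) per counter
// per query, so resolving 4 queries needs 2 * 8 * 4 = 64 bytes starting at
// destination_offset, and the destination buffer must be at least that long.
#[allow(dead_code)]
fn resolve_size_example() -> wgt::BufferAddress {
    let elements: u32 = 2;    // counters tracked per query in the set
    let query_count: u32 = 4; // queries being resolved
    let stride = elements * wgt::QUERY_SIZE;
    (stride * query_count) as wgt::BufferAddress // == 64
}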

The diff for this file is not shown because of its large size.


@ -10,6 +10,7 @@ use crate::{
device::{all_buffer_stages, all_image_stages}, device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id::{BufferId, CommandEncoderId, TextureId}, id::{BufferId, CommandEncoderId, TextureId},
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{BufferUse, Texture, TextureErrorDimension, TextureUse}, resource::{BufferUse, Texture, TextureErrorDimension, TextureUse},
span, span,
track::TextureSelector, track::TextureSelector,
@ -121,7 +122,7 @@ pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
let (layer, layer_count, z) = match texture.dimension { let (layer, layer_count, z) = match texture.dimension {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => ( wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => (
view.origin.z as hal::image::Layer, view.origin.z as hal::image::Layer,
size.depth as hal::image::Layer, size.depth_or_array_layers as hal::image::Layer,
0, 0,
), ),
wgt::TextureDimension::D3 => (0, 1, view.origin.z as i32), wgt::TextureDimension::D3 => (0, 1, view.origin.z as i32),
@ -149,6 +150,7 @@ pub(crate) fn texture_copy_view_to_hal<B: hal::Backend>(
} }
/// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range /// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range
/// If successful, returns number of buffer bytes required for this copy.
pub(crate) fn validate_linear_texture_data( pub(crate) fn validate_linear_texture_data(
layout: &wgt::TextureDataLayout, layout: &wgt::TextureDataLayout,
format: wgt::TextureFormat, format: wgt::TextureFormat,
@ -156,17 +158,17 @@ pub(crate) fn validate_linear_texture_data(
buffer_side: CopySide, buffer_side: CopySide,
bytes_per_block: BufferAddress, bytes_per_block: BufferAddress,
copy_size: &Extent3d, copy_size: &Extent3d,
) -> Result<(), TransferError> { ) -> Result<BufferAddress, TransferError> {
// Convert all inputs to BufferAddress (u64) to prevent overflow issues // Convert all inputs to BufferAddress (u64) to prevent overflow issues
let copy_width = copy_size.width as BufferAddress; let copy_width = copy_size.width as BufferAddress;
let copy_height = copy_size.height as BufferAddress; let copy_height = copy_size.height as BufferAddress;
let copy_depth = copy_size.depth as BufferAddress; let copy_depth = copy_size.depth_or_array_layers as BufferAddress;
let offset = layout.offset; let offset = layout.offset;
let rows_per_image = layout.rows_per_image as BufferAddress; let rows_per_image = layout.rows_per_image as BufferAddress;
let bytes_per_row = layout.bytes_per_row as BufferAddress; let bytes_per_row = layout.bytes_per_row as BufferAddress;
let (block_width, block_height) = conv::texture_block_size(format); let (block_width, block_height) = format.describe().block_dimensions;
let block_width = block_width as BufferAddress; let block_width = block_width as BufferAddress;
let block_height = block_height as BufferAddress; let block_height = block_height as BufferAddress;
let block_size = bytes_per_block; let block_size = bytes_per_block;
@ -217,10 +219,10 @@ pub(crate) fn validate_linear_texture_data(
if copy_depth > 1 && rows_per_image == 0 { if copy_depth > 1 && rows_per_image == 0 {
return Err(TransferError::InvalidRowsPerImage); return Err(TransferError::InvalidRowsPerImage);
} }
Ok(()) Ok(required_bytes_in_copy)
} }
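// Illustration, not part of the patch: the value now returned follows the
// requiredBytesInCopy formula from the WebGPU spec that this function mirrors.
// A tightly packed sketch with made-up numbers: a 256x4 texel copy, 2 array
// layers, 4 bytes per (1x1) block.
#[allow(dead_code)]
fn required_bytes_example() -> wgt::BufferAddress {
    let bytes_per_block: u64 = 4;
    let width_in_blocks: u64 = 256;
    let height_in_blocks: u64 = 4;
    let copy_depth: u64 = 2;
    let bytes_per_row = bytes_per_block * width_in_blocks; // 1024, no row padding here
    let rows_per_image = height_in_blocks;
    // all full images but the last, all full rows of the last image but the
    // last, then the bytes actually touched in the final row
    bytes_per_row * rows_per_image * (copy_depth - 1)
        + bytes_per_row * (height_in_blocks - 1)
        + bytes_per_block * width_in_blocks // == 8192
}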
/// Function copied with minor modifications from webgpu standard https://gpuweb.github.io/gpuweb/#valid-texture-copy-range /// Function copied with minor modifications from webgpu standard <https://gpuweb.github.io/gpuweb/#valid-texture-copy-range>
pub(crate) fn validate_texture_copy_range( pub(crate) fn validate_texture_copy_range(
texture_copy_view: &TextureCopyView, texture_copy_view: &TextureCopyView,
texture_format: wgt::TextureFormat, texture_format: wgt::TextureFormat,
@ -228,12 +230,21 @@ pub(crate) fn validate_texture_copy_range(
texture_side: CopySide, texture_side: CopySide,
copy_size: &Extent3d, copy_size: &Extent3d,
) -> Result<(), TransferError> { ) -> Result<(), TransferError> {
let (block_width, block_height) = conv::texture_block_size(texture_format); let (block_width, block_height) = texture_format.describe().block_dimensions;
let block_width = block_width as u32;
let block_height = block_height as u32;
let mut extent = texture_dimension.level_extent(texture_copy_view.mip_level as u8); let mut extent = texture_dimension.level_extent(texture_copy_view.mip_level as u8);
// Adjust extent for the physical size of mips
if texture_copy_view.mip_level != 0 {
extent.width = conv::align_up(extent.width, block_width);
extent.height = conv::align_up(extent.height, block_height);
}
match texture_dimension { match texture_dimension {
hal::image::Kind::D1(..) => { hal::image::Kind::D1(..) => {
if (copy_size.height, copy_size.depth) != (1, 1) { if (copy_size.height, copy_size.depth_or_array_layers) != (1, 1) {
return Err(TransferError::InvalidCopySize); return Err(TransferError::InvalidCopySize);
} }
} }
@ -263,7 +274,7 @@ pub(crate) fn validate_texture_copy_range(
side: texture_side, side: texture_side,
}); });
} }
let z_copy_max = texture_copy_view.origin.z + copy_size.depth; let z_copy_max = texture_copy_view.origin.z + copy_size.depth_or_array_layers;
if z_copy_max > extent.depth { if z_copy_max > extent.depth {
return Err(TransferError::TextureOverrun { return Err(TransferError::TextureOverrun {
start_offset: texture_copy_view.origin.z, start_offset: texture_copy_view.origin.z,
@ -308,11 +319,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, _) = hub.buffers.read(&mut token); let (buffer_guard, _) = hub.buffers.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf.commands { if let Some(ref mut list) = cmd_buf.commands {
@ -337,7 +345,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !src_buffer.usage.contains(BufferUsage::COPY_SRC) { if !src_buffer.usage.contains(BufferUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
barriers.extend(src_pending.map(|pending| pending.into_hal(src_buffer))); // expecting only a single barrier
let src_barrier = src_pending
.map(|pending| pending.into_hal(src_buffer))
.next();
let (dst_buffer, dst_pending) = cmd_buf let (dst_buffer, dst_pending) = cmd_buf
.trackers .trackers
@ -354,7 +365,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
None, None,
))? ))?
} }
barriers.extend(dst_pending.map(|pending| pending.into_hal(dst_buffer))); let dst_barrier = dst_pending
.map(|pending| pending.into_hal(dst_buffer))
.next();
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 { if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedCopySize(size))? Err(TransferError::UnalignedCopySize(size))?
@ -390,19 +403,41 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Ok(()); return Ok(());
} }
// Make sure source is initialized memory and mark dest as initialized.
cmd_buf.buffer_memory_init_actions.extend(
dst_buffer
.initialization_status
.check(destination_offset..(destination_offset + size))
.map(|range| MemoryInitTrackerAction {
id: destination,
range,
kind: MemoryInitKind::ImplicitlyInitialized,
}),
);
cmd_buf.buffer_memory_init_actions.extend(
src_buffer
.initialization_status
.check(source_offset..(source_offset + size))
.map(|range| MemoryInitTrackerAction {
id: source,
range,
kind: MemoryInitKind::NeedsInitializedMemory,
}),
);
let region = hal::command::BufferCopy { let region = hal::command::BufferCopy {
src: source_offset, src: source_offset,
dst: destination_offset, dst: destination_offset,
size, size,
}; };
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.pipeline_barrier( cmd_buf_raw.pipeline_barrier(
all_buffer_stages()..hal::pso::PipelineStage::TRANSFER, all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(), hal::memory::Dependencies::empty(),
barriers, src_barrier.into_iter().chain(dst_barrier),
); );
cmb_raw.copy_buffer(src_raw, dst_raw, iter::once(region)); cmd_buf_raw.copy_buffer(src_raw, dst_raw, iter::once(region));
} }
Ok(()) Ok(())
} }
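The init-action bookkeeping added above records, per buffer, which byte ranges must already hold defined data (the copy source) and which become defined by the copy (the destination). A very rough, invented sketch of that kind of range tracking; the real `MemoryInitTracker` in wgpu-core has a different API and reports the uninitialized sub-ranges rather than the whole query:

use std::ops::Range;

// Invented sketch: tracks which byte ranges of a buffer have been initialized.
#[derive(Default)]
struct InitTracker {
    initialized: Vec<Range<u64>>, // kept sorted by start
}

impl InitTracker {
    // Record a range as initialized (naive: push and keep the list sorted).
    fn mark_initialized(&mut self, range: Range<u64>) {
        self.initialized.push(range);
        self.initialized.sort_by_key(|r| r.start);
    }

    // Returns Some(query) if any part of `query` is still uninitialized.
    fn check(&self, query: Range<u64>) -> Option<Range<u64>> {
        let mut covered = query.start;
        for r in &self.initialized {
            if r.start <= covered && r.end > covered {
                covered = r.end;
            }
        }
        if covered >= query.end {
            None
        } else {
            Some(query)
        }
    }
}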
@ -419,7 +454,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token);
let (dst_layers, dst_selector, dst_offset) = let (dst_layers, dst_selector, dst_offset) =
@ -434,7 +469,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}); });
} }
if copy_size.width == 0 || copy_size.height == 0 || copy_size.width == 0 { if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_buffer_to_texture of size 0"); tracing::trace!("Ignoring copy_buffer_to_texture of size 0");
return Ok(()); return Ok(());
} }
@ -494,7 +529,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
CopySide::Destination, CopySide::Destination,
copy_size, copy_size,
)?; )?;
validate_linear_texture_data( let required_buffer_bytes_in_copy = validate_linear_texture_data(
&source.layout, &source.layout,
dst_texture.format, dst_texture.format,
src_buffer.size, src_buffer.size,
@ -503,30 +538,52 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
copy_size, copy_size,
)?; )?;
let (block_width, _) = conv::texture_block_size(dst_texture.format); cmd_buf.buffer_memory_init_actions.extend(
src_buffer
.initialization_status
.check(source.layout.offset..(source.layout.offset + required_buffer_bytes_in_copy))
.map(|range| MemoryInitTrackerAction {
id: source.buffer,
range,
kind: MemoryInitKind::NeedsInitializedMemory,
}),
);
let (block_width, _) = dst_texture.format.describe().block_dimensions;
if !conv::is_valid_copy_dst_texture_format(dst_texture.format) { if !conv::is_valid_copy_dst_texture_format(dst_texture.format) {
Err(TransferError::CopyToForbiddenTextureFormat( Err(TransferError::CopyToForbiddenTextureFormat(
dst_texture.format, dst_texture.format,
))? ))?
} }
let buffer_width = (source.layout.bytes_per_row / bytes_per_block) * block_width; // WebGPU uses the physical size of the texture for copies whereas Vulkan uses
// the virtual size. We have passed validation, so it's safe to use the
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let max_image_extent = dst_texture.kind.level_extent(destination.mip_level as _);
let image_extent = Extent3d {
width: copy_size.width.min(max_image_extent.width),
height: copy_size.height.min(max_image_extent.height),
depth_or_array_layers: copy_size.depth_or_array_layers,
};
let buffer_width = (source.layout.bytes_per_row / bytes_per_block) * block_width as u32;
let region = hal::command::BufferImageCopy { let region = hal::command::BufferImageCopy {
buffer_offset: source.layout.offset, buffer_offset: source.layout.offset,
buffer_width, buffer_width,
buffer_height: source.layout.rows_per_image, buffer_height: source.layout.rows_per_image,
image_layers: dst_layers, image_layers: dst_layers,
image_offset: dst_offset, image_offset: dst_offset,
image_extent: conv::map_extent(copy_size, dst_texture.dimension), image_extent: conv::map_extent(&image_extent, dst_texture.dimension),
}; };
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.pipeline_barrier( cmd_buf_raw.pipeline_barrier(
all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER, all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(), hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barriers), src_barriers.chain(dst_barriers),
); );
cmb_raw.copy_buffer_to_image( cmd_buf_raw.copy_buffer_to_image(
src_raw, src_raw,
dst_raw, dst_raw,
hal::image::Layout::TransferDstOptimal, hal::image::Layout::TransferDstOptimal,
@ -548,7 +605,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?;
let (buffer_guard, mut token) = hub.buffers.read(&mut token); let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token);
let (src_layers, src_selector, src_offset) = let (src_layers, src_selector, src_offset) =
@ -563,7 +620,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}); });
} }
if copy_size.width == 0 || copy_size.height == 0 || copy_size.width == 0 { if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_texture_to_buffer of size 0"); tracing::trace!("Ignoring copy_texture_to_buffer of size 0");
return Ok(()); return Ok(());
} }
@ -623,7 +680,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
CopySide::Source, CopySide::Source,
copy_size, copy_size,
)?; )?;
validate_linear_texture_data( let required_buffer_bytes_in_copy = validate_linear_texture_data(
&destination.layout, &destination.layout,
src_texture.format, src_texture.format,
dst_buffer.size, dst_buffer.size,
@ -632,30 +689,56 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
copy_size, copy_size,
)?; )?;
let (block_width, _) = conv::texture_block_size(src_texture.format); let (block_width, _) = src_texture.format.describe().block_dimensions;
if !conv::is_valid_copy_src_texture_format(src_texture.format) { if !conv::is_valid_copy_src_texture_format(src_texture.format) {
Err(TransferError::CopyFromForbiddenTextureFormat( Err(TransferError::CopyFromForbiddenTextureFormat(
src_texture.format, src_texture.format,
))? ))?
} }
let buffer_width = (destination.layout.bytes_per_row / bytes_per_block) * block_width; cmd_buf.buffer_memory_init_actions.extend(
dst_buffer
.initialization_status
.check(
destination.layout.offset
..(destination.layout.offset + required_buffer_bytes_in_copy),
)
.map(|range| MemoryInitTrackerAction {
id: destination.buffer,
range,
kind: MemoryInitKind::ImplicitlyInitialized,
}),
);
// WebGPU uses the physical size of the texture for copies whereas Vulkan uses
// the virtual size. We have passed validation, so it's safe to use the
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let max_image_extent = src_texture.kind.level_extent(source.mip_level as _);
let image_extent = Extent3d {
width: copy_size.width.min(max_image_extent.width),
height: copy_size.height.min(max_image_extent.height),
depth_or_array_layers: copy_size.depth_or_array_layers,
};
let buffer_width =
(destination.layout.bytes_per_row / bytes_per_block) * block_width as u32;
let region = hal::command::BufferImageCopy { let region = hal::command::BufferImageCopy {
buffer_offset: destination.layout.offset, buffer_offset: destination.layout.offset,
buffer_width, buffer_width,
buffer_height: destination.layout.rows_per_image, buffer_height: destination.layout.rows_per_image,
image_layers: src_layers, image_layers: src_layers,
image_offset: src_offset, image_offset: src_offset,
image_extent: conv::map_extent(copy_size, src_texture.dimension), image_extent: conv::map_extent(&image_extent, src_texture.dimension),
}; };
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.pipeline_barrier( cmd_buf_raw.pipeline_barrier(
all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER, all_buffer_stages() | all_image_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(), hal::memory::Dependencies::empty(),
src_barriers.chain(dst_barrier), src_barriers.chain(dst_barrier),
); );
cmb_raw.copy_image_to_buffer( cmd_buf_raw.copy_image_to_buffer(
src_raw, src_raw,
hal::image::Layout::TransferSrcOptimal, hal::image::Layout::TransferSrcOptimal,
dst_raw, dst_raw,
@ -678,12 +761,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token); let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, command_encoder_id)?; let cmd_buf = CommandBuffer::get_encoder_mut(&mut *cmd_buf_guard, command_encoder_id)?;
let (_, mut token) = hub.buffers.read(&mut token); // skip token let (_, mut token) = hub.buffers.read(&mut token); // skip token
let (texture_guard, _) = hub.textures.read(&mut token); let (texture_guard, _) = hub.textures.read(&mut token);
// we can't hold both src_pending and dst_pending in scope because they
// borrow the buffer tracker mutably...
let mut barriers = Vec::new();
let (src_layers, src_selector, src_offset) = let (src_layers, src_selector, src_offset) =
texture_copy_view_to_hal(source, copy_size, &*texture_guard)?; texture_copy_view_to_hal(source, copy_size, &*texture_guard)?;
let (dst_layers, dst_selector, dst_offset) = let (dst_layers, dst_selector, dst_offset) =
@ -701,7 +781,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}); });
} }
if copy_size.width == 0 || copy_size.height == 0 || copy_size.width == 0 { if copy_size.width == 0 || copy_size.height == 0 || copy_size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring copy_texture_to_texture of size 0"); tracing::trace!("Ignoring copy_texture_to_texture of size 0");
return Ok(()); return Ok(());
} }
@ -723,7 +803,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !src_texture.usage.contains(TextureUsage::COPY_SRC) { if !src_texture.usage.contains(TextureUsage::COPY_SRC) {
Err(TransferError::MissingCopySrcUsageFlag)? Err(TransferError::MissingCopySrcUsageFlag)?
} }
barriers.extend(src_pending.map(|pending| pending.into_hal(src_texture))); //TODO: try to avoid this collection. It's needed because both
// `src_pending` and `dst_pending` try to hold `trackers.textures` mutably.
let mut barriers = src_pending
.map(|pending| pending.into_hal(src_texture))
.collect::<Vec<_>>();
let (dst_texture, dst_pending) = cmd_buf let (dst_texture, dst_pending) = cmd_buf
.trackers .trackers
@ -762,21 +846,37 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
copy_size, copy_size,
)?; )?;
// WebGPU uses the physical size of the texture for copies whereas Vulkan uses
// the virtual size. We have passed validation, so it's safe to use the
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let max_src_image_extent = src_texture.kind.level_extent(source.mip_level as _);
let max_dst_image_extent = dst_texture.kind.level_extent(destination.mip_level as _);
let image_extent = Extent3d {
width: copy_size
.width
.min(max_src_image_extent.width.min(max_dst_image_extent.width)),
height: copy_size
.height
.min(max_src_image_extent.height.min(max_dst_image_extent.height)),
depth_or_array_layers: copy_size.depth_or_array_layers,
};
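The clamping just above follows from the comment: WebGPU specifies copies against the physical (block-padded) size of a mip level, while gfx-hal wants an extent no larger than the level's virtual size. A standalone sketch of the same idea, with an invented `level_extent` helper standing in for `hal::image::Kind::level_extent` and assuming a 2D array texture (array layers do not shrink with the mip level):

#[derive(Clone, Copy)]
struct Extent3d {
    width: u32,
    height: u32,
    depth_or_array_layers: u32,
}

// Virtual size of a mip level: each dimension halves per level, never below 1.
fn level_extent(base: Extent3d, mip_level: u32) -> Extent3d {
    Extent3d {
        width: (base.width >> mip_level).max(1),
        height: (base.height >> mip_level).max(1),
        depth_or_array_layers: base.depth_or_array_layers, // array layers are unaffected
    }
}

// Clamp the requested copy size to the virtual size of the chosen mip level.
fn clamp_copy_extent(copy_size: Extent3d, base: Extent3d, mip_level: u32) -> Extent3d {
    let max = level_extent(base, mip_level);
    Extent3d {
        width: copy_size.width.min(max.width),
        height: copy_size.height.min(max.height),
        depth_or_array_layers: copy_size.depth_or_array_layers,
    }
}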
let region = hal::command::ImageCopy { let region = hal::command::ImageCopy {
src_subresource: src_layers, src_subresource: src_layers,
src_offset, src_offset,
dst_subresource: dst_layers, dst_subresource: dst_layers,
dst_offset, dst_offset,
extent: conv::map_extent(copy_size, src_texture.dimension), extent: conv::map_extent(&image_extent, src_texture.dimension),
}; };
let cmb_raw = cmd_buf.raw.last_mut().unwrap(); let cmd_buf_raw = cmd_buf.raw.last_mut().unwrap();
unsafe { unsafe {
cmb_raw.pipeline_barrier( cmd_buf_raw.pipeline_barrier(
all_image_stages()..hal::pso::PipelineStage::TRANSFER, all_image_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(), hal::memory::Dependencies::empty(),
barriers, barriers.into_iter(),
); );
cmb_raw.copy_image( cmd_buf_raw.copy_image(
src_raw, src_raw,
hal::image::Layout::TransferSrcOptimal, hal::image::Layout::TransferSrcOptimal,
dst_raw, dst_raw,


@ -9,6 +9,27 @@ use crate::{
use std::convert::TryInto; use std::convert::TryInto;
pub fn map_adapter_info(
info: hal::adapter::AdapterInfo,
backend: wgt::Backend,
) -> wgt::AdapterInfo {
use hal::adapter::DeviceType as Dt;
wgt::AdapterInfo {
name: info.name,
vendor: info.vendor,
device: info.device,
device_type: match info.device_type {
Dt::Other => wgt::DeviceType::Other,
Dt::IntegratedGpu => wgt::DeviceType::IntegratedGpu,
Dt::DiscreteGpu => wgt::DeviceType::DiscreteGpu,
Dt::VirtualGpu => wgt::DeviceType::VirtualGpu,
Dt::Cpu => wgt::DeviceType::Cpu,
},
backend,
}
}
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) { pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
use hal::buffer::Usage as U; use hal::buffer::Usage as U;
use hal::memory::Properties as P; use hal::memory::Properties as P;
@ -139,7 +160,7 @@ pub fn map_extent(extent: &wgt::Extent3d, dim: wgt::TextureDimension) -> hal::im
height: extent.height, height: extent.height,
depth: match dim { depth: match dim {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => 1, wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => 1,
wgt::TextureDimension::D3 => extent.depth, wgt::TextureDimension::D3 => extent.depth_or_array_layers,
}, },
} }
} }
@ -156,21 +177,15 @@ pub fn map_primitive_topology(primitive_topology: wgt::PrimitiveTopology) -> hal
} }
} }
pub fn map_color_state_descriptor(desc: &wgt::ColorStateDescriptor) -> hal::pso::ColorBlendDesc { pub fn map_color_target_state(desc: &wgt::ColorTargetState) -> hal::pso::ColorBlendDesc {
let color_mask = desc.write_mask; let color_mask = desc.write_mask;
let blend_state = if desc.color_blend != wgt::BlendDescriptor::REPLACE let blend = desc.blend.as_ref().map(|bs| hal::pso::BlendState {
|| desc.alpha_blend != wgt::BlendDescriptor::REPLACE color: map_blend_component(&bs.color),
{ alpha: map_blend_component(&bs.alpha),
Some(hal::pso::BlendState { });
color: map_blend_descriptor(&desc.color_blend),
alpha: map_blend_descriptor(&desc.alpha_blend),
})
} else {
None
};
hal::pso::ColorBlendDesc { hal::pso::ColorBlendDesc {
mask: map_color_write_flags(color_mask), mask: map_color_write_flags(color_mask),
blend: blend_state, blend,
} }
} }
@ -194,21 +209,21 @@ fn map_color_write_flags(flags: wgt::ColorWrite) -> hal::pso::ColorMask {
value value
} }
fn map_blend_descriptor(blend_desc: &wgt::BlendDescriptor) -> hal::pso::BlendOp { fn map_blend_component(component: &wgt::BlendComponent) -> hal::pso::BlendOp {
use hal::pso::BlendOp as H; use hal::pso::BlendOp as H;
use wgt::BlendOperation as Bo; use wgt::BlendOperation as Bo;
match blend_desc.operation { match component.operation {
Bo::Add => H::Add { Bo::Add => H::Add {
src: map_blend_factor(blend_desc.src_factor), src: map_blend_factor(component.src_factor),
dst: map_blend_factor(blend_desc.dst_factor), dst: map_blend_factor(component.dst_factor),
}, },
Bo::Subtract => H::Sub { Bo::Subtract => H::Sub {
src: map_blend_factor(blend_desc.src_factor), src: map_blend_factor(component.src_factor),
dst: map_blend_factor(blend_desc.dst_factor), dst: map_blend_factor(component.dst_factor),
}, },
Bo::ReverseSubtract => H::RevSub { Bo::ReverseSubtract => H::RevSub {
src: map_blend_factor(blend_desc.src_factor), src: map_blend_factor(component.src_factor),
dst: map_blend_factor(blend_desc.dst_factor), dst: map_blend_factor(component.dst_factor),
}, },
Bo::Min => H::Min, Bo::Min => H::Min,
Bo::Max => H::Max, Bo::Max => H::Max,
@ -235,9 +250,7 @@ fn map_blend_factor(blend_factor: wgt::BlendFactor) -> hal::pso::Factor {
} }
} }
pub fn map_depth_stencil_state_descriptor( pub fn map_depth_stencil_state(desc: &wgt::DepthStencilState) -> hal::pso::DepthStencilDesc {
desc: &wgt::DepthStencilStateDescriptor,
) -> hal::pso::DepthStencilDesc {
hal::pso::DepthStencilDesc { hal::pso::DepthStencilDesc {
depth: if desc.is_depth_enabled() { depth: if desc.is_depth_enabled() {
Some(hal::pso::DepthTest { Some(hal::pso::DepthTest {
@ -269,9 +282,7 @@ pub fn map_depth_stencil_state_descriptor(
} }
} }
fn map_stencil_face( fn map_stencil_face(stencil_state_face_desc: &wgt::StencilFaceState) -> hal::pso::StencilFace {
stencil_state_face_desc: &wgt::StencilStateFaceDescriptor,
) -> hal::pso::StencilFace {
hal::pso::StencilFace { hal::pso::StencilFace {
fun: map_compare_function(stencil_state_face_desc.compare), fun: map_compare_function(stencil_state_face_desc.compare),
op_fail: map_stencil_operation(stencil_state_face_desc.fail_op), op_fail: map_stencil_operation(stencil_state_face_desc.fail_op),
@ -396,124 +407,48 @@ pub(crate) fn map_texture_format(
Tf::Bc6hRgbUfloat => H::Bc6hUfloat, Tf::Bc6hRgbUfloat => H::Bc6hUfloat,
Tf::Bc7RgbaUnorm => H::Bc7Unorm, Tf::Bc7RgbaUnorm => H::Bc7Unorm,
Tf::Bc7RgbaUnormSrgb => H::Bc7Srgb, Tf::Bc7RgbaUnormSrgb => H::Bc7Srgb,
}
}
pub fn texture_block_size(format: wgt::TextureFormat) -> (u32, u32) { // ETC compressed formats
use wgt::TextureFormat as Tf; Tf::Etc2RgbUnorm => H::Etc2R8g8b8Unorm,
match format { Tf::Etc2RgbUnormSrgb => H::Etc2R8g8b8Srgb,
Tf::R8Unorm Tf::Etc2RgbA1Unorm => H::Etc2R8g8b8a1Unorm,
| Tf::R8Snorm Tf::Etc2RgbA1UnormSrgb => H::Etc2R8g8b8a1Srgb,
| Tf::R8Uint Tf::Etc2RgbA8Unorm => H::Etc2R8g8b8a8Unorm,
| Tf::R8Sint Tf::Etc2RgbA8UnormSrgb => H::Etc2R8g8b8a8Unorm,
| Tf::R16Uint Tf::EacRUnorm => H::EacR11Unorm,
| Tf::R16Sint Tf::EacRSnorm => H::EacR11Snorm,
| Tf::R16Float Tf::EtcRgUnorm => H::EacR11g11Unorm,
| Tf::Rg8Unorm Tf::EtcRgSnorm => H::EacR11g11Snorm,
| Tf::Rg8Snorm
| Tf::Rg8Uint
| Tf::Rg8Sint
| Tf::R32Uint
| Tf::R32Sint
| Tf::R32Float
| Tf::Rg16Uint
| Tf::Rg16Sint
| Tf::Rg16Float
| Tf::Rgba8Unorm
| Tf::Rgba8UnormSrgb
| Tf::Rgba8Snorm
| Tf::Rgba8Uint
| Tf::Rgba8Sint
| Tf::Bgra8Unorm
| Tf::Bgra8UnormSrgb
| Tf::Rgb10a2Unorm
| Tf::Rg11b10Float
| Tf::Rg32Uint
| Tf::Rg32Sint
| Tf::Rg32Float
| Tf::Rgba16Uint
| Tf::Rgba16Sint
| Tf::Rgba16Float
| Tf::Rgba32Uint
| Tf::Rgba32Sint
| Tf::Rgba32Float
| Tf::Depth32Float
| Tf::Depth24Plus
| Tf::Depth24PlusStencil8 => (1, 1),
Tf::Bc1RgbaUnorm // ASTC compressed formats
| Tf::Bc1RgbaUnormSrgb Tf::Astc4x4RgbaUnorm => H::Astc4x4Srgb,
| Tf::Bc2RgbaUnorm Tf::Astc4x4RgbaUnormSrgb => H::Astc4x4Srgb,
| Tf::Bc2RgbaUnormSrgb Tf::Astc5x4RgbaUnorm => H::Astc5x4Unorm,
| Tf::Bc3RgbaUnorm Tf::Astc5x4RgbaUnormSrgb => H::Astc5x4Srgb,
| Tf::Bc3RgbaUnormSrgb Tf::Astc5x5RgbaUnorm => H::Astc5x5Unorm,
| Tf::Bc4RUnorm Tf::Astc5x5RgbaUnormSrgb => H::Astc5x5Srgb,
| Tf::Bc4RSnorm Tf::Astc6x5RgbaUnorm => H::Astc6x5Unorm,
| Tf::Bc5RgUnorm Tf::Astc6x5RgbaUnormSrgb => H::Astc6x5Srgb,
| Tf::Bc5RgSnorm Tf::Astc6x6RgbaUnorm => H::Astc6x6Unorm,
| Tf::Bc6hRgbUfloat Tf::Astc6x6RgbaUnormSrgb => H::Astc6x6Srgb,
| Tf::Bc6hRgbSfloat Tf::Astc8x5RgbaUnorm => H::Astc8x5Unorm,
| Tf::Bc7RgbaUnorm Tf::Astc8x5RgbaUnormSrgb => H::Astc8x5Srgb,
| Tf::Bc7RgbaUnormSrgb => (4, 4), Tf::Astc8x6RgbaUnorm => H::Astc8x6Unorm,
} Tf::Astc8x6RgbaUnormSrgb => H::Astc8x6Srgb,
} Tf::Astc10x5RgbaUnorm => H::Astc10x5Unorm,
Tf::Astc10x5RgbaUnormSrgb => H::Astc10x5Srgb,
pub fn texture_features(format: wgt::TextureFormat) -> wgt::Features { Tf::Astc10x6RgbaUnorm => H::Astc10x6Unorm,
use wgt::TextureFormat as Tf; Tf::Astc10x6RgbaUnormSrgb => H::Astc10x6Srgb,
match format { Tf::Astc8x8RgbaUnorm => H::Astc8x8Unorm,
Tf::R8Unorm Tf::Astc8x8RgbaUnormSrgb => H::Astc8x8Srgb,
| Tf::R8Snorm Tf::Astc10x8RgbaUnorm => H::Astc10x8Unorm,
| Tf::R8Uint Tf::Astc10x8RgbaUnormSrgb => H::Astc10x8Srgb,
| Tf::R8Sint Tf::Astc10x10RgbaUnorm => H::Astc10x10Unorm,
| Tf::R16Uint Tf::Astc10x10RgbaUnormSrgb => H::Astc10x10Srgb,
| Tf::R16Sint Tf::Astc12x10RgbaUnorm => H::Astc12x10Unorm,
| Tf::R16Float Tf::Astc12x10RgbaUnormSrgb => H::Astc12x10Srgb,
| Tf::Rg8Unorm Tf::Astc12x12RgbaUnorm => H::Astc12x12Unorm,
| Tf::Rg8Snorm Tf::Astc12x12RgbaUnormSrgb => H::Astc12x12Srgb,
| Tf::Rg8Uint
| Tf::Rg8Sint
| Tf::R32Uint
| Tf::R32Sint
| Tf::R32Float
| Tf::Rg16Uint
| Tf::Rg16Sint
| Tf::Rg16Float
| Tf::Rgba8Unorm
| Tf::Rgba8UnormSrgb
| Tf::Rgba8Snorm
| Tf::Rgba8Uint
| Tf::Rgba8Sint
| Tf::Bgra8Unorm
| Tf::Bgra8UnormSrgb
| Tf::Rgb10a2Unorm
| Tf::Rg11b10Float
| Tf::Rg32Uint
| Tf::Rg32Sint
| Tf::Rg32Float
| Tf::Rgba16Uint
| Tf::Rgba16Sint
| Tf::Rgba16Float
| Tf::Rgba32Uint
| Tf::Rgba32Sint
| Tf::Rgba32Float
| Tf::Depth32Float
| Tf::Depth24Plus
| Tf::Depth24PlusStencil8 => wgt::Features::empty(),
Tf::Bc1RgbaUnorm
| Tf::Bc1RgbaUnormSrgb
| Tf::Bc2RgbaUnorm
| Tf::Bc2RgbaUnormSrgb
| Tf::Bc3RgbaUnorm
| Tf::Bc3RgbaUnormSrgb
| Tf::Bc4RUnorm
| Tf::Bc4RSnorm
| Tf::Bc5RgUnorm
| Tf::Bc5RgSnorm
| Tf::Bc6hRgbUfloat
| Tf::Bc6hRgbSfloat
| Tf::Bc7RgbaUnorm
| Tf::Bc7RgbaUnormSrgb => wgt::Features::TEXTURE_COMPRESSION_BC,
} }
} }
@ -521,36 +456,40 @@ pub fn map_vertex_format(vertex_format: wgt::VertexFormat) -> hal::format::Forma
use hal::format::Format as H; use hal::format::Format as H;
use wgt::VertexFormat as Vf; use wgt::VertexFormat as Vf;
match vertex_format { match vertex_format {
Vf::Uchar2 => H::Rg8Uint, Vf::Uint8x2 => H::Rg8Uint,
Vf::Uchar4 => H::Rgba8Uint, Vf::Uint8x4 => H::Rgba8Uint,
Vf::Char2 => H::Rg8Sint, Vf::Sint8x2 => H::Rg8Sint,
Vf::Char4 => H::Rgba8Sint, Vf::Sint8x4 => H::Rgba8Sint,
Vf::Uchar2Norm => H::Rg8Unorm, Vf::Unorm8x2 => H::Rg8Unorm,
Vf::Uchar4Norm => H::Rgba8Unorm, Vf::Unorm8x4 => H::Rgba8Unorm,
Vf::Char2Norm => H::Rg8Snorm, Vf::Snorm8x2 => H::Rg8Snorm,
Vf::Char4Norm => H::Rgba8Snorm, Vf::Snorm8x4 => H::Rgba8Snorm,
Vf::Ushort2 => H::Rg16Uint, Vf::Uint16x2 => H::Rg16Uint,
Vf::Ushort4 => H::Rgba16Uint, Vf::Uint16x4 => H::Rgba16Uint,
Vf::Short2 => H::Rg16Sint, Vf::Sint16x2 => H::Rg16Sint,
Vf::Short4 => H::Rgba16Sint, Vf::Sint16x4 => H::Rgba16Sint,
Vf::Ushort2Norm => H::Rg16Unorm, Vf::Unorm16x2 => H::Rg16Unorm,
Vf::Ushort4Norm => H::Rgba16Unorm, Vf::Unorm16x4 => H::Rgba16Unorm,
Vf::Short2Norm => H::Rg16Snorm, Vf::Snorm16x2 => H::Rg16Snorm,
Vf::Short4Norm => H::Rgba16Snorm, Vf::Snorm16x4 => H::Rgba16Snorm,
Vf::Half2 => H::Rg16Sfloat, Vf::Float16x2 => H::Rg16Sfloat,
Vf::Half4 => H::Rgba16Sfloat, Vf::Float16x4 => H::Rgba16Sfloat,
Vf::Float => H::R32Sfloat, Vf::Float32 => H::R32Sfloat,
Vf::Float2 => H::Rg32Sfloat, Vf::Float32x2 => H::Rg32Sfloat,
Vf::Float3 => H::Rgb32Sfloat, Vf::Float32x3 => H::Rgb32Sfloat,
Vf::Float4 => H::Rgba32Sfloat, Vf::Float32x4 => H::Rgba32Sfloat,
Vf::Uint => H::R32Uint, Vf::Uint32 => H::R32Uint,
Vf::Uint2 => H::Rg32Uint, Vf::Uint32x2 => H::Rg32Uint,
Vf::Uint3 => H::Rgb32Uint, Vf::Uint32x3 => H::Rgb32Uint,
Vf::Uint4 => H::Rgba32Uint, Vf::Uint32x4 => H::Rgba32Uint,
Vf::Int => H::R32Sint, Vf::Sint32 => H::R32Sint,
Vf::Int2 => H::Rg32Sint, Vf::Sint32x2 => H::Rg32Sint,
Vf::Int3 => H::Rgb32Sint, Vf::Sint32x3 => H::Rgb32Sint,
Vf::Int4 => H::Rgba32Sint, Vf::Sint32x4 => H::Rgba32Sint,
Vf::Float64 => H::R64Sfloat,
Vf::Float64x2 => H::Rg64Sfloat,
Vf::Float64x3 => H::Rgb64Sfloat,
Vf::Float64x4 => H::Rgba64Sfloat,
} }
} }
@ -579,52 +518,63 @@ pub fn map_texture_dimension_size(
wgt::Extent3d { wgt::Extent3d {
width, width,
height, height,
depth, depth_or_array_layers,
}: wgt::Extent3d, }: wgt::Extent3d,
sample_size: u32, sample_size: u32,
limits: &wgt::Limits,
) -> Result<hal::image::Kind, resource::TextureDimensionError> { ) -> Result<hal::image::Kind, resource::TextureDimensionError> {
use hal::image::Kind as H; use hal::image::Kind as H;
use resource::TextureDimensionError as Tde; use resource::{TextureDimensionError as Tde, TextureErrorDimension as Ted};
use wgt::TextureDimension::*; use wgt::TextureDimension::*;
let zero_dim = if width == 0 { let layers = depth_or_array_layers.try_into().unwrap_or(!0);
Some(resource::TextureErrorDimension::X) let (kind, extent_limits, sample_limit) = match dimension {
} else if height == 0 { D1 => (
Some(resource::TextureErrorDimension::Y) H::D1(width, layers),
} else if depth == 0 { [
Some(resource::TextureErrorDimension::Z) limits.max_texture_dimension_1d,
} else { 1,
None limits.max_texture_array_layers,
],
1,
),
D2 => (
H::D2(width, height, layers, sample_size as u8),
[
limits.max_texture_dimension_2d,
limits.max_texture_dimension_2d,
limits.max_texture_array_layers,
],
32,
),
D3 => (
H::D3(width, height, depth_or_array_layers),
[
limits.max_texture_dimension_3d,
limits.max_texture_dimension_3d,
limits.max_texture_dimension_3d,
],
1,
),
}; };
if let Some(dim) = zero_dim {
return Err(resource::TextureDimensionError::Zero(dim)); for (&dim, (&given, &limit)) in [Ted::X, Ted::Y, Ted::Z].iter().zip(
[width, height, depth_or_array_layers]
.iter()
.zip(extent_limits.iter()),
) {
if given == 0 {
return Err(Tde::Zero(dim));
}
if given > limit {
return Err(Tde::LimitExceeded { dim, given, limit });
}
}
if sample_size == 0 || sample_size > sample_limit || !is_power_of_two(sample_size) {
return Err(Tde::InvalidSampleCount(sample_size));
} }
Ok(match dimension { Ok(kind)
D1 => {
if height != 1 {
return Err(Tde::InvalidHeight);
}
if sample_size != 1 {
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth.try_into().unwrap_or(!0);
H::D1(width, layers)
}
D2 => {
if sample_size > 32 || !is_power_of_two(sample_size) {
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth.try_into().unwrap_or(!0);
H::D2(width, height, layers, sample_size as u8)
}
D3 => {
if sample_size != 1 {
return Err(Tde::InvalidSampleCount(sample_size));
}
H::D3(width, height, depth)
}
})
} }
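The rewritten validation above walks the three dimensions against a per-dimension limit table. An isolated sketch of that zip-based check, with invented error and limit types standing in for `resource::TextureDimensionError` and `wgt::Limits`:

// Invented names; mirrors the dimension/limit pairing used above.
#[derive(Clone, Copy, Debug, PartialEq)]
enum Dim { X, Y, Z }

#[derive(Debug, PartialEq)]
enum ExtentError {
    Zero(Dim),
    LimitExceeded { dim: Dim, given: u32, limit: u32 },
}

fn check_extent(given: [u32; 3], limits: [u32; 3]) -> Result<(), ExtentError> {
    for (&dim, (&given, &limit)) in [Dim::X, Dim::Y, Dim::Z]
        .iter()
        .zip(given.iter().zip(limits.iter()))
    {
        if given == 0 {
            return Err(ExtentError::Zero(dim));
        }
        if given > limit {
            return Err(ExtentError::LimitExceeded { dim, given, limit });
        }
    }
    Ok(())
}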
pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind { pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
@ -670,7 +620,7 @@ pub(crate) fn map_buffer_state(usage: resource::BufferUse) -> hal::buffer::State
access |= A::SHADER_READ; access |= A::SHADER_READ;
} }
if usage.contains(W::STORAGE_STORE) { if usage.contains(W::STORAGE_STORE) {
access |= A::SHADER_WRITE; access |= A::SHADER_READ | A::SHADER_WRITE;
} }
if usage.contains(W::INDIRECT) { if usage.contains(W::INDIRECT) {
access |= A::INDIRECT_COMMAND_READ; access |= A::INDIRECT_COMMAND_READ;
@ -732,6 +682,43 @@ pub(crate) fn map_texture_state(
(access, layout) (access, layout)
} }
pub fn map_query_type(ty: &wgt::QueryType) -> (hal::query::Type, u32) {
match ty {
wgt::QueryType::PipelineStatistics(pipeline_statistics) => {
let mut ps = hal::query::PipelineStatistic::empty();
ps.set(
hal::query::PipelineStatistic::VERTEX_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::VERTEX_SHADER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::CLIPPING_INVOCATIONS,
pipeline_statistics.contains(wgt::PipelineStatisticsTypes::CLIPPER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::CLIPPING_PRIMITIVES,
pipeline_statistics.contains(wgt::PipelineStatisticsTypes::CLIPPER_PRIMITIVES_OUT),
);
ps.set(
hal::query::PipelineStatistic::FRAGMENT_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::FRAGMENT_SHADER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::COMPUTE_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::COMPUTE_SHADER_INVOCATIONS),
);
(
hal::query::Type::PipelineStatistics(ps),
pipeline_statistics.bits().count_ones(),
)
}
wgt::QueryType::Timestamp => (hal::query::Type::Timestamp, 1),
}
}
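The `u32` returned alongside the query type above is the number of result values each query produces, which a later resolve step multiplies by. A small illustrative helper (names are invented, and the alignment and offset rules of the real resolve path are ignored):

// Bytes needed in a resolve buffer for `query_count` pipeline-statistics queries,
// where `enabled` is the statistics bitmask given in the query set descriptor.
fn resolve_buffer_size(query_count: u64, enabled: u32) -> u64 {
    let values_per_query = enabled.count_ones() as u64; // matches bits().count_ones() above
    query_count * values_per_query * std::mem::size_of::<u64>() as u64
}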
pub fn map_load_store_ops<V>(channel: &PassChannel<V>) -> hal::pass::AttachmentOps { pub fn map_load_store_ops<V>(channel: &PassChannel<V>) -> hal::pass::AttachmentOps {
hal::pass::AttachmentOps { hal::pass::AttachmentOps {
load: match channel.load_op { load: match channel.load_op {
@ -788,46 +775,84 @@ pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode {
} }
} }
pub fn map_rasterization_state_descriptor( pub fn map_primitive_state_to_input_assembler(
desc: &wgt::RasterizationStateDescriptor, desc: &wgt::PrimitiveState,
) -> hal::pso::InputAssemblerDesc {
hal::pso::InputAssemblerDesc {
primitive: map_primitive_topology(desc.topology),
with_adjacency: false,
restart_index: desc.strip_index_format.map(map_index_format),
}
}
pub fn map_primitive_state_to_rasterizer(
desc: &wgt::PrimitiveState,
depth_stencil: Option<&wgt::DepthStencilState>,
) -> hal::pso::Rasterizer { ) -> hal::pso::Rasterizer {
use hal::pso; use hal::pso;
let (depth_clamping, depth_bias) = match depth_stencil {
Some(dsd) => {
let bias = if dsd.bias.is_enabled() {
Some(pso::State::Static(pso::DepthBias {
const_factor: dsd.bias.constant as f32,
slope_factor: dsd.bias.slope_scale,
clamp: dsd.bias.clamp,
}))
} else {
None
};
(dsd.clamp_depth, bias)
}
None => (false, None),
};
pso::Rasterizer { pso::Rasterizer {
depth_clamping: desc.clamp_depth, depth_clamping,
polygon_mode: match desc.polygon_mode { polygon_mode: match desc.polygon_mode {
wgt::PolygonMode::Fill => pso::PolygonMode::Fill, wgt::PolygonMode::Fill => pso::PolygonMode::Fill,
wgt::PolygonMode::Line => pso::PolygonMode::Line, wgt::PolygonMode::Line => pso::PolygonMode::Line,
wgt::PolygonMode::Point => pso::PolygonMode::Point, wgt::PolygonMode::Point => pso::PolygonMode::Point,
}, },
cull_face: match desc.cull_mode { cull_face: match desc.cull_mode {
wgt::CullMode::None => pso::Face::empty(), None => pso::Face::empty(),
wgt::CullMode::Front => pso::Face::FRONT, Some(wgt::Face::Front) => pso::Face::FRONT,
wgt::CullMode::Back => pso::Face::BACK, Some(wgt::Face::Back) => pso::Face::BACK,
}, },
front_face: match desc.front_face { front_face: match desc.front_face {
wgt::FrontFace::Ccw => pso::FrontFace::CounterClockwise, wgt::FrontFace::Ccw => pso::FrontFace::CounterClockwise,
wgt::FrontFace::Cw => pso::FrontFace::Clockwise, wgt::FrontFace::Cw => pso::FrontFace::Clockwise,
}, },
depth_bias: if desc.depth_bias != 0 depth_bias,
|| desc.depth_bias_slope_scale != 0.0
|| desc.depth_bias_clamp != 0.0
{
Some(pso::State::Static(pso::DepthBias {
const_factor: desc.depth_bias as f32,
slope_factor: desc.depth_bias_slope_scale,
clamp: desc.depth_bias_clamp,
}))
} else {
None
},
conservative: false, conservative: false,
line_width: pso::State::Static(1.0), line_width: pso::State::Static(1.0),
} }
} }
pub fn map_multisample_state(desc: &wgt::MultisampleState) -> hal::pso::Multisampling {
hal::pso::Multisampling {
rasterization_samples: desc.count as _,
sample_shading: None,
sample_mask: desc.mask,
alpha_coverage: desc.alpha_to_coverage_enabled,
alpha_to_one: false,
}
}
pub fn map_index_format(index_format: wgt::IndexFormat) -> hal::IndexType { pub fn map_index_format(index_format: wgt::IndexFormat) -> hal::IndexType {
match index_format { match index_format {
wgt::IndexFormat::Uint16 => hal::IndexType::U16, wgt::IndexFormat::Uint16 => hal::IndexType::U16,
wgt::IndexFormat::Uint32 => hal::IndexType::U32, wgt::IndexFormat::Uint32 => hal::IndexType::U32,
} }
} }
/// Take `value` and round it up to the nearest multiple of `alignment`.
///
/// ```text
/// (0, 3) -> 0
/// (1, 3) -> 3
/// (2, 3) -> 3
/// (3, 3) -> 3
/// (4, 3) -> 6
/// ...
/// ```
pub fn align_up(value: u32, alignment: u32) -> u32 {
((value + alignment - 1) / alignment) * alignment
}


@ -3,21 +3,15 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::DeviceError; use super::DeviceError;
use hal::device::Device; use hal::device::Device as _;
use std::{borrow::Cow, fmt, iter, ptr::NonNull, sync::Arc}; use std::{borrow::Cow, iter, ptr::NonNull};
pub struct MemoryAllocator<B: hal::Backend>(gpu_alloc::GpuAllocator<Arc<B::Memory>>);
#[derive(Debug)] #[derive(Debug)]
pub struct MemoryBlock<B: hal::Backend>(gpu_alloc::MemoryBlock<Arc<B::Memory>>); pub struct MemoryAllocator<B: hal::Backend>(gpu_alloc::GpuAllocator<B::Memory>);
#[derive(Debug)]
pub struct MemoryBlock<B: hal::Backend>(gpu_alloc::MemoryBlock<B::Memory>);
struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device); struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device);
//TODO: https://github.com/zakarumych/gpu-alloc/issues/9
impl<B: hal::Backend> fmt::Debug for MemoryAllocator<B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "MemoryAllocator")
}
}
impl<B: hal::Backend> MemoryAllocator<B> { impl<B: hal::Backend> MemoryAllocator<B> {
pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self { pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
let mem_config = gpu_alloc::Config { let mem_config = gpu_alloc::Config {
@ -99,17 +93,19 @@ impl<B: hal::Backend> MemoryBlock<B> {
device: &B::Device, device: &B::Device,
buffer: &mut B::Buffer, buffer: &mut B::Buffer,
) -> Result<(), DeviceError> { ) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe { unsafe {
device device
.bind_buffer_memory(self.0.memory(), self.0.offset(), buffer) .bind_buffer_memory(mem, self.0.offset(), buffer)
.map_err(DeviceError::from_bind) .map_err(DeviceError::from_bind)
} }
} }
pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> { pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe { unsafe {
device device
.bind_image_memory(self.0.memory(), self.0.offset(), image) .bind_image_memory(mem, self.0.offset(), image)
.map_err(DeviceError::from_bind) .map_err(DeviceError::from_bind)
} }
} }
@ -184,9 +180,10 @@ impl<B: hal::Backend> MemoryBlock<B> {
size: Option<wgt::BufferAddress>, size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> { ) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size); let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe { unsafe {
device device
.flush_mapped_memory_ranges(iter::once((&**self.0.memory(), segment))) .flush_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory)) .or(Err(DeviceError::OutOfMemory))
} }
} }
@ -198,40 +195,39 @@ impl<B: hal::Backend> MemoryBlock<B> {
size: Option<wgt::BufferAddress>, size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> { ) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size); let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe { unsafe {
device device
.invalidate_mapped_memory_ranges(iter::once((&**self.0.memory(), segment))) .invalidate_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory)) .or(Err(DeviceError::OutOfMemory))
} }
} }
} }
impl<B: hal::Backend> gpu_alloc::MemoryDevice<Arc<B::Memory>> for MemoryDevice<'_, B> { impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B> {
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn allocate_memory( unsafe fn allocate_memory(
&self, &self,
size: u64, size: u64,
memory_type: u32, memory_type: u32,
flags: gpu_alloc::AllocationFlags, flags: gpu_alloc::AllocationFlags,
) -> Result<Arc<B::Memory>, gpu_alloc::OutOfMemory> { ) -> Result<B::Memory, gpu_alloc::OutOfMemory> {
assert!(flags.is_empty()); assert!(flags.is_empty());
self.0 self.0
.allocate_memory(hal::MemoryTypeId(memory_type as _), size) .allocate_memory(hal::MemoryTypeId(memory_type as _), size)
.map(Arc::new)
.map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory) .map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
} }
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn deallocate_memory(&self, memory: Arc<B::Memory>) { unsafe fn deallocate_memory(&self, memory: B::Memory) {
let memory = Arc::try_unwrap(memory).expect("Memory must not be used anywhere");
self.0.free_memory(memory); self.0.free_memory(memory);
} }
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn map_memory( unsafe fn map_memory(
&self, &self,
memory: &Arc<B::Memory>, memory: &mut B::Memory,
offset: u64, offset: u64,
size: u64, size: u64,
) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> { ) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
@ -252,22 +248,22 @@ impl<B: hal::Backend> gpu_alloc::MemoryDevice<Arc<B::Memory>> for MemoryDevice<'
} }
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn unmap_memory(&self, memory: &Arc<B::Memory>) { unsafe fn unmap_memory(&self, memory: &mut B::Memory) {
self.0.unmap_memory(memory); self.0.unmap_memory(memory);
} }
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn invalidate_memory_ranges( unsafe fn invalidate_memory_ranges(
&self, &self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>], ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> { ) -> Result<(), gpu_alloc::OutOfMemory> {
self.0 self.0
.invalidate_mapped_memory_ranges(ranges.iter().map(|range| { .invalidate_mapped_memory_ranges(ranges.iter().map(|r| {
( (
&**range.memory, r.memory,
hal::memory::Segment { hal::memory::Segment {
offset: range.offset, offset: r.offset,
size: Some(range.size), size: Some(r.size),
}, },
) )
})) }))
@ -277,15 +273,15 @@ impl<B: hal::Backend> gpu_alloc::MemoryDevice<Arc<B::Memory>> for MemoryDevice<'
#[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))] #[cfg_attr(feature = "tracing", tracing::instrument(skip(self)))]
unsafe fn flush_memory_ranges( unsafe fn flush_memory_ranges(
&self, &self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, Arc<B::Memory>>], ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> { ) -> Result<(), gpu_alloc::OutOfMemory> {
self.0 self.0
.flush_mapped_memory_ranges(ranges.iter().map(|range| { .flush_mapped_memory_ranges(ranges.iter().map(|r| {
( (
&**range.memory, r.memory,
hal::memory::Segment { hal::memory::Segment {
offset: range.offset, offset: r.offset,
size: Some(range.size), size: Some(r.size),
}, },
) )
})) }))


@ -17,7 +17,7 @@ struct DescriptorDevice<'a, B: hal::Backend>(&'a B::Device);
impl<B: hal::Backend> DescriptorAllocator<B> { impl<B: hal::Backend> DescriptorAllocator<B> {
pub fn new() -> Self { pub fn new() -> Self {
DescriptorAllocator(unsafe { gpu_descriptor::DescriptorAllocator::new(0) }) DescriptorAllocator(gpu_descriptor::DescriptorAllocator::new(0))
} }
pub fn allocate( pub fn allocate(
@ -27,18 +27,19 @@ impl<B: hal::Backend> DescriptorAllocator<B> {
layout_descriptor_count: &DescriptorTotalCount, layout_descriptor_count: &DescriptorTotalCount,
count: u32, count: u32,
) -> Result<Vec<DescriptorSet<B>>, DeviceError> { ) -> Result<Vec<DescriptorSet<B>>, DeviceError> {
self.0 unsafe {
.allocate( self.0.allocate(
&DescriptorDevice::<B>(device), &DescriptorDevice::<B>(device),
layout, layout,
gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(), gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
layout_descriptor_count, layout_descriptor_count,
count, count,
) )
.map_err(|err| { }
tracing::warn!("Descriptor set allocation failed: {}", err); .map_err(|err| {
DeviceError::OutOfMemory tracing::warn!("Descriptor set allocation failed: {}", err);
}) DeviceError::OutOfMemory
})
} }
pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<B>>) { pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<B>>) {
@ -46,7 +47,7 @@ impl<B: hal::Backend> DescriptorAllocator<B> {
} }
pub fn cleanup(&mut self, device: &B::Device) { pub fn cleanup(&mut self, device: &B::Device) {
self.0.cleanup(&DescriptorDevice::<B>(device)) unsafe { self.0.cleanup(&DescriptorDevice::<B>(device)) }
} }
} }
@ -120,8 +121,8 @@ impl<B: hal::Backend>
match hal::device::Device::create_descriptor_pool( match hal::device::Device::create_descriptor_pool(
self.0, self.0,
max_sets as usize, max_sets as usize,
ranges, ranges.into_iter(),
hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits() as u32), hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits()),
) { ) {
Ok(pool) => Ok(pool), Ok(pool) => Ok(pool),
Err(hal::device::OutOfMemory::Host) => { Err(hal::device::OutOfMemory::Host) => {
@ -140,7 +141,7 @@ impl<B: hal::Backend>
unsafe fn alloc_descriptor_sets<'a>( unsafe fn alloc_descriptor_sets<'a>(
&self, &self,
pool: &mut B::DescriptorPool, pool: &mut B::DescriptorPool,
layouts: impl Iterator<Item = &'a B::DescriptorSetLayout>, layouts: impl ExactSizeIterator<Item = &'a B::DescriptorSetLayout>,
sets: &mut impl Extend<B::DescriptorSet>, sets: &mut impl Extend<B::DescriptorSet>,
) -> Result<(), gpu_descriptor::DeviceAllocationError> { ) -> Result<(), gpu_descriptor::DeviceAllocationError> {
use gpu_descriptor::DeviceAllocationError as Dae; use gpu_descriptor::DeviceAllocationError as Dae;


@ -14,7 +14,7 @@ use crate::{
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token}, hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
id, resource, id, resource,
track::TrackerSet, track::TrackerSet,
FastHashMap, RefCount, Stored, SubmissionIndex, RefCount, Stored, SubmissionIndex,
}; };
use copyless::VecHelper as _; use copyless::VecHelper as _;
@ -28,7 +28,7 @@ const CLEANUP_WAIT_MS: u64 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user. /// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct SuspectedResources { pub(super) struct SuspectedResources {
pub(crate) buffers: Vec<id::Valid<id::BufferId>>, pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
pub(crate) textures: Vec<id::Valid<id::TextureId>>, pub(crate) textures: Vec<id::Valid<id::TextureId>>,
pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>, pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
@ -39,10 +39,11 @@ pub struct SuspectedResources {
pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>, pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>, pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>, pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
pub(crate) query_sets: Vec<id::Valid<id::QuerySetId>>,
} }
impl SuspectedResources { impl SuspectedResources {
pub(crate) fn clear(&mut self) { pub(super) fn clear(&mut self) {
self.buffers.clear(); self.buffers.clear();
self.textures.clear(); self.textures.clear();
self.texture_views.clear(); self.texture_views.clear();
@ -53,9 +54,10 @@ impl SuspectedResources {
self.bind_group_layouts.clear(); self.bind_group_layouts.clear();
self.pipeline_layouts.clear(); self.pipeline_layouts.clear();
self.render_bundles.clear(); self.render_bundles.clear();
self.query_sets.clear();
} }
pub(crate) fn extend(&mut self, other: &Self) { pub(super) fn extend(&mut self, other: &Self) {
self.buffers.extend_from_slice(&other.buffers); self.buffers.extend_from_slice(&other.buffers);
self.textures.extend_from_slice(&other.textures); self.textures.extend_from_slice(&other.textures);
self.texture_views.extend_from_slice(&other.texture_views); self.texture_views.extend_from_slice(&other.texture_views);
@ -70,9 +72,10 @@ impl SuspectedResources {
self.pipeline_layouts self.pipeline_layouts
.extend_from_slice(&other.pipeline_layouts); .extend_from_slice(&other.pipeline_layouts);
self.render_bundles.extend_from_slice(&other.render_bundles); self.render_bundles.extend_from_slice(&other.render_bundles);
self.query_sets.extend_from_slice(&other.query_sets);
} }
pub(crate) fn add_trackers(&mut self, trackers: &TrackerSet) { pub(super) fn add_trackers(&mut self, trackers: &TrackerSet) {
self.buffers.extend(trackers.buffers.used()); self.buffers.extend(trackers.buffers.used());
self.textures.extend(trackers.textures.used()); self.textures.extend(trackers.textures.used());
self.texture_views.extend(trackers.views.used()); self.texture_views.extend(trackers.views.used());
@ -81,6 +84,7 @@ impl SuspectedResources {
self.compute_pipelines.extend(trackers.compute_pipes.used()); self.compute_pipelines.extend(trackers.compute_pipes.used());
self.render_pipelines.extend(trackers.render_pipes.used()); self.render_pipelines.extend(trackers.render_pipes.used());
self.render_bundles.extend(trackers.bundles.used()); self.render_bundles.extend(trackers.bundles.used());
self.query_sets.extend(trackers.query_sets.used());
} }
} }
@ -99,6 +103,7 @@ struct NonReferencedResources<B: hal::Backend> {
graphics_pipes: Vec<B::GraphicsPipeline>, graphics_pipes: Vec<B::GraphicsPipeline>,
descriptor_set_layouts: Vec<B::DescriptorSetLayout>, descriptor_set_layouts: Vec<B::DescriptorSetLayout>,
pipeline_layouts: Vec<B::PipelineLayout>, pipeline_layouts: Vec<B::PipelineLayout>,
query_sets: Vec<B::QueryPool>,
} }
impl<B: hal::Backend> NonReferencedResources<B> { impl<B: hal::Backend> NonReferencedResources<B> {
@ -114,6 +119,7 @@ impl<B: hal::Backend> NonReferencedResources<B> {
graphics_pipes: Vec::new(), graphics_pipes: Vec::new(),
descriptor_set_layouts: Vec::new(), descriptor_set_layouts: Vec::new(),
pipeline_layouts: Vec::new(), pipeline_layouts: Vec::new(),
query_sets: Vec::new(),
} }
} }
@ -126,6 +132,7 @@ impl<B: hal::Backend> NonReferencedResources<B> {
self.desc_sets.extend(other.desc_sets); self.desc_sets.extend(other.desc_sets);
self.compute_pipes.extend(other.compute_pipes); self.compute_pipes.extend(other.compute_pipes);
self.graphics_pipes.extend(other.graphics_pipes); self.graphics_pipes.extend(other.graphics_pipes);
self.query_sets.extend(other.query_sets);
assert!(other.descriptor_set_layouts.is_empty()); assert!(other.descriptor_set_layouts.is_empty());
assert!(other.pipeline_layouts.is_empty()); assert!(other.pipeline_layouts.is_empty());
} }
@ -178,6 +185,9 @@ impl<B: hal::Backend> NonReferencedResources<B> {
for raw in self.pipeline_layouts.drain(..) { for raw in self.pipeline_layouts.drain(..) {
device.destroy_pipeline_layout(raw); device.destroy_pipeline_layout(raw);
} }
for raw in self.query_sets.drain(..) {
device.destroy_query_pool(raw);
}
} }
} }
@ -206,7 +216,7 @@ pub enum WaitIdleError {
/// 3. When `ActiveSubmission` is retired, the mapped buffers associated with it are moved to `ready_to_map` vector. /// 3. When `ActiveSubmission` is retired, the mapped buffers associated with it are moved to `ready_to_map` vector.
/// 4. Finally, `handle_mapping` issues all the callbacks. /// 4. Finally, `handle_mapping` issues all the callbacks.
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct LifetimeTracker<B: hal::Backend> { pub(super) struct LifetimeTracker<B: hal::Backend> {
/// Resources that the user has requested be mapped, but are still in use. /// Resources that the user has requested be mapped, but are still in use.
mapped: Vec<Stored<id::BufferId>>, mapped: Vec<Stored<id::BufferId>>,
/// Buffers can be used in a submission that is yet to be made, by the /// Buffers can be used in a submission that is yet to be made, by the
@ -316,10 +326,9 @@ impl<B: hal::Backend> LifetimeTracker<B> {
.iter() .iter()
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) }) .position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
.unwrap_or_else(|| self.active.len()); .unwrap_or_else(|| self.active.len());
let last_done = if done_count != 0 { let last_done = match done_count.checked_sub(1) {
self.active[done_count - 1].index Some(i) => self.active[i].index,
} else { None => return Ok(0),
return Ok(0);
}; };
for a in self.active.drain(..done_count) { for a in self.active.drain(..done_count) {
@ -366,7 +375,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
} }
impl<B: GfxBackend> LifetimeTracker<B> { impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_suspected<G: GlobalIdentityHandlerFactory>( pub(super) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
&mut self, &mut self,
hub: &Hub<B, G>, hub: &Hub<B, G>,
trackers: &Mutex<TrackerSet>, trackers: &Mutex<TrackerSet>,
@ -374,8 +383,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
token: &mut Token<super::Device<B>>, token: &mut Token<super::Device<B>>,
) { ) {
if !self.suspected_resources.render_bundles.is_empty() { if !self.suspected_resources.render_bundles.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.render_bundles.write(token); let (mut guard, _) = hub.render_bundles.write(token);
let mut trackers = trackers.lock();
while let Some(id) = self.suspected_resources.render_bundles.pop() { while let Some(id) = self.suspected_resources.render_bundles.pop() {
if trackers.bundles.remove_abandoned(id) { if trackers.bundles.remove_abandoned(id) {
@ -390,8 +399,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.bind_groups.is_empty() { if !self.suspected_resources.bind_groups.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.bind_groups.write(token); let (mut guard, _) = hub.bind_groups.write(token);
let mut trackers = trackers.lock();
while let Some(id) = self.suspected_resources.bind_groups.pop() { while let Some(id) = self.suspected_resources.bind_groups.pop() {
if trackers.bind_groups.remove_abandoned(id) { if trackers.bind_groups.remove_abandoned(id) {
@ -414,8 +423,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.texture_views.is_empty() { if !self.suspected_resources.texture_views.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.texture_views.write(token); let (mut guard, _) = hub.texture_views.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.texture_views.drain(..) { for id in self.suspected_resources.texture_views.drain(..) {
if trackers.views.remove_abandoned(id) { if trackers.views.remove_abandoned(id) {
@ -444,8 +453,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.textures.is_empty() { if !self.suspected_resources.textures.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.textures.write(token); let (mut guard, _) = hub.textures.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.textures.drain(..) { for id in self.suspected_resources.textures.drain(..) {
if trackers.textures.remove_abandoned(id) { if trackers.textures.remove_abandoned(id) {
@ -466,8 +475,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.samplers.is_empty() { if !self.suspected_resources.samplers.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.samplers.write(token); let (mut guard, _) = hub.samplers.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.samplers.drain(..) { for id in self.suspected_resources.samplers.drain(..) {
if trackers.samplers.remove_abandoned(id) { if trackers.samplers.remove_abandoned(id) {
@ -488,8 +497,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.buffers.is_empty() { if !self.suspected_resources.buffers.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.buffers.write(token); let (mut guard, _) = hub.buffers.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.buffers.drain(..) { for id in self.suspected_resources.buffers.drain(..) {
if trackers.buffers.remove_abandoned(id) { if trackers.buffers.remove_abandoned(id) {
@ -499,6 +508,16 @@ impl<B: GfxBackend> LifetimeTracker<B> {
if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) { if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire); let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
if let resource::BufferMapState::Init {
stage_buffer,
stage_memory,
..
} = res.map_state
{
self.free_resources
.buffers
.push((stage_buffer, stage_memory));
}
self.active self.active
.iter_mut() .iter_mut()
.find(|a| a.index == submit_index) .find(|a| a.index == submit_index)
@ -511,8 +530,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.compute_pipelines.is_empty() { if !self.suspected_resources.compute_pipelines.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.compute_pipelines.write(token); let (mut guard, _) = hub.compute_pipelines.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.compute_pipelines.drain(..) { for id in self.suspected_resources.compute_pipelines.drain(..) {
if trackers.compute_pipes.remove_abandoned(id) { if trackers.compute_pipes.remove_abandoned(id) {
@ -533,8 +552,8 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
if !self.suspected_resources.render_pipelines.is_empty() { if !self.suspected_resources.render_pipelines.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.render_pipelines.write(token); let (mut guard, _) = hub.render_pipelines.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.render_pipelines.drain(..) { for id in self.suspected_resources.render_pipelines.drain(..) {
if trackers.render_pipes.remove_abandoned(id) { if trackers.render_pipes.remove_abandoned(id) {
@ -594,9 +613,30 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
} }
} }
if !self.suspected_resources.query_sets.is_empty() {
let (mut guard, _) = hub.query_sets.write(token);
let mut trackers = trackers.lock();
for id in self.suspected_resources.query_sets.drain(..) {
if trackers.query_sets.remove_abandoned(id) {
// #[cfg(feature = "trace")]
// trace.map(|t| t.lock().add(trace::Action::DestroyQuerySet(id.0)));
if let Some(res) = hub.query_sets.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.query_sets
.push(res.raw);
}
}
}
}
} }
pub(crate) fn triage_mapped<G: GlobalIdentityHandlerFactory>( pub(super) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
&mut self, &mut self,
hub: &Hub<B, G>, hub: &Hub<B, G>,
token: &mut Token<super::Device<B>>, token: &mut Token<super::Device<B>>,
@ -626,75 +666,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
} }
} }
pub(crate) fn triage_framebuffers<G: GlobalIdentityHandlerFactory>( pub(super) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
&mut self,
hub: &Hub<B, G>,
framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
token: &mut Token<super::Device<B>>,
) {
let (texture_view_guard, _) = hub.texture_views.read(token);
let remove_list = framebuffers
.keys()
.filter_map(|key| {
let mut last_submit = None;
let mut needs_cleanup = false;
// A framebuffer needs to be scheduled for cleanup if at least one
// attachment is no longer valid.
for &at in key.all() {
// If this attachment is still registered, it's still valid
if texture_view_guard.contains(at.0) {
continue;
}
// This attachment is no longer registered, this framebuffer needs cleanup
needs_cleanup = true;
// Check if there's any active submissions that are still referring to this
// attachment, if there are we need to get the greatest submission index, as
// that's the last time this attachment is still valid
let mut attachment_last_submit = None;
for a in &self.active {
if a.last_resources.image_views.iter().any(|&(id, _)| id == at) {
let max = attachment_last_submit.unwrap_or(0).max(a.index);
attachment_last_submit = Some(max);
}
}
// Between all attachments, we need the smallest index, because that's the last
// time this framebuffer is still valid
if let Some(attachment_last_submit) = attachment_last_submit {
let min = last_submit
.unwrap_or(std::usize::MAX)
.min(attachment_last_submit);
last_submit = Some(min);
}
}
if needs_cleanup {
Some((key.clone(), last_submit.unwrap_or(0)))
} else {
None
}
})
.collect::<FastHashMap<_, _>>();
if !remove_list.is_empty() {
tracing::debug!("Free framebuffers {:?}", remove_list);
for (ref key, submit_index) in remove_list {
let framebuffer = framebuffers.remove(key).unwrap();
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.framebuffers
.push(framebuffer);
}
}
}
pub(crate) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
&mut self, &mut self,
hub: &Hub<B, G>, hub: &Hub<B, G>,
raw: &B::Device, raw: &B::Device,
@ -726,6 +698,14 @@ impl<B: GfxBackend> LifetimeTracker<B> {
resource::BufferMapState::Idle, resource::BufferMapState::Idle,
) { ) {
resource::BufferMapState::Waiting(pending_mapping) => pending_mapping, resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
// Mapping cancelled
resource::BufferMapState::Idle => continue,
// Mapping queued at least twice by map -> unmap -> map
// and was already successfully mapped below
active @ resource::BufferMapState::Active { .. } => {
buffer.map_state = active;
continue;
}
_ => panic!("No pending mapping."), _ => panic!("No pending mapping."),
}; };
let status = if mapping.range.start != mapping.range.end { let status = if mapping.range.start != mapping.range.end {

Diff between files not shown because of its large size.

View file

@ -13,13 +13,14 @@ use crate::{
device::{alloc, DeviceError, WaitIdleError}, device::{alloc, DeviceError, WaitIdleError},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id, id,
memory_init_tracker::MemoryInitKind,
resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse}, resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
span, FastHashSet, span, FastHashMap, FastHashSet,
}; };
use hal::{command::CommandBuffer as _, device::Device as _, queue::CommandQueue as _}; use hal::{command::CommandBuffer as _, device::Device as _, queue::Queue as _};
use smallvec::SmallVec; use smallvec::SmallVec;
use std::{iter, ptr}; use std::{iter, ops::Range, ptr};
use thiserror::Error; use thiserror::Error;
struct StagingData<B: hal::Backend> { struct StagingData<B: hal::Backend> {
@ -147,6 +148,10 @@ impl<B: hal::Backend> super::Device<B> {
} }
} }
#[derive(Clone, Debug, Error)]
#[error("queue is invalid")]
pub struct InvalidQueue;
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum QueueWriteError { pub enum QueueWriteError {
#[error(transparent)] #[error(transparent)]
@ -271,6 +276,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device.pending_writes.consume(stage); device.pending_writes.consume(stage);
device.pending_writes.dst_buffers.insert(buffer_id); device.pending_writes.dst_buffers.insert(buffer_id);
// Ensure the overwritten bytes are marked as initialized so they don't need to be nulled prior to mapping or binding.
{
drop(buffer_guard);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let dst = buffer_guard.get_mut(buffer_id).unwrap();
dst.initialization_status
.clear(buffer_offset..(buffer_offset + data_size));
}
Ok(()) Ok(())
} }
@ -306,7 +321,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}); });
} }
if size.width == 0 || size.height == 0 || size.depth == 0 { if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
tracing::trace!("Ignoring write_texture of size 0"); tracing::trace!("Ignoring write_texture of size 0");
return Ok(()); return Ok(());
} }
@ -324,7 +339,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
bytes_per_block as wgt::BufferAddress, bytes_per_block as wgt::BufferAddress,
size, size,
)?; )?;
let (block_width, block_height) = conv::texture_block_size(texture_format);
let (block_width, block_height) = texture_format.describe().block_dimensions;
let block_width = block_width as u32;
let block_height = block_height as u32;
if !conv::is_valid_copy_dst_texture_format(texture_format) { if !conv::is_valid_copy_dst_texture_format(texture_format) {
Err(TransferError::CopyToForbiddenTextureFormat(texture_format))? Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
} }
@ -340,7 +359,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
); );
let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment); let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment);
let block_rows_in_copy = (size.depth - 1) * block_rows_per_image + height_blocks; let block_rows_in_copy =
(size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks;
let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64; let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
let mut stage = device.prepare_stage(stage_size)?; let mut stage = device.prepare_stage(stage_size)?;
@ -384,7 +404,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// Copy row by row into the optimal alignment. // Copy row by row into the optimal alignment.
let copy_bytes_per_row = let copy_bytes_per_row =
stage_bytes_per_row.min(data_layout.bytes_per_row) as usize; stage_bytes_per_row.min(data_layout.bytes_per_row) as usize;
for layer in 0..size.depth { for layer in 0..size.depth_or_array_layers {
let rows_offset = layer * block_rows_per_image; let rows_offset = layer * block_rows_per_image;
for row in 0..height_blocks { for row in 0..height_blocks {
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
@ -405,13 +425,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
stage.memory.flush_range(&device.raw, 0, None)?; stage.memory.flush_range(&device.raw, 0, None)?;
} }
// WebGPU uses the physical size of the texture for copies, whereas Vulkan uses
// the virtual size. We have passed validation, so it's safe to use the
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let max_image_extent = dst.kind.level_extent(destination.mip_level as _);
let image_extent = wgt::Extent3d {
width: size.width.min(max_image_extent.width),
height: size.height.min(max_image_extent.height),
depth_or_array_layers: size.depth_or_array_layers,
};
let region = hal::command::BufferImageCopy { let region = hal::command::BufferImageCopy {
buffer_offset: 0, buffer_offset: 0,
buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width, buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
buffer_height: texel_rows_per_image, buffer_height: texel_rows_per_image,
image_layers, image_layers,
image_offset, image_offset,
image_extent: conv::map_extent(size, dst.dimension), image_extent: conv::map_extent(&image_extent, dst.dimension),
}; };
unsafe { unsafe {
stage.cmdbuf.pipeline_barrier( stage.cmdbuf.pipeline_barrier(
@ -443,6 +474,135 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Ok(()) Ok(())
} }
// Enacts all zero initializations required by the given command buffers
// Required commands are appended to device.pending_writes
fn initialize_used_uninitialized_memory<B: GfxBackend>(
&self,
queue_id: id::QueueId,
command_buffer_ids: &[id::CommandBufferId],
) -> Result<(), QueueSubmitError> {
if command_buffer_ids.is_empty() {
return Ok(());
}
let hub = B::hub(self);
let mut token = Token::root();
let mut required_buffer_inits = {
let (command_buffer_guard, mut token) = hub.command_buffers.read(&mut token);
let mut required_buffer_inits: FastHashMap<
id::BufferId,
Vec<Range<wgt::BufferAddress>>,
> = FastHashMap::default();
for &cmb_id in command_buffer_ids {
let cmdbuf = command_buffer_guard
.get(cmb_id)
.map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
if cmdbuf.buffer_memory_init_actions.len() == 0 {
continue;
}
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
for buffer_use in cmdbuf.buffer_memory_init_actions.iter() {
let buffer = buffer_guard
.get_mut(buffer_use.id)
.map_err(|_| QueueSubmitError::DestroyedBuffer(buffer_use.id))?;
let uninitialized_ranges =
buffer.initialization_status.drain(buffer_use.range.clone());
match buffer_use.kind {
MemoryInitKind::ImplicitlyInitialized => {
uninitialized_ranges.for_each(drop);
}
MemoryInitKind::NeedsInitializedMemory => {
required_buffer_inits
.entry(buffer_use.id)
.or_default()
.extend(uninitialized_ranges);
}
}
}
}
required_buffer_inits
};
// Memory init is expected to be rare (it means the user relies on the default zero contents!), so most of the time we return early here!
if required_buffer_inits.is_empty() {
return Ok(());
}
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
device
.pending_writes
.dst_buffers
.extend(required_buffer_inits.keys());
device.borrow_pending_writes(); // Call ensures there is a pending_writes cmdbuffer, but using the reference returned would make the borrow checker unhappy!
let pending_writes_cmd_buf = device.pending_writes.command_buffer.as_mut().unwrap();
let mut trackers = device.trackers.lock();
for (buffer_id, mut ranges) in required_buffer_inits.drain() {
// Collapse touching ranges. We can't do this any earlier since we only now gathered ranges from several different command buffers!
ranges.sort_by(|a, b| a.start.cmp(&b.start));
for i in (1..ranges.len()).rev() {
assert!(ranges[i - 1].end <= ranges[i].start); // The memory init tracker made sure of this!
if ranges[i].start == ranges[i - 1].end {
ranges[i - 1].end = ranges[i].end;
ranges.swap_remove(i); // Ordering not important at this point
}
}
// Don't do use_replace since the buffer may already no longer have a ref_count.
// However, we *know* that it is currently in use, so the tracker must already know about it.
let transition = trackers.buffers.change_replace_tracked(
id::Valid(buffer_id),
(),
BufferUse::COPY_DST,
);
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| QueueSubmitError::DestroyedBuffer(buffer_id))?;
let &(ref buffer_raw, _) = buffer
.raw
.as_ref()
.ok_or(QueueSubmitError::DestroyedBuffer(buffer_id))?;
unsafe {
pending_writes_cmd_buf.pipeline_barrier(
super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
transition.map(|pending| pending.into_hal(buffer)),
);
}
for range in ranges {
let size = range.end - range.start;
assert!(range.start % 4 == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", buffer, range.start);
assert!(size % 4 == 0, "Buffer {:?} has an uninitialized range with a size not aligned to 4 (size was {})", buffer, size);
unsafe {
pending_writes_cmd_buf.fill_buffer(
buffer_raw,
hal::buffer::SubRange {
offset: range.start,
size: Some(size),
},
0,
);
}
}
}
Ok(())
}
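The range-collapsing step above merges touching uninitialized ranges gathered from several command buffers before issuing the fill_buffer clears. A minimal standalone sketch of that merge (a hypothetical helper, not part of this patch; it sorts defensively even though the init tracker already yields sorted, non-overlapping ranges):

use std::ops::Range;

/// Merge ranges whose end touches the next range's start into one range.
/// Assumes the ranges do not overlap, mirroring the assert above.
fn collapse_touching(mut ranges: Vec<Range<u64>>) -> Vec<Range<u64>> {
    ranges.sort_by_key(|r| r.start);
    let mut merged: Vec<Range<u64>> = Vec::with_capacity(ranges.len());
    for r in ranges {
        let touches = merged.last().map_or(false, |last| last.end == r.start);
        if touches {
            merged.last_mut().unwrap().end = r.end;
        } else {
            merged.push(r);
        }
    }
    merged
}

// collapse_touching(vec![0..4, 4..8, 12..16]) yields [0..8, 12..16].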
pub fn queue_submit<B: GfxBackend>( pub fn queue_submit<B: GfxBackend>(
&self, &self,
queue_id: id::QueueId, queue_id: id::QueueId,
@ -450,10 +610,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
) -> Result<(), QueueSubmitError> { ) -> Result<(), QueueSubmitError> {
span!(_guard, INFO, "Queue::submit"); span!(_guard, INFO, "Queue::submit");
self.initialize_used_uninitialized_memory::<B>(queue_id, command_buffer_ids)?;
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root();
let callbacks = { let callbacks = {
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token); let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = device_guard let device = device_guard
.get_mut(queue_id) .get_mut(queue_id)
@ -468,12 +630,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token); let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token); let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
{ if !command_buffer_ids.is_empty() {
let (render_bundle_guard, mut token) = hub.render_bundles.read(&mut token);
let (_, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token); let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token); let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token); let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (mut buffer_guard, mut token) = hub.buffers.write(&mut token); let (mut buffer_guard, mut token) = hub.buffers.write(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token); let (texture_guard, mut token) = hub.textures.write(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token); let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token); let (sampler_guard, _) = hub.samplers.read(&mut token);
@ -497,19 +661,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
)); ));
} }
if let Some((sc_id, fbo)) = cmdbuf.used_swap_chain.take() { for sc_id in cmdbuf.used_swap_chains.drain(..) {
let sc = &mut swap_chain_guard[sc_id.value]; let sc = &mut swap_chain_guard[sc_id.value];
sc.active_submission_index = submit_index;
if sc.acquired_view_id.is_none() { if sc.acquired_view_id.is_none() {
return Err(QueueSubmitError::SwapChainOutputDropped); return Err(QueueSubmitError::SwapChainOutputDropped);
} }
// For each swapchain, we only want to have at most 1 signaled semaphore. if sc.active_submission_index != submit_index {
if sc.acquired_framebuffers.is_empty() { sc.active_submission_index = submit_index;
// Only add a signal if this is the first time for this swapchain // Only add a signal if this is the first time for this swapchain
// to be used in the submission. // to be used in the submission.
signal_swapchain_semaphores.push(sc_id.value); signal_swapchain_semaphores.push(sc_id.value);
} }
sc.acquired_framebuffers.push(fbo);
} }
// optimize the tracked states // optimize the tracked states
@ -568,6 +730,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device.temp_suspected.render_pipelines.push(id); device.temp_suspected.render_pipelines.push(id);
} }
} }
for id in cmdbuf.trackers.bundles.used() {
if !render_bundle_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.render_bundles.push(id);
}
}
// execute resource transitions // execute resource transitions
let mut transit = device.cmd_allocator.extend(cmdbuf); let mut transit = device.cmd_allocator.extend(cmdbuf);
@ -595,24 +762,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} }
// now prepare the GPU submission // now prepare the GPU submission
let fence = device let mut fence = device
.raw .raw
.create_fence(false) .create_fence(false)
.or(Err(DeviceError::OutOfMemory))?; .or(Err(DeviceError::OutOfMemory))?;
let submission = hal::queue::Submission { let command_buffers = pending_write_command_buffer.as_ref().into_iter().chain(
command_buffers: pending_write_command_buffer.as_ref().into_iter().chain( command_buffer_ids.iter().flat_map(|&cmd_buf_id| {
command_buffer_ids command_buffer_guard.get(cmd_buf_id).unwrap().raw.iter()
.iter() }),
.flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).unwrap().raw), );
), let signal_semaphores = signal_swapchain_semaphores
wait_semaphores: Vec::new(), .into_iter()
signal_semaphores: signal_swapchain_semaphores .map(|sc_id| &swap_chain_guard[sc_id].semaphore);
.into_iter()
.map(|sc_id| &swap_chain_guard[sc_id].semaphore),
};
unsafe { unsafe {
device.queue_group.queues[0].submit(submission, Some(&fence)); device.queue_group.queues[0].submit(
command_buffers,
iter::empty(),
signal_semaphores,
Some(&mut fence),
);
} }
fence fence
}; };
@ -638,17 +807,36 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// finally, return the command buffers to the allocator // finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids { for &cmb_id in command_buffer_ids {
if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) { if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
device.cmd_allocator.after_submit(cmd_buf, submit_index); device
.cmd_allocator
.after_submit(cmd_buf, &device.raw, submit_index);
} }
} }
callbacks callbacks
}; };
// the map callbacks should execute with nothing locked!
drop(token);
super::fire_map_callbacks(callbacks); super::fire_map_callbacks(callbacks);
Ok(()) Ok(())
} }
pub fn queue_get_timestamp_period<B: GfxBackend>(
&self,
queue_id: id::QueueId,
) -> Result<f32, InvalidQueue> {
span!(_guard, INFO, "Queue::get_timestamp_period");
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
match device_guard.get(queue_id) {
Ok(device) => Ok(device.queue_group.queues[0].timestamp_period()),
Err(_) => Err(InvalidQueue),
}
}
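The value returned here is the timestamp period in nanoseconds per tick, as gfx-hal and Vulkan define it; callers use it to convert resolved timestamp query values into wall-clock time. A small illustrative helper (not part of the patch):

// `start_ticks`/`end_ticks` come from a resolved timestamp query set,
// `period` from queue_get_timestamp_period.
fn timestamps_to_nanos(start_ticks: u64, end_ticks: u64, period: f32) -> f64 {
    end_ticks.wrapping_sub(start_ticks) as f64 * period as f64
}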
} }
fn get_lowest_common_denom(a: u32, b: u32) -> u32 { fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
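The body of get_lowest_common_denom is cut off by this hunk. A conventional implementation computes the least common multiple of the two values via Euclid's GCD; a sketch under that assumption (not necessarily the exact code in the tree):

// Assumes at least one argument is non-zero.
fn gcd(a: u32, b: u32) -> u32 {
    if b == 0 {
        a
    } else {
        gcd(b, a % b)
    }
}

fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
    a / gcd(a, b) * b
}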

View file

@ -15,11 +15,11 @@ pub const FILE_NAME: &str = "trace.ron";
#[cfg(feature = "trace")] #[cfg(feature = "trace")]
pub(crate) fn new_render_bundle_encoder_descriptor<'a>( pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
label: Option<&'a str>, label: crate::Label<'a>,
context: &'a super::RenderPassContext, context: &'a super::RenderPassContext,
) -> crate::command::RenderBundleEncoderDescriptor<'a> { ) -> crate::command::RenderBundleEncoderDescriptor<'a> {
crate::command::RenderBundleEncoderDescriptor { crate::command::RenderBundleEncoderDescriptor {
label: label.map(Cow::Borrowed), label,
color_formats: Cow::Borrowed(&context.attachments.colors), color_formats: Cow::Borrowed(&context.attachments.colors),
depth_stencil_format: context.attachments.depth_stencil, depth_stencil_format: context.attachments.depth_stencil,
sample_count: context.sample_count as u32, sample_count: context.sample_count as u32,
@ -50,7 +50,7 @@ pub enum Action<'a> {
DestroySampler(id::SamplerId), DestroySampler(id::SamplerId),
CreateSwapChain(id::SwapChainId, wgt::SwapChainDescriptor), CreateSwapChain(id::SwapChainId, wgt::SwapChainDescriptor),
GetSwapChainTexture { GetSwapChainTexture {
id: Option<id::TextureViewId>, id: id::TextureViewId,
parent_id: id::SwapChainId, parent_id: id::SwapChainId,
}, },
PresentSwapChain(id::SwapChainId), PresentSwapChain(id::SwapChainId),
@ -71,19 +71,23 @@ pub enum Action<'a> {
DestroyBindGroup(id::BindGroupId), DestroyBindGroup(id::BindGroupId),
CreateShaderModule { CreateShaderModule {
id: id::ShaderModuleId, id: id::ShaderModuleId,
label: crate::Label<'a>, desc: crate::pipeline::ShaderModuleDescriptor<'a>,
data: FileName, data: FileName,
}, },
DestroyShaderModule(id::ShaderModuleId), DestroyShaderModule(id::ShaderModuleId),
CreateComputePipeline( CreateComputePipeline {
id::ComputePipelineId, id: id::ComputePipelineId,
crate::pipeline::ComputePipelineDescriptor<'a>, desc: crate::pipeline::ComputePipelineDescriptor<'a>,
), #[cfg_attr(feature = "replay", serde(default))]
implicit_context: Option<super::ImplicitPipelineContext>,
},
DestroyComputePipeline(id::ComputePipelineId), DestroyComputePipeline(id::ComputePipelineId),
CreateRenderPipeline( CreateRenderPipeline {
id::RenderPipelineId, id: id::RenderPipelineId,
crate::pipeline::RenderPipelineDescriptor<'a>, desc: crate::pipeline::RenderPipelineDescriptor<'a>,
), #[cfg_attr(feature = "replay", serde(default))]
implicit_context: Option<super::ImplicitPipelineContext>,
},
DestroyRenderPipeline(id::RenderPipelineId), DestroyRenderPipeline(id::RenderPipelineId),
CreateRenderBundle { CreateRenderBundle {
id: id::RenderBundleId, id: id::RenderBundleId,
@ -91,6 +95,11 @@ pub enum Action<'a> {
base: crate::command::BasePass<crate::command::RenderCommand>, base: crate::command::BasePass<crate::command::RenderCommand>,
}, },
DestroyRenderBundle(id::RenderBundleId), DestroyRenderBundle(id::RenderBundleId),
CreateQuerySet {
id: id::QuerySetId,
desc: wgt::QuerySetDescriptor,
},
DestroyQuerySet(id::QuerySetId),
WriteBuffer { WriteBuffer {
id: id::BufferId, id: id::BufferId,
data: FileName, data: FileName,
@ -132,6 +141,17 @@ pub enum Command {
dst: crate::command::TextureCopyView, dst: crate::command::TextureCopyView,
size: wgt::Extent3d, size: wgt::Extent3d,
}, },
WriteTimestamp {
query_set_id: id::QuerySetId,
query_index: u32,
},
ResolveQuerySet {
query_set_id: id::QuerySetId,
start_query: u32,
query_count: u32,
destination: id::BufferId,
destination_offset: wgt::BufferAddress,
},
RunComputePass { RunComputePass {
base: crate::command::BasePass<crate::command::ComputeCommand>, base: crate::command::BasePass<crate::command::ComputeCommand>,
}, },

View file

@ -23,6 +23,8 @@ use crate::{
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}; use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use wgt::Backend; use wgt::Backend;
use crate::id::QuerySetId;
use crate::resource::QuerySet;
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
use std::cell::Cell; use std::cell::Cell;
use std::{fmt::Debug, marker::PhantomData, ops, thread}; use std::{fmt::Debug, marker::PhantomData, ops, thread};
@ -264,6 +266,11 @@ impl<B: hal::Backend> Access<ComputePipeline<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {} impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for BindGroup<B> {} impl<B: hal::Backend> Access<RenderPipeline<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for ComputePipeline<B> {} impl<B: hal::Backend> Access<RenderPipeline<B>> for ComputePipeline<B> {}
impl<B: hal::Backend> Access<QuerySet<B>> for Root {}
impl<B: hal::Backend> Access<QuerySet<B>> for Device<B> {}
impl<B: hal::Backend> Access<QuerySet<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<QuerySet<B>> for RenderPipeline<B> {}
impl<B: hal::Backend> Access<QuerySet<B>> for ComputePipeline<B> {}
impl<B: hal::Backend> Access<ShaderModule<B>> for Device<B> {} impl<B: hal::Backend> Access<ShaderModule<B>> for Device<B> {}
impl<B: hal::Backend> Access<ShaderModule<B>> for BindGroupLayout<B> {} impl<B: hal::Backend> Access<ShaderModule<B>> for BindGroupLayout<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for Root {} impl<B: hal::Backend> Access<Buffer<B>> for Root {}
@ -273,6 +280,7 @@ impl<B: hal::Backend> Access<Buffer<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for CommandBuffer<B> {} impl<B: hal::Backend> Access<Buffer<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for ComputePipeline<B> {} impl<B: hal::Backend> Access<Buffer<B>> for ComputePipeline<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for RenderPipeline<B> {} impl<B: hal::Backend> Access<Buffer<B>> for RenderPipeline<B> {}
impl<B: hal::Backend> Access<Buffer<B>> for QuerySet<B> {}
impl<B: hal::Backend> Access<Texture<B>> for Root {} impl<B: hal::Backend> Access<Texture<B>> for Root {}
impl<B: hal::Backend> Access<Texture<B>> for Device<B> {} impl<B: hal::Backend> Access<Texture<B>> for Device<B> {}
impl<B: hal::Backend> Access<Texture<B>> for Buffer<B> {} impl<B: hal::Backend> Access<Texture<B>> for Buffer<B> {}
@ -294,7 +302,7 @@ thread_local! {
/// ///
/// Note: there can only be one non-borrowed `Token` alive on a thread /// Note: there can only be one non-borrowed `Token` alive on a thread
/// at a time, which is enforced by `ACTIVE_TOKEN`. /// at a time, which is enforced by `ACTIVE_TOKEN`.
pub struct Token<'a, T: 'a> { pub(crate) struct Token<'a, T: 'a> {
level: PhantomData<&'a T>, level: PhantomData<&'a T>,
} }
@ -374,6 +382,7 @@ pub trait GlobalIdentityHandlerFactory:
+ IdentityHandlerFactory<RenderBundleId> + IdentityHandlerFactory<RenderBundleId>
+ IdentityHandlerFactory<RenderPipelineId> + IdentityHandlerFactory<RenderPipelineId>
+ IdentityHandlerFactory<ComputePipelineId> + IdentityHandlerFactory<ComputePipelineId>
+ IdentityHandlerFactory<QuerySetId>
+ IdentityHandlerFactory<BufferId> + IdentityHandlerFactory<BufferId>
+ IdentityHandlerFactory<TextureId> + IdentityHandlerFactory<TextureId>
+ IdentityHandlerFactory<TextureViewId> + IdentityHandlerFactory<TextureViewId>
@ -430,60 +439,58 @@ impl<T: Resource, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
} }
} }
impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> { #[must_use]
pub fn register<A: Access<T>>(&self, id: I, value: T, _token: &mut Token<A>) { pub(crate) struct FutureId<'a, I: TypedId, T> {
debug_assert_eq!(id.unzip().2, self.backend); id: I,
self.data.write().insert(id, value); data: &'a RwLock<Storage<T, I>>,
}
impl<I: TypedId + Copy, T> FutureId<'_, I, T> {
#[cfg(feature = "trace")]
pub fn id(&self) -> I {
self.id
} }
pub fn read<'a, A: Access<T>>( pub fn into_id(self) -> I {
self.id
}
pub fn assign<'a, A: Access<T>>(self, value: T, _: &'a mut Token<A>) -> Valid<I> {
self.data.write().insert(self.id, value);
Valid(self.id)
}
pub fn assign_error<'a, A: Access<T>>(self, label: &str, _: &'a mut Token<A>) -> I {
self.data.write().insert_error(self.id, label);
self.id
}
}
impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
pub(crate) fn prepare(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
) -> FutureId<I, T> {
FutureId {
id: self.identity.process(id_in, self.backend),
data: &self.data,
}
}
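The new prepare/assign flow reserves an id first and then either stores the built resource or records an error under that same id, which is how the instance and device code below use it. A schematic fragment with placeholder names (build_resource, hub, token, and the error label are illustrative only):

let fid = hub.buffers.prepare(id_in);
let id = match build_resource() {
    // `assign` stores the value and returns Valid<I>; `.0` unwraps the raw id.
    Ok(buffer) => fid.assign(buffer, &mut token).0,
    // `assign_error` records an error entry under the reserved id.
    Err(_) => fid.assign_error("resource creation failed", &mut token),
};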
pub(crate) fn read<'a, A: Access<T>>(
&'a self, &'a self,
_token: &'a mut Token<A>, _token: &'a mut Token<A>,
) -> (RwLockReadGuard<'a, Storage<T, I>>, Token<'a, T>) { ) -> (RwLockReadGuard<'a, Storage<T, I>>, Token<'a, T>) {
(self.data.read(), Token::new()) (self.data.read(), Token::new())
} }
pub fn write<'a, A: Access<T>>( pub(crate) fn write<'a, A: Access<T>>(
&'a self, &'a self,
_token: &'a mut Token<A>, _token: &'a mut Token<A>,
) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) { ) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
(self.data.write(), Token::new()) (self.data.write(), Token::new())
} }
pub(crate) fn register_identity<A: Access<T>>(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
value: T,
token: &mut Token<A>,
) -> Valid<I> {
let id = self.identity.process(id_in, self.backend);
self.register(id, value, token);
Valid(id)
}
pub(crate) fn register_identity_locked(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
value: T,
guard: &mut Storage<T, I>,
) -> Valid<I> {
let id = self.identity.process(id_in, self.backend);
guard.insert(id, value);
Valid(id)
}
pub fn register_error<A: Access<T>>(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
label: &str,
_token: &mut Token<A>,
) -> I {
let id = self.identity.process(id_in, self.backend);
debug_assert_eq!(id.unzip().2, self.backend);
self.data.write().insert_error(id, label);
id
}
pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> { pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> {
let value = guard.remove(id); let value = guard.remove(id);
//Note: careful about the order here! //Note: careful about the order here!
@ -492,7 +499,7 @@ impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I
value value
} }
pub fn unregister<'a, A: Access<T>>( pub(crate) fn unregister<'a, A: Access<T>>(
&self, &self,
id: I, id: I,
_token: &'a mut Token<A>, _token: &'a mut Token<A>,
@ -504,14 +511,6 @@ impl<T: Resource, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I
(value, Token::new()) (value, Token::new())
} }
pub fn process_id(&self, id_in: <F::Filter as IdentityHandler<I>>::Input) -> I {
self.identity.process(id_in, self.backend)
}
pub fn free_id(&self, id: I) {
self.identity.free(id)
}
pub fn label_for_resource(&self, id: I) -> String { pub fn label_for_resource(&self, id: I) -> String {
let guard = self.data.read(); let guard = self.data.read();
@ -547,6 +546,7 @@ pub struct Hub<B: hal::Backend, F: GlobalIdentityHandlerFactory> {
pub render_bundles: Registry<RenderBundle, RenderBundleId, F>, pub render_bundles: Registry<RenderBundle, RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>, pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>, pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
pub query_sets: Registry<QuerySet<B>, QuerySetId, F>,
pub buffers: Registry<Buffer<B>, BufferId, F>, pub buffers: Registry<Buffer<B>, BufferId, F>,
pub textures: Registry<Texture<B>, TextureId, F>, pub textures: Registry<Texture<B>, TextureId, F>,
pub texture_views: Registry<TextureView<B>, TextureViewId, F>, pub texture_views: Registry<TextureView<B>, TextureViewId, F>,
@ -567,6 +567,7 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
render_bundles: Registry::new(B::VARIANT, factory), render_bundles: Registry::new(B::VARIANT, factory),
render_pipelines: Registry::new(B::VARIANT, factory), render_pipelines: Registry::new(B::VARIANT, factory),
compute_pipelines: Registry::new(B::VARIANT, factory), compute_pipelines: Registry::new(B::VARIANT, factory),
query_sets: Registry::new(B::VARIANT, factory),
buffers: Registry::new(B::VARIANT, factory), buffers: Registry::new(B::VARIANT, factory),
textures: Registry::new(B::VARIANT, factory), textures: Registry::new(B::VARIANT, factory),
texture_views: Registry::new(B::VARIANT, factory), texture_views: Registry::new(B::VARIANT, factory),
@ -576,7 +577,10 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
} }
impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> { impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
fn clear(&self, surface_guard: &mut Storage<Surface, SurfaceId>) { //TODO: instead of having a hacky `with_adapters` parameter,
// we should have `clear_device(device_id)` that specifically destroys
// everything related to a logical device.
fn clear(&self, surface_guard: &mut Storage<Surface, SurfaceId>, with_adapters: bool) {
use crate::resource::TextureViewInner; use crate::resource::TextureViewInner;
use hal::{device::Device as _, window::PresentationSurface as _}; use hal::{device::Device as _, window::PresentationSurface as _};
@ -626,9 +630,10 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
} }
for element in self.command_buffers.data.write().map.drain(..) { for element in self.command_buffers.data.write().map.drain(..) {
if let Element::Occupied(command_buffer, _) = element { if let Element::Occupied(command_buffer, _) = element {
devices[command_buffer.device_id.value] let device = &devices[command_buffer.device_id.value];
device
.cmd_allocator .cmd_allocator
.after_submit(command_buffer, 0); .after_submit(command_buffer, &device.raw, 0);
} }
} }
for element in self.bind_groups.data.write().map.drain(..) { for element in self.bind_groups.data.write().map.drain(..) {
@ -697,11 +702,23 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
} }
} }
for element in self.query_sets.data.write().map.drain(..) {
if let Element::Occupied(query_set, _) = element {
let device = &devices[query_set.device_id.value];
unsafe {
device.raw.destroy_query_pool(query_set.raw);
}
}
}
for element in devices.map.drain(..) { for element in devices.map.drain(..) {
if let Element::Occupied(device, _) = element { if let Element::Occupied(device, _) = element {
device.dispose(); device.dispose();
} }
} }
if with_adapters {
self.adapters.data.write().map.clear();
}
} }
} }
@ -756,7 +773,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn clear_backend<B: GfxBackend>(&self, _dummy: ()) { pub fn clear_backend<B: GfxBackend>(&self, _dummy: ()) {
let mut surface_guard = self.surfaces.data.write(); let mut surface_guard = self.surfaces.data.write();
let hub = B::hub(self); let hub = B::hub(self);
hub.clear(&mut *surface_guard); // this is used for tests, which keep the adapter
hub.clear(&mut *surface_guard, false);
} }
} }
@ -769,23 +787,23 @@ impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
// destroy hubs // destroy hubs
#[cfg(vulkan)] #[cfg(vulkan)]
{ {
self.hubs.vulkan.clear(&mut *surface_guard); self.hubs.vulkan.clear(&mut *surface_guard, true);
} }
#[cfg(metal)] #[cfg(metal)]
{ {
self.hubs.metal.clear(&mut *surface_guard); self.hubs.metal.clear(&mut *surface_guard, true);
} }
#[cfg(dx12)] #[cfg(dx12)]
{ {
self.hubs.dx12.clear(&mut *surface_guard); self.hubs.dx12.clear(&mut *surface_guard, true);
} }
#[cfg(dx11)] #[cfg(dx11)]
{ {
self.hubs.dx11.clear(&mut *surface_guard); self.hubs.dx11.clear(&mut *surface_guard, true);
} }
#[cfg(gl)] #[cfg(gl)]
{ {
self.hubs.gl.clear(&mut *surface_guard); self.hubs.gl.clear(&mut *surface_guard, true);
} }
// destroy surfaces // destroy surfaces

View file

@ -164,6 +164,7 @@ pub type RenderPassEncoderId = *mut crate::command::RenderPass;
pub type ComputePassEncoderId = *mut crate::command::ComputePass; pub type ComputePassEncoderId = *mut crate::command::ComputePass;
pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder; pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
pub type RenderBundleId = Id<crate::command::RenderBundle>; pub type RenderBundleId = Id<crate::command::RenderBundle>;
pub type QuerySetId = Id<crate::resource::QuerySet<Dummy>>;
// Swap chain // Swap chain
pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>; pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;

View file

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{ use crate::{
backend, backend, conv,
device::{Device, DeviceDescriptor}, device::{Device, DeviceDescriptor},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token}, hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{AdapterId, DeviceId, SurfaceId, Valid}, id::{AdapterId, DeviceId, SurfaceId, Valid},
@ -13,10 +13,7 @@ use crate::{
use wgt::{Backend, BackendBit, PowerPreference, BIND_BUFFER_ALIGNMENT}; use wgt::{Backend, BackendBit, PowerPreference, BIND_BUFFER_ALIGNMENT};
use hal::{ use hal::{
adapter::{AdapterInfo as HalAdapterInfo, DeviceType as HalDeviceType, PhysicalDevice as _}, adapter::PhysicalDevice as _, queue::QueueFamily as _, window::Surface as _, Instance as _,
queue::QueueFamily as _,
window::Surface as _,
Instance as _,
}; };
use thiserror::Error; use thiserror::Error;
@ -123,6 +120,7 @@ impl crate::hub::Resource for Surface {
pub struct Adapter<B: hal::Backend> { pub struct Adapter<B: hal::Backend> {
pub(crate) raw: hal::adapter::Adapter<B>, pub(crate) raw: hal::adapter::Adapter<B>,
features: wgt::Features, features: wgt::Features,
pub(crate) private_features: PrivateFeatures,
limits: wgt::Limits, limits: wgt::Limits,
life_guard: LifeGuard, life_guard: LifeGuard,
} }
@ -132,10 +130,12 @@ impl<B: GfxBackend> Adapter<B> {
span!(_guard, INFO, "Adapter::new"); span!(_guard, INFO, "Adapter::new");
let adapter_features = raw.physical_device.features(); let adapter_features = raw.physical_device.features();
let properties = raw.physical_device.properties();
let mut features = wgt::Features::default() let mut features = wgt::Features::default()
| wgt::Features::MAPPABLE_PRIMARY_BUFFERS | wgt::Features::MAPPABLE_PRIMARY_BUFFERS
| wgt::Features::PUSH_CONSTANTS; | wgt::Features::PUSH_CONSTANTS
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES;
features.set( features.set(
wgt::Features::DEPTH_CLAMPING, wgt::Features::DEPTH_CLAMPING,
adapter_features.contains(hal::Features::DEPTH_CLAMP), adapter_features.contains(hal::Features::DEPTH_CLAMP),
@ -144,6 +144,14 @@ impl<B: GfxBackend> Adapter<B> {
wgt::Features::TEXTURE_COMPRESSION_BC, wgt::Features::TEXTURE_COMPRESSION_BC,
adapter_features.contains(hal::Features::FORMAT_BC), adapter_features.contains(hal::Features::FORMAT_BC),
); );
features.set(
wgt::Features::TEXTURE_COMPRESSION_ETC2,
adapter_features.contains(hal::Features::FORMAT_ETC2),
);
features.set(
wgt::Features::TEXTURE_COMPRESSION_ASTC_LDR,
adapter_features.contains(hal::Features::FORMAT_ASTC_LDR),
);
features.set( features.set(
wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY, wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY,
adapter_features.contains(hal::Features::TEXTURE_DESCRIPTOR_ARRAY), adapter_features.contains(hal::Features::TEXTURE_DESCRIPTOR_ARRAY),
@ -172,62 +180,188 @@ impl<B: GfxBackend> Adapter<B> {
wgt::Features::NON_FILL_POLYGON_MODE, wgt::Features::NON_FILL_POLYGON_MODE,
adapter_features.contains(hal::Features::NON_FILL_POLYGON_MODE), adapter_features.contains(hal::Features::NON_FILL_POLYGON_MODE),
); );
features.set(
wgt::Features::TIMESTAMP_QUERY,
properties.limits.timestamp_compute_and_graphics,
);
features.set(
wgt::Features::PIPELINE_STATISTICS_QUERY,
adapter_features.contains(hal::Features::PIPELINE_STATISTICS_QUERY),
);
features.set(
wgt::Features::SHADER_FLOAT64,
adapter_features.contains(hal::Features::SHADER_FLOAT64),
);
#[cfg(not(target_os = "ios"))] #[cfg(not(target_os = "ios"))]
//TODO: https://github.com/gfx-rs/gfx/issues/3346 //TODO: https://github.com/gfx-rs/gfx/issues/3346
features.set(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER, true); features.set(wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER, true);
let adapter_limits = raw.physical_device.limits(); let private_features = PrivateFeatures {
anisotropic_filtering: adapter_features.contains(hal::Features::SAMPLER_ANISOTROPY),
texture_d24: raw
.physical_device
.format_properties(Some(hal::format::Format::X8D24Unorm))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
texture_d24_s8: raw
.physical_device
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
};
let default_limits = wgt::Limits::default(); let default_limits = wgt::Limits::default();
// All these casts to u32 are safe as the underlying vulkan types are u32s. // All these casts to u32 are safe as the underlying vulkan types are u32s.
// If another backend provides larger limits than u32, we need to clamp them to u32::MAX. // If another backend provides larger limits than u32, we need to clamp them to u32::MAX.
// TODO: fix all gfx-hal backends to produce limits we care about, and remove .max // TODO: fix all gfx-hal backends to produce limits we care about, and remove .max
let desc_limits = &properties.limits.descriptor_limits;
let limits = wgt::Limits { let limits = wgt::Limits {
max_bind_groups: (adapter_limits.max_bound_descriptor_sets as u32) max_texture_dimension_1d: properties
.limits
.max_image_1d_size
.max(default_limits.max_texture_dimension_1d),
max_texture_dimension_2d: properties
.limits
.max_image_2d_size
.max(default_limits.max_texture_dimension_2d),
max_texture_dimension_3d: properties
.limits
.max_image_3d_size
.max(default_limits.max_texture_dimension_3d),
max_texture_array_layers: (properties.limits.max_image_array_layers as u32)
.max(default_limits.max_texture_array_layers),
max_bind_groups: (properties.limits.max_bound_descriptor_sets as u32)
.min(MAX_BIND_GROUPS as u32) .min(MAX_BIND_GROUPS as u32)
.max(default_limits.max_bind_groups), .max(default_limits.max_bind_groups),
max_dynamic_uniform_buffers_per_pipeline_layout: (adapter_limits max_dynamic_uniform_buffers_per_pipeline_layout: desc_limits
.max_descriptor_set_uniform_buffers_dynamic .max_descriptor_set_uniform_buffers_dynamic
as u32)
.max(default_limits.max_dynamic_uniform_buffers_per_pipeline_layout), .max(default_limits.max_dynamic_uniform_buffers_per_pipeline_layout),
max_dynamic_storage_buffers_per_pipeline_layout: (adapter_limits max_dynamic_storage_buffers_per_pipeline_layout: desc_limits
.max_descriptor_set_storage_buffers_dynamic .max_descriptor_set_storage_buffers_dynamic
as u32)
.max(default_limits.max_dynamic_storage_buffers_per_pipeline_layout), .max(default_limits.max_dynamic_storage_buffers_per_pipeline_layout),
max_sampled_textures_per_shader_stage: (adapter_limits max_sampled_textures_per_shader_stage: desc_limits
.max_per_stage_descriptor_sampled_images .max_per_stage_descriptor_sampled_images
as u32)
.max(default_limits.max_sampled_textures_per_shader_stage), .max(default_limits.max_sampled_textures_per_shader_stage),
max_samplers_per_shader_stage: (adapter_limits.max_per_stage_descriptor_samplers max_samplers_per_shader_stage: desc_limits
as u32) .max_per_stage_descriptor_samplers
.max(default_limits.max_samplers_per_shader_stage), .max(default_limits.max_samplers_per_shader_stage),
max_storage_buffers_per_shader_stage: (adapter_limits max_storage_buffers_per_shader_stage: desc_limits
.max_per_stage_descriptor_storage_buffers .max_per_stage_descriptor_storage_buffers
as u32)
.max(default_limits.max_storage_buffers_per_shader_stage), .max(default_limits.max_storage_buffers_per_shader_stage),
max_storage_textures_per_shader_stage: (adapter_limits max_storage_textures_per_shader_stage: desc_limits
.max_per_stage_descriptor_storage_images .max_per_stage_descriptor_storage_images
as u32)
.max(default_limits.max_storage_textures_per_shader_stage), .max(default_limits.max_storage_textures_per_shader_stage),
max_uniform_buffers_per_shader_stage: (adapter_limits max_uniform_buffers_per_shader_stage: desc_limits
.max_per_stage_descriptor_uniform_buffers .max_per_stage_descriptor_uniform_buffers
as u32)
.max(default_limits.max_uniform_buffers_per_shader_stage), .max(default_limits.max_uniform_buffers_per_shader_stage),
max_uniform_buffer_binding_size: (adapter_limits.max_uniform_buffer_range as u32) max_uniform_buffer_binding_size: (properties.limits.max_uniform_buffer_range as u32)
.max(default_limits.max_uniform_buffer_binding_size), .max(default_limits.max_uniform_buffer_binding_size),
max_push_constant_size: (adapter_limits.max_push_constants_size as u32) max_storage_buffer_binding_size: (properties.limits.max_storage_buffer_range as u32)
.max(default_limits.max_storage_buffer_binding_size),
max_vertex_buffers: (properties.limits.max_vertex_input_bindings as u32)
.max(default_limits.max_vertex_buffers),
max_vertex_attributes: (properties.limits.max_vertex_input_attributes as u32)
.max(default_limits.max_vertex_attributes),
max_vertex_buffer_array_stride: (properties.limits.max_vertex_input_binding_stride
as u32)
.max(default_limits.max_vertex_buffer_array_stride),
max_push_constant_size: (properties.limits.max_push_constants_size as u32)
.max(MIN_PUSH_CONSTANT_SIZE), // As an extension, the default is always 0, so define a separate minimum. .max(MIN_PUSH_CONSTANT_SIZE), // As an extension, the default is always 0, so define a separate minimum.
}; };
Self { Self {
raw, raw,
features, features,
private_features,
limits, limits,
life_guard: LifeGuard::new("<Adapter>"), life_guard: LifeGuard::new("<Adapter>"),
} }
} }
pub fn get_swap_chain_preferred_format(
&self,
surface: &mut Surface,
) -> Result<wgt::TextureFormat, GetSwapChainPreferredFormatError> {
span!(_guard, INFO, "Adapter::get_swap_chain_preferred_format");
let formats = {
let surface = B::get_surface_mut(surface);
let queue_family = &self.raw.queue_families[0];
if !surface.supports_queue_family(queue_family) {
return Err(GetSwapChainPreferredFormatError::UnsupportedQueueFamily);
}
surface.supported_formats(&self.raw.physical_device)
};
if let Some(formats) = formats {
// Check the four formats mentioned in the WebGPU spec:
// Bgra8UnormSrgb, Rgba8UnormSrgb, Bgra8Unorm, Rgba8Unorm
// Also, prefer sRGB over linear as it is better at
// representing perceived colors.
if formats.contains(&hal::format::Format::Bgra8Srgb) {
return Ok(wgt::TextureFormat::Bgra8UnormSrgb);
}
if formats.contains(&hal::format::Format::Rgba8Srgb) {
return Ok(wgt::TextureFormat::Rgba8UnormSrgb);
}
if formats.contains(&hal::format::Format::Bgra8Unorm) {
return Ok(wgt::TextureFormat::Bgra8Unorm);
}
if formats.contains(&hal::format::Format::Rgba8Unorm) {
return Ok(wgt::TextureFormat::Rgba8Unorm);
}
return Err(GetSwapChainPreferredFormatError::NotFound);
}
// If no formats were returned, use Bgra8UnormSrgb
Ok(wgt::TextureFormat::Bgra8UnormSrgb)
}
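The selection above amounts to walking a fixed priority list (sRGB before linear, BGRA before RGBA) over the formats the surface reports. An equivalent sketch using the hal/wgt types already in scope in this file (illustrative, not the patch's code):

fn pick_preferred_format(formats: &[hal::format::Format]) -> Option<wgt::TextureFormat> {
    // Candidates in priority order.
    let preferred = [
        (hal::format::Format::Bgra8Srgb, wgt::TextureFormat::Bgra8UnormSrgb),
        (hal::format::Format::Rgba8Srgb, wgt::TextureFormat::Rgba8UnormSrgb),
        (hal::format::Format::Bgra8Unorm, wgt::TextureFormat::Bgra8Unorm),
        (hal::format::Format::Rgba8Unorm, wgt::TextureFormat::Rgba8Unorm),
    ];
    for &(hal_format, wgt_format) in preferred.iter() {
        if formats.contains(&hal_format) {
            return Some(wgt_format);
        }
    }
    None
}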
pub(crate) fn get_texture_format_features(
&self,
format: wgt::TextureFormat,
) -> wgt::TextureFormatFeatures {
let texture_format_properties = self
.raw
.physical_device
.format_properties(Some(conv::map_texture_format(
format,
self.private_features,
)))
.optimal_tiling;
let mut allowed_usages = format.describe().guaranteed_format_features.allowed_usages;
if texture_format_properties.contains(hal::format::ImageFeature::SAMPLED) {
allowed_usages |= wgt::TextureUsage::SAMPLED;
}
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE) {
allowed_usages |= wgt::TextureUsage::STORAGE;
}
if texture_format_properties.contains(hal::format::ImageFeature::COLOR_ATTACHMENT) {
allowed_usages |= wgt::TextureUsage::RENDER_ATTACHMENT;
}
if texture_format_properties.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) {
allowed_usages |= wgt::TextureUsage::RENDER_ATTACHMENT;
}
let mut flags = wgt::TextureFormatFeatureFlags::empty();
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE_ATOMIC) {
flags |= wgt::TextureFormatFeatureFlags::STORAGE_ATOMICS;
}
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE_READ_WRITE) {
flags |= wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE;
}
let filterable =
texture_format_properties.contains(hal::format::ImageFeature::SAMPLED_LINEAR);
wgt::TextureFormatFeatures {
allowed_usages,
flags,
filterable,
}
}
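Callers are expected to check the returned features before using a format for a given purpose; for example, gating a sampled-plus-filtering use looks roughly like this (hypothetical call site, not part of the patch):

// `features` is the wgt::TextureFormatFeatures computed above.
fn can_sample_with_linear_filter(features: &wgt::TextureFormatFeatures) -> bool {
    features.allowed_usages.contains(wgt::TextureUsage::SAMPLED) && features.filterable
}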
fn create_device( fn create_device(
&self, &self,
self_id: AdapterId, self_id: AdapterId,
@ -270,6 +404,25 @@ impl<B: GfxBackend> Adapter<B> {
} }
// Features // Features
enabled_features.set(
hal::Features::DEPTH_CLAMP,
desc.features.contains(wgt::Features::DEPTH_CLAMPING),
);
enabled_features.set(
hal::Features::FORMAT_BC,
desc.features
.contains(wgt::Features::TEXTURE_COMPRESSION_BC),
);
enabled_features.set(
hal::Features::FORMAT_ETC2,
desc.features
.contains(wgt::Features::TEXTURE_COMPRESSION_ETC2),
);
enabled_features.set(
hal::Features::FORMAT_ASTC_LDR,
desc.features
.contains(wgt::Features::TEXTURE_COMPRESSION_ASTC_LDR),
);
enabled_features.set( enabled_features.set(
hal::Features::TEXTURE_DESCRIPTOR_ARRAY, hal::Features::TEXTURE_DESCRIPTOR_ARRAY,
desc.features desc.features
@ -302,6 +455,15 @@ impl<B: GfxBackend> Adapter<B> {
hal::Features::NON_FILL_POLYGON_MODE, hal::Features::NON_FILL_POLYGON_MODE,
desc.features.contains(wgt::Features::NON_FILL_POLYGON_MODE), desc.features.contains(wgt::Features::NON_FILL_POLYGON_MODE),
); );
enabled_features.set(
hal::Features::PIPELINE_STATISTICS_QUERY,
desc.features
.contains(wgt::Features::PIPELINE_STATISTICS_QUERY),
);
enabled_features.set(
hal::Features::SHADER_FLOAT64,
desc.features.contains(wgt::Features::SHADER_FLOAT64),
);
let family = self let family = self
.raw .raw
@ -309,6 +471,7 @@ impl<B: GfxBackend> Adapter<B> {
.iter() .iter()
.find(|family| family.queue_type().supports_graphics()) .find(|family| family.queue_type().supports_graphics())
.ok_or(RequestDeviceError::NoGraphicsQueue)?; .ok_or(RequestDeviceError::NoGraphicsQueue)?;
let mut gpu = let mut gpu =
unsafe { phd.open(&[(family, &[1.0])], enabled_features) }.map_err(|err| { unsafe { phd.open(&[(family, &[1.0])], enabled_features) }.map_err(|err| {
use hal::device::CreationError::*; use hal::device::CreationError::*;
@ -324,7 +487,7 @@ impl<B: GfxBackend> Adapter<B> {
//TODO //TODO
} }
let limits = phd.limits(); let limits = phd.properties().limits;
assert_eq!( assert_eq!(
0, 0,
BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment, BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment,
@ -340,21 +503,6 @@ impl<B: GfxBackend> Adapter<B> {
} }
let mem_props = phd.memory_properties(); let mem_props = phd.memory_properties();
if !desc.shader_validation {
tracing::warn!("Shader validation is disabled");
}
let private_features = PrivateFeatures {
shader_validation: desc.shader_validation,
anisotropic_filtering: enabled_features.contains(hal::Features::SAMPLER_ANISOTROPY),
texture_d24: phd
.format_properties(Some(hal::format::Format::X8D24Unorm))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
texture_d24_s8: phd
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
};
Device::new( Device::new(
gpu.device, gpu.device,
@ -365,7 +513,7 @@ impl<B: GfxBackend> Adapter<B> {
gpu.queue_groups.swap_remove(0), gpu.queue_groups.swap_remove(0),
mem_props, mem_props,
limits, limits,
private_features, self.private_features,
desc, desc,
trace_path, trace_path,
) )
@ -381,40 +529,16 @@ impl<B: hal::Backend> crate::hub::Resource for Adapter<B> {
} }
} }
/// Metadata about a backend adapter. #[derive(Clone, Debug, Error)]
#[derive(Clone, Debug, PartialEq)] pub enum GetSwapChainPreferredFormatError {
#[cfg_attr(feature = "trace", derive(serde::Serialize))] #[error("no suitable format found")]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))] NotFound,
pub struct AdapterInfo { #[error("invalid adapter")]
/// Adapter name InvalidAdapter,
pub name: String, #[error("invalid surface")]
/// Vendor PCI id of the adapter InvalidSurface,
pub vendor: usize, #[error("surface does not support the adapter's queue family")]
/// PCI id of the adapter UnsupportedQueueFamily,
pub device: usize,
/// Type of device
pub device_type: DeviceType,
/// Backend used for device
pub backend: Backend,
}
impl AdapterInfo {
fn from_gfx(adapter_info: HalAdapterInfo, backend: Backend) -> Self {
let HalAdapterInfo {
name,
vendor,
device,
device_type,
} = adapter_info;
Self {
name,
vendor,
device,
device_type: device_type.into(),
backend,
}
}
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -436,36 +560,6 @@ pub enum RequestDeviceError {
UnsupportedFeature(wgt::Features), UnsupportedFeature(wgt::Features),
} }
/// Supported physical device types.
#[repr(u8)]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum DeviceType {
/// Other.
Other,
/// Integrated GPU with shared CPU/GPU memory.
IntegratedGpu,
/// Discrete GPU with separate CPU/GPU memory.
DiscreteGpu,
/// Virtual / Hosted.
VirtualGpu,
/// Cpu / Software Rendering.
Cpu,
}
impl From<HalDeviceType> for DeviceType {
fn from(device_type: HalDeviceType) -> Self {
match device_type {
HalDeviceType::Other => Self::Other,
HalDeviceType::IntegratedGpu => Self::IntegratedGpu,
HalDeviceType::DiscreteGpu => Self::DiscreteGpu,
HalDeviceType::VirtualGpu => Self::VirtualGpu,
HalDeviceType::Cpu => Self::Cpu,
}
}
}
pub enum AdapterInputs<'a, I> { pub enum AdapterInputs<'a, I> {
IdSet(&'a [I], fn(&I) -> Backend), IdSet(&'a [I], fn(&I) -> Backend),
Mask(BackendBit, fn(Backend) -> I), Mask(BackendBit, fn(Backend) -> I),
@ -486,8 +580,8 @@ impl<I: Clone> AdapterInputs<'_, I> {
} }
} }
#[error("adapter is invalid")]
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
#[error("adapter is invalid")]
pub struct InvalidAdapter; pub struct InvalidAdapter;
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -533,7 +627,29 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}; };
let mut token = Token::root(); let mut token = Token::root();
let id = self.surfaces.register_identity(id_in, surface, &mut token); let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0
}
#[cfg(metal)]
pub fn instance_create_surface_metal(
&self,
layer: *mut std::ffi::c_void,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
span!(_guard, INFO, "Instance::instance_create_surface_metal");
let surface =
Surface {
#[cfg(feature = "vulkan-portability")]
vulkan: None, //TODO: create_surface_from_layer ?
metal: self.instance.metal.as_ref().map(|inst| {
inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
}),
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0 id.0
} }
@ -559,11 +675,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
for raw in inst.enumerate_adapters() { for raw in inst.enumerate_adapters() {
let adapter = Adapter::new(raw); let adapter = Adapter::new(raw);
tracing::info!("Adapter {} {:?}", backend_info, adapter.raw.info); tracing::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
let id = hub.adapters.register_identity( let id = hub.adapters
id_backend.clone(), .prepare(id_backend.clone())
adapter, .assign(adapter, &mut token);
&mut token,
);
adapters.push(id.0); adapters.push(id.0);
} }
} }
@ -674,7 +788,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(RequestAdapterError::NotFound); return Err(RequestAdapterError::NotFound);
} }
let (mut integrated, mut discrete, mut virt, mut other) = (None, None, None, None); let (mut integrated, mut discrete, mut virt, mut cpu, mut other) =
(None, None, None, None, None);
for (i, ty) in device_types.into_iter().enumerate() { for (i, ty) in device_types.into_iter().enumerate() {
match ty { match ty {
@ -687,15 +802,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::adapter::DeviceType::VirtualGpu => { hal::adapter::DeviceType::VirtualGpu => {
virt = virt.or(Some(i)); virt = virt.or(Some(i));
} }
_ => { hal::adapter::DeviceType::Cpu => {
cpu = cpu.or(Some(i));
}
hal::adapter::DeviceType::Other => {
other = other.or(Some(i)); other = other.or(Some(i));
} }
} }
} }
let preferred_gpu = match desc.power_preference { let preferred_gpu = match desc.power_preference {
PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt), PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt).or(cpu),
PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt), PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt).or(cpu),
}; };
let mut selected = preferred_gpu.unwrap_or(0); let mut selected = preferred_gpu.unwrap_or(0);
@ -705,11 +823,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if selected < adapters_backend.len() { if selected < adapters_backend.len() {
let adapter = Adapter::new(adapters_backend.swap_remove(selected)); let adapter = Adapter::new(adapters_backend.swap_remove(selected));
tracing::info!("Adapter {} {:?}", info_adapter, adapter.raw.info); tracing::info!("Adapter {} {:?}", info_adapter, adapter.raw.info);
let id = backend_hub(self).adapters.register_identity( let id = backend_hub(self).adapters
id_backend.take().unwrap(), .prepare(id_backend.take().unwrap())
adapter, .assign(adapter, &mut token);
&mut token,
);
return Ok(id.0); return Ok(id.0);
} }
selected -= adapters_backend.len(); selected -= adapters_backend.len();
@ -724,7 +840,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
#[cfg(dx11)] #[cfg(dx11)]
map(("Dx11", &mut id_dx11, adapters_dx11, backend::Dx11::hub)), map(("Dx11", &mut id_dx11, adapters_dx11, backend::Dx11::hub)),
#[cfg(gl)] #[cfg(gl)]
map(("GL", &mut id_dx11, adapters_gl, backend::Gl::hub)), map(("GL", &mut id_gl, adapters_gl, backend::Gl::hub)),
} }
let _ = ( let _ = (
@ -742,7 +858,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn adapter_get_info<B: GfxBackend>( pub fn adapter_get_info<B: GfxBackend>(
&self, &self,
adapter_id: AdapterId, adapter_id: AdapterId,
) -> Result<AdapterInfo, InvalidAdapter> { ) -> Result<wgt::AdapterInfo, InvalidAdapter> {
span!(_guard, INFO, "Adapter::get_info"); span!(_guard, INFO, "Adapter::get_info");
let hub = B::hub(self); let hub = B::hub(self);
@ -750,7 +866,23 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (adapter_guard, _) = hub.adapters.read(&mut token); let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard adapter_guard
.get(adapter_id) .get(adapter_id)
.map(|adapter| AdapterInfo::from_gfx(adapter.raw.info.clone(), adapter_id.backend())) .map(|adapter| conv::map_adapter_info(adapter.raw.info.clone(), adapter_id.backend()))
.map_err(|_| InvalidAdapter)
}
pub fn adapter_get_texture_format_features<B: GfxBackend>(
&self,
adapter_id: AdapterId,
format: wgt::TextureFormat,
) -> Result<wgt::TextureFormatFeatures, InvalidAdapter> {
span!(_guard, INFO, "Adapter::get_texture_format_features");
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.get_texture_format_features(format))
.map_err(|_| InvalidAdapter) .map_err(|_| InvalidAdapter)
} }
@ -791,16 +923,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root(); let mut token = Token::root();
let (mut adapter_guard, _) = hub.adapters.write(&mut token); let (mut adapter_guard, _) = hub.adapters.write(&mut token);
match adapter_guard.get_mut(adapter_id) { let free = match adapter_guard.get_mut(adapter_id) {
Ok(adapter) => { Ok(adapter) => adapter.life_guard.ref_count.take().unwrap().load() == 1,
if adapter.life_guard.ref_count.take().unwrap().load() == 1 { Err(_) => true,
hub.adapters };
.unregister_locked(adapter_id, &mut *adapter_guard); if free {
} hub.adapters
} .unregister_locked(adapter_id, &mut *adapter_guard);
Err(_) => {
hub.adapters.free_id(adapter_id);
}
} }
} }
} }
@ -817,6 +946,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let fid = hub.devices.prepare(id_in);
let error = loop { let error = loop {
let (adapter_guard, mut token) = hub.adapters.read(&mut token); let (adapter_guard, mut token) = hub.adapters.read(&mut token);
@ -828,13 +958,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Ok(device) => device, Ok(device) => device,
Err(e) => break e, Err(e) => break e,
}; };
let id = hub.devices.register_identity(id_in, device, &mut token); let id = fid.assign(device, &mut token);
return (id.0, None); return (id.0, None);
}; };
let id = hub let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
.devices
.register_error(id_in, desc.label.borrow_or_default(), &mut token);
(id, Some(error)) (id, Some(error))
} }
} }


@ -36,6 +36,7 @@ pub mod device;
pub mod hub; pub mod hub;
pub mod id; pub mod id;
pub mod instance; pub mod instance;
mod memory_init_tracker;
pub mod pipeline; pub mod pipeline;
pub mod resource; pub mod resource;
pub mod swap_chain; pub mod swap_chain;
@ -219,7 +220,6 @@ struct Stored<T> {
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]
struct PrivateFeatures { struct PrivateFeatures {
shader_validation: bool,
anisotropic_filtering: bool, anisotropic_filtering: bool,
texture_d24: bool, texture_d24: bool,
texture_d24_s8: bool, texture_d24_s8: bool,
@ -231,15 +231,15 @@ macro_rules! gfx_select {
// Note: For some reason the cfg aliases defined in build.rs don't succesfully apply in this // Note: For some reason the cfg aliases defined in build.rs don't succesfully apply in this
// macro so we must specify their equivalents manually // macro so we must specify their equivalents manually
match $id.backend() { match $id.backend() {
#[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))] #[cfg(all(not(target_arch = "wasm32"), any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan")))]
wgt::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),* ), wgt::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),* ),
#[cfg(any(target_os = "ios", target_os = "macos"))] #[cfg(all(not(target_arch = "wasm32"), any(target_os = "ios", target_os = "macos")))]
wgt::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),* ), wgt::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),* ),
#[cfg(windows)] #[cfg(all(not(target_arch = "wasm32"), windows))]
wgt::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),* ), wgt::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),* ),
#[cfg(windows)] #[cfg(all(not(target_arch = "wasm32"), windows))]
wgt::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),* ), wgt::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),* ),
//#[cfg(all(unix, not(any(target_os = "ios", target_os = "macos"))))] //#[cfg(any(target_arch = "wasm32", all(unix, not(any(target_os = "ios", target_os = "macos")))))]
//wgt::Backend::Gl => $global.$method::<$crate::backend::Gl>( $($param),+ ), //wgt::Backend::Gl => $global.$method::<$crate::backend::Gl>( $($param),+ ),
other => panic!("Unexpected backend {:?}", other), other => panic!("Unexpected backend {:?}", other),
} }


@ -0,0 +1,283 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::ops::Range;
#[derive(Debug, Clone, Copy)]
pub(crate) enum MemoryInitKind {
// The memory range is going to be written by an already initialized source, thus doesn't need extra attention other than marking as initialized.
ImplicitlyInitialized,
// The memory range is going to be read, therefore needs to ensure prior initialization.
NeedsInitializedMemory,
}
#[derive(Debug, Clone)]
pub(crate) struct MemoryInitTrackerAction<ResourceId> {
pub(crate) id: ResourceId,
pub(crate) range: Range<wgt::BufferAddress>,
pub(crate) kind: MemoryInitKind,
}
/// Tracks initialization status of a linear range from 0..size
#[derive(Debug)]
pub(crate) struct MemoryInitTracker {
// Ordered, non overlapping list of all uninitialized ranges.
uninitialized_ranges: Vec<Range<wgt::BufferAddress>>,
}
pub(crate) struct MemoryInitTrackerDrain<'a> {
uninitialized_ranges: &'a mut Vec<Range<wgt::BufferAddress>>,
drain_range: Range<wgt::BufferAddress>,
first_index: usize,
next_index: usize,
}
impl<'a> Iterator for MemoryInitTrackerDrain<'a> {
type Item = Range<wgt::BufferAddress>;
fn next(&mut self) -> Option<Self::Item> {
if let Some(r) = self
.uninitialized_ranges
.get(self.next_index)
.and_then(|range| {
if range.start < self.drain_range.end {
Some(range.clone())
} else {
None
}
})
{
self.next_index += 1;
Some(r.start.max(self.drain_range.start)..r.end.min(self.drain_range.end))
} else {
let num_affected = self.next_index - self.first_index;
if num_affected == 0 {
return None;
}
let first_range = &mut self.uninitialized_ranges[self.first_index];
// Split one "big" uninitialized range?
if num_affected == 1
&& first_range.start < self.drain_range.start
&& first_range.end > self.drain_range.end
{
let old_start = first_range.start;
first_range.start = self.drain_range.end;
self.uninitialized_ranges
.insert(self.first_index, old_start..self.drain_range.start);
}
// Adjust border ranges and delete everything in-between.
else {
let remove_start = if first_range.start >= self.drain_range.start {
self.first_index
} else {
first_range.end = self.drain_range.start;
self.first_index + 1
};
let last_range = &mut self.uninitialized_ranges[self.next_index - 1];
let remove_end = if last_range.end <= self.drain_range.end {
self.next_index
} else {
last_range.start = self.drain_range.end;
self.next_index - 1
};
self.uninitialized_ranges.drain(remove_start..remove_end);
}
None
}
}
}
impl MemoryInitTracker {
pub(crate) fn new(size: wgt::BufferAddress) -> Self {
Self {
uninitialized_ranges: vec![0..size],
}
}
// Searches for the smallest `range.end` that is bigger than `bound`, in O(log n)
// (with n being the number of uninitialized ranges).
fn lower_bound(&self, bound: wgt::BufferAddress) -> usize {
// This is equivalent to
//   self.uninitialized_ranges.iter().position(|r| r.end > bound)
// except that it may return an out-of-bounds index instead of None.
// In future Rust versions this operation can be done with `partition_point`.
// See https://github.com/rust-lang/rust/pull/73577/
let mut left = 0;
let mut right = self.uninitialized_ranges.len();
while left != right {
let mid = left + (right - left) / 2;
let value = unsafe { self.uninitialized_ranges.get_unchecked(mid) };
if value.end <= bound {
left = mid + 1;
} else {
right = mid;
}
}
left
}
// Checks if there are any uninitialized ranges within a query.
// If there are any, the returned range is the subrange of the query_range that contains all these uninitialized regions.
// The returned range may be larger than necessary (tradeoff for keeping this function O(log n)).
pub(crate) fn check(
&self,
query_range: Range<wgt::BufferAddress>,
) -> Option<Range<wgt::BufferAddress>> {
let index = self.lower_bound(query_range.start);
self.uninitialized_ranges
.get(index)
.map(|start_range| {
if start_range.start < query_range.end {
let start = start_range.start.max(query_range.start);
match self.uninitialized_ranges.get(index + 1) {
Some(next_range) => {
if next_range.start < query_range.end {
// Would need to keep iterating for more accurate upper bound. Don't do that here.
Some(start..query_range.end)
} else {
Some(start..start_range.end.min(query_range.end))
}
}
None => Some(start..start_range.end.min(query_range.end)),
}
} else {
None
}
})
.flatten()
}
// Drains uninitialized ranges in a query range.
#[must_use]
pub(crate) fn drain<'a>(
&'a mut self,
drain_range: Range<wgt::BufferAddress>,
) -> MemoryInitTrackerDrain<'a> {
let index = self.lower_bound(drain_range.start);
MemoryInitTrackerDrain {
drain_range,
uninitialized_ranges: &mut self.uninitialized_ranges,
first_index: index,
next_index: index,
}
}
// Clears uninitialized ranges in a query range.
pub(crate) fn clear(&mut self, range: Range<wgt::BufferAddress>) {
self.drain(range).for_each(drop);
}
}
#[cfg(test)]
mod test {
use super::MemoryInitTracker;
use std::ops::Range;
#[test]
fn check_for_newly_created_tracker() {
let tracker = MemoryInitTracker::new(10);
assert_eq!(tracker.check(0..10), Some(0..10));
assert_eq!(tracker.check(0..3), Some(0..3));
assert_eq!(tracker.check(3..4), Some(3..4));
assert_eq!(tracker.check(4..10), Some(4..10));
}
#[test]
fn check_for_cleared_tracker() {
let mut tracker = MemoryInitTracker::new(10);
tracker.clear(0..10);
assert_eq!(tracker.check(0..10), None);
assert_eq!(tracker.check(0..3), None);
assert_eq!(tracker.check(3..4), None);
assert_eq!(tracker.check(4..10), None);
}
#[test]
fn check_for_partially_filled_tracker() {
let mut tracker = MemoryInitTracker::new(25);
// Two regions of uninitialized memory
tracker.clear(0..5);
tracker.clear(10..15);
tracker.clear(20..25);
assert_eq!(tracker.check(0..25), Some(5..25)); // entire range
assert_eq!(tracker.check(0..5), None); // left non-overlapping
assert_eq!(tracker.check(3..8), Some(5..8)); // left overlapping region
assert_eq!(tracker.check(3..17), Some(5..17)); // left overlapping region + contained region
assert_eq!(tracker.check(8..22), Some(8..22)); // right overlapping region + contained region (yes, doesn't fix range end!)
assert_eq!(tracker.check(17..22), Some(17..20)); // right overlapping region
assert_eq!(tracker.check(20..25), None); // right non-overlapping
}
#[test]
fn clear_already_cleared() {
let mut tracker = MemoryInitTracker::new(30);
tracker.clear(10..20);
// Overlapping with non-cleared
tracker.clear(5..15); // Left overlap
tracker.clear(15..25); // Right overlap
tracker.clear(0..30); // Inner overlap
// Clear fully cleared
tracker.clear(0..30);
assert_eq!(tracker.check(0..30), None);
}
#[test]
fn drain_never_returns_ranges_twice_for_same_range() {
let mut tracker = MemoryInitTracker::new(19);
assert_eq!(tracker.drain(0..19).count(), 1);
assert_eq!(tracker.drain(0..19).count(), 0);
let mut tracker = MemoryInitTracker::new(17);
assert_eq!(tracker.drain(5..8).count(), 1);
assert_eq!(tracker.drain(5..8).count(), 0);
assert_eq!(tracker.drain(1..3).count(), 1);
assert_eq!(tracker.drain(1..3).count(), 0);
assert_eq!(tracker.drain(7..13).count(), 1);
assert_eq!(tracker.drain(7..13).count(), 0);
}
#[test]
fn drain_splits_ranges_correctly() {
let mut tracker = MemoryInitTracker::new(1337);
assert_eq!(
tracker
.drain(21..42)
.collect::<Vec<Range<wgt::BufferAddress>>>(),
vec![21..42]
);
assert_eq!(
tracker
.drain(900..1000)
.collect::<Vec<Range<wgt::BufferAddress>>>(),
vec![900..1000]
);
// Split ranges.
assert_eq!(
tracker
.drain(5..1003)
.collect::<Vec<Range<wgt::BufferAddress>>>(),
vec![5..21, 42..900, 1000..1003]
);
assert_eq!(
tracker
.drain(0..1337)
.collect::<Vec<Range<wgt::BufferAddress>>>(),
vec![0..5, 1003..1337]
);
}
}
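As an illustration only (not part of the patch), here is a minimal sketch of how the tracker above is meant to be driven from command-recording code; it assumes it runs inside wgpu-core where the private module is in scope, and the function name and literal sizes are invented.

// Sketch only: consult the tracker before a read, and update it on writes.
use crate::memory_init_tracker::MemoryInitTracker;

fn memory_init_sketch() {
    // A freshly created 1 KiB buffer starts fully uninitialized.
    let mut tracker = MemoryInitTracker::new(1024);

    // `check` cheaply reports an (over-approximated) uninitialized subrange.
    if tracker.check(0..256).is_some() {
        // A real caller would record a zero-fill for every drained piece;
        // draining also marks those pieces as initialized.
        for uninit in tracker.drain(0..256) {
            let _ = uninit;
        }
    }

    // A plain write only needs the bookkeeping: mark the range initialized.
    tracker.clear(256..512);
}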


@ -7,37 +7,32 @@ use crate::{
device::{DeviceError, RenderPassContext}, device::{DeviceError, RenderPassContext},
hub::Resource, hub::Resource,
id::{DeviceId, PipelineLayoutId, ShaderModuleId}, id::{DeviceId, PipelineLayoutId, ShaderModuleId},
validation::StageError, validation, Label, LifeGuard, Stored,
Label, LifeGuard, Stored,
}; };
use std::borrow::Cow; use std::borrow::Cow;
use thiserror::Error; use thiserror::Error;
use wgt::{BufferAddress, IndexFormat, InputStepMode};
#[derive(Debug)] #[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum ShaderModuleSource<'a> { pub enum ShaderModuleSource<'a> {
SpirV(Cow<'a, [u32]>), SpirV(Cow<'a, [u32]>),
Wgsl(Cow<'a, str>), Wgsl(Cow<'a, str>),
// Unable to serialize with `naga::Module` in here: Naga(naga::Module),
// requires naga serialization feature.
//Naga(naga::Module),
} }
#[derive(Debug)] #[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ShaderModuleDescriptor<'a> { pub struct ShaderModuleDescriptor<'a> {
pub label: Label<'a>, pub label: Label<'a>,
pub source: ShaderModuleSource<'a>, #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
pub flags: wgt::ShaderFlags,
} }
#[derive(Debug)] #[derive(Debug)]
pub struct ShaderModule<B: hal::Backend> { pub struct ShaderModule<B: hal::Backend> {
pub(crate) raw: B::ShaderModule, pub(crate) raw: B::ShaderModule,
pub(crate) device_id: Stored<DeviceId>, pub(crate) device_id: Stored<DeviceId>,
pub(crate) module: Option<naga::Module>, pub(crate) interface: Option<validation::Interface>,
#[cfg(debug_assertions)] #[cfg(debug_assertions)]
pub(crate) label: String, pub(crate) label: String,
} }
@ -59,10 +54,14 @@ impl<B: hal::Backend> Resource for ShaderModule<B> {
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum CreateShaderModuleError { pub enum CreateShaderModuleError {
#[error("Failed to parse WGSL")]
Parsing,
#[error(transparent)] #[error(transparent)]
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error(transparent)] #[error(transparent)]
Validation(#[from] naga::proc::ValidationError), Validation(#[from] naga::proc::ValidationError),
#[error("missing required device features {0:?}")]
MissingFeature(wgt::Features),
} }
/// Describes a programmable pipeline stage. /// Describes a programmable pipeline stage.
@ -101,7 +100,7 @@ pub struct ComputePipelineDescriptor<'a> {
/// The layout of bind groups for this pipeline. /// The layout of bind groups for this pipeline.
pub layout: Option<PipelineLayoutId>, pub layout: Option<PipelineLayoutId>,
/// The compiled compute stage and its entry point. /// The compiled compute stage and its entry point.
pub compute_stage: ProgrammableStageDescriptor<'a>, pub stage: ProgrammableStageDescriptor<'a>,
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -113,7 +112,7 @@ pub enum CreateComputePipelineError {
#[error("unable to derive an implicit layout")] #[error("unable to derive an implicit layout")]
Implicit(#[from] ImplicitLayoutError), Implicit(#[from] ImplicitLayoutError),
#[error(transparent)] #[error(transparent)]
Stage(StageError), Stage(validation::StageError),
} }
#[derive(Debug)] #[derive(Debug)]
@ -136,24 +135,35 @@ impl<B: hal::Backend> Resource for ComputePipeline<B> {
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexBufferDescriptor<'a> { pub struct VertexBufferLayout<'a> {
/// The stride, in bytes, between elements of this buffer. /// The stride, in bytes, between elements of this buffer.
pub stride: BufferAddress, pub array_stride: wgt::BufferAddress,
/// How often this vertex buffer is "stepped" forward. /// How often this vertex buffer is "stepped" forward.
pub step_mode: InputStepMode, pub step_mode: wgt::InputStepMode,
/// The list of attributes which comprise a single vertex. /// The list of attributes which comprise a single vertex.
pub attributes: Cow<'a, [wgt::VertexAttributeDescriptor]>, pub attributes: Cow<'a, [wgt::VertexAttribute]>,
} }
/// Describes vertex input state for a render pipeline. /// Describes the vertex process in a render pipeline.
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))] #[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))] #[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexStateDescriptor<'a> { pub struct VertexState<'a> {
/// The format of any index buffers used with this pipeline. /// The compiled vertex stage and its entry point.
pub index_format: IndexFormat, pub stage: ProgrammableStageDescriptor<'a>,
/// The format of any vertex buffers used with this pipeline. /// The format of any vertex buffers used with this pipeline.
pub vertex_buffers: Cow<'a, [VertexBufferDescriptor<'a>]>, pub buffers: Cow<'a, [VertexBufferLayout<'a>]>,
}
/// Describes fragment processing in a render pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct FragmentState<'a> {
/// The compiled fragment stage and its entry point.
pub stage: ProgrammableStageDescriptor<'a>,
/// The effect of draw calls on the color aspect of the output target.
pub targets: Cow<'a, [wgt::ColorTargetState]>,
} }
/// Describes a render (graphics) pipeline. /// Describes a render (graphics) pipeline.
@ -164,40 +174,26 @@ pub struct RenderPipelineDescriptor<'a> {
pub label: Label<'a>, pub label: Label<'a>,
/// The layout of bind groups for this pipeline. /// The layout of bind groups for this pipeline.
pub layout: Option<PipelineLayoutId>, pub layout: Option<PipelineLayoutId>,
/// The compiled vertex stage and its entry point. /// The vertex processing state for this pipeline.
pub vertex_stage: ProgrammableStageDescriptor<'a>, pub vertex: VertexState<'a>,
/// The compiled fragment stage and its entry point, if any. /// The properties of the pipeline at the primitive assembly and rasterization level.
pub fragment_stage: Option<ProgrammableStageDescriptor<'a>>, #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
/// The rasterization process for this pipeline. pub primitive: wgt::PrimitiveState,
pub rasterization_state: Option<wgt::RasterizationStateDescriptor>,
/// The primitive topology used to interpret vertices.
pub primitive_topology: wgt::PrimitiveTopology,
/// The effect of draw calls on the color aspect of the output target.
pub color_states: Cow<'a, [wgt::ColorStateDescriptor]>,
/// The effect of draw calls on the depth and stencil aspects of the output target, if any. /// The effect of draw calls on the depth and stencil aspects of the output target, if any.
pub depth_stencil_state: Option<wgt::DepthStencilStateDescriptor>, #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
/// The vertex input state for this pipeline. pub depth_stencil: Option<wgt::DepthStencilState>,
pub vertex_state: VertexStateDescriptor<'a>, /// The multi-sampling properties of the pipeline.
/// The number of samples calculated per pixel (for MSAA). For non-multisampled textures, #[cfg_attr(any(feature = "replay", feature = "trace"), serde(default))]
/// this should be `1` pub multisample: wgt::MultisampleState,
pub sample_count: u32, /// The fragment processing state for this pipeline.
/// Bitmask that restricts the samples of a pixel modified by this pipeline. All samples pub fragment: Option<FragmentState<'a>>,
/// can be enabled using the value `!0`
pub sample_mask: u32,
/// When enabled, produces another sample mask per pixel based on the alpha output value, that
/// is ANDed with the sample_mask and the primitive coverage to restrict the set of samples
/// affected by a primitive.
///
/// The implicit mask produced for alpha of zero is guaranteed to be zero, and for alpha of one
/// is guaranteed to be all 1-s.
pub alpha_to_coverage_enabled: bool,
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum CreateRenderPipelineError { pub enum CreateRenderPipelineError {
#[error(transparent)] #[error(transparent)]
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("pipelie layout is invalid")] #[error("pipeline layout is invalid")]
InvalidLayout, InvalidLayout,
#[error("unable to derive an implicit layout")] #[error("unable to derive an implicit layout")]
Implicit(#[from] ImplicitLayoutError), Implicit(#[from] ImplicitLayoutError),
@ -207,12 +203,26 @@ pub enum CreateRenderPipelineError {
IncompatibleOutputFormat { index: u8 }, IncompatibleOutputFormat { index: u8 },
#[error("invalid sample count {0}")] #[error("invalid sample count {0}")]
InvalidSampleCount(u32), InvalidSampleCount(u32),
#[error("the number of vertex buffers {given} exceeds the limit {limit}")]
TooManyVertexBuffers { given: u32, limit: u32 },
#[error("the total number of vertex attributes {given} exceeds the limit {limit}")]
TooManyVertexAttributes { given: u32, limit: u32 },
#[error("vertex buffer {index} stride {given} exceeds the limit {limit}")]
VertexStrideTooLarge { index: u32, given: u32, limit: u32 },
#[error("vertex buffer {index} stride {stride} does not respect `VERTEX_STRIDE_ALIGNMENT`")] #[error("vertex buffer {index} stride {stride} does not respect `VERTEX_STRIDE_ALIGNMENT`")]
UnalignedVertexStride { index: u32, stride: BufferAddress }, UnalignedVertexStride {
index: u32,
stride: wgt::BufferAddress,
},
#[error("vertex attribute at location {location} has invalid offset {offset}")] #[error("vertex attribute at location {location} has invalid offset {offset}")]
InvalidVertexAttributeOffset { InvalidVertexAttributeOffset {
location: wgt::ShaderLocation, location: wgt::ShaderLocation,
offset: BufferAddress, offset: wgt::BufferAddress,
},
#[error("strip index format was not set to None but to {strip_index_format:?} while using the non-strip topology {topology:?}")]
StripIndexFormatForNonStripTopology {
strip_index_format: Option<wgt::IndexFormat>,
topology: wgt::PrimitiveTopology,
}, },
#[error("missing required device features {0:?}")] #[error("missing required device features {0:?}")]
MissingFeature(wgt::Features), MissingFeature(wgt::Features),
@ -220,7 +230,7 @@ pub enum CreateRenderPipelineError {
Stage { Stage {
flag: wgt::ShaderStage, flag: wgt::ShaderStage,
#[source] #[source]
error: StageError, error: validation::StageError,
}, },
} }
@ -240,8 +250,8 @@ pub struct RenderPipeline<B: hal::Backend> {
pub(crate) device_id: Stored<DeviceId>, pub(crate) device_id: Stored<DeviceId>,
pub(crate) pass_context: RenderPassContext, pub(crate) pass_context: RenderPassContext,
pub(crate) flags: PipelineFlags, pub(crate) flags: PipelineFlags,
pub(crate) index_format: IndexFormat, pub(crate) strip_index_format: Option<wgt::IndexFormat>,
pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>, pub(crate) vertex_strides: Vec<(wgt::BufferAddress, wgt::InputStepMode)>,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }


@ -6,6 +6,7 @@ use crate::{
device::{alloc::MemoryBlock, DeviceError, HostMap}, device::{alloc::MemoryBlock, DeviceError, HostMap},
hub::Resource, hub::Resource,
id::{DeviceId, SwapChainId, TextureId}, id::{DeviceId, SwapChainId, TextureId},
memory_init_tracker::MemoryInitTracker,
track::{TextureSelector, DUMMY_SELECTOR}, track::{TextureSelector, DUMMY_SELECTOR},
validation::MissingBufferUsageError, validation::MissingBufferUsageError,
Label, LifeGuard, RefCount, Stored, Label, LifeGuard, RefCount, Stored,
@ -75,6 +76,7 @@ bitflags::bitflags! {
pub enum BufferMapAsyncStatus { pub enum BufferMapAsyncStatus {
Success, Success,
Error, Error,
Aborted,
Unknown, Unknown,
ContextLost, ContextLost,
} }
@ -160,7 +162,7 @@ pub struct Buffer<B: hal::Backend> {
pub(crate) device_id: Stored<DeviceId>, pub(crate) device_id: Stored<DeviceId>,
pub(crate) usage: wgt::BufferUsage, pub(crate) usage: wgt::BufferUsage,
pub(crate) size: wgt::BufferAddress, pub(crate) size: wgt::BufferAddress,
pub(crate) full_range: (), pub(crate) initialization_status: MemoryInitTracker,
pub(crate) sync_mapped_writes: Option<hal::memory::Segment>, pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
pub(crate) map_state: BufferMapState<B>, pub(crate) map_state: BufferMapState<B>,
@ -174,6 +176,8 @@ pub enum CreateBufferError {
AccessError(#[from] BufferAccessError), AccessError(#[from] BufferAccessError),
#[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")] #[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")]
UnalignedSize, UnalignedSize,
#[error("Buffers cannot have empty usage flags")]
EmptyUsage,
#[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")] #[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
UsageMismatch(wgt::BufferUsage), UsageMismatch(wgt::BufferUsage),
} }
@ -203,11 +207,13 @@ pub struct Texture<B: hal::Backend> {
pub(crate) dimension: wgt::TextureDimension, pub(crate) dimension: wgt::TextureDimension,
pub(crate) kind: hal::image::Kind, pub(crate) kind: hal::image::Kind,
pub(crate) format: wgt::TextureFormat, pub(crate) format: wgt::TextureFormat,
pub(crate) format_features: wgt::TextureFormatFeatures,
pub(crate) framebuffer_attachment: hal::image::FramebufferAttachment,
pub(crate) full_range: TextureSelector, pub(crate) full_range: TextureSelector,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }
#[derive(Clone, Debug)] #[derive(Clone, Copy, Debug)]
pub enum TextureErrorDimension { pub enum TextureErrorDimension {
X, X,
Y, Y,
@ -218,8 +224,12 @@ pub enum TextureErrorDimension {
pub enum TextureDimensionError { pub enum TextureDimensionError {
#[error("Dimension {0:?} is zero")] #[error("Dimension {0:?} is zero")]
Zero(TextureErrorDimension), Zero(TextureErrorDimension),
#[error("1D textures must have height set to 1")] #[error("Dimension {0:?} value {given} exceeds the limit of {limit}")]
InvalidHeight, LimitExceeded {
dim: TextureErrorDimension,
given: u32,
limit: u32,
},
#[error("sample count {0} is invalid")] #[error("sample count {0} is invalid")]
InvalidSampleCount(u32), InvalidSampleCount(u32),
} }
@ -230,10 +240,14 @@ pub enum CreateTextureError {
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("D24Plus textures cannot be copied")] #[error("D24Plus textures cannot be copied")]
CannotCopyD24Plus, CannotCopyD24Plus,
#[error("Textures cannot have empty usage flags")]
EmptyUsage,
#[error(transparent)] #[error(transparent)]
InvalidDimension(#[from] TextureDimensionError), InvalidDimension(#[from] TextureDimensionError),
#[error("texture descriptor mip level count ({0}) is invalid")] #[error("texture descriptor mip level count ({0}) is invalid")]
InvalidMipLevelCount(u32), InvalidMipLevelCount(u32),
#[error("The texture usages {0:?} are not allowed on a texture of type {1:?}")]
InvalidUsages(wgt::TextureUsage, wgt::TextureFormat),
#[error("Feature {0:?} must be enabled to create a texture of type {1:?}")] #[error("Feature {0:?} must be enabled to create a texture of type {1:?}")]
MissingFeature(wgt::Features, wgt::TextureFormat), MissingFeature(wgt::Features, wgt::TextureFormat),
} }
@ -265,7 +279,7 @@ pub struct TextureViewDescriptor<'a> {
/// The dimension of the texture view. For 1D textures, this must be `1D`. For 2D textures it must be one of /// The dimension of the texture view. For 1D textures, this must be `1D`. For 2D textures it must be one of
/// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `3D` /// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `3D`
pub dimension: Option<wgt::TextureViewDimension>, pub dimension: Option<wgt::TextureViewDimension>,
/// Aspect of the texture. Color textures must be [`TextureAspect::All`]. /// Aspect of the texture. Color textures must be [`TextureAspect::All`](wgt::TextureAspect::All).
pub aspect: wgt::TextureAspect, pub aspect: wgt::TextureAspect,
/// Base mip level. /// Base mip level.
pub base_mip_level: u32, pub base_mip_level: u32,
@ -299,8 +313,13 @@ pub struct TextureView<B: hal::Backend> {
//TODO: store device_id for quick access? //TODO: store device_id for quick access?
pub(crate) aspects: hal::format::Aspects, pub(crate) aspects: hal::format::Aspects,
pub(crate) format: wgt::TextureFormat, pub(crate) format: wgt::TextureFormat,
pub(crate) extent: hal::image::Extent, pub(crate) format_features: wgt::TextureFormatFeatures,
pub(crate) dimension: wgt::TextureViewDimension,
pub(crate) extent: wgt::Extent3d,
pub(crate) samples: hal::image::NumSamples, pub(crate) samples: hal::image::NumSamples,
pub(crate) framebuffer_attachment: hal::image::FramebufferAttachment,
/// Internal use of this texture view when used as `BindingType::Texture`.
pub(crate) sampled_internal_use: TextureUse,
pub(crate) selector: TextureSelector, pub(crate) selector: TextureSelector,
pub(crate) life_guard: LifeGuard, pub(crate) life_guard: LifeGuard,
} }
@ -381,7 +400,7 @@ pub struct SamplerDescriptor<'a> {
pub compare: Option<wgt::CompareFunction>, pub compare: Option<wgt::CompareFunction>,
/// Valid values: 1, 2, 4, 8, and 16. /// Valid values: 1, 2, 4, 8, and 16.
pub anisotropy_clamp: Option<NonZeroU8>, pub anisotropy_clamp: Option<NonZeroU8>,
/// Border color to use when address_mode is [`AddressMode::ClampToBorder`] /// Border color to use when address_mode is [`AddressMode::ClampToBorder`](wgt::AddressMode::ClampToBorder)
pub border_color: Option<wgt::SamplerBorderColor>, pub border_color: Option<wgt::SamplerBorderColor>,
} }
@ -437,6 +456,42 @@ impl<B: hal::Backend> Borrow<()> for Sampler<B> {
&DUMMY_SELECTOR &DUMMY_SELECTOR
} }
} }
#[derive(Clone, Debug, Error)]
pub enum CreateQuerySetError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("QuerySets cannot be made with zero queries")]
ZeroCount,
#[error("{count} is too many queries for a single QuerySet. QuerySets cannot be made more than {maximum} queries.")]
TooManyQueries { count: u32, maximum: u32 },
#[error("Feature {0:?} must be enabled")]
MissingFeature(wgt::Features),
}
#[derive(Debug)]
pub struct QuerySet<B: hal::Backend> {
pub(crate) raw: B::QueryPool,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
/// Number of queries in the query set.
pub(crate) desc: wgt::QuerySetDescriptor,
/// Number of values recorded per query (e.g. a pipeline statistics query for two attributes records two values per query).
pub(crate) elements: u32,
}
impl<B: hal::Backend> Resource for QuerySet<B> {
const TYPE: &'static str = "QuerySet";
fn life_guard(&self) -> &LifeGuard {
&self.life_guard
}
}
impl<B: hal::Backend> Borrow<()> for QuerySet<B> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
pub enum DestroyError { pub enum DestroyError {


@ -44,7 +44,7 @@ use crate::{
LifeGuard, PrivateFeatures, Stored, SubmissionIndex, LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
}; };
use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _}; use hal::{queue::Queue as _, window::PresentationSurface as _};
use thiserror::Error; use thiserror::Error;
use wgt::{SwapChainDescriptor, SwapChainStatus}; use wgt::{SwapChainDescriptor, SwapChainStatus};
@ -59,8 +59,8 @@ pub struct SwapChain<B: hal::Backend> {
pub(crate) num_frames: hal::window::SwapImageIndex, pub(crate) num_frames: hal::window::SwapImageIndex,
pub(crate) semaphore: B::Semaphore, pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>, pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
pub(crate) acquired_framebuffers: Vec<B::Framebuffer>,
pub(crate) active_submission_index: SubmissionIndex, pub(crate) active_submission_index: SubmissionIndex,
pub(crate) framebuffer_attachment: hal::image::FramebufferAttachment,
} }
impl<B: hal::Backend> crate::hub::Resource for SwapChain<B> { impl<B: hal::Backend> crate::hub::Resource for SwapChain<B> {
@ -81,6 +81,8 @@ pub enum SwapChainError {
Device(#[from] DeviceError), Device(#[from] DeviceError),
#[error("swap chain image is already acquired")] #[error("swap chain image is already acquired")]
AlreadyAcquired, AlreadyAcquired,
#[error("acquired frame is still referenced")]
StillReferenced,
} }
#[derive(Clone, Debug, Error)] #[derive(Clone, Debug, Error)]
@ -91,6 +93,8 @@ pub enum CreateSwapChainError {
InvalidSurface, InvalidSurface,
#[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")] #[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")]
SwapChainOutputExists, SwapChainOutputExists,
#[error("Both `SwapChain` width and height must be non-zero. Wait to recreate the `SwapChain` until the window has non-zero area.")]
ZeroArea,
#[error("surface does not support the adapter's queue family")] #[error("surface does not support the adapter's queue family")]
UnsupportedQueueFamily, UnsupportedQueueFamily,
#[error("requested format {requested:?} is not in list of supported formats: {available:?}")] #[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
@ -139,6 +143,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let hub = B::hub(self); let hub = B::hub(self);
let mut token = Token::root(); let mut token = Token::root();
let fid = hub.texture_views.prepare(view_id_in);
let (mut surface_guard, mut token) = self.surfaces.write(&mut token); let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = surface_guard let surface = surface_guard
@ -149,8 +154,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let sc = swap_chain_guard let sc = swap_chain_guard
.get_mut(swap_chain_id) .get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?; .map_err(|_| SwapChainError::Invalid)?;
#[cfg_attr(not(feature = "trace"), allow(unused_variables))]
#[allow(unused_variables)]
let device = &device_guard[sc.device_id.value]; let device = &device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::GetSwapChainTexture {
id: fid.id(),
parent_id: swap_chain_id,
});
}
let suf = B::get_surface_mut(surface); let suf = B::get_surface_mut(surface);
let (image, status) = match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } { let (image, status) = match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
@ -160,9 +173,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
None, None,
match err { match err {
hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?, hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
hal::window::AcquireError::NotReady => unreachable!(), // we always set a timeout hal::window::AcquireError::NotReady { .. } => SwapChainStatus::Timeout,
hal::window::AcquireError::Timeout => SwapChainStatus::Timeout, hal::window::AcquireError::OutOfDate(_) => SwapChainStatus::Outdated,
hal::window::AcquireError::OutOfDate => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost, hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?, hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
}, },
@ -181,12 +193,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}, },
aspects: hal::format::Aspects::COLOR, aspects: hal::format::Aspects::COLOR,
format: sc.desc.format, format: sc.desc.format,
extent: hal::image::Extent { format_features: wgt::TextureFormatFeatures {
allowed_usages: wgt::TextureUsage::RENDER_ATTACHMENT,
flags: wgt::TextureFormatFeatureFlags::empty(),
filterable: false,
},
dimension: wgt::TextureViewDimension::D2,
extent: wgt::Extent3d {
width: sc.desc.width, width: sc.desc.width,
height: sc.desc.height, height: sc.desc.height,
depth: 1, depth_or_array_layers: 1,
}, },
samples: 1, samples: 1,
framebuffer_attachment: sc.framebuffer_attachment.clone(),
sampled_internal_use: resource::TextureUse::empty(),
selector: TextureSelector { selector: TextureSelector {
layers: 0..1, layers: 0..1,
levels: 0..1, levels: 0..1,
@ -195,9 +215,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}; };
let ref_count = view.life_guard.add_ref(); let ref_count = view.life_guard.add_ref();
let id = hub let id = fid.assign(view, &mut token);
.texture_views
.register_identity(view_id_in, view, &mut token);
if sc.acquired_view_id.is_some() { if sc.acquired_view_id.is_some() {
return Err(SwapChainError::AlreadyAcquired); return Err(SwapChainError::AlreadyAcquired);
@ -213,14 +231,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
None => None, None => None,
}; };
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::GetSwapChainTexture {
id: view_id,
parent_id: swap_chain_id,
});
}
Ok(SwapChainOutput { status, view_id }) Ok(SwapChainOutput { status, view_id })
} }
@ -249,19 +259,24 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
trace.lock().add(Action::PresentSwapChain(swap_chain_id)); trace.lock().add(Action::PresentSwapChain(swap_chain_id));
} }
let view_id = sc let view = {
.acquired_view_id let view_id = sc
.take() .acquired_view_id
.ok_or(SwapChainError::AlreadyAcquired)?; .take()
let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token); .ok_or(SwapChainError::AlreadyAcquired)?;
let view = view_maybe.ok_or(SwapChainError::Invalid)?; let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
view_maybe.ok_or(SwapChainError::Invalid)?
};
if view.life_guard.ref_count.unwrap().load() != 1 {
return Err(SwapChainError::StillReferenced);
}
let image = match view.inner { let image = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(), resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain { image, .. } => image, resource::TextureViewInner::SwapChain { image, .. } => image,
}; };
let sem = if sc.active_submission_index > device.last_completed_submission_index() { let sem = if sc.active_submission_index > device.last_completed_submission_index() {
Some(&sc.semaphore) Some(&mut sc.semaphore)
} else { } else {
None None
}; };
@ -270,12 +285,6 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
tracing::debug!(trace = true, "Presented. End of Frame"); tracing::debug!(trace = true, "Presented. End of Frame");
for fbo in sc.acquired_framebuffers.drain(..) {
unsafe {
device.raw.destroy_framebuffer(fbo);
}
}
match result { match result {
Ok(None) => Ok(SwapChainStatus::Good), Ok(None) => Ok(SwapChainStatus::Good),
Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal), Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
@ -283,7 +292,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal::window::PresentError::OutOfMemory(_) => { hal::window::PresentError::OutOfMemory(_) => {
Err(SwapChainError::Device(DeviceError::OutOfMemory)) Err(SwapChainError::Device(DeviceError::OutOfMemory))
} }
hal::window::PresentError::OutOfDate => Ok(SwapChainStatus::Outdated), hal::window::PresentError::OutOfDate(_) => Ok(SwapChainStatus::Outdated),
hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost), hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
hal::window::PresentError::DeviceLost(_) => { hal::window::PresentError::DeviceLost(_) => {
Err(SwapChainError::Device(DeviceError::Lost)) Err(SwapChainError::Device(DeviceError::Lost))


@ -315,6 +315,18 @@ impl<S: ResourceState> ResourceTracker<S> {
} }
} }
fn get<'a>(
self_backend: wgt::Backend,
map: &'a mut FastHashMap<Index, Resource<S>>,
id: Valid<S::Id>,
) -> &'a mut Resource<S> {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(self_backend, backend);
let e = map.get_mut(&index).unwrap();
assert_eq!(e.epoch, epoch);
e
}
/// Extend the usage of a specified resource. /// Extend the usage of a specified resource.
/// ///
/// Returns conflicting transition as an error. /// Returns conflicting transition as an error.
@ -345,6 +357,21 @@ impl<S: ResourceState> ResourceTracker<S> {
self.temp.drain(..) self.temp.drain(..)
} }
/// Replace the usage of a specified already tracked resource.
/// (panics if the resource is not yet tracked)
pub(crate) fn change_replace_tracked(
&mut self,
id: Valid<S::Id>,
selector: S::Selector,
usage: S::Usage,
) -> Drain<PendingTransition<S>> {
let res = Self::get(self.backend, &mut self.map, id);
res.state
.change(id, selector, usage, Some(&mut self.temp))
.ok();
self.temp.drain(..)
}
/// Turn the tracking from the "expand" mode into the "replace" one, /// Turn the tracking from the "expand" mode into the "replace" one,
/// installing the selected usage as the "first". /// installing the selected usage as the "first".
/// This is a special operation only used by the render pass attachments. /// This is a special operation only used by the render pass attachments.
@ -370,7 +397,12 @@ impl<S: ResourceState> ResourceTracker<S> {
e.insert(new.clone()); e.insert(new.clone());
} }
Entry::Occupied(e) => { Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch); assert_eq!(
e.get().epoch,
new.epoch,
"ID {:?} wasn't properly removed",
S::Id::zip(index, e.get().epoch, self.backend)
);
let id = Valid(S::Id::zip(index, new.epoch, self.backend)); let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut().state.merge(id, &new.state, None)?; e.into_mut().state.merge(id, &new.state, None)?;
} }
@ -388,7 +420,12 @@ impl<S: ResourceState> ResourceTracker<S> {
e.insert(new.clone()); e.insert(new.clone());
} }
Entry::Occupied(e) => { Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch); assert_eq!(
e.get().epoch,
new.epoch,
"ID {:?} wasn't properly removed",
S::Id::zip(index, e.get().epoch, self.backend)
);
let id = Valid(S::Id::zip(index, new.epoch, self.backend)); let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut() e.into_mut()
.state .state
@ -518,6 +555,7 @@ pub(crate) struct TrackerSet {
pub compute_pipes: ResourceTracker<PhantomData<id::ComputePipelineId>>, pub compute_pipes: ResourceTracker<PhantomData<id::ComputePipelineId>>,
pub render_pipes: ResourceTracker<PhantomData<id::RenderPipelineId>>, pub render_pipes: ResourceTracker<PhantomData<id::RenderPipelineId>>,
pub bundles: ResourceTracker<PhantomData<id::RenderBundleId>>, pub bundles: ResourceTracker<PhantomData<id::RenderBundleId>>,
pub query_sets: ResourceTracker<PhantomData<id::QuerySetId>>,
} }
impl TrackerSet { impl TrackerSet {
@ -532,6 +570,7 @@ impl TrackerSet {
compute_pipes: ResourceTracker::new(backend), compute_pipes: ResourceTracker::new(backend),
render_pipes: ResourceTracker::new(backend), render_pipes: ResourceTracker::new(backend),
bundles: ResourceTracker::new(backend), bundles: ResourceTracker::new(backend),
query_sets: ResourceTracker::new(backend),
} }
} }
@ -545,6 +584,7 @@ impl TrackerSet {
self.compute_pipes.clear(); self.compute_pipes.clear();
self.render_pipes.clear(); self.render_pipes.clear();
self.bundles.clear(); self.bundles.clear();
self.query_sets.clear();
} }
/// Try to optimize the tracking representation. /// Try to optimize the tracking representation.
@ -557,6 +597,7 @@ impl TrackerSet {
self.compute_pipes.optimize(); self.compute_pipes.optimize();
self.render_pipes.optimize(); self.render_pipes.optimize();
self.bundles.optimize(); self.bundles.optimize();
self.query_sets.optimize();
} }
/// Merge all the trackers of another instance by extending /// Merge all the trackers of another instance by extending
@ -584,6 +625,7 @@ impl TrackerSet {
.unwrap(); .unwrap();
self.render_pipes.merge_extend(&other.render_pipes).unwrap(); self.render_pipes.merge_extend(&other.render_pipes).unwrap();
self.bundles.merge_extend(&other.bundles).unwrap(); self.bundles.merge_extend(&other.bundles).unwrap();
self.query_sets.merge_extend(&other.query_sets).unwrap();
Ok(()) Ok(())
} }


@ -207,26 +207,28 @@ impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
(Some(&(ref ra, va)), Some(&(ref rb, vb))) => { (Some(&(ref ra, va)), Some(&(ref rb, vb))) => {
let (range, usage) = if ra.start < self.base { let (range, usage) = if ra.start < self.base {
// in the middle of the left stream // in the middle of the left stream
if self.base == rb.start { let (end, end_value) = if self.base == rb.start {
// right stream is starting // right stream is starting
debug_assert!(self.base < ra.end); debug_assert!(self.base < ra.end);
(self.base..ra.end.min(rb.end), Some(*va)..Some(*vb)) (rb.end, Some(*vb))
} else { } else {
// right hasn't started yet // right hasn't started yet
debug_assert!(self.base < rb.start); debug_assert!(self.base < rb.start);
(self.base..rb.start, Some(*va)..None) (rb.start, None)
} };
(self.base..ra.end.min(end), Some(*va)..end_value)
} else if rb.start < self.base { } else if rb.start < self.base {
// in the middle of the right stream // in the middle of the right stream
if self.base == ra.start { let (end, start_value) = if self.base == ra.start {
// left stream is starting // left stream is starting
debug_assert!(self.base < rb.end); debug_assert!(self.base < rb.end);
(self.base..ra.end.min(rb.end), Some(*va)..Some(*vb)) (ra.end, Some(*va))
} else { } else {
// left hasn't started yet // left hasn't started yet
debug_assert!(self.base < ra.start); debug_assert!(self.base < ra.start);
(self.base..ra.start, None..Some(*vb)) (ra.start, None)
} };
(self.base..rb.end.min(end), start_value..Some(*vb))
} else { } else {
// no active streams // no active streams
match ra.start.cmp(&rb.start) { match ra.start.cmp(&rb.start) {
@ -396,4 +398,40 @@ mod test {
] ]
); );
} }
#[test]
fn merge_complex() {
assert_eq!(
&easy_merge(
&[
(0..8, 0u8),
(8..9, 1),
(9..16, 2),
(16..17, 3),
(17..118, 4),
(118..119, 5),
(119..124, 6),
(124..125, 7),
(125..512, 8),
],
&[(15..16, 10u8), (51..52, 11), (126..127, 12),],
),
&[
(0..8, Some(0)..None),
(8..9, Some(1)..None),
(9..15, Some(2)..None),
(15..16, Some(2)..Some(10)),
(16..17, Some(3)..None),
(17..51, Some(4)..None),
(51..52, Some(4)..Some(11)),
(52..118, Some(4)..None),
(118..119, Some(5)..None),
(119..124, Some(6)..None),
(124..125, Some(7)..None),
(125..126, Some(8)..None),
(126..127, Some(8)..Some(12)),
(127..512, Some(8)..None),
]
);
}
} }

(Diff for this file not shown because of its size.)


@ -1,6 +1,6 @@
[package] [package]
name = "wgpu-types" name = "wgpu-types"
version = "0.6.0" version = "0.7.0"
authors = ["wgpu developers"] authors = ["wgpu developers"]
edition = "2018" edition = "2018"
description = "WebGPU types" description = "WebGPU types"

(Diff for this file not shown because of its size.)


@ -4,7 +4,7 @@
use crate::{ use crate::{
cow_label, ByteBuf, CommandEncoderAction, DeviceAction, DropAction, ImplicitLayout, RawString, cow_label, ByteBuf, CommandEncoderAction, DeviceAction, DropAction, ImplicitLayout, RawString,
TextureAction, ShaderModuleSource, TextureAction,
}; };
use wgc::{hub::IdentityManager, id}; use wgc::{hub::IdentityManager, id};
@ -17,9 +17,19 @@ use parking_lot::Mutex;
use std::{ use std::{
borrow::Cow, borrow::Cow,
num::{NonZeroU32, NonZeroU8}, num::{NonZeroU32, NonZeroU8},
ptr, slice, ptr,
}; };
// we can't call `from_raw_parts` unconditionally because the caller
// may not even have a valid pointer (e.g. NULL) if the `length` is zero.
fn make_slice<'a, T>(pointer: *const T, length: usize) -> &'a [T] {
if length == 0 {
&[]
} else {
unsafe { std::slice::from_raw_parts(pointer, length) }
}
}
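As an illustration only (not part of the patch), a minimal check of the zero-length path guarded above; `slice::from_raw_parts` requires a non-null, aligned pointer even for empty slices, so the guard lets callers pass NULL with a length of 0.

// Hypothetical demo: NULL plus length 0 must still produce a valid empty slice.
fn make_slice_zero_length_demo() {
    let empty: &[u32] = make_slice(std::ptr::null(), 0);
    assert!(empty.is_empty());
}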
fn make_byte_buf<T: serde::Serialize>(data: &T) -> ByteBuf { fn make_byte_buf<T: serde::Serialize>(data: &T) -> ByteBuf {
let vec = bincode::serialize(data).unwrap(); let vec = bincode::serialize(data).unwrap();
ByteBuf::from_vec(vec) ByteBuf::from_vec(vec)
@ -27,6 +37,7 @@ fn make_byte_buf<T: serde::Serialize>(data: &T) -> ByteBuf {
#[repr(C)] #[repr(C)]
pub struct ShaderModuleDescriptor { pub struct ShaderModuleDescriptor {
label: RawString,
spirv_words: *const u32, spirv_words: *const u32,
spirv_words_length: usize, spirv_words_length: usize,
wgsl_chars: RawString, wgsl_chars: RawString,
@ -51,39 +62,102 @@ impl ProgrammableStageDescriptor {
pub struct ComputePipelineDescriptor { pub struct ComputePipelineDescriptor {
label: RawString, label: RawString,
layout: Option<id::PipelineLayoutId>, layout: Option<id::PipelineLayoutId>,
compute_stage: ProgrammableStageDescriptor, stage: ProgrammableStageDescriptor,
} }
#[repr(C)] #[repr(C)]
pub struct VertexBufferDescriptor { pub struct VertexBufferLayout {
stride: wgt::BufferAddress, array_stride: wgt::BufferAddress,
step_mode: wgt::InputStepMode, step_mode: wgt::InputStepMode,
attributes: *const wgt::VertexAttributeDescriptor, attributes: *const wgt::VertexAttribute,
attributes_length: usize, attributes_length: usize,
} }
#[repr(C)] #[repr(C)]
pub struct VertexStateDescriptor { pub struct VertexState {
index_format: wgt::IndexFormat, stage: ProgrammableStageDescriptor,
vertex_buffers: *const VertexBufferDescriptor, buffers: *const VertexBufferLayout,
vertex_buffers_length: usize, buffers_length: usize,
}
impl VertexState {
fn to_wgpu(&self) -> wgc::pipeline::VertexState {
let buffer_layouts = make_slice(self.buffers, self.buffers_length)
.iter()
.map(|vb| wgc::pipeline::VertexBufferLayout {
array_stride: vb.array_stride,
step_mode: vb.step_mode,
attributes: Cow::Borrowed(make_slice(vb.attributes, vb.attributes_length)),
})
.collect();
wgc::pipeline::VertexState {
stage: self.stage.to_wgpu(),
buffers: Cow::Owned(buffer_layouts),
}
}
}
#[repr(C)]
pub struct ColorTargetState<'a> {
format: wgt::TextureFormat,
blend: Option<&'a wgt::BlendState>,
write_mask: wgt::ColorWrite,
}
#[repr(C)]
pub struct FragmentState<'a> {
stage: ProgrammableStageDescriptor,
targets: *const ColorTargetState<'a>,
targets_length: usize,
}
impl FragmentState<'_> {
fn to_wgpu(&self) -> wgc::pipeline::FragmentState {
let color_targets = make_slice(self.targets, self.targets_length)
.iter()
.map(|ct| wgt::ColorTargetState {
format: ct.format,
blend: ct.blend.cloned(),
write_mask: ct.write_mask,
})
.collect();
wgc::pipeline::FragmentState {
stage: self.stage.to_wgpu(),
targets: Cow::Owned(color_targets),
}
}
}
#[repr(C)]
pub struct PrimitiveState<'a> {
topology: wgt::PrimitiveTopology,
strip_index_format: Option<&'a wgt::IndexFormat>,
front_face: wgt::FrontFace,
cull_mode: Option<&'a wgt::Face>,
polygon_mode: wgt::PolygonMode,
}
impl PrimitiveState<'_> {
fn to_wgpu(&self) -> wgt::PrimitiveState {
wgt::PrimitiveState {
topology: self.topology,
strip_index_format: self.strip_index_format.cloned(),
front_face: self.front_face.clone(),
cull_mode: self.cull_mode.cloned(),
polygon_mode: self.polygon_mode,
}
}
} }
#[repr(C)] #[repr(C)]
pub struct RenderPipelineDescriptor<'a> { pub struct RenderPipelineDescriptor<'a> {
label: RawString, label: RawString,
layout: Option<id::PipelineLayoutId>, layout: Option<id::PipelineLayoutId>,
vertex_stage: &'a ProgrammableStageDescriptor, vertex: &'a VertexState,
fragment_stage: Option<&'a ProgrammableStageDescriptor>, primitive: PrimitiveState<'a>,
primitive_topology: wgt::PrimitiveTopology, fragment: Option<&'a FragmentState<'a>>,
rasterization_state: Option<&'a wgt::RasterizationStateDescriptor>, depth_stencil: Option<&'a wgt::DepthStencilState>,
color_states: *const wgt::ColorStateDescriptor, multisample: wgt::MultisampleState,
color_states_length: usize,
depth_stencil_state: Option<&'a wgt::DepthStencilStateDescriptor>,
vertex_state: VertexStateDescriptor,
sample_count: u32,
sample_mask: u32,
alpha_to_coverage_enabled: bool,
} }
#[repr(C)] #[repr(C)]
@ -101,7 +175,6 @@ pub enum RawBindingType {
StorageBuffer, StorageBuffer,
ReadonlyStorageBuffer, ReadonlyStorageBuffer,
Sampler, Sampler,
ComparisonSampler,
SampledTexture, SampledTexture,
ReadonlyStorageTexture, ReadonlyStorageTexture,
WriteonlyStorageTexture, WriteonlyStorageTexture,
@ -118,6 +191,8 @@ pub struct BindGroupLayoutEntry<'a> {
texture_sample_type: Option<&'a RawTextureSampleType>, texture_sample_type: Option<&'a RawTextureSampleType>,
multisampled: bool, multisampled: bool,
storage_texture_format: Option<&'a wgt::TextureFormat>, storage_texture_format: Option<&'a wgt::TextureFormat>,
sampler_filter: bool,
sampler_compare: bool,
} }
#[repr(C)] #[repr(C)]
@ -323,7 +398,7 @@ pub unsafe extern "C" fn wgpu_client_make_adapter_ids(
) -> usize { ) -> usize {
let mut identities = client.identities.lock(); let mut identities = client.identities.lock();
assert_ne!(id_length, 0); assert_ne!(id_length, 0);
let mut ids = slice::from_raw_parts_mut(ids, id_length).iter_mut(); let mut ids = std::slice::from_raw_parts_mut(ids, id_length).iter_mut();
*ids.next().unwrap() = identities.vulkan.adapters.alloc(Backend::Vulkan); *ids.next().unwrap() = identities.vulkan.adapters.alloc(Backend::Vulkan);
@ -339,6 +414,19 @@ pub unsafe extern "C" fn wgpu_client_make_adapter_ids(
id_length - ids.len() id_length - ids.len()
} }
#[no_mangle]
pub extern "C" fn wgpu_client_fill_default_limits(limits: &mut wgt::Limits) {
*limits = wgt::Limits::default();
}
#[no_mangle]
pub extern "C" fn wgpu_client_serialize_device_descriptor(
desc: &wgt::DeviceDescriptor<RawString>,
bb: &mut ByteBuf,
) {
*bb = make_byte_buf(&desc.map_label(cow_label));
}
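These two entry points pair with wgpu_server_adapter_request_device further down: the client serializes the descriptor into a ByteBuf and the server decodes it with bincode. A sketch of the round trip, assuming make_byte_buf wraps bincode::serialize:

    // Client side (conceptually what make_byte_buf does with the mapped descriptor):
    let bytes: Vec<u8> = bincode::serialize(&desc.map_label(cow_label)).unwrap();
    // Server side (mirrors wgpu_server_adapter_request_device below):
    let device_desc: wgc::device::DeviceDescriptor = bincode::deserialize(&bytes).unwrap();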
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_client_make_device_id( pub extern "C" fn wgpu_client_make_device_id(
client: &Client, client: &Client,
@ -504,12 +592,22 @@ pub extern "C" fn wgpu_client_create_command_encoder(
id id
} }
#[repr(C)]
pub struct ComputePassDescriptor {
pub label: RawString,
}
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass( pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
encoder_id: id::CommandEncoderId, encoder_id: id::CommandEncoderId,
_desc: Option<&wgc::command::ComputePassDescriptor>, desc: &ComputePassDescriptor,
) -> *mut wgc::command::ComputePass { ) -> *mut wgc::command::ComputePass {
let pass = wgc::command::ComputePass::new(encoder_id); let pass = wgc::command::ComputePass::new(
encoder_id,
&wgc::command::ComputePassDescriptor {
label: cow_label(&desc.label),
},
);
Box::into_raw(Box::new(pass)) Box::into_raw(Box::new(pass))
} }
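A hypothetical call site for the updated entry point (the real callers live in the C++ DOM bindings); the functional change is simply that a pass label can now be forwarded:

    let desc = ComputePassDescriptor { label: std::ptr::null() };   // no label attached
    let pass = unsafe { wgpu_command_encoder_begin_compute_pass(encoder_id, &desc) };
    // ...record dispatches, then finish/destroy the pass as before...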
@ -529,6 +627,7 @@ pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: *mut wgc::command::Comp
#[repr(C)] #[repr(C)]
pub struct RenderPassDescriptor { pub struct RenderPassDescriptor {
pub label: RawString,
pub color_attachments: *const wgc::command::ColorAttachmentDescriptor, pub color_attachments: *const wgc::command::ColorAttachmentDescriptor,
pub color_attachments_length: usize, pub color_attachments_length: usize,
pub depth_stencil_attachment: *const wgc::command::DepthStencilAttachmentDescriptor, pub depth_stencil_attachment: *const wgc::command::DepthStencilAttachmentDescriptor,
@ -541,8 +640,9 @@ pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
) -> *mut wgc::command::RenderPass { ) -> *mut wgc::command::RenderPass {
let pass = wgc::command::RenderPass::new( let pass = wgc::command::RenderPass::new(
encoder_id, encoder_id,
wgc::command::RenderPassDescriptor { &wgc::command::RenderPassDescriptor {
color_attachments: Cow::Borrowed(slice::from_raw_parts( label: cow_label(&desc.label),
color_attachments: Cow::Borrowed(make_slice(
desc.color_attachments, desc.color_attachments,
desc.color_attachments_length, desc.color_attachments_length,
)), )),
@ -582,7 +682,7 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group_layout(
.alloc(backend); .alloc(backend);
let mut entries = Vec::with_capacity(desc.entries_length); let mut entries = Vec::with_capacity(desc.entries_length);
for entry in slice::from_raw_parts(desc.entries, desc.entries_length) { for entry in make_slice(desc.entries, desc.entries_length) {
entries.push(wgt::BindGroupLayoutEntry { entries.push(wgt::BindGroupLayoutEntry {
binding: entry.binding, binding: entry.binding,
visibility: entry.visibility, visibility: entry.visibility,
@ -604,12 +704,8 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group_layout(
min_binding_size: entry.min_binding_size, min_binding_size: entry.min_binding_size,
}, },
RawBindingType::Sampler => wgt::BindingType::Sampler { RawBindingType::Sampler => wgt::BindingType::Sampler {
comparison: false, comparison: entry.sampler_compare,
filtering: false, filtering: entry.sampler_filter,
},
RawBindingType::ComparisonSampler => wgt::BindingType::Sampler {
comparison: true,
filtering: false,
}, },
RawBindingType::SampledTexture => wgt::BindingType::Texture { RawBindingType::SampledTexture => wgt::BindingType::Texture {
//TODO: the spec has a bug here //TODO: the spec has a bug here
@ -669,7 +765,7 @@ pub unsafe extern "C" fn wgpu_client_create_pipeline_layout(
let wgpu_desc = wgc::binding_model::PipelineLayoutDescriptor { let wgpu_desc = wgc::binding_model::PipelineLayoutDescriptor {
label: cow_label(&desc.label), label: cow_label(&desc.label),
bind_group_layouts: Cow::Borrowed(slice::from_raw_parts( bind_group_layouts: Cow::Borrowed(make_slice(
desc.bind_group_layouts, desc.bind_group_layouts,
desc.bind_group_layouts_length, desc.bind_group_layouts_length,
)), )),
@ -697,7 +793,7 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group(
.alloc(backend); .alloc(backend);
let mut entries = Vec::with_capacity(desc.entries_length); let mut entries = Vec::with_capacity(desc.entries_length);
for entry in slice::from_raw_parts(desc.entries, desc.entries_length) { for entry in make_slice(desc.entries, desc.entries_length) {
entries.push(wgc::binding_model::BindGroupEntry { entries.push(wgc::binding_model::BindGroupEntry {
binding: entry.binding, binding: entry.binding,
resource: if let Some(id) = entry.buffer { resource: if let Some(id) = entry.buffer {
@ -741,16 +837,19 @@ pub unsafe extern "C" fn wgpu_client_create_shader_module(
.shader_modules .shader_modules
.alloc(backend); .alloc(backend);
assert!(!desc.spirv_words.is_null()); let source = match cow_label(&desc.wgsl_chars) {
let spv = Cow::Borrowed(if desc.spirv_words.is_null() { Some(code) => ShaderModuleSource::Wgsl(code),
&[][..] None => ShaderModuleSource::SpirV(Cow::Borrowed(make_slice(
} else { desc.spirv_words,
slice::from_raw_parts(desc.spirv_words, desc.spirv_words_length) desc.spirv_words_length,
}); ))),
};
let desc = wgc::pipeline::ShaderModuleDescriptor {
label: cow_label(&desc.label),
flags: wgt::ShaderFlags::VALIDATION, // careful here!
};
let wgsl = cow_label(&desc.wgsl_chars).unwrap_or_default(); let action = DeviceAction::CreateShaderModule(id, desc, source);
let action = DeviceAction::CreateShaderModule(id, spv, wgsl);
*bb = make_byte_buf(&action); *bb = make_byte_buf(&action);
id id
} }
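Net effect of the hunk above, as a condensed sketch: the old "empty SPIR-V slice means WGSL" convention is replaced by an explicit tag, and the label plus validation flag ride along in a real descriptor (placeholder variables `label` and `code` are illustrative):

    let action = DeviceAction::CreateShaderModule(
        id,
        wgc::pipeline::ShaderModuleDescriptor {
            label,                              // placeholder Option<Cow<str>>
            flags: wgt::ShaderFlags::VALIDATION,
        },
        ShaderModuleSource::Wgsl(code),         // or ShaderModuleSource::SpirV(words)
    );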
@ -770,7 +869,7 @@ pub unsafe extern "C" fn wgpu_client_create_compute_pipeline(
let wgpu_desc = wgc::pipeline::ComputePipelineDescriptor { let wgpu_desc = wgc::pipeline::ComputePipelineDescriptor {
label: cow_label(&desc.label), label: cow_label(&desc.label),
layout: desc.layout, layout: desc.layout,
compute_stage: desc.compute_stage.to_wgpu(), stage: desc.stage.to_wgpu(),
}; };
let implicit = match desc.layout { let implicit = match desc.layout {
@ -804,42 +903,11 @@ pub unsafe extern "C" fn wgpu_client_create_render_pipeline(
let wgpu_desc = wgc::pipeline::RenderPipelineDescriptor { let wgpu_desc = wgc::pipeline::RenderPipelineDescriptor {
label: cow_label(&desc.label), label: cow_label(&desc.label),
layout: desc.layout, layout: desc.layout,
vertex_stage: desc.vertex_stage.to_wgpu(), vertex: desc.vertex.to_wgpu(),
fragment_stage: desc fragment: desc.fragment.map(FragmentState::to_wgpu),
.fragment_stage primitive: desc.primitive.to_wgpu(),
.map(ProgrammableStageDescriptor::to_wgpu), depth_stencil: desc.depth_stencil.cloned(),
rasterization_state: desc.rasterization_state.cloned(), multisample: desc.multisample.clone(),
primitive_topology: desc.primitive_topology,
color_states: Cow::Borrowed(slice::from_raw_parts(
desc.color_states,
desc.color_states_length,
)),
depth_stencil_state: desc.depth_stencil_state.cloned(),
vertex_state: wgc::pipeline::VertexStateDescriptor {
index_format: desc.vertex_state.index_format,
vertex_buffers: {
let vbufs = slice::from_raw_parts(
desc.vertex_state.vertex_buffers,
desc.vertex_state.vertex_buffers_length,
);
let owned = vbufs
.iter()
.map(|vb| wgc::pipeline::VertexBufferDescriptor {
stride: vb.stride,
step_mode: vb.step_mode,
attributes: Cow::Borrowed(if vb.attributes.is_null() {
&[]
} else {
slice::from_raw_parts(vb.attributes, vb.attributes_length)
}),
})
.collect();
Cow::Owned(owned)
},
},
sample_count: desc.sample_count,
sample_mask: desc.sample_mask,
alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
}; };
let implicit = match desc.layout { let implicit = match desc.layout {
@ -909,3 +977,14 @@ pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
let action = CommandEncoderAction::CopyTextureToTexture { src, dst, size }; let action = CommandEncoderAction::CopyTextureToTexture { src, dst, size };
*bb = make_byte_buf(&action); *bb = make_byte_buf(&action);
} }
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_set_index_buffer(
pass: &mut wgc::command::RenderPass,
buffer: wgc::id::BufferId,
index_format: wgt::IndexFormat,
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) {
pass.set_index_buffer(buffer, index_format, offset, size);
}
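Hypothetical call site (sketch): with the new index-format handling, the format accompanies the buffer binding instead of living in the pipeline's vertex state.

    unsafe {
        wgpu_render_pass_set_index_buffer(
            &mut *pass,                  // *mut wgc::command::RenderPass from begin_render_pass
            buffer_id,                   // placeholder wgc::id::BufferId
            wgt::IndexFormat::Uint16,
            0,                           // byte offset into the buffer
            None,                        // None = bind through the end of the buffer
        );
    }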

View file

@ -43,6 +43,7 @@ pub struct IdentityRecyclerFactory {
free_render_bundle: extern "C" fn(id::RenderBundleId, FactoryParam), free_render_bundle: extern "C" fn(id::RenderBundleId, FactoryParam),
free_render_pipeline: extern "C" fn(id::RenderPipelineId, FactoryParam), free_render_pipeline: extern "C" fn(id::RenderPipelineId, FactoryParam),
free_compute_pipeline: extern "C" fn(id::ComputePipelineId, FactoryParam), free_compute_pipeline: extern "C" fn(id::ComputePipelineId, FactoryParam),
free_query_set: extern "C" fn(id::QuerySetId, FactoryParam),
free_buffer: extern "C" fn(id::BufferId, FactoryParam), free_buffer: extern "C" fn(id::BufferId, FactoryParam),
free_texture: extern "C" fn(id::TextureId, FactoryParam), free_texture: extern "C" fn(id::TextureId, FactoryParam),
free_texture_view: extern "C" fn(id::TextureViewId, FactoryParam), free_texture_view: extern "C" fn(id::TextureViewId, FactoryParam),
@ -160,6 +161,16 @@ impl wgc::hub::IdentityHandlerFactory<id::ComputePipelineId> for IdentityRecycle
} }
} }
} }
impl wgc::hub::IdentityHandlerFactory<id::QuerySetId> for IdentityRecyclerFactory {
type Filter = IdentityRecycler<id::QuerySetId>;
fn spawn(&self, _min_index: u32) -> Self::Filter {
IdentityRecycler {
fun: self.free_query_set,
param: self.param,
kind: "query_set",
}
}
}
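Each id type the factory can mint needs a matching free callback supplied by the embedder; a trivial sketch of one for the new query-set ids (the real callback lives on the C++ side):

    extern "C" fn free_query_set(_id: id::QuerySetId, _param: FactoryParam) {
        // A real callback would hand the id back to the recycler owned by the parent process.
    }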
impl wgc::hub::IdentityHandlerFactory<id::BufferId> for IdentityRecyclerFactory { impl wgc::hub::IdentityHandlerFactory<id::BufferId> for IdentityRecyclerFactory {
type Filter = IdentityRecycler<id::BufferId>; type Filter = IdentityRecycler<id::BufferId>;
fn spawn(&self, _min_index: u32) -> Self::Filter { fn spawn(&self, _min_index: u32) -> Self::Filter {

View file

@ -58,6 +58,12 @@ impl ByteBuf {
} }
} }
#[derive(serde::Serialize, serde::Deserialize)]
enum ShaderModuleSource<'a> {
SpirV(Cow<'a, [u32]>),
Wgsl(Cow<'a, str>),
}
#[derive(serde::Serialize, serde::Deserialize)] #[derive(serde::Serialize, serde::Deserialize)]
struct ImplicitLayout<'a> { struct ImplicitLayout<'a> {
pipeline: id::PipelineLayoutId, pipeline: id::PipelineLayoutId,
@ -78,7 +84,11 @@ enum DeviceAction<'a> {
wgc::binding_model::PipelineLayoutDescriptor<'a>, wgc::binding_model::PipelineLayoutDescriptor<'a>,
), ),
CreateBindGroup(id::BindGroupId, wgc::binding_model::BindGroupDescriptor<'a>), CreateBindGroup(id::BindGroupId, wgc::binding_model::BindGroupDescriptor<'a>),
CreateShaderModule(id::ShaderModuleId, Cow<'a, [u32]>, Cow<'a, str>), CreateShaderModule(
id::ShaderModuleId,
wgc::pipeline::ShaderModuleDescriptor<'a>,
ShaderModuleSource<'a>,
),
CreateComputePipeline( CreateComputePipeline(
id::ComputePipelineId, id::ComputePipelineId,
wgc::pipeline::ComputePipelineDescriptor<'a>, wgc::pipeline::ComputePipelineDescriptor<'a>,

View file

@ -4,12 +4,12 @@
use crate::{ use crate::{
cow_label, identity::IdentityRecyclerFactory, ByteBuf, CommandEncoderAction, DeviceAction, cow_label, identity::IdentityRecyclerFactory, ByteBuf, CommandEncoderAction, DeviceAction,
DropAction, RawString, TextureAction, DropAction, RawString, ShaderModuleSource, TextureAction,
}; };
use wgc::{gfx_select, id}; use wgc::{gfx_select, id};
use std::{fmt::Display, os::raw::c_char, ptr, slice}; use std::{error::Error, os::raw::c_char, ptr, slice};
#[repr(C)] #[repr(C)]
pub struct ErrorBuffer { pub struct ErrorBuffer {
@ -18,9 +18,17 @@ pub struct ErrorBuffer {
} }
impl ErrorBuffer { impl ErrorBuffer {
fn init(&mut self, error: impl Display) { fn init(&mut self, error: impl Error) {
use std::fmt::Write;
let mut string = format!("{}", error);
let mut e = error.source();
while let Some(source) = e {
write!(string, ", caused by: {}", source).unwrap();
e = source.source();
}
assert_ne!(self.capacity, 0); assert_ne!(self.capacity, 0);
let string = format!("{}", error);
let length = if string.len() >= self.capacity { let length = if string.len() >= self.capacity {
log::warn!( log::warn!(
"Error length {} reached capacity {}", "Error length {} reached capacity {}",
@ -105,24 +113,19 @@ pub unsafe extern "C" fn wgpu_server_instance_request_adapter(
} }
} }
#[no_mangle]
pub extern "C" fn wgpu_server_fill_default_limits(limits: &mut wgt::Limits) {
*limits = wgt::Limits::default();
}
#[no_mangle] #[no_mangle]
pub unsafe extern "C" fn wgpu_server_adapter_request_device( pub unsafe extern "C" fn wgpu_server_adapter_request_device(
global: &Global, global: &Global,
self_id: id::AdapterId, self_id: id::AdapterId,
desc: &wgt::DeviceDescriptor<RawString>, byte_buf: &ByteBuf,
new_id: id::DeviceId, new_id: id::DeviceId,
mut error_buf: ErrorBuffer, mut error_buf: ErrorBuffer,
) { ) {
let desc: wgc::device::DeviceDescriptor = bincode::deserialize(byte_buf.as_slice()).unwrap();
let trace_string = std::env::var("WGPU_TRACE").ok(); let trace_string = std::env::var("WGPU_TRACE").ok();
let trace_path = trace_string let trace_path = trace_string
.as_ref() .as_ref()
.map(|string| std::path::Path::new(string.as_str())); .map(|string| std::path::Path::new(string.as_str()));
let desc = desc.map_label(cow_label);
let (_, error) = let (_, error) =
gfx_select!(self_id => global.adapter_request_device(self_id, &desc, trace_path, new_id)); gfx_select!(self_id => global.adapter_request_device(self_id, &desc, trace_path, new_id));
if let Some(err) = error { if let Some(err) = error {
@ -266,16 +269,14 @@ impl GlobalExt for Global {
error_buf.init(err); error_buf.init(err);
} }
} }
DeviceAction::CreateShaderModule(id, spirv, wgsl) => { DeviceAction::CreateShaderModule(id, desc, source) => {
let desc = wgc::pipeline::ShaderModuleDescriptor { let source = match source {
label: None, //TODO ShaderModuleSource::SpirV(data) => {
source: if spirv.is_empty() { wgc::pipeline::ShaderModuleSource::SpirV(data)
wgc::pipeline::ShaderModuleSource::Wgsl(wgsl) }
} else { ShaderModuleSource::Wgsl(data) => wgc::pipeline::ShaderModuleSource::Wgsl(data),
wgc::pipeline::ShaderModuleSource::SpirV(spirv)
},
}; };
let (_, error) = self.device_create_shader_module::<B>(self_id, &desc, id); let (_, error) = self.device_create_shader_module::<B>(self_id, &desc, source, id);
if let Some(err) = error { if let Some(err) = error {
error_buf.init(err); error_buf.init(err);
} }
@ -401,6 +402,34 @@ impl GlobalExt for Global {
error_buf.init(err); error_buf.init(err);
} }
} }
CommandEncoderAction::WriteTimestamp {
query_set_id,
query_index,
} => {
if let Err(err) =
self.command_encoder_write_timestamp::<B>(self_id, query_set_id, query_index)
{
error_buf.init(err);
}
}
CommandEncoderAction::ResolveQuerySet {
query_set_id,
start_query,
query_count,
destination,
destination_offset,
} => {
if let Err(err) = self.command_encoder_resolve_query_set::<B>(
self_id,
query_set_id,
start_query,
query_count,
destination,
destination_offset,
) {
error_buf.init(err);
}
}
CommandEncoderAction::RunRenderPass { CommandEncoderAction::RunRenderPass {
base, base,
target_colors, target_colors,
@ -604,7 +633,7 @@ pub extern "C" fn wgpu_server_texture_drop(global: &Global, self_id: id::Texture
#[no_mangle] #[no_mangle]
pub extern "C" fn wgpu_server_texture_view_drop(global: &Global, self_id: id::TextureViewId) { pub extern "C" fn wgpu_server_texture_view_drop(global: &Global, self_id: id::TextureViewId) {
gfx_select!(self_id => global.texture_view_drop(self_id)).unwrap(); gfx_select!(self_id => global.texture_view_drop(self_id, false)).unwrap();
} }
#[no_mangle] #[no_mangle]
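The two new command-encoder actions map onto wgpu-core calls with the argument lists shown above; an illustrative sequence (ids and the backend parameter `B` are placeholders, `global` is the wgpu-core Global):

    // Record a timestamp at query index 0, then resolve queries 0..2 into a buffer.
    let _ = global.command_encoder_write_timestamp::<B>(encoder_id, query_set_id, 0);
    let _ = global.command_encoder_resolve_query_set::<B>(
        encoder_id,
        query_set_id,
        0,               // start_query
        2,               // query_count
        dest_buffer_id,  // destination buffer
        0,               // destination_offset
    );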

2
third_party/rust/d3d12/.cargo-checksum.json поставляемый
Просмотреть файл

@ -1 +1 @@
{"files":{"Cargo.toml":"9dcf05b4a1c295a2775190bcb1df8cb3ffe8f39a5e91ea551d7af84793f941e7","README.md":"4708fe5036c6bb6902f1b2d191a99117ba436ffec5678a0dfc6c99a9da5f2f1c","appveyor.yml":"14da30f5712e0eaec1acbb74649f391597095d8f8aaccf7a528a358936e8f98b","bors.toml":"31d348faf24d2bac765198bce62b4c4f1d6987e567eaed3d380a6704fec5183c","src/com.rs":"874a6ecea743a1f37f10649d71850cad807bd87cce883479cdb106d57aea863b","src/command_allocator.rs":"cda791b138019bae082fe664bd735de1e5fa6b4ee979e180e1945d50b4858ef0","src/command_list.rs":"a50a8bdebd859cfbd64c02640468665c221bb107b3ae5c1a30a1de20f4e7a299","src/debug.rs":"b26d102c033933c7935cedadfa3ea69e6b4ab95d58d5165911debec729b8bdb5","src/descriptor.rs":"93b4f24565494fb1aecf5cc8f677d3fc56bbaf742717b77d9846259fa300891e","src/device.rs":"2738fce770e3392c263f2745d1bdcb40b80b60288fb08e4904419000a85bffed","src/dxgi.rs":"93547cdf0c90dd0681b5b5dfa3ebcb6f9764728537286b546d708b4e281bad06","src/heap.rs":"ee397804e083d04486fc6e48d71acdce341ee717cc19fa5c370455d7bf7c042b","src/lib.rs":"d1421cacbdc80528eb1086a6bb1d778afd70b2746ba4ab905d9c067179601e41","src/pso.rs":"073a936f7004c813b2e19fe3c5a541d0554327c598ef6aeadb774cd3c78e9743","src/query.rs":"ea36425db9a27422c361c706f3521341fa3a6fe34ef2d211ff7cfbe792c3f93b","src/queue.rs":"d0cbecfb3e538dd37e573a76a4bd2c78cde33b17c96af5b94672f992a195ede6","src/resource.rs":"cbe66c54ba11c994f644235b725402c7180113d0ed965f1878d64f824cd437df","src/sync.rs":"a6921a1f64eb0153e52e22c3c1cc12c7381c2823ed47a0f7de5834f14f3acd2b"},"package":"bc7ed48e89905e5e146bcc1951cc3facb9e44aea9adf5dc01078cda1bd24b662"} {"files":{"CHANGELOG.md":"d83cbbe16898f7e60c52bc464361bf2a7c29d0158f2095e23b714691cdbd959e","Cargo.toml":"ec2a1bf3f22f60f112ac6da06efd2146d2fba23c80d70dc74ac9dee50247083b","README.md":"71f2c62c9f9a892b436adf130dab47348744ea05c98af95599d2e79b54fb25a5","appveyor.yml":"69e6279a533b60e4f7ba70e645a9c6b7aba1654105a1e1362e67ed14deca5368","bors.toml":"366ea95cdc64dae238edd4fe70a3d5f698b4dd22064abeb8d91550c81c8ccc67","rustfmt.toml":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","src/com.rs":"1610750dcf9bd9b4ca469d37675ddbad286880e2b187952df4af0b83641bf4c7","src/command_allocator.rs":"b430fa99c7c541f5e592d6e9b8c83265b44f3f1b70205f9d44b4519e2d161cea","src/command_list.rs":"e3e6d23943827ee7d1daf82a97cdb361a6b4f1c138bbc84e18be995d66ce778c","src/debug.rs":"6d04c96fa2073ca8e3a4c29088c4f654c9bbd4944021196a209166ecf21520f9","src/descriptor.rs":"a9dd3534d8d79f8c52d0b3c3e6e2b7e218e2a4ffc915841f8270e67a8769ef4d","src/device.rs":"b4ac053d9a85d070d049eac7f243edae7afceb5c9f6d75ae1faddc2ec2875ca9","src/dxgi.rs":"36251ec2d55009796070df53591b3129e1eccadeeb0442818bc5d81065305050","src/heap.rs":"bb4c0996c63da3dc14366aaa23068e7a3d2fb43d617f8645a5aef74767a463d6","src/lib.rs":"21b72a9ef5ee0f4ad1fb1879e6e117568804de7ed01933739274e48a1c0d324d","src/pso.rs":"1dcf102f061a3cadfc0de3fd75e2414f06c1bf9ac5727be1cbdd2204883093e4","src/query.rs":"53f64ef6f2212a1367f248191e4ab93a3facb18c37709f05f850c30bdc7be8cf","src/queue.rs":"3cd807b1df00ef9dd6ba5a28dcee883033ea174d3715b6de754c6f890159302a","src/resource.rs":"9a0f53f8f23fd671ae44370413274606ce62942bb16fc7370e6f32a2410b4255","src/sync.rs":"dcce20cfd2a408ad43ad6765a91d65dfe1998615cb56a3cfb60306094f2365a8"},"package":null}

17
third_party/rust/d3d12/CHANGELOG.md vendored Normal file
View file

@ -0,0 +1,17 @@
# Change Log
## v0.3.1 (2020-07-07)
- create shader from IL
- fix default doc target
- debug impl for root descriptors
## v0.3.0 (2019-11-01)
- resource transitions
- dynamic library loading
## v0.2.2 (2019-10-04)
- add `D3DHeap`
- add root descriptor
## v0.1.0 (2018-12-26)
- basic version

Some files were not shown because too many files changed in this diff