Bug 1726626 - Move gfx/wgpu into a 3rd party dependency r=jgilbert,bholley

This update makes `wgpu` a vendored third-party dependency instead of keeping its source in-tree under gfx/wgpu.
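
The vendored copies live under third_party/rust, and Cargo's source-replacement configuration points the `wgpu` and `naga` git sources at them. A minimal sketch of the kind of stanza this change adds (the actual entries are in the first hunk of the diff below):

```toml
[source."https://github.com/gfx-rs/wgpu"]
git = "https://github.com/gfx-rs/wgpu"
replace-with = "vendored-sources"
rev = "d23288e"
```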

## Notes


It relies on https://phabricator.services.mozilla.com/D123157

There is one quirk related to the OpenGL ES backend. Previously, we had to manually disable the GL backend
in order to avoid vendoring in WASM dependencies. This time, manual editing is more complicated,
so instead this change adds a few Cargo patch lines that point the WASM dependencies at dummy projects.
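
Concretely, the top-level Cargo.toml gains overrides of roughly this shape, pointing the web-only crates at stub repositories so that nothing WASM-specific needs to be vendored (the actual lines appear in the Cargo.toml hunk further down):

```toml
[patch.crates-io]
# Temporary measure until https://github.com/rust-lang/cargo/issues/6179 is resolved:
# point the web-only crates at dummy projects instead of vendoring them.
js-sys = { git = "https://github.com/kvark/dummy-web" }
slotmap = { git = "https://github.com/kvark/dummy-web" }
wasm-bindgen = { git = "https://github.com/kvark/dummy-web" }
web-sys = { git = "https://github.com/kvark/dummy-web" }
```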

The update also removes SPIRV-Cross entirely, since the latest `wgpu` no longer depends on it.
This should reduce Gecko's compiled binary size.

Differential Revision: https://phabricator.services.mozilla.com/D123153
This commit is contained in:
Dzmitry Malyshau 2021-09-03 16:52:08 +00:00
Parent edabfc6951
Commit 5695f517a5
664 changed files: 142901 additions and 186815 deletions

View file

@ -47,9 +47,8 @@ git = "https://github.com/mozilla-spidermonkey/jsparagus"
replace-with = "vendored-sources"
rev = "2e56bb9bae5d8211137980a717ee991cc4a5eb98"
[source."https://github.com/kvark/spirv_cross"]
branch = "wgpu5"
git = "https://github.com/kvark/spirv_cross"
[source."https://github.com/kvark/dummy-web"]
git = "https://github.com/kvark/dummy-web"
replace-with = "vendored-sources"
[source."https://github.com/kinetiknz/mio-named-pipes"]
@ -82,6 +81,16 @@ git = "https://github.com/hsivonen/chardetng"
replace-with = "vendored-sources"
rev = "302c995f91f44cf26e77dc4758ad56c3ff0153ad"
[source."https://github.com/gfx-rs/wgpu"]
git = "https://github.com/gfx-rs/wgpu"
replace-with = "vendored-sources"
rev = "d23288e"
[source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources"
rev = "93db57c"
[source."https://github.com/bytecodealliance/wasmtime"]
git = "https://github.com/bytecodealliance/wasmtime"
replace-with = "vendored-sources"

View file

@ -119,7 +119,6 @@ gfx/vr/service/openvr/src/strtools_public.cpp
gfx/vr/service/openvr/src/strtools_public.h
gfx/vr/service/openvr/src/vrpathregistry_public.cpp
gfx/vr/service/openvr/src/vrpathregistry_public.h
gfx/wgpu/.*
gfx/ycbcr/.*
intl/hyphenation/hyphen/.*
intl/icu/.*

365
Cargo.lock (generated)
View file

@ -87,15 +87,21 @@ name = "arrayvec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "arrayvec"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd"
dependencies = [
"serde",
]
[[package]]
name = "ash"
version = "0.32.1"
version = "0.33.0+1.2.186"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06063a002a77d2734631db74e8f4ce7148b77fe522e6bca46f2ae7774fd48112"
checksum = "a2142f1fa77cc4d24ffd2f24dc84f88ce5b1e588d524f10fb473a04b93aef14f"
dependencies = [
"libloading 0.7.0",
]
@ -393,7 +399,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b83b7baab1e671718d78204225800d6b170e648188ac7dc992e9d6bddf87d0c0"
dependencies = [
"arrayref",
"arrayvec",
"arrayvec 0.5.2",
"constant_time_eq",
]
@ -510,9 +516,6 @@ name = "cc"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
dependencies = [
"jobserver",
]
[[package]]
name = "cert_storage"
@ -638,21 +641,6 @@ dependencies = [
"cc",
]
[[package]]
name = "cocoa-foundation"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318"
dependencies = [
"bitflags",
"block",
"core-foundation",
"core-graphics-types",
"foreign-types",
"libc",
"objc",
]
[[package]]
name = "codespan-reporting"
version = "0.11.1"
@ -1052,9 +1040,9 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.4.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "091ed1b25fe47c7ff129fc440c23650b6114f36aa00bc7212cc8041879294428"
checksum = "2daefd788d1e96e0a9d66dee4b828b883509bc3ea9ce30665f04c3246372690c"
dependencies = [
"bitflags",
"libloading 0.7.0",
@ -1219,15 +1207,6 @@ dependencies = [
"smallbitvec",
]
[[package]]
name = "drm-fourcc"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ebbf3a5ed4671aabffefce172ff43d69c1f27dd2c6aea28e5212a70f32ada0cf"
dependencies = [
"serde",
]
[[package]]
name = "dtoa"
version = "0.4.8"
@ -1349,16 +1328,6 @@ dependencies = [
"serde",
]
[[package]]
name = "external-memory"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4dfe8d292b014422776a8c516862d2bff8a81b223a4461dfdc45f3862dc9d39"
dependencies = [
"bitflags",
"drm-fourcc",
]
[[package]]
name = "fake-simd"
version = "0.1.2"
@ -1844,148 +1813,6 @@ dependencies = [
"wasi",
]
[[package]]
name = "gfx-auxil"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1694991b11d642680e82075a75c7c2bd75556b805efa7660b705689f05b1ab1c"
dependencies = [
"fxhash",
"gfx-hal",
"spirv_cross",
]
[[package]]
name = "gfx-backend-dx11"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f9e453baf3aaef2b0c354ce0b3d63d76402e406a59b64b7182d123cfa6635ae"
dependencies = [
"arrayvec",
"bitflags",
"gfx-auxil",
"gfx-hal",
"gfx-renderdoc",
"libloading 0.7.0",
"log",
"parking_lot",
"range-alloc",
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi",
"wio",
]
[[package]]
name = "gfx-backend-dx12"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21506399f64a3c4d389182a89a30073856ae33eb712315456b4fd8f39ee7682a"
dependencies = [
"arrayvec",
"bit-set",
"bitflags",
"d3d12",
"gfx-auxil",
"gfx-hal",
"gfx-renderdoc",
"log",
"parking_lot",
"range-alloc",
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi",
]
[[package]]
name = "gfx-backend-empty"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29c8f813c47791918aa00dc9c9ddf961d23fa8c2a5d869e6cb8ea84f944820f4"
dependencies = [
"gfx-hal",
"log",
"raw-window-handle",
]
[[package]]
name = "gfx-backend-metal"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0de85808e2a98994c6af925253f8a9593bc57180ef1ea137deab6d35cc949517"
dependencies = [
"arrayvec",
"bitflags",
"block",
"cocoa-foundation",
"copyless",
"core-graphics-types",
"foreign-types",
"fxhash",
"gfx-auxil",
"gfx-hal",
"log",
"metal",
"naga",
"objc",
"parking_lot",
"profiling",
"range-alloc",
"raw-window-handle",
"spirv_cross",
"storage-map",
]
[[package]]
name = "gfx-backend-vulkan"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9861ec855acbbc65c0e4f966d761224886e811dc2c6d413a4776e9293d0e5c0"
dependencies = [
"arrayvec",
"ash",
"byteorder",
"core-graphics-types",
"gfx-hal",
"gfx-renderdoc",
"inplace_it",
"log",
"naga",
"objc",
"parking_lot",
"raw-window-handle",
"smallvec",
"winapi",
]
[[package]]
name = "gfx-hal"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fbb575ea793dd0507b3082f4f2cde62dc9f3cebd98f5cd49ba2a4da97a976fd"
dependencies = [
"bitflags",
"external-memory",
"naga",
"raw-window-handle",
"thiserror",
]
[[package]]
name = "gfx-renderdoc"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8027995e247e2426d3a00d13f5191dd56c314bff02dc4b54cbf727f1ba9c40a"
dependencies = [
"libloading 0.7.0",
"log",
"renderdoc-sys",
]
[[package]]
name = "gkrust"
version = "0.1.0"
@ -2172,6 +1999,18 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "glow"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f04649123493bc2483cbef4daddb45d40bbdae5adb221a63a23efdb0cc99520"
dependencies = [
"js-sys",
"slotmap",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "glsl"
version = "4.0.3"
@ -2228,9 +2067,9 @@ dependencies = [
[[package]]
name = "gpu-alloc"
version = "0.4.7"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cbc1b6ca374e81862526786d9cb42357ce03706ed1b8761730caafd02ab91f3a"
checksum = "c481459c44304a1dfed23bd650bb3912e12c9f77d7871f86d7ed7c9730a52e79"
dependencies = [
"bitflags",
"gpu-alloc-types",
@ -2247,9 +2086,9 @@ dependencies = [
[[package]]
name = "gpu-descriptor"
version = "0.1.1"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a70f1e87a3840ed6a3e99e02c2b861e4dbdf26f0d07e38f42ea5aff46cfce2"
checksum = "d7a237f0419ab10d17006d55c62ac4f689a6bf52c75d3f38b8361d249e8d4b0b"
dependencies = [
"bitflags",
"gpu-descriptor-types",
@ -2569,6 +2408,11 @@ dependencies = [
"libc",
]
[[package]]
name = "js-sys"
version = "0.3.100"
source = "git+https://github.com/kvark/dummy-web#5731e569d865a1ebaf116f48dad781f355a99243"
[[package]]
name = "jsparagus"
version = "0.1.0"
@ -2625,7 +2469,7 @@ name = "jsparagus-parser"
version = "0.1.0"
source = "git+https://github.com/mozilla-spidermonkey/jsparagus?rev=2e56bb9bae5d8211137980a717ee991cc4a5eb98#2e56bb9bae5d8211137980a717ee991cc4a5eb98"
dependencies = [
"arrayvec",
"arrayvec 0.5.2",
"bumpalo",
"jsparagus-ast",
"jsparagus-generated-parser",
@ -2672,6 +2516,16 @@ dependencies = [
"smoosh",
]
[[package]]
name = "khronos-egl"
version = "4.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8c2352bd1d0bceb871cb9d40f24360c8133c11d7486b68b5381c1dd1a32015e3"
dependencies = [
"libc",
"libloading 0.7.0",
]
[[package]]
name = "khronos_api"
version = "3.1.0"
@ -3035,9 +2889,9 @@ dependencies = [
[[package]]
name = "metal"
version = "0.23.0"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79d7d769f1c104b8388294d6594d491d2e21240636f5f94d37f8a0f3d7904450"
checksum = "e0514f491f4cc03632ab399ee01e2c1c1b12d3e1cf2d667c1ff5f87d6dcd2084"
dependencies = [
"bitflags",
"block",
@ -3191,7 +3045,7 @@ dependencies = [
name = "mozglue-static"
version = "0.1.0"
dependencies = [
"arrayvec",
"arrayvec 0.5.2",
"cc",
"rustc_version",
]
@ -3309,9 +3163,8 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef670817eef03d356d5a509ea275e7dd3a78ea9e24261ea3cb2dfed1abb08f64"
version = "0.6.0"
source = "git+https://github.com/gfx-rs/naga?rev=93db57c#93db57c12b4a5eff48bdd00c494efa5ec89567ad"
dependencies = [
"bit-set",
"bitflags",
@ -3320,8 +3173,8 @@ dependencies = [
"log",
"num-traits",
"petgraph",
"rose_tree",
"spirv_headers",
"serde",
"spirv",
"thiserror",
]
@ -4281,15 +4134,6 @@ dependencies = [
"serde",
]
[[package]]
name = "rose_tree"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "284de9dae38774e2813aaabd7e947b4a6fe9b8c58c2309f754a487cdd50de1c2"
dependencies = [
"petgraph",
]
[[package]]
name = "rsclientcerts"
version = "0.1.0"
@ -4365,7 +4209,7 @@ version = "1.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9787e62372fc0c5a0f3af64c392652db72d3ec1cc0cff1becc175d2c11e6fbcc"
dependencies = [
"arrayvec",
"arrayvec 0.5.2",
"num-traits",
"serde",
]
@ -4649,6 +4493,11 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527"
[[package]]
name = "slotmap"
version = "1.100.0"
source = "git+https://github.com/kvark/dummy-web#5731e569d865a1ebaf116f48dad781f355a99243"
[[package]]
name = "smallbitvec"
version = "2.5.0"
@ -4686,26 +4535,10 @@ dependencies = [
]
[[package]]
name = "spirv-cross-internal"
version = "0.1.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu5#a5a90d38ab1f82ad8327b48e161dbfe556ef6c6e"
dependencies = [
"cc",
]
[[package]]
name = "spirv_cross"
version = "0.23.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu5#a5a90d38ab1f82ad8327b48e161dbfe556ef6c6e"
dependencies = [
"spirv-cross-internal",
]
[[package]]
name = "spirv_headers"
version = "1.5.0"
name = "spirv"
version = "0.2.0+1.5.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f5b132530b1ac069df335577e3581765995cba5a13995cdbbdbc8fb057c532c"
checksum = "246bfa38fe3db3f1dfc8ca5a2cdeb7348c78be2112740cc0ec8ef18b6d94f830"
dependencies = [
"bitflags",
"num-traits",
@ -4750,15 +4583,6 @@ dependencies = [
"xpcom",
]
[[package]]
name = "storage-map"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "418bb14643aa55a7841d5303f72cf512cfb323b8cc221d51580500a1ca75206c"
dependencies = [
"lock_api",
]
[[package]]
name = "storage_variant"
version = "0.1.0"
@ -4780,7 +4604,7 @@ name = "style"
version = "0.0.1"
dependencies = [
"app_units",
"arrayvec",
"arrayvec 0.5.2",
"atomic_refcell",
"bindgen",
"bitflags",
@ -5040,12 +4864,6 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b5b2bd897775cb425729882f0710639eb69f3d784db834ee85941ae9c35bb83"
[[package]]
name = "thunderdome"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "87b4947742c93ece24a0032141d9caa3d853752e694a57e35029dd2bd08673e0"
[[package]]
name = "time"
version = "0.1.43"
@ -5338,7 +5156,7 @@ version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6ee6f45294ecb6e220e76b8afddb50a8b69d433f31ad9eb2d16fb44029ef5db"
dependencies = [
"arrayvec",
"arrayvec 0.5.2",
]
[[package]]
@ -5540,6 +5358,11 @@ version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasm-bindgen"
version = "0.2.100"
source = "git+https://github.com/kvark/dummy-web#5731e569d865a1ebaf116f48dad781f355a99243"
[[package]]
name = "wasmparser"
version = "0.78.2"
@ -5564,6 +5387,11 @@ dependencies = [
"wast",
]
[[package]]
name = "web-sys"
version = "0.3.100"
source = "git+https://github.com/kvark/dummy-web#5731e569d865a1ebaf116f48dad781f355a99243"
[[package]]
name = "webdriver"
version = "0.43.1"
@ -5738,21 +5566,14 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.9.2"
version = "0.10.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=d23288e#d23288e455eec657c5632d7c24410d0d2ef2057e"
dependencies = [
"arrayvec",
"arrayvec 0.7.1",
"bitflags",
"cfg_aliases",
"copyless",
"fxhash",
"gfx-backend-dx11",
"gfx-backend-dx12",
"gfx-backend-empty",
"gfx-backend-metal",
"gfx-backend-vulkan",
"gfx-hal",
"gpu-alloc",
"gpu-descriptor",
"log",
"naga",
"parking_lot",
@ -5761,12 +5582,47 @@ dependencies = [
"serde",
"smallvec",
"thiserror",
"wgpu-hal",
"wgpu-types",
]
[[package]]
name = "wgpu-hal"
version = "0.10.1"
source = "git+https://github.com/gfx-rs/wgpu?rev=d23288e#d23288e455eec657c5632d7c24410d0d2ef2057e"
dependencies = [
"arrayvec 0.7.1",
"ash",
"bit-set",
"bitflags",
"block",
"core-graphics-types",
"d3d12",
"foreign-types",
"fxhash",
"glow",
"gpu-alloc",
"gpu-descriptor",
"inplace_it",
"khronos-egl",
"libloading 0.7.0",
"log",
"metal",
"naga",
"objc",
"parking_lot",
"range-alloc",
"raw-window-handle",
"renderdoc-sys",
"thiserror",
"wgpu-types",
"winapi",
]
[[package]]
name = "wgpu-types"
version = "0.9.0"
version = "0.10.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=d23288e#d23288e455eec657c5632d7c24410d0d2ef2057e"
dependencies = [
"bitflags",
"serde",
@ -5781,6 +5637,7 @@ dependencies = [
"parking_lot",
"serde",
"wgpu-core",
"wgpu-hal",
"wgpu-types",
]

View file

@ -30,7 +30,6 @@ exclude = [
# Excluded because these crates have their own Cargo workspaces so they can't
# be included in the top-level one.
"gfx/wgpu",
"gfx/wr",
# Excluded because they are used only as dependencies, not top-level targets,
@ -44,6 +43,7 @@ exclude = [
]
# Use the new dependency resolver to reduce some of the platform-specific dependencies.
# This is required for 'third_party/rust/wgpu-hal'
resolver = "2"
# Explicitly specify what our profiles use. The opt-level setting here is
@ -82,8 +82,13 @@ chardetng = { git = "https://github.com/hsivonen/chardetng", rev="302c995f91f44c
chardetng_c = { git = "https://github.com/hsivonen/chardetng_c", rev="ed8a4c6f900a90d4dbc1d64b856e61490a1c3570" }
libudev-sys = { path = "dom/webauthn/libudev-sys" }
packed_simd = { git = "https://github.com/hsivonen/packed_simd", rev="6a16f954950401b92b4e220fbf2dfaf6f00e1fb2" }
spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu5" }
minidump_writer_linux = { git = "https://github.com/msirringhaus/minidump_writer_linux.git", rev = "029ac0d54b237f27dc7d8d4e51bc0fb076e5e852" }
# The following overrides point to dummy projects, as a temporary measure until this is resolved:
# https://github.com/rust-lang/cargo/issues/6179
js-sys = { git = "https://github.com/kvark/dummy-web" }
slotmap = { git = "https://github.com/kvark/dummy-web" }
wasm-bindgen = { git = "https://github.com/kvark/dummy-web" }
web-sys = { git = "https://github.com/kvark/dummy-web" }
[patch.crates-io.cranelift-codegen]
git = "https://github.com/bytecodealliance/wasmtime"

View file

@ -115,7 +115,6 @@ gfx/tests/
gfx/thebes/
gfx/vr/
gfx/webrender_bindings/
gfx/wgpu/
gfx/wgpu_bindings/
gfx/wr/
gfx/ycbcr/

View file

@ -2,7 +2,6 @@
%include build/sparse-profiles/taskgraph
[include]
path:gfx/wgpu/
path:gfx/wr/
path:taskcluster/scripts/misc/
path:tools/github-sync/

View file

@ -67,9 +67,6 @@ path:.cron.yml
path:gfx/wr/Cargo.lock
path:gfx/wr/ci-scripts/
# for the wgpu-deps toolchain task
path:gfx/wgpu/Cargo.lock
# for the mar-tools toolchain task
path:mfbt/
path:modules/libmar/

View file

@ -1,4 +0,0 @@
%include build/sparse-profiles/mach
[include]
path:gfx/wgpu/

View file

@ -207,7 +207,7 @@ bool GetCanvasContextType(const nsAString& str,
}
if (StaticPrefs::dom_webgpu_enabled()) {
if (str.EqualsLiteral("gpupresent")) {
if (str.EqualsLiteral("webgpu")) {
*out_type = dom::CanvasContextType::WebGPU;
return true;
}

View file

@ -41,7 +41,7 @@ ffi::WGPUStoreOp ConvertStoreOp(const dom::GPUStoreOp& aOp) {
case dom::GPUStoreOp::Store:
return ffi::WGPUStoreOp_Store;
case dom::GPUStoreOp::Discard:
return ffi::WGPUStoreOp_Clear;
return ffi::WGPUStoreOp_Discard;
default:
MOZ_CRASH("Unexpected load op");
}

View file

@ -735,7 +735,7 @@ RawId WebGPUChild::DeviceCreateRenderPipeline(
if (!vertex_desc.IsNull()) {
const auto& vd = vertex_desc.Value();
vb_desc.array_stride = vd.mArrayStride;
vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
vb_desc.step_mode = ffi::WGPUVertexStepMode(vd.mStepMode);
// Note: we are setting the length but not the pointer
vb_desc.attributes_length = vd.mAttributes.Length();
for (const auto& vat : vd.mAttributes) {

View file

@ -81,10 +81,6 @@ static void FreeDevice(RawId id, void* param) {
NS_ERROR("Unable FreeDevice");
}
}
static void FreeSwapChain(RawId id, void* param) {
Unused << id;
Unused << param;
}
static void FreeShaderModule(RawId id, void* param) {
ipc::ByteBuf byteBuf;
wgpu_server_shader_module_free(id, ToFFI(&byteBuf));
@ -178,7 +174,6 @@ static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
ffi::WGPUIdentityRecyclerFactory factory = {param};
factory.free_adapter = FreeAdapter;
factory.free_device = FreeDevice;
factory.free_swap_chain = FreeSwapChain;
factory.free_pipeline_layout = FreePipelineLayout;
factory.free_shader_module = FreeShaderModule;
factory.free_bind_group_layout = FreeBindGroupLayout;
@ -606,8 +601,8 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
bufferId = data->mUnassignedBufferIds.back();
data->mUnassignedBufferIds.pop_back();
ffi::WGPUBufferUsage usage =
WGPUBufferUsage_COPY_DST | WGPUBufferUsage_MAP_READ;
ffi::WGPUBufferUsages usage =
WGPUBufferUsages_COPY_DST | WGPUBufferUsages_MAP_READ;
ffi::WGPUBufferDescriptor desc = {};
desc.size = bufferSize;
desc.usage = usage;

2
gfx/wgpu/.gitattributes (vendored)
View file

@ -1,2 +0,0 @@
*.mtl binary
*.obj binary

28
gfx/wgpu/.github/ISSUE_TEMPLATE/bug_report.md (vendored)
View file

@ -1,28 +0,0 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: bug
assignees: ''
---
<!-- Thank you for filing this! Please read the [debugging tips](https://github.com/gfx-rs/wgpu/wiki/Debugging-wgpu-Applications).
That may let you investigate on your own, or provide additional information that helps us to assist.-->
**Description**
A clear and concise description of what the bug is.
**Repro steps**
Ideally, a runnable example we can check out.
**Expected vs observed behavior**
Clearly describe what you get, and how it goes across your expectations.
**Extra materials**
Screenshots to help explain your problem.
Validation logs can be attached in case there are warnings and errors.
Zip-compressed API traces and GPU captures can also land here.
**Platform**
Information about your OS, version of `wgpu`, your tech stack, etc.

8
gfx/wgpu/.github/ISSUE_TEMPLATE/config.yml (vendored)
View file

@ -1,8 +0,0 @@
blank_issues_enabled: false
contact_links:
- name: Issues with shaders
url: https://github.com/gfx-rs/naga/issues/new/choose
about: Issues with or enhancements for the shader translation.
- name: Question about wgpu
url: https://github.com/gfx-rs/wgpu-rs/discussions/new
about: Any questions about how to use wgpu should go here.

View file

@ -1,20 +0,0 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.

7
gfx/wgpu/.github/ISSUE_TEMPLATE/other.md (vendored)
View file

@ -1,7 +0,0 @@
---
name: Other
about: Strange things you want to tell us
title: ''
labels: question
assignees: ''
---

8
gfx/wgpu/.github/pull_request_template.md (vendored)
View file

@ -1,8 +0,0 @@
**Connections**
_Link to the issues addressed by this PR, or dependent PRs in other repositories_
**Description**
_Describe what problem this is solving, and how it's solved._
**Testing**
_Explain how this change is tested._

157
gfx/wgpu/.github/workflows/ci.yml (vendored)
View file

@ -1,157 +0,0 @@
name: CI
on:
push:
branches: [master, staging]
tags: [v0.*]
pull_request:
branches-ignore: [staging.tmp]
jobs:
ios_build:
name: iOS Stable
runs-on: macos-10.15
env:
TARGET: aarch64-apple-ios
steps:
- uses: actions/checkout@v2
- run: rustup component add clippy
- run: rustup target add ${{ env.TARGET }}
- run: cargo clippy --target ${{ env.TARGET }}
android_build:
name: Android Stable
runs-on: ubuntu-18.04
env:
TARGET: aarch64-linux-android
PKG_CONFIG_ALLOW_CROSS: 1
steps:
- uses: actions/checkout@v2
- run: echo "$ANDROID_HOME/ndk-bundle/toolchains/llvm/prebuilt/linux-x86_64/bin" >> $GITHUB_PATH
- run: rustup component add clippy
- run: rustup target add ${{ env.TARGET }}
- run: cargo clippy --target ${{ env.TARGET }}
- name: Additional core features
run: cargo check --manifest-path wgpu-core/Cargo.toml --features trace --target ${{ env.TARGET }}
wasm:
name: Web Assembly
runs-on: ubuntu-18.04
env:
RUSTFLAGS: --cfg=web_sys_unstable_apis
steps:
- uses: actions/checkout@v2
- run: rustup target add wasm32-unknown-unknown
- name: Check WebGPU
run: cargo check --all-targets --target=wasm32-unknown-unknown
- name: Check WebGL
run: cargo check --all-targets --target=wasm32-unknown-unknown --features webgl
build:
name: ${{ matrix.name }}
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [macos-10.15, ubuntu-18.04, windows-2019]
channel: [stable, nightly]
include:
- name: MacOS Stable
channel: stable
os: macos-10.15
prepare_command:
additional_core_features: trace
additional_player_features: winit
- name: MacOS Nightly
os: macos-10.15
channel: nightly
prepare_command:
additional_core_features:
additional_player_features:
- name: Ubuntu Stable
os: ubuntu-18.04
channel: stable
prepare_command:
additional_core_features: trace,replay
additional_player_features:
- name: Ubuntu Nightly
os: ubuntu-18.04
channel: nightly
prepare_command: |
echo "Installing Vulkan"
sudo apt-get update -y -qq
sudo add-apt-repository ppa:kisak/kisak-mesa -y
sudo apt-get update
sudo apt install -y libxcb-xfixes0-dev mesa-vulkan-drivers
additional_core_features: serial-pass
additional_player_features: winit
- name: Windows Stable
os: windows-2019
channel: stable
prepare_command: rustup default stable-msvc
additional_core_features: trace,serial-pass
additional_player_features:
- name: Windows Nightly
os: windows-2019
channel: nightly
prepare_command: rustup default nightly-msvc
additional_core_features:
additional_player_features:
steps:
- uses: actions/checkout@v2
- if: matrix.channel == 'nightly'
name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
- if: matrix.channel == 'stable'
run: rustup component add clippy
# prepare
- if: matrix.prepare_command != ''
run: ${{ matrix.prepare_command }}
# build with no features first
- if: matrix.additional_core_features == ''
run: cargo check --manifest-path wgpu-core/Cargo.toml --no-default-features
- if: matrix.additional_core_features != ''
run: cargo check --manifest-path wgpu-core/Cargo.toml --features ${{ matrix.additional_core_features }}
- if: matrix.additional_player_features != ''
run: cargo check --manifest-path player/Cargo.toml --features ${{ matrix.additional_player_features }}
- if: matrix.channel == 'stable'
run: cargo clippy
- if: matrix.channel == 'nightly'
run: cargo test -- --nocapture
docs:
runs-on: [ubuntu-18.04]
steps:
- uses: actions/checkout@v2
- name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
continue-on-error: true
- name: cargo doc
run: cargo --version; cargo doc --no-deps
continue-on-error: true
lint:
name: Clippy
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
- run: rustup component add clippy
- uses: actions-rs/cargo@v1
with:
command: clippy
args: -- -D warnings
- uses: actions-rs/cargo@v1
with:
command: fmt
args: -- --check

45
gfx/wgpu/.github/workflows/docs.yml (vendored)
View file

@ -1,45 +0,0 @@
name: Documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
override: true
continue-on-error: true
- name: Add EGL for OpenGL
run: |
sudo apt-get update -y -qq
sudo apt-get install -y -qq libegl1-mesa-dev
- name: Build the docs (nightly)
run: |
cargo +nightly doc --lib --all-features
- name: Build the docs (stable)
run: cargo +stable doc --lib --all-features
if: ${{ failure() }}
- name: Deploy the docs
uses: JamesIves/github-pages-deploy-action@releases/v3
with:
ACCESS_TOKEN: ${{ secrets.WEB_DEPLOY }}
FOLDER: target/doc
REPOSITORY_NAME: gfx-rs/wgpu-rs.github.io
BRANCH: master
TARGET_FOLDER: doc

30
gfx/wgpu/.github/workflows/lazy.yaml (vendored)
View file

@ -1,30 +0,0 @@
# Lazy jobs running on master post merges.
name: lazy
on:
push:
branches: [master]
jobs:
coverage:
name: Coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
- run: sudo apt-get update -y -qq
- run: sudo add-apt-repository ppa:kisak/kisak-mesa -y
- run: sudo apt-get update
- run: sudo apt install -y libxcb-xfixes0-dev mesa-vulkan-drivers
- name: Generate report
uses: actions-rs/tarpaulin@v0.1
- name: Upload to codecov.io
uses: codecov/codecov-action@v1
with:
token: ${{ secrets.CODECOV_TOKEN }}
- name: Archive code coverage results
uses: actions/upload-artifact@v1
with:
name: code-coverage-report
path: cobertura.xml

47
gfx/wgpu/.github/workflows/publish.yml (vendored)
View file

@ -1,47 +0,0 @@
name: Publish
on:
push:
branches:
- gecko
env:
RUSTFLAGS: --cfg=web_sys_unstable_apis
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout the code
uses: actions/checkout@v2
with:
persist-credentials: false
- name: Install Rust WASM toolchain
uses: actions-rs/toolchain@v1
with:
toolchain: stable
target: wasm32-unknown-unknown
- name: Build the examples
run: cargo build --release --target wasm32-unknown-unknown --examples
- name: Install wasm-bindgen-cli
run: cargo install wasm-bindgen-cli
- name: Generate JS bindings for the examples
run: |
for i in target/wasm32-unknown-unknown/release/examples/*.wasm;
do
wasm-bindgen --no-typescript --out-dir target/generated --web "$i";
done
- name: Deploy the examples
uses: JamesIves/github-pages-deploy-action@releases/v3
with:
ACCESS_TOKEN: ${{ secrets.WEB_DEPLOY }}
FOLDER: target/generated
REPOSITORY_NAME: gfx-rs/wgpu-rs.github.io
BRANCH: master
TARGET_FOLDER: examples/wasm

17
gfx/wgpu/.gitignore (vendored)
View file

@ -1,17 +0,0 @@
# Generated by Cargo
# will have compiled files and executables
/target/
# These are backup files generated by rustfmt
**/*.rs.bk
# Other
.fuse_hidden*
.DS_Store
# IDE/Editor configuration files
.vscode
.idea
# Output from capture example
wgpu/red.png

View file

@ -1,293 +0,0 @@
# Change Log
## wgpu-core-0.9.2
- fix `Features::TEXTURE_SPECIFIC_FORMAT_FEATURES` not being supported for rendertargets
## wgpu-core-0.9.1 (2021-07-13)
- fix buffer inits delayed by a frame
- fix query resolves to initialize buffers
- fix pipeline statistics stride
- fix the check for maximum query count
## v0.9 (2021-06-18)
- Updated:
- naga to `v0.5`.
- Added:
- `Features::VERTEX_WRITABLE_STORAGE`.
- `Features::CLEAR_COMMANDS` which allows you to use `cmd_buf.clear_texture` and `cmd_buf.clear_buffer`.
- Changed:
- Updated default storage buffer/image limit to `8` from `4`.
- Fixed:
- `Buffer::get_mapped_range` can now have a range of zero.
- Fixed output spirv requiring the "kernal" capability.
- Fixed segfault due to improper drop order.
- Fixed incorrect dynamic stencil reference for Replace ops.
- Fixed tracking of temporary resources.
- Stopped unconditionally adding cubemap flags when the backend doesn't support cubemaps.
- Validation:
- Ensure that if resources are viewed from the vertex stage, they are read only unless `Features::VERTEX_WRITABLE_STORAGE` is true.
- Ensure storage class (i.e. storage vs uniform) is consistent between the shader and the pipeline layout.
- Error when a color texture is used as a depth/stencil texture.
- Check that pipeline output formats are logical
- Added shader label to log messages if validation fails.
- Tracing:
- Make renderpasses show up in the trace before they are run.
- Docs:
- Fix typo in `PowerPreference::LowPower` description.
- Player:
- Automatically start and stop RenderDoc captures.
- Examples:
- Handle winit's unconditional exception.
- Internal:
- Merged wgpu-rs and wgpu back into a single repository.
- The tracker was split into two different stateful/stateless trackers to reduce overhead.
- Added code coverage testing
- CI can now test on lavapipe
- Add missing extern "C" in wgpu-core on `wgpu_render_pass_execute_bundles`
- Fix incorrect function name `wgpu_render_pass_bundle_indexed_indirect` to `wgpu_render_bundle_draw_indexed_indirect`.
## wgpu-types-0.8.1 (2021-06-08)
- fix dynamic stencil reference for Replace ops
## v0.8.1 (2021-05-06)
- fix SPIR-V generation from WGSL, which was broken due to "Kernel" capability
- validate buffer storage classes
## v0.8 (2021-04-29)
- Naga is used by default to translate shaders, SPIRV-Cross is optional behind `cross` feature
- Features:
- buffers are zero-initialized
- downlevel limits for DX11/OpenGL support
- conservative rasterization (native-only)
- buffer resource indexing (native-only)
- API adjustments to the spec:
- Renamed `RenderPassColorAttachmentDescriptor` to `RenderPassColorAttachment`:
- Renamed the `attachment` member to `view`
- Renamed `RenderPassDepthStencilAttachmentDescriptor` to `RenderPassDepthStencilAttachment`:
- Renamed the `attachment` member to `view`
- Renamed `VertexFormat` values
- Examples: `Float3` -> `Float32x3`, `Ushort2` -> `Uint16x2`
- Renamed the `depth` value of `Extent3d` to `depth_or_array_layers`
- Updated blending options in `ColorTargetState`:
- Renamed `BlendState` to `BlendComponent`
- Added `BlendState` struct to hold color and alpha blend state
- Moved `color_blend` and `alpha_blend` members into `blend` member
- Moved `clamp_depth` from `RastizerState` to `PrimitiveState`
- Updated `PrimitiveState`:
- Added `conservative` member for enabling conservative rasterization
- Updated copy view structs:
- Renamed `TextureCopyView` to `ImageCopyTexture`
- Renamed `TextureDataLayout` to `ImageDataLayout`
- Changed `bytes_per_row` and `rows_per_image` members of `ImageDataLayout` from `u32` to `Option<NonZeroU32>` <!-- wgpu-rs only -->
- Changed `BindingResource::Binding` from containing fields directly to containing a `BufferBinding`
- Added `BindingResource::BufferArray`
- Infrastructure:
- switch from `tracing` to `profiling`
- more concrete and detailed errors
- API traces include the command that crashed/panicked
- Vulkan Portability support is removed from Apple platforms
- Validation:
- texture bindings
- filtering of textures by samplers
- interpolation qualifiers
- allow vertex components to be underspecified
## wgpu-core-0.7.1 (2021-02-25)
- expose `wgc::device::queue` sub-module in public
- fix the indexed buffer check
- fix command allocator race condition
## v0.7 (2021-01-31)
- Major API changes:
- `RenderPipelineDescriptor`
- `BindingType`
- new `ShaderModuleDescriptor`
- new `RenderEncoder`
- Features:
- (beta) WGSL support, including the ability to bypass SPIR-V entirely
- (beta) implicit bind group layout support
- better error messages
- timestamp and pipeline statistics queries
- ETC2 and ASTC compressed textures
- (beta) targeting WASM with WebGL backend
- reduced dependencies
- Native-only:
- clamp-to-border addressing
- polygon fill modes
- query a format for extra capabilities
- `f64` support in shaders
- Validation:
- shader interface
- render pipeline descriptor
- vertex buffers
### wgpu-0.6.2 (2020-11-24)
- don't panic in the staging belt if the channel is dropped
## v0.6 (2020-08-17)
- Crates:
- C API is moved to [another repository](https://github.com/gfx-rs/wgpu-native)
- `player`: standalone API replayer and tester
- Features:
- Proper error handling with all functions returning `Result`
- Graceful handling of "error" objects
- API tracing [infrastructure](http://kvark.github.io/wgpu/debug/test/ron/2020/07/18/wgpu-api-tracing.html)
- uploading data with `write_buffer`/`write_texture` queue operations
- reusable render bundles
- read-only depth/stencil attachments
- bind group layout deduplication
- Cows, cows everywhere
- Web+Native features:
- Depth clamping (feature)
- BC texture compression
- Native-only features:
- mappable primary buffers
- texture array bindings
- push constants
- multi-draw indirect
- Validation:
- all transfer operations
- all resource creation
- bind group matching to the layout
- experimental shader interface matching with Naga
## wgpu-core-0.5.6 (2020-07-09)
- add debug markers support
## wgpu-core-0.5.5 (2020-05-20)
- fix destruction of adapters, swap chains, and bind group layouts
- fix command pool leak with temporary threads
- improve assertion messages
- implement `From<TextureFormat>` for `TextureComponentType`
## wgpu-core-0.5.4 (2020-04-24)
- fix memory management of staging buffers
## wgpu-core-0.5.3 (2020-04-18)
- fix reading access to storage textures
- another fix to layout transitions for swapchain images
## wgpu-core-0.5.2 (2020-04-15)
- fix read-only storage flags
- fix pipeline layout life time
- improve various assert messages
## wgpu-core-0.5.1 (2020-04-10)
- fix tracking of swapchain images that are used multiple times in a command buffer
- fix tracking of initial usage of a resource across a command buffer
## v0.5 (2020-04-06)
- Crates:
- `wgpu-types`: common types between native and web targets
- `wgpu-core`: internal API for the native and remote wrappers
- Features:
- based on gfx-hal-0.5
- moved from Rendy to the new `gfx-memory` and `gfx-descriptor` crates
- passes are now recorded on the client side. The user is also responsible to keep all resources referenced in the pass up until it ends recording.
- coordinate system is changed to have Y up in the rendering space
- revised GPU lifetime tracking of all resources
- revised usage tracking logic
- all IDs are now non-zero
- Mailbox present mode
- Validation:
- active pipeline
- Fixes:
- lots of small API changes to closely match upstream WebGPU
- true read-only storage bindings
- unmapping dropped buffers
- better error messages on misused swapchain frames
## wgpu-core-0.4.3 (2020-01-20)
- improved swap chain error handling
## wgpu-core-0.4.2 (2019-12-15)
- fixed render pass transitions
## wgpu-core-0.4.1 (2019-11-28)
- fixed depth/stencil transitions
- fixed dynamic offset iteration
## v0.4 (2019-11-03)
- Platforms: removed OpenGL/WebGL support temporarily
- Features:
- based on gfx-hal-0.4 with the new swapchain model
- exposing adapters from all available backends on a system
- tracking of samplers
- cube map support with an example
- Validation:
- buffer and texture usage
## wgpu-core-0.3.3 (2019-08-22)
- fixed instance creation on Windows
## wgpu-core-0.3.1 (2019-08-21)
- fixed pipeline barriers that aren't transitions
## v0.3 (2019-08-21)
- Platforms: experimental OpenGL/WebGL
- Crates:
- Rust API is moved out to [another repository](https://github.com/gfx-rs/wgpu-rs)
- Features:
- based on gfx-hal-0.3 with help of `rendy-memory` and `rendy-descriptor`
- type-system-assisted deadlock prevention (for locking internal structures)
- texture sub-resource tracking
- `raw-window-handle` integration instead of `winit`
- multisampling with an example
- indirect draws and dispatches
- stencil masks and reference values
- native "compute" example
- everything implements `Debug`
- Validation
- vertex/index/instance ranges at draw calls
- bing groups vs their expected layouts
- bind group buffer ranges
- required stencil reference, blend color
## wgpu-core-0.2.6 (2019-04-04)
- fixed frame acquisition GPU waits
## wgpu-core-0.2.5 (2019-03-31)
- fixed submission tracking
- added support for blend colors
- fixed bind group compatibility at the gfx-hal level
- validating the bind groups and blend colors
## wgpu-core-0.2.3 (2019-03-20)
- fixed vertex format mapping
- fixed building with "empty" backend on Windows
- bumped the default descriptor pool size
- fixed host mapping alignments
- validating the uniform buffer offset
## v0.2 (2019-03-06)
- Platforms: iOS/Metal, D3D11
- Crates:
- `wgpu-remote`: remoting layer for the cross-process boundary
- `gfx-examples`: selected gfx pre-ll examples ported over
- Features:
- native example for compute
- "gfx-cube" and "gfx-shadow" examples
- copies between buffers and textures
- separate object identity for the remote client
- texture view tracking
- native swapchain resize support
- buffer mapping
- object index epochs
- comprehensive list of vertex and texture formats
- validation of pipeline compatibility with the pass
- Fixes
- fixed resource destruction
## v0.1 (2019-01-24)
- Platforms: Linux/Vulkan, Windows/Vulkan, D3D12, macOS/Metal
- Crates:
- `wgpu-native`: C API implementation of WebGPU, based on gfx-hal
- `wgpu-bindings`: auto-generated C headers
- `wgpu`: idiomatic Rust wrapper
- `examples`: native C examples
- Features:
- native examples for triangle rendering
- basic native swapchain integration
- concept of the storage hub
- basic recording of passes and command buffers
- submission-based lifetime tracking and command buffer recycling
- automatic resource transitions

1746
gfx/wgpu/Cargo.lock (generated)

The diff is not shown because of its large size.

View file

@ -1,32 +0,0 @@
[workspace]
members = [
"dummy",
"player",
#"wgpu",
"wgpu-core",
"wgpu-types",
]
default-members = ["player"]
[patch."https://github.com/gfx-rs/naga"]
#naga = { path = "../naga" }
[patch."https://github.com/zakarumych/gpu-descriptor"]
#gpu-descriptor = { path = "../gpu-descriptor/gpu-descriptor" }
[patch."https://github.com/zakarumych/gpu-alloc"]
#gpu-alloc = { path = "../gpu-alloc/gpu-alloc" }
[patch."https://github.com/gfx-rs/gfx"]
#gfx-hal = { path = "../gfx/src/hal" }
#gfx-backend-empty = { path = "../gfx/src/backend/empty" }
#gfx-backend-vulkan = { path = "../gfx/src/backend/vulkan" }
#gfx-backend-gl = { path = "../gfx/src/backend/gl" }
#gfx-backend-dx12 = { path = "../gfx/src/backend/dx12" }
#gfx-backend-dx11 = { path = "../gfx/src/backend/dx11" }
#gfx-backend-metal = { path = "../gfx/src/backend/metal" }
[patch.crates-io]
#web-sys = { path = "../wasm-bindgen/crates/web-sys" }
#js-sys = { path = "../wasm-bindgen/crates/js-sys" }
#wasm-bindgen = { path = "../wasm-bindgen" }

View file

@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View file

@ -1,21 +0,0 @@
MIT License
Copyright (c) 2021 The gfx-rs developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -1,33 +0,0 @@
<img align="right" width="25%" src="logo.png">
# wgpu
[![Matrix](https://img.shields.io/badge/Dev_Matrix-%23wgpu%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu:matrix.org) [![Matrix](https://img.shields.io/badge/User_Matrix-%23wgpu--users%3Amatrix.org-blueviolet.svg)](https://matrix.to/#/#wgpu-users:matrix.org)
[![Build Status](https://github.com/gfx-rs/wgpu/workflows/CI/badge.svg)](https://github.com/gfx-rs/wgpu/actions)
[![codecov.io](https://codecov.io/gh/gfx-rs/wgpu/branch/master/graph/badge.svg?token=84qJTesmeS)](https://codecov.io/gh/gfx-rs/wgpu)
This is an implementation of [WebGPU](https://www.w3.org/community/gpu/) API in Rust, targeting both native and the Web.
It's written in Rust and is based on [gfx-hal](https://github.com/gfx-rs/gfx) with help of [gpu-alloc](https://github.com/zakarumych/gpu-alloc) and [gpu-descriptor](https://github.com/zakarumych/gpu-descriptor). See the upstream [WebGPU specification](https://gpuweb.github.io/gpuweb/) (work in progress).
The repository hosts the following parts:
- [![Crates.io](https://img.shields.io/crates/v/wgpu.svg?label=wgpu)](https://crates.io/crates/wgpu) [![docs.rs](https://docs.rs/wgpu/badge.svg)](https://docs.rs/wgpu/) - public Rust API for users
- [![Crates.io](https://img.shields.io/crates/v/wgpu-core.svg?label=wgpu-core)](https://crates.io/crates/wgpu-core) [![docs.rs](https://docs.rs/wgpu-core/badge.svg)](https://docs.rs/wgpu-core/) - internal Rust API for WebGPU implementations to use
- [![Crates.io](https://img.shields.io/crates/v/wgpu-types.svg?label=wgpu-types)](https://crates.io/crates/wgpu-types) [![docs.rs](https://docs.rs/wgpu-types/badge.svg)](https://docs.rs/wgpu-types/) - Rust types shared between `wgpu-core` and `wgpu-rs`
- `player` - standalone application for replaying the API traces, uses `winit`
Rust examples can be found at `wgpu/examples`. `wgpu` is a default member, so you can run the examples directly from the root, e.g. `cargo run --example boids`.
If you are looking for the native implementation or bindings to the API in other languages, you need [wgpu-native](https://github.com/gfx-rs/wgpu-native).
## Supported Platforms
API | Windows 7/10 | Linux & Android | macOS & iOS |
----- | ------------------ | ------------------ | ------------------ |
DX11 | :ok: | | |
DX12 | :white_check_mark: | | |
Vulkan | :white_check_mark: | :white_check_mark: | |
Metal | | | :white_check_mark: |
GL ES3 | | :construction: | |
:white_check_mark: = Primary support — :ok: = Secondary support — :construction: = Unsupported, but support in progress

View file

@ -1,12 +0,0 @@
status = [
"iOS Stable",
"MacOS Stable",
"MacOS Nightly",
"Android Stable",
"Ubuntu Stable",
"Ubuntu Nightly",
"Windows Stable",
"Windows Nightly",
"Web Assembly",
#"Clippy",
]

View file

@ -1,16 +0,0 @@
[package]
name = "dummy"
version = "0.1.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
]
edition = "2018"
license = "MIT OR Apache-2.0"
publish = false
[features]
[dependencies.wgc]
path = "../wgpu-core"
package = "wgpu-core"
features = ["serial-pass", "trace"]

View file

@ -1,3 +0,0 @@
# wgpu dummy
This is a dummy build target that makes `cargo check` and `cargo test` in the workspace to cover all the API.

View file

@ -1,3 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

View file

@ -1,8 +0,0 @@
<!-- This Source Code Form is subject to the terms of the Mozilla Public
- License, v. 2.0. If a copy of the MPL was not distributed with this file,
- You can obtain one at http://mozilla.org/MPL/2.0/. -->
<mxfile host="app.diagrams.net" modified="2020-11-16T21:27:20.203Z" agent="5.0 (Macintosh)" etag="9OfcpEHAtX6aquDOhW6o" version="13.9.9" type="device">
<diagram id="D9TiAgX86u4XTTQGlvGa" name="Page-1">
7V1tk5s2EP41nmk/+AYk3vzxXtI0aZrenKe9Jl8yspExPQwuYJ+vv77CIBskgTEGgdNeZjJICDB69tldrVZiBO9Xu/chWi9/DWzsjYBi70bwYQTARDPI/0nFW1qhQz2tcELXTqvUY8XU/QdnlUpWu3FtHBUaxkHgxe66WDkPfB/P40IdCsPgtdhsEXjFp66Rg7mK6Rx5fO2za8fLtNbSlWP9z9h1lvTJqpKdmaH5ixMGGz97nh/4OD2zQvQ2WdNoiezgNfc8+G4E78MgiNOj1e4ee0mv0h5Lr/up5OzhJ4fYj+tc4H18dh6/or9/Htvbr78+xar2dTU2QXqbLfI2WV98nI59FLtb8h7KM569f/w96eQ1QSJ9jfiN9trr0o3xdI3mSfmVCMYI3i3jlUdKKjm0UbTEdlZYuJ53H3hBuL8ULhYLMJ+T+i0OY5cAceu5jk/OxUFyF5SV5uTdcJhcHvjxNHtycrsoDoMXnLuhbcwM3SBn+G7Jeip5Et6x/U8kGgcrHIdvpMmOSm8GWSbNwMjKr0fZUGndMicXtA5l4ugcbn3EhRxk0JwBk2FxMH3uHCOs2jo2W8RoYpgQtYWRUcRItQQY6QKMzBYwQr9vP8cPzz+9C55U7+nbt1/86YexanCdj22iZbJiEMbLwAl85L071t4daz8FSa/uO+4vHMdvmZJEmzgoQoZ3bvwnOVay4y/J8Y2elR52uVMPb1mhAHP6M5PfVqpJsqoo2IRzXCWXhhikEHupeBYUtaDHs0sfA5c8+gCuxmCr68U7xCh0cJxdxMB2+BUXsM0oZ9sfG+8F+f81th3sHUUECNhmdsQ2MUYKhxFRhAO0VC30PmR0HYR87wNL0PtWV72vT7jeHwHDi5O+dLfk0EkOnzZRfEAkPTsL6UlaQ56fu6RV5Kw5rkCuA6DApAiUwCZBEU6dsQRYw7VJpJvDtz/pDZJC7qqkeLxsX6LXpYqOuumgCrmT9ouONfb2pKqdJjSaFxi+i2CFpmRYb4CeQ1ath2sOyi8FJOvgWsnIk7jCuri2Dev+0tswRG+5BuvEUYkq3B1Gv3MjObY90Kvak4P0F7TqFEFZqqRELYBTeqGJDmpTlxhmr0LHSYnOGCOgMV50+kadedFwIllglBuzoHDUugJT1G6KNIkBtc3PINSUrond0FI1petV7btRU7oqSeqaCE9jUZXg4YBBeTg6H5cUDDCe308/lY0juMbRZr0mQDceiSQ98wnNsFcUBG4AyI42Vq5t76UsxJH7D5rt75eAmvGN3Fy/G+kPQpgrZZwbmBwC5NlTRplnUDpgUW5UMMl826bmhjYJFosId2JJ1FJZSMZwBZSMvzcBPTGO9uS9JQ1Ip+6OJynEr856kwM/vVkJ+tESrZNDAg7yPOwFTohWCYg4dMn7JcgXzz0eT9ydGMQu3B2mkyKCcaytY8vWRCEeC8yg0VKIBxqMU6kJQjxAZtBb12Sr8u4HORc4CzUVORyEr6BZbAj3hK8wqWzfja9Qy8bc4e1bTQvRo444xLo4HTGzdE2vFL36OkIDDEpqTR0BW9AR4kkXXa6OUEf1B5dHHXHmSLar2Zq6UZF0JNtbcItjZWKnxyEf1f/ubbLGTEQLJzm74pvYJkvmW0ObbEoyytpVGWWDncUzqo2yzqp7Q4JR1jj639zcDIn6NUxtcgWlRCbbLWgDNv4i1fqKtQG8Cm0AJGkD/aq0ATurfEobsLMUUrSBzmmDz8j3g82VKYQW2A9Z5d07+4H0WchGkVYgKdJq1GT/MIL57BziqWA+nPQQzOfzsKY43AaDIj9ND+o0I5UNzh3KvZHf5KF5/PD0x/g+DKKz86/4vjMnMyWho+OhKMqoeUigLnGxohccz5dlMGFjD1MLYADGLYa9j8ro5E0ODR85aFg8yQAox7oNaNgwotk3NEB6ENsctJVU6w6ZBzYhCXlPVBAsdrCPQxQnicLJyhDs2/WnJ4l2w2HzRMnhTE9Skb98enJM5BIqVoHSdPQ03OlKICvxReK8lSAk3Sn7zWGRn3dE/yd/tfC3Qn6V/BXt+dC5r8lKP2qYPF1K3dP22GibkuKBJoBa0btWGNesJG3x3AHwwTVknlM2AObaZ1OP3QbD+XV/9o56RoPkv1bmszfITYKTCbgy+ssOiJ/v6relMFr09Fs39o0UAqCrKmgCFJRBcD6Osn0ZMr3p7HwL9FYUq9jj4yHxW5xuIla9ktZTmd0sqBKyrz3OCztS64Ph6oQ1+ZNqk8+219vVCGIR4/NfiMkXu5VSdEJtqhdFpoI/FTpBNY3B6wA+Q2EVe33qBXCVekHcUL9QMVwWR+ehdRa7oaWeIWwthJF0Y27h2aKlSDozySFayyyMpGstRNKFvJMdVStQrJphbTLlUgKUTN8yaJpyd9RQ+eg5uSXy/pPUYtK4hLsLSU2ivo6czgZJXO0wUpDY0YlPexGGA97ooXUsutaONLQnTTvy0ws2tFXwv3bMhX/7W2KicCB8l9S6dNFHPccDSqYWH19D0XJIrFroyT8hq/Z/o3Qxa64+/evGFxHt6yU1X0aT7Yyc74oMMIJuCRWB9Ag6mxinVU+pAeXC9hMZEXreODviaM9AIvR0qNXGBLyiXl2IXnZSek/W2pJhrQ9J37KsNT/d7XjB65DMdT9OcO8ZxodXOCLzHs9f+FT8k73bQ/a8rjLdWTfi0kYsU7ywQbSlCau2fPs22dqclPa7igsy5uk2VHv3MZ8dTwniuf5LVR+edBhyPSTaTJnWXZrzU8RHU5l+L0n5OXWfA6ay9g0GAlS5XL/fprXTAu2Q9OkxLfBk+/0+4FeWMGiU0blBRoFpQVh0V8CA3BXx60sPvhrN9vLUVPOcAOy+lDO7/AqeVvderD1San0j12YjJUU87i5VbGZl+45W3/GzoaKs58VunOyrhmau58alW+Vw1/1AfpRy+/jhx8ZJz9TILfEOETIw/l9WexBBcL7rx+/t3YcvqDIrNQ9fNejNGTT4oNp+ixb62ZDvEmspG77oJrMJW++LvE3Z+2meMZ4ui8iZp0JyFXbosvUtVE3LXwR+Gcj8xxQesB+UszXP7Y/Tqye3pGGozhhxmaN6YaxswF+sEfLjJCm7yzU9GSrr7hs15S9VNMGuj9ZuKbmucWpLMCbkBKB2DK3ufqYN2EaKx++4pQgfP5MH3/0L
</diagram>
</mxfile>

Binary data
gfx/wgpu/logo.png

Binary file not shown (was 37 KiB).

View file

@ -1,36 +0,0 @@
[package]
name = "player"
version = "0.1.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
]
edition = "2018"
description = "WebGPU trace player"
homepage = "https://github.com/gfx-rs/wgpu"
repository = "https://github.com/gfx-rs/wgpu"
keywords = ["graphics"]
license = "MIT OR Apache-2.0"
publish = false

[features]
cross = ["wgc/cross"]

[dependencies]
env_logger = "0.8"
log = "0.4"
raw-window-handle = "0.3"
ron = "0.6"
winit = { version = "0.24", optional = true }

[dependencies.wgt]
path = "../wgpu-types"
package = "wgpu-types"
features = ["replay"]

[dependencies.wgc]
path = "../wgpu-core"
package = "wgpu-core"
features = ["replay", "raw-window-handle"]

[dev-dependencies]
serde = "1"

View file

@ -1,13 +0,0 @@
# wgpu player
This is an application that allows replaying `wgpu` workloads recorded elsewhere. The player must be built from
the same revision that the application was linked against; otherwise the trace data may fail to load.

Launch as:

```sh
play <trace-dir>
```

When built with the "winit" feature, it can replay workloads that operate on a swapchain. It renders each frame sequentially, then waits for the user to close the window. When built without "winit", it launches in console mode and can replay any trace that doesn't use swapchains.

Note: replaying is currently restricted to the same backend as the one used for recording the trace. It is straightforward, however, to replace the backend in the RON file, since the trace is serialized as plain text. Valid values are: Vulkan, Metal, Dx12, and Dx11. A sketch of a typical invocation follows.
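Assuming the crate layout shown above (a `play` binary, the optional `winit` dependency doubling as a feature flag, and a trace file named `trace.ron`; the file name and exact flags are illustrative assumptions, not taken verbatim from the original README), a run might look like this:

```sh
# Windowed replay: enable the optional "winit" dependency/feature.
cargo run --bin play --features winit -- path/to/trace-dir

# Console-mode replay, for traces that never touch a swapchain.
cargo run --bin play -- path/to/trace-dir

# The backend is stored as plain text inside the trace, so retargeting it
# (e.g. Metal -> Vulkan) is a simple textual substitution.
sed -i 's/Metal/Vulkan/g' path/to/trace-dir/trace.ron
```

Either form expects the directory produced by `wgpu-core` tracing: the RON action list plus any binary data files (such as `data1.bin`) referenced from it.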

View file

@ -1,193 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! This is a player for WebGPU traces.
!*/
use player::{GlobalPlay as _, IdentityPassThroughFactory};
use wgc::{device::trace, gfx_select};
use std::{
fs,
path::{Path, PathBuf},
};
fn main() {
#[cfg(feature = "winit")]
use winit::{event_loop::EventLoop, window::WindowBuilder};
env_logger::init();
//TODO: setting for the backend bits
//TODO: setting for the target frame, or controls
let dir = match std::env::args().nth(1) {
Some(arg) if Path::new(&arg).is_dir() => PathBuf::from(arg),
_ => panic!("Provide the dir path as the parameter"),
};
log::info!("Loading trace '{:?}'", dir);
let file = fs::File::open(dir.join(trace::FILE_NAME)).unwrap();
let mut actions: Vec<trace::Action> = ron::de::from_reader(file).unwrap();
actions.reverse(); // allows us to pop from the top
log::info!("Found {} actions", actions.len());
#[cfg(feature = "winit")]
let event_loop = {
log::info!("Creating a window");
EventLoop::new()
};
#[cfg(feature = "winit")]
let window = WindowBuilder::new()
.with_title("wgpu player")
.with_resizable(true)
.build(&event_loop)
.unwrap();
let global = wgc::hub::Global::new(
"player",
IdentityPassThroughFactory,
wgt::BackendBit::PRIMARY,
);
let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
#[cfg(feature = "winit")]
let surface =
global.instance_create_surface(&window, wgc::id::TypedId::zip(0, 1, wgt::Backend::Empty));
let device = match actions.pop() {
Some(trace::Action::Init { desc, backend }) => {
log::info!("Initializing the device for backend: {:?}", backend);
let adapter = global
.request_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::LowPower,
#[cfg(feature = "winit")]
compatible_surface: Some(surface),
#[cfg(not(feature = "winit"))]
compatible_surface: None,
},
wgc::instance::AdapterInputs::IdSet(
&[wgc::id::TypedId::zip(0, 0, backend)],
|id| id.backend(),
),
)
.expect("Unable to find an adapter for selected backend");
let info = gfx_select!(adapter => global.adapter_get_info(adapter)).unwrap();
log::info!("Picked '{}'", info.name);
let id = wgc::id::TypedId::zip(1, 0, backend);
let (_, error) = gfx_select!(adapter => global.adapter_request_device(
adapter,
&desc,
None,
id
));
if let Some(e) = error {
panic!("{:?}", e);
}
id
}
_ => panic!("Expected Action::Init"),
};
log::info!("Executing actions");
#[cfg(not(feature = "winit"))]
{
gfx_select!(device => global.device_start_capture(device));
while let Some(action) = actions.pop() {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
gfx_select!(device => global.device_stop_capture(device));
gfx_select!(device => global.device_poll(device, true)).unwrap();
}
#[cfg(feature = "winit")]
{
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::ControlFlow,
};
let mut resize_desc = None;
let mut frame_count = 0;
let mut done = false;
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
window.request_redraw();
}
Event::RedrawRequested(_) if resize_desc.is_none() => loop {
match actions.pop() {
Some(trace::Action::CreateSwapChain(id, desc)) => {
log::info!("Initializing the swapchain");
assert_eq!(id.to_surface_id(), surface);
let current_size: (u32, u32) = window.inner_size().into();
let size = (desc.width, desc.height);
if current_size != size {
window.set_inner_size(winit::dpi::PhysicalSize::new(
desc.width,
desc.height,
));
resize_desc = Some(desc);
break;
} else {
let (_, error) = gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
if let Some(e) = error {
panic!("{:?}", e);
}
}
}
Some(trace::Action::PresentSwapChain(id)) => {
frame_count += 1;
log::debug!("Presenting frame {}", frame_count);
gfx_select!(device => global.swap_chain_present(id)).unwrap();
break;
}
Some(action) => {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
None => {
if !done {
println!("Finished the end at frame {}", frame_count);
done = true;
}
break;
}
}
},
Event::WindowEvent { event, .. } => match event {
WindowEvent::Resized(_) => {
if let Some(desc) = resize_desc.take() {
let (_, error) = gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
if let Some(e) = error {
panic!("{:?}", e);
}
}
}
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(VirtualKeyCode::Escape),
state: ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
_ => {}
},
Event::LoopDestroyed => {
log::info!("Closing");
gfx_select!(device => global.device_poll(device, true)).unwrap();
}
_ => {}
}
});
}
}

View file

@ -1,377 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! This is a player library for WebGPU traces.
*
* # Notes
* - we call device_maintain_ids() before creating any refcounted resource,
* which is basically everything except for BGL and shader modules,
* so that we don't accidentally try to use the same ID.
!*/
use wgc::device::trace;
use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path};
#[derive(Debug)]
pub struct IdentityPassThrough<I>(PhantomData<I>);
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandler<I> for IdentityPassThrough<I> {
type Input = I;
fn process(&self, id: I, backend: wgt::Backend) -> I {
let (index, epoch, _backend) = id.unzip();
I::zip(index, epoch, backend)
}
fn free(&self, _id: I) {}
}
pub struct IdentityPassThroughFactory;
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandlerFactory<I>
for IdentityPassThroughFactory
{
type Filter = IdentityPassThrough<I>;
fn spawn(&self, _min_index: u32) -> Self::Filter {
IdentityPassThrough(PhantomData)
}
}
impl wgc::hub::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {}
pub trait GlobalPlay {
fn encode_commands<B: wgc::hub::GfxBackend>(
&self,
encoder: wgc::id::CommandEncoderId,
commands: Vec<trace::Command>,
) -> wgc::id::CommandBufferId;
fn process<B: wgc::hub::GfxBackend>(
&self,
device: wgc::id::DeviceId,
action: trace::Action,
dir: &Path,
comb_manager: &mut wgc::hub::IdentityManager,
);
}
impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
fn encode_commands<B: wgc::hub::GfxBackend>(
&self,
encoder: wgc::id::CommandEncoderId,
commands: Vec<trace::Command>,
) -> wgc::id::CommandBufferId {
for command in commands {
match command {
trace::Command::CopyBufferToBuffer {
src,
src_offset,
dst,
dst_offset,
size,
} => self
.command_encoder_copy_buffer_to_buffer::<B>(
encoder, src, src_offset, dst, dst_offset, size,
)
.unwrap(),
trace::Command::CopyBufferToTexture { src, dst, size } => self
.command_encoder_copy_buffer_to_texture::<B>(encoder, &src, &dst, &size)
.unwrap(),
trace::Command::CopyTextureToBuffer { src, dst, size } => self
.command_encoder_copy_texture_to_buffer::<B>(encoder, &src, &dst, &size)
.unwrap(),
trace::Command::CopyTextureToTexture { src, dst, size } => self
.command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size)
.unwrap(),
trace::Command::ClearBuffer { dst, offset, size } => self
.command_encoder_clear_buffer::<B>(encoder, dst, offset, size)
.unwrap(),
trace::Command::ClearImage {
dst,
subresource_range,
} => self
.command_encoder_clear_image::<B>(encoder, dst, &subresource_range)
.unwrap(),
trace::Command::WriteTimestamp {
query_set_id,
query_index,
} => self
.command_encoder_write_timestamp::<B>(encoder, query_set_id, query_index)
.unwrap(),
trace::Command::ResolveQuerySet {
query_set_id,
start_query,
query_count,
destination,
destination_offset,
} => self
.command_encoder_resolve_query_set::<B>(
encoder,
query_set_id,
start_query,
query_count,
destination,
destination_offset,
)
.unwrap(),
trace::Command::RunComputePass { base } => {
self.command_encoder_run_compute_pass_impl::<B>(encoder, base.as_ref())
.unwrap();
}
trace::Command::RunRenderPass {
base,
target_colors,
target_depth_stencil,
} => {
self.command_encoder_run_render_pass_impl::<B>(
encoder,
base.as_ref(),
&target_colors,
target_depth_stencil.as_ref(),
)
.unwrap();
}
}
}
let (cmd_buf, error) = self
.command_encoder_finish::<B>(encoder, &wgt::CommandBufferDescriptor { label: None });
if let Some(e) = error {
panic!("{:?}", e);
}
cmd_buf
}
fn process<B: wgc::hub::GfxBackend>(
&self,
device: wgc::id::DeviceId,
action: trace::Action,
dir: &Path,
comb_manager: &mut wgc::hub::IdentityManager,
) {
use wgc::device::trace::Action as A;
log::info!("action {:?}", action);
//TODO: find a way to force ID perishing without excessive `maintain()` calls.
match action {
A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"),
A::CreateSwapChain { .. } | A::PresentSwapChain(_) => {
panic!("Unexpected SwapChain action: winit feature is not enabled")
}
A::CreateBuffer(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_buffer::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::FreeBuffer(id) => {
self.buffer_destroy::<B>(id).unwrap();
}
A::DestroyBuffer(id) => {
self.buffer_drop::<B>(id, true);
}
A::CreateTexture(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_texture::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::FreeTexture(id) => {
self.texture_destroy::<B>(id).unwrap();
}
A::DestroyTexture(id) => {
self.texture_drop::<B>(id, true);
}
A::CreateTextureView {
id,
parent_id,
desc,
} => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.texture_create_view::<B>(parent_id, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyTextureView(id) => {
self.texture_view_drop::<B>(id, true).unwrap();
}
A::CreateSampler(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_sampler::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroySampler(id) => {
self.sampler_drop::<B>(id);
}
A::GetSwapChainTexture { id, parent_id } => {
self.device_maintain_ids::<B>(device).unwrap();
self.swap_chain_get_current_texture_view::<B>(parent_id, id)
.unwrap()
.view_id
.unwrap();
}
A::CreateBindGroupLayout(id, desc) => {
let (_, error) = self.device_create_bind_group_layout::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyBindGroupLayout(id) => {
self.bind_group_layout_drop::<B>(id);
}
A::CreatePipelineLayout(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_pipeline_layout::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyPipelineLayout(id) => {
self.pipeline_layout_drop::<B>(id);
}
A::CreateBindGroup(id, desc) => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_bind_group::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyBindGroup(id) => {
self.bind_group_drop::<B>(id);
}
A::CreateShaderModule { id, desc, data } => {
let source = if data.ends_with(".wgsl") {
let code = fs::read_to_string(dir.join(data)).unwrap();
wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code))
} else {
let byte_vec = fs::read(dir.join(&data))
.unwrap_or_else(|e| panic!("Unable to open '{}': {:?}", data, e));
let spv = byte_vec
.chunks(4)
.map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
.collect::<Vec<_>>();
wgc::pipeline::ShaderModuleSource::SpirV(Cow::Owned(spv))
};
let (_, error) = self.device_create_shader_module::<B>(device, &desc, source, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyShaderModule(id) => {
self.shader_module_drop::<B>(id);
}
A::CreateComputePipeline {
id,
desc,
implicit_context,
} => {
self.device_maintain_ids::<B>(device).unwrap();
let implicit_ids =
implicit_context
.as_ref()
.map(|ic| wgc::device::ImplicitPipelineIds {
root_id: ic.root_id,
group_ids: &ic.group_ids,
});
let (_, error) =
self.device_create_compute_pipeline::<B>(device, &desc, id, implicit_ids);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyComputePipeline(id) => {
self.compute_pipeline_drop::<B>(id);
}
A::CreateRenderPipeline {
id,
desc,
implicit_context,
} => {
self.device_maintain_ids::<B>(device).unwrap();
let implicit_ids =
implicit_context
.as_ref()
.map(|ic| wgc::device::ImplicitPipelineIds {
root_id: ic.root_id,
group_ids: &ic.group_ids,
});
let (_, error) =
self.device_create_render_pipeline::<B>(device, &desc, id, implicit_ids);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyRenderPipeline(id) => {
self.render_pipeline_drop::<B>(id);
}
A::CreateRenderBundle { id, desc, base } => {
let bundle =
wgc::command::RenderBundleEncoder::new(&desc, device, Some(base)).unwrap();
let (_, error) = self.render_bundle_encoder_finish::<B>(
bundle,
&wgt::RenderBundleDescriptor { label: desc.label },
id,
);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyRenderBundle(id) => {
self.render_bundle_drop::<B>(id);
}
A::CreateQuerySet { id, desc } => {
self.device_maintain_ids::<B>(device).unwrap();
let (_, error) = self.device_create_query_set::<B>(device, &desc, id);
if let Some(e) = error {
panic!("{:?}", e);
}
}
A::DestroyQuerySet(id) => {
self.query_set_drop::<B>(id);
}
A::WriteBuffer {
id,
data,
range,
queued,
} => {
let bin = std::fs::read(dir.join(data)).unwrap();
let size = (range.end - range.start) as usize;
if queued {
self.queue_write_buffer::<B>(device, id, range.start, &bin)
.unwrap();
} else {
self.device_wait_for_buffer::<B>(device, id).unwrap();
self.device_set_buffer_sub_data::<B>(device, id, range.start, &bin[..size])
.unwrap();
}
}
A::WriteTexture {
to,
data,
layout,
size,
} => {
let bin = std::fs::read(dir.join(data)).unwrap();
self.queue_write_texture::<B>(device, &to, &bin, &layout, &size)
.unwrap();
}
A::Submit(_index, ref commands) if commands.is_empty() => {
self.queue_submit::<B>(device, &[]).unwrap();
}
A::Submit(_index, commands) => {
let (encoder, error) = self.device_create_command_encoder::<B>(
device,
&wgt::CommandEncoderDescriptor { label: None },
comb_manager.alloc(device.backend()),
);
if let Some(e) = error {
panic!("{:?}", e);
}
let cmdbuf = self.encode_commands::<B>(encoder, commands);
self.queue_submit::<B>(device, &[cmdbuf]).unwrap();
}
}
}
}

View file

@ -1,11 +0,0 @@
(
backends: (bits: 0xF),
tests: [
"bind-group.ron",
"buffer-copy.ron",
"clear-buffer-image.ron",
"buffer-zero-init.ron",
"pipeline-statistics-query.ron",
"quad.ron",
],
)

View file

@ -1,81 +0,0 @@
(
features: (bits: 0x0),
expectations: [], //not crash!
actions: [
CreatePipelineLayout(Id(0, 1, Empty), (
label: Some("empty"),
bind_group_layouts: [],
push_constant_ranges: [],
)),
CreateShaderModule(
id: Id(0, 1, Empty),
desc: (
label: None,
flags: (bits: 3),
),
data: "empty.wgsl",
),
CreateComputePipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
),
),
CreateBuffer(Id(0, 1, Empty), (
label: None,
size: 16,
usage: (
bits: 64,
),
mapped_at_creation: false,
)),
CreateBindGroupLayout(Id(0, 1, Empty), (
label: None,
entries: [
(
binding: 0,
visibility: (bits: 0x3),
ty: Buffer(
ty: Uniform,
),
),
],
)),
CreateBindGroup(Id(0, 1, Empty), (
label: None,
layout: Id(0, 1, Empty),
entries: [
(
binding: 0,
resource: Buffer((
buffer_id: Id(0, 1, Empty),
offset: 0,
size: None,
)),
)
],
)),
Submit(1, [
RunComputePass(
base: (
commands: [
SetPipeline(Id(0, 1, Empty)),
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
),
]),
],
)

View file

@ -1,36 +0,0 @@
(
features: (bits: 0x0000_0001_0000_0000),
expectations: [
(
name: "basic",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
]),
)
],
actions: [
CreateBuffer(
Id(0, 1, Empty),
(
label: None,
size: 16,
usage: (
bits: 41,
),
mapped_at_creation: false,
),
),
Submit(1, [
ClearBuffer(
dst: Id(0, 1, Empty),
offset: 4,
size: Some(8),
)
]),
],
)

View file

@ -1,34 +0,0 @@
(
features: (bits: 0x0),
expectations: [
(
name: "basic",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x80, 0xBF]),
)
],
actions: [
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("dummy"),
size: 16,
usage: (
bits: 41,
),
mapped_at_creation: false,
),
),
WriteBuffer(
id: Id(0, 1, Empty),
data: "data1.bin",
range: (
start: 0,
end: 16,
),
queued: true,
),
Submit(1, []),
],
)

View file

@ -1,12 +0,0 @@
[[block]]
struct InOutBuffer {
data: [[stride(4)]] array<u32>;
};
[[group(0), binding(0)]]
var<storage> buffer: [[access(read_write)]] InOutBuffer;
[[stage(compute), workgroup_size(1)]]
fn main([[builtin(global_invocation_id)]] global_id: vec3<u32>) {
buffer.data[global_id.x] = buffer.data[global_id.x] + global_id.x;
}

View file

@ -1,170 +0,0 @@
(
features: (bits: 0x0),
expectations: [
// Ensuring that mapping zero-inits buffers.
(
name: "mapped_at_creation: false, with MAP_WRITE",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00]),
),
(
name: "mapped_at_creation: false, without MAP_WRITE",
buffer: (index: 1, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00]),
),
(
name: "partially written buffer",
buffer: (index: 2, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x80, 0xBF,
0x00, 0x00, 0x80, 0xBF,
0x00, 0x00, 0x80, 0x3F,
0x00, 0x00, 0x80, 0x3F,
0x00, 0x00, 0x00, 0x00]),
),
// Ensuring that binding zero-inits buffers
// (by observing correct side effects of compute shader reading & writing values)
(
name: "buffer has correct values",
buffer: (index: 3, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00,
0x02, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00]),
)
],
actions: [
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("mapped_at_creation: false, with MAP_WRITE"),
size: 16,
usage: (
bits: 131, // STORAGE + MAP_READ + MAP_WRITE
),
mapped_at_creation: false,
),
),
CreateBuffer(
Id(1, 1, Empty),
(
label: Some("mapped_at_creation: false, without MAP_WRITE"),
size: 16,
usage: (
bits: 129, // STORAGE + MAP_READ
),
mapped_at_creation: false,
),
),
CreateBuffer(
Id(2, 1, Empty),
(
label: Some("partially written"),
size: 24,
usage: (
bits: 9, // MAP_READ + COPY_DST
),
mapped_at_creation: false,
),
),
WriteBuffer(
id: Id(2, 1, Empty),
data: "data1.bin",
range: (
start: 4,
end: 20,
),
queued: true,
),
CreateShaderModule(
id: Id(0, 1, Empty),
desc: (
label: None,
flags: (bits: 3),
),
data: "buffer-zero-init-for-binding.wgsl",
),
CreateBuffer(Id(3, 1, Empty), (
label: Some("used in binding"),
size: 16,
usage: (
bits: 129, // STORAGE + MAP_READ
),
mapped_at_creation: false,
)),
CreateBindGroupLayout(Id(0, 1, Empty), (
label: None,
entries: [
(
binding: 0,
visibility: (
bits: 4,
),
ty: Buffer(
ty: Storage(
read_only: false,
),
has_dynamic_offset: false,
min_binding_size: Some(16),
),
count: None,
),
],
)),
CreateBindGroup(Id(0, 1, Empty), (
label: None,
layout: Id(0, 1, Empty),
entries: [
(
binding: 0,
resource: Buffer((
buffer_id: Id(3, 1, Empty),
offset: 0,
size: Some(16),
)),
),
],
)),
CreatePipelineLayout(Id(0, 1, Empty), (
label: None,
bind_group_layouts: [
Id(0, 1, Empty),
],
push_constant_ranges: [],
)),
CreateComputePipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
),
),
Submit(1, [
RunComputePass(
base: (
label: None,
commands: [
SetPipeline(Id(0, 1, Empty)),
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
),
Dispatch((4, 1, 1)),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
)
]),
]
)

View file

@ -1,98 +0,0 @@
(
features: (bits: 0x0000_0001_0000_0000),
expectations: [
(
name: "Quad",
buffer: (index: 0, epoch: 1),
offset: 0,
data: File("clear-image.bin", 16384),
),
(
name: "buffer clear",
buffer: (index: 1, epoch: 1),
offset: 0,
data: Raw([
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
]),
)
],
actions: [
CreateTexture(Id(0, 1, Empty), (
label: Some("Output Texture"),
size: (
width: 64,
height: 64,
depth_or_array_layers: 1,
),
mip_level_count: 1,
sample_count: 1,
dimension: D2,
format: Rgba8Unorm,
usage: (
bits: 27,
),
)),
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("Output Buffer"),
size: 16384,
usage: (
bits: 9,
),
mapped_at_creation: false,
),
),
CreateBuffer(
Id(1, 1, Empty),
(
label: Some("Buffer to be cleared"),
size: 16,
usage: (
bits: 41,
),
mapped_at_creation: false,
),
),
Submit(1, [
ClearImage(
dst: Id(0, 1, Empty),
subresource_range: ImageSubresourceRange(
aspect: All,
base_mip_level: 0,
mip_level_count: None,
base_array_layer: 0,
array_layer_count: None,
),
),
CopyTextureToBuffer(
src: (
texture: Id(0, 1, Empty),
mip_level: 0,
array_layer: 0,
),
dst: (
buffer: Id(0, 1, Empty),
layout: (
offset: 0,
bytes_per_row: Some(256),
rows_per_image: None,
),
),
size: (
width: 64,
height: 64,
depth_or_array_layers: 1,
),
),
ClearBuffer(
dst: Id(1, 1, Empty),
offset: 4,
size: Some(8),
)
]),
],
)

Binary data
gfx/wgpu/player/tests/data/clear-image.bin

Binary file not shown.

Binary data
gfx/wgpu/player/tests/data/data1.bin

Binary file not shown.

View file

@ -1,3 +0,0 @@
[[stage(compute), workgroup_size(1)]]
fn main() {
}

View file

@ -1,81 +0,0 @@
(
features: (bits: 0x0000_0000_0000_0008), // PIPELINE_STATISTICS_QUERY
expectations: [
(
name: "Queried number of compute invocations is correct",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([0x2A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]),
),
],
actions: [
CreatePipelineLayout(Id(0, 1, Empty), (
label: Some("empty"),
bind_group_layouts: [],
push_constant_ranges: [],
)),
CreateShaderModule(
id: Id(0, 1, Empty),
desc: (
label: None,
flags: (bits: 3),
),
data: "empty.wgsl",
),
CreateComputePipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
),
),
CreateQuerySet(
id: Id(0, 1, Empty),
desc: (
label: Some("Compute Invocation QuerySet"),
count: 2,
ty: PipelineStatistics((bits: 0x10)), // COMPUTE_SHADER_INVOCATIONS
),
),
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("Compute Invocation Result Buffer"),
size: 8,
usage: (
bits: 9, // COPY_DST | MAP_READ
),
mapped_at_creation: false,
),
),
Submit(1, [
RunComputePass(
base: (
commands: [
SetPipeline(Id(0, 1, Empty)),
BeginPipelineStatisticsQuery(
query_set_id: Id(0, 1, Empty),
query_index: 0,
),
Dispatch((2, 3, 7,)),
EndPipelineStatisticsQuery,
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
),
ResolveQuerySet(
query_set_id: Id(0, 1, Empty),
start_query: 0,
query_count: 1,
destination: Id(0, 1, Empty),
destination_offset: 0,
)
]),
],
)

File diff suppressed because one or more lines are too long.

View file

@ -1,138 +0,0 @@
(
features: (bits: 0x0),
expectations: [
(
name: "Quad",
buffer: (index: 0, epoch: 1),
offset: 0,
data: File("quad.bin", 16384),
)
],
actions: [
CreateShaderModule(
id: Id(0, 1, Empty),
desc: (
label: None,
flags: (bits: 3),
),
data: "quad.wgsl",
),
CreateTexture(Id(0, 1, Empty), (
label: Some("Output Texture"),
size: (
width: 64,
height: 64,
depth_or_array_layers: 1,
),
mip_level_count: 1,
sample_count: 1,
dimension: D2,
format: Rgba8Unorm,
usage: (
bits: 27,
),
)),
CreateTextureView(
id: Id(0, 1, Empty),
parent_id: Id(0, 1, Empty),
desc: (),
),
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("Output Buffer"),
size: 16384,
usage: (
bits: 9,
),
mapped_at_creation: false,
),
),
CreatePipelineLayout(Id(0, 1, Empty), (
label: None,
bind_group_layouts: [],
push_constant_ranges: [],
)),
CreateRenderPipeline(
id: Id(0, 1, Empty),
desc: (
label: None,
layout: Some(Id(0, 1, Empty)),
vertex: (
stage: (
module: Id(0, 1, Empty),
entry_point: "vs_main",
),
buffers: [],
),
fragment: Some((
stage: (
module: Id(0, 1, Empty),
entry_point: "fs_main",
),
targets: [
(
format: Rgba8Unorm,
),
],
)),
),
),
Submit(1, [
RunRenderPass(
base: (
commands: [
SetPipeline(Id(0, 1, Empty)),
Draw(
vertex_count: 3,
instance_count: 1,
first_vertex: 0,
first_instance: 0,
),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
target_colors: [
(
view: Id(0, 1, Empty),
resolve_target: None,
channel: (
load_op: Clear,
store_op: Store,
clear_value: (
r: 0,
g: 0,
b: 0,
a: 1,
),
read_only: false,
),
),
],
target_depth_stencil: None,
),
CopyTextureToBuffer(
src: (
texture: Id(0, 1, Empty),
mip_level: 0,
array_layer: 0,
),
dst: (
buffer: Id(0, 1, Empty),
layout: (
offset: 0,
bytes_per_row: Some(256),
rows_per_image: Some(64),
),
),
size: (
width: 64,
height: 64,
depth_or_array_layers: 1,
),
),
]),
],
)

View file

@ -1,16 +0,0 @@
[[stage(vertex)]]
fn vs_main([[builtin(vertex_index)]] vertex_index: u32) -> [[builtin(position)]] vec4<f32> {
// hacky way to draw a large triangle
let tmp1 = i32(vertex_index) / 2;
let tmp2 = i32(vertex_index) & 1;
let pos = vec2<f32>(
f32(tmp1) * 4.0 - 1.0,
f32(tmp2) * 4.0 - 1.0
);
return vec4<f32>(pos, 0.0, 1.0);
}
[[stage(fragment)]]
fn fs_main() -> [[location(0)]] vec4<f32> {
return vec4<f32>(1.0, 1.0, 1.0, 1.0);
}

View file

@ -1,223 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Tester for WebGPU
* It enumerates the available backends on the system,
 * and runs the tests through them.
*
* Test requirements:
* - all IDs have the backend `Empty`
* - all expected buffers have `MAP_READ` usage
* - last action is `Submit`
* - no swapchain use
!*/
use player::{GlobalPlay, IdentityPassThroughFactory};
use std::{
fs::{read_to_string, File},
io::{Read, Seek, SeekFrom},
path::{Path, PathBuf},
ptr, slice,
};
#[derive(serde::Deserialize)]
struct RawId {
index: u32,
epoch: u32,
}
#[derive(serde::Deserialize)]
enum ExpectedData {
Raw(Vec<u8>),
File(String, usize),
}
impl ExpectedData {
fn len(&self) -> usize {
match self {
ExpectedData::Raw(vec) => vec.len(),
ExpectedData::File(_, size) => *size,
}
}
}
#[derive(serde::Deserialize)]
struct Expectation {
name: String,
buffer: RawId,
offset: wgt::BufferAddress,
data: ExpectedData,
}
#[derive(serde::Deserialize)]
struct Test<'a> {
features: wgt::Features,
expectations: Vec<Expectation>,
actions: Vec<wgc::device::trace::Action<'a>>,
}
extern "C" fn map_callback(status: wgc::resource::BufferMapAsyncStatus, _user_data: *mut u8) {
match status {
wgc::resource::BufferMapAsyncStatus::Success => (),
_ => panic!("Unable to map"),
}
}
impl Test<'_> {
fn load(path: PathBuf, backend: wgt::Backend) -> Self {
let backend_name = match backend {
wgt::Backend::Vulkan => "Vulkan",
wgt::Backend::Metal => "Metal",
wgt::Backend::Dx12 => "Dx12",
wgt::Backend::Dx11 => "Dx11",
wgt::Backend::Gl => "Gl",
_ => unreachable!(),
};
let string = read_to_string(path).unwrap().replace("Empty", backend_name);
ron::de::from_str(&string).unwrap()
}
fn run(
self,
dir: &Path,
global: &wgc::hub::Global<IdentityPassThroughFactory>,
adapter: wgc::id::AdapterId,
test_num: u32,
) {
let backend = adapter.backend();
let device = wgc::id::TypedId::zip(test_num, 0, backend);
let (_, error) = wgc::gfx_select!(adapter => global.adapter_request_device(
adapter,
&wgt::DeviceDescriptor {
label: None,
features: self.features | wgt::Features::MAPPABLE_PRIMARY_BUFFERS,
limits: wgt::Limits::default(),
},
None,
device
));
if let Some(e) = error {
panic!("{:?}", e);
}
let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
println!("\t\t\tRunning...");
for action in self.actions {
wgc::gfx_select!(device => global.process(device, action, dir, &mut command_buffer_id_manager));
}
println!("\t\t\tMapping...");
for expect in &self.expectations {
let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
wgc::gfx_select!(device => global.buffer_map_async(
buffer,
expect.offset .. expect.offset+expect.data.len() as wgt::BufferAddress,
wgc::resource::BufferMapOperation {
host: wgc::device::HostMap::Read,
callback: map_callback,
user_data: ptr::null_mut(),
}
))
.unwrap();
}
println!("\t\t\tWaiting...");
wgc::gfx_select!(device => global.device_poll(device, true)).unwrap();
for expect in self.expectations {
println!("\t\t\tChecking {}", expect.name);
let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
let (ptr, size) =
wgc::gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, Some(expect.data.len() as wgt::BufferAddress)))
.unwrap();
let contents = unsafe { slice::from_raw_parts(ptr, size as usize) };
let expected_data = match expect.data {
ExpectedData::Raw(vec) => vec,
ExpectedData::File(name, size) => {
let mut bin = vec![0; size];
let mut file = File::open(dir.join(name)).unwrap();
file.seek(SeekFrom::Start(expect.offset)).unwrap();
file.read_exact(&mut bin[..]).unwrap();
bin
}
};
if &expected_data[..] != contents {
panic!(
"Test expectation is not met!\nBuffer content was:\n{:?}\nbut expected:\n{:?}",
contents, expected_data
);
}
}
wgc::gfx_select!(device => global.clear_backend(()));
}
}
#[derive(serde::Deserialize)]
struct Corpus {
backends: wgt::BackendBit,
tests: Vec<String>,
}
const BACKENDS: &[wgt::Backend] = &[
wgt::Backend::Vulkan,
wgt::Backend::Metal,
wgt::Backend::Dx12,
wgt::Backend::Dx11,
wgt::Backend::Gl,
];
impl Corpus {
fn run_from(path: PathBuf) {
println!("Corpus {:?}", path);
let dir = path.parent().unwrap();
let corpus: Corpus = ron::de::from_reader(File::open(&path).unwrap()).unwrap();
let global = wgc::hub::Global::new("test", IdentityPassThroughFactory, corpus.backends);
for &backend in BACKENDS {
if !corpus.backends.contains(backend.into()) {
continue;
}
let adapter = match global.request_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::LowPower,
compatible_surface: None,
},
wgc::instance::AdapterInputs::IdSet(
&[wgc::id::TypedId::zip(0, 0, backend)],
|id| id.backend(),
),
) {
Ok(adapter) => adapter,
Err(_) => continue,
};
println!("\tBackend {:?}", backend);
let supported_features =
wgc::gfx_select!(adapter => global.adapter_features(adapter)).unwrap();
let mut test_num = 0;
for test_path in &corpus.tests {
println!("\t\tTest '{:?}'", test_path);
let test = Test::load(dir.join(test_path), adapter.backend());
if !supported_features.contains(test.features) {
println!(
"\t\tSkipped due to missing features {:?}",
test.features - supported_features
);
continue;
}
test.run(dir, &global, adapter, test_num);
test_num += 1;
}
}
}
}
#[test]
fn test_api() {
env_logger::init();
Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron"))
}

View file

@ -1,287 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{CommandBuffer, CommandEncoderStatus};
use crate::{
device::DeviceError, hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap,
PrivateFeatures, Stored, SubmissionIndex,
};
#[cfg(debug_assertions)]
use crate::LabelHelpers;
use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
use parking_lot::Mutex;
use thiserror::Error;
use std::thread;
const GROW_AMOUNT: usize = 20;
#[derive(Debug)]
struct CommandPool<B: hal::Backend> {
raw: B::CommandPool,
total: usize,
available: Vec<B::CommandBuffer>,
pending: Vec<(B::CommandBuffer, SubmissionIndex)>,
}
impl<B: hal::Backend> CommandPool<B> {
fn maintain(&mut self, last_done_index: SubmissionIndex) {
for i in (0..self.pending.len()).rev() {
if self.pending[i].1 <= last_done_index {
let (cmd_buf, index) = self.pending.swap_remove(i);
log::trace!(
"recycling cmdbuf submitted in {} when {} is last done",
index,
last_done_index,
);
self.recycle(cmd_buf);
}
}
}
fn recycle(&mut self, mut raw: B::CommandBuffer) {
unsafe {
raw.reset(false);
}
self.available.push(raw);
}
fn allocate(&mut self) -> B::CommandBuffer {
if self.available.is_empty() {
self.total += GROW_AMOUNT;
unsafe {
self.raw.allocate(
GROW_AMOUNT,
hal::command::Level::Primary,
&mut self.available,
)
};
}
self.available.pop().unwrap()
}
fn destroy(mut self, device: &B::Device) {
unsafe {
self.raw.free(self.available.into_iter());
device.destroy_command_pool(self.raw);
}
}
}
#[derive(Debug)]
struct Inner<B: hal::Backend> {
pools: FastHashMap<thread::ThreadId, CommandPool<B>>,
}
#[derive(Debug)]
pub struct CommandAllocator<B: hal::Backend> {
queue_family: hal::queue::QueueFamilyId,
internal_thread_id: thread::ThreadId,
inner: Mutex<Inner<B>>,
}
impl<B: GfxBackend> CommandAllocator<B> {
#[allow(clippy::too_many_arguments)]
pub(crate) fn allocate(
&self,
device_id: Stored<DeviceId>,
device: &B::Device,
limits: wgt::Limits,
downlevel: wgt::DownlevelProperties,
features: wgt::Features,
private_features: PrivateFeatures,
label: &crate::Label,
#[cfg(feature = "trace")] enable_tracing: bool,
) -> Result<CommandBuffer<B>, CommandAllocatorError> {
//debug_assert_eq!(device_id.backend(), B::VARIANT);
let thread_id = thread::current().id();
let mut inner = self.inner.lock();
use std::collections::hash_map::Entry;
let pool = match inner.pools.entry(thread_id) {
Entry::Vacant(e) => {
log::info!("Starting on thread {:?}", thread_id);
let raw = unsafe {
device
.create_command_pool(
self.queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(DeviceError::OutOfMemory))?
};
e.insert(CommandPool {
raw,
total: 0,
available: Vec::new(),
pending: Vec::new(),
})
}
Entry::Occupied(e) => e.into_mut(),
};
//Note: we have to allocate the first buffer right here, or otherwise
// the pool may be cleaned up by maintenance called from another thread.
Ok(CommandBuffer {
raw: vec![pool.allocate()],
status: CommandEncoderStatus::Recording,
recorded_thread_id: thread_id,
device_id,
trackers: TrackerSet::new(B::VARIANT),
used_swap_chains: Default::default(),
buffer_memory_init_actions: Default::default(),
limits,
downlevel,
private_features,
support_fill_buffer_texture: features.contains(wgt::Features::CLEAR_COMMANDS),
has_labels: label.is_some(),
#[cfg(feature = "trace")]
commands: if enable_tracing {
Some(Vec::new())
} else {
None
},
#[cfg(debug_assertions)]
label: label.to_string_or_default(),
})
}
}
impl<B: hal::Backend> CommandAllocator<B> {
pub fn new(
queue_family: hal::queue::QueueFamilyId,
device: &B::Device,
) -> Result<Self, CommandAllocatorError> {
let internal_thread_id = thread::current().id();
log::info!("Starting on (internal) thread {:?}", internal_thread_id);
let mut pools = FastHashMap::default();
pools.insert(
internal_thread_id,
CommandPool {
raw: unsafe {
device
.create_command_pool(
queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(DeviceError::OutOfMemory))?
},
total: 0,
available: Vec::new(),
pending: Vec::new(),
},
);
Ok(Self {
queue_family,
internal_thread_id,
inner: Mutex::new(Inner { pools }),
})
}
fn allocate_for_thread_id(&self, thread_id: thread::ThreadId) -> B::CommandBuffer {
let mut inner = self.inner.lock();
inner.pools.get_mut(&thread_id).unwrap().allocate()
}
pub fn allocate_internal(&self) -> B::CommandBuffer {
self.allocate_for_thread_id(self.internal_thread_id)
}
pub fn extend(&self, cmd_buf: &CommandBuffer<B>) -> B::CommandBuffer {
self.allocate_for_thread_id(cmd_buf.recorded_thread_id)
}
pub fn discard_internal(&self, raw: B::CommandBuffer) {
let mut inner = self.inner.lock();
inner
.pools
.get_mut(&self.internal_thread_id)
.unwrap()
.recycle(raw);
}
pub fn discard(&self, mut cmd_buf: CommandBuffer<B>) {
cmd_buf.trackers.clear();
let mut inner = self.inner.lock();
let pool = inner.pools.get_mut(&cmd_buf.recorded_thread_id).unwrap();
for raw in cmd_buf.raw {
pool.recycle(raw);
}
}
pub fn after_submit_internal(&self, raw: B::CommandBuffer, submit_index: SubmissionIndex) {
let mut inner = self.inner.lock();
inner
.pools
.get_mut(&self.internal_thread_id)
.unwrap()
.pending
.push((raw, submit_index));
}
pub fn after_submit(
&self,
cmd_buf: CommandBuffer<B>,
device: &B::Device,
submit_index: SubmissionIndex,
) {
// Record this command buffer as pending
let mut inner = self.inner.lock();
let clear_label = cmd_buf.has_labels;
inner
.pools
.get_mut(&cmd_buf.recorded_thread_id)
.unwrap()
.pending
.extend(cmd_buf.raw.into_iter().map(|mut raw| {
if clear_label {
unsafe { device.set_command_buffer_name(&mut raw, "") };
}
(raw, submit_index)
}));
}
pub fn maintain(&self, device: &B::Device, last_done_index: SubmissionIndex) {
profiling::scope!("maintain", "CommandAllocator");
let mut inner = self.inner.lock();
let mut remove_threads = Vec::new();
for (&thread_id, pool) in inner.pools.iter_mut() {
pool.maintain(last_done_index);
if pool.total == pool.available.len() && thread_id != self.internal_thread_id {
assert!(pool.pending.is_empty());
remove_threads.push(thread_id);
}
}
for thread_id in remove_threads {
log::info!("Removing from thread {:?}", thread_id);
let pool = inner.pools.remove(&thread_id).unwrap();
pool.destroy(device);
}
}
pub fn destroy(self, device: &B::Device) {
let mut inner = self.inner.lock();
for (_, mut pool) in inner.pools.drain() {
while let Some((raw, _)) = pool.pending.pop() {
pool.recycle(raw);
}
if pool.total != pool.available.len() {
log::error!(
"Some command buffers are still recorded, only tracking {} / {}",
pool.available.len(),
pool.total
);
}
pool.destroy(device);
}
}
}
#[derive(Clone, Debug, Error)]
pub enum CommandAllocatorError {
#[error(transparent)]
Device(#[from] DeviceError),
}

View file

@ -1,902 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::{LoadOp, PassChannel, StoreOp},
pipeline::ColorStateError,
resource, PrivateFeatures,
};
use std::convert::TryInto;
pub fn map_adapter_info(
info: hal::adapter::AdapterInfo,
backend: wgt::Backend,
) -> wgt::AdapterInfo {
use hal::adapter::DeviceType as Dt;
wgt::AdapterInfo {
name: info.name,
vendor: info.vendor,
device: info.device,
device_type: match info.device_type {
Dt::Other => wgt::DeviceType::Other,
Dt::IntegratedGpu => wgt::DeviceType::IntegratedGpu,
Dt::DiscreteGpu => wgt::DeviceType::DiscreteGpu,
Dt::VirtualGpu => wgt::DeviceType::VirtualGpu,
Dt::Cpu => wgt::DeviceType::Cpu,
},
backend,
}
}
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
use hal::buffer::Usage as U;
use hal::memory::Properties as P;
use wgt::BufferUsage as W;
let mut hal_memory = P::empty();
if usage.contains(W::MAP_READ) {
hal_memory |= P::CPU_VISIBLE | P::CPU_CACHED;
}
if usage.contains(W::MAP_WRITE) {
hal_memory |= P::CPU_VISIBLE;
}
let mut hal_usage = U::empty();
if usage.contains(W::COPY_SRC) {
hal_usage |= U::TRANSFER_SRC;
}
if usage.contains(W::COPY_DST) {
hal_usage |= U::TRANSFER_DST;
}
if usage.contains(W::INDEX) {
hal_usage |= U::INDEX;
}
if usage.contains(W::VERTEX) {
hal_usage |= U::VERTEX;
}
if usage.contains(W::UNIFORM) {
hal_usage |= U::UNIFORM;
}
if usage.contains(W::STORAGE) {
hal_usage |= U::STORAGE;
}
if usage.contains(W::INDIRECT) {
hal_usage |= U::INDIRECT;
}
(hal_usage, hal_memory)
}
pub fn map_texture_usage(
usage: wgt::TextureUsage,
aspects: hal::format::Aspects,
) -> hal::image::Usage {
use hal::image::Usage as U;
use wgt::TextureUsage as W;
let mut value = U::empty();
if usage.contains(W::COPY_SRC) {
value |= U::TRANSFER_SRC;
}
if usage.contains(W::COPY_DST) {
value |= U::TRANSFER_DST;
}
if usage.contains(W::SAMPLED) {
value |= U::SAMPLED;
}
if usage.contains(W::STORAGE) {
value |= U::STORAGE;
}
if usage.contains(W::RENDER_ATTACHMENT) {
if aspects.intersects(hal::format::Aspects::DEPTH | hal::format::Aspects::STENCIL) {
value |= U::DEPTH_STENCIL_ATTACHMENT;
} else {
value |= U::COLOR_ATTACHMENT;
}
}
// Note: TextureUsage::Present does not need to be handled explicitly
// TODO: HAL Transient Attachment, HAL Input Attachment
value
}
pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::DescriptorType {
use hal::pso;
use wgt::BindingType as Bt;
match binding.ty {
Bt::Buffer {
ty,
has_dynamic_offset,
min_binding_size: _,
} => pso::DescriptorType::Buffer {
ty: match ty {
wgt::BufferBindingType::Uniform => pso::BufferDescriptorType::Uniform,
wgt::BufferBindingType::Storage { read_only } => {
pso::BufferDescriptorType::Storage { read_only }
}
},
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: has_dynamic_offset,
},
},
Bt::Sampler { .. } => pso::DescriptorType::Sampler,
Bt::Texture { .. } => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
Bt::StorageTexture { access, .. } => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Storage {
read_only: match access {
wgt::StorageTextureAccess::ReadOnly => true,
_ => false,
},
},
},
}
}
pub fn map_shader_stage_flags(shader_stage_flags: wgt::ShaderStage) -> hal::pso::ShaderStageFlags {
use hal::pso::ShaderStageFlags as H;
use wgt::ShaderStage as Ss;
let mut value = H::empty();
if shader_stage_flags.contains(Ss::VERTEX) {
value |= H::VERTEX;
}
if shader_stage_flags.contains(Ss::FRAGMENT) {
value |= H::FRAGMENT;
}
if shader_stage_flags.contains(Ss::COMPUTE) {
value |= H::COMPUTE;
}
value
}
pub fn map_hal_flags_to_shader_stage(
shader_stage_flags: hal::pso::ShaderStageFlags,
) -> wgt::ShaderStage {
use hal::pso::ShaderStageFlags as H;
use wgt::ShaderStage as Ss;
let mut value = Ss::empty();
if shader_stage_flags.contains(H::VERTEX) {
value |= Ss::VERTEX;
}
if shader_stage_flags.contains(H::FRAGMENT) {
value |= Ss::FRAGMENT;
}
if shader_stage_flags.contains(H::COMPUTE) {
value |= Ss::COMPUTE;
}
value
}
pub fn map_extent(extent: &wgt::Extent3d, dim: wgt::TextureDimension) -> hal::image::Extent {
hal::image::Extent {
width: extent.width,
height: extent.height,
depth: match dim {
wgt::TextureDimension::D1 | wgt::TextureDimension::D2 => 1,
wgt::TextureDimension::D3 => extent.depth_or_array_layers,
},
}
}
pub fn map_primitive_topology(primitive_topology: wgt::PrimitiveTopology) -> hal::pso::Primitive {
use hal::pso::Primitive as H;
use wgt::PrimitiveTopology as Pt;
match primitive_topology {
Pt::PointList => H::PointList,
Pt::LineList => H::LineList,
Pt::LineStrip => H::LineStrip,
Pt::TriangleList => H::TriangleList,
Pt::TriangleStrip => H::TriangleStrip,
}
}
pub fn map_color_target_state(
desc: &wgt::ColorTargetState,
) -> Result<hal::pso::ColorBlendDesc, ColorStateError> {
let color_mask = desc.write_mask;
let blend = desc
.blend
.as_ref()
.map(|bs| {
Ok(hal::pso::BlendState {
color: map_blend_component(&bs.color)?,
alpha: map_blend_component(&bs.alpha)?,
})
})
.transpose()?;
Ok(hal::pso::ColorBlendDesc {
mask: map_color_write_flags(color_mask),
blend,
})
}
fn map_color_write_flags(flags: wgt::ColorWrite) -> hal::pso::ColorMask {
use hal::pso::ColorMask as H;
use wgt::ColorWrite as Cw;
let mut value = H::empty();
if flags.contains(Cw::RED) {
value |= H::RED;
}
if flags.contains(Cw::GREEN) {
value |= H::GREEN;
}
if flags.contains(Cw::BLUE) {
value |= H::BLUE;
}
if flags.contains(Cw::ALPHA) {
value |= H::ALPHA;
}
value
}
fn map_blend_component(
component: &wgt::BlendComponent,
) -> Result<hal::pso::BlendOp, ColorStateError> {
use hal::pso::BlendOp as H;
use wgt::BlendOperation as Bo;
Ok(match *component {
wgt::BlendComponent {
operation: Bo::Add,
src_factor,
dst_factor,
} => H::Add {
src: map_blend_factor(src_factor),
dst: map_blend_factor(dst_factor),
},
wgt::BlendComponent {
operation: Bo::Subtract,
src_factor,
dst_factor,
} => H::Sub {
src: map_blend_factor(src_factor),
dst: map_blend_factor(dst_factor),
},
wgt::BlendComponent {
operation: Bo::ReverseSubtract,
src_factor,
dst_factor,
} => H::RevSub {
src: map_blend_factor(src_factor),
dst: map_blend_factor(dst_factor),
},
wgt::BlendComponent {
operation: Bo::Min,
src_factor: wgt::BlendFactor::One,
dst_factor: wgt::BlendFactor::One,
} => H::Min,
wgt::BlendComponent {
operation: Bo::Max,
src_factor: wgt::BlendFactor::One,
dst_factor: wgt::BlendFactor::One,
} => H::Max,
_ => return Err(ColorStateError::InvalidMinMaxBlendFactors(*component)),
})
}
fn map_blend_factor(blend_factor: wgt::BlendFactor) -> hal::pso::Factor {
use hal::pso::Factor as H;
use wgt::BlendFactor as Bf;
match blend_factor {
Bf::Zero => H::Zero,
Bf::One => H::One,
Bf::Src => H::SrcColor,
Bf::OneMinusSrc => H::OneMinusSrcColor,
Bf::SrcAlpha => H::SrcAlpha,
Bf::OneMinusSrcAlpha => H::OneMinusSrcAlpha,
Bf::Dst => H::DstColor,
Bf::OneMinusDst => H::OneMinusDstColor,
Bf::DstAlpha => H::DstAlpha,
Bf::OneMinusDstAlpha => H::OneMinusDstAlpha,
Bf::SrcAlphaSaturated => H::SrcAlphaSaturate,
Bf::Constant => H::ConstColor,
Bf::OneMinusConstant => H::OneMinusConstColor,
}
}
pub fn map_depth_stencil_state(desc: &wgt::DepthStencilState) -> hal::pso::DepthStencilDesc {
hal::pso::DepthStencilDesc {
depth: if desc.is_depth_enabled() {
Some(hal::pso::DepthTest {
fun: map_compare_function(desc.depth_compare),
write: desc.depth_write_enabled,
})
} else {
None
},
depth_bounds: false, // TODO
stencil: if desc.stencil.is_enabled() {
let s = &desc.stencil;
Some(hal::pso::StencilTest {
faces: hal::pso::Sided {
front: map_stencil_face(&s.front),
back: map_stencil_face(&s.back),
},
read_masks: hal::pso::State::Static(hal::pso::Sided::new(s.read_mask)),
write_masks: hal::pso::State::Static(hal::pso::Sided::new(s.write_mask)),
reference_values: if s.needs_ref_value() {
hal::pso::State::Dynamic
} else {
hal::pso::State::Static(hal::pso::Sided::new(0))
},
})
} else {
None
},
}
}
fn map_stencil_face(stencil_state_face_desc: &wgt::StencilFaceState) -> hal::pso::StencilFace {
hal::pso::StencilFace {
fun: map_compare_function(stencil_state_face_desc.compare),
op_fail: map_stencil_operation(stencil_state_face_desc.fail_op),
op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op),
op_pass: map_stencil_operation(stencil_state_face_desc.pass_op),
}
}
pub fn map_compare_function(compare_function: wgt::CompareFunction) -> hal::pso::Comparison {
use hal::pso::Comparison as H;
use wgt::CompareFunction as Cf;
match compare_function {
Cf::Never => H::Never,
Cf::Less => H::Less,
Cf::Equal => H::Equal,
Cf::LessEqual => H::LessEqual,
Cf::Greater => H::Greater,
Cf::NotEqual => H::NotEqual,
Cf::GreaterEqual => H::GreaterEqual,
Cf::Always => H::Always,
}
}
fn map_stencil_operation(stencil_operation: wgt::StencilOperation) -> hal::pso::StencilOp {
use hal::pso::StencilOp as H;
use wgt::StencilOperation as So;
match stencil_operation {
So::Keep => H::Keep,
So::Zero => H::Zero,
So::Replace => H::Replace,
So::Invert => H::Invert,
So::IncrementClamp => H::IncrementClamp,
So::DecrementClamp => H::DecrementClamp,
So::IncrementWrap => H::IncrementWrap,
So::DecrementWrap => H::DecrementWrap,
}
}
pub(crate) fn map_texture_format(
texture_format: wgt::TextureFormat,
private_features: PrivateFeatures,
) -> hal::format::Format {
use hal::format::Format as H;
use wgt::TextureFormat as Tf;
match texture_format {
// Normal 8 bit formats
Tf::R8Unorm => H::R8Unorm,
Tf::R8Snorm => H::R8Snorm,
Tf::R8Uint => H::R8Uint,
Tf::R8Sint => H::R8Sint,
// Normal 16 bit formats
Tf::R16Uint => H::R16Uint,
Tf::R16Sint => H::R16Sint,
Tf::R16Float => H::R16Sfloat,
Tf::Rg8Unorm => H::Rg8Unorm,
Tf::Rg8Snorm => H::Rg8Snorm,
Tf::Rg8Uint => H::Rg8Uint,
Tf::Rg8Sint => H::Rg8Sint,
// Normal 32 bit formats
Tf::R32Uint => H::R32Uint,
Tf::R32Sint => H::R32Sint,
Tf::R32Float => H::R32Sfloat,
Tf::Rg16Uint => H::Rg16Uint,
Tf::Rg16Sint => H::Rg16Sint,
Tf::Rg16Float => H::Rg16Sfloat,
Tf::Rgba8Unorm => H::Rgba8Unorm,
Tf::Rgba8UnormSrgb => H::Rgba8Srgb,
Tf::Rgba8Snorm => H::Rgba8Snorm,
Tf::Rgba8Uint => H::Rgba8Uint,
Tf::Rgba8Sint => H::Rgba8Sint,
Tf::Bgra8Unorm => H::Bgra8Unorm,
Tf::Bgra8UnormSrgb => H::Bgra8Srgb,
// Packed 32 bit formats
Tf::Rgb10a2Unorm => H::A2r10g10b10Unorm,
Tf::Rg11b10Float => H::B10g11r11Ufloat,
// Normal 64 bit formats
Tf::Rg32Uint => H::Rg32Uint,
Tf::Rg32Sint => H::Rg32Sint,
Tf::Rg32Float => H::Rg32Sfloat,
Tf::Rgba16Uint => H::Rgba16Uint,
Tf::Rgba16Sint => H::Rgba16Sint,
Tf::Rgba16Float => H::Rgba16Sfloat,
// Normal 128 bit formats
Tf::Rgba32Uint => H::Rgba32Uint,
Tf::Rgba32Sint => H::Rgba32Sint,
Tf::Rgba32Float => H::Rgba32Sfloat,
// Depth and stencil formats
Tf::Depth32Float => H::D32Sfloat,
Tf::Depth24Plus => {
if private_features.texture_d24 {
H::X8D24Unorm
} else {
H::D32Sfloat
}
}
Tf::Depth24PlusStencil8 => {
if private_features.texture_d24_s8 {
H::D24UnormS8Uint
} else {
H::D32SfloatS8Uint
}
}
// BCn compressed formats
Tf::Bc1RgbaUnorm => H::Bc1RgbaUnorm,
Tf::Bc1RgbaUnormSrgb => H::Bc1RgbaSrgb,
Tf::Bc2RgbaUnorm => H::Bc2Unorm,
Tf::Bc2RgbaUnormSrgb => H::Bc2Srgb,
Tf::Bc3RgbaUnorm => H::Bc3Unorm,
Tf::Bc3RgbaUnormSrgb => H::Bc3Srgb,
Tf::Bc4RUnorm => H::Bc4Unorm,
Tf::Bc4RSnorm => H::Bc4Snorm,
Tf::Bc5RgUnorm => H::Bc5Unorm,
Tf::Bc5RgSnorm => H::Bc5Snorm,
Tf::Bc6hRgbSfloat => H::Bc6hSfloat,
Tf::Bc6hRgbUfloat => H::Bc6hUfloat,
Tf::Bc7RgbaUnorm => H::Bc7Unorm,
Tf::Bc7RgbaUnormSrgb => H::Bc7Srgb,
// ETC compressed formats
Tf::Etc2RgbUnorm => H::Etc2R8g8b8Unorm,
Tf::Etc2RgbUnormSrgb => H::Etc2R8g8b8Srgb,
Tf::Etc2RgbA1Unorm => H::Etc2R8g8b8a1Unorm,
Tf::Etc2RgbA1UnormSrgb => H::Etc2R8g8b8a1Srgb,
Tf::Etc2RgbA8Unorm => H::Etc2R8g8b8a8Unorm,
Tf::Etc2RgbA8UnormSrgb => H::Etc2R8g8b8a8Unorm,
Tf::EacRUnorm => H::EacR11Unorm,
Tf::EacRSnorm => H::EacR11Snorm,
Tf::EtcRgUnorm => H::EacR11g11Unorm,
Tf::EtcRgSnorm => H::EacR11g11Snorm,
// ASTC compressed formats
Tf::Astc4x4RgbaUnorm => H::Astc4x4Srgb,
Tf::Astc4x4RgbaUnormSrgb => H::Astc4x4Srgb,
Tf::Astc5x4RgbaUnorm => H::Astc5x4Unorm,
Tf::Astc5x4RgbaUnormSrgb => H::Astc5x4Srgb,
Tf::Astc5x5RgbaUnorm => H::Astc5x5Unorm,
Tf::Astc5x5RgbaUnormSrgb => H::Astc5x5Srgb,
Tf::Astc6x5RgbaUnorm => H::Astc6x5Unorm,
Tf::Astc6x5RgbaUnormSrgb => H::Astc6x5Srgb,
Tf::Astc6x6RgbaUnorm => H::Astc6x6Unorm,
Tf::Astc6x6RgbaUnormSrgb => H::Astc6x6Srgb,
Tf::Astc8x5RgbaUnorm => H::Astc8x5Unorm,
Tf::Astc8x5RgbaUnormSrgb => H::Astc8x5Srgb,
Tf::Astc8x6RgbaUnorm => H::Astc8x6Unorm,
Tf::Astc8x6RgbaUnormSrgb => H::Astc8x6Srgb,
Tf::Astc10x5RgbaUnorm => H::Astc10x5Unorm,
Tf::Astc10x5RgbaUnormSrgb => H::Astc10x5Srgb,
Tf::Astc10x6RgbaUnorm => H::Astc10x6Unorm,
Tf::Astc10x6RgbaUnormSrgb => H::Astc10x6Srgb,
Tf::Astc8x8RgbaUnorm => H::Astc8x8Unorm,
Tf::Astc8x8RgbaUnormSrgb => H::Astc8x8Srgb,
Tf::Astc10x8RgbaUnorm => H::Astc10x8Unorm,
Tf::Astc10x8RgbaUnormSrgb => H::Astc10x8Srgb,
Tf::Astc10x10RgbaUnorm => H::Astc10x10Unorm,
Tf::Astc10x10RgbaUnormSrgb => H::Astc10x10Srgb,
Tf::Astc12x10RgbaUnorm => H::Astc12x10Unorm,
Tf::Astc12x10RgbaUnormSrgb => H::Astc12x10Srgb,
Tf::Astc12x12RgbaUnorm => H::Astc12x12Unorm,
Tf::Astc12x12RgbaUnormSrgb => H::Astc12x12Srgb,
}
}
pub fn map_vertex_format(vertex_format: wgt::VertexFormat) -> hal::format::Format {
use hal::format::Format as H;
use wgt::VertexFormat as Vf;
match vertex_format {
Vf::Uint8x2 => H::Rg8Uint,
Vf::Uint8x4 => H::Rgba8Uint,
Vf::Sint8x2 => H::Rg8Sint,
Vf::Sint8x4 => H::Rgba8Sint,
Vf::Unorm8x2 => H::Rg8Unorm,
Vf::Unorm8x4 => H::Rgba8Unorm,
Vf::Snorm8x2 => H::Rg8Snorm,
Vf::Snorm8x4 => H::Rgba8Snorm,
Vf::Uint16x2 => H::Rg16Uint,
Vf::Uint16x4 => H::Rgba16Uint,
Vf::Sint16x2 => H::Rg16Sint,
Vf::Sint16x4 => H::Rgba16Sint,
Vf::Unorm16x2 => H::Rg16Unorm,
Vf::Unorm16x4 => H::Rgba16Unorm,
Vf::Snorm16x2 => H::Rg16Snorm,
Vf::Snorm16x4 => H::Rgba16Snorm,
Vf::Float16x2 => H::Rg16Sfloat,
Vf::Float16x4 => H::Rgba16Sfloat,
Vf::Float32 => H::R32Sfloat,
Vf::Float32x2 => H::Rg32Sfloat,
Vf::Float32x3 => H::Rgb32Sfloat,
Vf::Float32x4 => H::Rgba32Sfloat,
Vf::Uint32 => H::R32Uint,
Vf::Uint32x2 => H::Rg32Uint,
Vf::Uint32x3 => H::Rgb32Uint,
Vf::Uint32x4 => H::Rgba32Uint,
Vf::Sint32 => H::R32Sint,
Vf::Sint32x2 => H::Rg32Sint,
Vf::Sint32x3 => H::Rgb32Sint,
Vf::Sint32x4 => H::Rgba32Sint,
Vf::Float64 => H::R64Sfloat,
Vf::Float64x2 => H::Rg64Sfloat,
Vf::Float64x3 => H::Rgb64Sfloat,
Vf::Float64x4 => H::Rgba64Sfloat,
}
}
pub fn is_power_of_two(val: u32) -> bool {
val != 0 && (val & (val - 1)) == 0
}
pub fn is_valid_copy_src_texture_format(format: wgt::TextureFormat) -> bool {
use wgt::TextureFormat as Tf;
match format {
Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
_ => true,
}
}
pub fn is_valid_copy_dst_texture_format(format: wgt::TextureFormat) -> bool {
use wgt::TextureFormat as Tf;
match format {
Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
_ => true,
}
}
pub fn map_texture_dimension_size(
dimension: wgt::TextureDimension,
wgt::Extent3d {
width,
height,
depth_or_array_layers,
}: wgt::Extent3d,
sample_size: u32,
limits: &wgt::Limits,
) -> Result<hal::image::Kind, resource::TextureDimensionError> {
use hal::image::Kind as H;
use resource::{TextureDimensionError as Tde, TextureErrorDimension as Ted};
use wgt::TextureDimension::*;
let layers = depth_or_array_layers.try_into().unwrap_or(!0);
let (kind, extent_limits, sample_limit) = match dimension {
D1 => (
H::D1(width, layers),
[
limits.max_texture_dimension_1d,
1,
limits.max_texture_array_layers,
],
1,
),
D2 => (
H::D2(width, height, layers, sample_size as u8),
[
limits.max_texture_dimension_2d,
limits.max_texture_dimension_2d,
limits.max_texture_array_layers,
],
32,
),
D3 => (
H::D3(width, height, depth_or_array_layers),
[
limits.max_texture_dimension_3d,
limits.max_texture_dimension_3d,
limits.max_texture_dimension_3d,
],
1,
),
};
for (&dim, (&given, &limit)) in [Ted::X, Ted::Y, Ted::Z].iter().zip(
[width, height, depth_or_array_layers]
.iter()
.zip(extent_limits.iter()),
) {
if given == 0 {
return Err(Tde::Zero(dim));
}
if given > limit {
return Err(Tde::LimitExceeded { dim, given, limit });
}
}
if sample_size == 0 || sample_size > sample_limit || !is_power_of_two(sample_size) {
return Err(Tde::InvalidSampleCount(sample_size));
}
Ok(kind)
}
pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
use hal::image::ViewKind as H;
use wgt::TextureViewDimension::*;
match dimension {
D1 => H::D1,
D2 => H::D2,
D2Array => H::D2Array,
Cube => H::Cube,
CubeArray => H::CubeArray,
D3 => H::D3,
}
}
pub(crate) fn map_buffer_state(usage: resource::BufferUse) -> hal::buffer::State {
use crate::resource::BufferUse as W;
use hal::buffer::Access as A;
let mut access = A::empty();
if usage.contains(W::MAP_READ) {
access |= A::HOST_READ;
}
if usage.contains(W::MAP_WRITE) {
access |= A::HOST_WRITE;
}
if usage.contains(W::COPY_SRC) {
access |= A::TRANSFER_READ;
}
if usage.contains(W::COPY_DST) {
access |= A::TRANSFER_WRITE;
}
if usage.contains(W::INDEX) {
access |= A::INDEX_BUFFER_READ;
}
if usage.contains(W::VERTEX) {
access |= A::VERTEX_BUFFER_READ;
}
if usage.contains(W::UNIFORM) {
access |= A::UNIFORM_READ | A::SHADER_READ;
}
if usage.contains(W::STORAGE_LOAD) {
access |= A::SHADER_READ;
}
if usage.contains(W::STORAGE_STORE) {
access |= A::SHADER_READ | A::SHADER_WRITE;
}
if usage.contains(W::INDIRECT) {
access |= A::INDIRECT_COMMAND_READ;
}
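// e.g. a buffer used as COPY_DST | VERTEX accumulates TRANSFER_WRITE | VERTEX_BUFFER_READ.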
access
}
pub(crate) fn map_texture_state(
usage: resource::TextureUse,
aspects: hal::format::Aspects,
) -> hal::image::State {
use crate::resource::TextureUse as W;
use hal::image::{Access as A, Layout as L};
let is_color = aspects.contains(hal::format::Aspects::COLOR);
let layout = match usage {
W::UNINITIALIZED => return (A::empty(), L::Undefined),
W::COPY_SRC => L::TransferSrcOptimal,
W::COPY_DST => L::TransferDstOptimal,
W::SAMPLED if is_color => L::ShaderReadOnlyOptimal,
W::ATTACHMENT_READ | W::ATTACHMENT_WRITE if is_color => L::ColorAttachmentOptimal,
_ if is_color => L::General,
W::ATTACHMENT_WRITE => L::DepthStencilAttachmentOptimal,
_ => L::DepthStencilReadOnlyOptimal,
};
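// e.g. a color texture in the SAMPLED state gets ShaderReadOnlyOptimal above and
// SHADER_READ access below.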
let mut access = A::empty();
if usage.contains(W::COPY_SRC) {
access |= A::TRANSFER_READ;
}
if usage.contains(W::COPY_DST) {
access |= A::TRANSFER_WRITE;
}
if usage.contains(W::SAMPLED) {
access |= A::SHADER_READ;
}
if usage.contains(W::ATTACHMENT_READ) {
access |= if is_color {
A::COLOR_ATTACHMENT_READ
} else {
A::DEPTH_STENCIL_ATTACHMENT_READ
};
}
if usage.contains(W::ATTACHMENT_WRITE) {
access |= if is_color {
A::COLOR_ATTACHMENT_WRITE
} else {
A::DEPTH_STENCIL_ATTACHMENT_WRITE
};
}
if usage.contains(W::STORAGE_LOAD) {
access |= A::SHADER_READ;
}
if usage.contains(W::STORAGE_STORE) {
access |= A::SHADER_WRITE;
}
(access, layout)
}
pub fn map_query_type(ty: &wgt::QueryType) -> (hal::query::Type, u32) {
match *ty {
wgt::QueryType::PipelineStatistics(pipeline_statistics) => {
let mut ps = hal::query::PipelineStatistic::empty();
ps.set(
hal::query::PipelineStatistic::VERTEX_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::VERTEX_SHADER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::CLIPPING_INVOCATIONS,
pipeline_statistics.contains(wgt::PipelineStatisticsTypes::CLIPPER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::CLIPPING_PRIMITIVES,
pipeline_statistics.contains(wgt::PipelineStatisticsTypes::CLIPPER_PRIMITIVES_OUT),
);
ps.set(
hal::query::PipelineStatistic::FRAGMENT_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::FRAGMENT_SHADER_INVOCATIONS),
);
ps.set(
hal::query::PipelineStatistic::COMPUTE_SHADER_INVOCATIONS,
pipeline_statistics
.contains(wgt::PipelineStatisticsTypes::COMPUTE_SHADER_INVOCATIONS),
);
(
hal::query::Type::PipelineStatistics(ps),
pipeline_statistics.bits().count_ones(),
)
}
wgt::QueryType::Timestamp => (hal::query::Type::Timestamp, 1),
}
}
pub fn map_load_store_ops<V>(channel: &PassChannel<V>) -> hal::pass::AttachmentOps {
hal::pass::AttachmentOps {
load: match channel.load_op {
LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear,
LoadOp::Load => hal::pass::AttachmentLoadOp::Load,
},
store: match channel.store_op {
StoreOp::Clear => hal::pass::AttachmentStoreOp::DontCare, //TODO!
StoreOp::Store => hal::pass::AttachmentStoreOp::Store,
},
}
}
pub fn map_color_f32(color: &wgt::Color) -> hal::pso::ColorValue {
[
color.r as f32,
color.g as f32,
color.b as f32,
color.a as f32,
]
}
pub fn map_color_i32(color: &wgt::Color) -> [i32; 4] {
[
color.r as i32,
color.g as i32,
color.b as i32,
color.a as i32,
]
}
pub fn map_color_u32(color: &wgt::Color) -> [u32; 4] {
[
color.r as u32,
color.g as u32,
color.b as u32,
color.a as u32,
]
}
pub fn map_filter(filter: wgt::FilterMode) -> hal::image::Filter {
match filter {
wgt::FilterMode::Nearest => hal::image::Filter::Nearest,
wgt::FilterMode::Linear => hal::image::Filter::Linear,
}
}
pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode {
use hal::image::WrapMode as W;
use wgt::AddressMode as Am;
match address {
Am::ClampToEdge => W::Clamp,
Am::Repeat => W::Tile,
Am::MirrorRepeat => W::Mirror,
Am::ClampToBorder => W::Border,
}
}
pub fn map_primitive_state_to_input_assembler(
desc: &wgt::PrimitiveState,
) -> hal::pso::InputAssemblerDesc {
hal::pso::InputAssemblerDesc {
primitive: map_primitive_topology(desc.topology),
with_adjacency: false,
restart_index: desc.strip_index_format.map(map_index_format),
}
}
pub fn map_primitive_state_to_rasterizer(
desc: &wgt::PrimitiveState,
depth_stencil: Option<&wgt::DepthStencilState>,
) -> hal::pso::Rasterizer {
use hal::pso;
let depth_bias = match depth_stencil {
Some(dsd) if dsd.bias.is_enabled() => Some(pso::State::Static(pso::DepthBias {
const_factor: dsd.bias.constant as f32,
slope_factor: dsd.bias.slope_scale,
clamp: dsd.bias.clamp,
})),
_ => None,
};
pso::Rasterizer {
depth_clamping: desc.clamp_depth,
polygon_mode: match desc.polygon_mode {
wgt::PolygonMode::Fill => pso::PolygonMode::Fill,
wgt::PolygonMode::Line => pso::PolygonMode::Line,
wgt::PolygonMode::Point => pso::PolygonMode::Point,
},
cull_face: match desc.cull_mode {
None => pso::Face::empty(),
Some(wgt::Face::Front) => pso::Face::FRONT,
Some(wgt::Face::Back) => pso::Face::BACK,
},
front_face: match desc.front_face {
wgt::FrontFace::Ccw => pso::FrontFace::CounterClockwise,
wgt::FrontFace::Cw => pso::FrontFace::Clockwise,
},
depth_bias,
conservative: desc.conservative,
line_width: pso::State::Static(1.0),
}
}
pub fn map_multisample_state(desc: &wgt::MultisampleState) -> hal::pso::Multisampling {
hal::pso::Multisampling {
rasterization_samples: desc.count as _,
sample_shading: None,
sample_mask: desc.mask,
alpha_coverage: desc.alpha_to_coverage_enabled,
alpha_to_one: false,
}
}
pub fn map_index_format(index_format: wgt::IndexFormat) -> hal::IndexType {
match index_format {
wgt::IndexFormat::Uint16 => hal::IndexType::U16,
wgt::IndexFormat::Uint32 => hal::IndexType::U32,
}
}
/// Take `value` and round it up to the nearest alignment `alignment`.
///
/// ```text
/// (0, 3) -> 0
/// (1, 3) -> 3
/// (2, 3) -> 3
/// (3, 3) -> 3
/// (4, 3) -> 6
/// ...
/// ```
pub fn align_up(value: u32, alignment: u32) -> u32 {
((value + alignment - 1) / alignment) * alignment
}


@ -1,293 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::DeviceError;
use hal::device::Device as _;
use std::{borrow::Cow, iter, ptr::NonNull};
#[derive(Debug)]
pub struct MemoryAllocator<B: hal::Backend>(gpu_alloc::GpuAllocator<B::Memory>);
#[derive(Debug)]
pub struct MemoryBlock<B: hal::Backend>(gpu_alloc::MemoryBlock<B::Memory>);
struct MemoryDevice<'a, B: hal::Backend>(&'a B::Device);
impl<B: hal::Backend> MemoryAllocator<B> {
pub fn new(mem_props: hal::adapter::MemoryProperties, limits: hal::Limits) -> Self {
let mem_config = gpu_alloc::Config {
dedicated_threshold: 32 << 20,
preferred_dedicated_threshold: 8 << 20,
transient_dedicated_threshold: 128 << 20,
linear_chunk: 128 << 20,
minimal_buddy_size: 1 << 10,
initial_buddy_dedicated_size: 8 << 20,
};
let properties = gpu_alloc::DeviceProperties {
memory_types: Cow::Owned(
mem_props
.memory_types
.iter()
.map(|mt| gpu_alloc::MemoryType {
heap: mt.heap_index as u32,
props: gpu_alloc::MemoryPropertyFlags::from_bits_truncate(
mt.properties.bits() as u8,
),
})
.collect::<Vec<_>>(),
),
memory_heaps: Cow::Owned(
mem_props
.memory_heaps
.iter()
.map(|mh| gpu_alloc::MemoryHeap { size: mh.size })
.collect::<Vec<_>>(),
),
max_memory_allocation_count: if limits.max_memory_allocation_count == 0 {
log::warn!("max_memory_allocation_count is not set by gfx-rs backend");
!0
} else {
limits.max_memory_allocation_count.min(!0u32 as usize) as u32
},
max_memory_allocation_size: !0,
non_coherent_atom_size: limits.non_coherent_atom_size as u64,
buffer_device_address: false,
};
MemoryAllocator(gpu_alloc::GpuAllocator::new(mem_config, properties))
}
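// Blocks returned by `allocate` are bound with `MemoryBlock::bind_buffer`/`bind_image`
// and handed back through `free`; `clear` asks gpu-alloc to release its remaining chunks.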
pub fn allocate(
&mut self,
device: &B::Device,
requirements: hal::memory::Requirements,
usage: gpu_alloc::UsageFlags,
) -> Result<MemoryBlock<B>, DeviceError> {
assert!(requirements.alignment.is_power_of_two());
let request = gpu_alloc::Request {
size: requirements.size,
align_mask: requirements.alignment - 1,
memory_types: requirements.type_mask,
usage,
};
unsafe { self.0.alloc(&MemoryDevice::<B>(device), request) }
.map(MemoryBlock)
.map_err(|err| match err {
gpu_alloc::AllocationError::OutOfHostMemory
| gpu_alloc::AllocationError::OutOfDeviceMemory => DeviceError::OutOfMemory,
_ => panic!("Unable to allocate memory: {:?}", err),
})
}
pub fn free(&mut self, device: &B::Device, block: MemoryBlock<B>) {
unsafe { self.0.dealloc(&MemoryDevice::<B>(device), block.0) }
}
pub fn clear(&mut self, device: &B::Device) {
unsafe { self.0.cleanup(&MemoryDevice::<B>(device)) }
}
}
impl<B: hal::Backend> MemoryBlock<B> {
pub fn bind_buffer(
&self,
device: &B::Device,
buffer: &mut B::Buffer,
) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe {
device
.bind_buffer_memory(mem, self.0.offset(), buffer)
.map_err(DeviceError::from_bind)
}
}
pub fn bind_image(&self, device: &B::Device, image: &mut B::Image) -> Result<(), DeviceError> {
let mem = self.0.memory();
unsafe {
device
.bind_image_memory(mem, self.0.offset(), image)
.map_err(DeviceError::from_bind)
}
}
pub fn is_coherent(&self) -> bool {
self.0
.props()
.contains(gpu_alloc::MemoryPropertyFlags::HOST_COHERENT)
}
pub fn map(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: wgt::BufferAddress,
) -> Result<NonNull<u8>, DeviceError> {
let offset = inner_offset;
unsafe {
self.0
.map(&MemoryDevice::<B>(device), offset, size as usize)
.map_err(DeviceError::from)
}
}
pub fn unmap(&mut self, device: &B::Device) {
unsafe { self.0.unmap(&MemoryDevice::<B>(device)) };
}
pub fn write_bytes(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), DeviceError> {
profiling::scope!("write_bytes");
let offset = inner_offset;
unsafe {
self.0
.write_bytes(&MemoryDevice::<B>(device), offset, data)
.map_err(DeviceError::from)
}
}
pub fn read_bytes(
&mut self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
data: &mut [u8],
) -> Result<(), DeviceError> {
profiling::scope!("read_bytes");
let offset = inner_offset;
unsafe {
self.0
.read_bytes(&MemoryDevice::<B>(device), offset, data)
.map_err(DeviceError::from)
}
}
fn segment(
&self,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> hal::memory::Segment {
hal::memory::Segment {
offset: self.0.offset() + inner_offset,
size: size.or_else(|| Some(self.0.size())),
}
}
pub fn flush_range(
&self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe {
device
.flush_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory))
}
}
pub fn invalidate_range(
&self,
device: &B::Device,
inner_offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
) -> Result<(), DeviceError> {
let segment = self.segment(inner_offset, size);
let mem = self.0.memory();
unsafe {
device
.invalidate_mapped_memory_ranges(iter::once((mem, segment)))
.or(Err(DeviceError::OutOfMemory))
}
}
}
impl<B: hal::Backend> gpu_alloc::MemoryDevice<B::Memory> for MemoryDevice<'_, B> {
unsafe fn allocate_memory(
&self,
size: u64,
memory_type: u32,
flags: gpu_alloc::AllocationFlags,
) -> Result<B::Memory, gpu_alloc::OutOfMemory> {
profiling::scope!("allocate_memory");
assert!(flags.is_empty());
self.0
.allocate_memory(hal::MemoryTypeId(memory_type as _), size)
.map_err(|_| gpu_alloc::OutOfMemory::OutOfDeviceMemory)
}
unsafe fn deallocate_memory(&self, memory: B::Memory) {
profiling::scope!("deallocate_memory");
self.0.free_memory(memory);
}
unsafe fn map_memory(
&self,
memory: &mut B::Memory,
offset: u64,
size: u64,
) -> Result<NonNull<u8>, gpu_alloc::DeviceMapError> {
profiling::scope!("map_memory");
match self.0.map_memory(
memory,
hal::memory::Segment {
offset,
size: Some(size),
},
) {
Ok(ptr) => Ok(NonNull::new(ptr).expect("Pointer to memory mapping must not be null")),
Err(hal::device::MapError::OutOfMemory(_)) => {
Err(gpu_alloc::DeviceMapError::OutOfDeviceMemory)
}
Err(hal::device::MapError::MappingFailed) => Err(gpu_alloc::DeviceMapError::MapFailed),
Err(other) => panic!("Unexpected map error: {:?}", other),
}
}
unsafe fn unmap_memory(&self, memory: &mut B::Memory) {
profiling::scope!("unmap_memory");
self.0.unmap_memory(memory);
}
unsafe fn invalidate_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("invalidate_memory_ranges");
self.0
.invalidate_mapped_memory_ranges(ranges.iter().map(|r| {
(
r.memory,
hal::memory::Segment {
offset: r.offset,
size: Some(r.size),
},
)
}))
.map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
}
unsafe fn flush_memory_ranges(
&self,
ranges: &[gpu_alloc::MappedMemoryRange<'_, B::Memory>],
) -> Result<(), gpu_alloc::OutOfMemory> {
profiling::scope!("flush_memory_ranges");
self.0
.flush_mapped_memory_ranges(ranges.iter().map(|r| {
(
r.memory,
hal::memory::Segment {
offset: r.offset,
size: Some(r.size),
},
)
}))
.map_err(|_| gpu_alloc::OutOfMemory::OutOfHostMemory)
}
}


@ -1,175 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::DeviceError;
use arrayvec::ArrayVec;
pub use gpu_descriptor::DescriptorTotalCount;
pub type DescriptorSet<B> = gpu_descriptor::DescriptorSet<<B as hal::Backend>::DescriptorSet>;
#[derive(Debug)]
pub struct DescriptorAllocator<B: hal::Backend>(
gpu_descriptor::DescriptorAllocator<B::DescriptorPool, B::DescriptorSet>,
);
struct DescriptorDevice<'a, B: hal::Backend>(&'a B::Device);
impl<B: hal::Backend> DescriptorAllocator<B> {
pub fn new() -> Self {
DescriptorAllocator(gpu_descriptor::DescriptorAllocator::new(0))
}
pub fn allocate(
&mut self,
device: &B::Device,
layout: &B::DescriptorSetLayout,
layout_descriptor_count: &DescriptorTotalCount,
count: u32,
) -> Result<Vec<DescriptorSet<B>>, DeviceError> {
unsafe {
self.0.allocate(
&DescriptorDevice::<B>(device),
layout,
gpu_descriptor::DescriptorSetLayoutCreateFlags::empty(),
layout_descriptor_count,
count,
)
}
.map_err(|err| {
log::warn!("Descriptor set allocation failed: {}", err);
DeviceError::OutOfMemory
})
}
pub fn free(&mut self, device: &B::Device, sets: impl IntoIterator<Item = DescriptorSet<B>>) {
unsafe { self.0.free(&DescriptorDevice::<B>(device), sets) }
}
pub fn cleanup(&mut self, device: &B::Device) {
unsafe { self.0.cleanup(&DescriptorDevice::<B>(device)) }
}
}
impl<B: hal::Backend>
gpu_descriptor::DescriptorDevice<B::DescriptorSetLayout, B::DescriptorPool, B::DescriptorSet>
for DescriptorDevice<'_, B>
{
unsafe fn create_descriptor_pool(
&self,
descriptor_count: &DescriptorTotalCount,
max_sets: u32,
flags: gpu_descriptor::DescriptorPoolCreateFlags,
) -> Result<B::DescriptorPool, gpu_descriptor::CreatePoolError> {
profiling::scope!("create_descriptor_pool");
let mut ranges = ArrayVec::<[_; 7]>::new();
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Sampler,
count: descriptor_count.sampler as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Image {
ty: hal::pso::ImageDescriptorType::Sampled {
with_sampler: false,
},
},
count: descriptor_count.sampled_image as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Image {
ty: hal::pso::ImageDescriptorType::Storage { read_only: false },
},
count: descriptor_count.storage_image as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Uniform,
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: descriptor_count.uniform_buffer as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: false,
},
},
count: descriptor_count.storage_buffer as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Uniform,
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: true,
},
},
count: descriptor_count.uniform_buffer_dynamic as _,
});
ranges.push(hal::pso::DescriptorRangeDesc {
ty: hal::pso::DescriptorType::Buffer {
ty: hal::pso::BufferDescriptorType::Storage { read_only: false },
format: hal::pso::BufferDescriptorFormat::Structured {
dynamic_offset: true,
},
},
count: descriptor_count.storage_buffer_dynamic as _,
});
ranges.retain(|rd| rd.count != 0);
match hal::device::Device::create_descriptor_pool(
self.0,
max_sets as usize,
ranges.into_iter(),
hal::pso::DescriptorPoolCreateFlags::from_bits_truncate(flags.bits()),
) {
Ok(pool) => Ok(pool),
Err(hal::device::OutOfMemory::Host) => {
Err(gpu_descriptor::CreatePoolError::OutOfHostMemory)
}
Err(hal::device::OutOfMemory::Device) => {
Err(gpu_descriptor::CreatePoolError::OutOfDeviceMemory)
}
}
}
unsafe fn destroy_descriptor_pool(&self, pool: B::DescriptorPool) {
profiling::scope!("destroy_descriptor_pool");
hal::device::Device::destroy_descriptor_pool(self.0, pool);
}
unsafe fn alloc_descriptor_sets<'a>(
&self,
pool: &mut B::DescriptorPool,
layouts: impl ExactSizeIterator<Item = &'a B::DescriptorSetLayout>,
sets: &mut impl Extend<B::DescriptorSet>,
) -> Result<(), gpu_descriptor::DeviceAllocationError> {
use gpu_descriptor::DeviceAllocationError as Dae;
profiling::scope!("alloc_descriptor_sets");
match hal::pso::DescriptorPool::allocate(pool, layouts, sets) {
Ok(()) => Ok(()),
Err(hal::pso::AllocationError::OutOfMemory(oom)) => Err(match oom {
hal::device::OutOfMemory::Host => Dae::OutOfHostMemory,
hal::device::OutOfMemory::Device => Dae::OutOfDeviceMemory,
}),
Err(hal::pso::AllocationError::OutOfPoolMemory) => Err(Dae::OutOfPoolMemory),
Err(hal::pso::AllocationError::FragmentedPool) => Err(Dae::FragmentedPool),
Err(hal::pso::AllocationError::IncompatibleLayout) => {
panic!("Incompatible descriptor set layout")
}
}
}
unsafe fn dealloc_descriptor_sets<'a>(
&self,
pool: &mut B::DescriptorPool,
sets: impl Iterator<Item = B::DescriptorSet>,
) {
profiling::scope!("dealloc_descriptor_sets");
hal::pso::DescriptorPool::free(pool, sets)
}
}


@ -1,890 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
command::{
texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
CommandAllocator, CommandBuffer, CopySide, ImageCopyTexture, TransferError, BITS_PER_BYTE,
},
conv,
device::{alloc, DeviceError, WaitIdleError},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id,
memory_init_tracker::{MemoryInitKind, MemoryInitTrackerAction},
resource::{Buffer, BufferAccessError, BufferMapState, BufferUse, TextureUse},
FastHashMap, FastHashSet,
};
use hal::{command::CommandBuffer as _, device::Device as _, queue::Queue as _};
use smallvec::SmallVec;
use std::{iter, ops::Range, ptr};
use thiserror::Error;
struct StagingData<B: hal::Backend> {
buffer: B::Buffer,
memory: alloc::MemoryBlock<B>,
cmdbuf: B::CommandBuffer,
}
#[derive(Debug)]
pub enum TempResource<B: hal::Backend> {
Buffer(B::Buffer),
Image(B::Image),
}
#[derive(Debug)]
pub(crate) struct PendingWrites<B: hal::Backend> {
pub command_buffer: Option<B::CommandBuffer>,
pub temp_resources: Vec<(TempResource<B>, alloc::MemoryBlock<B>)>,
pub dst_buffers: FastHashSet<id::BufferId>,
pub dst_textures: FastHashSet<id::TextureId>,
}
impl<B: hal::Backend> PendingWrites<B> {
pub fn new() -> Self {
Self {
command_buffer: None,
temp_resources: Vec::new(),
dst_buffers: FastHashSet::default(),
dst_textures: FastHashSet::default(),
}
}
pub fn dispose(
self,
device: &B::Device,
cmd_allocator: &CommandAllocator<B>,
mem_allocator: &mut alloc::MemoryAllocator<B>,
) {
if let Some(raw) = self.command_buffer {
cmd_allocator.discard_internal(raw);
}
for (resource, memory) in self.temp_resources {
mem_allocator.free(device, memory);
match resource {
TempResource::Buffer(buffer) => unsafe {
device.destroy_buffer(buffer);
},
TempResource::Image(image) => unsafe {
device.destroy_image(image);
},
}
}
}
pub fn consume_temp(&mut self, resource: TempResource<B>, memory: alloc::MemoryBlock<B>) {
self.temp_resources.push((resource, memory));
}
fn consume(&mut self, stage: StagingData<B>) {
self.temp_resources
.push((TempResource::Buffer(stage.buffer), stage.memory));
self.command_buffer = Some(stage.cmdbuf);
}
#[must_use]
fn finish(&mut self) -> Option<B::CommandBuffer> {
self.dst_buffers.clear();
self.dst_textures.clear();
self.command_buffer.take().map(|mut cmd_buf| unsafe {
cmd_buf.finish();
cmd_buf
})
}
fn borrow_cmd_buf(&mut self, cmd_allocator: &CommandAllocator<B>) -> &mut B::CommandBuffer {
if self.command_buffer.is_none() {
let mut cmdbuf = cmd_allocator.allocate_internal();
unsafe {
cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
self.command_buffer = Some(cmdbuf);
}
self.command_buffer.as_mut().unwrap()
}
}
#[derive(Default)]
struct RequiredBufferInits {
map: FastHashMap<id::BufferId, Vec<Range<wgt::BufferAddress>>>,
}
impl RequiredBufferInits {
fn add<B: hal::Backend>(
&mut self,
buffer_memory_init_actions: &[MemoryInitTrackerAction<id::BufferId>],
buffer_guard: &mut Storage<Buffer<B>, id::BufferId>,
) -> Result<(), QueueSubmitError> {
for buffer_use in buffer_memory_init_actions.iter() {
let buffer = buffer_guard
.get_mut(buffer_use.id)
.map_err(|_| QueueSubmitError::DestroyedBuffer(buffer_use.id))?;
let uninitialized_ranges = buffer.initialization_status.drain(buffer_use.range.clone());
match buffer_use.kind {
MemoryInitKind::ImplicitlyInitialized => {
uninitialized_ranges.for_each(drop);
}
MemoryInitKind::NeedsInitializedMemory => {
self.map
.entry(buffer_use.id)
.or_default()
.extend(uninitialized_ranges);
}
}
}
Ok(())
}
}
impl<B: hal::Backend> super::Device<B> {
pub fn borrow_pending_writes(&mut self) -> &mut B::CommandBuffer {
self.pending_writes.borrow_cmd_buf(&self.cmd_allocator)
}
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
profiling::scope!("prepare_stage");
let mut buffer = unsafe {
self.raw
.create_buffer(
size,
hal::buffer::Usage::TRANSFER_SRC,
hal::memory::SparseFlags::empty(),
)
.map_err(|err| match err {
hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to create staging buffer: {}", err),
})?
};
//TODO: do we need to transition into HOST_WRITE access first?
let requirements = unsafe {
self.raw.set_buffer_name(&mut buffer, "<write_buffer_temp>");
self.raw.get_buffer_requirements(&buffer)
};
let block = self.mem_allocator.lock().allocate(
&self.raw,
requirements,
gpu_alloc::UsageFlags::UPLOAD | gpu_alloc::UsageFlags::TRANSIENT,
)?;
block.bind_buffer(&self.raw, &mut buffer)?;
let cmdbuf = match self.pending_writes.command_buffer.take() {
Some(cmdbuf) => cmdbuf,
None => {
let mut cmdbuf = self.cmd_allocator.allocate_internal();
unsafe {
cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
cmdbuf
}
};
Ok(StagingData {
buffer,
memory: block,
cmdbuf,
})
}
fn initialize_buffer_memory(
&mut self,
mut required_buffer_inits: RequiredBufferInits,
buffer_guard: &mut Storage<Buffer<B>, id::BufferId>,
) -> Result<(), QueueSubmitError> {
self.pending_writes
.dst_buffers
.extend(required_buffer_inits.map.keys());
let cmd_buf = self.pending_writes.borrow_cmd_buf(&self.cmd_allocator);
let mut trackers = self.trackers.lock();
for (buffer_id, mut ranges) in required_buffer_inits.map.drain() {
// Collapse touching ranges. We can't do this any earlier since we only now gathered ranges from several different command buffers!
ranges.sort_by(|a, b| a.start.cmp(&b.start));
for i in (1..ranges.len()).rev() {
assert!(ranges[i - 1].end <= ranges[i].start); // The memory init tracker made sure of this!
if ranges[i].start == ranges[i - 1].end {
ranges[i - 1].end = ranges[i].end;
ranges.swap_remove(i); // Ordering not important at this point
}
}
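// e.g. the sorted ranges [0..4, 4..8, 12..16] collapse to [0..8, 12..16] here.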
// Don't do use_replace since the buffer may already no longer have a ref_count.
// However, we *know* that it is currently in use, so the tracker must already know about it.
let transition = trackers.buffers.change_replace_tracked(
id::Valid(buffer_id),
(),
BufferUse::COPY_DST,
);
let buffer = buffer_guard.get(buffer_id).unwrap();
let &(ref buffer_raw, _) = buffer
.raw
.as_ref()
.ok_or(QueueSubmitError::DestroyedBuffer(buffer_id))?;
unsafe {
cmd_buf.pipeline_barrier(
super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
transition.map(|pending| pending.into_hal(buffer)),
);
}
for range in ranges {
let size = range.end - range.start;
assert!(range.start % 4 == 0, "Buffer {:?} has an uninitialized range with a start not aligned to 4 (start was {})", buffer, range.start);
assert!(size % 4 == 0, "Buffer {:?} has an uninitialized range with a size not aligned to 4 (size was {})", buffer, size);
unsafe {
cmd_buf.fill_buffer(
buffer_raw,
hal::buffer::SubRange {
offset: range.start,
size: Some(size),
},
0,
);
}
}
}
Ok(())
}
}
#[derive(Clone, Debug, Error)]
#[error("queue is invalid")]
pub struct InvalidQueue;
#[derive(Clone, Debug, Error)]
pub enum QueueWriteError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
Transfer(#[from] TransferError),
}
#[derive(Clone, Debug, Error)]
pub enum QueueSubmitError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error("texture {0:?} is destroyed")]
DestroyedTexture(id::TextureId),
#[error(transparent)]
Unmap(#[from] BufferAccessError),
#[error("swap chain output was dropped before the command buffer got submitted")]
SwapChainOutputDropped,
#[error("GPU got stuck :(")]
StuckGpu,
}
//TODO: move out common parts of write_xxx.
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn queue_write_buffer<B: GfxBackend>(
&self,
queue_id: id::QueueId,
buffer_id: id::BufferId,
buffer_offset: wgt::BufferAddress,
data: &[u8],
) -> Result<(), QueueWriteError> {
profiling::scope!("write_buffer", "Queue");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (buffer_guard, _) = hub.buffers.read(&mut token);
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteBuffer {
id: buffer_id,
data: data_path,
range: buffer_offset..buffer_offset + data.len() as wgt::BufferAddress,
queued: true,
});
}
let data_size = data.len() as wgt::BufferAddress;
if data_size == 0 {
log::trace!("Ignoring write_buffer of size 0");
return Ok(());
}
let mut stage = device.prepare_stage(data_size)?;
stage.memory.write_bytes(&device.raw, 0, data)?;
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag(Some(buffer_id), None).into());
}
dst.life_guard.use_at(device.active_submission_index + 1);
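// wgt::COPY_BUFFER_ALIGNMENT is 4, so both the data size and the destination offset
// must be 4-byte aligned.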
if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedCopySize(data_size).into());
}
if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedBufferOffset(buffer_offset).into());
}
if buffer_offset + data_size > dst.size {
return Err(TransferError::BufferOverrun {
start_offset: buffer_offset,
end_offset: buffer_offset + data_size,
buffer_size: dst.size,
side: CopySide::Destination,
}
.into());
}
let region = hal::command::BufferCopy {
src: 0,
dst: buffer_offset,
size: data.len() as _,
};
unsafe {
stage.cmdbuf.pipeline_barrier(
super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
iter::once(hal::memory::Barrier::Buffer {
states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
target: &stage.buffer,
range: hal::buffer::SubRange::WHOLE,
families: None,
})
.chain(transition.map(|pending| pending.into_hal(dst))),
);
stage
.cmdbuf
.copy_buffer(&stage.buffer, dst_raw, iter::once(region));
}
device.pending_writes.consume(stage);
device.pending_writes.dst_buffers.insert(buffer_id);
// Ensure the overwritten bytes are marked as initialized so they don't need to be nulled prior to mapping or binding.
{
drop(buffer_guard);
let (mut buffer_guard, _) = hub.buffers.write(&mut token);
let dst = buffer_guard.get_mut(buffer_id).unwrap();
dst.initialization_status
.clear(buffer_offset..(buffer_offset + data_size));
}
Ok(())
}
pub fn queue_write_texture<B: GfxBackend>(
&self,
queue_id: id::QueueId,
destination: &ImageCopyTexture,
data: &[u8],
data_layout: &wgt::ImageDataLayout,
size: &wgt::Extent3d,
) -> Result<(), QueueWriteError> {
profiling::scope!("write_texture", "Queue");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (texture_guard, _) = hub.textures.read(&mut token);
let (image_layers, image_range, image_offset) =
texture_copy_view_to_hal(destination, size, &*texture_guard)?;
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteTexture {
to: destination.clone(),
data: data_path,
layout: *data_layout,
size: *size,
});
}
if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
log::trace!("Ignoring write_texture of size 0");
return Ok(());
}
let texture_format = texture_guard.get(destination.texture).unwrap().format;
let bytes_per_block = conv::map_texture_format(texture_format, device.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
validate_linear_texture_data(
data_layout,
texture_format,
data.len() as wgt::BufferAddress,
CopySide::Source,
bytes_per_block as wgt::BufferAddress,
size,
false,
)?;
let (block_width, block_height) = texture_format.describe().block_dimensions;
let block_width = block_width as u32;
let block_height = block_height as u32;
if !conv::is_valid_copy_dst_texture_format(texture_format) {
return Err(TransferError::CopyToForbiddenTextureFormat(texture_format).into());
}
let width_blocks = size.width / block_width;
let height_blocks = size.height / block_height;
let texel_rows_per_image = if let Some(rows_per_image) = data_layout.rows_per_image {
rows_per_image.get()
} else {
// The value only matters when copying more than one layer, and in that case validation requires rows_per_image to be Some.
size.height
};
let block_rows_per_image = texel_rows_per_image / block_height;
let bytes_per_row_alignment = get_lowest_common_denom(
device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
bytes_per_block,
);
let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment);
let block_rows_in_copy =
(size.depth_or_array_layers - 1) * block_rows_per_image + height_blocks;
let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
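// Illustrative numbers: with a 256-byte pitch alignment, 4 bytes per block and 100 blocks
// per row, stage_bytes_per_row = align_to(400, 256) = 512 and stage_size is
// 512 * block_rows_in_copy.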
let mut stage = device.prepare_stage(stage_size)?;
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.textures
.use_replace(
&*texture_guard,
destination.texture,
image_range,
TextureUse::COPY_DST,
)
.unwrap();
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
validate_texture_copy_range(
destination,
dst.format,
dst.kind,
CopySide::Destination,
size,
)?;
dst.life_guard.use_at(device.active_submission_index + 1);
let bytes_per_row = if let Some(bytes_per_row) = data_layout.bytes_per_row {
bytes_per_row.get()
} else {
width_blocks * bytes_per_block
};
let ptr = stage.memory.map(&device.raw, 0, stage_size)?;
unsafe {
profiling::scope!("copy");
//TODO: https://github.com/zakarumych/gpu-alloc/issues/13
if stage_bytes_per_row == bytes_per_row {
// Fast path if the data is already aligned optimally.
ptr::copy_nonoverlapping(data.as_ptr(), ptr.as_ptr(), stage_size as usize);
} else {
// Copy row by row into the optimal alignment.
let copy_bytes_per_row = stage_bytes_per_row.min(bytes_per_row) as usize;
for layer in 0..size.depth_or_array_layers {
let rows_offset = layer * block_rows_per_image;
for row in 0..height_blocks {
ptr::copy_nonoverlapping(
data.as_ptr()
.offset((rows_offset + row) as isize * bytes_per_row as isize),
ptr.as_ptr().offset(
(rows_offset + row) as isize * stage_bytes_per_row as isize,
),
copy_bytes_per_row,
);
}
}
}
}
stage.memory.unmap(&device.raw);
if !stage.memory.is_coherent() {
stage.memory.flush_range(&device.raw, 0, None)?;
}
// WebGPU uses the physical size of the texture for copies whereas Vulkan uses
// the virtual size. We have passed validation, so it's safe to use the
// image extent data directly. We want the provided copy size to be no larger than
// the virtual size.
let max_image_extent = dst.kind.level_extent(destination.mip_level as _);
let image_extent = wgt::Extent3d {
width: size.width.min(max_image_extent.width),
height: size.height.min(max_image_extent.height),
depth_or_array_layers: size.depth_or_array_layers,
};
let region = hal::command::BufferImageCopy {
buffer_offset: 0,
buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
buffer_height: texel_rows_per_image,
image_layers,
image_offset,
image_extent: conv::map_extent(&image_extent, dst.dimension),
};
unsafe {
stage.cmdbuf.pipeline_barrier(
super::all_image_stages() | hal::pso::PipelineStage::HOST
..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
iter::once(hal::memory::Barrier::Buffer {
states: hal::buffer::Access::HOST_WRITE..hal::buffer::Access::TRANSFER_READ,
target: &stage.buffer,
range: hal::buffer::SubRange::WHOLE,
families: None,
})
.chain(transition.map(|pending| pending.into_hal(dst))),
);
stage.cmdbuf.copy_buffer_to_image(
&stage.buffer,
dst_raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
device.pending_writes.consume(stage);
device
.pending_writes
.dst_textures
.insert(destination.texture);
Ok(())
}
pub fn queue_submit<B: GfxBackend>(
&self,
queue_id: id::QueueId,
command_buffer_ids: &[id::CommandBufferId],
) -> Result<(), QueueSubmitError> {
profiling::scope!("submit", "Queue");
let hub = B::hub(self);
let mut token = Token::root();
let callbacks = {
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
device.temp_suspected.clear();
device.active_submission_index += 1;
let submit_index = device.active_submission_index;
let (fence, pending_write_command_buffer) = {
let mut signal_swapchain_semaphores = SmallVec::<[_; 1]>::new();
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
if !command_buffer_ids.is_empty() {
profiling::scope!("prepare");
let (render_bundle_guard, mut token) = hub.render_bundles.read(&mut token);
let (_, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (compute_pipe_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (render_pipe_guard, mut token) = hub.render_pipelines.read(&mut token);
let (mut buffer_guard, mut token) = hub.buffers.write(&mut token);
let (texture_guard, mut token) = hub.textures.write(&mut token);
let (texture_view_guard, mut token) = hub.texture_views.read(&mut token);
let (sampler_guard, _) = hub.samplers.read(&mut token);
let mut required_buffer_inits = RequiredBufferInits::default();
//Note: locking the trackers has to be done after the storages
let mut trackers = device.trackers.lock();
//TODO: if multiple command buffers are submitted, we can re-use the last
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let cmdbuf = match command_buffer_guard.get_mut(cmb_id) {
Ok(cmdbuf) => cmdbuf,
Err(_) => continue,
};
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::Submit(
submit_index,
cmdbuf.commands.take().unwrap(),
));
}
if !cmdbuf.is_finished() {
continue;
}
required_buffer_inits
.add(&cmdbuf.buffer_memory_init_actions, &mut *buffer_guard)?;
// optimize the tracked states
cmdbuf.trackers.optimize();
for sc_id in cmdbuf.used_swap_chains.drain(..) {
let sc = &mut swap_chain_guard[sc_id.value];
if sc.acquired_view_id.is_none() {
return Err(QueueSubmitError::SwapChainOutputDropped);
}
if sc.active_submission_index != submit_index {
sc.active_submission_index = submit_index;
// Only add a signal if this is the first time for this swapchain
// to be used in the submission.
signal_swapchain_semaphores.push(sc_id.value);
}
}
// update submission IDs
for id in cmdbuf.trackers.buffers.used() {
let buffer = &mut buffer_guard[id];
if buffer.raw.is_none() {
return Err(QueueSubmitError::DestroyedBuffer(id.0));
}
if !buffer.life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer.map_state {
log::warn!("Dropped buffer has a pending mapping.");
super::unmap_buffer(&device.raw, buffer)?;
}
device.temp_suspected.buffers.push(id);
} else {
match buffer.map_state {
BufferMapState::Idle => (),
_ => panic!("Buffer {:?} is still mapped", id),
}
}
}
for id in cmdbuf.trackers.textures.used() {
let texture = &texture_guard[id];
if texture.raw.is_none() {
return Err(QueueSubmitError::DestroyedTexture(id.0));
}
if !texture.life_guard.use_at(submit_index) {
device.temp_suspected.textures.push(id);
}
}
for id in cmdbuf.trackers.views.used() {
if !texture_view_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.texture_views.push(id);
}
}
for id in cmdbuf.trackers.bind_groups.used() {
if !bind_group_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.bind_groups.push(id);
}
}
for id in cmdbuf.trackers.samplers.used() {
if !sampler_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.samplers.push(id);
}
}
for id in cmdbuf.trackers.compute_pipes.used() {
if !compute_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.compute_pipelines.push(id);
}
}
for id in cmdbuf.trackers.render_pipes.used() {
if !render_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.render_pipelines.push(id);
}
}
for id in cmdbuf.trackers.bundles.used() {
if !render_bundle_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.render_bundles.push(id);
}
}
// execute resource transitions
let mut transit = device.cmd_allocator.extend(cmdbuf);
unsafe {
// the last buffer was open, closing now
cmdbuf.raw.last_mut().unwrap().finish();
transit
.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
trackers.merge_extend_stateless(&cmdbuf.trackers);
CommandBuffer::insert_barriers(
&mut transit,
&mut *trackers,
&cmdbuf.trackers.buffers,
&cmdbuf.trackers.textures,
&*buffer_guard,
&*texture_guard,
);
unsafe {
transit.finish();
}
cmdbuf.raw.insert(0, transit);
}
log::trace!("Device after submission {}: {:#?}", submit_index, trackers);
drop(trackers);
if !required_buffer_inits.map.is_empty() {
device
.initialize_buffer_memory(required_buffer_inits, &mut *buffer_guard)?;
}
}
// Finish pending writes. Don't do this earlier since buffer init may lead to additional writes (see initialize_buffer_memory).
let pending_write_command_buffer = device.pending_writes.finish();
// now prepare the GPU submission
let mut fence = device
.raw
.create_fence(false)
.or(Err(DeviceError::OutOfMemory))?;
let signal_semaphores = signal_swapchain_semaphores
.into_iter()
.map(|sc_id| &swap_chain_guard[sc_id].semaphore);
//Note: we could technically avoid the heap Vec here
let mut command_buffers = Vec::new();
command_buffers.extend(pending_write_command_buffer.as_ref());
for &cmd_buf_id in command_buffer_ids.iter() {
match command_buffer_guard.get(cmd_buf_id) {
Ok(cmd_buf) if cmd_buf.is_finished() => {
command_buffers.extend(cmd_buf.raw.iter());
}
_ => {}
}
}
unsafe {
device.queue_group.queues[0].submit(
command_buffers.into_iter(),
iter::empty(),
signal_semaphores,
Some(&mut fence),
);
}
(fence, pending_write_command_buffer)
};
if let Some(comb_raw) = pending_write_command_buffer {
device
.cmd_allocator
.after_submit_internal(comb_raw, submit_index);
}
let callbacks = match device.maintain(&hub, false, &mut token) {
Ok(callbacks) => callbacks,
Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
};
device.temp_suspected.clear();
profiling::scope!("cleanup");
super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
submit_index,
fence,
device.pending_writes.temp_resources.drain(..),
);
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
device
.cmd_allocator
.after_submit(cmd_buf, &device.raw, submit_index);
}
}
callbacks
};
// the map callbacks should execute with nothing locked!
drop(token);
super::fire_map_callbacks(callbacks);
Ok(())
}
pub fn queue_get_timestamp_period<B: GfxBackend>(
&self,
queue_id: id::QueueId,
) -> Result<f32, InvalidQueue> {
let hub = B::hub(self);
let mut token = Token::root();
let (device_guard, _) = hub.devices.read(&mut token);
match device_guard.get(queue_id) {
Ok(device) => Ok(device.queue_group.queues[0].timestamp_period()),
Err(_) => Err(InvalidQueue),
}
}
}
fn get_lowest_common_denom(a: u32, b: u32) -> u32 {
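// Despite the name, this returns the least common multiple:
// e.g. 6 * 4 / gcd(6, 4) = 24 / 2 = 12 (see test_lcd).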
let gcd = if a >= b {
get_greatest_common_divisor(a, b)
} else {
get_greatest_common_divisor(b, a)
};
a * b / gcd
}
fn get_greatest_common_divisor(mut a: u32, mut b: u32) -> u32 {
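// Euclid's algorithm; callers pass the larger value first, e.g. (6, 4) -> (4, 2) -> gcd 2.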
assert!(a >= b);
loop {
let c = a % b;
if c == 0 {
return b;
} else {
a = b;
b = c;
}
}
}
fn align_to(value: u32, alignment: u32) -> u32 {
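// Rounds value up to the next multiple of alignment, e.g. align_to(400, 256) == 512.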
match value % alignment {
0 => value,
other => value - other + alignment,
}
}
#[test]
fn test_lcd() {
assert_eq!(get_lowest_common_denom(2, 2), 2);
assert_eq!(get_lowest_common_denom(2, 3), 6);
assert_eq!(get_lowest_common_denom(6, 4), 12);
}
#[test]
fn test_gcd() {
assert_eq!(get_greatest_common_divisor(5, 1), 1);
assert_eq!(get_greatest_common_divisor(4, 2), 2);
assert_eq!(get_greatest_common_divisor(6, 4), 2);
assert_eq!(get_greatest_common_divisor(7, 7), 7);
}


@ -1,989 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
backend, conv,
device::{Device, DeviceDescriptor},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{AdapterId, DeviceId, SurfaceId, Valid},
LabelHelpers, LifeGuard, PrivateFeatures, Stored, DOWNLEVEL_WARNING_MESSAGE, MAX_BIND_GROUPS,
};
use wgt::{Backend, BackendBit, PowerPreference, BIND_BUFFER_ALIGNMENT};
use hal::{
adapter::PhysicalDevice as _, queue::QueueFamily as _, window::Surface as _, Instance as _,
};
use thiserror::Error;
/// Size that is guaranteed to be available in push constants.
///
/// This is needed because non-vulkan backends might not
/// provide a push-constant size limit.
const MIN_PUSH_CONSTANT_SIZE: u32 = 128;
pub type RequestAdapterOptions = wgt::RequestAdapterOptions<SurfaceId>;
#[derive(Debug)]
pub struct Instance {
#[cfg(vulkan)]
pub vulkan: Option<gfx_backend_vulkan::Instance>,
#[cfg(metal)]
pub metal: Option<gfx_backend_metal::Instance>,
#[cfg(dx12)]
pub dx12: Option<gfx_backend_dx12::Instance>,
#[cfg(dx11)]
pub dx11: Option<gfx_backend_dx11::Instance>,
#[cfg(gl)]
pub gl: Option<gfx_backend_gl::Instance>,
}
impl Instance {
pub fn new(name: &str, version: u32, backends: BackendBit) -> Self {
backends_map! {
let map = |(backend, backend_create)| {
if backends.contains(backend.into()) {
backend_create(name, version).ok()
} else {
None
}
};
Self {
#[cfg(vulkan)]
vulkan: map((Backend::Vulkan, gfx_backend_vulkan::Instance::create)),
#[cfg(metal)]
metal: map((Backend::Metal, gfx_backend_metal::Instance::create)),
#[cfg(dx12)]
dx12: map((Backend::Dx12, gfx_backend_dx12::Instance::create)),
#[cfg(dx11)]
dx11: map((Backend::Dx11, gfx_backend_dx11::Instance::create)),
#[cfg(gl)]
gl: map((Backend::Gl, gfx_backend_gl::Instance::create)),
}
}
}
pub(crate) fn destroy_surface(&self, surface: Surface) {
backends_map! {
let map = |(surface_backend, self_backend)| {
unsafe {
if let Some(suf) = surface_backend {
self_backend.as_ref().unwrap().destroy_surface(suf);
}
}
};
#[cfg(vulkan)]
map((surface.vulkan, &self.vulkan)),
#[cfg(metal)]
map((surface.metal, &self.metal)),
#[cfg(dx12)]
map((surface.dx12, &self.dx12)),
#[cfg(dx11)]
map((surface.dx11, &self.dx11)),
#[cfg(gl)]
map((surface.gl, &self.gl)),
}
}
}
type GfxSurface<B> = <B as hal::Backend>::Surface;
#[derive(Debug)]
pub struct Surface {
#[cfg(vulkan)]
pub vulkan: Option<GfxSurface<backend::Vulkan>>,
#[cfg(metal)]
pub metal: Option<GfxSurface<backend::Metal>>,
#[cfg(dx12)]
pub dx12: Option<GfxSurface<backend::Dx12>>,
#[cfg(dx11)]
pub dx11: Option<GfxSurface<backend::Dx11>>,
#[cfg(gl)]
pub gl: Option<GfxSurface<backend::Gl>>,
}
impl crate::hub::Resource for Surface {
const TYPE: &'static str = "Surface";
fn life_guard(&self) -> &LifeGuard {
unreachable!()
}
fn label(&self) -> &str {
"<Surface>"
}
}
const FEATURE_MAP: &[(wgt::Features, hal::Features)] = &[
(wgt::Features::DEPTH_CLAMPING, hal::Features::DEPTH_CLAMP),
(
wgt::Features::TEXTURE_COMPRESSION_BC,
hal::Features::FORMAT_BC,
),
(
wgt::Features::TEXTURE_COMPRESSION_ETC2,
hal::Features::FORMAT_ETC2,
),
(
wgt::Features::TEXTURE_COMPRESSION_ASTC_LDR,
hal::Features::FORMAT_ASTC_LDR,
),
(
wgt::Features::SAMPLED_TEXTURE_BINDING_ARRAY,
hal::Features::TEXTURE_DESCRIPTOR_ARRAY,
),
(
wgt::Features::SAMPLED_TEXTURE_ARRAY_DYNAMIC_INDEXING,
hal::Features::SHADER_SAMPLED_IMAGE_ARRAY_DYNAMIC_INDEXING,
),
(
wgt::Features::SAMPLED_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
hal::Features::SAMPLED_TEXTURE_DESCRIPTOR_INDEXING,
),
(
wgt::Features::UNSIZED_BINDING_ARRAY,
hal::Features::UNSIZED_DESCRIPTOR_ARRAY,
),
(
wgt::Features::MULTI_DRAW_INDIRECT,
hal::Features::MULTI_DRAW_INDIRECT,
),
(
wgt::Features::MULTI_DRAW_INDIRECT_COUNT,
hal::Features::DRAW_INDIRECT_COUNT,
),
(
wgt::Features::NON_FILL_POLYGON_MODE,
hal::Features::NON_FILL_POLYGON_MODE,
),
(
wgt::Features::PIPELINE_STATISTICS_QUERY,
hal::Features::PIPELINE_STATISTICS_QUERY,
),
(wgt::Features::SHADER_FLOAT64, hal::Features::SHADER_FLOAT64),
(
wgt::Features::CONSERVATIVE_RASTERIZATION,
hal::Features::CONSERVATIVE_RASTERIZATION,
),
(
wgt::Features::BUFFER_BINDING_ARRAY,
hal::Features::BUFFER_DESCRIPTOR_ARRAY,
),
(
wgt::Features::UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING,
hal::Features::SHADER_UNIFORM_BUFFER_ARRAY_DYNAMIC_INDEXING,
),
(
wgt::Features::UNIFORM_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
hal::Features::UNIFORM_BUFFER_DESCRIPTOR_INDEXING,
),
(
wgt::Features::STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING,
hal::Features::SHADER_STORAGE_BUFFER_ARRAY_DYNAMIC_INDEXING,
),
(
wgt::Features::STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
hal::Features::STORAGE_BUFFER_DESCRIPTOR_INDEXING,
),
(
wgt::Features::VERTEX_WRITABLE_STORAGE,
hal::Features::VERTEX_STORES_AND_ATOMICS,
),
(
wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER,
hal::Features::SAMPLER_BORDER_COLOR,
),
];
#[derive(Debug)]
pub struct Adapter<B: hal::Backend> {
pub(crate) raw: hal::adapter::Adapter<B>,
features: wgt::Features,
pub(crate) private_features: PrivateFeatures,
limits: wgt::Limits,
downlevel: wgt::DownlevelProperties,
life_guard: LifeGuard,
}
impl<B: GfxBackend> Adapter<B> {
fn new(raw: hal::adapter::Adapter<B>) -> Self {
profiling::scope!("new", "Adapter");
let adapter_features = raw.physical_device.features();
let properties = raw.physical_device.properties();
let mut features = wgt::Features::default()
| wgt::Features::MAPPABLE_PRIMARY_BUFFERS
| wgt::Features::PUSH_CONSTANTS
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| wgt::Features::CLEAR_COMMANDS;
for &(hi, lo) in FEATURE_MAP.iter() {
features.set(hi, adapter_features.contains(lo));
}
features.set(
wgt::Features::TIMESTAMP_QUERY,
properties.limits.timestamp_compute_and_graphics,
);
let private_features = PrivateFeatures {
anisotropic_filtering: adapter_features.contains(hal::Features::SAMPLER_ANISOTROPY),
texture_d24: raw
.physical_device
.format_properties(Some(hal::format::Format::X8D24Unorm))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
texture_d24_s8: raw
.physical_device
.format_properties(Some(hal::format::Format::D24UnormS8Uint))
.optimal_tiling
.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT),
};
let default_limits = wgt::Limits::default();
// All these casts to u32 are safe as the underlying vulkan types are u32s.
// If another backend provides larger limits than u32, we need to clamp them to u32::MAX.
// TODO: fix all gfx-hal backends to produce limits we care about, and remove .max
let desc_limits = &properties.limits.descriptor_limits;
let limits = wgt::Limits {
max_texture_dimension_1d: properties
.limits
.max_image_1d_size
.max(default_limits.max_texture_dimension_1d),
max_texture_dimension_2d: properties
.limits
.max_image_2d_size
.max(default_limits.max_texture_dimension_2d),
max_texture_dimension_3d: properties
.limits
.max_image_3d_size
.max(default_limits.max_texture_dimension_3d),
max_texture_array_layers: (properties.limits.max_image_array_layers as u32)
.max(default_limits.max_texture_array_layers),
max_bind_groups: (properties.limits.max_bound_descriptor_sets as u32)
.min(MAX_BIND_GROUPS as u32)
.max(default_limits.max_bind_groups),
max_dynamic_uniform_buffers_per_pipeline_layout: desc_limits
.max_descriptor_set_uniform_buffers_dynamic
.max(default_limits.max_dynamic_uniform_buffers_per_pipeline_layout),
max_dynamic_storage_buffers_per_pipeline_layout: desc_limits
.max_descriptor_set_storage_buffers_dynamic
.max(default_limits.max_dynamic_storage_buffers_per_pipeline_layout),
max_sampled_textures_per_shader_stage: desc_limits
.max_per_stage_descriptor_sampled_images
.max(default_limits.max_sampled_textures_per_shader_stage),
max_samplers_per_shader_stage: desc_limits
.max_per_stage_descriptor_samplers
.max(default_limits.max_samplers_per_shader_stage),
max_storage_buffers_per_shader_stage: desc_limits
.max_per_stage_descriptor_storage_buffers
.max(default_limits.max_storage_buffers_per_shader_stage),
max_storage_textures_per_shader_stage: desc_limits
.max_per_stage_descriptor_storage_images
.max(default_limits.max_storage_textures_per_shader_stage),
max_uniform_buffers_per_shader_stage: desc_limits
.max_per_stage_descriptor_uniform_buffers
.max(default_limits.max_uniform_buffers_per_shader_stage),
max_uniform_buffer_binding_size: (properties.limits.max_uniform_buffer_range as u32)
.max(default_limits.max_uniform_buffer_binding_size),
max_storage_buffer_binding_size: (properties.limits.max_storage_buffer_range as u32)
.max(default_limits.max_storage_buffer_binding_size),
max_vertex_buffers: (properties.limits.max_vertex_input_bindings as u32)
.max(default_limits.max_vertex_buffers),
max_vertex_attributes: (properties.limits.max_vertex_input_attributes as u32)
.max(default_limits.max_vertex_attributes),
max_vertex_buffer_array_stride: (properties.limits.max_vertex_input_binding_stride
as u32)
.max(default_limits.max_vertex_buffer_array_stride),
max_push_constant_size: (properties.limits.max_push_constants_size as u32)
.max(MIN_PUSH_CONSTANT_SIZE), // As an extension, the default is always 0, so define a separate minimum.
};
let mut downlevel_flags = wgt::DownlevelFlags::empty();
downlevel_flags.set(
wgt::DownlevelFlags::COMPUTE_SHADERS,
properties.downlevel.compute_shaders,
);
downlevel_flags.set(
wgt::DownlevelFlags::STORAGE_IMAGES,
properties.downlevel.storage_images,
);
downlevel_flags.set(
wgt::DownlevelFlags::READ_ONLY_DEPTH_STENCIL,
properties.downlevel.read_only_depth_stencil,
);
downlevel_flags.set(
wgt::DownlevelFlags::DEVICE_LOCAL_IMAGE_COPIES,
properties.downlevel.device_local_image_copies,
);
downlevel_flags.set(
wgt::DownlevelFlags::NON_POWER_OF_TWO_MIPMAPPED_TEXTURES,
properties.downlevel.non_power_of_two_mipmapped_textures,
);
downlevel_flags.set(
wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES,
adapter_features.contains(hal::Features::IMAGE_CUBE_ARRAY),
);
downlevel_flags.set(
wgt::DownlevelFlags::ANISOTROPIC_FILTERING,
private_features.anisotropic_filtering,
);
let downlevel = wgt::DownlevelProperties {
flags: downlevel_flags,
shader_model: match properties.downlevel.shader_model {
hal::DownlevelShaderModel::ShaderModel2 => wgt::ShaderModel::Sm2,
hal::DownlevelShaderModel::ShaderModel4 => wgt::ShaderModel::Sm4,
hal::DownlevelShaderModel::ShaderModel5 => wgt::ShaderModel::Sm5,
},
};
Self {
raw,
features,
private_features,
limits,
downlevel,
life_guard: LifeGuard::new("<Adapter>"),
}
}
pub fn get_swap_chain_preferred_format(
&self,
surface: &mut Surface,
) -> Result<wgt::TextureFormat, GetSwapChainPreferredFormatError> {
let formats = {
let surface = B::get_surface_mut(surface);
let queue_family = &self.raw.queue_families[0];
if !surface.supports_queue_family(queue_family) {
return Err(GetSwapChainPreferredFormatError::UnsupportedQueueFamily);
}
surface.supported_formats(&self.raw.physical_device)
};
if let Some(formats) = formats {
// Check the four formats mentioned in the WebGPU spec:
// Bgra8UnormSrgb, Rgba8UnormSrgb, Bgra8Unorm, Rgba8Unorm
// Also, prefer sRGB over linear as it is better in
// representing perceived colors.
if formats.contains(&hal::format::Format::Bgra8Srgb) {
return Ok(wgt::TextureFormat::Bgra8UnormSrgb);
}
if formats.contains(&hal::format::Format::Rgba8Srgb) {
return Ok(wgt::TextureFormat::Rgba8UnormSrgb);
}
if formats.contains(&hal::format::Format::Bgra8Unorm) {
return Ok(wgt::TextureFormat::Bgra8Unorm);
}
if formats.contains(&hal::format::Format::Rgba8Unorm) {
return Ok(wgt::TextureFormat::Rgba8Unorm);
}
return Err(GetSwapChainPreferredFormatError::NotFound);
}
// If no formats were returned, use Bgra8UnormSrgb
Ok(wgt::TextureFormat::Bgra8UnormSrgb)
}
pub(crate) fn get_texture_format_features(
&self,
format: wgt::TextureFormat,
) -> wgt::TextureFormatFeatures {
let texture_format_properties = self
.raw
.physical_device
.format_properties(Some(conv::map_texture_format(
format,
self.private_features,
)))
.optimal_tiling;
let mut allowed_usages = format.describe().guaranteed_format_features.allowed_usages;
if texture_format_properties.contains(hal::format::ImageFeature::SAMPLED) {
allowed_usages |= wgt::TextureUsage::SAMPLED;
}
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE) {
allowed_usages |= wgt::TextureUsage::STORAGE;
}
if texture_format_properties.contains(hal::format::ImageFeature::COLOR_ATTACHMENT) {
allowed_usages |= wgt::TextureUsage::RENDER_ATTACHMENT;
}
if texture_format_properties.contains(hal::format::ImageFeature::DEPTH_STENCIL_ATTACHMENT) {
allowed_usages |= wgt::TextureUsage::RENDER_ATTACHMENT;
}
let mut flags = wgt::TextureFormatFeatureFlags::empty();
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE_ATOMIC) {
flags |= wgt::TextureFormatFeatureFlags::STORAGE_ATOMICS;
}
if texture_format_properties.contains(hal::format::ImageFeature::STORAGE_READ_WRITE) {
flags |= wgt::TextureFormatFeatureFlags::STORAGE_READ_WRITE;
}
let filterable =
texture_format_properties.contains(hal::format::ImageFeature::SAMPLED_LINEAR);
wgt::TextureFormatFeatures {
allowed_usages,
flags,
filterable,
}
}
fn create_device(
&self,
self_id: AdapterId,
desc: &DeviceDescriptor,
trace_path: Option<&std::path::Path>,
) -> Result<Device<B>, RequestDeviceError> {
// Verify all features were exposed by the adapter
if !self.features.contains(desc.features) {
return Err(RequestDeviceError::UnsupportedFeature(
desc.features - self.features,
));
}
if !self.downlevel.is_webgpu_compliant() {
log::warn!("{}", DOWNLEVEL_WARNING_MESSAGE);
}
// Verify feature preconditions
if desc
.features
.contains(wgt::Features::MAPPABLE_PRIMARY_BUFFERS)
&& self.raw.info.device_type == hal::adapter::DeviceType::DiscreteGpu
{
log::warn!("Feature MAPPABLE_PRIMARY_BUFFERS enabled on a discrete gpu. This is a massive performance footgun and likely not what you wanted");
}
let phd = &self.raw.physical_device;
let available_features = phd.features();
// Check features that are always needed
let wishful_features = hal::Features::ROBUST_BUFFER_ACCESS
| hal::Features::FRAGMENT_STORES_AND_ATOMICS
| hal::Features::NDC_Y_UP
| hal::Features::INDEPENDENT_BLENDING
| hal::Features::SAMPLER_ANISOTROPY
| hal::Features::IMAGE_CUBE_ARRAY
| hal::Features::SAMPLE_RATE_SHADING;
let mut enabled_features = available_features & wishful_features;
if enabled_features != wishful_features {
log::warn!(
"Missing internal features: {:?}",
wishful_features - enabled_features
);
}
// Enable low-level features
for &(hi, lo) in FEATURE_MAP.iter() {
enabled_features.set(lo, desc.features.contains(hi));
}
let family = self
.raw
.queue_families
.iter()
.find(|family| family.queue_type().supports_graphics())
.ok_or(RequestDeviceError::NoGraphicsQueue)?;
let mut gpu =
unsafe { phd.open(&[(family, &[1.0])], enabled_features) }.map_err(|err| {
use hal::device::CreationError::*;
match err {
DeviceLost => RequestDeviceError::DeviceLost,
InitializationFailed => RequestDeviceError::Internal,
OutOfMemory(_) => RequestDeviceError::OutOfMemory,
_ => panic!("failed to create `gfx-hal` device: {}", err),
}
})?;
if let Some(_) = desc.label {
//TODO
}
let limits = phd.properties().limits;
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_storage_buffer_offset_alignment,
"Adapter storage buffer offset alignment not compatible with WGPU"
);
assert_eq!(
0,
BIND_BUFFER_ALIGNMENT % limits.min_uniform_buffer_offset_alignment,
"Adapter uniform buffer offset alignment not compatible with WGPU"
);
if self.limits < desc.limits {
return Err(RequestDeviceError::LimitsExceeded);
}
let mem_props = phd.memory_properties();
Device::new(
gpu.device,
Stored {
value: Valid(self_id),
ref_count: self.life_guard.add_ref(),
},
gpu.queue_groups.swap_remove(0),
mem_props,
limits,
self.private_features,
self.downlevel,
desc,
trace_path,
)
.or(Err(RequestDeviceError::OutOfMemory))
}
}
impl<B: hal::Backend> crate::hub::Resource for Adapter<B> {
const TYPE: &'static str = "Adapter";
fn life_guard(&self) -> &LifeGuard {
&self.life_guard
}
}
#[derive(Clone, Debug, Error)]
pub enum GetSwapChainPreferredFormatError {
#[error("no suitable format found")]
NotFound,
#[error("invalid adapter")]
InvalidAdapter,
#[error("invalid surface")]
InvalidSurface,
#[error("surface does not support the adapter's queue family")]
UnsupportedQueueFamily,
}
#[derive(Clone, Debug, Error)]
/// Error when requesting a device from the adapter
pub enum RequestDeviceError {
#[error("parent adapter is invalid")]
InvalidAdapter,
#[error("connection to device was lost during initialization")]
DeviceLost,
#[error("device initialization failed due to implementation specific errors")]
Internal,
#[error("some of the requested device limits are not supported")]
LimitsExceeded,
#[error("device has no queue supporting graphics")]
NoGraphicsQueue,
#[error("not enough memory left")]
OutOfMemory,
#[error("unsupported features were requested: {0:?}")]
UnsupportedFeature(wgt::Features),
}
pub enum AdapterInputs<'a, I> {
IdSet(&'a [I], fn(&I) -> Backend),
Mask(BackendBit, fn(Backend) -> I),
}
impl<I: Clone> AdapterInputs<'_, I> {
fn find(&self, b: Backend) -> Option<I> {
match *self {
Self::IdSet(ids, ref fun) => ids.iter().find(|id| fun(id) == b).cloned(),
Self::Mask(bits, ref fun) => {
if bits.contains(b.into()) {
Some(fun(b))
} else {
None
}
}
}
}
}
#[derive(Clone, Debug, Error)]
#[error("adapter is invalid")]
pub struct InvalidAdapter;
#[derive(Clone, Debug, Error)]
pub enum RequestAdapterError {
#[error("no suitable adapter found")]
NotFound,
#[error("surface {0:?} is invalid")]
InvalidSurface(SurfaceId),
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
#[cfg(feature = "raw-window-handle")]
pub fn instance_create_surface(
&self,
handle: &impl raw_window_handle::HasRawWindowHandle,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
profiling::scope!("create_surface", "Instance");
let surface = unsafe {
backends_map! {
let map = |inst| {
inst
.as_ref()
.and_then(|inst| inst.create_surface(handle).map_err(|e| {
log::warn!("Error: {:?}", e);
}).ok())
};
Surface {
#[cfg(vulkan)]
vulkan: map(&self.instance.vulkan),
#[cfg(metal)]
metal: map(&self.instance.metal),
#[cfg(dx12)]
dx12: map(&self.instance.dx12),
#[cfg(dx11)]
dx11: map(&self.instance.dx11),
#[cfg(gl)]
gl: map(&self.instance.gl),
}
}
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0
}
#[cfg(metal)]
pub fn instance_create_surface_metal(
&self,
layer: *mut std::ffi::c_void,
id_in: Input<G, SurfaceId>,
) -> SurfaceId {
profiling::scope!("create_surface_metal", "Instance");
let surface = Surface {
metal: self.instance.metal.as_ref().map(|inst| {
// we don't want to link to metal-rs for this
#[allow(clippy::transmute_ptr_to_ref)]
inst.create_surface_from_layer(unsafe { std::mem::transmute(layer) })
}),
};
let mut token = Token::root();
let id = self.surfaces.prepare(id_in).assign(surface, &mut token);
id.0
}
pub fn surface_drop(&self, id: SurfaceId) {
profiling::scope!("drop", "Surface");
let mut token = Token::root();
let (surface, _) = self.surfaces.unregister(id, &mut token);
self.instance.destroy_surface(surface.unwrap());
}
pub fn enumerate_adapters(&self, inputs: AdapterInputs<Input<G, AdapterId>>) -> Vec<AdapterId> {
profiling::scope!("enumerate_adapters", "Instance");
let instance = &self.instance;
let mut token = Token::root();
let mut adapters = Vec::new();
backends_map! {
let map = |(instance_field, backend, backend_info, backend_hub)| {
if let Some(ref inst) = *instance_field {
let hub = backend_hub(self);
if let Some(id_backend) = inputs.find(backend) {
for raw in inst.enumerate_adapters() {
let adapter = Adapter::new(raw);
log::info!("Adapter {} {:?}", backend_info, adapter.raw.info);
let id = hub.adapters
.prepare(id_backend.clone())
.assign(adapter, &mut token);
adapters.push(id.0);
}
}
}
};
#[cfg(vulkan)]
map((&instance.vulkan, Backend::Vulkan, "Vulkan", backend::Vulkan::hub)),
#[cfg(metal)]
map((&instance.metal, Backend::Metal, "Metal", backend::Metal::hub)),
#[cfg(dx12)]
map((&instance.dx12, Backend::Dx12, "Dx12", backend::Dx12::hub)),
#[cfg(dx11)]
map((&instance.dx11, Backend::Dx11, "Dx11", backend::Dx11::hub)),
#[cfg(gl)]
map((&instance.gl, Backend::Gl, "GL", backend::Gl::hub)),
}
adapters
}
pub fn request_adapter(
&self,
desc: &RequestAdapterOptions,
inputs: AdapterInputs<Input<G, AdapterId>>,
) -> Result<AdapterId, RequestAdapterError> {
profiling::scope!("pick_adapter", "Instance");
let instance = &self.instance;
let mut token = Token::root();
let (surface_guard, mut token) = self.surfaces.read(&mut token);
let compatible_surface = desc
.compatible_surface
.map(|id| {
surface_guard
.get(id)
.map_err(|_| RequestAdapterError::InvalidSurface(id))
})
.transpose()?;
let mut device_types = Vec::new();
let mut id_vulkan = inputs.find(Backend::Vulkan);
let mut id_metal = inputs.find(Backend::Metal);
let mut id_dx12 = inputs.find(Backend::Dx12);
let mut id_dx11 = inputs.find(Backend::Dx11);
let mut id_gl = inputs.find(Backend::Gl);
backends_map! {
let map = |(instance_backend, id_backend, surface_backend)| {
match *instance_backend {
Some(ref inst) if id_backend.is_some() => {
let mut adapters = inst.enumerate_adapters();
if let Some(surface_backend) = compatible_surface.and_then(surface_backend) {
adapters.retain(|a| {
a.queue_families
.iter()
.find(|qf| qf.queue_type().supports_graphics())
.map_or(false, |qf| surface_backend.supports_queue_family(qf))
});
}
device_types.extend(adapters.iter().map(|ad| ad.info.device_type.clone()));
adapters
}
_ => Vec::new(),
}
};
// NB: The internal function definitions are a workaround for Rust
// being weird with lifetimes for closure literals...
#[cfg(vulkan)]
let adapters_vk = map((&instance.vulkan, &id_vulkan, {
fn surface_vulkan(surf: &Surface) -> Option<&GfxSurface<backend::Vulkan>> {
surf.vulkan.as_ref()
}
surface_vulkan
}));
#[cfg(metal)]
let adapters_mtl = map((&instance.metal, &id_metal, {
fn surface_metal(surf: &Surface) -> Option<&GfxSurface<backend::Metal>> {
surf.metal.as_ref()
}
surface_metal
}));
#[cfg(dx12)]
let adapters_dx12 = map((&instance.dx12, &id_dx12, {
fn surface_dx12(surf: &Surface) -> Option<&GfxSurface<backend::Dx12>> {
surf.dx12.as_ref()
}
surface_dx12
}));
#[cfg(dx11)]
let adapters_dx11 = map((&instance.dx11, &id_dx11, {
fn surface_dx11(surf: &Surface) -> Option<&GfxSurface<backend::Dx11>> {
surf.dx11.as_ref()
}
surface_dx11
}));
#[cfg(gl)]
let adapters_gl = map((&instance.gl, &id_gl, {
fn surface_gl(surf: &Surface) -> Option<&GfxSurface<backend::Gl>> {
surf.gl.as_ref()
}
surface_gl
}));
}
if device_types.is_empty() {
return Err(RequestAdapterError::NotFound);
}
let (mut integrated, mut discrete, mut virt, mut cpu, mut other) =
(None, None, None, None, None);
for (i, ty) in device_types.into_iter().enumerate() {
match ty {
hal::adapter::DeviceType::IntegratedGpu => {
integrated = integrated.or(Some(i));
}
hal::adapter::DeviceType::DiscreteGpu => {
discrete = discrete.or(Some(i));
}
hal::adapter::DeviceType::VirtualGpu => {
virt = virt.or(Some(i));
}
hal::adapter::DeviceType::Cpu => {
cpu = cpu.or(Some(i));
}
hal::adapter::DeviceType::Other => {
other = other.or(Some(i));
}
}
}
let preferred_gpu = match desc.power_preference {
PowerPreference::LowPower => integrated.or(other).or(discrete).or(virt).or(cpu),
PowerPreference::HighPerformance => discrete.or(other).or(integrated).or(virt).or(cpu),
};
let mut selected = preferred_gpu.unwrap_or(0);
backends_map! {
let map = |(info_adapter, id_backend, mut adapters_backend, backend_hub)| {
if selected < adapters_backend.len() {
let adapter = Adapter::new(adapters_backend.swap_remove(selected));
log::info!("Adapter {} {:?}", info_adapter, adapter.raw.info);
let id = backend_hub(self).adapters
.prepare(id_backend.take().unwrap())
.assign(adapter, &mut token);
return Ok(id.0);
}
selected -= adapters_backend.len();
};
#[cfg(vulkan)]
map(("Vulkan", &mut id_vulkan, adapters_vk, backend::Vulkan::hub)),
#[cfg(metal)]
map(("Metal", &mut id_metal, adapters_mtl, backend::Metal::hub)),
#[cfg(dx12)]
map(("Dx12", &mut id_dx12, adapters_dx12, backend::Dx12::hub)),
#[cfg(dx11)]
map(("Dx11", &mut id_dx11, adapters_dx11, backend::Dx11::hub)),
#[cfg(gl)]
map(("GL", &mut id_gl, adapters_gl, backend::Gl::hub)),
}
let _ = (
selected,
id_vulkan.take(),
id_metal.take(),
id_dx12.take(),
id_dx11.take(),
id_gl.take(),
);
log::warn!("Some adapters are present, but enumerating them failed!");
Err(RequestAdapterError::NotFound)
}
pub fn adapter_get_info<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::AdapterInfo, InvalidAdapter> {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| conv::map_adapter_info(adapter.raw.info.clone(), adapter_id.backend()))
.map_err(|_| InvalidAdapter)
}
pub fn adapter_get_texture_format_features<B: GfxBackend>(
&self,
adapter_id: AdapterId,
format: wgt::TextureFormat,
) -> Result<wgt::TextureFormatFeatures, InvalidAdapter> {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.get_texture_format_features(format))
.map_err(|_| InvalidAdapter)
}
pub fn adapter_features<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::Features, InvalidAdapter> {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.features)
.map_err(|_| InvalidAdapter)
}
pub fn adapter_limits<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::Limits, InvalidAdapter> {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.limits.clone())
.map_err(|_| InvalidAdapter)
}
pub fn adapter_downlevel_properties<B: GfxBackend>(
&self,
adapter_id: AdapterId,
) -> Result<wgt::DownlevelProperties, InvalidAdapter> {
let hub = B::hub(self);
let mut token = Token::root();
let (adapter_guard, _) = hub.adapters.read(&mut token);
adapter_guard
.get(adapter_id)
.map(|adapter| adapter.downlevel)
.map_err(|_| InvalidAdapter)
}
pub fn adapter_drop<B: GfxBackend>(&self, adapter_id: AdapterId) {
profiling::scope!("drop", "Adapter");
let hub = B::hub(self);
let mut token = Token::root();
let (mut adapter_guard, _) = hub.adapters.write(&mut token);
let free = match adapter_guard.get_mut(adapter_id) {
Ok(adapter) => adapter.life_guard.ref_count.take().unwrap().load() == 1,
Err(_) => true,
};
if free {
hub.adapters
.unregister_locked(adapter_id, &mut *adapter_guard);
}
}
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn adapter_request_device<B: GfxBackend>(
&self,
adapter_id: AdapterId,
desc: &DeviceDescriptor,
trace_path: Option<&std::path::Path>,
id_in: Input<G, DeviceId>,
) -> (DeviceId, Option<RequestDeviceError>) {
profiling::scope!("request_device", "Adapter");
let hub = B::hub(self);
let mut token = Token::root();
let fid = hub.devices.prepare(id_in);
let error = loop {
let (adapter_guard, mut token) = hub.adapters.read(&mut token);
let adapter = match adapter_guard.get(adapter_id) {
Ok(adapter) => adapter,
Err(_) => break RequestDeviceError::InvalidAdapter,
};
let device = match adapter.create_device(adapter_id, desc, trace_path) {
Ok(device) => device,
Err(e) => break e,
};
let id = fid.assign(device, &mut token);
return (id.0, None);
};
let id = fid.assign_error(desc.label.borrow_or_default(), &mut token);
(id, Some(error))
}
}


@ -1,307 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Swap chain management.
## Lifecycle
At the low level, the swap chain uses the new simplified model of gfx-rs.
A swap chain is a separate object that is backend-dependent but shares the index with
the parent surface, which is backend-independent. This ensures a 1:1 correspondence
between them.
`get_next_image()` requests a new image from the surface. It becomes part of
`TextureViewInner::SwapChain` of the resulting view. The view is registered in the HUB
but not in the device tracker.
The only operation allowed on the view is to be either a color or a resolve attachment.
It can only be used in one command buffer, which needs to be submitted before presenting.
The command buffer tracker knows about the view, but only for the duration of recording.
The view ID is erased from it at the end, so that it's not merged into the device tracker.
When a swapchain view is used in `begin_render_pass()`, we assume the start and end image
layouts purely based on whether or not this view was used in this command buffer before.
It always starts with `Uninitialized` and ends with `Present`, so that no barriers are
needed when we need to actually present it.
In `queue_submit()` we make sure to signal the semaphore whenever we render to a swap
chain view.
In `present()` we return the swap chain image back and wait on the semaphore.
!*/
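// Illustrative sketch only (not part of the original file): one way a caller
// could drive the lifecycle described above, using the two `Global` entry
// points defined later in this module. The function name `present_one_frame`
// is hypothetical, and command recording/submission is elided.
#[allow(dead_code)]
fn present_one_frame<G: GlobalIdentityHandlerFactory, B: GfxBackend>(
    global: &Global<G>,
    swap_chain_id: SwapChainId,
    view_id_in: Input<G, TextureViewId>,
) -> Result<SwapChainStatus, SwapChainError> {
    // Acquire the next image; the returned view may only be used as a color
    // or resolve attachment of a single command buffer.
    let _output = global.swap_chain_get_current_texture_view::<B>(swap_chain_id, view_id_in)?;
    // ... record a render pass targeting the acquired view and submit it;
    // the submission signals the swap chain's semaphore ...
    // Return the image to the surface, waiting on the semaphore if needed.
    global.swap_chain_present::<B>(swap_chain_id)
}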
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
conv,
device::DeviceError,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId, Valid},
resource,
track::TextureSelector,
LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
};
use hal::{queue::Queue as _, window::PresentationSurface as _};
use thiserror::Error;
use wgt::{SwapChainDescriptor, SwapChainStatus};
const FRAME_TIMEOUT_MS: u64 = 1000;
pub const DESIRED_NUM_FRAMES: u32 = 3;
#[derive(Debug)]
pub struct SwapChain<B: hal::Backend> {
pub(crate) life_guard: LifeGuard,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) desc: SwapChainDescriptor,
pub(crate) num_frames: hal::window::SwapImageIndex,
pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
pub(crate) active_submission_index: SubmissionIndex,
pub(crate) framebuffer_attachment: hal::image::FramebufferAttachment,
}
impl<B: hal::Backend> crate::hub::Resource for SwapChain<B> {
const TYPE: &'static str = "SwapChain";
fn life_guard(&self) -> &LifeGuard {
&self.life_guard
}
}
#[derive(Clone, Debug, Error)]
pub enum SwapChainError {
#[error("swap chain is invalid")]
Invalid,
#[error("parent surface is invalid")]
InvalidSurface,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("swap chain image is already acquired")]
AlreadyAcquired,
#[error("acquired frame is still referenced")]
StillReferenced,
}
#[derive(Clone, Debug, Error)]
pub enum CreateSwapChainError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("invalid surface")]
InvalidSurface,
#[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")]
SwapChainOutputExists,
#[error("Both `SwapChain` width and height must be non-zero. Wait to recreate the `SwapChain` until the window has non-zero area.")]
ZeroArea,
#[error("surface does not support the adapter's queue family")]
UnsupportedQueueFamily,
#[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
UnsupportedFormat {
requested: hal::format::Format,
available: Vec<hal::format::Format>,
},
}
pub(crate) fn swap_chain_descriptor_to_hal(
desc: &SwapChainDescriptor,
num_frames: u32,
private_features: PrivateFeatures,
) -> hal::window::SwapchainConfig {
let mut config = hal::window::SwapchainConfig::new(
desc.width,
desc.height,
conv::map_texture_format(desc.format, private_features),
num_frames,
);
//TODO: check for supported
config.image_usage = conv::map_texture_usage(desc.usage, hal::format::Aspects::COLOR);
config.composite_alpha_mode = hal::window::CompositeAlphaMode::OPAQUE;
config.present_mode = match desc.present_mode {
wgt::PresentMode::Immediate => hal::window::PresentMode::IMMEDIATE,
wgt::PresentMode::Mailbox => hal::window::PresentMode::MAILBOX,
wgt::PresentMode::Fifo => hal::window::PresentMode::FIFO,
};
config
}
#[repr(C)]
#[derive(Debug)]
pub struct SwapChainOutput {
pub status: SwapChainStatus,
pub view_id: Option<TextureViewId>,
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn swap_chain_get_current_texture_view<B: GfxBackend>(
&self,
swap_chain_id: SwapChainId,
view_id_in: Input<G, TextureViewId>,
) -> Result<SwapChainOutput, SwapChainError> {
profiling::scope!("get_next_texture", "SwapChain");
let hub = B::hub(self);
let mut token = Token::root();
let fid = hub.texture_views.prepare(view_id_in);
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
#[allow(unused_variables)]
let device = &device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::GetSwapChainTexture {
id: fid.id(),
parent_id: swap_chain_id,
});
}
let suf = B::get_surface_mut(surface);
let (image, status) = match unsafe { suf.acquire_image(FRAME_TIMEOUT_MS * 1_000_000) } {
Ok((surface_image, None)) => (Some(surface_image), SwapChainStatus::Good),
Ok((surface_image, Some(_))) => (Some(surface_image), SwapChainStatus::Suboptimal),
Err(err) => (
None,
match err {
hal::window::AcquireError::OutOfMemory(_) => {
return Err(DeviceError::OutOfMemory.into())
}
hal::window::AcquireError::NotReady { .. } => SwapChainStatus::Timeout,
hal::window::AcquireError::OutOfDate(_) => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => {
return Err(DeviceError::Lost.into())
}
},
),
};
let view_id = match image {
Some(image) => {
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: Valid(swap_chain_id),
ref_count: sc.life_guard.add_ref(),
},
},
aspects: hal::format::Aspects::COLOR,
format: sc.desc.format,
format_features: wgt::TextureFormatFeatures {
allowed_usages: wgt::TextureUsage::RENDER_ATTACHMENT,
flags: wgt::TextureFormatFeatureFlags::empty(),
filterable: false,
},
dimension: wgt::TextureViewDimension::D2,
extent: wgt::Extent3d {
width: sc.desc.width,
height: sc.desc.height,
depth_or_array_layers: 1,
},
samples: 1,
framebuffer_attachment: sc.framebuffer_attachment.clone(),
sampled_internal_use: resource::TextureUse::empty(),
selector: TextureSelector {
layers: 0..1,
levels: 0..1,
},
life_guard: LifeGuard::new("<SwapChain View>"),
};
let ref_count = view.life_guard.add_ref();
let id = fid.assign(view, &mut token);
if sc.acquired_view_id.is_some() {
return Err(SwapChainError::AlreadyAcquired);
}
sc.acquired_view_id = Some(Stored {
value: id,
ref_count,
});
Some(id.0)
}
None => None,
};
Ok(SwapChainOutput { status, view_id })
}
pub fn swap_chain_present<B: GfxBackend>(
&self,
swap_chain_id: SwapChainId,
) -> Result<SwapChainStatus, SwapChainError> {
profiling::scope!("present", "SwapChain");
let hub = B::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
let device = &mut device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::PresentSwapChain(swap_chain_id));
}
let view = {
let view_id = sc
.acquired_view_id
.take()
.ok_or(SwapChainError::AlreadyAcquired)?;
let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
view_maybe.ok_or(SwapChainError::Invalid)?
};
if view.life_guard.ref_count.unwrap().load() != 1 {
return Err(SwapChainError::StillReferenced);
}
let image = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain { image, .. } => image,
};
let sem = if sc.active_submission_index > device.last_completed_submission_index() {
Some(&mut sc.semaphore)
} else {
None
};
let queue = &mut device.queue_group.queues[0];
let result = unsafe { queue.present(B::get_surface_mut(surface), image, sem) };
log::debug!("Presented. End of Frame");
match result {
Ok(None) => Ok(SwapChainStatus::Good),
Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
Err(err) => match err {
hal::window::PresentError::OutOfMemory(_) => {
Err(SwapChainError::Device(DeviceError::OutOfMemory))
}
hal::window::PresentError::OutOfDate(_) => Ok(SwapChainStatus::Outdated),
hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
hal::window::PresentError::DeviceLost(_) => {
Err(SwapChainError::Device(DeviceError::Lost))
}
},
}
}
}


@ -15,13 +15,20 @@ publish = false
default = []
[dependencies.wgc]
path = "../wgpu/wgpu-core"
package = "wgpu-core"
features = ["cross", "replay", "trace", "serial-pass"]
git = "https://github.com/gfx-rs/wgpu"
rev = "d23288e"
features = ["replay", "trace", "serial-pass"]
[dependencies.wgt]
path = "../wgpu/wgpu-types"
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "d23288e"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "d23288e"
[dependencies]
bincode = "1"


@ -47,9 +47,9 @@ exclude = [
[parse]
parse_deps = true
include = ["wgpu-core", "wgpu-types"]
include = ["wgpu-core", "wgpu-types", "wgpu-hal"]
extra_bindings = ["wgpu-core", "wgpu-types"]
extra_bindings = ["wgpu-core", "wgpu-types", "wgpu-hal"]
[fn]
prefix = "WGPU_INLINE"


@ -11,7 +11,14 @@ EXPORTS.mozilla.webgpu.ffi += [
UNIFIED_SOURCES += []
if CONFIG["COMPILE_ENVIRONMENT"]:
CbindgenHeader("wgpu_ffi_generated.h", inputs=["/gfx/wgpu_bindings", "/gfx/wgpu"])
CbindgenHeader(
"wgpu_ffi_generated.h",
inputs=[
"/gfx/wgpu_bindings",
"/third_party/rust/wgpu-core",
"/third_party/rust/wgpu-types",
],
)
EXPORTS.mozilla.webgpu.ffi += [
"!wgpu_ffi_generated.h",


@ -4,7 +4,7 @@
use crate::{
cow_label, AdapterInformation, ByteBuf, CommandEncoderAction, DeviceAction, DropAction,
ImplicitLayout, QueueWriteAction, RawString, ShaderModuleSource, TextureAction,
ImplicitLayout, QueueWriteAction, RawString, TextureAction,
};
use wgc::{hub::IdentityManager, id};
@ -68,7 +68,7 @@ pub struct ComputePipelineDescriptor {
#[repr(C)]
pub struct VertexBufferLayout {
array_stride: wgt::BufferAddress,
step_mode: wgt::InputStepMode,
step_mode: wgt::VertexStepMode,
attributes: *const wgt::VertexAttribute,
attributes_length: usize,
}
@ -101,7 +101,7 @@ impl VertexState {
pub struct ColorTargetState<'a> {
format: wgt::TextureFormat,
blend: Option<&'a wgt::BlendState>,
write_mask: wgt::ColorWrite,
write_mask: wgt::ColorWrites,
}
#[repr(C)]
@ -186,7 +186,7 @@ pub enum RawBindingType {
#[repr(C)]
pub struct BindGroupLayoutEntry<'a> {
binding: u32,
visibility: wgt::ShaderStage,
visibility: wgt::ShaderStages,
ty: RawBindingType,
has_dynamic_offset: bool,
min_binding_size: Option<wgt::BufferSize>,
@ -288,7 +288,7 @@ impl ImplicitLayout<'_> {
ImplicitLayout {
pipeline: identities.pipeline_layouts.alloc(backend),
bind_groups: Cow::Owned(
(0..wgc::MAX_BIND_GROUPS)
(0..8) // hal::MAX_BIND_GROUPS
.map(|_| identities.bind_group_layouts.alloc(backend))
.collect(),
),
@ -622,7 +622,13 @@ pub extern "C" fn wgpu_device_create_render_bundle_encoder(
let descriptor = wgc::command::RenderBundleEncoderDescriptor {
label: cow_label(&desc.label),
color_formats: Cow::Borrowed(make_slice(desc.color_formats, desc.color_formats_length)),
depth_stencil_format: desc.depth_stencil_format.cloned(),
depth_stencil: desc
.depth_stencil_format
.map(|&format| wgt::RenderBundleDepthStencil {
format,
depth_read_only: false, //TODO: add to `RenderBundleEncoderDescriptor`
stencil_read_only: false,
}),
sample_count: desc.sample_count,
};
match wgc::command::RenderBundleEncoder::new(&descriptor, device_id, None) {
@ -907,19 +913,12 @@ pub unsafe extern "C" fn wgpu_client_create_shader_module(
.shader_modules
.alloc(backend);
let source = match cow_label(&desc.wgsl_chars) {
Some(code) => ShaderModuleSource::Wgsl(code),
None => ShaderModuleSource::SpirV(Cow::Borrowed(make_slice(
desc.spirv_words,
desc.spirv_words_length,
))),
};
let code = cow_label(&desc.wgsl_chars).unwrap_or_default();
let desc = wgc::pipeline::ShaderModuleDescriptor {
label: cow_label(&desc.label),
flags: wgt::ShaderFlags::VALIDATION, // careful here!
};
let action = DeviceAction::CreateShaderModule(id, desc, source);
let action = DeviceAction::CreateShaderModule(id, desc, code);
*bb = make_byte_buf(&action);
id
}


@ -34,7 +34,6 @@ pub struct IdentityRecyclerFactory {
param: FactoryParam,
free_adapter: extern "C" fn(id::AdapterId, FactoryParam),
free_device: extern "C" fn(id::DeviceId, FactoryParam),
free_swap_chain: extern "C" fn(id::SwapChainId, FactoryParam),
free_pipeline_layout: extern "C" fn(id::PipelineLayoutId, FactoryParam),
free_shader_module: extern "C" fn(id::ShaderModuleId, FactoryParam),
free_bind_group_layout: extern "C" fn(id::BindGroupLayoutId, FactoryParam),
@ -71,16 +70,6 @@ impl wgc::hub::IdentityHandlerFactory<id::DeviceId> for IdentityRecyclerFactory
}
}
}
impl wgc::hub::IdentityHandlerFactory<id::SwapChainId> for IdentityRecyclerFactory {
type Filter = IdentityRecycler<id::SwapChainId>;
fn spawn(&self, _min_index: u32) -> Self::Filter {
IdentityRecycler {
fun: self.free_swap_chain,
param: self.param,
kind: "swap_chain",
}
}
}
impl wgc::hub::IdentityHandlerFactory<id::PipelineLayoutId> for IdentityRecyclerFactory {
type Filter = IdentityRecycler<id::PipelineLayoutId>;
fn spawn(&self, _min_index: u32) -> Self::Filter {


@ -67,12 +67,6 @@ pub struct AdapterInformation {
features: wgt::Features,
}
#[derive(serde::Serialize, serde::Deserialize)]
enum ShaderModuleSource<'a> {
SpirV(Cow<'a, [u32]>),
Wgsl(Cow<'a, str>),
}
#[derive(serde::Serialize, serde::Deserialize)]
struct ImplicitLayout<'a> {
pipeline: id::PipelineLayoutId,
@ -96,7 +90,7 @@ enum DeviceAction<'a> {
CreateShaderModule(
id::ShaderModuleId,
wgc::pipeline::ShaderModuleDescriptor<'a>,
ShaderModuleSource<'a>,
Cow<'a, str>,
),
CreateComputePipeline(
id::ComputePipelineId,


@ -4,8 +4,7 @@
use crate::{
cow_label, identity::IdentityRecyclerFactory, AdapterInformation, ByteBuf,
CommandEncoderAction, DeviceAction, DropAction, QueueWriteAction, RawString,
ShaderModuleSource, TextureAction,
CommandEncoderAction, DeviceAction, DropAction, QueueWriteAction, RawString, TextureAction,
};
use wgc::{gfx_select, id};
@ -63,7 +62,7 @@ pub extern "C" fn wgpu_server_new(factory: IdentityRecyclerFactory) -> *mut Glob
let global = Global(wgc::hub::Global::new(
"wgpu",
factory,
wgt::BackendBit::PRIMARY,
wgt::Backends::PRIMARY | wgt::Backends::GL,
));
Box::into_raw(Box::new(global))
}
@ -231,19 +230,19 @@ pub extern "C" fn wgpu_server_buffer_drop(global: &Global, self_id: id::BufferId
}
trait GlobalExt {
fn device_action<B: wgc::hub::GfxBackend>(
fn device_action<A: wgc::hub::HalApi>(
&self,
self_id: id::DeviceId,
action: DeviceAction,
error_buf: ErrorBuffer,
);
fn texture_action<B: wgc::hub::GfxBackend>(
fn texture_action<A: wgc::hub::HalApi>(
&self,
self_id: id::TextureId,
action: TextureAction,
error_buf: ErrorBuffer,
);
fn command_encoder_action<B: wgc::hub::GfxBackend>(
fn command_encoder_action<A: wgc::hub::HalApi>(
&self,
self_id: id::CommandEncoderId,
action: CommandEncoderAction,
@ -252,7 +251,7 @@ trait GlobalExt {
}
impl GlobalExt for Global {
fn device_action<B: wgc::hub::GfxBackend>(
fn device_action<A: wgc::hub::HalApi>(
&self,
self_id: id::DeviceId,
action: DeviceAction,
@ -260,49 +259,44 @@ impl GlobalExt for Global {
) {
match action {
DeviceAction::CreateBuffer(id, desc) => {
let (_, error) = self.device_create_buffer::<B>(self_id, &desc, id);
let (_, error) = self.device_create_buffer::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateTexture(id, desc) => {
let (_, error) = self.device_create_texture::<B>(self_id, &desc, id);
let (_, error) = self.device_create_texture::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateSampler(id, desc) => {
let (_, error) = self.device_create_sampler::<B>(self_id, &desc, id);
let (_, error) = self.device_create_sampler::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateBindGroupLayout(id, desc) => {
let (_, error) = self.device_create_bind_group_layout::<B>(self_id, &desc, id);
let (_, error) = self.device_create_bind_group_layout::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreatePipelineLayout(id, desc) => {
let (_, error) = self.device_create_pipeline_layout::<B>(self_id, &desc, id);
let (_, error) = self.device_create_pipeline_layout::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateBindGroup(id, desc) => {
let (_, error) = self.device_create_bind_group::<B>(self_id, &desc, id);
let (_, error) = self.device_create_bind_group::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateShaderModule(id, desc, source) => {
let source = match source {
ShaderModuleSource::SpirV(data) => {
wgc::pipeline::ShaderModuleSource::SpirV(data)
}
ShaderModuleSource::Wgsl(data) => wgc::pipeline::ShaderModuleSource::Wgsl(data),
};
let (_, error) = self.device_create_shader_module::<B>(self_id, &desc, source, id);
DeviceAction::CreateShaderModule(id, desc, code) => {
let source = wgc::pipeline::ShaderModuleSource::Wgsl(code);
let (_, error) = self.device_create_shader_module::<A>(self_id, &desc, source, id);
if let Some(err) = error {
error_buf.init(err);
}
@ -315,7 +309,7 @@ impl GlobalExt for Global {
group_ids: &imp.bind_groups,
});
let (_, error) =
self.device_create_compute_pipeline::<B>(self_id, &desc, id, implicit_ids);
self.device_create_compute_pipeline::<A>(self_id, &desc, id, implicit_ids);
if let Some(err) = error {
error_buf.init(err);
}
@ -328,19 +322,19 @@ impl GlobalExt for Global {
group_ids: &imp.bind_groups,
});
let (_, error) =
self.device_create_render_pipeline::<B>(self_id, &desc, id, implicit_ids);
self.device_create_render_pipeline::<A>(self_id, &desc, id, implicit_ids);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateRenderBundle(id, encoder, desc) => {
let (_, error) = self.render_bundle_encoder_finish::<B>(encoder, &desc, id);
let (_, error) = self.render_bundle_encoder_finish::<A>(encoder, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
}
DeviceAction::CreateCommandEncoder(id, desc) => {
let (_, error) = self.device_create_command_encoder::<B>(self_id, &desc, id);
let (_, error) = self.device_create_command_encoder::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
@ -348,7 +342,7 @@ impl GlobalExt for Global {
}
}
fn texture_action<B: wgc::hub::GfxBackend>(
fn texture_action<A: wgc::hub::HalApi>(
&self,
self_id: id::TextureId,
action: TextureAction,
@ -356,7 +350,7 @@ impl GlobalExt for Global {
) {
match action {
TextureAction::CreateView(id, desc) => {
let (_, error) = self.texture_create_view::<B>(self_id, &desc, id);
let (_, error) = self.texture_create_view::<A>(self_id, &desc, id);
if let Some(err) = error {
error_buf.init(err);
}
@ -364,7 +358,7 @@ impl GlobalExt for Global {
}
}
fn command_encoder_action<B: wgc::hub::GfxBackend>(
fn command_encoder_action<A: wgc::hub::HalApi>(
&self,
self_id: id::CommandEncoderId,
action: CommandEncoderAction,
@ -378,7 +372,7 @@ impl GlobalExt for Global {
dst_offset,
size,
} => {
if let Err(err) = self.command_encoder_copy_buffer_to_buffer::<B>(
if let Err(err) = self.command_encoder_copy_buffer_to_buffer::<A>(
self_id, src, src_offset, dst, dst_offset, size,
) {
error_buf.init(err);
@ -386,28 +380,28 @@ impl GlobalExt for Global {
}
CommandEncoderAction::CopyBufferToTexture { src, dst, size } => {
if let Err(err) =
self.command_encoder_copy_buffer_to_texture::<B>(self_id, &src, &dst, &size)
self.command_encoder_copy_buffer_to_texture::<A>(self_id, &src, &dst, &size)
{
error_buf.init(err);
}
}
CommandEncoderAction::CopyTextureToBuffer { src, dst, size } => {
if let Err(err) =
self.command_encoder_copy_texture_to_buffer::<B>(self_id, &src, &dst, &size)
self.command_encoder_copy_texture_to_buffer::<A>(self_id, &src, &dst, &size)
{
error_buf.init(err);
}
}
CommandEncoderAction::CopyTextureToTexture { src, dst, size } => {
if let Err(err) =
self.command_encoder_copy_texture_to_texture::<B>(self_id, &src, &dst, &size)
self.command_encoder_copy_texture_to_texture::<A>(self_id, &src, &dst, &size)
{
error_buf.init(err);
}
}
CommandEncoderAction::RunComputePass { base } => {
if let Err(err) =
self.command_encoder_run_compute_pass_impl::<B>(self_id, base.as_ref())
self.command_encoder_run_compute_pass_impl::<A>(self_id, base.as_ref())
{
error_buf.init(err);
}
@ -417,7 +411,7 @@ impl GlobalExt for Global {
query_index,
} => {
if let Err(err) =
self.command_encoder_write_timestamp::<B>(self_id, query_set_id, query_index)
self.command_encoder_write_timestamp::<A>(self_id, query_set_id, query_index)
{
error_buf.init(err);
}
@ -429,7 +423,7 @@ impl GlobalExt for Global {
destination,
destination_offset,
} => {
if let Err(err) = self.command_encoder_resolve_query_set::<B>(
if let Err(err) = self.command_encoder_resolve_query_set::<A>(
self_id,
query_set_id,
start_query,
@ -445,7 +439,7 @@ impl GlobalExt for Global {
target_colors,
target_depth_stencil,
} => {
if let Err(err) = self.command_encoder_run_render_pass_impl::<B>(
if let Err(err) = self.command_encoder_run_render_pass_impl::<A>(
self_id,
base.as_ref(),
&target_colors,
@ -455,7 +449,7 @@ impl GlobalExt for Global {
}
}
CommandEncoderAction::ClearBuffer { dst, offset, size } => {
if let Err(err) = self.command_encoder_clear_buffer::<B>(self_id, dst, offset, size)
if let Err(err) = self.command_encoder_clear_buffer::<A>(self_id, dst, offset, size)
{
error_buf.init(err);
}
@ -465,7 +459,7 @@ impl GlobalExt for Global {
ref subresource_range,
} => {
if let Err(err) =
self.command_encoder_clear_image::<B>(self_id, dst, subresource_range)
self.command_encoder_clear_image::<A>(self_id, dst, subresource_range)
{
error_buf.init(err);
}


@ -30,18 +30,6 @@ job-defaults:
using: run-task
jobs:
wgpu:
description: Sync wgpu to github mirror
secret: gecko/gfx-github-sync/token
run:
command: '$GECKO_PATH/tools/github-sync/sync-to-github.sh wgpu gfx/wgpu gfx-rs/wgpu bors'
run-on-projects: []
when:
files-changed:
- 'gfx/wgpu/**'
treeherder:
symbol: GhS(wgpu)
webrender:
description: Sync webrender to github mirror
secret: gecko/gfx-github-sync/token


@ -200,25 +200,6 @@ wrench-deps:
toolchain:
- linux64-rust-1.47 # whatever m-c is built with
wgpu-deps:
description: "Downloads all the crates needed for testing wgpu"
treeherder:
symbol: Wgpu(deps)
worker:
docker-image: {in-tree: wgpu}
run:
script: wgpu-deps-vendoring.sh
sparse-profile: null
resources:
- 'gfx/wgpu/Cargo.lock'
toolchain-artifact: public/build/wgpu-deps.tar.bz2
fetches:
fetch:
- android-rs-glue
toolchain:
# this requires resolver=2 in Naga
- linux64-rust-1.51 # whatever m-c is built with
linux64-liblowercase:
description: "liblowercase"
treeherder:


@ -1,53 +0,0 @@
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
---
loader: taskgraph.loader.transform:loader
kind-dependencies:
- toolchain
- build
transforms:
- taskgraph.transforms.job:transforms
- taskgraph.transforms.task:transforms
job-defaults:
attributes:
retrigger: true
run-on-projects: ['mozilla-beta', 'trunk']
treeherder:
tier: 1
kind: other
worker:
max-run-time: 1800
env:
RUST_BACKTRACE: 'full'
RUSTFLAGS: '--deny warnings'
run:
sparse-profile: wgpu
when:
files-changed:
- 'gfx/wgpu/**'
jobs:
linux64-debug:
description: Runs debug-mode wgpu CI tests on a Linux worker
worker-type: b-linux
worker:
docker-image: {in-tree: wgpu}
fetches:
toolchain:
- linux64-rust
- wgpu-deps
run:
using: run-task
command: >-
export PATH=$PATH:$MOZ_FETCHES_DIR/rustc/bin &&
cd $GECKO_PATH/gfx/wgpu &&
mv $MOZ_FETCHES_DIR/wgpu-deps/{vendor,.cargo} ./ &&
cargo test --verbose --frozen
treeherder:
platform: linux64-qr/debug
symbol: Wgpu(test)
kind: other


@ -669,11 +669,6 @@ webrender
Tasks used to do testing of WebRender standalone (without gecko). The
WebRender code lives in gfx/wr and has its own testing infrastructure.
wgpu
---------
Tasks used to do testing of WebGPU standalone (without gecko). The
WebGPU code lives in gfx/wgpu and has its own testing infrastructure.
github-sync
------------
Tasks used to do synchronize parts of Gecko that have downstream GitHub


@ -1,26 +0,0 @@
#!/bin/bash
set -x -e -v
# This script uses `cargo-vendor` to download all the dependencies needed
# to test `wgpu`, and exports those dependencies as a tarball.
# This avoids having to download these dependencies on every test job
# that tests `wgpu`.
UPLOAD_DIR=$HOME/artifacts
cd $GECKO_PATH
export PATH=$PATH:$MOZ_FETCHES_DIR/rustc/bin:$HOME/.cargo/bin
cd gfx/wgpu/
mkdir .cargo
cargo vendor --sync ./Cargo.toml > .cargo/config
mkdir wgpu-deps
mv vendor .cargo wgpu-deps/
mkdir wgpu-deps/cargo-apk
# Until there's a version of cargo-apk published on crates.io that has
# https://github.com/rust-windowing/android-rs-glue/pull/223, we need to use
# an unpublished version.
cargo install --path $MOZ_FETCHES_DIR/android-rs-glue/cargo-apk --root wgpu-deps/cargo-apk cargo-apk
tar caf wgpu-deps.tar.bz2 wgpu-deps
mkdir -p $UPLOAD_DIR
mv wgpu-deps.tar.bz2 $UPLOAD_DIR/

1
third_party/rust/arrayvec-0.5.2/.cargo-checksum.json vendored Normal file

@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"c9e49774ee89a3b7b533362d82060a14f2777ccefbe03b956f6f08057b6c3600","Cargo.toml":"c736151a4747b2c041404d730e17dec631393c5feea287edc8a3e482f83a8927","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.md":"f42eb73acbc7729825a836a532d1c8c6b71e006e0d87e549ea7a483da7472425","benches/arraystring.rs":"f12b890977117ebde4ca42bcd6b91f2a6a087f2b235aaca6d15e30d125ae9f67","benches/extend.rs":"c3d69cc488ec5341b019cfed545ebbfea252f98718037b413f6a349da9489d1b","ci/miri.sh":"59172afe080a3431f4e7dbd66d2040afa27ab9c0359532bd68f8f423261738de","custom.css":"e6f2cd299392337b4e2959c52f422e5b7be11920ea98d10db44d10ddef5ed47c","src/array.rs":"5fa75554ebdf595c918fe923a84678989fc420d800310ee19a21597f7d683b66","src/array_string.rs":"48c175371aed3372158a9331e939cffc2a11a09120253fa9d0521e5cbca7cfca","src/char.rs":"3fe9e9cc68fc7cedb238d53924b552b876a60c4fea85935d2f5d1ca0d41ffa3e","src/errors.rs":"ca44c0987f59ae57623088d80013e75129101caea93c278c8ebb0df898bc6b1b","src/lib.rs":"08270486d9e9d34e02e0edf227baf5e87b36d38d264da209d3b7f8962dce1b54","src/maybe_uninit.rs":"c81cf16f976bfaf7c1fe371aa2fba84872874fb0e43c96f63bef01cceb5e1d64","tests/serde.rs":"18c165cf6024f04a25b19aa139657d7c59f72d1541c9b24b44f9eaea01f507db","tests/tests.rs":"b9a3db8a0b957695d9ecc539a7ea2ded1eea3c6f76de8b5624c2b4eae95f1fdd"},"package":"23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"}

234
third_party/rust/arrayvec-0.5.2/CHANGELOG.md vendored Normal file

@ -0,0 +1,234 @@
Recent Changes (arrayvec)
-------------------------
- 0.5.2
- Add `is_empty` methods for ArrayVec and ArrayString by @nicbn
- Implement `TryFrom<Slice>` for ArrayVec by @paulkernfeld
- Add `unstable-const-fn` to make `new` methods const by @m-ou-se
- Run miri in CI and a few related fixes by @RalfJung
- Fix outdated comment by @Phlosioneer
- Move changelog to a separate file by @Luro02
- Remove deprecated `Error::description` by @AnderEnder
- Use pointer method `add` by @hbina
- 0.5.1
- Add `as_ptr`, `as_mut_ptr` accessors directly on the `ArrayVec` by @tbu-
(matches the same addition to `Vec` which happened in Rust 1.37).
- Add method `ArrayString::len` (now available directly, not just through deref to str).
- Use raw pointers instead of `&mut [u8]` for encoding chars into `ArrayString`
(uninit best practice fix).
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation
(uninit best practice fix).
- Changed inline hints on many methods, mainly removing inline hints
- `ArrayVec::dispose` is now deprecated (it has no purpose anymore)
- 0.4.12
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation.
- 0.5.0
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayVec`
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayString`
- The crate feature for serde serialization is now named `serde`.
- Updated the `Array` trait interface, and it is now easier to use for
users outside the crate.
- Add `FromStr` impl for `ArrayString` by @despawnerer
- Add method `try_extend_from_slice` to `ArrayVec`, which is always
efficient by @Thomasdezeeuw.
- Add method `remaining_capacity` by @Thomasdezeeuw
- Improve performance of the `extend` method.
- The index type of zero capacity vectors is now itself zero size, by
@clarfon
- Use `drop_in_place` for truncate and clear methods. This affects drop order
and resume from panic during drop.
- Use Rust 2018 edition for the implementation
- Require Rust 1.36 or later, for the unconditional `MaybeUninit`
improvements.
- 0.4.11
- In Rust 1.36 or later, use newly stable `MaybeUninit`. This extends the
soundness work introduced in 0.4.9, we are finally able to use this in
stable. We use feature detection (build script) to enable this at build
time.
- 0.4.10
- Use `repr(C)` in the `union` version that was introduced in 0.4.9, to
allay some soundness concerns.
- 0.4.9
- Use `union` in the implementation on when this is detected to be supported
(nightly only for now). This is a better solution for treating uninitialized
regions correctly, and we'll use it in stable Rust as soon as we are able.
When this is enabled, the `ArrayVec` has no space overhead in its memory
layout, although the size of the vec should not be relied upon. (See [#114](https://github.com/bluss/arrayvec/pull/114))
- `ArrayString` updated to not use uninitialized memory, it instead zeros its
backing array. This will be refined in the next version, since we
need to make changes to the user visible API.
- The `use_union` feature now does nothing (like its documentation foretold).
- 0.4.8
- Implement Clone and Debug for `IntoIter` by @clarcharr
- Add more array sizes under crate features. These cover all in the range
up to 128 and 129 to 255 respectively (we have a few of those by default):
- `array-size-33-128`
- `array-size-129-255`
- 0.4.7
- Fix future compat warning about raw pointer casts
- Use `drop_in_place` when dropping the arrayvec by-value iterator
- Decrease minimum Rust version (see docs) by @jeehoonkang
- 0.3.25
- Fix future compat warning about raw pointer casts
- 0.4.6
- Fix compilation on 16-bit targets. This means, the 65536 array size is not
included on these targets.
- 0.3.24
- Fix compilation on 16-bit targets. This means, the 65536 array size is not
included on these targets.
- Fix license files so that they are both included (was fixed in 0.4 before)
- 0.4.5
- Add methods to `ArrayString` by @DenialAdams:
- `.pop() -> Option<char>`
- `.truncate(new_len)`
- `.remove(index) -> char`
- Remove dependency on crate odds
- Document debug assertions in unsafe methods better
- 0.4.4
- Add method `ArrayVec::truncate()` by @niklasf
- 0.4.3
- Improve performance for `ArrayVec::extend` with a lower level
implementation (#74)
- Small cleanup in dependencies (use no std for crates where we don't need more)
- 0.4.2
- Add constructor method `new` to `CapacityError`.
- 0.4.1
- Add `Default` impl to `ArrayString` by @tbu-
- 0.4.0
- Reformed signatures and error handling by @bluss and @tbu-:
- `ArrayVec`'s `push, insert, remove, swap_remove` now match `Vec`'s
corresponding signature and panic on capacity errors where applicable.
- Add fallible methods `try_push, insert` and checked methods
`pop_at, swap_pop`.
- Similar changes to `ArrayString`'s push methods.
- Use a local version of the `RangeArgument` trait
- Add array sizes 50, 150, 200 by @daboross
- Support serde 1.0 by @daboross
- New method `.push_unchecked()` by @niklasf
- `ArrayString` implements `PartialOrd, Ord` by @tbu-
- Require Rust 1.14
- crate feature `use_generic_array` was dropped.
- 0.3.23
- Implement `PartialOrd, Ord` as well as `PartialOrd<str>` for
`ArrayString`.
- 0.3.22
- Implement `Array` for the 65536 size
- 0.3.21
- Use `encode_utf8` from crate odds
- Add constructor `ArrayString::from_byte_string`
- 0.3.20
- Simplify and speed up `ArrayString`'s `.push(char)`.
- 0.3.19
- Add new crate feature `use_generic_array` which allows using their
`GenericArray` just like a regular fixed size array for the storage
of an `ArrayVec`.
- 0.3.18
- Fix bounds check in `ArrayVec::insert`!
It would be buggy if `self.len() < index < self.capacity()`. Take note of
the push out behavior specified in the docs.
- 0.3.17
- Added crate feature `use_union` which forwards to the nodrop crate feature
- Added methods `.is_full()` to `ArrayVec` and `ArrayString`.
- 0.3.16
- Added method `.retain()` to `ArrayVec`.
- Added methods `.as_slice(), .as_mut_slice()` to `ArrayVec` and `.as_str()`
to `ArrayString`.
- 0.3.15
- Add feature std, which you can opt out of to use `no_std` (requires Rust 1.6
to opt out).
- Implement `Clone::clone_from` for ArrayVec and ArrayString
- 0.3.14
- Add `ArrayString::from(&str)`
- 0.3.13
- Added `DerefMut` impl for `ArrayString`.
- Added method `.simplify()` to drop the element for `CapacityError`.
- Added method `.dispose()` to `ArrayVec`
- 0.3.12
- Added ArrayString, a fixed capacity analogy of String
- 0.3.11
- Added trait impls Default, PartialOrd, Ord, Write for ArrayVec
- 0.3.10
- Go back to using external NoDrop, fixing a panic safety bug (issue #3)
- 0.3.8
- Inline the non-dropping logic to remove one drop flag in the
ArrayVec representation.
- 0.3.7
- Added method .into_inner()
- Added unsafe method .set_len()

63
third_party/rust/arrayvec-0.5.2/Cargo.toml vendored Normal file

@ -0,0 +1,63 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "arrayvec"
version = "0.5.2"
authors = ["bluss"]
description = "A vector with fixed capacity, backed by an array (it can be stored on the stack too). Implements fixed capacity ArrayVec and ArrayString."
documentation = "https://docs.rs/arrayvec/"
keywords = ["stack", "vector", "array", "data-structure", "no_std"]
categories = ["data-structures", "no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/bluss/arrayvec"
[package.metadata.docs.rs]
features = ["serde"]
[package.metadata.release]
no-dev-version = true
tag-name = "{{version}}"
[profile.bench]
debug = true
[profile.release]
debug = true
[[bench]]
name = "extend"
harness = false
[[bench]]
name = "arraystring"
harness = false
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[dev-dependencies.bencher]
version = "0.1.4"
[dev-dependencies.matches]
version = "0.1"
[dev-dependencies.serde_test]
version = "1.0"
[build-dependencies]
[features]
array-sizes-129-255 = []
array-sizes-33-128 = []
default = ["std"]
std = []
unstable-const-fn = []


25
third_party/rust/arrayvec-0.5.2/LICENSE-MIT vendored Normal file

@ -0,0 +1,25 @@
Copyright (c) Ulrik Sverdrup "bluss" 2015-2017
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

24
third_party/rust/arrayvec-0.5.2/README.md vendored Normal file

@ -0,0 +1,24 @@
arrayvec
========
[![Crates.io: arrayvec](https://img.shields.io/crates/v/arrayvec.svg)](https://crates.io/crates/arrayvec)
[![Crates.io: nodrop](https://img.shields.io/crates/v/nodrop.svg)](https://crates.io/crates/nodrop)
[![Documentation](https://docs.rs/arrayvec/badge.svg)](https://docs.rs/arrayvec)
[![Build Status](https://travis-ci.org/bluss/arrayvec.svg?branch=master)](https://travis-ci.org/bluss/arrayvec)
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-red.svg)](LICENSE-APACHE)
OR
[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)
A vector with fixed capacity.
Please read the [`API documentation here`](https://docs.rs/arrayvec)
# License
Dual-licensed to be compatible with the Rust project.
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0 or the MIT license
http://opensource.org/licenses/MIT, at your
option. This file may not be copied, modified, or distributed
except according to those terms.
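As a quick orientation (not part of the upstream README), here is a minimal usage sketch against the 0.5-series API vendored in this tree, where the capacity comes from the backing array type parameter:

use arrayvec::{ArrayString, ArrayVec};

fn main() {
    // Fixed-capacity vector backed by a [i32; 4]; `push` panics when full,
    // so the fallible `try_push` is used once capacity might be exhausted.
    let mut v = ArrayVec::<[i32; 4]>::new();
    v.push(1);
    v.push(2);
    assert!(v.try_push(3).is_ok());
    assert_eq!(&v[..], &[1, 2, 3]);

    // Fixed-capacity string backed by a [u8; 8].
    let mut s = ArrayString::<[u8; 8]>::new();
    s.push_str("arrays");
    assert_eq!(&s[..], "arrays");
}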

90
third_party/rust/arrayvec-0.5.2/benches/arraystring.rs vendored Normal file

@ -0,0 +1,90 @@
extern crate arrayvec;
#[macro_use] extern crate bencher;
use arrayvec::ArrayString;
use bencher::Bencher;
fn try_push_c(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
b.iter(|| {
v.clear();
while v.try_push('c').is_ok() {
}
v.len()
});
b.bytes = v.capacity() as u64;
}
fn try_push_alpha(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
b.iter(|| {
v.clear();
while v.try_push('α').is_ok() {
}
v.len()
});
b.bytes = v.capacity() as u64;
}
// Yes, pushing a string char-by-char is slow. Use .push_str.
fn try_push_string(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let input = "abcαβγ“”";
b.iter(|| {
v.clear();
for ch in input.chars().cycle() {
if !v.try_push(ch).is_ok() {
break;
}
}
v.len()
});
b.bytes = v.capacity() as u64;
}
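// Illustrative addition (not in the upstream benchmark file): the whole-string
// counterpart of the note above, pushing with `try_push_str` instead of
// per-char `try_push`. It is intentionally not registered in `benchmark_group!`
// below, so it is a sketch rather than a measured benchmark.
#[allow(dead_code)]
fn try_push_str_whole(b: &mut Bencher) {
    let mut v = ArrayString::<[u8; 512]>::new();
    let input = "abcαβγ“”";
    b.iter(|| {
        v.clear();
        // Append the whole input repeatedly until the remaining capacity
        // cannot fit another copy.
        while v.try_push_str(input).is_ok() {}
        v.len()
    });
    b.bytes = v.capacity() as u64;
}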
fn push_c(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
b.iter(|| {
v.clear();
while !v.is_full() {
v.push('c');
}
v.len()
});
b.bytes = v.capacity() as u64;
}
fn push_alpha(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
b.iter(|| {
v.clear();
while !v.is_full() {
v.push('α');
}
v.len()
});
b.bytes = v.capacity() as u64;
}
fn push_string(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let input = "abcαβγ“”";
b.iter(|| {
v.clear();
for ch in input.chars().cycle() {
if !v.is_full() {
v.push(ch);
} else {
break;
}
}
v.len()
});
b.bytes = v.capacity() as u64;
}
benchmark_group!(benches, try_push_c, try_push_alpha, try_push_string, push_c,
push_alpha, push_string);
benchmark_main!(benches);

78
third_party/rust/arrayvec-0.5.2/benches/extend.rs vendored Normal file

@ -0,0 +1,78 @@
extern crate arrayvec;
#[macro_use] extern crate bencher;
use std::io::Write;
use arrayvec::ArrayVec;
use bencher::Bencher;
use bencher::black_box;
fn extend_with_constant(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let cap = v.capacity();
b.iter(|| {
v.clear();
let constant = black_box(1);
v.extend((0..cap).map(move |_| constant));
v[511]
});
b.bytes = v.capacity() as u64;
}
fn extend_with_range(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let cap = v.capacity();
b.iter(|| {
v.clear();
let range = 0..cap;
v.extend(range.map(|x| black_box(x as _)));
v[511]
});
b.bytes = v.capacity() as u64;
}
fn extend_with_slice(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let data = [1; 512];
b.iter(|| {
v.clear();
let iter = data.iter().map(|&x| x);
v.extend(iter);
v[511]
});
b.bytes = v.capacity() as u64;
}
fn extend_with_write(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let data = [1; 512];
b.iter(|| {
v.clear();
v.write(&data[..]).ok();
v[511]
});
b.bytes = v.capacity() as u64;
}
fn extend_from_slice(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let data = [1; 512];
b.iter(|| {
v.clear();
v.try_extend_from_slice(&data).ok();
v[511]
});
b.bytes = v.capacity() as u64;
}
benchmark_group!(benches,
extend_with_constant,
extend_with_range,
extend_with_slice,
extend_with_write,
extend_from_slice
);
benchmark_main!(benches);

15
third_party/rust/arrayvec-0.5.2/ci/miri.sh vendored Normal file

@ -0,0 +1,15 @@
#!/usr/bin/env sh
set -ex
export CARGO_NET_RETRY=5
export CARGO_NET_TIMEOUT=10
MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
rustup default "$MIRI_NIGHTLY"
rustup component add miri
cargo miri setup
cargo miri test



582
third_party/rust/arrayvec-0.5.2/src/array_string.rs vendored Normal file

@ -0,0 +1,582 @@
use std::borrow::Borrow;
use std::cmp;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ptr;
use std::ops::{Deref, DerefMut};
use std::str;
use std::str::FromStr;
use std::str::Utf8Error;
use std::slice;
use crate::array::Array;
use crate::array::Index;
use crate::CapacityError;
use crate::char::encode_utf8;
#[cfg(feature="serde")]
use serde::{Serialize, Deserialize, Serializer, Deserializer};
use super::MaybeUninit as MaybeUninitCopy;
/// A string with a fixed capacity.
///
/// The `ArrayString` is a string backed by a fixed size array. It keeps track
/// of its length.
///
/// The string is a contiguous value that you can store directly on the stack
/// if needed.
#[derive(Copy)]
pub struct ArrayString<A>
where A: Array<Item=u8> + Copy
{
xs: MaybeUninitCopy<A>,
len: A::Index,
}
impl<A> Default for ArrayString<A>
where A: Array<Item=u8> + Copy
{
/// Return an empty `ArrayString`
fn default() -> ArrayString<A> {
ArrayString::new()
}
}
impl<A> ArrayString<A>
where A: Array<Item=u8> + Copy
{
/// Create a new empty `ArrayString`.
///
/// Capacity is inferred from the type parameter.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 16]>::new();
/// string.push_str("foo");
/// assert_eq!(&string[..], "foo");
/// assert_eq!(string.capacity(), 16);
/// ```
#[cfg(not(feature="unstable-const-fn"))]
pub fn new() -> ArrayString<A> {
unsafe {
ArrayString {
xs: MaybeUninitCopy::uninitialized(),
len: Index::ZERO,
}
}
}
#[cfg(feature="unstable-const-fn")]
pub const fn new() -> ArrayString<A> {
unsafe {
ArrayString {
xs: MaybeUninitCopy::uninitialized(),
len: Index::ZERO,
}
}
}
/// Return the length of the string.
#[inline]
pub fn len(&self) -> usize { self.len.to_usize() }
/// Returns whether the string is empty.
#[inline]
pub fn is_empty(&self) -> bool { self.len() == 0 }
/// Create a new `ArrayString` from a `str`.
///
/// Capacity is inferred from the type parameter.
///
/// **Errors** if the backing array is not large enough to fit the string.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 3]>::from("foo").unwrap();
/// assert_eq!(&string[..], "foo");
/// assert_eq!(string.len(), 3);
/// assert_eq!(string.capacity(), 3);
/// ```
pub fn from(s: &str) -> Result<Self, CapacityError<&str>> {
let mut arraystr = Self::new();
arraystr.try_push_str(s)?;
Ok(arraystr)
}
/// Create a new `ArrayString` from a byte string literal.
///
/// **Errors** if the byte string literal is not valid UTF-8.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let string = ArrayString::from_byte_string(b"hello world").unwrap();
/// ```
pub fn from_byte_string(b: &A) -> Result<Self, Utf8Error> {
let len = str::from_utf8(b.as_slice())?.len();
debug_assert_eq!(len, A::CAPACITY);
Ok(ArrayString {
xs: MaybeUninitCopy::from(*b),
len: Index::from(A::CAPACITY),
})
}
/// Return the capacity of the `ArrayString`.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let string = ArrayString::<[_; 3]>::new();
/// assert_eq!(string.capacity(), 3);
/// ```
#[inline(always)]
pub fn capacity(&self) -> usize { A::CAPACITY }
/// Return if the `ArrayString` is completely filled.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 1]>::new();
/// assert!(!string.is_full());
/// string.push_str("A");
/// assert!(string.is_full());
/// ```
pub fn is_full(&self) -> bool { self.len() == self.capacity() }
/// Adds the given char to the end of the string.
///
/// ***Panics*** if the backing array is not large enough to fit the additional char.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
///
/// string.push('a');
/// string.push('b');
///
/// assert_eq!(&string[..], "ab");
/// ```
pub fn push(&mut self, c: char) {
self.try_push(c).unwrap();
}
/// Adds the given char to the end of the string.
///
/// Returns `Ok` if the push succeeds.
///
/// **Errors** if the backing array is not large enough to fit the additional char.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
///
/// string.try_push('a').unwrap();
/// string.try_push('b').unwrap();
/// let overflow = string.try_push('c');
///
/// assert_eq!(&string[..], "ab");
/// assert_eq!(overflow.unwrap_err().element(), 'c');
/// ```
pub fn try_push(&mut self, c: char) -> Result<(), CapacityError<char>> {
let len = self.len();
unsafe {
let ptr = self.xs.ptr_mut().add(len);
let remaining_cap = self.capacity() - len;
match encode_utf8(c, ptr, remaining_cap) {
Ok(n) => {
self.set_len(len + n);
Ok(())
}
Err(_) => Err(CapacityError::new(c)),
}
}
}
/// Adds the given string slice to the end of the string.
///
/// ***Panics*** if the backing array is not large enough to fit the string.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
///
/// string.push_str("a");
/// string.push_str("d");
///
/// assert_eq!(&string[..], "ad");
/// ```
pub fn push_str(&mut self, s: &str) {
self.try_push_str(s).unwrap()
}
/// Adds the given string slice to the end of the string.
///
/// Returns `Ok` if the push succeeds.
///
/// **Errors** if the backing array is not large enough to fit the string.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
///
/// string.try_push_str("a").unwrap();
/// let overflow1 = string.try_push_str("bc");
/// string.try_push_str("d").unwrap();
/// let overflow2 = string.try_push_str("ef");
///
/// assert_eq!(&string[..], "ad");
/// assert_eq!(overflow1.unwrap_err().element(), "bc");
/// assert_eq!(overflow2.unwrap_err().element(), "ef");
/// ```
pub fn try_push_str<'a>(&mut self, s: &'a str) -> Result<(), CapacityError<&'a str>> {
if s.len() > self.capacity() - self.len() {
return Err(CapacityError::new(s));
}
unsafe {
let dst = self.xs.ptr_mut().add(self.len());
let src = s.as_ptr();
ptr::copy_nonoverlapping(src, dst, s.len());
let newl = self.len() + s.len();
self.set_len(newl);
}
Ok(())
}
/// Removes the last character from the string and returns it.
///
/// Returns `None` if this `ArrayString` is empty.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut s = ArrayString::<[_; 3]>::from("foo").unwrap();
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('f'));
///
/// assert_eq!(s.pop(), None);
/// ```
pub fn pop(&mut self) -> Option<char> {
let ch = match self.chars().rev().next() {
Some(ch) => ch,
None => return None,
};
let new_len = self.len() - ch.len_utf8();
unsafe {
self.set_len(new_len);
}
Some(ch)
}
/// Shortens this `ArrayString` to the specified length.
///
/// If `new_len` is greater than the string's current length, this has no
/// effect.
///
/// ***Panics*** if `new_len` does not lie on a `char` boundary.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 6]>::from("foobar").unwrap();
/// string.truncate(3);
/// assert_eq!(&string[..], "foo");
/// string.truncate(4);
/// assert_eq!(&string[..], "foo");
/// ```
pub fn truncate(&mut self, new_len: usize) {
if new_len <= self.len() {
assert!(self.is_char_boundary(new_len));
unsafe {
// In libstd truncate is called on the underlying vector,
// which in turn drops each element.
// As we know we don't have to worry about Drop,
// we can just set the length (a la clear.)
self.set_len(new_len);
}
}
}
/// Removes a `char` from this `ArrayString` at a byte position and returns it.
///
/// This is an `O(n)` operation, as it requires copying every element in the
/// array.
///
/// ***Panics*** if `idx` is larger than or equal to the `ArrayString`'s length,
/// or if it does not lie on a `char` boundary.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut s = ArrayString::<[_; 3]>::from("foo").unwrap();
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
/// assert_eq!(s.remove(0), 'o');
/// ```
pub fn remove(&mut self, idx: usize) -> char {
let ch = match self[idx..].chars().next() {
Some(ch) => ch,
None => panic!("cannot remove a char from the end of a string"),
};
let next = idx + ch.len_utf8();
let len = self.len();
unsafe {
ptr::copy(self.xs.ptr().add(next),
self.xs.ptr_mut().add(idx),
len - next);
self.set_len(len - (next - idx));
}
ch
}
/// Make the string empty.
pub fn clear(&mut self) {
unsafe {
self.set_len(0);
}
}
/// Set the string's length.
///
/// This function is `unsafe` because it changes the notion of the
/// number of “valid” bytes in the string. Use with care.
///
/// This method uses *debug assertions* to check the validity of `length`
/// and may use other debug assertions.
pub unsafe fn set_len(&mut self, length: usize) {
debug_assert!(length <= self.capacity());
self.len = Index::from(length);
}
/// Return a string slice of the whole `ArrayString`.
pub fn as_str(&self) -> &str {
self
}
}
impl<A> Deref for ArrayString<A>
where A: Array<Item=u8> + Copy
{
type Target = str;
#[inline]
fn deref(&self) -> &str {
unsafe {
let sl = slice::from_raw_parts(self.xs.ptr(), self.len.to_usize());
str::from_utf8_unchecked(sl)
}
}
}
impl<A> DerefMut for ArrayString<A>
where A: Array<Item=u8> + Copy
{
#[inline]
fn deref_mut(&mut self) -> &mut str {
unsafe {
let sl = slice::from_raw_parts_mut(self.xs.ptr_mut(), self.len.to_usize());
str::from_utf8_unchecked_mut(sl)
}
}
}
impl<A> PartialEq for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn eq(&self, rhs: &Self) -> bool {
**self == **rhs
}
}
impl<A> PartialEq<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn eq(&self, rhs: &str) -> bool {
&**self == rhs
}
}
impl<A> PartialEq<ArrayString<A>> for str
where A: Array<Item=u8> + Copy
{
fn eq(&self, rhs: &ArrayString<A>) -> bool {
self == &**rhs
}
}
impl<A> Eq for ArrayString<A>
where A: Array<Item=u8> + Copy
{ }
impl<A> Hash for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn hash<H: Hasher>(&self, h: &mut H) {
(**self).hash(h)
}
}
impl<A> Borrow<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn borrow(&self) -> &str { self }
}
impl<A> AsRef<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn as_ref(&self) -> &str { self }
}
impl<A> fmt::Debug for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
}
impl<A> fmt::Display for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
}
/// `Write` appends written data to the end of the string.
impl<A> fmt::Write for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn write_char(&mut self, c: char) -> fmt::Result {
self.try_push(c).map_err(|_| fmt::Error)
}
fn write_str(&mut self, s: &str) -> fmt::Result {
self.try_push_str(s).map_err(|_| fmt::Error)
}
}
impl<A> Clone for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn clone(&self) -> ArrayString<A> {
*self
}
fn clone_from(&mut self, rhs: &Self) {
// guaranteed to fit due to types matching.
self.clear();
self.try_push_str(rhs).ok();
}
}
impl<A> PartialOrd for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
(**self).partial_cmp(&**rhs)
}
fn lt(&self, rhs: &Self) -> bool { **self < **rhs }
fn le(&self, rhs: &Self) -> bool { **self <= **rhs }
fn gt(&self, rhs: &Self) -> bool { **self > **rhs }
fn ge(&self, rhs: &Self) -> bool { **self >= **rhs }
}
impl<A> PartialOrd<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn partial_cmp(&self, rhs: &str) -> Option<cmp::Ordering> {
(**self).partial_cmp(rhs)
}
fn lt(&self, rhs: &str) -> bool { &**self < rhs }
fn le(&self, rhs: &str) -> bool { &**self <= rhs }
fn gt(&self, rhs: &str) -> bool { &**self > rhs }
fn ge(&self, rhs: &str) -> bool { &**self >= rhs }
}
impl<A> PartialOrd<ArrayString<A>> for str
where A: Array<Item=u8> + Copy
{
fn partial_cmp(&self, rhs: &ArrayString<A>) -> Option<cmp::Ordering> {
self.partial_cmp(&**rhs)
}
fn lt(&self, rhs: &ArrayString<A>) -> bool { self < &**rhs }
fn le(&self, rhs: &ArrayString<A>) -> bool { self <= &**rhs }
fn gt(&self, rhs: &ArrayString<A>) -> bool { self > &**rhs }
fn ge(&self, rhs: &ArrayString<A>) -> bool { self >= &**rhs }
}
impl<A> Ord for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn cmp(&self, rhs: &Self) -> cmp::Ordering {
(**self).cmp(&**rhs)
}
}
impl<A> FromStr for ArrayString<A>
where A: Array<Item=u8> + Copy
{
type Err = CapacityError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::from(s).map_err(CapacityError::simplify)
}
}
#[cfg(feature="serde")]
/// Requires crate feature `"serde"`
impl<A> Serialize for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
{
serializer.serialize_str(&*self)
}
}
#[cfg(feature="serde")]
/// Requires crate feature `"serde"`
impl<'de, A> Deserialize<'de> for ArrayString<A>
where A: Array<Item=u8> + Copy
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
{
use serde::de::{self, Visitor};
use std::marker::PhantomData;
struct ArrayStringVisitor<A: Array<Item=u8>>(PhantomData<A>);
impl<'de, A: Copy + Array<Item=u8>> Visitor<'de> for ArrayStringVisitor<A> {
type Value = ArrayString<A>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a string no more than {} bytes long", A::CAPACITY)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where E: de::Error,
{
ArrayString::from(v).map_err(|_| E::invalid_length(v.len(), &self))
}
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where E: de::Error,
{
let s = str::from_utf8(v).map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?;
ArrayString::from(s).map_err(|_| E::invalid_length(s.len(), &self))
}
}
deserializer.deserialize_str(ArrayStringVisitor::<A>(PhantomData))
}
}

99
third_party/rust/arrayvec-0.5.2/src/char.rs vendored Normal file

@ -0,0 +1,99 @@
// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//
// Original authors: alexchrichton, bluss
use std::ptr;
// UTF-8 ranges and tags for encoding characters
const TAG_CONT: u8 = 0b1000_0000;
const TAG_TWO_B: u8 = 0b1100_0000;
const TAG_THREE_B: u8 = 0b1110_0000;
const TAG_FOUR_B: u8 = 0b1111_0000;
const MAX_ONE_B: u32 = 0x80;
const MAX_TWO_B: u32 = 0x800;
const MAX_THREE_B: u32 = 0x10000;
/// Placeholder
pub struct EncodeUtf8Error;
#[inline]
unsafe fn write(ptr: *mut u8, index: usize, byte: u8) {
ptr::write(ptr.add(index), byte)
}
/// Encode a char into buf using UTF-8.
///
/// On success, return the byte length of the encoding (1, 2, 3 or 4).<br>
/// On error, return `EncodeUtf8Error` if the buffer was too short for the char.
///
/// Safety: `ptr` must be writable for `len` bytes.
#[inline]
pub unsafe fn encode_utf8(ch: char, ptr: *mut u8, len: usize) -> Result<usize, EncodeUtf8Error>
{
let code = ch as u32;
if code < MAX_ONE_B && len >= 1 {
write(ptr, 0, code as u8);
return Ok(1);
} else if code < MAX_TWO_B && len >= 2 {
write(ptr, 0, (code >> 6 & 0x1F) as u8 | TAG_TWO_B);
write(ptr, 1, (code & 0x3F) as u8 | TAG_CONT);
return Ok(2);
} else if code < MAX_THREE_B && len >= 3 {
write(ptr, 0, (code >> 12 & 0x0F) as u8 | TAG_THREE_B);
write(ptr, 1, (code >> 6 & 0x3F) as u8 | TAG_CONT);
write(ptr, 2, (code & 0x3F) as u8 | TAG_CONT);
return Ok(3);
} else if len >= 4 {
write(ptr, 0, (code >> 18 & 0x07) as u8 | TAG_FOUR_B);
write(ptr, 1, (code >> 12 & 0x3F) as u8 | TAG_CONT);
write(ptr, 2, (code >> 6 & 0x3F) as u8 | TAG_CONT);
write(ptr, 3, (code & 0x3F) as u8 | TAG_CONT);
return Ok(4);
};
Err(EncodeUtf8Error)
}
#[test]
#[cfg_attr(miri, ignore)] // Miri is too slow
fn test_encode_utf8() {
// Test that all codepoints are encoded correctly
let mut data = [0u8; 16];
for codepoint in 0..=(std::char::MAX as u32) {
if let Some(ch) = std::char::from_u32(codepoint) {
for elt in &mut data { *elt = 0; }
let ptr = data.as_mut_ptr();
let len = data.len();
unsafe {
let res = encode_utf8(ch, ptr, len).ok().unwrap();
assert_eq!(res, ch.len_utf8());
}
let string = std::str::from_utf8(&data).unwrap();
assert_eq!(string.chars().next(), Some(ch));
}
}
}
#[test]
fn test_encode_utf8_oob() {
// test that we report oob if the buffer is too short
let mut data = [0u8; 16];
let chars = ['a', 'α', '€', '𐍈'];
for (len, &ch) in (1..=4).zip(&chars) {
assert_eq!(len, ch.len_utf8(), "Len of ch={}", ch);
let ptr = data.as_mut_ptr();
unsafe {
assert!(matches::matches!(encode_utf8(ch, ptr, len - 1), Err(_)));
assert!(matches::matches!(encode_utf8(ch, ptr, len), Ok(_)));
}
}
}
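
The helper above reimplements the standard UTF-8 tagging scheme (a leading byte marked with `TAG_TWO_B`/`TAG_THREE_B`/`TAG_FOUR_B` followed by `TAG_CONT` continuation bytes) so that it can write through a raw pointer into possibly-uninitialized storage. A minimal standalone sketch, using only the standard library rather than the vendored crate, showing the same byte-level layout for a 2-byte and a 4-byte code point:

fn main() {
    let mut buf = [0u8; 4];

    // 'α' is U+03B1: a 2-byte sequence 0b110xxxxx 0b10xxxxxx, i.e. 0xCE 0xB1.
    let two = 'α'.encode_utf8(&mut buf);
    assert_eq!(two.as_bytes(), &[0xCE, 0xB1]);
    assert_eq!('α'.len_utf8(), 2);

    // '𐍈' is U+10348: a 4-byte sequence whose leading byte carries the 0b11110xxx tag.
    let four = '𐍈'.encode_utf8(&mut buf);
    assert_eq!(four.len(), 4);
    assert_eq!(four.as_bytes()[0] & 0b1111_1000, 0b1111_0000);
}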

49
third_party/rust/arrayvec-0.5.2/src/errors.rs vendored Normal file

@ -0,0 +1,49 @@
use std::fmt;
#[cfg(feature="std")]
use std::any::Any;
#[cfg(feature="std")]
use std::error::Error;
/// Error value indicating insufficient capacity
#[derive(Clone, Copy, Eq, Ord, PartialEq, PartialOrd)]
pub struct CapacityError<T = ()> {
element: T,
}
impl<T> CapacityError<T> {
/// Create a new `CapacityError` from `element`.
pub fn new(element: T) -> CapacityError<T> {
CapacityError {
element: element,
}
}
/// Extract the overflowing element
pub fn element(self) -> T {
self.element
}
/// Convert into a `CapacityError` that does not carry an element.
pub fn simplify(self) -> CapacityError {
CapacityError { element: () }
}
}
const CAPERROR: &'static str = "insufficient capacity";
#[cfg(feature="std")]
/// Requires `features="std"`.
impl<T: Any> Error for CapacityError<T> {}
impl<T> fmt::Display for CapacityError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", CAPERROR)
}
}
impl<T> fmt::Debug for CapacityError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}: {}", "CapacityError", CAPERROR)
}
}
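
`CapacityError<T>` above is the error type used by the crate's fallible `try_*` methods; it carries the rejected element so the caller can recover it, while `simplify()` discards the payload. A minimal sketch of handling it, assuming the 0.5.2 API used throughout these vendored sources (`ArrayString<[u8; N]>`):

use arrayvec::ArrayString;

fn main() {
    let mut s = ArrayString::<[u8; 4]>::new();
    s.try_push_str("abcd").unwrap();            // fills the 4-byte backing array exactly
    // An overflowing push returns a CapacityError carrying the rejected input.
    let overflow = s.try_push_str("ef").unwrap_err();
    println!("{}", overflow);                   // Display prints "insufficient capacity"
    assert_eq!(overflow.element(), "ef");
    assert_eq!(s.try_push('x').unwrap_err().element(), 'x');
    assert_eq!(&s[..], "abcd");                 // the string is left untouched on error
}

The panicking `push`/`push_str` variants in `array_string.rs` above are thin wrappers that simply `unwrap()` these same results.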

1213
third_party/rust/arrayvec-0.5.2/src/lib.rs vendored Normal file

Diff not shown because of its large size.


79
third_party/rust/arrayvec-0.5.2/tests/serde.rs vendored Normal file

@ -0,0 +1,79 @@
#![cfg(feature = "serde")]
extern crate arrayvec;
extern crate serde_test;
mod array_vec {
use arrayvec::ArrayVec;
use serde_test::{Token, assert_tokens, assert_de_tokens_error};
#[test]
fn test_ser_de_empty() {
let vec = ArrayVec::<[u32; 0]>::new();
assert_tokens(&vec, &[
Token::Seq { len: Some(0) },
Token::SeqEnd,
]);
}
#[test]
fn test_ser_de() {
let mut vec = ArrayVec::<[u32; 3]>::new();
vec.push(20);
vec.push(55);
vec.push(123);
assert_tokens(&vec, &[
Token::Seq { len: Some(3) },
Token::U32(20),
Token::U32(55),
Token::U32(123),
Token::SeqEnd,
]);
}
#[test]
fn test_de_too_large() {
assert_de_tokens_error::<ArrayVec<[u32; 2]>>(&[
Token::Seq { len: Some(3) },
Token::U32(13),
Token::U32(42),
Token::U32(68),
], "invalid length 3, expected an array with no more than 2 items");
}
}
mod array_string {
use arrayvec::ArrayString;
use serde_test::{Token, assert_tokens, assert_de_tokens_error};
#[test]
fn test_ser_de_empty() {
let string = ArrayString::<[u8; 0]>::new();
assert_tokens(&string, &[
Token::Str(""),
]);
}
#[test]
fn test_ser_de() {
let string = ArrayString::<[u8; 9]>::from("1234 abcd")
.expect("expected exact specified capacity to be enough");
assert_tokens(&string, &[
Token::Str("1234 abcd"),
]);
}
#[test]
fn test_de_too_large() {
assert_de_tokens_error::<ArrayString<[u8; 2]>>(&[
Token::Str("afd")
], "invalid length 3, expected a string no more than 2 bytes long");
}
}

688
third_party/rust/arrayvec-0.5.2/tests/tests.rs vendored Normal file

@ -0,0 +1,688 @@
extern crate arrayvec;
#[macro_use] extern crate matches;
use arrayvec::ArrayVec;
use arrayvec::ArrayString;
use std::mem;
use arrayvec::CapacityError;
use std::collections::HashMap;
#[test]
fn test_simple() {
use std::ops::Add;
let mut vec: ArrayVec<[Vec<i32>; 3]> = ArrayVec::new();
vec.push(vec![1, 2, 3, 4]);
vec.push(vec![10]);
vec.push(vec![-1, 13, -2]);
for elt in &vec {
assert_eq!(elt.iter().fold(0, Add::add), 10);
}
let sum_len = vec.into_iter().map(|x| x.len()).fold(0, Add::add);
assert_eq!(sum_len, 8);
}
#[test]
fn test_capacity_left() {
let mut vec: ArrayVec<[usize; 4]> = ArrayVec::new();
assert_eq!(vec.remaining_capacity(), 4);
vec.push(1);
assert_eq!(vec.remaining_capacity(), 3);
vec.push(2);
assert_eq!(vec.remaining_capacity(), 2);
vec.push(3);
assert_eq!(vec.remaining_capacity(), 1);
vec.push(4);
assert_eq!(vec.remaining_capacity(), 0);
}
#[test]
fn test_extend_from_slice() {
let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new();
vec.try_extend_from_slice(&[1, 2, 3]).unwrap();
assert_eq!(vec.len(), 3);
assert_eq!(&vec[..], &[1, 2, 3]);
assert_eq!(vec.pop(), Some(3));
assert_eq!(&vec[..], &[1, 2]);
}
#[test]
fn test_extend_from_slice_error() {
let mut vec: ArrayVec<[usize; 10]> = ArrayVec::new();
vec.try_extend_from_slice(&[1, 2, 3]).unwrap();
let res = vec.try_extend_from_slice(&[0; 8]);
assert_matches!(res, Err(_));
let mut vec: ArrayVec<[usize; 0]> = ArrayVec::new();
let res = vec.try_extend_from_slice(&[0; 1]);
assert_matches!(res, Err(_));
}
#[test]
fn test_try_from_slice_error() {
use arrayvec::ArrayVec;
use std::convert::TryInto as _;
let res: Result<ArrayVec<[_; 2]>, _> = (&[1, 2, 3] as &[_]).try_into();
assert_matches!(res, Err(_));
}
#[test]
fn test_u16_index() {
const N: usize = 4096;
let mut vec: ArrayVec<[_; N]> = ArrayVec::new();
for _ in 0..N {
assert!(vec.try_push(1u8).is_ok());
}
assert!(vec.try_push(0).is_err());
assert_eq!(vec.len(), N);
}
#[test]
fn test_iter() {
let mut iter = ArrayVec::from([1, 2, 3]).into_iter();
assert_eq!(iter.size_hint(), (3, Some(3)));
assert_eq!(iter.next_back(), Some(3));
assert_eq!(iter.next(), Some(1));
assert_eq!(iter.next_back(), Some(2));
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.next_back(), None);
}
#[test]
fn test_drop() {
use std::cell::Cell;
let flag = &Cell::new(0);
#[derive(Clone)]
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
}
assert_eq!(flag.get(), 2);
// test something with the nullable pointer optimization
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(vec![Bump(flag)]);
array.push(vec![Bump(flag), Bump(flag)]);
array.push(vec![]);
let push4 = array.try_push(vec![Bump(flag)]);
assert_eq!(flag.get(), 0);
drop(push4);
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 1);
drop(array.pop());
assert_eq!(flag.get(), 3);
}
assert_eq!(flag.get(), 4);
// test into_inner
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let inner = array.into_inner();
assert!(inner.is_ok());
assert_eq!(flag.get(), 0);
drop(inner);
assert_eq!(flag.get(), 3);
}
// test cloning into_iter
flag.set(0);
{
let mut array = ArrayVec::<[_; 3]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let mut iter = array.into_iter();
assert_eq!(flag.get(), 0);
iter.next();
assert_eq!(flag.get(), 1);
let clone = iter.clone();
assert_eq!(flag.get(), 1);
drop(clone);
assert_eq!(flag.get(), 3);
drop(iter);
assert_eq!(flag.get(), 5);
}
}
#[test]
fn test_drop_panics() {
use std::cell::Cell;
use std::panic::catch_unwind;
use std::panic::AssertUnwindSafe;
let flag = &Cell::new(0);
struct Bump<'a>(&'a Cell<i32>);
// Panic in the first drop
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
if n == 0 {
panic!("Panic in Bump's drop");
}
}
}
// check if rust is new enough
flag.set(0);
{
let array = vec![Bump(flag), Bump(flag)];
let res = catch_unwind(AssertUnwindSafe(|| {
drop(array);
}));
assert!(res.is_err());
}
if flag.get() != 2 {
println!("test_drop_panics: skip, this version of Rust doesn't continue in drop_in_place");
return;
}
flag.set(0);
{
let mut array = ArrayVec::<[Bump; 128]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let res = catch_unwind(AssertUnwindSafe(|| {
drop(array);
}));
assert!(res.is_err());
}
// Check that all the elements drop, even if the first drop panics.
assert_eq!(flag.get(), 3);
flag.set(0);
{
let mut array = ArrayVec::<[Bump; 16]>::new();
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
array.push(Bump(flag));
let i = 2;
let tail_len = array.len() - i;
let res = catch_unwind(AssertUnwindSafe(|| {
array.truncate(i);
}));
assert!(res.is_err());
// Check that all the tail elements drop, even if the first drop panics.
assert_eq!(flag.get(), tail_len as i32);
}
}
#[test]
fn test_extend() {
let mut range = 0..10;
let mut array: ArrayVec<[_; 5]> = range.by_ref().collect();
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
assert_eq!(range.next(), Some(5));
array.extend(range.by_ref());
assert_eq!(range.next(), Some(6));
let mut array: ArrayVec<[_; 10]> = (0..3).collect();
assert_eq!(&array[..], &[0, 1, 2]);
array.extend(3..5);
assert_eq!(&array[..], &[0, 1, 2, 3, 4]);
}
#[test]
fn test_is_send_sync() {
let data = ArrayVec::<[Vec<i32>; 5]>::new();
&data as &dyn Send;
&data as &dyn Sync;
}
#[test]
fn test_compact_size() {
// Future rust will kill these drop flags!
// 4 elements size + 1 len + 1 enum tag + [1 drop flag]
type ByteArray = ArrayVec<[u8; 4]>;
println!("{}", mem::size_of::<ByteArray>());
assert!(mem::size_of::<ByteArray>() <= 8);
// 1 enum tag + 1 drop flag
type EmptyArray = ArrayVec<[u8; 0]>;
println!("{}", mem::size_of::<EmptyArray>());
assert!(mem::size_of::<EmptyArray>() <= 2);
// 12 element size + 1 enum tag + 3 padding + 1 len + 1 drop flag + 2 padding
type QuadArray = ArrayVec<[u32; 3]>;
println!("{}", mem::size_of::<QuadArray>());
assert!(mem::size_of::<QuadArray>() <= 24);
}
#[test]
fn test_still_works_with_option_arrayvec() {
type RefArray = ArrayVec<[&'static i32; 2]>;
let array = Some(RefArray::new());
assert!(array.is_some());
println!("{:?}", array);
}
#[test]
fn test_drain() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: ArrayVec<[_; 3]> = v.drain(1..4).rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
}
#[test]
fn test_drain_range_inclusive() {
let mut v = ArrayVec::from([0; 8]);
v.drain(0..=7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..=4);
assert_eq!(&v[..], &[0, 5, 6, 7]);
let u: ArrayVec<[_; 3]> = v.drain(1..=2).rev().collect();
assert_eq!(&u[..], &[6, 5]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
}
#[test]
#[should_panic]
fn test_drain_range_inclusive_oob() {
let mut v = ArrayVec::from([0; 0]);
v.drain(0..=0);
}
#[test]
fn test_retain() {
let mut v = ArrayVec::from([0; 8]);
for (i, elt) in v.iter_mut().enumerate() {
*elt = i;
}
v.retain(|_| true);
assert_eq!(&v[..], &[0, 1, 2, 3, 4, 5, 6, 7]);
v.retain(|elt| {
*elt /= 2;
*elt % 2 == 0
});
assert_eq!(&v[..], &[0, 0, 2, 2]);
v.retain(|_| false);
assert_eq!(&v[..], &[]);
}
#[test]
#[should_panic]
fn test_drain_oob() {
let mut v = ArrayVec::from([0; 8]);
v.pop();
v.drain(0..8);
}
#[test]
#[should_panic]
fn test_drop_panic() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
}
#[test]
#[should_panic]
fn test_drop_panic_into_iter() {
struct DropPanic;
impl Drop for DropPanic {
fn drop(&mut self) {
panic!("drop");
}
}
let mut array = ArrayVec::<[DropPanic; 1]>::new();
array.push(DropPanic);
array.into_iter();
}
#[test]
fn test_insert() {
let mut v = ArrayVec::from([]);
assert_matches!(v.try_push(1), Err(_));
let mut v = ArrayVec::<[_; 3]>::new();
v.insert(0, 0);
v.insert(1, 1);
//let ret1 = v.try_insert(3, 3);
//assert_matches!(ret1, Err(InsertError::OutOfBounds(_)));
assert_eq!(&v[..], &[0, 1]);
v.insert(2, 2);
assert_eq!(&v[..], &[0, 1, 2]);
let ret2 = v.try_insert(1, 9);
assert_eq!(&v[..], &[0, 1, 2]);
assert_matches!(ret2, Err(_));
let mut v = ArrayVec::from([2]);
assert_matches!(v.try_insert(0, 1), Err(CapacityError { .. }));
assert_matches!(v.try_insert(1, 1), Err(CapacityError { .. }));
//assert_matches!(v.try_insert(2, 1), Err(CapacityError { .. }));
}
#[test]
fn test_into_inner_1() {
let mut v = ArrayVec::from([1, 2]);
v.pop();
let u = v.clone();
assert_eq!(v.into_inner(), Err(u));
}
#[test]
fn test_into_inner_2() {
let mut v = ArrayVec::<[String; 4]>::new();
v.push("a".into());
v.push("b".into());
v.push("c".into());
v.push("d".into());
assert_eq!(v.into_inner().unwrap(), ["a", "b", "c", "d"]);
}
#[test]
fn test_into_inner_3_() {
let mut v = ArrayVec::<[i32; 4]>::new();
v.extend(1..);
assert_eq!(v.into_inner().unwrap(), [1, 2, 3, 4]);
}
#[cfg(feature="std")]
#[test]
fn test_write() {
use std::io::Write;
let mut v = ArrayVec::<[_; 8]>::new();
write!(&mut v, "\x01\x02\x03").unwrap();
assert_eq!(&v[..], &[1, 2, 3]);
let r = v.write(&[9; 16]).unwrap();
assert_eq!(r, 5);
assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
#[test]
fn array_clone_from() {
let mut v = ArrayVec::<[_; 4]>::new();
v.push(vec![1, 2]);
v.push(vec![3, 4, 5]);
v.push(vec![6]);
let reference = v.to_vec();
let mut u = ArrayVec::<[_; 4]>::new();
u.clone_from(&v);
assert_eq!(&u, &reference[..]);
let mut t = ArrayVec::<[_; 4]>::new();
t.push(vec![97]);
t.push(vec![]);
t.push(vec![5, 6, 2]);
t.push(vec![2]);
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
t.clear();
t.clone_from(&v);
assert_eq!(&t, &reference[..]);
}
#[cfg(feature="std")]
#[test]
fn test_string() {
use std::error::Error;
let text = "hello world";
let mut s = ArrayString::<[_; 16]>::new();
s.try_push_str(text).unwrap();
assert_eq!(&s, text);
assert_eq!(text, &s);
// Make sure Hash / Eq / Borrow match up so we can use HashMap
let mut map = HashMap::new();
map.insert(s, 1);
assert_eq!(map[text], 1);
let mut t = ArrayString::<[_; 2]>::new();
assert!(t.try_push_str(text).is_err());
assert_eq!(&t, "");
t.push_str("ab");
// DerefMut
let tmut: &mut str = &mut t;
assert_eq!(tmut, "ab");
// Test Error trait / try
let t = || -> Result<(), Box<dyn Error>> {
let mut t = ArrayString::<[_; 2]>::new();
t.try_push_str(text)?;
Ok(())
}();
assert!(t.is_err());
}
#[test]
fn test_string_from() {
let text = "hello world";
// Test `from` constructor
let u = ArrayString::<[_; 11]>::from(text).unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_parse_from_str() {
let text = "hello world";
let u: ArrayString<[_; 11]> = text.parse().unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_from_bytes() {
let text = "hello world";
let u = ArrayString::from_byte_string(b"hello world").unwrap();
assert_eq!(&u, text);
assert_eq!(u.len(), text.len());
}
#[test]
fn test_string_clone() {
let text = "hi";
let mut s = ArrayString::<[_; 4]>::new();
s.push_str("abcd");
let t = ArrayString::<[_; 4]>::from(text).unwrap();
s.clone_from(&t);
assert_eq!(&t, &s);
}
#[test]
fn test_string_push() {
let text = "abcαβγ";
let mut s = ArrayString::<[_; 8]>::new();
for c in text.chars() {
if let Err(_) = s.try_push(c) {
break;
}
}
assert_eq!("abcαβ", &s[..]);
s.push('x');
assert_eq!("abcαβx", &s[..]);
assert!(s.try_push('x').is_err());
}
#[test]
fn test_insert_at_length() {
let mut v = ArrayVec::<[_; 8]>::new();
let result1 = v.try_insert(0, "a");
let result2 = v.try_insert(1, "b");
assert!(result1.is_ok() && result2.is_ok());
assert_eq!(&v[..], &["a", "b"]);
}
#[should_panic]
#[test]
fn test_insert_out_of_bounds() {
let mut v = ArrayVec::<[_; 8]>::new();
let _ = v.try_insert(1, "test");
}
/*
* insert that pushes out the last
let mut u = ArrayVec::from([1, 2, 3, 4]);
let ret = u.try_insert(3, 99);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
let ret = u.try_insert(4, 77);
assert_eq!(&u[..], &[1, 2, 3, 99]);
assert_matches!(ret, Err(_));
*/
#[test]
fn test_drop_in_insert() {
use std::cell::Cell;
let flag = &Cell::new(0);
struct Bump<'a>(&'a Cell<i32>);
impl<'a> Drop for Bump<'a> {
fn drop(&mut self) {
let n = self.0.get();
self.0.set(n + 1);
}
}
flag.set(0);
{
let mut array = ArrayVec::<[_; 2]>::new();
array.push(Bump(flag));
array.insert(0, Bump(flag));
assert_eq!(flag.get(), 0);
let ret = array.try_insert(1, Bump(flag));
assert_eq!(flag.get(), 0);
assert_matches!(ret, Err(_));
drop(ret);
assert_eq!(flag.get(), 1);
}
assert_eq!(flag.get(), 3);
}
#[test]
fn test_pop_at() {
let mut v = ArrayVec::<[String; 4]>::new();
let s = String::from;
v.push(s("a"));
v.push(s("b"));
v.push(s("c"));
v.push(s("d"));
assert_eq!(v.pop_at(4), None);
assert_eq!(v.pop_at(1), Some(s("b")));
assert_eq!(v.pop_at(1), Some(s("c")));
assert_eq!(v.pop_at(2), None);
assert_eq!(&v[..], &["a", "d"]);
}
#[test]
fn test_sizes() {
let v = ArrayVec::from([0u8; 1 << 16]);
assert_eq!(vec![0u8; v.len()], &v[..]);
}
#[test]
fn test_default() {
use std::net;
let s: ArrayString<[u8; 4]> = Default::default();
// Something without `Default` implementation.
let v: ArrayVec<[net::TcpStream; 4]> = Default::default();
assert_eq!(s.len(), 0);
assert_eq!(v.len(), 0);
}
#[cfg(feature="array-sizes-33-128")]
#[test]
fn test_sizes_33_128() {
ArrayVec::from([0u8; 52]);
ArrayVec::from([0u8; 127]);
}
#[cfg(feature="array-sizes-129-255")]
#[test]
fn test_sizes_129_255() {
ArrayVec::from([0u8; 237]);
ArrayVec::from([0u8; 255]);
}
#[test]
fn test_extend_zst() {
let mut range = 0..10;
#[derive(Copy, Clone, PartialEq, Debug)]
struct Z; // Zero sized type
let mut array: ArrayVec<[_; 5]> = range.by_ref().map(|_| Z).collect();
assert_eq!(&array[..], &[Z; 5]);
assert_eq!(range.next(), Some(5));
array.extend(range.by_ref().map(|_| Z));
assert_eq!(range.next(), Some(6));
let mut array: ArrayVec<[_; 10]> = (0..3).map(|_| Z).collect();
assert_eq!(&array[..], &[Z; 3]);
array.extend((3..5).map(|_| Z));
assert_eq!(&array[..], &[Z; 5]);
assert_eq!(array.len(), 5);
}

third_party/rust/arrayvec/.cargo-checksum.json vendored

@ -1 +1 @@
{"files":{"CHANGELOG.md":"c9e49774ee89a3b7b533362d82060a14f2777ccefbe03b956f6f08057b6c3600","Cargo.toml":"c736151a4747b2c041404d730e17dec631393c5feea287edc8a3e482f83a8927","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.md":"f42eb73acbc7729825a836a532d1c8c6b71e006e0d87e549ea7a483da7472425","benches/arraystring.rs":"f12b890977117ebde4ca42bcd6b91f2a6a087f2b235aaca6d15e30d125ae9f67","benches/extend.rs":"c3d69cc488ec5341b019cfed545ebbfea252f98718037b413f6a349da9489d1b","ci/miri.sh":"59172afe080a3431f4e7dbd66d2040afa27ab9c0359532bd68f8f423261738de","custom.css":"e6f2cd299392337b4e2959c52f422e5b7be11920ea98d10db44d10ddef5ed47c","src/array.rs":"5fa75554ebdf595c918fe923a84678989fc420d800310ee19a21597f7d683b66","src/array_string.rs":"48c175371aed3372158a9331e939cffc2a11a09120253fa9d0521e5cbca7cfca","src/char.rs":"3fe9e9cc68fc7cedb238d53924b552b876a60c4fea85935d2f5d1ca0d41ffa3e","src/errors.rs":"ca44c0987f59ae57623088d80013e75129101caea93c278c8ebb0df898bc6b1b","src/lib.rs":"08270486d9e9d34e02e0edf227baf5e87b36d38d264da209d3b7f8962dce1b54","src/maybe_uninit.rs":"c81cf16f976bfaf7c1fe371aa2fba84872874fb0e43c96f63bef01cceb5e1d64","tests/serde.rs":"18c165cf6024f04a25b19aa139657d7c59f72d1541c9b24b44f9eaea01f507db","tests/tests.rs":"b9a3db8a0b957695d9ecc539a7ea2ded1eea3c6f76de8b5624c2b4eae95f1fdd"},"package":"23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"}
{"files":{"CHANGELOG.md":"9608f4cde67279f69e80b85373f19c1dde90a075b51d930e25e26d7ddabbfa03","Cargo.toml":"f4001657c27de1386a449fc5035abc0016f4f70ad99161b16e263d1eb2892dd8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0245ee104228a100ce5fceecf43e25faae450494d9173f43fd94c27d69fdac13","README.md":"2264c34c62ea4c617d72047b00749b4786dfb9dff2fac24e0320170ee0cd19c8","benches/arraystring.rs":"fad1cecef71c290375befc77c75a868988b8d74135e8f8732bc5b58c85a8ab46","benches/extend.rs":"c38ecedbc88217a7e9fe1a73f916b168a96e48010a7ccd3dba5c3f8dea030d5d","ci/miri.sh":"6bad1d135e1bdd67a6b91c870a7cf5ee09a85f9515633592a6abfbba95fdaf52","src/array_string.rs":"cefc432a025d780e01113144cbd5c0856d4424ec7fba4e626ddea523fbc8bbaf","src/arrayvec.rs":"32da1d3202f621852a3b884491f7a1086b7d813fe64b5cd7cab858ad3e967560","src/arrayvec_impl.rs":"a5e3391dc350041651f0ba3816c863ff7f552ff553e4a88f801481dfad7e7613","src/char.rs":"1de50e1d6045af2b3496426492315ba774986f9bc8301ffa391de861a08cc9cb","src/errors.rs":"ca44c0987f59ae57623088d80013e75129101caea93c278c8ebb0df898bc6b1b","src/lib.rs":"29a4123616c0912ccae5d931d45f0ccc3746647da1ba077c34538824910dd0ca","src/utils.rs":"d1cdc508dfca385e63f1f57bc8b53ed4a7f515e4ac1ebaa97b1d543fc8369432","tests/serde.rs":"117eb2961b5954d13c577edf60bbb07cb7481685cc9d6c49760a981d71465849","tests/tests.rs":"c1cd94337f4dc9aa31a6c16e334319389c605a98ccc5792e5489167cb614b7cb"},"package":"be4dc07131ffa69b8072d35f5007352af944213cde02545e2103680baed38fcd"}

141
third_party/rust/arrayvec/CHANGELOG.md vendored

@ -1,56 +1,109 @@
Recent Changes (arrayvec)
-------------------------
=========================
- 0.5.2
## 0.7.1
- Add `is_empty` methods for ArrayVec and ArrayString by @nicbn
- Implement `TryFrom<Slice>` for ArrayVec by @paulkernfeld
- Add `unstable-const-fn` to make `new` methods const by @m-ou-se
- Run miri in CI and a few related fixes by @RalfJung
- Fix outdated comment by @Phlosioneer
- Move changelog to a separate file by @Luro02
- Remove deprecated `Error::description` by @AnderEnder
- Use pointer method `add` by @hbina
- Add new ArrayVec methods `.take()` and `.into_inner_unchecked()` by @conradludgate
- `clone_from` now uses `truncate` when needed by @a1phyr
- 0.5.1
## 0.7.0
- Add `as_ptr`, `as_mut_ptr` accessors directly on the `ArrayVec` by @tbu-
(matches the same addition to `Vec` which happened in Rust 1.37).
- Add method `ArrayString::len` (now available directly, not just through deref to str).
- Use raw pointers instead of `&mut [u8]` for encoding chars into `ArrayString`
(uninit best practice fix).
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation
(uninit best practice fix).
- Changed inline hints on many methods, mainly removing inline hints
- `ArrayVec::dispose` is now deprecated (it has no purpose anymore)
- `fn new_const` is now the way to const-construct arrayvec and arraystring,
and `fn new` has been reverted to a regular "non-const" function.
This works around performance issue #182, where the const fn version did not
optimize well. Change by @bluss with thanks to @rodrimati1992 and @niklasf
for analyzing the problem.
- 0.4.12
- The deprecated feature flag `unstable-const-fn` was removed, since it's not needed
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation.
- Optimize `.retain()` by using the same algorithm as in std, change by @niklasf,
issue #174. Original optimization in Rust std by @oxalica in rust-lang/rust/pull/81126
- 0.5.0
## 0.6.1
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayVec`
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayString`
- The crate feature for serde serialization is now named `serde`.
- Updated the `Array` trait interface, and it is now easier to use for
users outside the crate.
- Add `FromStr` impl for `ArrayString` by @despawnerer
- Add method `try_extend_from_slice` to `ArrayVec`, which is always
efficient by @Thomasdezeeuw.
- Add method `remaining_capacity` by @Thomasdezeeuw
- Improve performance of the `extend` method.
- The index type of zero capacity vectors is now itself zero size, by
@clarfon
- Use `drop_in_place` for truncate and clear methods. This affects drop order
and resume from panic during drop.
- Use Rust 2018 edition for the implementation
- Require Rust 1.36 or later, for the unconditional `MaybeUninit`
improvements.
- The ``ArrayVec::new`` and ``ArrayString::new`` constructors are properly
const fns on stable and the feature flag ``unstable-const-fn`` is now deprecated.
by @rodrimati1992
- Small fix to the capacity check macro by @Xaeroxe
- Typo fix in documentation by @cuviper
- Small code cleanup by @bluss
## 0.6.0
- The **const generics** release 🎉. Arrayvec finally implements what it
wanted to implement, since its first version: a vector backed by an array,
with generic parameters for the arbitrary element type *and* backing array
capacity.
The new type syntax is `ArrayVec<T, CAP>` where `CAP` is the arrayvec capacity.
For arraystring the syntax is `ArrayString<CAP>`.
Length is stored internally as u32; this limits the maximum capacity. The size
of the `ArrayVec` or `ArrayString` structs for the same capacity may grow
slightly compared with the previous version (depending on padding requirements
for the element type). Change by @bluss.
- Arrayvec's `.extend()` and `FromIterator`/`.collect()` to arrayvec now
**panic** if the capacity of the arrayvec is exceeded. Change by @bluss.
- Arraystring now implements `TryFrom<&str>` and `TryFrom<fmt::Arguments>` by
@c410-f3r
- Minimum supported rust version is Rust 1.51
## 0.5.2
- Add `is_empty` methods for ArrayVec and ArrayString by @nicbn
- Implement `TryFrom<Slice>` for ArrayVec by @paulkernfeld
- Add `unstable-const-fn` to make `new` methods const by @m-ou-se
- Run miri in CI and a few related fixes by @RalfJung
- Fix outdated comment by @Phlosioneer
- Move changelog to a separate file by @Luro02
- Remove deprecated `Error::description` by @AnderEnder
- Use pointer method `add` by @hbina
## 0.5.1
- Add `as_ptr`, `as_mut_ptr` accessors directly on the `ArrayVec` by @tbu-
(matches the same addition to `Vec` which happened in Rust 1.37).
- Add method `ArrayString::len` (now available directly, not just through deref to str).
- Use raw pointers instead of `&mut [u8]` for encoding chars into `ArrayString`
(uninit best practice fix).
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation
(uninit best practice fix).
- Changed inline hints on many methods, mainly removing inline hints
- `ArrayVec::dispose` is now deprecated (it has no purpose anymore)
## 0.4.12
- Use raw pointers instead of `get_unchecked_mut` where the target may be
uninitialized everywhere relevant in the ArrayVec implementation.
## 0.5.0
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayVec`
- Use `MaybeUninit` (now unconditionally) in the implementation of
`ArrayString`
- The crate feature for serde serialization is now named `serde`.
- Updated the `Array` trait interface, and it is now easier to use for
users outside the crate.
- Add `FromStr` impl for `ArrayString` by @despawnerer
- Add method `try_extend_from_slice` to `ArrayVec`, which is always
efficient by @Thomasdezeeuw.
- Add method `remaining_capacity` by @Thomasdezeeuw
- Improve performance of the `extend` method.
- The index type of zero capacity vectors is now itself zero size, by
@clarfon
- Use `drop_in_place` for truncate and clear methods. This affects drop order
and resume from panic during drop.
- Use Rust 2018 edition for the implementation
- Require Rust 1.36 or later, for the unconditional `MaybeUninit`
improvements.
## Older releases
- 0.4.11
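
The changelog above covers the jump vendored by this commit (0.5.2 to 0.7.1), most notably the 0.6.0 move to const generics (`ArrayVec<T, CAP>`, `ArrayString<CAP>`) and the 0.7.0 `new_const` constructor. A minimal sketch of the post-update API, assuming arrayvec 0.7 as pinned in Cargo.lock:

use arrayvec::{ArrayString, ArrayVec};

// Capacity is now a const generic parameter instead of a backing array type,
// and new_const() allows const/static construction.
static SCRATCH: ArrayString<16> = ArrayString::new_const();

fn main() {
    assert_eq!(SCRATCH.capacity(), 16);

    let mut v: ArrayVec<u32, 4> = ArrayVec::new();
    v.extend(0..4);                      // extend now panics if the capacity would be exceeded
    assert_eq!(&v[..], &[0, 1, 2, 3]);
    assert!(v.try_push(4).is_err());     // the fallible push still returns a CapacityError

    let mut s = ArrayString::<8>::new(); // was ArrayString::<[u8; 8]> in 0.5.2
    s.push_str("abcαβ");
    assert_eq!(s.len(), 7);
}

Length is stored internally as `u32`, which is why the new sources guard construction with `assert_capacity_limit!(CAP)`.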

7
third_party/rust/arrayvec/Cargo.toml vendored

@ -13,13 +13,13 @@
[package]
edition = "2018"
name = "arrayvec"
version = "0.5.2"
version = "0.7.1"
authors = ["bluss"]
description = "A vector with fixed capacity, backed by an array (it can be stored on the stack too). Implements fixed capacity ArrayVec and ArrayString."
documentation = "https://docs.rs/arrayvec/"
keywords = ["stack", "vector", "array", "data-structure", "no_std"]
categories = ["data-structures", "no-std"]
license = "MIT/Apache-2.0"
license = "MIT OR Apache-2.0"
repository = "https://github.com/bluss/arrayvec"
[package.metadata.docs.rs]
features = ["serde"]
@ -56,8 +56,5 @@ version = "1.0"
[build-dependencies]
[features]
array-sizes-129-255 = []
array-sizes-33-128 = []
default = ["std"]
std = []
unstable-const-fn = []

7
third_party/rust/arrayvec/README.md vendored

@ -2,9 +2,12 @@
arrayvec
========
[![Crates.io: arrayvec](https://img.shields.io/crates/v/arrayvec.svg)](https://crates.io/crates/arrayvec)
[![Crates.io: nodrop](https://img.shields.io/crates/v/nodrop.svg)](https://crates.io/crates/nodrop)
[![Documentation](https://docs.rs/arrayvec/badge.svg)](https://docs.rs/arrayvec)
[![Build Status](https://travis-ci.org/bluss/arrayvec.svg?branch=master)](https://travis-ci.org/bluss/arrayvec)
[![Build Status](https://github.com/bluss/arrayvec/workflows/Continuous%20integration/badge.svg?branch=master)](https://github.com/bluss/arrayvec/actions)
[![License: Apache](https://img.shields.io/badge/License-Apache%202.0-red.svg)](LICENSE-APACHE)
OR
[![License: MIT](https://img.shields.io/badge/license-MIT-blue.svg)](LICENSE)

third_party/rust/arrayvec/benches/arraystring.rs vendored

@ -7,7 +7,7 @@ use arrayvec::ArrayString;
use bencher::Bencher;
fn try_push_c(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
b.iter(|| {
v.clear();
while v.try_push('c').is_ok() {
@ -18,7 +18,7 @@ fn try_push_c(b: &mut Bencher) {
}
fn try_push_alpha(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
b.iter(|| {
v.clear();
while v.try_push('α').is_ok() {
@ -30,7 +30,7 @@ fn try_push_alpha(b: &mut Bencher) {
// Yes, pushing a string char-by-char is slow. Use .push_str.
fn try_push_string(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
let input = "abcαβγ“”";
b.iter(|| {
v.clear();
@ -45,7 +45,7 @@ fn try_push_string(b: &mut Bencher) {
}
fn push_c(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
b.iter(|| {
v.clear();
while !v.is_full() {
@ -57,7 +57,7 @@ fn push_c(b: &mut Bencher) {
}
fn push_alpha(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
b.iter(|| {
v.clear();
while !v.is_full() {
@ -69,7 +69,7 @@ fn push_alpha(b: &mut Bencher) {
}
fn push_string(b: &mut Bencher) {
let mut v = ArrayString::<[u8; 512]>::new();
let mut v = ArrayString::<512>::new();
let input = "abcαβγ“”";
b.iter(|| {
v.clear();

10
third_party/rust/arrayvec/benches/extend.rs vendored

@ -10,7 +10,7 @@ use bencher::Bencher;
use bencher::black_box;
fn extend_with_constant(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let mut v = ArrayVec::<u8, 512>::new();
let cap = v.capacity();
b.iter(|| {
v.clear();
@ -22,7 +22,7 @@ fn extend_with_constant(b: &mut Bencher) {
}
fn extend_with_range(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let mut v = ArrayVec::<u8, 512>::new();
let cap = v.capacity();
b.iter(|| {
v.clear();
@ -34,7 +34,7 @@ fn extend_with_range(b: &mut Bencher) {
}
fn extend_with_slice(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let mut v = ArrayVec::<u8, 512>::new();
let data = [1; 512];
b.iter(|| {
v.clear();
@ -46,7 +46,7 @@ fn extend_with_slice(b: &mut Bencher) {
}
fn extend_with_write(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let mut v = ArrayVec::<u8, 512>::new();
let data = [1; 512];
b.iter(|| {
v.clear();
@ -57,7 +57,7 @@ fn extend_with_write(b: &mut Bencher) {
}
fn extend_from_slice(b: &mut Bencher) {
let mut v = ArrayVec::<[u8; 512]>::new();
let mut v = ArrayVec::<u8, 512>::new();
let data = [1; 512];
b.iter(|| {
v.clear();

2
third_party/rust/arrayvec/ci/miri.sh vendored Normal file → Executable file

@ -1,4 +1,4 @@
#!/usr/bin/env sh
#!/bin/sh
set -ex

236
third_party/rust/arrayvec/src/array_string.rs vendored

@ -1,50 +1,51 @@
use std::borrow::Borrow;
use std::cmp;
use std::convert::TryFrom;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ptr;
use std::mem::MaybeUninit;
use std::ops::{Deref, DerefMut};
use std::ptr;
use std::slice;
use std::str;
use std::str::FromStr;
use std::str::Utf8Error;
use std::slice;
use crate::array::Array;
use crate::array::Index;
use crate::CapacityError;
use crate::LenUint;
use crate::char::encode_utf8;
use crate::utils::MakeMaybeUninit;
#[cfg(feature="serde")]
use serde::{Serialize, Deserialize, Serializer, Deserializer};
use super::MaybeUninit as MaybeUninitCopy;
/// A string with a fixed capacity.
///
/// The `ArrayString` is a string backed by a fixed size array. It keeps track
/// of its length.
/// of its length, and is parameterized by `CAP` for the maximum capacity.
///
/// `CAP` is of type `usize` but is range limited to `u32::MAX`; attempting to create larger
/// arrayvecs with larger capacity will panic.
///
/// The string is a contiguous value that you can store directly on the stack
/// if needed.
#[derive(Copy)]
pub struct ArrayString<A>
where A: Array<Item=u8> + Copy
{
xs: MaybeUninitCopy<A>,
len: A::Index,
pub struct ArrayString<const CAP: usize> {
// the `len` first elements of the array are initialized
xs: [MaybeUninit<u8>; CAP],
len: LenUint,
}
impl<A> Default for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Default for ArrayString<CAP>
{
/// Return an empty `ArrayString`
fn default() -> ArrayString<A> {
fn default() -> ArrayString<CAP> {
ArrayString::new()
}
}
impl<A> ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> ArrayString<CAP>
{
/// Create a new empty `ArrayString`.
///
@ -53,34 +54,35 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 16]>::new();
/// let mut string = ArrayString::<16>::new();
/// string.push_str("foo");
/// assert_eq!(&string[..], "foo");
/// assert_eq!(string.capacity(), 16);
/// ```
#[cfg(not(feature="unstable-const-fn"))]
pub fn new() -> ArrayString<A> {
pub fn new() -> ArrayString<CAP> {
assert_capacity_limit!(CAP);
unsafe {
ArrayString {
xs: MaybeUninitCopy::uninitialized(),
len: Index::ZERO,
}
ArrayString { xs: MaybeUninit::uninit().assume_init(), len: 0 }
}
}
#[cfg(feature="unstable-const-fn")]
pub const fn new() -> ArrayString<A> {
unsafe {
ArrayString {
xs: MaybeUninitCopy::uninitialized(),
len: Index::ZERO,
}
}
/// Create a new empty `ArrayString` (const fn).
///
/// Capacity is inferred from the type parameter.
///
/// ```
/// use arrayvec::ArrayString;
///
/// static ARRAY: ArrayString<1024> = ArrayString::new_const();
/// ```
pub const fn new_const() -> ArrayString<CAP> {
assert_capacity_limit_const!(CAP);
ArrayString { xs: MakeMaybeUninit::ARRAY, len: 0 }
}
/// Return the length of the string.
#[inline]
pub fn len(&self) -> usize { self.len.to_usize() }
pub fn len(&self) -> usize { self.len as usize }
/// Returns whether the string is empty.
#[inline]
@ -95,7 +97,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 3]>::from("foo").unwrap();
/// let mut string = ArrayString::<3>::from("foo").unwrap();
/// assert_eq!(&string[..], "foo");
/// assert_eq!(string.len(), 3);
/// assert_eq!(string.capacity(), 3);
@ -115,13 +117,16 @@ impl<A> ArrayString<A>
///
/// let string = ArrayString::from_byte_string(b"hello world").unwrap();
/// ```
pub fn from_byte_string(b: &A) -> Result<Self, Utf8Error> {
let len = str::from_utf8(b.as_slice())?.len();
debug_assert_eq!(len, A::CAPACITY);
Ok(ArrayString {
xs: MaybeUninitCopy::from(*b),
len: Index::from(A::CAPACITY),
})
pub fn from_byte_string(b: &[u8; CAP]) -> Result<Self, Utf8Error> {
let len = str::from_utf8(b)?.len();
debug_assert_eq!(len, CAP);
let mut vec = Self::new();
unsafe {
(b as *const [u8; CAP] as *const [MaybeUninit<u8>; CAP])
.copy_to_nonoverlapping(&mut vec.xs as *mut [MaybeUninit<u8>; CAP], 1);
vec.set_len(CAP);
}
Ok(vec)
}
/// Return the capacity of the `ArrayString`.
@ -129,18 +134,18 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let string = ArrayString::<[_; 3]>::new();
/// let string = ArrayString::<3>::new();
/// assert_eq!(string.capacity(), 3);
/// ```
#[inline(always)]
pub fn capacity(&self) -> usize { A::CAPACITY }
pub fn capacity(&self) -> usize { CAP }
/// Return if the `ArrayString` is completely filled.
///
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 1]>::new();
/// let mut string = ArrayString::<1>::new();
/// assert!(!string.is_full());
/// string.push_str("A");
/// assert!(string.is_full());
@ -154,7 +159,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
/// let mut string = ArrayString::<2>::new();
///
/// string.push('a');
/// string.push('b');
@ -174,7 +179,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
/// let mut string = ArrayString::<2>::new();
///
/// string.try_push('a').unwrap();
/// string.try_push('b').unwrap();
@ -186,7 +191,7 @@ impl<A> ArrayString<A>
pub fn try_push(&mut self, c: char) -> Result<(), CapacityError<char>> {
let len = self.len();
unsafe {
let ptr = self.xs.ptr_mut().add(len);
let ptr = self.as_mut_ptr().add(len);
let remaining_cap = self.capacity() - len;
match encode_utf8(c, ptr, remaining_cap) {
Ok(n) => {
@ -205,7 +210,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
/// let mut string = ArrayString::<2>::new();
///
/// string.push_str("a");
/// string.push_str("d");
@ -225,7 +230,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 2]>::new();
/// let mut string = ArrayString::<2>::new();
///
/// string.try_push_str("a").unwrap();
/// let overflow1 = string.try_push_str("bc");
@ -241,7 +246,7 @@ impl<A> ArrayString<A>
return Err(CapacityError::new(s));
}
unsafe {
let dst = self.xs.ptr_mut().add(self.len());
let dst = self.as_mut_ptr().add(self.len());
let src = s.as_ptr();
ptr::copy_nonoverlapping(src, dst, s.len());
let newl = self.len() + s.len();
@ -257,7 +262,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut s = ArrayString::<[_; 3]>::from("foo").unwrap();
/// let mut s = ArrayString::<3>::from("foo").unwrap();
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
@ -287,7 +292,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut string = ArrayString::<[_; 6]>::from("foobar").unwrap();
/// let mut string = ArrayString::<6>::from("foobar").unwrap();
/// string.truncate(3);
/// assert_eq!(&string[..], "foo");
/// string.truncate(4);
@ -317,7 +322,7 @@ impl<A> ArrayString<A>
/// ```
/// use arrayvec::ArrayString;
///
/// let mut s = ArrayString::<[_; 3]>::from("foo").unwrap();
/// let mut s = ArrayString::<3>::from("foo").unwrap();
///
/// assert_eq!(s.remove(0), 'f');
/// assert_eq!(s.remove(1), 'o');
@ -332,8 +337,8 @@ impl<A> ArrayString<A>
let next = idx + ch.len_utf8();
let len = self.len();
unsafe {
ptr::copy(self.xs.ptr().add(next),
self.xs.ptr_mut().add(idx),
ptr::copy(self.as_ptr().add(next),
self.as_mut_ptr().add(idx),
len - next);
self.set_len(len - (next - idx));
}
@ -355,104 +360,102 @@ impl<A> ArrayString<A>
/// This method uses *debug assertions* to check the validity of `length`
/// and may use other debug assertions.
pub unsafe fn set_len(&mut self, length: usize) {
// type invariant that capacity always fits in LenUint
debug_assert!(length <= self.capacity());
self.len = Index::from(length);
self.len = length as LenUint;
}
/// Return a string slice of the whole `ArrayString`.
pub fn as_str(&self) -> &str {
self
}
fn as_ptr(&self) -> *const u8 {
self.xs.as_ptr() as *const u8
}
fn as_mut_ptr(&mut self) -> *mut u8 {
self.xs.as_mut_ptr() as *mut u8
}
}
impl<A> Deref for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Deref for ArrayString<CAP>
{
type Target = str;
#[inline]
fn deref(&self) -> &str {
unsafe {
let sl = slice::from_raw_parts(self.xs.ptr(), self.len.to_usize());
let sl = slice::from_raw_parts(self.as_ptr(), self.len());
str::from_utf8_unchecked(sl)
}
}
}
impl<A> DerefMut for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> DerefMut for ArrayString<CAP>
{
#[inline]
fn deref_mut(&mut self) -> &mut str {
unsafe {
let sl = slice::from_raw_parts_mut(self.xs.ptr_mut(), self.len.to_usize());
let len = self.len();
let sl = slice::from_raw_parts_mut(self.as_mut_ptr(), len);
str::from_utf8_unchecked_mut(sl)
}
}
}
impl<A> PartialEq for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialEq for ArrayString<CAP>
{
fn eq(&self, rhs: &Self) -> bool {
**self == **rhs
}
}
impl<A> PartialEq<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialEq<str> for ArrayString<CAP>
{
fn eq(&self, rhs: &str) -> bool {
&**self == rhs
}
}
impl<A> PartialEq<ArrayString<A>> for str
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialEq<ArrayString<CAP>> for str
{
fn eq(&self, rhs: &ArrayString<A>) -> bool {
fn eq(&self, rhs: &ArrayString<CAP>) -> bool {
self == &**rhs
}
}
impl<A> Eq for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Eq for ArrayString<CAP>
{ }
impl<A> Hash for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Hash for ArrayString<CAP>
{
fn hash<H: Hasher>(&self, h: &mut H) {
(**self).hash(h)
}
}
impl<A> Borrow<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Borrow<str> for ArrayString<CAP>
{
fn borrow(&self) -> &str { self }
}
impl<A> AsRef<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> AsRef<str> for ArrayString<CAP>
{
fn as_ref(&self) -> &str { self }
}
impl<A> fmt::Debug for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> fmt::Debug for ArrayString<CAP>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
}
impl<A> fmt::Display for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> fmt::Display for ArrayString<CAP>
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { (**self).fmt(f) }
}
/// `Write` appends written data to the end of the string.
impl<A> fmt::Write for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> fmt::Write for ArrayString<CAP>
{
fn write_char(&mut self, c: char) -> fmt::Result {
self.try_push(c).map_err(|_| fmt::Error)
@ -463,10 +466,9 @@ impl<A> fmt::Write for ArrayString<A>
}
}
impl<A> Clone for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Clone for ArrayString<CAP>
{
fn clone(&self) -> ArrayString<A> {
fn clone(&self) -> ArrayString<CAP> {
*self
}
fn clone_from(&mut self, rhs: &Self) {
@ -476,8 +478,7 @@ impl<A> Clone for ArrayString<A>
}
}
impl<A> PartialOrd for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialOrd for ArrayString<CAP>
{
fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
(**self).partial_cmp(&**rhs)
@ -488,8 +489,7 @@ impl<A> PartialOrd for ArrayString<A>
fn ge(&self, rhs: &Self) -> bool { **self >= **rhs }
}
impl<A> PartialOrd<str> for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialOrd<str> for ArrayString<CAP>
{
fn partial_cmp(&self, rhs: &str) -> Option<cmp::Ordering> {
(**self).partial_cmp(rhs)
@ -500,28 +500,25 @@ impl<A> PartialOrd<str> for ArrayString<A>
fn ge(&self, rhs: &str) -> bool { &**self >= rhs }
}
impl<A> PartialOrd<ArrayString<A>> for str
where A: Array<Item=u8> + Copy
impl<const CAP: usize> PartialOrd<ArrayString<CAP>> for str
{
fn partial_cmp(&self, rhs: &ArrayString<A>) -> Option<cmp::Ordering> {
fn partial_cmp(&self, rhs: &ArrayString<CAP>) -> Option<cmp::Ordering> {
self.partial_cmp(&**rhs)
}
fn lt(&self, rhs: &ArrayString<A>) -> bool { self < &**rhs }
fn le(&self, rhs: &ArrayString<A>) -> bool { self <= &**rhs }
fn gt(&self, rhs: &ArrayString<A>) -> bool { self > &**rhs }
fn ge(&self, rhs: &ArrayString<A>) -> bool { self >= &**rhs }
fn lt(&self, rhs: &ArrayString<CAP>) -> bool { self < &**rhs }
fn le(&self, rhs: &ArrayString<CAP>) -> bool { self <= &**rhs }
fn gt(&self, rhs: &ArrayString<CAP>) -> bool { self > &**rhs }
fn ge(&self, rhs: &ArrayString<CAP>) -> bool { self >= &**rhs }
}
impl<A> Ord for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Ord for ArrayString<CAP>
{
fn cmp(&self, rhs: &Self) -> cmp::Ordering {
(**self).cmp(&**rhs)
}
}
impl<A> FromStr for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> FromStr for ArrayString<CAP>
{
type Err = CapacityError;
@ -532,8 +529,7 @@ impl<A> FromStr for ArrayString<A>
#[cfg(feature="serde")]
/// Requires crate feature `"serde"`
impl<A> Serialize for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<const CAP: usize> Serialize for ArrayString<CAP>
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer
@ -544,8 +540,7 @@ impl<A> Serialize for ArrayString<A>
#[cfg(feature="serde")]
/// Requires crate feature `"serde"`
impl<'de, A> Deserialize<'de> for ArrayString<A>
where A: Array<Item=u8> + Copy
impl<'de, const CAP: usize> Deserialize<'de> for ArrayString<CAP>
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where D: Deserializer<'de>
@ -553,13 +548,13 @@ impl<'de, A> Deserialize<'de> for ArrayString<A>
use serde::de::{self, Visitor};
use std::marker::PhantomData;
struct ArrayStringVisitor<A: Array<Item=u8>>(PhantomData<A>);
struct ArrayStringVisitor<const CAP: usize>(PhantomData<[u8; CAP]>);
impl<'de, A: Copy + Array<Item=u8>> Visitor<'de> for ArrayStringVisitor<A> {
type Value = ArrayString<A>;
impl<'de, const CAP: usize> Visitor<'de> for ArrayStringVisitor<CAP> {
type Value = ArrayString<CAP>;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "a string no more than {} bytes long", A::CAPACITY)
write!(formatter, "a string no more than {} bytes long", CAP)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
@ -577,6 +572,29 @@ impl<'de, A> Deserialize<'de> for ArrayString<A>
}
}
deserializer.deserialize_str(ArrayStringVisitor::<A>(PhantomData))
deserializer.deserialize_str(ArrayStringVisitor(PhantomData))
}
}
impl<'a, const CAP: usize> TryFrom<&'a str> for ArrayString<CAP>
{
type Error = CapacityError<&'a str>;
fn try_from(f: &'a str) -> Result<Self, Self::Error> {
let mut v = Self::new();
v.try_push_str(f)?;
Ok(v)
}
}
impl<'a, const CAP: usize> TryFrom<fmt::Arguments<'a>> for ArrayString<CAP>
{
type Error = CapacityError<fmt::Error>;
fn try_from(f: fmt::Arguments<'a>) -> Result<Self, Self::Error> {
use fmt::Write;
let mut v = Self::new();
v.write_fmt(f).map_err(|e| CapacityError::new(e))?;
Ok(v)
}
}
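
The hunks above track arrayvec's move from array-type parameters (`ArrayString<[u8; N]>`) to const generics (`ArrayString<N>`). As a minimal, illustrative sketch of what a call site looks like against the vendored 0.7 API (this snippet is not part of the vendored diff itself):

```rust
use arrayvec::ArrayString;
use std::fmt::Write; // ArrayString implements fmt::Write, as shown in the diff above

fn main() {
    // Capacity is now a const generic parameter, not an array type.
    let mut s = ArrayString::<16>::new();
    s.push_str("wgpu");
    write!(s, "-{}", 7).unwrap();
    assert_eq!(&s[..], "wgpu-7");

    // try_push_str reports a CapacityError when the string would overflow
    // its fixed capacity, rather than panicking like push_str.
    let mut tiny = ArrayString::<2>::new();
    assert!(tiny.try_push_str("abc").is_err());
}
```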

1250
third_party/rust/arrayvec/src/arrayvec.rs vendored Normal file

The diff for this file is not shown because of its large size.

Some files were not shown because too many files changed in this diff.