Bug 1622846 - Serialize WebGPU descriptors via Serde for IPC r=jgilbert,webidl,smaug

This PR updates wgpu to the latest and changes the way we pass object descriptors to the GPU process.
Instead of trying to convert them between DOM-CPP-IPDL-FFI-Rust, we now do DOM-FFI-Rust
and serialize them by serde into ipc::ByteBuf objects.

Differential Revision: https://phabricator.services.mozilla.com/D94908
This commit is contained in:
Dzmitry Malyshau 2020-11-06 18:59:27 +00:00
Родитель d95911267d
Коммит 505438ed91
511 изменённых файлов: 92869 добавлений и 25907 удалений

Просмотреть файл

@ -28,7 +28,7 @@ replace-with = "vendored-sources"
rev = "1dc60327164a3f64f87180147b1f3a2a9cc74276"
[source."https://github.com/kvark/spirv_cross"]
branch = "wgpu3"
branch = "wgpu4"
git = "https://github.com/kvark/spirv_cross"
replace-with = "vendored-sources"
@ -50,7 +50,7 @@ rev = "0917fe780032a6bbb23d71be545f9c1834128d75"
[source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources"
rev = "bce6358eb1026c13d2f1c6d365af37afe8869a86"
rev = "aa35110471ee7915e1f4e1de61ea41f2f32f92c4"
[source."https://github.com/djg/cubeb-pulse-rs"]
git = "https://github.com/djg/cubeb-pulse-rs"

184
Cargo.lock сгенерированный
Просмотреть файл

@ -70,6 +70,9 @@ name = "arrayvec"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"
dependencies = [
"serde",
]
[[package]]
name = "ash"
@ -80,12 +83,6 @@ dependencies = [
"libloading 0.6.2",
]
[[package]]
name = "atom"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c86699c3f02778ec07158376991c8f783dd1f2f95c579ffaf0738dc984b2fe2"
[[package]]
name = "atomic"
version = "0.4.5"
@ -297,6 +294,21 @@ dependencies = [
"which",
]
[[package]]
name = "bit-set"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de"
dependencies = [
"bit-vec",
]
[[package]]
name = "bit-vec"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0dc55f2d8a1a85650ac47858bb001b4c0dd73d79e3c455a842925e68d29cd3"
[[package]]
name = "bitflags"
version = "1.2.1"
@ -552,6 +564,12 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cfg_aliases"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
[[package]]
name = "chardetng"
version = "0.1.9"
@ -1434,6 +1452,12 @@ dependencies = [
"xpcom",
]
[[package]]
name = "fixedbitset"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
[[package]]
name = "flate2"
version = "1.0.12"
@ -1808,9 +1832,9 @@ dependencies = [
[[package]]
name = "gfx-auxil"
version = "0.4.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67bdbf8e8d6883c70e5a0d7379ad8ab3ac95127a3761306b36122d8f1c177a8e"
checksum = "07cd956b592970f08545b9325b87580eb95a51843b6f39da27b8667fec1a1216"
dependencies = [
"fxhash",
"gfx-hal",
@ -1819,10 +1843,11 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.5.3"
version = "0.6.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc32a386fdb3240dea3df141dfc2343bf95e5719da36c3c5b0ecb6940a184f2b"
checksum = "52b0c3b8b2e0a60c1380a7c27652cd86b791e5d8312fb9592a7a59bd437e9532"
dependencies = [
"arrayvec",
"bitflags",
"gfx-auxil",
"gfx-hal",
@ -1833,16 +1858,19 @@ dependencies = [
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi 0.3.9",
"wio",
]
[[package]]
name = "gfx-backend-dx12"
version = "0.5.10"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5959ce9dd48e7e04c71128024c3e76e86aa6fb6d83e48aad6819a1f7afae52e4"
checksum = "bf8bc6329ebac49722b66a2b87d5d769bba1de584f51ffbf0cd31701d01050b0"
dependencies = [
"arrayvec",
"bit-set",
"bitflags",
"d3d12",
"gfx-auxil",
@ -1857,19 +1885,20 @@ dependencies = [
[[package]]
name = "gfx-backend-empty"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b67bd2d7bc022b257ddbdabc5fa3b10c29c292372c3409f2b6a6e3f4e11cdb85"
checksum = "2085227c12b78f6657a900c829f2d0deb46a9be3eaf86844fde263cdc218f77c"
dependencies = [
"gfx-hal",
"log",
"raw-window-handle",
]
[[package]]
name = "gfx-backend-metal"
version = "0.5.7"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0882c9bf80d6b57275acb6209ca60657b63ef71bb9e437f28a2417389c0b10"
checksum = "60ba1c77c112e7d35786dbd49ed26f2a76ce53a44bc09fe964935e4e35ed7f2b"
dependencies = [
"arrayvec",
"bitflags",
@ -1893,15 +1922,16 @@ dependencies = [
[[package]]
name = "gfx-backend-vulkan"
version = "0.5.11"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aec9c919cfc236d2c36aaa38609c1906a92f2df99a3c7f53022b01936f98275a"
checksum = "3a3a63cf61067a09b7d1ac480af3cb2ae0c5ede5bed294607bbd814cb1666c45"
dependencies = [
"arrayvec",
"ash",
"byteorder",
"core-graphics-types",
"gfx-hal",
"inplace_it",
"lazy_static",
"log",
"objc",
@ -1912,10 +1942,11 @@ dependencies = [
[[package]]
name = "gfx-descriptor"
version = "0.1.0"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bf35f5d66d1bc56e63e68d7528441453f25992bd954b84309d23c659df2c5da"
checksum = "cd8c7afcd000f279d541a490e27117e61037537279b9342279abf4938fe60c6b"
dependencies = [
"arrayvec",
"fxhash",
"gfx-hal",
"log",
@ -1923,9 +1954,9 @@ dependencies = [
[[package]]
name = "gfx-hal"
version = "0.5.3"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a18534b23d4c262916231511309bc1f307c74cda8dcb68b93a10ca213a22814b"
checksum = "18d0754f5b7a43915fd7466883b2d1bb0800d7cc4609178d0b27bf143b9e5123"
dependencies = [
"bitflags",
"raw-window-handle",
@ -1933,13 +1964,13 @@ dependencies = [
[[package]]
name = "gfx-memory"
version = "0.1.3"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2eed6cda674d9cd4d92229102dbd544292124533d236904f987e9afab456137"
checksum = "dccdda5d2b39412f4ca2cb15c70b5a82783a86b0606f5e985342754c8ed88f05"
dependencies = [
"bit-set",
"fxhash",
"gfx-hal",
"hibitset",
"log",
"slab",
]
@ -2251,15 +2282,6 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35"
[[package]]
name = "hibitset"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47e7292fd9f7fe89fa35c98048f2d0a69b79ed243604234d18f6f8a1aa6f408d"
dependencies = [
"atom",
]
[[package]]
name = "http"
version = "0.2.0"
@ -2402,6 +2424,12 @@ dependencies = [
"adler32",
]
[[package]]
name = "inplace_it"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd01a2a73f2f399df96b22dc88ea687ef4d76226284e7531ae3c7ee1dc5cb534"
[[package]]
name = "instant"
version = "0.1.8"
@ -3251,14 +3279,16 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.1.0"
source = "git+https://github.com/gfx-rs/naga?rev=bce6358eb1026c13d2f1c6d365af37afe8869a86#bce6358eb1026c13d2f1c6d365af37afe8869a86"
version = "0.2.0"
source = "git+https://github.com/gfx-rs/naga?rev=aa35110471ee7915e1f4e1de61ea41f2f32f92c4#aa35110471ee7915e1f4e1de61ea41f2f32f92c4"
dependencies = [
"bitflags",
"fxhash",
"log",
"num-traits",
"petgraph",
"spirv_headers",
"thiserror",
]
[[package]]
@ -3669,16 +3699,7 @@ name = "peek-poke"
version = "0.2.0"
dependencies = [
"euclid",
"peek-poke-derive 0.2.1",
]
[[package]]
name = "peek-poke"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d93fd6a575ebf1ac2668d08443c97a22872cfb463fd8b7ddd141e9f6be59af2f"
dependencies = [
"peek-poke-derive 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"peek-poke-derive",
]
[[package]]
@ -3692,19 +3713,6 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "peek-poke-derive"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926"
dependencies = [
"proc-macro2",
"quote",
"syn",
"synstructure",
"unicode-xid",
]
[[package]]
name = "peeking_take_while"
version = "0.1.2"
@ -3717,6 +3725,16 @@ version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "petgraph"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
dependencies = [
"fixedbitset",
"indexmap",
]
[[package]]
name = "phf"
version = "0.8.0"
@ -4697,15 +4715,15 @@ dependencies = [
[[package]]
name = "spirv-cross-internal"
version = "0.1.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu3#20191ad2f370afd6d247edcb9ff9da32d3bedb9c"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu4#e51babbf00427984fe343e48493d8a9339fec473"
dependencies = [
"cc",
]
[[package]]
name = "spirv_cross"
version = "0.20.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu3#20191ad2f370afd6d247edcb9ff9da32d3bedb9c"
version = "0.22.0"
source = "git+https://github.com/kvark/spirv_cross?branch=wgpu4#e51babbf00427984fe343e48493d8a9339fec473"
dependencies = [
"spirv-cross-internal",
]
@ -5093,6 +5111,12 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d23e87ee7a1ef5bd2d38cef24ff360f6e02beee13c6a7eb64dddde4a3da427a3"
[[package]]
name = "thunderdome"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7572415bd688d401c52f6e36f4c8e805b9ae1622619303b9fa835d531db0acae"
[[package]]
name = "time"
version = "0.1.40"
@ -5344,6 +5368,26 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860"
[[package]]
name = "tracing"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0987850db3733619253fe60e17cb59b82d37c7e6c0236bb81e4d6b87c879f27"
dependencies = [
"cfg-if 0.1.10",
"pin-project-lite",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f50de3927f93d202783f4513cda820ab47ef17f624b03c096e86ef00c67e6b5f"
dependencies = [
"lazy_static",
]
[[package]]
name = "tracy-rs"
version = "0.1.2"
@ -5754,7 +5798,7 @@ dependencies = [
"derive_more",
"euclid",
"malloc_size_of_derive",
"peek-poke 0.2.0",
"peek-poke",
"serde",
"serde_bytes",
"serde_derive",
@ -5810,10 +5854,11 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.5.0"
version = "0.6.0"
dependencies = [
"arrayvec",
"bitflags",
"cfg_aliases",
"copyless",
"fxhash",
"gfx-backend-dx11",
@ -5824,24 +5869,21 @@ dependencies = [
"gfx-descriptor",
"gfx-hal",
"gfx-memory",
"log",
"naga",
"parking_lot",
"peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ron",
"serde",
"smallvec",
"spirv_headers",
"vec_map",
"thiserror",
"tracing",
"wgpu-types",
]
[[package]]
name = "wgpu-types"
version = "0.5.0"
version = "0.6.0"
dependencies = [
"bitflags",
"peek-poke 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"serde",
]
@ -5849,8 +5891,10 @@ dependencies = [
name = "wgpu_bindings"
version = "0.1.0"
dependencies = [
"bincode",
"log",
"parking_lot",
"serde",
"wgpu-core",
"wgpu-types",
]

Просмотреть файл

@ -68,7 +68,7 @@ libudev-sys = { path = "dom/webauthn/libudev-sys" }
packed_simd = { git = "https://github.com/hsivonen/packed_simd", rev="0917fe780032a6bbb23d71be545f9c1834128d75" }
rlbox_lucet_sandbox = { git = "https://github.com/PLSysSec/rlbox_lucet_sandbox/", rev="ed8bac8812e9f335d5fadd0f4ece96981aba88a3" }
nix = { git = "https://github.com/shravanrn/nix/", branch = "r0.13.1", rev="4af6c367603869a30fddb5ffb0aba2b9477ba92e" }
spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu3", rev = "20191ad2f370afd6d247edcb9ff9da32d3bedb9c" }
spirv_cross = { git = "https://github.com/kvark/spirv_cross", branch = "wgpu4", rev = "e9eff10f964957e7a001c5f712effe17ce09aa99" }
# failure's backtrace feature might break our builds, see bug 1608157.
failure = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" }
failure_derive = { git = "https://github.com/badboy/failure", rev = "64af847bc5fdcb6d2438bec8a6030812a80519a5" }

Просмотреть файл

@ -111,9 +111,11 @@ void CommandEncoder::CopyBufferToBuffer(const Buffer& aSource,
BufferAddress aDestinationOffset,
BufferAddress aSize) {
if (mValid) {
mBridge->SendCommandEncoderCopyBufferToBuffer(
mId, aSource.mId, aSourceOffset, aDestination.mId, aDestinationOffset,
aSize);
ipc::ByteBuf bb;
ffi::wgpu_command_encoder_copy_buffer_to_buffer(
aSource.mId, aSourceOffset, aDestination.mId, aDestinationOffset, aSize,
ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, std::move(bb));
}
}
@ -122,11 +124,11 @@ void CommandEncoder::CopyBufferToTexture(
const dom::GPUTextureCopyView& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
const auto source = ConvertBufferCopyView(aSource);
const auto destination = ConvertTextureCopyView(aDestination);
const auto size = ConvertExtent(aCopySize);
mBridge->SendCommandEncoderCopyBufferToTexture(mId, source, destination,
size);
ipc::ByteBuf bb;
ffi::wgpu_command_encoder_copy_buffer_to_texture(
ConvertBufferCopyView(aSource), ConvertTextureCopyView(aDestination),
ConvertExtent(aCopySize), ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, std::move(bb));
}
}
void CommandEncoder::CopyTextureToBuffer(
@ -134,11 +136,11 @@ void CommandEncoder::CopyTextureToBuffer(
const dom::GPUBufferCopyView& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
const auto source = ConvertTextureCopyView(aSource);
const auto destination = ConvertBufferCopyView(aDestination);
const auto size = ConvertExtent(aCopySize);
mBridge->SendCommandEncoderCopyTextureToBuffer(mId, source, destination,
size);
ipc::ByteBuf bb;
ffi::wgpu_command_encoder_copy_texture_to_buffer(
ConvertTextureCopyView(aSource), ConvertBufferCopyView(aDestination),
ConvertExtent(aCopySize), ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, std::move(bb));
}
}
void CommandEncoder::CopyTextureToTexture(
@ -146,11 +148,11 @@ void CommandEncoder::CopyTextureToTexture(
const dom::GPUTextureCopyView& aDestination,
const dom::GPUExtent3D& aCopySize) {
if (mValid) {
const auto source = ConvertTextureCopyView(aSource);
const auto destination = ConvertTextureCopyView(aDestination);
const auto size = ConvertExtent(aCopySize);
mBridge->SendCommandEncoderCopyTextureToTexture(mId, source, destination,
size);
ipc::ByteBuf bb;
ffi::wgpu_command_encoder_copy_texture_to_texture(
ConvertTextureCopyView(aSource), ConvertTextureCopyView(aDestination),
ConvertExtent(aCopySize), ToFFI(&bb));
mBridge->SendCommandEncoderAction(mId, std::move(bb));
}
}
@ -177,36 +179,26 @@ already_AddRefed<RenderPassEncoder> CommandEncoder::BeginRenderPass(
return pass.forget();
}
void CommandEncoder::EndComputePass(Span<const uint8_t> aData,
void CommandEncoder::EndComputePass(ffi::WGPUComputePass& aPass,
ErrorResult& aRv) {
if (!mValid) {
return aRv.ThrowInvalidStateError("Command encoder is not valid");
}
ipc::Shmem shmem;
if (!mBridge->AllocShmem(aData.Length(), ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
return aRv.ThrowAbortError(nsPrintfCString(
"Unable to allocate shmem of size %zu", aData.Length()));
}
memcpy(shmem.get<uint8_t>(), aData.data(), aData.Length());
mBridge->SendCommandEncoderRunComputePass(mId, std::move(shmem));
ipc::ByteBuf byteBuf;
ffi::wgpu_compute_pass_finish(&aPass, ToFFI(&byteBuf));
mBridge->SendCommandEncoderAction(mId, std::move(byteBuf));
}
void CommandEncoder::EndRenderPass(Span<const uint8_t> aData,
void CommandEncoder::EndRenderPass(ffi::WGPURenderPass& aPass,
ErrorResult& aRv) {
if (!mValid) {
return aRv.ThrowInvalidStateError("Command encoder is not valid");
}
ipc::Shmem shmem;
if (!mBridge->AllocShmem(aData.Length(), ipc::Shmem::SharedMemory::TYPE_BASIC,
&shmem)) {
return aRv.ThrowAbortError(nsPrintfCString(
"Unable to allocate shmem of size %zu", aData.Length()));
}
memcpy(shmem.get<uint8_t>(), aData.data(), aData.Length());
mBridge->SendCommandEncoderRunRenderPass(mId, std::move(shmem));
ipc::ByteBuf byteBuf;
ffi::wgpu_render_pass_finish(&aPass, ToFFI(&byteBuf));
mBridge->SendCommandEncoderAction(mId, std::move(byteBuf));
}
already_AddRefed<CommandBuffer> CommandEncoder::Finish(

Просмотреть файл

@ -26,8 +26,10 @@ typedef RangeEnforcedUnsignedLongSequenceOrGPUExtent3DDict GPUExtent3D;
} // namespace dom
namespace webgpu {
namespace ffi {
struct WGPUComputePass;
struct WGPURenderPass;
struct WGPUTextureDataLayout;
struct WGPUTextureCopyView;
struct WGPUTextureCopyView_TextureId;
struct WGPUExtent3d;
} // namespace ffi
@ -50,8 +52,9 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
static void ConvertTextureDataLayoutToFFI(
const dom::GPUTextureDataLayout& aLayout,
ffi::WGPUTextureDataLayout* aLayoutFFI);
static void ConvertTextureCopyViewToFFI(const dom::GPUTextureCopyView& aView,
ffi::WGPUTextureCopyView* aViewFFI);
static void ConvertTextureCopyViewToFFI(
const dom::GPUTextureCopyView& aView,
ffi::WGPUTextureCopyView_TextureId* aViewFFI);
static void ConvertExtent3DToFFI(const dom::GPUExtent3D& aExtent,
ffi::WGPUExtent3d* aExtentFFI);
@ -64,8 +67,8 @@ class CommandEncoder final : public ObjectBase, public ChildOf<Device> {
WeakPtr<dom::HTMLCanvasElement> mTargetCanvasElement;
public:
void EndComputePass(Span<const uint8_t> aData, ErrorResult& aRv);
void EndRenderPass(Span<const uint8_t> aData, ErrorResult& aRv);
void EndComputePass(ffi::WGPUComputePass& aPass, ErrorResult& aRv);
void EndRenderPass(ffi::WGPURenderPass& aPass, ErrorResult& aRv);
void CopyBufferToBuffer(const Buffer& aSource, BufferAddress aSourceOffset,
const Buffer& aDestination,

Просмотреть файл

@ -17,8 +17,8 @@ GPU_IMPL_CYCLE_COLLECTION(ComputePassEncoder, mParent, mUsedBindGroups,
mUsedPipelines)
GPU_IMPL_JS_WRAP(ComputePassEncoder)
ffi::WGPURawPass BeginComputePass(RawId aEncoderId,
const dom::GPUComputePassDescriptor& aDesc) {
ffi::WGPUComputePass* BeginComputePass(
RawId aEncoderId, const dom::GPUComputePassDescriptor& aDesc) {
ffi::WGPUComputePassDescriptor desc = {};
Unused << aDesc; // no useful fields
return ffi::wgpu_command_encoder_begin_compute_pass(aEncoderId, &desc);
@ -40,7 +40,7 @@ void ComputePassEncoder::SetBindGroup(
const dom::Sequence<uint32_t>& aDynamicOffsets) {
if (mValid) {
mUsedBindGroups.AppendElement(&aBindGroup);
ffi::wgpu_compute_pass_set_bind_group(&mRaw, aSlot, aBindGroup.mId,
ffi::wgpu_compute_pass_set_bind_group(mRaw, aSlot, aBindGroup.mId,
aDynamicOffsets.Elements(),
aDynamicOffsets.Length());
}
@ -49,20 +49,20 @@ void ComputePassEncoder::SetBindGroup(
void ComputePassEncoder::SetPipeline(const ComputePipeline& aPipeline) {
if (mValid) {
mUsedPipelines.AppendElement(&aPipeline);
ffi::wgpu_compute_pass_set_pipeline(&mRaw, aPipeline.mId);
ffi::wgpu_compute_pass_set_pipeline(mRaw, aPipeline.mId);
}
}
void ComputePassEncoder::Dispatch(uint32_t x, uint32_t y, uint32_t z) {
if (mValid) {
ffi::wgpu_compute_pass_dispatch(&mRaw, x, y, z);
ffi::wgpu_compute_pass_dispatch(mRaw, x, y, z);
}
}
void ComputePassEncoder::DispatchIndirect(const Buffer& aIndirectBuffer,
uint64_t aIndirectOffset) {
if (mValid) {
ffi::wgpu_compute_pass_dispatch_indirect(&mRaw, aIndirectBuffer.mId,
ffi::wgpu_compute_pass_dispatch_indirect(mRaw, aIndirectBuffer.mId,
aIndirectOffset);
}
}
@ -70,10 +70,8 @@ void ComputePassEncoder::DispatchIndirect(const Buffer& aIndirectBuffer,
void ComputePassEncoder::EndPass(ErrorResult& aRv) {
if (mValid) {
mValid = false;
uintptr_t length = 0;
const uint8_t* pass_data = ffi::wgpu_compute_pass_finish(&mRaw, &length);
mParent->EndComputePass(Span(pass_data, length), aRv);
ffi::wgpu_compute_pass_destroy(mRaw);
MOZ_ASSERT(mRaw);
mParent->EndComputePass(*mRaw, aRv);
}
}

Просмотреть файл

@ -32,7 +32,7 @@ class ComputePassEncoder final : public ObjectBase,
virtual ~ComputePassEncoder();
void Cleanup() {}
ffi::WGPURawPass mRaw;
ffi::WGPUComputePass* const mRaw;
// keep all the used objects alive while the pass is recorded
nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
nsTArray<RefPtr<const ComputePipeline>> mUsedPipelines;

Просмотреть файл

@ -79,8 +79,8 @@ void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
ffi::WGPUExtent3d extent = {};
CommandEncoder::ConvertExtent3DToFFI(aSize, &extent);
const auto bpt = aDestination.mTexture->BytesPerTexel();
if (bpt == 0) {
const auto bpb = aDestination.mTexture->mBytesPerBlock;
if (!bpb) {
aRv.ThrowAbortError(nsPrintfCString("Invalid texture format"));
return;
}
@ -89,12 +89,13 @@ void Queue::WriteTexture(const dom::GPUTextureCopyView& aDestination,
return;
}
// TODO: support block-compressed formats
aData.ComputeState();
const auto fullRows =
(CheckedInt<size_t>(extent.depth - 1) * aDataLayout.mRowsPerImage +
extent.height - 1);
const auto checkedSize = fullRows * aDataLayout.mBytesPerRow +
CheckedInt<size_t>(extent.width) * bpt;
CheckedInt<size_t>(extent.width) * bpb.value();
if (!checkedSize.isValid()) {
aRv.ThrowRangeError("Mapped size is too large");
return;

Просмотреть файл

@ -41,75 +41,75 @@ ffi::WGPUColor ConvertColor(const dom::GPUColorDict& aColor) {
return color;
}
ffi::WGPURawPass BeginRenderPass(RawId aEncoderId,
const dom::GPURenderPassDescriptor& aDesc) {
ffi::WGPURenderPass* BeginRenderPass(
RawId aEncoderId, const dom::GPURenderPassDescriptor& aDesc) {
ffi::WGPURenderPassDescriptor desc = {};
ffi::WGPURenderPassDepthStencilAttachmentDescriptor dsDesc = {};
ffi::WGPUDepthStencilAttachmentDescriptor dsDesc = {};
if (aDesc.mDepthStencilAttachment.WasPassed()) {
const auto& dsa = aDesc.mDepthStencilAttachment.Value();
dsDesc.attachment = dsa.mAttachment->mId;
if (dsa.mDepthLoadValue.IsFloat()) {
dsDesc.depth_load_op = ffi::WGPULoadOp_Clear;
dsDesc.clear_depth = dsa.mDepthLoadValue.GetAsFloat();
dsDesc.depth.load_op = ffi::WGPULoadOp_Clear;
dsDesc.depth.clear_value = dsa.mDepthLoadValue.GetAsFloat();
}
if (dsa.mDepthLoadValue.IsGPULoadOp()) {
dsDesc.depth_load_op =
dsDesc.depth.load_op =
ConvertLoadOp(dsa.mDepthLoadValue.GetAsGPULoadOp());
}
dsDesc.depth_store_op = ConvertStoreOp(dsa.mDepthStoreOp);
dsDesc.depth.store_op = ConvertStoreOp(dsa.mDepthStoreOp);
if (dsa.mStencilLoadValue.IsRangeEnforcedUnsignedLong()) {
dsDesc.stencil_load_op = ffi::WGPULoadOp_Clear;
dsDesc.clear_stencil =
dsDesc.stencil.load_op = ffi::WGPULoadOp_Clear;
dsDesc.stencil.clear_value =
dsa.mStencilLoadValue.GetAsRangeEnforcedUnsignedLong();
}
if (dsa.mStencilLoadValue.IsGPULoadOp()) {
dsDesc.stencil_load_op =
dsDesc.stencil.load_op =
ConvertLoadOp(dsa.mStencilLoadValue.GetAsGPULoadOp());
}
dsDesc.stencil_store_op = ConvertStoreOp(dsa.mStencilStoreOp);
dsDesc.stencil.store_op = ConvertStoreOp(dsa.mStencilStoreOp);
desc.depth_stencil_attachment = &dsDesc;
}
std::array<ffi::WGPURenderPassColorAttachmentDescriptor,
WGPUMAX_COLOR_TARGETS>
std::array<ffi::WGPUColorAttachmentDescriptor, WGPUMAX_COLOR_TARGETS>
colorDescs = {};
desc.color_attachments = colorDescs.data();
desc.color_attachments_length = aDesc.mColorAttachments.Length();
for (size_t i = 0; i < aDesc.mColorAttachments.Length(); ++i) {
const auto& ca = aDesc.mColorAttachments[i];
ffi::WGPURenderPassColorAttachmentDescriptor& cd = colorDescs[i];
ffi::WGPUColorAttachmentDescriptor& cd = colorDescs[i];
cd.attachment = ca.mAttachment->mId;
cd.store_op = ConvertStoreOp(ca.mStoreOp);
cd.channel.store_op = ConvertStoreOp(ca.mStoreOp);
if (ca.mResolveTarget.WasPassed()) {
cd.resolve_target = ca.mResolveTarget.Value().mId;
}
if (ca.mLoadValue.IsGPULoadOp()) {
cd.load_op = ConvertLoadOp(ca.mLoadValue.GetAsGPULoadOp());
cd.channel.load_op = ConvertLoadOp(ca.mLoadValue.GetAsGPULoadOp());
} else {
cd.load_op = ffi::WGPULoadOp_Clear;
cd.channel.load_op = ffi::WGPULoadOp_Clear;
if (ca.mLoadValue.IsDoubleSequence()) {
const auto& seq = ca.mLoadValue.GetAsDoubleSequence();
if (seq.Length() >= 1) {
cd.clear_color.r = seq[0];
cd.channel.clear_value.r = seq[0];
}
if (seq.Length() >= 2) {
cd.clear_color.g = seq[1];
cd.channel.clear_value.g = seq[1];
}
if (seq.Length() >= 3) {
cd.clear_color.b = seq[2];
cd.channel.clear_value.b = seq[2];
}
if (seq.Length() >= 4) {
cd.clear_color.a = seq[3];
cd.channel.clear_value.a = seq[3];
}
}
if (ca.mLoadValue.IsGPUColorDict()) {
cd.clear_color = ConvertColor(ca.mLoadValue.GetAsGPUColorDict());
cd.channel.clear_value =
ConvertColor(ca.mLoadValue.GetAsGPUColorDict());
}
}
}
@ -141,7 +141,7 @@ void RenderPassEncoder::SetBindGroup(
const dom::Sequence<uint32_t>& aDynamicOffsets) {
if (mValid) {
mUsedBindGroups.AppendElement(&aBindGroup);
ffi::wgpu_render_pass_set_bind_group(&mRaw, aSlot, aBindGroup.mId,
ffi::wgpu_render_pass_set_bind_group(mRaw, aSlot, aBindGroup.mId,
aDynamicOffsets.Elements(),
aDynamicOffsets.Length());
}
@ -150,7 +150,7 @@ void RenderPassEncoder::SetBindGroup(
void RenderPassEncoder::SetPipeline(const RenderPipeline& aPipeline) {
if (mValid) {
mUsedPipelines.AppendElement(&aPipeline);
ffi::wgpu_render_pass_set_pipeline(&mRaw, aPipeline.mId);
ffi::wgpu_render_pass_set_pipeline(mRaw, aPipeline.mId);
}
}
@ -158,8 +158,7 @@ void RenderPassEncoder::SetIndexBuffer(const Buffer& aBuffer, uint64_t aOffset,
uint64_t aSize) {
if (mValid) {
mUsedBuffers.AppendElement(&aBuffer);
ffi::wgpu_render_pass_set_index_buffer(&mRaw, aBuffer.mId, aOffset,
ffi::make_buffer_size(aSize));
ffi::wgpu_render_pass_set_index_buffer(mRaw, aBuffer.mId, aOffset, aSize);
}
}
@ -167,16 +166,16 @@ void RenderPassEncoder::SetVertexBuffer(uint32_t aSlot, const Buffer& aBuffer,
uint64_t aOffset, uint64_t aSize) {
if (mValid) {
mUsedBuffers.AppendElement(&aBuffer);
ffi::wgpu_render_pass_set_vertex_buffer(&mRaw, aSlot, aBuffer.mId, aOffset,
ffi::make_buffer_size(aSize));
ffi::wgpu_render_pass_set_vertex_buffer(mRaw, aSlot, aBuffer.mId, aOffset,
aSize);
}
}
void RenderPassEncoder::Draw(uint32_t aVertexCount, uint32_t aInstanceCount,
uint32_t aFirstVertex, uint32_t aFirstInstance) {
if (mValid) {
ffi::wgpu_render_pass_draw(&mRaw, aVertexCount, aInstanceCount,
aFirstVertex, aFirstInstance);
ffi::wgpu_render_pass_draw(mRaw, aVertexCount, aInstanceCount, aFirstVertex,
aFirstInstance);
}
}
@ -185,7 +184,7 @@ void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
uint32_t aFirstIndex, int32_t aBaseVertex,
uint32_t aFirstInstance) {
if (mValid) {
ffi::wgpu_render_pass_draw_indexed(&mRaw, aIndexCount, aInstanceCount,
ffi::wgpu_render_pass_draw_indexed(mRaw, aIndexCount, aInstanceCount,
aFirstIndex, aBaseVertex,
aFirstInstance);
}
@ -194,7 +193,7 @@ void RenderPassEncoder::DrawIndexed(uint32_t aIndexCount,
void RenderPassEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
uint64_t aIndirectOffset) {
if (mValid) {
ffi::wgpu_render_pass_draw_indirect(&mRaw, aIndirectBuffer.mId,
ffi::wgpu_render_pass_draw_indirect(mRaw, aIndirectBuffer.mId,
aIndirectOffset);
}
}
@ -202,7 +201,7 @@ void RenderPassEncoder::DrawIndirect(const Buffer& aIndirectBuffer,
void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
uint64_t aIndirectOffset) {
if (mValid) {
ffi::wgpu_render_pass_draw_indexed_indirect(&mRaw, aIndirectBuffer.mId,
ffi::wgpu_render_pass_draw_indexed_indirect(mRaw, aIndirectBuffer.mId,
aIndirectOffset);
}
}
@ -210,10 +209,8 @@ void RenderPassEncoder::DrawIndexedIndirect(const Buffer& aIndirectBuffer,
void RenderPassEncoder::EndPass(ErrorResult& aRv) {
if (mValid) {
mValid = false;
uintptr_t length = 0;
const uint8_t* pass_data = ffi::wgpu_render_pass_finish(&mRaw, &length);
mParent->EndRenderPass(Span(pass_data, length), aRv);
ffi::wgpu_render_pass_destroy(mRaw);
MOZ_ASSERT(mRaw);
mParent->EndRenderPass(*mRaw, aRv);
}
}

Просмотреть файл

@ -37,7 +37,7 @@ class RenderPassEncoder final : public ObjectBase,
virtual ~RenderPassEncoder();
void Cleanup() {}
ffi::WGPURawPass mRaw;
ffi::WGPURenderPass* const mRaw;
// keep all the used objects alive while the pass is recorded
nsTArray<RefPtr<const BindGroup>> mUsedBindGroups;
nsTArray<RefPtr<const Buffer>> mUsedBuffers;

Просмотреть файл

@ -15,11 +15,62 @@ namespace webgpu {
GPU_IMPL_CYCLE_COLLECTION(Texture, mParent)
GPU_IMPL_JS_WRAP(Texture)
static Maybe<uint8_t> GetBytesPerBlock(dom::GPUTextureFormat format) {
switch (format) {
case dom::GPUTextureFormat::R8unorm:
case dom::GPUTextureFormat::R8snorm:
case dom::GPUTextureFormat::R8uint:
case dom::GPUTextureFormat::R8sint:
return Some<uint8_t>(1u);
case dom::GPUTextureFormat::R16uint:
case dom::GPUTextureFormat::R16sint:
case dom::GPUTextureFormat::R16float:
case dom::GPUTextureFormat::Rg8unorm:
case dom::GPUTextureFormat::Rg8snorm:
case dom::GPUTextureFormat::Rg8uint:
case dom::GPUTextureFormat::Rg8sint:
return Some<uint8_t>(2u);
case dom::GPUTextureFormat::R32uint:
case dom::GPUTextureFormat::R32sint:
case dom::GPUTextureFormat::R32float:
case dom::GPUTextureFormat::Rg16uint:
case dom::GPUTextureFormat::Rg16sint:
case dom::GPUTextureFormat::Rg16float:
case dom::GPUTextureFormat::Rgba8unorm:
case dom::GPUTextureFormat::Rgba8unorm_srgb:
case dom::GPUTextureFormat::Rgba8snorm:
case dom::GPUTextureFormat::Rgba8uint:
case dom::GPUTextureFormat::Rgba8sint:
case dom::GPUTextureFormat::Bgra8unorm:
case dom::GPUTextureFormat::Bgra8unorm_srgb:
case dom::GPUTextureFormat::Rgb10a2unorm:
case dom::GPUTextureFormat::Rg11b10float:
return Some<uint8_t>(4u);
case dom::GPUTextureFormat::Rg32uint:
case dom::GPUTextureFormat::Rg32sint:
case dom::GPUTextureFormat::Rg32float:
case dom::GPUTextureFormat::Rgba16uint:
case dom::GPUTextureFormat::Rgba16sint:
case dom::GPUTextureFormat::Rgba16float:
return Some<uint8_t>(8u);
case dom::GPUTextureFormat::Rgba32uint:
case dom::GPUTextureFormat::Rgba32sint:
case dom::GPUTextureFormat::Rgba32float:
return Some<uint8_t>(16u);
case dom::GPUTextureFormat::Depth32float:
return Some<uint8_t>(4u);
case dom::GPUTextureFormat::Depth24plus:
case dom::GPUTextureFormat::Depth24plus_stencil8:
case dom::GPUTextureFormat::EndGuard_:
return Nothing();
}
}
Texture::Texture(Device* const aParent, RawId aId,
const dom::GPUTextureDescriptor& aDesc)
: ChildOf(aParent),
mId(aId),
mDefaultViewDescriptor(WebGPUChild::GetDefaultViewDescriptor(aDesc)) {}
mBytesPerBlock(GetBytesPerBlock(aDesc.mFormat)) {}
Texture::~Texture() { Cleanup(); }
@ -33,57 +84,9 @@ void Texture::Cleanup() {
}
}
uint8_t Texture::BytesPerTexel() const {
switch (mDefaultViewDescriptor->format) {
case ffi::WGPUTextureFormat_R8Unorm:
case ffi::WGPUTextureFormat_R8Snorm:
case ffi::WGPUTextureFormat_R8Uint:
case ffi::WGPUTextureFormat_R8Sint:
return 1;
case ffi::WGPUTextureFormat_R16Uint:
case ffi::WGPUTextureFormat_R16Sint:
case ffi::WGPUTextureFormat_R16Float:
case ffi::WGPUTextureFormat_Rg8Unorm:
case ffi::WGPUTextureFormat_Rg8Snorm:
case ffi::WGPUTextureFormat_Rg8Uint:
case ffi::WGPUTextureFormat_Rg8Sint:
return 2;
case ffi::WGPUTextureFormat_R32Uint:
case ffi::WGPUTextureFormat_R32Sint:
case ffi::WGPUTextureFormat_R32Float:
case ffi::WGPUTextureFormat_Rg16Uint:
case ffi::WGPUTextureFormat_Rg16Sint:
case ffi::WGPUTextureFormat_Rg16Float:
case ffi::WGPUTextureFormat_Rgba8Unorm:
case ffi::WGPUTextureFormat_Rgba8UnormSrgb:
case ffi::WGPUTextureFormat_Rgba8Snorm:
case ffi::WGPUTextureFormat_Rgba8Uint:
case ffi::WGPUTextureFormat_Rgba8Sint:
case ffi::WGPUTextureFormat_Bgra8Unorm:
case ffi::WGPUTextureFormat_Bgra8UnormSrgb:
case ffi::WGPUTextureFormat_Rgb10a2Unorm:
case ffi::WGPUTextureFormat_Rg11b10Float:
return 4;
case ffi::WGPUTextureFormat_Rg32Uint:
case ffi::WGPUTextureFormat_Rg32Sint:
case ffi::WGPUTextureFormat_Rg32Float:
return 8;
case ffi::WGPUTextureFormat_Rgba16Uint:
case ffi::WGPUTextureFormat_Rgba16Sint:
case ffi::WGPUTextureFormat_Rgba16Float:
case ffi::WGPUTextureFormat_Rgba32Uint:
case ffi::WGPUTextureFormat_Rgba32Sint:
case ffi::WGPUTextureFormat_Rgba32Float:
return 16;
default:
return 0;
}
}
already_AddRefed<TextureView> Texture::CreateView(
const dom::GPUTextureViewDescriptor& aDesc) {
RawId id = mParent->GetBridge()->TextureCreateView(mId, aDesc,
*mDefaultViewDescriptor);
RawId id = mParent->GetBridge()->TextureCreateView(mId, aDesc);
RefPtr<TextureView> view = new TextureView(this, id);
return view.forget();
}

Просмотреть файл

@ -33,17 +33,14 @@ class Texture final : public ObjectBase, public ChildOf<Device> {
const dom::GPUTextureDescriptor& aDesc);
Device* GetParentDevice() { return mParent; }
const RawId mId;
const Maybe<uint8_t> mBytesPerBlock;
WeakPtr<dom::HTMLCanvasElement> mTargetCanvasElement;
uint8_t BytesPerTexel() const;
private:
virtual ~Texture();
void Cleanup();
const UniquePtr<ffi::WGPUTextureViewDescriptor> mDefaultViewDescriptor;
public:
already_AddRefed<TextureView> CreateView(
const dom::GPUTextureViewDescriptor& aDesc);

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -9,21 +9,9 @@ using layers::RGBDescriptor from "mozilla/layers/LayersSurfaces.h";
using wr::ExternalImageId from "mozilla/webrender/WebRenderAPI.h";
using RawId from "mozilla/webgpu/WebGPUTypes.h";
using BufferAddress from "mozilla/webgpu/WebGPUTypes.h";
using SerialBindGroupLayoutDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialPipelineLayoutDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialBindGroupDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialComputePipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialRenderPipelineDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using SerialSamplerDescriptor from "mozilla/webgpu/WebGPUTypes.h";
using dom::GPURequestAdapterOptions from "mozilla/dom/WebGPUBinding.h";
using dom::GPUDeviceDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUCommandEncoderDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUCommandBufferDescriptor from "mozilla/dom/WebGPUBinding.h";
using dom::GPUPipelineLayoutDescriptor from "mozilla/dom/WebGPUBinding.h";
using webgpu::ffi::WGPUBufferDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureViewDescriptor from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUBufferCopyView from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureDataLayout from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUTextureCopyView from "mozilla/webgpu/ffi/wgpu.h";
using webgpu::ffi::WGPUExtent3d from "mozilla/webgpu/ffi/wgpu.h";
@ -45,29 +33,22 @@ async protocol PWebGPU
manager PCompositorBridge;
parent:
async DeviceAction(RawId selfId, ByteBuf buf);
async TextureAction(RawId selfId, ByteBuf buf);
async CommandEncoderAction(RawId selfId, ByteBuf buf);
async InstanceRequestAdapter(GPURequestAdapterOptions options, RawId[] ids) returns (RawId adapterId);
async AdapterRequestDevice(RawId selfId, GPUDeviceDescriptor desc, RawId newId);
async AdapterDestroy(RawId selfId);
async DeviceCreateBuffer(RawId selfId, WGPUBufferDescriptor desc, nsCString label, RawId newId);
async BufferReturnShmem(RawId selfId, Shmem shmem);
async BufferMap(RawId selfId, WGPUHostMap hostMap, uint64_t offset, uint64_t size) returns (Shmem sm);
async BufferUnmap(RawId selfId, Shmem shmem, bool flush);
async BufferDestroy(RawId selfId);
async DeviceCreateTexture(RawId selfId, WGPUTextureDescriptor desc, nsCString label, RawId newId);
async TextureCreateView(RawId selfId, WGPUTextureViewDescriptor desc, nsCString label, RawId newId);
async TextureDestroy(RawId selfId);
async TextureViewDestroy(RawId selfId);
async DeviceCreateSampler(RawId selfId, SerialSamplerDescriptor desc, RawId newId);
async SamplerDestroy(RawId selfId);
async DeviceDestroy(RawId selfId);
async DeviceCreateCommandEncoder(RawId selfId, GPUCommandEncoderDescriptor desc, RawId newId);
async CommandEncoderCopyBufferToBuffer(RawId selfId, RawId sourceId, BufferAddress sourceOffset, RawId destinationId, BufferAddress destinationOffset, BufferAddress size);
async CommandEncoderCopyBufferToTexture(RawId selfId, WGPUBufferCopyView source, WGPUTextureCopyView destination, WGPUExtent3d extent);
async CommandEncoderCopyTextureToBuffer(RawId selfId, WGPUTextureCopyView source, WGPUBufferCopyView destination, WGPUExtent3d extent);
async CommandEncoderCopyTextureToTexture(RawId selfId, WGPUTextureCopyView source, WGPUTextureCopyView destination, WGPUExtent3d extent);
async CommandEncoderRunComputePass(RawId selfId, Shmem shmem);
async CommandEncoderRunRenderPass(RawId selfId, Shmem shmem);
async CommandEncoderFinish(RawId selfId, GPUCommandBufferDescriptor desc);
async CommandEncoderDestroy(RawId selfId);
async CommandBufferDestroy(RawId selfId);
@ -75,17 +56,11 @@ parent:
async QueueWriteBuffer(RawId selfId, RawId bufferId, BufferAddress bufferOffset, Shmem shmem);
async QueueWriteTexture(RawId selfId, WGPUTextureCopyView destination, Shmem shmem, WGPUTextureDataLayout layout, WGPUExtent3d extent);
async DeviceCreateBindGroupLayout(RawId selfId, SerialBindGroupLayoutDescriptor desc, RawId newId);
async BindGroupLayoutDestroy(RawId selfId);
async DeviceCreatePipelineLayout(RawId selfId, SerialPipelineLayoutDescriptor desc, RawId newId);
async PipelineLayoutDestroy(RawId selfId);
async DeviceCreateBindGroup(RawId selfId, SerialBindGroupDescriptor desc, RawId newId);
async BindGroupDestroy(RawId selfId);
async DeviceCreateShaderModule(RawId selfId, uint32_t[] spirv, nsCString wgsl, RawId newId);
async ShaderModuleDestroy(RawId selfId);
async DeviceCreateComputePipeline(RawId selfId, SerialComputePipelineDescriptor desc, RawId newId);
async ComputePipelineDestroy(RawId selfId);
async DeviceCreateRenderPipeline(RawId selfId, SerialRenderPipelineDescriptor desc, RawId newId);
async RenderPipelineDestroy(RawId selfId);
async DeviceCreateSwapChain(RawId selfId, RawId queueId, RGBDescriptor desc, RawId[] bufferIds, ExternalImageId externalId);
async SwapChainPresent(ExternalImageId externalId, RawId textureId, RawId commandEncoderId);

Просмотреть файл

@ -14,10 +14,8 @@ NS_IMPL_CYCLE_COLLECTION(WebGPUChild)
NS_IMPL_CYCLE_COLLECTION_ROOT_NATIVE(WebGPUChild, AddRef)
NS_IMPL_CYCLE_COLLECTION_UNROOT_NATIVE(WebGPUChild, Release)
static ffi::WGPUCompareFunction ConvertCompareFunction(
const dom::GPUCompareFunction& aCompare) {
// Value of 0 = Undefined is reserved on the C side for "null" semantics.
return ffi::WGPUCompareFunction(static_cast<uint8_t>(aCompare) + 1);
ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x) {
return reinterpret_cast<ffi::WGPUByteBuf*>(x);
}
static ffi::WGPUClient* initialize() {
@ -70,53 +68,32 @@ Maybe<RawId> WebGPUChild::AdapterRequestDevice(
RawId WebGPUChild::DeviceCreateBuffer(RawId aSelfId,
const dom::GPUBufferDescriptor& aDesc) {
ffi::WGPUBufferDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.size = aDesc.mSize;
desc.usage = aDesc.mUsage;
desc.mapped_at_creation = aDesc.mMappedAtCreation;
RawId id = ffi::wgpu_client_make_buffer_id(mClient, aSelfId);
if (!SendDeviceCreateBuffer(aSelfId, desc, nsCString(), id)) {
ByteBuf bb;
RawId id =
ffi::wgpu_client_create_buffer(mClient, aSelfId, &desc, ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
}
UniquePtr<ffi::WGPUTextureViewDescriptor> WebGPUChild::GetDefaultViewDescriptor(
const dom::GPUTextureDescriptor& aDesc) {
ffi::WGPUTextureViewDescriptor desc = {};
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
// compute depth
uint32_t depth = 0;
if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
depth = seq.Length() > 2 ? seq[2] : 1;
} else {
depth = aDesc.mSize.GetAsGPUExtent3DDict().mDepth;
}
// compute dimension
switch (aDesc.mDimension) {
case dom::GPUTextureDimension::_1d:
desc.dimension = ffi::WGPUTextureViewDimension_D1;
break;
case dom::GPUTextureDimension::_2d:
desc.dimension = depth > 1 ? ffi::WGPUTextureViewDimension_D2Array
: ffi::WGPUTextureViewDimension_D2;
break;
case dom::GPUTextureDimension::_3d:
desc.dimension = ffi::WGPUTextureViewDimension_D3;
break;
default:
MOZ_CRASH("Unexpected texture dimension");
}
// compute level count
desc.level_count = aDesc.mMipLevelCount;
return UniquePtr<ffi::WGPUTextureViewDescriptor>(
new ffi::WGPUTextureViewDescriptor(desc));
}
RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
const dom::GPUTextureDescriptor& aDesc) {
ffi::WGPUTextureDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
if (aDesc.mSize.IsRangeEnforcedUnsignedLongSequence()) {
const auto& seq = aDesc.mSize.GetAsRangeEnforcedUnsignedLongSequence();
desc.size.width = seq.Length() > 0 ? seq[0] : 1;
@ -136,37 +113,48 @@ RawId WebGPUChild::DeviceCreateTexture(RawId aSelfId,
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.usage = aDesc.mUsage;
RawId id = ffi::wgpu_client_make_texture_id(mClient, aSelfId);
if (!SendDeviceCreateTexture(aSelfId, desc, nsCString(), id)) {
ByteBuf bb;
RawId id =
ffi::wgpu_client_create_texture(mClient, aSelfId, &desc, ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
}
RawId WebGPUChild::TextureCreateView(
RawId aSelfId, const dom::GPUTextureViewDescriptor& aDesc,
const ffi::WGPUTextureViewDescriptor& aDefaultViewDesc) {
ffi::WGPUTextureViewDescriptor desc = aDefaultViewDesc;
if (aDesc.mFormat.WasPassed()) {
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat.Value());
RawId aSelfId, const dom::GPUTextureViewDescriptor& aDesc) {
ffi::WGPUTextureViewDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
ffi::WGPUTextureFormat format = ffi::WGPUTextureFormat_Sentinel;
if (aDesc.mFormat.WasPassed()) {
format = ffi::WGPUTextureFormat(aDesc.mFormat.Value());
desc.format = &format;
}
ffi::WGPUTextureViewDimension dimension =
ffi::WGPUTextureViewDimension_Sentinel;
if (aDesc.mDimension.WasPassed()) {
desc.dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
dimension = ffi::WGPUTextureViewDimension(aDesc.mDimension.Value());
desc.dimension = &dimension;
}
desc.aspect = ffi::WGPUTextureAspect(aDesc.mAspect);
desc.base_mip_level = aDesc.mBaseMipLevel;
desc.level_count = aDesc.mMipLevelCount.WasPassed()
? aDesc.mMipLevelCount.Value()
: aDefaultViewDesc.level_count - aDesc.mBaseMipLevel;
desc.level_count =
aDesc.mMipLevelCount.WasPassed() ? aDesc.mMipLevelCount.Value() : 0;
desc.base_array_layer = aDesc.mBaseArrayLayer;
desc.array_layer_count =
aDesc.mArrayLayerCount.WasPassed()
? aDesc.mArrayLayerCount.Value()
: aDefaultViewDesc.array_layer_count - aDesc.mBaseArrayLayer;
aDesc.mArrayLayerCount.WasPassed() ? aDesc.mArrayLayerCount.Value() : 0;
RawId id = ffi::wgpu_client_make_texture_view_id(mClient, aSelfId);
if (!SendTextureCreateView(aSelfId, desc, nsCString(), id)) {
ByteBuf bb;
RawId id =
ffi::wgpu_client_create_texture_view(mClient, aSelfId, &desc, ToFFI(&bb));
if (!SendTextureAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -174,21 +162,32 @@ RawId WebGPUChild::TextureCreateView(
RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
const dom::GPUSamplerDescriptor& aDesc) {
SerialSamplerDescriptor desc = {};
desc.mAddressU = ffi::WGPUAddressMode(aDesc.mAddressModeU);
desc.mAddressV = ffi::WGPUAddressMode(aDesc.mAddressModeV);
desc.mAddressW = ffi::WGPUAddressMode(aDesc.mAddressModeW);
desc.mMagFilter = ffi::WGPUFilterMode(aDesc.mMagFilter);
desc.mMinFilter = ffi::WGPUFilterMode(aDesc.mMinFilter);
desc.mMipmapFilter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
desc.mLodMinClamp = aDesc.mLodMinClamp;
desc.mLodMaxClamp = aDesc.mLodMaxClamp;
if (aDesc.mCompare.WasPassed()) {
desc.mCompare = Some(ConvertCompareFunction(aDesc.mCompare.Value()));
ffi::WGPUSamplerDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
RawId id = ffi::wgpu_client_make_sampler_id(mClient, aSelfId);
if (!SendDeviceCreateSampler(aSelfId, desc, id)) {
desc.address_modes[0] = ffi::WGPUAddressMode(aDesc.mAddressModeU);
desc.address_modes[1] = ffi::WGPUAddressMode(aDesc.mAddressModeV);
desc.address_modes[2] = ffi::WGPUAddressMode(aDesc.mAddressModeW);
desc.mag_filter = ffi::WGPUFilterMode(aDesc.mMagFilter);
desc.min_filter = ffi::WGPUFilterMode(aDesc.mMinFilter);
desc.mipmap_filter = ffi::WGPUFilterMode(aDesc.mMipmapFilter);
desc.lod_min_clamp = aDesc.mLodMinClamp;
desc.lod_max_clamp = aDesc.mLodMaxClamp;
ffi::WGPUCompareFunction comparison = ffi::WGPUCompareFunction_Sentinel;
if (aDesc.mCompare.WasPassed()) {
comparison = ffi::WGPUCompareFunction(aDesc.mCompare.Value());
desc.compare = &comparison;
}
ByteBuf bb;
RawId id =
ffi::wgpu_client_create_sampler(mClient, aSelfId, &desc, ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -196,8 +195,17 @@ RawId WebGPUChild::DeviceCreateSampler(RawId aSelfId,
RawId WebGPUChild::DeviceCreateCommandEncoder(
RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_encoder_id(mClient, aSelfId);
if (!SendDeviceCreateCommandEncoder(aSelfId, aDesc, id)) {
ffi::WGPUCommandEncoderDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
ByteBuf bb;
RawId id = ffi::wgpu_client_create_command_encoder(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -217,26 +225,61 @@ RawId WebGPUChild::CommandEncoderFinish(
RawId WebGPUChild::DeviceCreateBindGroupLayout(
RawId aSelfId, const dom::GPUBindGroupLayoutDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_bind_group_layout_id(mClient, aSelfId);
nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
struct OptionalData {
ffi::WGPUTextureViewDimension dim;
ffi::WGPUTextureComponentType type;
ffi::WGPUTextureFormat format;
};
nsTArray<OptionalData> optional(aDesc.mEntries.Length());
for (const auto& entry : aDesc.mEntries) {
OptionalData data = {};
if (entry.mViewDimension.WasPassed()) {
data.dim = ffi::WGPUTextureViewDimension(entry.mViewDimension.Value());
}
if (entry.mTextureComponentType.WasPassed()) {
data.type =
ffi::WGPUTextureComponentType(entry.mTextureComponentType.Value());
}
if (entry.mStorageTextureFormat.WasPassed()) {
data.format = ffi::WGPUTextureFormat(entry.mStorageTextureFormat.Value());
}
optional.AppendElement(data);
}
nsTArray<ffi::WGPUBindGroupLayoutEntry> entries(aDesc.mEntries.Length());
for (size_t i = 0; i < aDesc.mEntries.Length(); ++i) {
const auto& entry = aDesc.mEntries[i];
ffi::WGPUBindGroupLayoutEntry e = {};
e.binding = entry.mBinding;
e.visibility = entry.mVisibility;
e.ty = ffi::WGPURawBindingType(entry.mType);
e.multisampled = entry.mMultisampled;
e.has_dynamic_offset = entry.mHasDynamicOffset;
e.view_dimension = ffi::WGPUTextureViewDimension(entry.mViewDimension);
e.texture_component_type =
ffi::WGPUTextureComponentType(entry.mTextureComponentType);
e.storage_texture_format =
entry.mStorageTextureFormat.WasPassed()
? ffi::WGPUTextureFormat(entry.mStorageTextureFormat.Value())
: ffi::WGPUTextureFormat(0);
if (entry.mViewDimension.WasPassed()) {
e.view_dimension = &optional[i].dim;
}
if (entry.mTextureComponentType.WasPassed()) {
e.texture_component_type = &optional[i].type;
}
if (entry.mStorageTextureFormat.WasPassed()) {
e.storage_texture_format = &optional[i].format;
}
entries.AppendElement(e);
}
SerialBindGroupLayoutDescriptor desc = {nsCString(), std::move(entries)};
if (!SendDeviceCreateBindGroupLayout(aSelfId, desc, id)) {
ffi::WGPUBindGroupLayoutDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.entries = entries.Elements();
desc.entries_length = entries.Length();
ByteBuf bb;
RawId id = ffi::wgpu_client_create_bind_group_layout(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -244,12 +287,25 @@ RawId WebGPUChild::DeviceCreateBindGroupLayout(
RawId WebGPUChild::DeviceCreatePipelineLayout(
RawId aSelfId, const dom::GPUPipelineLayoutDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_pipeline_layout_id(mClient, aSelfId);
SerialPipelineLayoutDescriptor desc = {};
for (const auto& layouts : aDesc.mBindGroupLayouts) {
desc.mBindGroupLayouts.AppendElement(layouts->mId);
nsTArray<ffi::WGPUBindGroupLayoutId> bindGroupLayouts(
aDesc.mBindGroupLayouts.Length());
for (const auto& layout : aDesc.mBindGroupLayouts) {
bindGroupLayouts.AppendElement(layout->mId);
}
if (!SendDeviceCreatePipelineLayout(aSelfId, desc, id)) {
ffi::WGPUPipelineLayoutDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.bind_group_layouts = bindGroupLayouts.Elements();
desc.bind_group_layouts_length = bindGroupLayouts.Length();
ByteBuf bb;
RawId id = ffi::wgpu_client_create_pipeline_layout(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -257,31 +313,39 @@ RawId WebGPUChild::DeviceCreatePipelineLayout(
RawId WebGPUChild::DeviceCreateBindGroup(
RawId aSelfId, const dom::GPUBindGroupDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_bind_group_id(mClient, aSelfId);
SerialBindGroupDescriptor desc = {};
desc.mLayout = aDesc.mLayout->mId;
nsTArray<ffi::WGPUBindGroupEntry> entries(aDesc.mEntries.Length());
for (const auto& entry : aDesc.mEntries) {
SerialBindGroupEntry bd = {};
bd.mBinding = entry.mBinding;
ffi::WGPUBindGroupEntry e = {};
e.binding = entry.mBinding;
if (entry.mResource.IsGPUBufferBinding()) {
bd.mType = SerialBindGroupEntryType::Buffer;
const auto& bufBinding = entry.mResource.GetAsGPUBufferBinding();
bd.mValue = bufBinding.mBuffer->mId;
bd.mBufferOffset = bufBinding.mOffset;
bd.mBufferSize =
bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
e.buffer = bufBinding.mBuffer->mId;
e.offset = bufBinding.mOffset;
e.size = bufBinding.mSize.WasPassed() ? bufBinding.mSize.Value() : 0;
}
if (entry.mResource.IsGPUTextureView()) {
bd.mType = SerialBindGroupEntryType::Texture;
bd.mValue = entry.mResource.GetAsGPUTextureView()->mId;
e.texture_view = entry.mResource.GetAsGPUTextureView()->mId;
}
if (entry.mResource.IsGPUSampler()) {
bd.mType = SerialBindGroupEntryType::Sampler;
bd.mValue = entry.mResource.GetAsGPUSampler()->mId;
e.sampler = entry.mResource.GetAsGPUSampler()->mId;
}
desc.mEntries.AppendElement(bd);
entries.AppendElement(e);
}
if (!SendDeviceCreateBindGroup(aSelfId, desc, id)) {
ffi::WGPUBindGroupDescriptor desc = {};
nsCString label;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.layout = aDesc.mLayout->mId;
desc.entries = entries.Elements();
desc.entries_length = entries.Length();
ByteBuf bb;
RawId id =
ffi::wgpu_client_create_bind_group(mClient, aSelfId, &desc, ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -289,40 +353,45 @@ RawId WebGPUChild::DeviceCreateBindGroup(
RawId WebGPUChild::DeviceCreateShaderModule(
RawId aSelfId, const dom::GPUShaderModuleDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_shader_module_id(mClient, aSelfId);
ffi::WGPUShaderModuleDescriptor desc = {};
nsTArray<uint32_t> spirv;
nsCString wgsl;
if (aDesc.mCode.IsString()) {
CopyUTF16toUTF8(aDesc.mCode.GetAsString(), wgsl);
LossyCopyUTF16toASCII(aDesc.mCode.GetAsString(), wgsl);
desc.wgsl_chars = wgsl.get();
} else {
const auto& code = aDesc.mCode.GetAsUint32Array();
code.ComputeState();
spirv.AppendElements(code.Data(), code.Length());
desc.spirv_words = code.Data();
desc.spirv_words_length = code.Length();
}
if (!SendDeviceCreateShaderModule(aSelfId, spirv, wgsl, id)) {
ByteBuf bb;
RawId id = ffi::wgpu_client_create_shader_module(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
}
static SerialProgrammableStageDescriptor ConvertProgrammableStageDescriptor(
const dom::GPUProgrammableStageDescriptor& aDesc) {
SerialProgrammableStageDescriptor stage = {};
stage.mModule = aDesc.mModule->mId;
stage.mEntryPoint = aDesc.mEntryPoint;
return stage;
}
RawId WebGPUChild::DeviceCreateComputePipeline(
RawId aSelfId, const dom::GPUComputePipelineDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_compute_pipeline_id(mClient, aSelfId);
const SerialComputePipelineDescriptor desc = {
aDesc.mLayout->mId,
ConvertProgrammableStageDescriptor(aDesc.mComputeStage),
};
if (!SendDeviceCreateComputePipeline(aSelfId, desc, id)) {
ffi::WGPUComputePipelineDescriptor desc = {};
nsCString label, entryPoint;
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.layout = aDesc.mLayout->mId;
desc.compute_stage.module = aDesc.mComputeStage.mModule->mId;
LossyCopyUTF16toASCII(aDesc.mComputeStage.mEntryPoint, entryPoint);
desc.compute_stage.entry_point = entryPoint.get();
ByteBuf bb;
RawId id = ffi::wgpu_client_create_compute_pipeline(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;
@ -358,6 +427,12 @@ static ffi::WGPUColorStateDescriptor ConvertColorDescriptor(
return desc;
}
static ffi::WGPUCompareFunction ConvertCompareFunction(
const dom::GPUCompareFunction& aCompare) {
// Value of 0 = Undefined is reserved on the C side for "null" semantics.
return ffi::WGPUCompareFunction(static_cast<uint8_t>(aCompare) + 1);
}
static ffi::WGPUStencilStateFaceDescriptor ConvertStencilFaceDescriptor(
const dom::GPUStencilStateFaceDescriptor& aDesc) {
ffi::WGPUStencilStateFaceDescriptor desc = {};
@ -374,68 +449,97 @@ static ffi::WGPUDepthStencilStateDescriptor ConvertDepthStencilDescriptor(
desc.format = ffi::WGPUTextureFormat(aDesc.mFormat);
desc.depth_write_enabled = aDesc.mDepthWriteEnabled;
desc.depth_compare = ConvertCompareFunction(aDesc.mDepthCompare);
desc.stencil_front = ConvertStencilFaceDescriptor(aDesc.mStencilFront);
desc.stencil_back = ConvertStencilFaceDescriptor(aDesc.mStencilBack);
desc.stencil_read_mask = aDesc.mStencilReadMask;
desc.stencil_write_mask = aDesc.mStencilWriteMask;
return desc;
}
static ffi::WGPUVertexAttributeDescriptor ConvertVertexAttributeDescriptor(
const dom::GPUVertexAttributeDescriptor& aDesc) {
ffi::WGPUVertexAttributeDescriptor desc = {};
desc.offset = aDesc.mOffset;
desc.format = ffi::WGPUVertexFormat(aDesc.mFormat);
desc.shader_location = aDesc.mShaderLocation;
return desc;
}
static SerialVertexBufferLayoutDescriptor ConvertVertexBufferLayoutDescriptor(
const dom::GPUVertexBufferLayoutDescriptor& aDesc) {
SerialVertexBufferLayoutDescriptor desc = {};
desc.mArrayStride = aDesc.mArrayStride;
desc.mStepMode = ffi::WGPUInputStepMode(aDesc.mStepMode);
for (const auto& vat : aDesc.mAttributes) {
desc.mAttributes.AppendElement(ConvertVertexAttributeDescriptor(vat));
}
desc.stencil.front = ConvertStencilFaceDescriptor(aDesc.mStencilFront);
desc.stencil.back = ConvertStencilFaceDescriptor(aDesc.mStencilBack);
desc.stencil.read_mask = aDesc.mStencilReadMask;
desc.stencil.write_mask = aDesc.mStencilWriteMask;
return desc;
}
RawId WebGPUChild::DeviceCreateRenderPipeline(
RawId aSelfId, const dom::GPURenderPipelineDescriptor& aDesc) {
RawId id = ffi::wgpu_client_make_render_pipeline_id(mClient, aSelfId);
SerialRenderPipelineDescriptor desc = {};
desc.mLayout = aDesc.mLayout->mId;
desc.mVertexStage = ConvertProgrammableStageDescriptor(aDesc.mVertexStage);
ffi::WGPURenderPipelineDescriptor desc = {};
nsCString label, vsEntry, fsEntry;
ffi::WGPUProgrammableStageDescriptor vertexStage = {};
ffi::WGPUProgrammableStageDescriptor fragmentStage = {};
if (aDesc.mLabel.WasPassed()) {
LossyCopyUTF16toASCII(aDesc.mLabel.Value(), label);
desc.label = label.get();
}
desc.layout = aDesc.mLayout->mId;
vertexStage.module = aDesc.mVertexStage.mModule->mId;
LossyCopyUTF16toASCII(aDesc.mVertexStage.mEntryPoint, vsEntry);
vertexStage.entry_point = vsEntry.get();
desc.vertex_stage = &vertexStage;
if (aDesc.mFragmentStage.WasPassed()) {
desc.mFragmentStage =
ConvertProgrammableStageDescriptor(aDesc.mFragmentStage.Value());
const auto& stage = aDesc.mFragmentStage.Value();
fragmentStage.module = stage.mModule->mId;
LossyCopyUTF16toASCII(stage.mEntryPoint, fsEntry);
fragmentStage.entry_point = fsEntry.get();
desc.fragment_stage = &fragmentStage;
}
desc.mPrimitiveTopology =
desc.primitive_topology =
ffi::WGPUPrimitiveTopology(aDesc.mPrimitiveTopology);
// TODO: expect it to be optional to begin with
desc.mRasterizationState =
Some(ConvertRasterizationDescriptor(aDesc.mRasterizationState));
for (const auto& color_state : aDesc.mColorStates) {
desc.mColorStates.AppendElement(ConvertColorDescriptor(color_state));
const auto rasterization =
ConvertRasterizationDescriptor(aDesc.mRasterizationState);
desc.rasterization_state = &rasterization;
nsTArray<ffi::WGPUColorStateDescriptor> colorStates;
for (const auto& colorState : aDesc.mColorStates) {
colorStates.AppendElement(ConvertColorDescriptor(colorState));
}
desc.color_states = colorStates.Elements();
desc.color_states_length = colorStates.Length();
ffi::WGPUDepthStencilStateDescriptor depthStencilState = {};
if (aDesc.mDepthStencilState.WasPassed()) {
desc.mDepthStencilState =
Some(ConvertDepthStencilDescriptor(aDesc.mDepthStencilState.Value()));
depthStencilState =
ConvertDepthStencilDescriptor(aDesc.mDepthStencilState.Value());
desc.depth_stencil_state = &depthStencilState;
}
desc.mVertexState.mIndexFormat =
desc.vertex_state.index_format =
ffi::WGPUIndexFormat(aDesc.mVertexState.mIndexFormat);
nsTArray<ffi::WGPUVertexBufferDescriptor> vertexBuffers;
nsTArray<ffi::WGPUVertexAttributeDescriptor> vertexAttributes;
for (const auto& vertex_desc : aDesc.mVertexState.mVertexBuffers) {
SerialVertexBufferLayoutDescriptor vb_desc = {};
ffi::WGPUVertexBufferDescriptor vb_desc = {};
if (!vertex_desc.IsNull()) {
vb_desc = ConvertVertexBufferLayoutDescriptor(vertex_desc.Value());
const auto& vd = vertex_desc.Value();
vb_desc.stride = vd.mArrayStride;
vb_desc.step_mode = ffi::WGPUInputStepMode(vd.mStepMode);
// Note: we are setting the length but not the pointer
vb_desc.attributes_length = vd.mAttributes.Length();
for (const auto& vat : vd.mAttributes) {
ffi::WGPUVertexAttributeDescriptor ad = {};
ad.offset = vat.mOffset;
ad.format = ffi::WGPUVertexFormat(vat.mFormat);
ad.shader_location = vat.mShaderLocation;
vertexAttributes.AppendElement(ad);
}
}
desc.mVertexState.mVertexBuffers.AppendElement(std::move(vb_desc));
vertexBuffers.AppendElement(vb_desc);
}
desc.mSampleCount = aDesc.mSampleCount;
desc.mSampleMask = aDesc.mSampleMask;
desc.mAlphaToCoverageEnabled = aDesc.mAlphaToCoverageEnabled;
if (!SendDeviceCreateRenderPipeline(aSelfId, desc, id)) {
// Now patch up all the pointers to attribute lists.
size_t numAttributes = 0;
for (auto& vb_desc : vertexBuffers) {
vb_desc.attributes = vertexAttributes.Elements() + numAttributes;
numAttributes += vb_desc.attributes_length;
}
desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
desc.sample_count = aDesc.mSampleCount;
desc.sample_mask = aDesc.mSampleMask;
desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
ByteBuf bb;
RawId id = ffi::wgpu_client_create_render_pipeline(mClient, aSelfId, &desc,
ToFFI(&bb));
if (!SendDeviceAction(aSelfId, std::move(bb))) {
MOZ_CRASH("IPC failure");
}
return id;

Просмотреть файл

@ -24,6 +24,8 @@ struct WGPUTextureViewDescriptor;
typedef MozPromise<RawId, Maybe<ipc::ResponseRejectReason>, true> RawIdPromise;
ffi::WGPUByteBuf* ToFFI(ipc::ByteBuf* x);
class WebGPUChild final : public PWebGPUChild {
public:
friend class layers::CompositorBridgeChild;
@ -42,13 +44,10 @@ class WebGPUChild final : public PWebGPUChild {
const dom::GPUDeviceDescriptor& aDesc);
RawId DeviceCreateBuffer(RawId aSelfId,
const dom::GPUBufferDescriptor& aDesc);
static UniquePtr<ffi::WGPUTextureViewDescriptor> GetDefaultViewDescriptor(
const dom::GPUTextureDescriptor& aDesc);
RawId DeviceCreateTexture(RawId aSelfId,
const dom::GPUTextureDescriptor& aDesc);
RawId TextureCreateView(
RawId aSelfId, const dom::GPUTextureViewDescriptor& aDesc,
const ffi::WGPUTextureViewDescriptor& aDefaultViewDesc);
RawId TextureCreateView(RawId aSelfId,
const dom::GPUTextureViewDescriptor& aDesc);
RawId DeviceCreateSampler(RawId aSelfId,
const dom::GPUSamplerDescriptor& aDesc);
RawId DeviceCreateCommandEncoder(

Просмотреть файл

@ -111,25 +111,22 @@ static void FreeSurface(RawId id, void* param) {
}
static ffi::WGPUIdentityRecyclerFactory MakeFactory(void* param) {
// Note: careful about the order here!
const ffi::WGPUIdentityRecyclerFactory factory = {
param,
FreeAdapter,
FreeDevice,
FreeSwapChain,
FreePipelineLayout,
FreeShaderModule,
FreeBindGroupLayout,
FreeBindGroup,
FreeCommandBuffer,
FreeRenderPipeline,
FreeComputePipeline,
FreeBuffer,
FreeTexture,
FreeTextureView,
FreeSampler,
FreeSurface,
};
ffi::WGPUIdentityRecyclerFactory factory = {param};
factory.free_adapter = FreeAdapter;
factory.free_device = FreeDevice;
factory.free_swap_chain = FreeSwapChain;
factory.free_pipeline_layout = FreePipelineLayout;
factory.free_shader_module = FreeShaderModule;
factory.free_bind_group_layout = FreeBindGroupLayout;
factory.free_bind_group = FreeBindGroup;
factory.free_command_buffer = FreeCommandBuffer;
factory.free_render_pipeline = FreeRenderPipeline;
factory.free_compute_pipeline = FreeComputePipeline;
factory.free_buffer = FreeBuffer;
factory.free_texture = FreeTexture;
factory.free_texture_view = FreeTextureView;
factory.free_sampler = FreeSampler;
factory.free_surface = FreeSurface;
return factory;
}
@ -176,36 +173,39 @@ ipc::IPCResult WebGPUParent::RecvInstanceRequestAdapter(
ipc::IPCResult WebGPUParent::RecvAdapterRequestDevice(
RawId aSelfId, const dom::GPUDeviceDescriptor& aDesc, RawId aNewId) {
ffi::WGPUDeviceDescriptor desc = {};
desc.limits.max_bind_groups = aDesc.mLimits.WasPassed()
? aDesc.mLimits.Value().mMaxBindGroups
: WGPUDEFAULT_BIND_GROUPS;
if (aDesc.mLimits.WasPassed()) {
const auto& lim = aDesc.mLimits.Value();
desc.limits.max_bind_groups = lim.mMaxBindGroups;
desc.limits.max_dynamic_uniform_buffers_per_pipeline_layout =
lim.mMaxDynamicUniformBuffersPerPipelineLayout;
desc.limits.max_dynamic_storage_buffers_per_pipeline_layout =
lim.mMaxDynamicStorageBuffersPerPipelineLayout;
desc.limits.max_sampled_textures_per_shader_stage =
lim.mMaxSampledTexturesPerShaderStage;
desc.limits.max_samplers_per_shader_stage = lim.mMaxSamplersPerShaderStage;
desc.limits.max_storage_buffers_per_shader_stage =
lim.mMaxStorageBuffersPerShaderStage;
desc.limits.max_storage_textures_per_shader_stage =
lim.mMaxStorageTexturesPerShaderStage;
desc.limits.max_uniform_buffers_per_shader_stage =
lim.mMaxUniformBuffersPerShaderStage;
desc.limits.max_uniform_buffer_binding_size =
lim.mMaxUniformBufferBindingSize;
} else {
ffi::wgpu_server_fill_default_limits(&desc.limits);
}
// TODO: fill up the descriptor
ffi::wgpu_server_adapter_request_device(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvAdapterDestroy(RawId aSelfId) {
ffi::wgpu_server_adapter_destroy(mContext, aSelfId);
ffi::wgpu_server_adapter_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceDestroy(RawId aSelfId) {
ffi::wgpu_server_device_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateBuffer(
RawId aSelfId, const ffi::WGPUBufferDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId) {
ffi::WGPUBufferDescriptor desc = aDesc;
if (!aLabel.IsEmpty()) {
desc.label = aLabel.Data();
}
ffi::wgpu_server_device_create_buffer(mContext, aSelfId, &desc, aNewId);
if (desc.usage & (WGPUBufferUsage_MAP_READ | WGPUBufferUsage_MAP_WRITE)) {
mSharedMemoryMap.insert({aNewId, Shmem()});
}
ffi::wgpu_server_device_drop(mContext, aSelfId);
return IPC_OK();
}
@ -283,7 +283,7 @@ ipc::IPCResult WebGPUParent::RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem,
}
ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
ffi::wgpu_server_buffer_destroy(mContext, aSelfId);
ffi::wgpu_server_buffer_drop(mContext, aSelfId);
const auto iter = mSharedMemoryMap.find(aSelfId);
if (iter != mSharedMemoryMap.end()) {
@ -293,120 +293,18 @@ ipc::IPCResult WebGPUParent::RecvBufferDestroy(RawId aSelfId) {
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateTexture(
RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId) {
ffi::WGPUTextureDescriptor desc = aDesc;
if (!aLabel.IsEmpty()) {
desc.label = aLabel.Data();
}
ffi::wgpu_server_device_create_texture(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvTextureCreateView(
RawId aSelfId, const ffi::WGPUTextureViewDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId) {
ffi::WGPUTextureViewDescriptor desc = aDesc;
if (!aLabel.IsEmpty()) {
desc.label = aLabel.Data();
}
ffi::wgpu_server_texture_create_view(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvTextureDestroy(RawId aSelfId) {
ffi::wgpu_server_texture_destroy(mContext, aSelfId);
ffi::wgpu_server_texture_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvTextureViewDestroy(RawId aSelfId) {
ffi::wgpu_server_texture_view_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateSampler(
RawId aSelfId, const SerialSamplerDescriptor& aDesc, RawId aNewId) {
ffi::WGPUSamplerDescriptor desc = {};
desc.address_modes[0] = aDesc.mAddressU;
desc.address_modes[1] = aDesc.mAddressV;
desc.address_modes[2] = aDesc.mAddressW;
desc.mag_filter = aDesc.mMagFilter;
desc.min_filter = aDesc.mMinFilter;
desc.mipmap_filter = aDesc.mMipmapFilter;
desc.lod_min_clamp = aDesc.mLodMinClamp;
desc.lod_max_clamp = aDesc.mLodMaxClamp;
if (aDesc.mCompare) {
desc.compare = aDesc.mCompare.ptr();
}
if (!aDesc.mLabel.IsEmpty()) {
desc.label = aDesc.mLabel.Data();
}
ffi::wgpu_server_device_create_sampler(mContext, aSelfId, &desc, aNewId);
ffi::wgpu_server_texture_view_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvSamplerDestroy(RawId aSelfId) {
ffi::wgpu_server_sampler_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateCommandEncoder(
RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc,
RawId aNewId) {
ffi::WGPUCommandEncoderDescriptor desc = {};
if (aDesc.mLabel.WasPassed()) {
// TODO: desc.label = aDesc.mLabel.Value();
}
ffi::wgpu_server_device_create_encoder(mContext, aSelfId, &desc, aNewId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToBuffer(
RawId aSelfId, RawId aSourceId, BufferAddress aSourceOffset,
RawId aDestinationId, BufferAddress aDestinationOffset,
BufferAddress aSize) {
ffi::wgpu_server_encoder_copy_buffer_to_buffer(mContext, aSelfId, aSourceId,
aSourceOffset, aDestinationId,
aDestinationOffset, aSize);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyBufferToTexture(
RawId aSelfId, WGPUBufferCopyView aSource, WGPUTextureCopyView aDestination,
WGPUExtent3d aCopySize) {
ffi::wgpu_server_encoder_copy_buffer_to_texture(mContext, aSelfId, &aSource,
&aDestination, &aCopySize);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToBuffer(
RawId aSelfId, WGPUTextureCopyView aSource, WGPUBufferCopyView aDestination,
WGPUExtent3d aCopySize) {
ffi::wgpu_server_encoder_copy_texture_to_buffer(mContext, aSelfId, &aSource,
&aDestination, &aCopySize);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderCopyTextureToTexture(
RawId aSelfId, WGPUTextureCopyView aSource,
WGPUTextureCopyView aDestination, WGPUExtent3d aCopySize) {
ffi::wgpu_server_encoder_copy_texture_to_texture(mContext, aSelfId, &aSource,
&aDestination, &aCopySize);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunComputePass(RawId aSelfId,
Shmem&& aShmem) {
ffi::wgpu_server_encode_compute_pass(mContext, aSelfId, aShmem.get<uint8_t>(),
aShmem.Size<uint8_t>());
DeallocShmem(aShmem);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderRunRenderPass(RawId aSelfId,
Shmem&& aShmem) {
ffi::wgpu_server_encode_render_pass(mContext, aSelfId, aShmem.get<uint8_t>(),
aShmem.Size<uint8_t>());
DeallocShmem(aShmem);
ffi::wgpu_server_sampler_drop(mContext, aSelfId);
return IPC_OK();
}
@ -419,12 +317,12 @@ ipc::IPCResult WebGPUParent::RecvCommandEncoderFinish(
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderDestroy(RawId aSelfId) {
ffi::wgpu_server_encoder_destroy(mContext, aSelfId);
ffi::wgpu_server_encoder_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandBufferDestroy(RawId aSelfId) {
ffi::wgpu_server_command_buffer_destroy(mContext, aSelfId);
ffi::wgpu_server_command_buffer_drop(mContext, aSelfId);
return IPC_OK();
}
@ -457,154 +355,33 @@ ipc::IPCResult WebGPUParent::RecvQueueWriteTexture(
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroupLayout(
RawId aSelfId, const SerialBindGroupLayoutDescriptor& aDesc, RawId aNewId) {
ffi::WGPUBindGroupLayoutDescriptor desc = {};
desc.entries = aDesc.mEntries.Elements();
desc.entries_length = aDesc.mEntries.Length();
ffi::wgpu_server_device_create_bind_group_layout(mContext, aSelfId, &desc,
aNewId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvBindGroupLayoutDestroy(RawId aSelfId) {
ffi::wgpu_server_bind_group_layout_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreatePipelineLayout(
RawId aSelfId, const SerialPipelineLayoutDescriptor& aDesc, RawId aNewId) {
ffi::WGPUPipelineLayoutDescriptor desc = {};
desc.bind_group_layouts = aDesc.mBindGroupLayouts.Elements();
desc.bind_group_layouts_length = aDesc.mBindGroupLayouts.Length();
ffi::wgpu_server_device_create_pipeline_layout(mContext, aSelfId, &desc,
aNewId);
ffi::wgpu_server_bind_group_layout_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvPipelineLayoutDestroy(RawId aSelfId) {
ffi::wgpu_server_pipeline_layout_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateBindGroup(
RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId) {
nsTArray<ffi::WGPUBindGroupEntry> ffiEntries(aDesc.mEntries.Length());
for (const auto& entry : aDesc.mEntries) {
ffi::WGPUBindGroupEntry bge = {};
bge.binding = entry.mBinding;
switch (entry.mType) {
case SerialBindGroupEntryType::Buffer:
bge.buffer = entry.mValue;
bge.offset = entry.mBufferOffset;
bge.size = ffi::make_buffer_size(entry.mBufferSize);
break;
case SerialBindGroupEntryType::Texture:
bge.texture_view = entry.mValue;
break;
case SerialBindGroupEntryType::Sampler:
bge.sampler = entry.mValue;
break;
default:
MOZ_CRASH("unreachable");
}
ffiEntries.AppendElement(bge);
}
ffi::WGPUBindGroupDescriptor desc = {};
desc.layout = aDesc.mLayout;
desc.entries = ffiEntries.Elements();
desc.entries_length = ffiEntries.Length();
ffi::wgpu_server_device_create_bind_group(mContext, aSelfId, &desc, aNewId);
ffi::wgpu_server_pipeline_layout_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvBindGroupDestroy(RawId aSelfId) {
ffi::wgpu_server_bind_group_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateShaderModule(
RawId aSelfId, const nsTArray<uint32_t>& aSpirv, const nsCString& aWgsl,
RawId aNewId) {
ffi::WGPUShaderModuleDescriptor desc = {};
desc.code.bytes = aSpirv.Elements();
desc.code.length = aSpirv.Length();
ffi::wgpu_server_device_create_shader_module(mContext, aSelfId, &desc,
aNewId);
ffi::wgpu_server_bind_group_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvShaderModuleDestroy(RawId aSelfId) {
ffi::wgpu_server_shader_module_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateComputePipeline(
RawId aSelfId, const SerialComputePipelineDescriptor& aDesc, RawId aNewId) {
const NS_LossyConvertUTF16toASCII entryPoint(aDesc.mComputeStage.mEntryPoint);
ffi::WGPUComputePipelineDescriptor desc = {};
desc.layout = aDesc.mLayout;
desc.compute_stage.module = aDesc.mComputeStage.mModule;
desc.compute_stage.entry_point = entryPoint.get();
ffi::wgpu_server_device_create_compute_pipeline(mContext, aSelfId, &desc,
aNewId);
ffi::wgpu_server_shader_module_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvComputePipelineDestroy(RawId aSelfId) {
ffi::wgpu_server_compute_pipeline_destroy(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceCreateRenderPipeline(
RawId aSelfId, const SerialRenderPipelineDescriptor& aDesc, RawId aNewId) {
const NS_LossyConvertUTF16toASCII vsEntryPoint(
aDesc.mVertexStage.mEntryPoint);
const NS_LossyConvertUTF16toASCII fsEntryPoint(
aDesc.mFragmentStage.mEntryPoint);
nsTArray<ffi::WGPUVertexBufferLayoutDescriptor> vertexBuffers(
aDesc.mVertexState.mVertexBuffers.Length());
ffi::WGPURenderPipelineDescriptor desc = {};
ffi::WGPUProgrammableStageDescriptor fragmentDesc = {};
desc.layout = aDesc.mLayout;
desc.vertex_stage.module = aDesc.mVertexStage.mModule;
desc.vertex_stage.entry_point = vsEntryPoint.get();
if (aDesc.mFragmentStage.mModule != 0) {
fragmentDesc.module = aDesc.mFragmentStage.mModule;
fragmentDesc.entry_point = fsEntryPoint.get();
desc.fragment_stage = &fragmentDesc;
}
desc.primitive_topology = aDesc.mPrimitiveTopology;
if (aDesc.mRasterizationState.isSome()) {
desc.rasterization_state = aDesc.mRasterizationState.ptr();
}
desc.color_states = aDesc.mColorStates.Elements();
desc.color_states_length = aDesc.mColorStates.Length();
if (aDesc.mDepthStencilState.isSome()) {
desc.depth_stencil_state = aDesc.mDepthStencilState.ptr();
}
for (const auto& vertexBuffer : aDesc.mVertexState.mVertexBuffers) {
ffi::WGPUVertexBufferLayoutDescriptor vb = {};
vb.array_stride = vertexBuffer.mArrayStride;
vb.step_mode = vertexBuffer.mStepMode;
vb.attributes = vertexBuffer.mAttributes.Elements();
vb.attributes_length = vertexBuffer.mAttributes.Length();
vertexBuffers.AppendElement(vb);
}
desc.vertex_state.index_format = aDesc.mVertexState.mIndexFormat;
desc.vertex_state.vertex_buffers = vertexBuffers.Elements();
desc.vertex_state.vertex_buffers_length = vertexBuffers.Length();
desc.sample_count = aDesc.mSampleCount;
desc.sample_mask = aDesc.mSampleMask;
desc.alpha_to_coverage_enabled = aDesc.mAlphaToCoverageEnabled;
ffi::wgpu_server_device_create_render_pipeline(mContext, aSelfId, &desc,
aNewId);
ffi::wgpu_server_compute_pipeline_drop(mContext, aSelfId);
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvRenderPipelineDestroy(RawId aSelfId) {
ffi::wgpu_server_render_pipeline_destroy(mContext, aSelfId);
ffi::wgpu_server_render_pipeline_drop(mContext, aSelfId);
return IPC_OK();
}
@ -791,10 +568,10 @@ ipc::IPCResult WebGPUParent::RecvSwapChainDestroy(
}
}
for (const auto bid : data->mAvailableBufferIds) {
ffi::wgpu_server_buffer_destroy(mContext, bid);
ffi::wgpu_server_buffer_drop(mContext, bid);
}
for (const auto bid : data->mQueuedBufferIds) {
ffi::wgpu_server_buffer_destroy(mContext, bid);
ffi::wgpu_server_buffer_drop(mContext, bid);
}
data->mBuffersLock.Unlock();
return IPC_OK();
@ -812,5 +589,26 @@ ipc::IPCResult WebGPUParent::RecvShutdown() {
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvDeviceAction(RawId aSelf,
const ipc::ByteBuf& aByteBuf) {
ffi::wgpu_server_device_action(
mContext, aSelf, reinterpret_cast<const ffi::WGPUByteBuf*>(&aByteBuf));
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvTextureAction(RawId aSelf,
const ipc::ByteBuf& aByteBuf) {
ffi::wgpu_server_texture_action(
mContext, aSelf, reinterpret_cast<const ffi::WGPUByteBuf*>(&aByteBuf));
return IPC_OK();
}
ipc::IPCResult WebGPUParent::RecvCommandEncoderAction(
RawId aSelf, const ipc::ByteBuf& aByteBuf) {
ffi::wgpu_server_command_encoder_action(
mContext, aSelf, reinterpret_cast<const ffi::WGPUByteBuf*>(&aByteBuf));
return IPC_OK();
}
} // namespace webgpu
} // namespace mozilla

Просмотреть файл

@ -30,46 +30,15 @@ class WebGPUParent final : public PWebGPUParent {
RawId aNewId);
ipc::IPCResult RecvAdapterDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateBuffer(RawId aSelfId,
const ffi::WGPUBufferDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvBufferReturnShmem(RawId aSelfId, Shmem&& aShmem);
ipc::IPCResult RecvBufferMap(RawId aSelfId, ffi::WGPUHostMap aHostMap,
uint64_t aOffset, uint64_t size,
BufferMapResolver&& aResolver);
ipc::IPCResult RecvBufferUnmap(RawId aSelfId, Shmem&& aShmem, bool aFlush);
ipc::IPCResult RecvBufferDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateTexture(
RawId aSelfId, const ffi::WGPUTextureDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvTextureCreateView(
RawId aSelfId, const ffi::WGPUTextureViewDescriptor& aDesc,
const nsCString& aLabel, RawId aNewId);
ipc::IPCResult RecvTextureDestroy(RawId aSelfId);
ipc::IPCResult RecvTextureViewDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateSampler(RawId aSelfId,
const SerialSamplerDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvSamplerDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateCommandEncoder(
RawId aSelfId, const dom::GPUCommandEncoderDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvCommandEncoderCopyBufferToBuffer(
RawId aSelfId, RawId aSourceId, BufferAddress aSourceOffset,
RawId aDestinationId, BufferAddress aDestinationOffset,
BufferAddress aSize);
ipc::IPCResult RecvCommandEncoderCopyBufferToTexture(
RawId aSelfId, WGPUBufferCopyView aSource,
WGPUTextureCopyView aDestination, WGPUExtent3d aCopySize);
ipc::IPCResult RecvCommandEncoderCopyTextureToBuffer(
RawId aSelfId, WGPUTextureCopyView aSource,
WGPUBufferCopyView aDestination, WGPUExtent3d aCopySize);
ipc::IPCResult RecvCommandEncoderCopyTextureToTexture(
RawId aSelfId, WGPUTextureCopyView aSource,
WGPUTextureCopyView aDestination, WGPUExtent3d aCopySize);
ipc::IPCResult RecvCommandEncoderRunComputePass(RawId aSelfId,
Shmem&& aShmem);
ipc::IPCResult RecvCommandEncoderRunRenderPass(RawId aSelfId, Shmem&& aShmem);
ipc::IPCResult RecvCommandEncoderFinish(
RawId aSelfId, const dom::GPUCommandBufferDescriptor& aDesc);
ipc::IPCResult RecvCommandEncoderDestroy(RawId aSelfId);
@ -82,27 +51,11 @@ class WebGPUParent final : public PWebGPUParent {
RawId aSelfId, const ffi::WGPUTextureCopyView& aDestination,
Shmem&& aShmem, const ffi::WGPUTextureDataLayout& aDataLayout,
const ffi::WGPUExtent3d& aExtent);
ipc::IPCResult RecvDeviceCreateBindGroupLayout(
RawId aSelfId, const SerialBindGroupLayoutDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvBindGroupLayoutDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreatePipelineLayout(
RawId aSelfId, const SerialPipelineLayoutDescriptor& aDesc, RawId aNewId);
ipc::IPCResult RecvPipelineLayoutDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateBindGroup(
RawId aSelfId, const SerialBindGroupDescriptor& aDesc, RawId aNewId);
ipc::IPCResult RecvBindGroupDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateShaderModule(RawId aSelfId,
const nsTArray<uint32_t>& aSpirv,
const nsCString& aWgsl,
RawId aNewId);
ipc::IPCResult RecvShaderModuleDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateComputePipeline(
RawId aSelfId, const SerialComputePipelineDescriptor& aDesc,
RawId aNewId);
ipc::IPCResult RecvComputePipelineDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateRenderPipeline(
RawId aSelfId, const SerialRenderPipelineDescriptor& aDesc, RawId aNewId);
ipc::IPCResult RecvRenderPipelineDestroy(RawId aSelfId);
ipc::IPCResult RecvDeviceCreateSwapChain(RawId aSelfId, RawId aQueueId,
const layers::RGBDescriptor& aDesc,
@ -112,6 +65,12 @@ class WebGPUParent final : public PWebGPUParent {
RawId aTextureId,
RawId aCommandEncoderId);
ipc::IPCResult RecvSwapChainDestroy(wr::ExternalImageId aExternalId);
ipc::IPCResult RecvDeviceAction(RawId aSelf, const ipc::ByteBuf& aByteBuf);
ipc::IPCResult RecvTextureAction(RawId aSelf, const ipc::ByteBuf& aByteBuf);
ipc::IPCResult RecvCommandEncoderAction(RawId aSelf,
const ipc::ByteBuf& aByteBuf);
ipc::IPCResult RecvShutdown();
private:

Просмотреть файл

@ -13,27 +13,6 @@
namespace IPC {
// Special handling of the raw strings serialization.
// We are carrying the strings through IPC separately from the containing
// structs. So this implementation always reads nullptr for the char pointer.
template <>
struct ParamTraits<mozilla::webgpu::ffi::WGPURawString> {
typedef mozilla::webgpu::ffi::WGPURawString paramType;
static void Write(Message* aMsg, const paramType& aParam) {
mozilla::Unused << aMsg;
mozilla::Unused << aParam;
}
static bool Read(const Message* aMsg, PickleIterator* aIter,
paramType* aResult) {
mozilla::Unused << aMsg;
mozilla::Unused << aIter;
*aResult = nullptr;
return true;
}
};
#define DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, guard) \
template <> \
struct ParamTraits<something> \
@ -45,28 +24,9 @@ struct ParamTraits<mozilla::webgpu::ffi::WGPURawString> {
DEFINE_IPC_SERIALIZER_ENUM_GUARD(something, something##_Sentinel)
DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::dom::GPUPowerPreference);
DEFINE_IPC_SERIALIZER_DOM_ENUM(mozilla::webgpu::SerialBindGroupEntryType);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUAddressMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPURawBindingType);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUBlendFactor);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUBlendOperation);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUCompareFunction);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUCullMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUFilterMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUFrontFace);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUHostMap);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUIndexFormat);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUInputStepMode);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUPrimitiveTopology);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUStencilOperation);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureAspect);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureDimension);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureFormat);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUTextureViewDimension);
DEFINE_IPC_SERIALIZER_FFI_ENUM(mozilla::webgpu::ffi::WGPUVertexFormat);
DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandEncoderDescriptor);
DEFINE_IPC_SERIALIZER_WITHOUT_FIELDS(mozilla::dom::GPUCommandBufferDescriptor);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPURequestAdapterOptions,
@ -76,77 +36,15 @@ DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUExtensions,
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPULimits, mMaxBindGroups);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::dom::GPUDeviceDescriptor,
mExtensions, mLimits);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUBufferDescriptor,
label, size, usage, mapped_at_creation);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDescriptor,
label, size, mip_level_count, sample_count,
dimension, format, usage);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUExtent3d, width,
height, depth);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUOrigin3d, x, y, z);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUTextureViewDescriptor, label, format, dimension,
aspect, base_mip_level, level_count, base_array_layer, array_layer_count);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUBlendDescriptor,
src_factor, dst_factor, operation);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPURasterizationStateDescriptor, front_face,
cull_mode, depth_bias, depth_bias_slope_scale, depth_bias_clamp);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUColorStateDescriptor, format, alpha_blend,
color_blend, write_mask);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUStencilStateFaceDescriptor, compare, fail_op,
depth_fail_op, pass_op);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUDepthStencilStateDescriptor, format,
depth_write_enabled, depth_compare, stencil_front, stencil_back,
stencil_read_mask, stencil_write_mask);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUVertexAttributeDescriptor, offset, format,
shader_location);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureDataLayout,
offset, bytes_per_row, rows_per_image);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUBufferCopyView,
buffer, layout);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::ffi::WGPUTextureCopyView,
texture, mip_level, origin);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::ffi::WGPUBindGroupLayoutEntry, binding, visibility, ty,
multisampled, has_dynamic_offset, view_dimension, texture_component_type,
storage_texture_format);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialBindGroupLayoutDescriptor, mLabel, mEntries);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialPipelineLayoutDescriptor, mBindGroupLayouts);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialBindGroupEntry,
mBinding, mType, mValue, mBufferOffset,
mBufferSize);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialBindGroupDescriptor,
mLabel, mLayout, mEntries);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialProgrammableStageDescriptor, mModule, mEntryPoint);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialVertexBufferLayoutDescriptor, mArrayStride,
mStepMode, mAttributes);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialVertexStateDescriptor,
mIndexFormat, mVertexBuffers);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialComputePipelineDescriptor, mLayout, mComputeStage);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(
mozilla::webgpu::SerialRenderPipelineDescriptor, mLayout, mVertexStage,
mFragmentStage, mPrimitiveTopology, mRasterizationState, mColorStates,
mDepthStencilState, mVertexState, mSampleCount, mSampleMask,
mAlphaToCoverageEnabled);
DEFINE_IPC_SERIALIZER_WITH_FIELDS(mozilla::webgpu::SerialSamplerDescriptor,
mLabel, mAddressU, mAddressV, mAddressW,
mMagFilter, mMinFilter, mMipmapFilter,
mLodMinClamp, mLodMaxClamp, mCompare,
mAnisotropyClamp);
#undef DEFINE_IPC_SERIALIZER_FFI_ENUM
#undef DEFINE_IPC_SERIALIZER_DOM_ENUM
#undef DEFINE_IPC_SERIALIZER_ENUM_GUARD

Просмотреть файл

@ -7,9 +7,6 @@
#define WEBGPU_TYPES_H_
#include <cstdint>
#include "nsTArray.h"
#include "mozilla/Maybe.h"
#include "mozilla/webgpu/ffi/wgpu.h"
namespace mozilla {
namespace webgpu {
@ -17,85 +14,6 @@ namespace webgpu {
typedef uint64_t RawId;
typedef uint64_t BufferAddress;
struct SerialBindGroupLayoutDescriptor {
nsCString mLabel;
nsTArray<ffi::WGPUBindGroupLayoutEntry> mEntries;
};
struct SerialPipelineLayoutDescriptor {
nsTArray<RawId> mBindGroupLayouts;
};
enum class SerialBindGroupEntryType : uint8_t {
Buffer,
Texture,
Sampler,
EndGuard_
};
struct SerialBindGroupEntry {
uint32_t mBinding = 0;
SerialBindGroupEntryType mType = SerialBindGroupEntryType::EndGuard_;
RawId mValue = 0;
BufferAddress mBufferOffset = 0;
BufferAddress mBufferSize = 0;
};
struct SerialBindGroupDescriptor {
nsCString mLabel;
RawId mLayout = 0;
nsTArray<SerialBindGroupEntry> mEntries;
};
struct SerialProgrammableStageDescriptor {
RawId mModule = 0;
nsString mEntryPoint;
};
struct SerialComputePipelineDescriptor {
RawId mLayout = 0;
SerialProgrammableStageDescriptor mComputeStage;
};
struct SerialVertexBufferLayoutDescriptor {
ffi::WGPUBufferAddress mArrayStride = 0;
ffi::WGPUInputStepMode mStepMode = ffi::WGPUInputStepMode_Sentinel;
nsTArray<ffi::WGPUVertexAttributeDescriptor> mAttributes;
};
struct SerialVertexStateDescriptor {
ffi::WGPUIndexFormat mIndexFormat = ffi::WGPUIndexFormat_Sentinel;
nsTArray<SerialVertexBufferLayoutDescriptor> mVertexBuffers;
};
struct SerialRenderPipelineDescriptor {
RawId mLayout = 0;
SerialProgrammableStageDescriptor mVertexStage;
SerialProgrammableStageDescriptor mFragmentStage;
ffi::WGPUPrimitiveTopology mPrimitiveTopology =
ffi::WGPUPrimitiveTopology_Sentinel;
Maybe<ffi::WGPURasterizationStateDescriptor> mRasterizationState;
nsTArray<ffi::WGPUColorStateDescriptor> mColorStates;
Maybe<ffi::WGPUDepthStencilStateDescriptor> mDepthStencilState;
SerialVertexStateDescriptor mVertexState;
uint32_t mSampleCount = 0;
uint32_t mSampleMask = 0;
bool mAlphaToCoverageEnabled = false;
};
struct SerialSamplerDescriptor {
nsCString mLabel;
ffi::WGPUAddressMode mAddressU = ffi::WGPUAddressMode_Sentinel,
mAddressV = ffi::WGPUAddressMode_Sentinel,
mAddressW = ffi::WGPUAddressMode_Sentinel;
ffi::WGPUFilterMode mMagFilter = ffi::WGPUFilterMode_Sentinel,
mMinFilter = ffi::WGPUFilterMode_Sentinel,
mMipmapFilter = ffi::WGPUFilterMode_Sentinel;
float mLodMinClamp = 0.0, mLodMaxClamp = 0.0;
Maybe<ffi::WGPUCompareFunction> mCompare;
uint8_t mAnisotropyClamp = 0;
};
} // namespace webgpu
} // namespace mozilla

Просмотреть файл

@ -25,7 +25,7 @@ const func = async function() {
await bufferRead.mapAsync(GPUMapMode.READ);
const data = bufferRead.getMappedRange();
const value = new Float32Array(data)[0];
const value = (new Float32Array(data))[0];
bufferRead.unmap();
ok(value == 1.0, 'value == 1.0');

Просмотреть файл

@ -214,7 +214,7 @@ interface GPUBufferUsage {
const GPUBufferUsageFlags QUERY_RESOLVE = 0x0200;
};
dictionary GPUBufferDescriptor {
dictionary GPUBufferDescriptor : GPUObjectDescriptorBase {
required GPUSize64 size;
required GPUBufferUsageFlags usage;
boolean mappedAtCreation = false;
@ -317,7 +317,7 @@ interface GPUTextureUsage {
const GPUTextureUsageFlags OUTPUT_ATTACHMENT = 0x10;
};
dictionary GPUTextureDescriptor {
dictionary GPUTextureDescriptor : GPUObjectDescriptorBase {
required GPUExtent3D size;
GPUIntegerCoordinate mipLevelCount = 1;
GPUSize32 sampleCount = 1;
@ -455,8 +455,8 @@ dictionary GPUBindGroupLayoutEntry {
required GPUIndex32 binding;
required GPUShaderStageFlags visibility;
required GPUBindingType type;
GPUTextureViewDimension viewDimension = "2d";
GPUTextureComponentType textureComponentType = "float";
GPUTextureViewDimension viewDimension;
GPUTextureComponentType textureComponentType;
boolean multisampled = false;
boolean hasDynamicOffset = false;
GPUTextureFormat storageTextureFormat;

57
gfx/wgpu/.github/workflows/ci.yml поставляемый
Просмотреть файл

@ -44,50 +44,46 @@ jobs:
strategy:
fail-fast: false
matrix:
name:
[
MacOS Stable,
MacOS Nightly,
Ubuntu Stable,
Ubuntu Nightly,
Windows Stable,
Windows Nightly,
]
os: [macos-10.15, ubuntu-18.04, windows-2019]
channel: [stable, nightly]
include:
- os: macos-10.15
name: MacOS Stable
- name: MacOS Stable
channel: stable
os: macos-10.15
build_command: cargo clippy
additional_core_features: trace
additional_player_features: winit
- os: macos-10.15
name: MacOS Nightly
- name: MacOS Nightly
os: macos-10.15
channel: nightly
build_command: cargo test
build_command: cargo test -- --nocapture
additional_core_features:
additional_player_features:
- os: ubuntu-18.04
name: Ubuntu Stable
- name: Ubuntu Stable
os: ubuntu-18.04
channel: stable
build_command: cargo clippy
additional_core_features: trace,replay
additional_player_features:
- os: ubuntu-18.04
name: Ubuntu Nightly
- name: Ubuntu Nightly
os: ubuntu-18.04
channel: nightly
build_command: cargo test
additional_core_features:
build_command: |
sudo add-apt-repository ppa:oibaf/graphics-drivers;
sudo apt-get install mesa-vulkan-drivers;
cargo test -- --nocapture;
additional_core_features: serial-pass
additional_player_features: winit
- os: windows-2019
name: Windows Stable
- name: Windows Stable
os: windows-2019
channel: stable
build_command: rustup default stable-msvc; cargo clippy
additional_core_features: trace
additional_core_features: trace,serial-pass
additional_player_features: renderdoc
- os: windows-2019
name: Windows Nightly
- name: Windows Nightly
os: windows-2019
channel: nightly
build_command: rustup default nightly-msvc; cargo test
build_command: rustup default nightly-msvc; cargo test -- --nocapture
additional_core_features:
additional_player_features:
steps:
@ -96,14 +92,15 @@ jobs:
name: Install latest nightly
uses: actions-rs/toolchain@v1
with:
# temporary due to https://github.com/rust-lang/rust/issues/72467
toolchain: nightly-2020-05-01
toolchain: nightly
override: true
- if: matrix.channel == 'stable'
run: rustup component add clippy
- name: cargo clippy/test
run: ${{ matrix.build_command }}
# build with no features first
- if: matrix.additional_core_features == ''
run: cargo check --manifest-path wgpu-core/Cargo.toml --no-default-features
- if: matrix.additional_core_features != ''
run: cargo check --manifest-path wgpu-core/Cargo.toml --features ${{ matrix.additional_core_features }}
- if: matrix.additional_player_features != ''
run: cargo check --manifest-path player/Cargo.toml --features ${{ matrix.additional_player_features }}
- run: ${{ matrix.build_command }}

7
gfx/wgpu/.monocodus Normal file
Просмотреть файл

@ -0,0 +1,7 @@
version: 1.1.0
rust:
formatter:
name: rustfmt
repo_checkers:
- name: rust-clippy

Просмотреть файл

@ -1,5 +1,57 @@
# Change Log
## v0.6 (2020-08-17)
- Crates:
- C API is moved to [another repository](https://github.com/gfx-rs/wgpu-native)
- `player`: standalone API replayer and tester
- Features:
- Proper error handling with all functions returning `Result`
- Graceful handling of "error" objects
- API tracing [infrastructure](http://kvark.github.io/wgpu/debug/test/ron/2020/07/18/wgpu-api-tracing.html)
- uploading data with `write_buffer`/`write_texture` queue operations
- reusable render bundles
- read-only depth/stencil attachments
- bind group layout deduplication
- Cows, cows everywhere
- Web+Native features:
- Depth clamping (feature)
- BC texture compression
- Native-only features:
- mappable primary buffers
- texture array bindings
- push constants
- multi-draw indirect
- Validation:
- all transfer operations
- all resource creation
- bind group matching to the layout
- experimental shader interface matching with Naga
## v0.5.6 (2020-07-09)
- add debug markers support
## v0.5.5 (2020-05-20)
- fix destruction of adapters, swap chains, and bind group layouts
- fix command pool leak with temporary threads
- improve assertion messages
- implement `From<TextureFormat>` for `TextureComponentType`
## v0.5.4 (2020-04-24)
- fix memory management of staging buffers
## v0.5.3 (2020-04-18)
- fix reading access to storage textures
- another fix to layout transitions for swapchain images
## v0.5.2 (2020-04-15)
- fix read-only storage flags
- fix pipeline layout life time
- improve various assert messages
## v0.5.1 (2020-04-10)
- fix tracking of swapchain images that are used multiple times in a command buffer
- fix tracking of initial usage of a resource across a command buffer
## v0.5 (2020-04-06)
- Crates:
- `wgpu-types`: common types between native and web targets
@ -8,6 +60,7 @@
- based on gfx-hal-0.5
- moved from Rendy to the new `gfx-memory` and `gfx-descriptor` crates
- passes are now recorded on the client side. The user is also responsible to keep all resources referenced in the pass up until it ends recording.
- coordinate system is changed to have Y up in the rendering space
- revised GPU lifetime tracking of all resources
- revised usage tracking logic
- all IDs are now non-zero

487
gfx/wgpu/Cargo.lock сгенерированный
Просмотреть файл

@ -29,6 +29,15 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8052e2d8aabbb8d556d6abbcce2a22b9590996c5f849b9c7ce4544a2e3b984e"
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
"winapi 0.3.8",
]
[[package]]
name = "approx"
version = "0.3.2"
@ -43,14 +52,17 @@ name = "arrayvec"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8"
dependencies = [
"serde",
]
[[package]]
name = "ash"
version = "0.30.0"
version = "0.31.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69daec0742947f33a85931fa3cb0ce5f07929159dcbd1f0cbb5b2912e2978509"
checksum = "c69a8137596e84c22d57f3da1b5de1d4230b1742a710091c85f4d7ce50f00f38"
dependencies = [
"libloading 0.5.2",
"libloading 0.6.2",
]
[[package]]
@ -83,22 +95,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff"
[[package]]
name = "battery"
version = "0.7.5"
name = "bit-set"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36a698e449024a5d18994a815998bf5e2e4bc1883e35a7d7ba95b6b69ee45907"
checksum = "6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de"
dependencies = [
"cfg-if",
"core-foundation 0.6.4",
"lazycell",
"libc",
"mach",
"nix 0.15.0",
"num-traits",
"uom",
"winapi 0.3.8",
"bit-vec",
]
[[package]]
name = "bit-vec"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0dc55f2d8a1a85650ac47858bb001b4c0dd73d79e3c455a842925e68d29cd3"
[[package]]
name = "bitflags"
version = "1.2.1"
@ -131,7 +141,7 @@ checksum = "7aa2097be53a00de9e8fc349fea6d76221f398f5c4fa550d420669906962d160"
dependencies = [
"mio",
"mio-extras",
"nix 0.14.1",
"nix",
]
[[package]]
@ -139,6 +149,9 @@ name = "cc"
version = "1.0.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bbb73db36c1246e9034e307d0fba23f9a2e251faa47ade70c1bd252220c8311"
dependencies = [
"jobserver",
]
[[package]]
name = "cfg-if"
@ -146,6 +159,23 @@ version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "cfg_aliases"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f6da2b592f5a2e590c3d94c44313bab369f2286cfe1e4134c830bf3317814866"
[[package]]
name = "chrono"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2"
dependencies = [
"num-integer",
"num-traits",
"time",
]
[[package]]
name = "cloudabi"
version = "0.0.3"
@ -187,7 +217,7 @@ checksum = "7ade49b65d560ca58c403a479bb396592b155c0185eada742ee323d1d68d6318"
dependencies = [
"bitflags",
"block",
"core-foundation 0.9.1",
"core-foundation 0.9.0",
"core-graphics-types",
"foreign-types",
"libc",
@ -200,16 +230,6 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536"
[[package]]
name = "core-foundation"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d"
dependencies = [
"core-foundation-sys 0.6.2",
"libc",
]
[[package]]
name = "core-foundation"
version = "0.7.0"
@ -222,20 +242,14 @@ dependencies = [
[[package]]
name = "core-foundation"
version = "0.9.1"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62"
checksum = "3b5ed8e7e76c45974e15e41bfa8d5b0483cd90191639e01d8f5f1e606299d3fb"
dependencies = [
"core-foundation-sys 0.8.2",
"core-foundation-sys 0.8.0",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b"
[[package]]
name = "core-foundation-sys"
version = "0.7.0"
@ -244,9 +258,9 @@ checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac"
[[package]]
name = "core-foundation-sys"
version = "0.8.2"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b"
checksum = "9a21fa21941700a3cd8fcb4091f361a6a712fac632f85d9f487cc892045d55c6"
[[package]]
name = "core-graphics"
@ -262,12 +276,12 @@ dependencies = [
[[package]]
name = "core-graphics-types"
version = "0.1.1"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a68b68b3446082644c91ac778bf50cd4104bfb002b5a6a7c44cca5a2c70788b"
checksum = "e92f5d519093a4178296707dbaa3880eae85a5ef5386675f361a1cf25376e93c"
dependencies = [
"bitflags",
"core-foundation 0.9.1",
"core-foundation 0.9.0",
"foreign-types",
"libc",
]
@ -328,6 +342,13 @@ version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "52ba6eb47c2131e784a38b726eb54c1e1484904f013e576a25354d0124161af6"
[[package]]
name = "dummy"
version = "0.1.0"
dependencies = [
"wgpu-core",
]
[[package]]
name = "env_logger"
version = "0.7.1"
@ -341,6 +362,12 @@ dependencies = [
"termcolor",
]
[[package]]
name = "fixedbitset"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d"
[[package]]
name = "float-cmp"
version = "0.7.0"
@ -392,9 +419,9 @@ dependencies = [
[[package]]
name = "generator"
version = "0.6.20"
version = "0.6.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "caaa160efb38ce00acbe4450d41a103fb3d2acdb17ff09a7cf38f3ac26af0738"
checksum = "f5e13c8f4607ff74f6d0fa37007cb95492531333f46bb9744f772d9e7830855c"
dependencies = [
"cc",
"libc",
@ -405,9 +432,9 @@ dependencies = [
[[package]]
name = "gfx-auxil"
version = "0.4.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67bdbf8e8d6883c70e5a0d7379ad8ab3ac95127a3761306b36122d8f1c177a8e"
checksum = "07cd956b592970f08545b9325b87580eb95a51843b6f39da27b8667fec1a1216"
dependencies = [
"fxhash",
"gfx-hal",
@ -416,10 +443,11 @@ dependencies = [
[[package]]
name = "gfx-backend-dx11"
version = "0.5.3"
version = "0.6.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc32a386fdb3240dea3df141dfc2343bf95e5719da36c3c5b0ecb6940a184f2b"
checksum = "52b0c3b8b2e0a60c1380a7c27652cd86b791e5d8312fb9592a7a59bd437e9532"
dependencies = [
"arrayvec",
"bitflags",
"gfx-auxil",
"gfx-hal",
@ -430,16 +458,19 @@ dependencies = [
"raw-window-handle",
"smallvec",
"spirv_cross",
"thunderdome",
"winapi 0.3.8",
"wio",
]
[[package]]
name = "gfx-backend-dx12"
version = "0.5.6"
version = "0.6.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "98c598fb38d6f51db0219ac26d16ff8b78bc134987acd1940438a5adc46b694f"
checksum = "bf8bc6329ebac49722b66a2b87d5d769bba1de584f51ffbf0cd31701d01050b0"
dependencies = [
"arrayvec",
"bit-set",
"bitflags",
"d3d12",
"gfx-auxil",
@ -454,19 +485,20 @@ dependencies = [
[[package]]
name = "gfx-backend-empty"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b67bd2d7bc022b257ddbdabc5fa3b10c29c292372c3409f2b6a6e3f4e11cdb85"
checksum = "2085227c12b78f6657a900c829f2d0deb46a9be3eaf86844fde263cdc218f77c"
dependencies = [
"gfx-hal",
"log",
"raw-window-handle",
]
[[package]]
name = "gfx-backend-metal"
version = "0.5.7"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0882c9bf80d6b57275acb6209ca60657b63ef71bb9e437f28a2417389c0b10"
checksum = "60ba1c77c112e7d35786dbd49ed26f2a76ce53a44bc09fe964935e4e35ed7f2b"
dependencies = [
"arrayvec",
"bitflags",
@ -490,15 +522,16 @@ dependencies = [
[[package]]
name = "gfx-backend-vulkan"
version = "0.5.7"
version = "0.6.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a4614727b750d62766db20d94032833f7293f9307f1b2103d5f8833889f863f"
checksum = "3a3a63cf61067a09b7d1ac480af3cb2ae0c5ede5bed294607bbd814cb1666c45"
dependencies = [
"arrayvec",
"ash",
"byteorder",
"core-graphics",
"core-graphics-types",
"gfx-hal",
"inplace_it",
"lazy_static",
"log",
"objc",
@ -510,10 +543,11 @@ dependencies = [
[[package]]
name = "gfx-descriptor"
version = "0.1.0"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bf35f5d66d1bc56e63e68d7528441453f25992bd954b84309d23c659df2c5da"
checksum = "cd8c7afcd000f279d541a490e27117e61037537279b9342279abf4938fe60c6b"
dependencies = [
"arrayvec",
"fxhash",
"gfx-hal",
"log",
@ -521,9 +555,9 @@ dependencies = [
[[package]]
name = "gfx-hal"
version = "0.5.3"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a18534b23d4c262916231511309bc1f307c74cda8dcb68b93a10ca213a22814b"
checksum = "18d0754f5b7a43915fd7466883b2d1bb0800d7cc4609178d0b27bf143b9e5123"
dependencies = [
"bitflags",
"raw-window-handle",
@ -531,9 +565,9 @@ dependencies = [
[[package]]
name = "gfx-memory"
version = "0.1.3"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2eed6cda674d9cd4d92229102dbd544292124533d236904f987e9afab456137"
checksum = "fe8d8855df07f438eb8a765e90356d5b821d644ea3b59b870091450b89576a9f"
dependencies = [
"fxhash",
"gfx-hal",
@ -542,6 +576,15 @@ dependencies = [
"slab",
]
[[package]]
name = "hashbrown"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e91b62f79061a0bc2e046024cb7ba44b08419ed238ecbd9adbd787434b9e8c25"
dependencies = [
"autocfg",
]
[[package]]
name = "hermit-abi"
version = "0.1.13"
@ -569,6 +612,22 @@ dependencies = [
"quick-error",
]
[[package]]
name = "indexmap"
version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86b45e59b16c76b11bf9738fd5d38879d3bd28ad292d7b313608becb17ae2df9"
dependencies = [
"autocfg",
"hashbrown",
]
[[package]]
name = "inplace_it"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd01a2a73f2f399df96b22dc88ea687ef4d76226284e7531ae3c7ee1dc5cb534"
[[package]]
name = "instant"
version = "0.1.4"
@ -584,12 +643,27 @@ dependencies = [
"libc",
]
[[package]]
name = "itoa"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c71313ebb9439f74b00d9d2dcec36440beaf57a6aa0623068441dd7cd81a7f2"
dependencies = [
"libc",
]
[[package]]
name = "js-sys"
version = "0.3.40"
@ -693,15 +767,6 @@ dependencies = [
"scoped-tls",
]
[[package]]
name = "mach"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "86dd2487cdfea56def77b88438a2c915fb45113c5319bfe7e14306ca4cd0b0e1"
dependencies = [
"libc",
]
[[package]]
name = "malloc_buf"
version = "0.0.6"
@ -711,6 +776,15 @@ dependencies = [
"libc",
]
[[package]]
name = "matchers"
version = "0.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1"
dependencies = [
"regex-automata",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
@ -792,14 +866,16 @@ dependencies = [
[[package]]
name = "naga"
version = "0.1.0"
source = "git+https://github.com/gfx-rs/naga?rev=bce6358eb1026c13d2f1c6d365af37afe8869a86#bce6358eb1026c13d2f1c6d365af37afe8869a86"
version = "0.2.0"
source = "git+https://github.com/gfx-rs/naga?rev=aa35110471ee7915e1f4e1de61ea41f2f32f92c4#aa35110471ee7915e1f4e1de61ea41f2f32f92c4"
dependencies = [
"bitflags",
"fxhash",
"log",
"num-traits",
"petgraph",
"spirv_headers",
"thiserror",
]
[[package]]
@ -858,23 +934,20 @@ dependencies = [
]
[[package]]
name = "nix"
version = "0.15.0"
name = "num-integer"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b2e0b4f3320ed72aaedb9a5ac838690a8047c7b275da22711fddff4f8a14229"
checksum = "8d59457e662d541ba17869cf51cf177c0b5f0cbf476c66bdc90bf1edac4f875b"
dependencies = [
"bitflags",
"cc",
"cfg-if",
"libc",
"void",
"autocfg",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.11"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096"
checksum = "ac267bcc07f48ee5f8935ab0d24f316fb722d7a1292e2913f0cc196b29ffd611"
dependencies = [
"autocfg",
]
@ -985,34 +1058,22 @@ dependencies = [
"winapi 0.3.8",
]
[[package]]
name = "peek-poke"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d93fd6a575ebf1ac2668d08443c97a22872cfb463fd8b7ddd141e9f6be59af2f"
dependencies = [
"peek-poke-derive",
]
[[package]]
name = "peek-poke-derive"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb44a25c5bba983be0fc8592dfaf3e6d0935ce8be0c6b15b2a39507af34a926"
dependencies = [
"proc-macro2 1.0.18",
"quote 1.0.7",
"syn",
"synstructure",
"unicode-xid 0.2.0",
]
[[package]]
name = "percent-encoding"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "petgraph"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "467d164a6de56270bd7c4d070df81d07beace25012d5103ced4e9ff08d6afdb7"
dependencies = [
"fixedbitset",
"indexmap",
]
[[package]]
name = "pkg-config"
version = "0.3.17"
@ -1029,7 +1090,9 @@ dependencies = [
"raw-window-handle",
"renderdoc",
"ron",
"serde",
"wgpu-core",
"wgpu-subscriber",
"wgpu-types",
"winit",
]
@ -1118,6 +1181,16 @@ dependencies = [
"thread_local",
]
[[package]]
name = "regex-automata"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4"
dependencies = [
"byteorder",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.18"
@ -1147,9 +1220,9 @@ checksum = "60d4a9058849c3e765fe2fa68b72c1416b1766f27eac3c52d7bac8712ea0d390"
[[package]]
name = "ron"
version = "0.6.2"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8a58080b7bb83b2ea28c3b7a9a994fd5e310330b7c8ca5258d99b98128ecfe4"
checksum = "a91260f321dbf3b5a16ff91c451dc9eb644ce72775a6812f9c3dfffe63818f8f"
dependencies = [
"base64",
"bitflags",
@ -1185,6 +1258,12 @@ dependencies = [
"stb_truetype",
]
[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "same-file"
version = "1.0.6"
@ -1241,6 +1320,26 @@ dependencies = [
"syn",
]
[[package]]
name = "serde_json"
version = "1.0.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec2c5d7e739bc07a3e73381a39d61fdb5f671c60c1df26a130690665803d8226"
dependencies = [
"itoa",
"ryu",
"serde",
]
[[package]]
name = "sharded-slab"
version = "0.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06d5a3f5166fb5b42a5439f2eee8b9de149e235961e3eb21c5808fc3ea17ff3e"
dependencies = [
"lazy_static",
]
[[package]]
name = "slab"
version = "0.4.2"
@ -1249,9 +1348,9 @@ checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8"
[[package]]
name = "smallvec"
version = "1.4.0"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7cb5678e1615754284ec264d9bb5b4c27d2018577fd90ac0ceb578591ed5ee4"
checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
[[package]]
name = "smithay-client-toolkit"
@ -1264,16 +1363,16 @@ dependencies = [
"dlib",
"lazy_static",
"memmap",
"nix 0.14.1",
"nix",
"wayland-client",
"wayland-protocols",
]
[[package]]
name = "spirv_cross"
version = "0.20.0"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a33a9478e9c78782dd694d05dee074703a9c4c74b511de742b88a7e8149f1b37"
checksum = "d8221f4aebf53a4447aebd4fe29ebff2c66dd2c2821e63675e09e85bd21c8633"
dependencies = [
"cc",
"js-sys",
@ -1319,18 +1418,6 @@ dependencies = [
"unicode-xid 0.2.0",
]
[[package]]
name = "synstructure"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701"
dependencies = [
"proc-macro2 1.0.18",
"quote 1.0.7",
"syn",
"unicode-xid 0.2.0",
]
[[package]]
name = "termcolor"
version = "1.1.0"
@ -1340,6 +1427,37 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "thiserror"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
dependencies = [
"proc-macro2 1.0.18",
"quote 1.0.7",
"syn",
]
[[package]]
name = "thread-id"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
dependencies = [
"libc",
"redox_syscall",
"winapi 0.3.8",
]
[[package]]
name = "thread_local"
version = "1.0.1"
@ -1349,6 +1467,22 @@ dependencies = [
"lazy_static",
]
[[package]]
name = "thunderdome"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7572415bd688d401c52f6e36f4c8e805b9ae1622619303b9fa835d531db0acae"
[[package]]
name = "time"
version = "0.1.43"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438"
dependencies = [
"libc",
"winapi 0.3.8",
]
[[package]]
name = "toml"
version = "0.5.6"
@ -1359,10 +1493,64 @@ dependencies = [
]
[[package]]
name = "typenum"
version = "1.12.0"
name = "tracing"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "373c8a200f9e67a0c95e62a4f52fbf80c23b4381c05a17845531982fa99e6b33"
checksum = "a41f40ed0e162c911ac6fcb53ecdc8134c46905fdbbae8c50add462a538b495f"
dependencies = [
"cfg-if",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715"
dependencies = [
"lazy_static",
]
[[package]]
name = "tracing-log"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e0f8c7178e13481ff6765bd169b33e8d554c5d2bbede5e32c356194be02b9b9"
dependencies = [
"lazy_static",
"log",
"tracing-core",
]
[[package]]
name = "tracing-serde"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6ccba2f8f16e0ed268fc765d9b7ff22e965e7185d32f8f1ec8294fe17d86e79"
dependencies = [
"serde",
"tracing-core",
]
[[package]]
name = "tracing-subscriber"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04a11b459109e38ff6e1b580bafef4142a11d44889f5d07424cbce2fd2a2a119"
dependencies = [
"ansi_term",
"chrono",
"lazy_static",
"matchers",
"regex",
"serde",
"serde_json",
"sharded-slab",
"smallvec",
"tracing-core",
"tracing-log",
"tracing-serde",
]
[[package]]
name = "unicode-xid"
@ -1376,22 +1564,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
[[package]]
name = "uom"
version = "0.26.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cec796ec5f7ac557631709079168286056205c51c60aac33f51764bdc7b8dc4"
dependencies = [
"num-traits",
"typenum",
]
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "void"
version = "1.0.2"
@ -1474,7 +1646,7 @@ dependencies = [
"downcast-rs",
"libc",
"mio",
"nix 0.14.1",
"nix",
"wayland-commons",
"wayland-scanner",
"wayland-sys",
@ -1486,7 +1658,7 @@ version = "0.23.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb66b0d1a27c39bbce712b6372131c6e25149f03ffb0cd017cf8f7de8d66dbdb"
dependencies = [
"nix 0.14.1",
"nix",
"wayland-sys",
]
@ -1525,11 +1697,11 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.5.0"
version = "0.6.0"
dependencies = [
"arrayvec",
"battery",
"bitflags",
"cfg_aliases",
"copyless",
"fxhash",
"gfx-backend-dx11",
@ -1540,26 +1712,35 @@ dependencies = [
"gfx-descriptor",
"gfx-hal",
"gfx-memory",
"log",
"loom",
"naga",
"parking_lot 0.11.0",
"peek-poke",
"raw-window-handle",
"ron",
"serde",
"smallvec",
"spirv_headers",
"vec_map",
"thiserror",
"tracing",
"wgpu-types",
]
[[package]]
name = "wgpu-subscriber"
version = "0.1.0"
source = "git+https://github.com/gfx-rs/subscriber.git?rev=cdc9feb53f152f9c41905ed9efeff2c1ed214361#cdc9feb53f152f9c41905ed9efeff2c1ed214361"
dependencies = [
"parking_lot 0.11.0",
"thread-id",
"tracing",
"tracing-log",
"tracing-subscriber",
]
[[package]]
name = "wgpu-types"
version = "0.5.0"
version = "0.6.0"
dependencies = [
"bitflags",
"peek-poke",
"serde",
]

Просмотреть файл

@ -1,6 +1,7 @@
[workspace]
members = [
"player",
"wgpu-core",
"dummy",
"player",
"wgpu-core",
"wgpu-types",
]

Просмотреть файл

@ -27,6 +27,6 @@ If you are looking for the native implementation or bindings to the API in other
DX12 | :heavy_check_mark: | | |
Vulkan | :heavy_check_mark: | :heavy_check_mark: | |
Metal | | | :heavy_check_mark: |
OpenGL | :construction: | :construction: | :construction: |
OpenGL | | :construction: | :construction: |
:heavy_check_mark: = Primary support — :white_check_mark: = Secondary support — :construction: = Unsupported, but support in progress

Просмотреть файл

@ -8,5 +8,3 @@ status = [
"Windows Stable",
"Windows Nightly",
]
timeout_sec = 18000 # 5 hours

16
gfx/wgpu/dummy/Cargo.toml Normal file
Просмотреть файл

@ -0,0 +1,16 @@
[package]
name = "dummy"
version = "0.1.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
]
edition = "2018"
publish = false
[features]
[dependencies.wgc]
path = "../wgpu-core"
package = "wgpu-core"
version = "0.6"
features = ["serial-pass", "trace"]

3
gfx/wgpu/dummy/README.md Normal file
Просмотреть файл

@ -0,0 +1,3 @@
# wgpu dummy
This is a dummy build target that makes `cargo check` and `cargo test` in the workspace cover the entire API.

Просмотреть файл

@ -0,0 +1,3 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */

Просмотреть файл

@ -19,20 +19,28 @@ env_logger = "0.7"
log = "0.4"
raw-window-handle = "0.3"
renderdoc = { version = "0.8", optional = true, default_features = false }
ron = "0.6.2"
ron = "0.6"
winit = { version = "0.22", optional = true }
[dependencies.wgt]
path = "../wgpu-types"
package = "wgpu-types"
version = "0.5"
version = "0.6"
features = ["replay"]
[dependencies.wgc]
path = "../wgpu-core"
package = "wgpu-core"
version = "0.5"
version = "0.6"
features = ["replay", "raw-window-handle"]
[dependencies.wgpu-subscriber]
git = "https://github.com/gfx-rs/subscriber.git"
rev = "cdc9feb53f152f9c41905ed9efeff2c1ed214361"
version = "0.1"
[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
gfx-backend-vulkan = { version = "0.5", features = ["x11"] }
gfx-backend-vulkan = { version = "0.6", features = ["x11"] }
[dev-dependencies]
serde = "1"

Просмотреть файл

@ -1,10 +1,11 @@
# wgpu player
This is application that allows replaying the `wgpu` workloads recorded elsewhere.
This is an application that allows replaying the `wgpu` workloads recorded elsewhere. You must use a player built from
the same revision that the application was linked against; otherwise the data may fail to load.
Launch as:
```rust
player <trace-dir>
play <trace-dir>
```
When built with the "winit" feature, it can replay the workloads that operate on a swapchain. It renders each frame sequentially, then waits for the user to close the window. When built without "winit", it launches in console mode and can replay any trace that doesn't use swapchains.

Просмотреть файл

@ -0,0 +1,171 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! This is a player for WebGPU traces.
!*/
use player::{gfx_select, GlobalPlay as _, IdentityPassThroughFactory};
use wgc::device::trace;
use std::{
fs,
path::{Path, PathBuf},
};
fn main() {
#[cfg(feature = "winit")]
use winit::{event_loop::EventLoop, window::WindowBuilder};
// Set up tracing output; WGPU_CHROME_TRACE optionally points at a file
// to receive a Chrome-format trace.
wgpu_subscriber::initialize_default_subscriber(
std::env::var("WGPU_CHROME_TRACE")
.as_ref()
.map(Path::new)
.ok(),
);
#[cfg(feature = "renderdoc")]
#[cfg_attr(feature = "winit", allow(unused))]
let mut rd = renderdoc::RenderDoc::<renderdoc::V110>::new()
.expect("Failed to connect to RenderDoc: are you running without it?");
//TODO: setting for the backend bits
//TODO: setting for the target frame, or controls
// The single CLI argument is the directory holding the recorded trace.
let dir = match std::env::args().nth(1) {
Some(arg) if Path::new(&arg).is_dir() => PathBuf::from(arg),
_ => panic!("Provide the dir path as the parameter"),
};
log::info!("Loading trace '{:?}'", dir);
// Deserialize the full action list from the RON trace file.
let file = fs::File::open(dir.join(trace::FILE_NAME)).unwrap();
let mut actions: Vec<trace::Action> = ron::de::from_reader(file).unwrap();
actions.reverse(); // allows us to pop from the top
log::info!("Found {} actions", actions.len());
#[cfg(feature = "winit")]
let event_loop = {
log::info!("Creating a window");
EventLoop::new()
};
#[cfg(feature = "winit")]
let window = WindowBuilder::new()
.with_title("wgpu player")
.with_resizable(false)
.build(&event_loop)
.unwrap();
let global =
wgc::hub::Global::new("player", IdentityPassThroughFactory, wgt::BackendBit::all());
let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
#[cfg(feature = "winit")]
let surface =
global.instance_create_surface(&window, wgc::id::TypedId::zip(0, 1, wgt::Backend::Empty));
// The first action must be Action::Init; it selects the backend and
// provides the device descriptor used for adapter/device creation.
let device = match actions.pop() {
Some(trace::Action::Init { desc, backend }) => {
log::info!("Initializing the device for backend: {:?}", backend);
let adapter = global
.request_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::LowPower,
#[cfg(feature = "winit")]
compatible_surface: Some(surface),
#[cfg(not(feature = "winit"))]
compatible_surface: None,
},
wgc::instance::AdapterInputs::IdSet(
&[wgc::id::TypedId::zip(0, 0, backend)],
|id| id.backend(),
),
)
.expect("Unable to find an adapter for selected backend");
let info = gfx_select!(adapter => global.adapter_get_info(adapter)).unwrap();
log::info!("Picked '{}'", info.name);
gfx_select!(adapter => global.adapter_request_device(
adapter,
&desc,
None,
wgc::id::TypedId::zip(1, 0, wgt::Backend::Empty)
))
.expect("Failed to request device")
}
_ => panic!("Expected Action::Init"),
};
log::info!("Executing actions");
// Console mode: drain all actions in one shot, optionally wrapped in a
// RenderDoc frame capture, then block until the device is idle.
#[cfg(not(feature = "winit"))]
{
#[cfg(feature = "renderdoc")]
rd.start_frame_capture(std::ptr::null(), std::ptr::null());
while let Some(action) = actions.pop() {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
#[cfg(feature = "renderdoc")]
rd.end_frame_capture(std::ptr::null(), std::ptr::null());
gfx_select!(device => global.device_poll(device, true)).unwrap();
}
// Windowed mode: replay actions frame by frame from the event loop,
// presenting on each PresentSwapChain action.
#[cfg(feature = "winit")]
{
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::ControlFlow,
};
let mut frame_count = 0;
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
window.request_redraw();
}
// Process actions until a frame is presented or the trace ends.
Event::RedrawRequested(_) => loop {
match actions.pop() {
Some(trace::Action::CreateSwapChain(id, desc)) => {
log::info!("Initializing the swapchain");
assert_eq!(id.to_surface_id(), surface);
// Resize the window to match the recorded swapchain extent.
window.set_inner_size(winit::dpi::PhysicalSize::new(
desc.width,
desc.height,
));
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
}
Some(trace::Action::PresentSwapChain(id)) => {
frame_count += 1;
log::debug!("Presenting frame {}", frame_count);
gfx_select!(device => global.swap_chain_present(id));
break;
}
Some(action) => {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
None => break,
}
},
// Escape key or window close exits the replay.
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(VirtualKeyCode::Escape),
state: ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
_ => {}
},
Event::LoopDestroyed => {
log::info!("Closing");
// Wait for the GPU to finish before tearing down.
gfx_select!(device => global.device_poll(device, true));
}
_ => {}
}
});
}
}

294
gfx/wgpu/player/src/lib.rs Normal file
Просмотреть файл

@ -0,0 +1,294 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! This is a player library for WebGPU traces.
*
* # Notes
* - we call device_maintain_ids() before creating any refcounted resource,
* which is basically everything except for BGL and shader modules,
* so that we don't accidentally try to use the same ID.
!*/
use wgc::device::trace;
use std::{borrow::Cow, fmt::Debug, fs, marker::PhantomData, path::Path};
// Dispatches a `Global` method call to the concrete gfx backend encoded in
// the given id, e.g. `gfx_select!(id => global.method(args))`. Only the
// backends compiled in for the current target OS are matched; any other
// backend value is unreachable.
#[macro_export]
macro_rules! gfx_select {
($id:expr => $global:ident.$method:ident( $($param:expr),+ )) => {
match $id.backend() {
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
wgt::Backend::Vulkan => $global.$method::<wgc::backend::Vulkan>( $($param),+ ),
#[cfg(any(target_os = "ios", target_os = "macos"))]
wgt::Backend::Metal => $global.$method::<wgc::backend::Metal>( $($param),+ ),
#[cfg(windows)]
wgt::Backend::Dx12 => $global.$method::<wgc::backend::Dx12>( $($param),+ ),
#[cfg(windows)]
wgt::Backend::Dx11 => $global.$method::<wgc::backend::Dx11>( $($param),+ ),
_ => unreachable!()
}
};
}
/// Identity handler that passes recorded ids through unchanged, except that
/// it stamps them with the backend actually in use by the player.
#[derive(Debug)]
pub struct IdentityPassThrough<I>(PhantomData<I>);
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandler<I> for IdentityPassThrough<I> {
type Input = I;
// Re-zip the id's index/epoch with the current backend, discarding the
// backend the id was recorded with.
fn process(&self, id: I, backend: wgt::Backend) -> I {
let (index, epoch, _backend) = id.unzip();
I::zip(index, epoch, backend)
}
// Ids come from the trace, so there is nothing to recycle here.
fn free(&self, _id: I) {}
}
/// Factory producing `IdentityPassThrough` handlers for every id type,
/// letting the player drive `wgc::hub::Global` with ids taken from a trace.
pub struct IdentityPassThroughFactory;
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandlerFactory<I>
for IdentityPassThroughFactory
{
type Filter = IdentityPassThrough<I>;
fn spawn(&self, _min_index: u32) -> Self::Filter {
IdentityPassThrough(PhantomData)
}
}
impl wgc::hub::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {}
/// Replay interface implemented for `wgc::hub::Global`: turning recorded
/// trace commands/actions back into live API calls.
pub trait GlobalPlay {
// Record the given trace commands into `encoder` and finish it,
// returning the resulting command buffer id.
fn encode_commands<B: wgc::hub::GfxBackend>(
&self,
encoder: wgc::id::CommandEncoderId,
commands: Vec<trace::Command>,
) -> wgc::id::CommandBufferId;
// Execute a single trace action against `device`. `dir` is the trace
// directory (for loading auxiliary data files); `comb_manager` supplies
// fresh command-buffer ids.
fn process<B: wgc::hub::GfxBackend>(
&self,
device: wgc::id::DeviceId,
action: trace::Action,
dir: &Path,
comb_manager: &mut wgc::hub::IdentityManager,
);
}
impl GlobalPlay for wgc::hub::Global<IdentityPassThroughFactory> {
    /// Replay every recorded command into `encoder`, then finish it and
    /// return the resulting command buffer id. Any error from the hub
    /// aborts the player immediately via `unwrap`.
    fn encode_commands<B: wgc::hub::GfxBackend>(
        &self,
        encoder: wgc::id::CommandEncoderId,
        commands: Vec<trace::Command>,
    ) -> wgc::id::CommandBufferId {
        for command in commands {
            match command {
                trace::Command::CopyBufferToBuffer {
                    src,
                    src_offset,
                    dst,
                    dst_offset,
                    size,
                } => self
                    .command_encoder_copy_buffer_to_buffer::<B>(
                        encoder, src, src_offset, dst, dst_offset, size,
                    )
                    .unwrap(),
                trace::Command::CopyBufferToTexture { src, dst, size } => self
                    .command_encoder_copy_buffer_to_texture::<B>(encoder, &src, &dst, &size)
                    .unwrap(),
                trace::Command::CopyTextureToBuffer { src, dst, size } => self
                    .command_encoder_copy_texture_to_buffer::<B>(encoder, &src, &dst, &size)
                    .unwrap(),
                trace::Command::CopyTextureToTexture { src, dst, size } => self
                    .command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size)
                    .unwrap(),
                // Passes are replayed through the `*_impl` entry points that
                // take the pre-recorded `BasePass` data directly.
                trace::Command::RunComputePass { base } => {
                    self.command_encoder_run_compute_pass_impl::<B>(encoder, base.as_ref())
                        .unwrap();
                }
                trace::Command::RunRenderPass {
                    base,
                    target_colors,
                    target_depth_stencil,
                } => {
                    self.command_encoder_run_render_pass_impl::<B>(
                        encoder,
                        base.as_ref(),
                        &target_colors,
                        target_depth_stencil.as_ref(),
                    )
                    .unwrap();
                }
            }
        }
        self.command_encoder_finish::<B>(encoder, &wgt::CommandBufferDescriptor { label: None })
            .unwrap()
    }
    /// Execute a single trace action against `device`. Data files referenced
    /// by actions (shader sources, buffer/texture payloads) are resolved
    /// relative to `dir`. Panics on any hub error — the player is a
    /// debugging tool and wants loud failures.
    fn process<B: wgc::hub::GfxBackend>(
        &self,
        device: wgc::id::DeviceId,
        action: trace::Action,
        dir: &Path,
        comb_manager: &mut wgc::hub::IdentityManager,
    ) {
        use wgc::device::trace::Action as A;
        log::info!("action {:?}", action);
        match action {
            // `Init` must have been consumed by the caller before the
            // replay loop starts (it selects the device/backend).
            A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"),
            A::CreateSwapChain { .. } | A::PresentSwapChain(_) => {
                panic!("Unexpected SwapChain action: winit feature is not enabled")
            }
            // Per the module docs, `device_maintain_ids` is called before
            // creating refcounted resources so a recycled trace id doesn't
            // collide with one that is still alive on the device timeline.
            A::CreateBuffer(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_buffer::<B>(device, &desc, id).unwrap();
            }
            // Note the naming mismatch: the trace's `Free*` maps to the
            // hub's `*_destroy` (release the GPU resource), while the
            // trace's `Destroy*` maps to `*_drop` (drop the id).
            A::FreeBuffer(id) => {
                self.buffer_destroy::<B>(id).unwrap();
            }
            A::DestroyBuffer(id) => {
                self.buffer_drop::<B>(id, true);
            }
            A::CreateTexture(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_texture::<B>(device, &desc, id).unwrap();
            }
            A::FreeTexture(id) => {
                self.texture_destroy::<B>(id).unwrap();
            }
            A::DestroyTexture(id) => {
                self.texture_drop::<B>(id, true);
            }
            A::CreateTextureView {
                id,
                parent_id,
                desc,
            } => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.texture_create_view::<B>(parent_id, &desc, id).unwrap();
            }
            A::DestroyTextureView(id) => {
                self.texture_view_drop::<B>(id).unwrap();
            }
            A::CreateSampler(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_sampler::<B>(device, &desc, id).unwrap();
            }
            A::DestroySampler(id) => {
                self.sampler_drop::<B>(id);
            }
            A::GetSwapChainTexture { id, parent_id } => {
                // `id` is None when the recorded acquire failed; skip those.
                if let Some(id) = id {
                    self.swap_chain_get_current_texture_view::<B>(parent_id, id)
                        .unwrap()
                        .view_id
                        .unwrap();
                }
            }
            // BGLs (like shader modules) are not refcounted, so no
            // `device_maintain_ids` here — see module docs.
            A::CreateBindGroupLayout(id, desc) => {
                self.device_create_bind_group_layout::<B>(device, &desc, id)
                    .unwrap();
            }
            A::DestroyBindGroupLayout(id) => {
                self.bind_group_layout_drop::<B>(id);
            }
            A::CreatePipelineLayout(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_pipeline_layout::<B>(device, &desc, id)
                    .unwrap();
            }
            A::DestroyPipelineLayout(id) => {
                self.pipeline_layout_drop::<B>(id);
            }
            A::CreateBindGroup(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_bind_group::<B>(device, &desc, id)
                    .unwrap();
            }
            A::DestroyBindGroup(id) => {
                self.bind_group_drop::<B>(id);
            }
            A::CreateShaderModule { id, data } => {
                // Shader source lives in a separate file next to the trace:
                // `.wgsl` is loaded as text, anything else is assumed to be
                // SPIR-V and decoded as little-endian u32 words.
                // NOTE(review): `chunks(4)` yields a short final chunk if the
                // byte length is not a multiple of 4, and `c[3]` would panic.
                let source = if data.ends_with(".wgsl") {
                    let code = fs::read_to_string(dir.join(data)).unwrap();
                    wgc::pipeline::ShaderModuleSource::Wgsl(Cow::Owned(code))
                } else {
                    let byte_vec = fs::read(dir.join(data)).unwrap();
                    let spv = byte_vec
                        .chunks(4)
                        .map(|c| u32::from_le_bytes([c[0], c[1], c[2], c[3]]))
                        .collect::<Vec<_>>();
                    wgc::pipeline::ShaderModuleSource::SpirV(Cow::Owned(spv))
                };
                self.device_create_shader_module::<B>(device, source, id)
                    .unwrap();
            }
            A::DestroyShaderModule(id) => {
                self.shader_module_drop::<B>(id);
            }
            A::CreateComputePipeline(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_compute_pipeline::<B>(device, &desc, id, None)
                    .unwrap();
            }
            A::DestroyComputePipeline(id) => {
                self.compute_pipeline_drop::<B>(id);
            }
            A::CreateRenderPipeline(id, desc) => {
                self.device_maintain_ids::<B>(device).unwrap();
                self.device_create_render_pipeline::<B>(device, &desc, id, None)
                    .unwrap();
            }
            A::DestroyRenderPipeline(id) => {
                self.render_pipeline_drop::<B>(id);
            }
            A::CreateRenderBundle { id, desc, base } => {
                let bundle =
                    wgc::command::RenderBundleEncoder::new(&desc, device, Some(base)).unwrap();
                self.render_bundle_encoder_finish::<B>(
                    bundle,
                    &wgt::RenderBundleDescriptor { label: desc.label },
                    id,
                )
                .unwrap();
            }
            A::DestroyRenderBundle(id) => {
                self.render_bundle_drop::<B>(id);
            }
            A::WriteBuffer {
                id,
                data,
                range,
                queued,
            } => {
                // `queued` selects between the queue-timeline write and the
                // immediate path (wait for the buffer, then set sub-data),
                // matching how the write was originally recorded.
                let bin = std::fs::read(dir.join(data)).unwrap();
                let size = (range.end - range.start) as usize;
                if queued {
                    self.queue_write_buffer::<B>(device, id, range.start, &bin)
                        .unwrap();
                } else {
                    self.device_wait_for_buffer::<B>(device, id).unwrap();
                    self.device_set_buffer_sub_data::<B>(device, id, range.start, &bin[..size])
                        .unwrap();
                }
            }
            A::WriteTexture {
                to,
                data,
                layout,
                size,
            } => {
                let bin = std::fs::read(dir.join(data)).unwrap();
                self.queue_write_texture::<B>(device, &to, &bin, &layout, &size)
                    .unwrap();
            }
            A::Submit(_index, commands) => {
                // Allocate a fresh encoder id, replay the recorded commands
                // into it, and submit the finished buffer to the queue.
                let encoder = self
                    .device_create_command_encoder::<B>(
                        device,
                        &wgt::CommandEncoderDescriptor { label: None },
                        comb_manager.alloc(device.backend()),
                    )
                    .unwrap();
                let cmdbuf = self.encode_commands::<B>(encoder, commands);
                self.queue_submit::<B>(device, &[cmdbuf]).unwrap();
            }
        }
    }
}

Просмотреть файл

@ -1,597 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! This is a player for WebGPU traces.
*
* # Notes
* - we call device_maintain_ids() before creating any refcounted resource,
* which is basically everything except for BGL and shader modules,
* so that we don't accidentally try to use the same ID.
!*/
use wgc::device::trace;
use std::{
ffi::CString,
fmt::Debug,
fs::File,
marker::PhantomData,
path::{Path, PathBuf},
ptr,
};
macro_rules! gfx_select {
($id:expr => $global:ident.$method:ident( $($param:expr),+ )) => {
match $id.backend() {
#[cfg(not(any(target_os = "ios", target_os = "macos")))]
wgt::Backend::Vulkan => $global.$method::<wgc::backend::Vulkan>( $($param),+ ),
#[cfg(any(target_os = "ios", target_os = "macos"))]
wgt::Backend::Metal => $global.$method::<wgc::backend::Metal>( $($param),+ ),
#[cfg(windows)]
wgt::Backend::Dx12 => $global.$method::<wgc::backend::Dx12>( $($param),+ ),
#[cfg(windows)]
wgt::Backend::Dx11 => $global.$method::<wgc::backend::Dx11>( $($param),+ ),
_ => unreachable!()
}
};
}
struct Label(Option<CString>);
impl Label {
fn new(text: &str) -> Self {
Self(if text.is_empty() {
None
} else {
Some(CString::new(text).expect("invalid label"))
})
}
fn as_ptr(&self) -> *const std::os::raw::c_char {
match self.0 {
Some(ref c_string) => c_string.as_ptr(),
None => ptr::null(),
}
}
}
struct OwnedProgrammableStage {
desc: wgc::pipeline::ProgrammableStageDescriptor,
#[allow(dead_code)]
entry_point: CString,
}
impl From<trace::ProgrammableStageDescriptor> for OwnedProgrammableStage {
fn from(stage: trace::ProgrammableStageDescriptor) -> Self {
let entry_point = CString::new(stage.entry_point.as_str()).unwrap();
OwnedProgrammableStage {
desc: wgc::pipeline::ProgrammableStageDescriptor {
module: stage.module,
entry_point: entry_point.as_ptr(),
},
entry_point,
}
}
}
#[derive(Debug)]
struct IdentityPassThrough<I>(PhantomData<I>);
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandler<I> for IdentityPassThrough<I> {
type Input = I;
fn process(&self, id: I, backend: wgt::Backend) -> I {
let (index, epoch, _backend) = id.unzip();
I::zip(index, epoch, backend)
}
fn free(&self, _id: I) {}
}
struct IdentityPassThroughFactory;
impl<I: Clone + Debug + wgc::id::TypedId> wgc::hub::IdentityHandlerFactory<I>
for IdentityPassThroughFactory
{
type Filter = IdentityPassThrough<I>;
fn spawn(&self, _min_index: u32) -> Self::Filter {
IdentityPassThrough(PhantomData)
}
}
impl wgc::hub::GlobalIdentityHandlerFactory for IdentityPassThroughFactory {}
trait GlobalExt {
fn encode_commands<B: wgc::hub::GfxBackend>(
&self,
encoder: wgc::id::CommandEncoderId,
commands: Vec<trace::Command>,
) -> wgc::id::CommandBufferId;
fn process<B: wgc::hub::GfxBackend>(
&self,
device: wgc::id::DeviceId,
action: trace::Action,
dir: &PathBuf,
comb_manager: &mut wgc::hub::IdentityManager,
);
}
impl GlobalExt for wgc::hub::Global<IdentityPassThroughFactory> {
fn encode_commands<B: wgc::hub::GfxBackend>(
&self,
encoder: wgc::id::CommandEncoderId,
commands: Vec<trace::Command>,
) -> wgc::id::CommandBufferId {
for command in commands {
match command {
trace::Command::CopyBufferToBuffer {
src,
src_offset,
dst,
dst_offset,
size,
} => self.command_encoder_copy_buffer_to_buffer::<B>(
encoder, src, src_offset, dst, dst_offset, size,
),
trace::Command::CopyBufferToTexture { src, dst, size } => {
self.command_encoder_copy_buffer_to_texture::<B>(encoder, &src, &dst, &size)
}
trace::Command::CopyTextureToBuffer { src, dst, size } => {
self.command_encoder_copy_texture_to_buffer::<B>(encoder, &src, &dst, &size)
}
trace::Command::CopyTextureToTexture { src, dst, size } => {
self.command_encoder_copy_texture_to_texture::<B>(encoder, &src, &dst, &size)
}
trace::Command::RunComputePass {
commands,
dynamic_offsets,
} => unsafe {
let mut offsets = &dynamic_offsets[..];
let mut pass = wgc::command::RawPass::new_compute(encoder);
for com in commands {
pass.encode(&com);
if let wgc::command::ComputeCommand::SetBindGroup {
num_dynamic_offsets,
..
} = com
{
pass.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
let (data, _) = pass.finish_compute();
self.command_encoder_run_compute_pass::<B>(encoder, &data);
},
trace::Command::RunRenderPass {
target_colors,
target_depth_stencil,
commands,
dynamic_offsets,
} => unsafe {
let mut offsets = &dynamic_offsets[..];
let mut pass = wgc::command::RawPass::new_render(
encoder,
&wgc::command::RenderPassDescriptor {
color_attachments: target_colors.as_ptr(),
color_attachments_length: target_colors.len(),
depth_stencil_attachment: target_depth_stencil.as_ref(),
},
);
for com in commands {
pass.encode(&com);
if let wgc::command::RenderCommand::SetBindGroup {
num_dynamic_offsets,
..
} = com
{
pass.encode_slice(&offsets[..num_dynamic_offsets as usize]);
offsets = &offsets[num_dynamic_offsets as usize..];
}
}
let (data, _) = pass.finish_render();
self.command_encoder_run_render_pass::<B>(encoder, &data);
},
}
}
self.command_encoder_finish::<B>(encoder, &wgt::CommandBufferDescriptor { todo: 0 })
}
fn process<B: wgc::hub::GfxBackend>(
&self,
device: wgc::id::DeviceId,
action: trace::Action,
dir: &PathBuf,
comb_manager: &mut wgc::hub::IdentityManager,
) {
use wgc::device::trace::Action as A;
match action {
A::Init { .. } => panic!("Unexpected Action::Init: has to be the first action only"),
A::CreateSwapChain { .. } | A::PresentSwapChain(_) => {
panic!("Unexpected SwapChain action: winit feature is not enabled")
}
A::CreateBuffer { id, desc } => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_create_buffer::<B>(device, &desc.map_label(|_| label.as_ptr()), id);
}
A::DestroyBuffer(id) => {
self.buffer_destroy::<B>(id);
}
A::CreateTexture { id, desc } => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_create_texture::<B>(device, &desc.map_label(|_| label.as_ptr()), id);
}
A::DestroyTexture(id) => {
self.texture_destroy::<B>(id);
}
A::CreateTextureView {
id,
parent_id,
desc,
} => {
let label = desc.as_ref().map_or(Label(None), |d| Label::new(&d.label));
self.device_maintain_ids::<B>(device);
self.texture_create_view::<B>(
parent_id,
desc.map(|d| d.map_label(|_| label.as_ptr())).as_ref(),
id,
);
}
A::DestroyTextureView(id) => {
self.texture_view_destroy::<B>(id);
}
A::CreateSampler { id, desc } => {
let label = Label::new(&desc.label);
self.device_maintain_ids::<B>(device);
self.device_create_sampler::<B>(device, &desc.map_label(|_| label.as_ptr()), id);
}
A::DestroySampler(id) => {
self.sampler_destroy::<B>(id);
}
A::GetSwapChainTexture { id, parent_id } => {
if let Some(id) = id {
self.swap_chain_get_next_texture::<B>(parent_id, id)
.view_id
.unwrap();
}
}
A::CreateBindGroupLayout { id, label, entries } => {
self.device_create_bind_group_layout::<B>(
device,
&wgt::BindGroupLayoutDescriptor {
label: Some(&label),
bindings: &entries,
},
id,
)
.unwrap();
}
A::DestroyBindGroupLayout(id) => {
self.bind_group_layout_destroy::<B>(id);
}
A::CreatePipelineLayout {
id,
bind_group_layouts,
} => {
self.device_maintain_ids::<B>(device);
self.device_create_pipeline_layout::<B>(
device,
&wgc::binding_model::PipelineLayoutDescriptor {
bind_group_layouts: bind_group_layouts.as_ptr(),
bind_group_layouts_length: bind_group_layouts.len(),
},
id,
)
.unwrap();
}
A::DestroyPipelineLayout(id) => {
self.pipeline_layout_destroy::<B>(id);
}
A::CreateBindGroup {
id,
label,
layout_id,
entries,
} => {
use wgc::binding_model as bm;
let entry_vec = entries
.iter()
.map(|(binding, res)| wgc::binding_model::BindGroupEntry {
binding: *binding,
resource: match *res {
trace::BindingResource::Buffer { id, offset, size } => {
bm::BindingResource::Buffer(bm::BufferBinding {
buffer: id,
offset,
size,
})
}
trace::BindingResource::Sampler(id) => bm::BindingResource::Sampler(id),
trace::BindingResource::TextureView(id) => {
bm::BindingResource::TextureView(id)
}
trace::BindingResource::TextureViewArray(ref id_array) => {
bm::BindingResource::TextureViewArray(id_array)
}
},
})
.collect::<Vec<_>>();
self.device_maintain_ids::<B>(device);
self.device_create_bind_group::<B>(
device,
&wgc::binding_model::BindGroupDescriptor {
label: Some(&label),
layout: layout_id,
bindings: &entry_vec,
},
id,
);
}
A::DestroyBindGroup(id) => {
self.bind_group_destroy::<B>(id);
}
A::CreateShaderModule { id, data } => {
let spv = wgt::read_spirv(File::open(dir.join(data)).unwrap()).unwrap();
self.device_create_shader_module::<B>(
device,
&wgc::pipeline::ShaderModuleDescriptor {
code: wgc::U32Array {
bytes: spv.as_ptr(),
length: spv.len(),
},
},
id,
);
}
A::DestroyShaderModule(id) => {
self.shader_module_destroy::<B>(id);
}
A::CreateComputePipeline { id, desc } => {
let cs_stage = OwnedProgrammableStage::from(desc.compute_stage);
self.device_maintain_ids::<B>(device);
self.device_create_compute_pipeline::<B>(
device,
&wgc::pipeline::ComputePipelineDescriptor {
layout: desc.layout,
compute_stage: cs_stage.desc,
},
id,
)
.unwrap();
}
A::DestroyComputePipeline(id) => {
self.compute_pipeline_destroy::<B>(id);
}
A::CreateRenderPipeline { id, desc } => {
let vs_stage = OwnedProgrammableStage::from(desc.vertex_stage);
let fs_stage = desc.fragment_stage.map(OwnedProgrammableStage::from);
let vertex_buffers = desc
.vertex_state
.vertex_buffers
.iter()
.map(|vb| wgc::pipeline::VertexBufferLayoutDescriptor {
array_stride: vb.array_stride,
step_mode: vb.step_mode,
attributes: vb.attributes.as_ptr(),
attributes_length: vb.attributes.len(),
})
.collect::<Vec<_>>();
self.device_maintain_ids::<B>(device);
self.device_create_render_pipeline::<B>(
device,
&wgc::pipeline::RenderPipelineDescriptor {
layout: desc.layout,
vertex_stage: vs_stage.desc,
fragment_stage: fs_stage.as_ref().map_or(ptr::null(), |s| &s.desc),
primitive_topology: desc.primitive_topology,
rasterization_state: desc
.rasterization_state
.as_ref()
.map_or(ptr::null(), |rs| rs),
color_states: desc.color_states.as_ptr(),
color_states_length: desc.color_states.len(),
depth_stencil_state: desc
.depth_stencil_state
.as_ref()
.map_or(ptr::null(), |ds| ds),
vertex_state: wgc::pipeline::VertexStateDescriptor {
index_format: desc.vertex_state.index_format,
vertex_buffers: vertex_buffers.as_ptr(),
vertex_buffers_length: vertex_buffers.len(),
},
sample_count: desc.sample_count,
sample_mask: desc.sample_mask,
alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
},
id,
)
.unwrap();
}
A::DestroyRenderPipeline(id) => {
self.render_pipeline_destroy::<B>(id);
}
A::WriteBuffer {
id,
data,
range,
queued,
} => {
let bin = std::fs::read(dir.join(data)).unwrap();
let size = (range.end - range.start) as usize;
if queued {
self.queue_write_buffer::<B>(device, id, range.start, &bin);
} else {
self.device_wait_for_buffer::<B>(device, id);
self.device_set_buffer_sub_data::<B>(device, id, range.start, &bin[..size]);
}
}
A::WriteTexture {
to,
data,
layout,
size,
} => {
let bin = std::fs::read(dir.join(data)).unwrap();
self.queue_write_texture::<B>(device, &to, &bin, &layout, &size);
}
A::Submit(_index, commands) => {
let encoder = self.device_create_command_encoder::<B>(
device,
&wgt::CommandEncoderDescriptor { label: ptr::null() },
comb_manager.alloc(device.backend()),
);
let comb = self.encode_commands::<B>(encoder, commands);
self.queue_submit::<B>(device, &[comb]);
}
}
}
}
fn main() {
#[cfg(feature = "winit")]
use winit::{event_loop::EventLoop, window::WindowBuilder};
env_logger::init();
#[cfg(feature = "renderdoc")]
let mut rd = renderdoc::RenderDoc::<renderdoc::V110>::new()
.expect("Failed to connect to RenderDoc: are you running without it?");
//TODO: setting for the backend bits
//TODO: setting for the target frame, or controls
let dir = match std::env::args().nth(1) {
Some(arg) if Path::new(&arg).is_dir() => PathBuf::from(arg),
_ => panic!("Provide the dir path as the parameter"),
};
log::info!("Loading trace '{:?}'", dir);
let file = File::open(dir.join(trace::FILE_NAME)).unwrap();
let mut actions: Vec<trace::Action> = ron::de::from_reader(file).unwrap();
actions.reverse(); // allows us to pop from the top
log::info!("Found {} actions", actions.len());
#[cfg(feature = "winit")]
let event_loop = {
log::info!("Creating a window");
EventLoop::new()
};
#[cfg(feature = "winit")]
let window = WindowBuilder::new()
.with_title("wgpu player")
.with_resizable(false)
.build(&event_loop)
.unwrap();
let global = wgc::hub::Global::new("player", IdentityPassThroughFactory);
let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
#[cfg(feature = "winit")]
let surface =
global.instance_create_surface(&window, wgc::id::TypedId::zip(0, 1, wgt::Backend::Empty));
let device = match actions.pop() {
Some(trace::Action::Init { desc, backend }) => {
log::info!("Initializing the device for backend: {:?}", backend);
let adapter = global
.pick_adapter(
&wgc::instance::RequestAdapterOptions {
power_preference: wgt::PowerPreference::Default,
#[cfg(feature = "winit")]
compatible_surface: Some(surface),
#[cfg(not(feature = "winit"))]
compatible_surface: None,
},
unsafe { wgt::UnsafeExtensions::allow() },
wgc::instance::AdapterInputs::IdSet(
&[wgc::id::TypedId::zip(0, 0, backend)],
|id| id.backend(),
),
)
.expect("Unable to find an adapter for selected backend");
let info = gfx_select!(adapter => global.adapter_get_info(adapter));
log::info!("Picked '{}'", info.name);
gfx_select!(adapter => global.adapter_request_device(
adapter,
&desc,
None,
wgc::id::TypedId::zip(1, 0, wgt::Backend::Empty)
))
}
_ => panic!("Expected Action::Init"),
};
log::info!("Executing actions");
#[cfg(not(feature = "winit"))]
{
#[cfg(feature = "renderdoc")]
rd.start_frame_capture(ptr::null(), ptr::null());
while let Some(action) = actions.pop() {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
#[cfg(feature = "renderdoc")]
rd.end_frame_capture(ptr::null(), ptr::null());
gfx_select!(device => global.device_poll(device, true));
}
#[cfg(feature = "winit")]
{
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::ControlFlow,
};
let mut frame_count = 0;
event_loop.run(move |event, _, control_flow| {
*control_flow = ControlFlow::Poll;
match event {
Event::MainEventsCleared => {
window.request_redraw();
}
Event::RedrawRequested(_) => loop {
match actions.pop() {
Some(trace::Action::CreateSwapChain { id, desc }) => {
log::info!("Initializing the swapchain");
assert_eq!(id.to_surface_id(), surface);
window.set_inner_size(winit::dpi::PhysicalSize::new(
desc.width,
desc.height,
));
gfx_select!(device => global.device_create_swap_chain(device, surface, &desc));
}
Some(trace::Action::PresentSwapChain(id)) => {
frame_count += 1;
log::debug!("Presenting frame {}", frame_count);
gfx_select!(device => global.swap_chain_present(id));
break;
}
Some(action) => {
gfx_select!(device => global.process(device, action, &dir, &mut command_buffer_id_manager));
}
None => break,
}
},
Event::WindowEvent { event, .. } => match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(VirtualKeyCode::Escape),
state: ElementState::Pressed,
..
},
..
}
| WindowEvent::CloseRequested => {
*control_flow = ControlFlow::Exit;
}
_ => {}
},
Event::LoopDestroyed => {
log::info!("Closing");
gfx_select!(device => global.device_poll(device, true));
}
_ => {}
}
});
}
}

Просмотреть файл

@ -0,0 +1,8 @@
(
backends: (bits: 0xF),
tests: [
"buffer-copy.ron",
"bind-group.ron",
"quad.ron",
],
)

Просмотреть файл

@ -0,0 +1,76 @@
(
features: (bits: 0x0),
    expectations: [], // no data expectations: this test passes as long as replay does not crash
actions: [
CreatePipelineLayout(Id(0, 1, Empty), (
label: Some("empty"),
bind_group_layouts: [],
push_constant_ranges: [],
)),
CreateShaderModule(
id: Id(0, 1, Empty),
data: "empty.comp.spv",
),
CreateComputePipeline(Id(0, 1, Empty), (
label: None,
layout: Some(Id(0, 1, Empty)),
compute_stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
)),
CreateBuffer(Id(0, 1, Empty), (
label: None,
size: 16,
usage: (
bits: 64,
),
mapped_at_creation: false,
)),
CreateBindGroupLayout(Id(0, 1, Empty), (
label: None,
entries: [
(
binding: 0,
visibility: (bits: 0x3),
ty: UniformBuffer(
dynamic: false,
min_binding_size: None,
),
count: None,
),
],
)),
CreateBindGroup(Id(0, 1, Empty), (
label: None,
layout: Id(0, 1, Empty),
entries: [
(
binding: 0,
resource: Buffer((
buffer_id: Id(0, 1, Empty),
offset: 0,
size: None,
)),
)
],
)),
Submit(1, [
RunComputePass(
base: (
commands: [
SetPipeline(Id(0, 1, Empty)),
SetBindGroup(
index: 0,
num_dynamic_offsets: 0,
bind_group_id: Id(0, 1, Empty),
),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
),
]),
],
)

Просмотреть файл

@ -0,0 +1,34 @@
(
features: (bits: 0x0),
expectations: [
(
name: "basic",
buffer: (index: 0, epoch: 1),
offset: 0,
data: Raw([0x00, 0x00, 0x80, 0xBF]),
)
],
actions: [
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("dummy"),
size: 16,
usage: (
bits: 41,
),
mapped_at_creation: false,
),
),
WriteBuffer(
id: Id(0, 1, Empty),
data: "data1.bin",
range: (
start: 0,
end: 16,
),
queued: true,
),
Submit(1, []),
],
)

Двоичные данные
gfx/wgpu/player/tests/data/data1.bin Normal file

Двоичный файл не отображается.

Просмотреть файл

@ -0,0 +1,5 @@
#version 450
layout(local_size_x = 1) in;
// Intentionally empty: a minimal no-op compute shader used by the
// compute-pipeline test, which only checks that replay does not crash.
void main() {
}

Двоичные данные
gfx/wgpu/player/tests/data/empty.comp.spv Normal file

Двоичный файл не отображается.

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -0,0 +1,7 @@
#version 450
layout(location = 0) out vec4 outColor;
// Write opaque white to every covered fragment; the test then reads the
// render target back into a buffer and compares it against reference data.
void main() {
    outColor = vec4(1.0, 1.0, 1.0, 1.0);
}

Двоичные данные
gfx/wgpu/player/tests/data/quad.frag.spv Normal file

Двоичный файл не отображается.

Просмотреть файл

@ -0,0 +1,153 @@
(
features: (bits: 0x0),
expectations: [
(
name: "Quad",
buffer: (index: 0, epoch: 1),
offset: 0,
data: File("quad.bin", 16384),
)
],
actions: [
CreateShaderModule(
id: Id(0, 1, Empty),
data: "quad.vert.spv",
),
CreateShaderModule(
id: Id(1, 1, Empty),
data: "quad.frag.spv",
),
CreateTexture(Id(0, 1, Empty), (
label: Some("Output Texture"),
size: (
width: 64,
height: 64,
depth: 1,
),
mip_level_count: 1,
sample_count: 1,
dimension: D2,
format: Rgba8Unorm,
usage: (
bits: 27,
),
)),
CreateTextureView(
id: Id(0, 1, Empty),
parent_id: Id(0, 1, Empty),
desc: (),
),
CreateBuffer(
Id(0, 1, Empty),
(
label: Some("Output Buffer"),
size: 16384,
usage: (
bits: 9,
),
mapped_at_creation: false,
),
),
CreatePipelineLayout(Id(0, 1, Empty), (
label: None,
bind_group_layouts: [],
push_constant_ranges: [],
)),
CreateRenderPipeline(Id(0, 1, Empty), (
label: None,
layout: Some(Id(0, 1, Empty)),
vertex_stage: (
module: Id(0, 1, Empty),
entry_point: "main",
),
fragment_stage: Some((
module: Id(1, 1, Empty),
entry_point: "main",
)),
rasterization_state: None,
primitive_topology: TriangleList,
color_states: [
(
format: Rgba8Unorm,
alpha_blend: (
src_factor: One,
dst_factor: Zero,
operation: Add,
),
color_blend: (
src_factor: One,
dst_factor: Zero,
operation: Add,
),
write_mask: (
bits: 15,
),
),
],
depth_stencil_state: None,
vertex_state: (
index_format: Uint16,
vertex_buffers: [],
),
sample_count: 1,
sample_mask: 4294967295,
alpha_to_coverage_enabled: false,
)),
Submit(1, [
RunRenderPass(
base: (
commands: [
SetPipeline(Id(0, 1, Empty)),
Draw(
vertex_count: 3,
instance_count: 1,
first_vertex: 0,
first_instance: 0,
),
],
dynamic_offsets: [],
string_data: [],
push_constant_data: [],
),
target_colors: [
(
attachment: Id(0, 1, Empty),
resolve_target: None,
channel: (
load_op: Clear,
store_op: Store,
clear_value: (
r: 0,
g: 0,
b: 0,
a: 1,
),
read_only: false,
),
),
],
target_depth_stencil: None,
),
CopyTextureToBuffer(
src: (
texture: Id(0, 1, Empty),
mip_level: 0,
array_layer: 0,
),
dst: (
buffer: Id(0, 1, Empty),
layout: (
offset: 0,
bytes_per_row: 256,
rows_per_image: 64,
),
),
size: (
width: 64,
height: 64,
depth: 1,
),
),
]),
],
)

Просмотреть файл

@ -0,0 +1,10 @@
#version 450
out gl_PerVertex {
    vec4 gl_Position;
};
// Bufferless full-screen "big triangle" trick: the three vertices
// (-1,-1), (-1,3) and (3,-1), chosen from gl_VertexIndex, form one
// triangle whose clipped area covers the entire viewport.
void main() {
    vec2 pos = vec2(gl_VertexIndex == 2 ? 3.0 : -1.0, gl_VertexIndex == 1 ? 3.0 : -1.0);
    gl_Position = vec4(pos, 0.0, 1.0);
}

Двоичные данные
gfx/wgpu/player/tests/data/quad.vert.spv Normal file

Двоичный файл не отображается.

Просмотреть файл

@ -0,0 +1,213 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Tester for WebGPU
* It enumerates the available backends on the system,
* and run the tests through them.
*
* Test requirements:
* - all IDs have the backend `Empty`
* - all expected buffers have `MAP_READ` usage
* - last action is `Submit`
* - no swapchain use
!*/
use player::{gfx_select, GlobalPlay, IdentityPassThroughFactory};
use std::{
fs::{read_to_string, File},
io::{Read, Seek, SeekFrom},
path::{Path, PathBuf},
ptr, slice,
};
/// Index/epoch pair of a resource id as written in the test `.ron` files.
/// The backend part is attached at run time when the id is re-zipped.
#[derive(serde::Deserialize)]
struct RawId {
    index: u32,
    epoch: u32,
}
/// Expected contents of a checked buffer: either the bytes inline in the
/// test file, or a reference to a binary file (name, byte count) next to it.
#[derive(serde::Deserialize)]
enum ExpectedData {
    Raw(Vec<u8>),
    File(String, usize),
}

impl ExpectedData {
    /// Number of bytes that will be mapped and compared.
    fn len(&self) -> usize {
        match self {
            ExpectedData::Raw(vec) => vec.len(),
            ExpectedData::File(_, size) => *size,
        }
    }
}
/// One post-replay check: `data.len()` bytes of `buffer` starting at
/// `offset` must equal `data`. Per the module docs the buffer must have
/// `MAP_READ` usage so it can be mapped back to the CPU.
#[derive(serde::Deserialize)]
struct Expectation {
    name: String,
    buffer: RawId,
    offset: wgt::BufferAddress,
    data: ExpectedData,
}
/// A single deserialized test file: required device features, the trace
/// actions to replay, and the buffer expectations to verify afterwards.
/// The lifetime comes from borrowed data inside `trace::Action`.
#[derive(serde::Deserialize)]
struct Test<'a> {
    features: wgt::Features,
    expectations: Vec<Expectation>,
    actions: Vec<wgc::device::trace::Action<'a>>,
}
// C-ABI callback handed to `buffer_map_async`: panic on any non-success
// status so a failed mapping fails the test loudly.
extern "C" fn map_callback(status: wgc::resource::BufferMapAsyncStatus, _user_data: *mut u8) {
    match status {
        wgc::resource::BufferMapAsyncStatus::Success => (),
        _ => panic!("Unable to map"),
    }
}
impl Test<'_> {
    /// Load a test from `path`, substituting the placeholder backend name
    /// "Empty" in the RON text with the real `backend` so all ids
    /// deserialize with the backend under test.
    fn load(path: PathBuf, backend: wgt::Backend) -> Self {
        let backend_name = match backend {
            wgt::Backend::Vulkan => "Vulkan",
            wgt::Backend::Metal => "Metal",
            wgt::Backend::Dx12 => "Dx12",
            wgt::Backend::Dx11 => "Dx11",
            wgt::Backend::Gl => "Gl",
            _ => unreachable!(),
        };
        let string = read_to_string(path).unwrap().replace("Empty", backend_name);
        ron::de::from_str(&string).unwrap()
    }
    /// Run the test: request a device, replay all actions, then map each
    /// expected buffer and compare its contents against the expectation.
    /// `test_num` keeps device ids unique across tests on the same backend.
    fn run(
        self,
        dir: &Path,
        global: &wgc::hub::Global<IdentityPassThroughFactory>,
        adapter: wgc::id::AdapterId,
        test_num: u32,
    ) {
        let backend = adapter.backend();
        // MAPPABLE_PRIMARY_BUFFERS is forced on so expectation buffers can
        // be mapped for reading regardless of what the test declares.
        let device = gfx_select!(adapter => global.adapter_request_device(
            adapter,
            &wgt::DeviceDescriptor {
                features: self.features | wgt::Features::MAPPABLE_PRIMARY_BUFFERS,
                limits: wgt::Limits::default(),
                shader_validation: true,
            },
            None,
            wgc::id::TypedId::zip(test_num, 0, backend)
        ))
        .unwrap();
        let mut command_buffer_id_manager = wgc::hub::IdentityManager::default();
        println!("\t\t\tRunning...");
        for action in self.actions {
            gfx_select!(device => global.process(device, action, dir, &mut command_buffer_id_manager));
        }
        println!("\t\t\tMapping...");
        // Kick off an async map for every expected buffer...
        for expect in &self.expectations {
            let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
            gfx_select!(device => global.buffer_map_async(
                buffer,
                expect.offset .. expect.offset+expect.data.len() as wgt::BufferAddress,
                wgc::resource::BufferMapOperation {
                    host: wgc::device::HostMap::Read,
                    callback: map_callback,
                    user_data: ptr::null_mut(),
                }
            ))
            .unwrap();
        }
        println!("\t\t\tWaiting...");
        // ...then block until the device is idle, which also fires the
        // map callbacks.
        gfx_select!(device => global.device_poll(device, true)).unwrap();
        for expect in self.expectations {
            println!("\t\t\tChecking {}", expect.name);
            let buffer = wgc::id::TypedId::zip(expect.buffer.index, expect.buffer.epoch, backend);
            let ptr =
                gfx_select!(device => global.buffer_get_mapped_range(buffer, expect.offset, None))
                    .unwrap();
            // SAFETY-relevant: the mapped range stays valid until the
            // backend is cleared below; length matches the map request.
            let contents = unsafe { slice::from_raw_parts(ptr, expect.data.len()) };
            let expected_data = match expect.data {
                ExpectedData::Raw(vec) => vec,
                ExpectedData::File(name, size) => {
                    // Reference files are read starting at the same offset
                    // used for the buffer comparison.
                    let mut bin = vec![0; size];
                    let mut file = File::open(dir.join(name)).unwrap();
                    file.seek(SeekFrom::Start(expect.offset)).unwrap();
                    file.read_exact(&mut bin[..]).unwrap();
                    bin
                }
            };
            assert_eq!(&expected_data[..], contents);
        }
        // Tear down everything created on this backend before the next test.
        gfx_select!(device => global.clear_backend(()));
    }
}
/// Top-level corpus file (`all.ron`): a mask of backends to exercise and
/// the list of test file names, relative to the corpus file's directory.
#[derive(serde::Deserialize)]
struct Corpus {
    backends: wgt::BackendBit,
    tests: Vec<String>,
}
/// Every backend the harness will try; each is further filtered by the
/// corpus mask and by whether an adapter is actually available at run time.
const BACKENDS: &[wgt::Backend] = &[
    wgt::Backend::Vulkan,
    wgt::Backend::Metal,
    wgt::Backend::Dx12,
    wgt::Backend::Dx11,
    wgt::Backend::Gl,
];
impl Corpus {
    /// Load the corpus at `path` and run every listed test on every
    /// backend that is both enabled in the corpus mask and has a usable
    /// adapter. Tests whose required features are unsupported are skipped.
    fn run_from(path: PathBuf) {
        println!("Corpus {:?}", path);
        let dir = path.parent().unwrap();
        let corpus: Corpus = ron::de::from_reader(File::open(&path).unwrap()).unwrap();
        let global = wgc::hub::Global::new("test", IdentityPassThroughFactory, corpus.backends);
        for &backend in BACKENDS {
            if !corpus.backends.contains(backend.into()) {
                continue;
            }
            // No adapter for this backend on this machine: skip silently.
            let adapter = match global.request_adapter(
                &wgc::instance::RequestAdapterOptions {
                    power_preference: wgt::PowerPreference::LowPower,
                    compatible_surface: None,
                },
                wgc::instance::AdapterInputs::IdSet(
                    &[wgc::id::TypedId::zip(0, 0, backend)],
                    |id| id.backend(),
                ),
            ) {
                Ok(adapter) => adapter,
                Err(_) => continue,
            };
            println!("\tBackend {:?}", backend);
            let supported_features =
                gfx_select!(adapter => global.adapter_features(adapter)).unwrap();
            // `test_num` only advances for tests that actually run; it is
            // used to keep device ids unique (see `Test::run`).
            let mut test_num = 0;
            for test_path in &corpus.tests {
                println!("\t\tTest '{:?}'", test_path);
                let test = Test::load(dir.join(test_path), adapter.backend());
                if !supported_features.contains(test.features) {
                    println!(
                        "\t\tSkipped due to missing features {:?}",
                        test.features - supported_features
                    );
                    continue;
                }
                test.run(dir, &global, adapter, test_num);
                test_num += 1;
            }
        }
    }
}
/// Entry point: run the whole corpus shipped under `tests/data/all.ron`,
/// resolved relative to this crate's manifest directory.
#[test]
fn test_api() {
    Corpus::run_from(PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("tests/data/all.ron"))
}

Просмотреть файл

@ -1,10 +1,7 @@
[package]
name = "wgpu-core"
version = "0.5.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
"Joshua Groves <josh@joshgroves.com>",
]
version = "0.6.0"
authors = ["wgpu developers"]
edition = "2018"
description = "WebGPU core logic on gfx-hal"
homepage = "https://github.com/gfx-rs/wgpu"
@ -16,54 +13,55 @@ license = "MPL-2.0"
[features]
default = []
# Enable API tracing
trace = ["ron", "serde", "wgt/trace"]
# Enable API replaying
replay = ["serde", "wgt/replay"]
#NOTE: glutin feature is not stable, use at your own risk
#glutin = ["gfx-backend-gl/glutin"]
# Enable serializable compute/render passes, and bundle encoders.
serial-pass = ["serde", "wgt/serde", "arrayvec/serde"]
[dependencies]
arrayvec = "0.5"
bitflags = "1.0"
copyless = "0.1"
fxhash = "0.2"
log = "0.4"
hal = { package = "gfx-hal", version = "0.5.1" }
gfx-backend-empty = "0.5"
gfx-descriptor = "0.1"
gfx-memory = "0.1"
hal = { package = "gfx-hal", version = "0.6" }
gfx-backend-empty = "0.6"
parking_lot = "0.11"
peek-poke = "0.2"
raw-window-handle = { version = "0.3", optional = true }
ron = { version = "0.6.2", optional = true }
ron = { version = "0.6", optional = true }
serde = { version = "1.0", features = ["serde_derive"], optional = true }
smallvec = "1"
spirv_headers = { version = "1.4.2" }
vec_map = "0.8.1"
tracing = { version = "0.1", default-features = false, features = ["std"] }
thiserror = "1"
gfx-descriptor = "0.2"
gfx-memory = "0.2"
[dependencies.naga]
version = "0.2"
git = "https://github.com/gfx-rs/naga"
rev = "bce6358eb1026c13d2f1c6d365af37afe8869a86"
rev = "aa35110471ee7915e1f4e1de61ea41f2f32f92c4"
features = ["spv-in", "spv-out", "wgsl-in"]
[dependencies.wgt]
path = "../wgpu-types"
package = "wgpu-types"
version = "0.5"
features = ["peek-poke"]
version = "0.6"
[target.'cfg(any(target_os = "ios", target_os = "macos"))'.dependencies]
gfx-backend-metal = { version = "0.5.7" }
gfx-backend-vulkan = { version = "0.5.7", optional = true }
gfx-backend-metal = { version = "0.6" }
gfx-backend-vulkan = { version = "0.6.4", optional = true }
[target.'cfg(all(unix, not(target_os = "ios"), not(target_os = "macos")))'.dependencies]
gfx-backend-vulkan = { version = "0.5.7" }
gfx-backend-vulkan = { version = "0.6.4" }
[target.'cfg(windows)'.dependencies]
gfx-backend-dx12 = { version = "0.5.5" }
gfx-backend-dx11 = { version = "0.5" }
gfx-backend-vulkan = { version = "0.5.7" }
[target.'cfg(any(target_os = "linux", target_os = "macos", target_os = "windows", target_os = "dragonfly", target_os = "freebsd"))'.dependencies]
battery = { version = "0.7", optional = true }
gfx-backend-dx12 = { version = "0.6" }
gfx-backend-dx11 = { version = "0.6" }
gfx-backend-vulkan = { version = "0.6.4" }
[dev-dependencies]
loom = "0.3"
[build-dependencies]
cfg_aliases = "0.1"

Просмотреть файл

@ -0,0 +1,18 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/// Build script: declares `cfg` aliases via the `cfg_aliases` crate so the
/// crate's sources can write short forms like `#[cfg(vulkan)]` instead of
/// repeating the full platform/feature predicates below.
fn main() {
    // Setup cfg aliases
    cfg_aliases::cfg_aliases! {
        // Vendors/systems
        apple: { any(target_os = "ios", target_os = "macos") },
        // Backends
        vulkan: { any(windows, all(unix, not(apple)), feature = "gfx-backend-vulkan") },
        metal: { apple },
        dx12: { windows },
        dx11: { windows },
        gl: { unix },
    }
}

Просмотреть файл

@ -3,9 +3,11 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId},
device::{DeviceError, SHADER_STAGE_COUNT},
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
track::{TrackerSet, DUMMY_SELECTOR},
FastHashMap, LifeGuard, RefCount, Stored, MAX_BIND_GROUPS,
validation::{MissingBufferUsageError, MissingTextureUsageError},
FastHashMap, Label, LifeGuard, MultiRefCount, RefCount, Stored, MAX_BIND_GROUPS,
};
use arrayvec::ArrayVec;
@ -15,16 +17,292 @@ use gfx_descriptor::{DescriptorCounts, DescriptorSet};
use serde::Deserialize;
#[cfg(feature = "trace")]
use serde::Serialize;
use std::borrow::Borrow;
use std::{
borrow::{Borrow, Cow},
ops::Range,
};
use thiserror::Error;
/// Reasons creating a bind group layout can fail.
#[derive(Clone, Debug, Error)]
pub enum CreateBindGroupLayoutError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("arrays of bindings unsupported for this type of binding")]
    ArrayUnsupported,
    #[error("conflicting binding at index {0}")]
    ConflictBinding(u32),
    #[error("required device feature is missing: {0:?}")]
    MissingFeature(wgt::Features),
    #[error(transparent)]
    TooManyBindings(BindingTypeMaxCountError),
}

/// Reasons creating a bind group can fail. The `#[error]` strings double as
/// the user-facing validation messages.
#[derive(Clone, Debug, Error)]
pub enum CreateBindGroupError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("bind group layout is invalid")]
    InvalidLayout,
    #[error("buffer {0:?} is invalid or destroyed")]
    InvalidBuffer(BufferId),
    #[error("texture view {0:?} is invalid")]
    InvalidTextureView(TextureViewId),
    #[error("sampler {0:?} is invalid")]
    InvalidSampler(SamplerId),
    #[error("binding count declared with {expected} items, but {actual} items were provided")]
    BindingArrayLengthMismatch { actual: usize, expected: usize },
    #[error("bound buffer range {range:?} does not fit in buffer of size {size}")]
    BindingRangeTooLarge {
        range: Range<wgt::BufferAddress>,
        size: u64,
    },
    #[error("buffer binding size {actual} is less than minimum {min}")]
    BindingSizeTooSmall { actual: u64, min: u64 },
    #[error("number of bindings in bind group descriptor ({actual}) does not match the number of bindings defined in the bind group layout ({expected})")]
    BindingsNumMismatch { actual: usize, expected: usize },
    #[error("binding {0} is used at least twice in the descriptor")]
    DuplicateBinding(u32),
    #[error("unable to find a corresponding declaration for the given binding {0}")]
    MissingBindingDeclaration(u32),
    #[error(transparent)]
    MissingBufferUsage(#[from] MissingBufferUsageError),
    #[error(transparent)]
    MissingTextureUsage(#[from] MissingTextureUsageError),
    #[error("required device features not enabled: {0:?}")]
    MissingFeatures(wgt::Features),
    #[error("binding declared as a single item, but bind group is using it as an array")]
    SingleBindingExpected,
    #[error("unable to create a bind group with a swap chain image")]
    SwapChainImage,
    #[error("buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
    UnalignedBufferOffset(wgt::BufferAddress),
    #[error("uniform buffer binding range exceeds `max_uniform_buffer_binding_size` limit")]
    UniformBufferRangeTooLarge,
    #[error("binding {binding} has a different type ({actual:?}) than the one in the layout ({expected:?})")]
    WrongBindingType {
        // Index of the binding
        binding: u32,
        // The type given to the function
        actual: wgt::BindingType,
        // Human-readable description of expected types
        expected: &'static str,
    },
    #[error("the given sampler is/is not a comparison sampler, while the layout type indicates otherwise")]
    WrongSamplerComparison,
    #[error("bound texture views can not have both depth and stencil aspects enabled")]
    DepthStencilAspect,
}

/// Where a binding-count limit was exceeded: within a specific set of shader
/// stages, or across the whole pipeline.
#[derive(Clone, Debug, Error)]
pub enum BindingZone {
    #[error("stage {0:?}")]
    Stage(wgt::ShaderStage),
    #[error("whole pipeline")]
    Pipeline,
}

/// Produced when the count of some binding type exceeds a device limit; see
/// `BindingTypeMaxCountValidator`.
#[derive(Clone, Debug, Error)]
#[error("too many bindings of type {kind:?} in {zone}, limit is {count}")]
pub struct BindingTypeMaxCountError {
    pub kind: BindingTypeMaxCountErrorKind,
    pub zone: BindingZone,
    pub count: u32,
}
#[derive(Clone, Debug)]
pub enum BindGroupLayoutError {
ConflictBinding(u32),
MissingExtension(wgt::Extensions),
/// Arrays of bindings can't be 0 elements long
ZeroCount,
/// Arrays of bindings unsupported for this type of binding
ArrayUnsupported,
pub enum BindingTypeMaxCountErrorKind {
DynamicUniformBuffers,
DynamicStorageBuffers,
SampledTextures,
Samplers,
StorageBuffers,
StorageTextures,
UniformBuffers,
}
/// Tally of bindings of one binding type, kept separately per shader stage —
/// the corresponding device limits are per-stage.
#[derive(Debug, Default)]
pub(crate) struct PerStageBindingTypeCounter {
    vertex: u32,
    fragment: u32,
    compute: u32,
}
impl PerStageBindingTypeCounter {
    /// Record `count` more bindings for every stage present in `stage`.
    pub(crate) fn add(&mut self, stage: wgt::ShaderStage, count: u32) {
        self.vertex += if stage.contains(wgt::ShaderStage::VERTEX) {
            count
        } else {
            0
        };
        self.fragment += if stage.contains(wgt::ShaderStage::FRAGMENT) {
            count
        } else {
            0
        };
        self.compute += if stage.contains(wgt::ShaderStage::COMPUTE) {
            count
        } else {
            0
        };
    }

    /// The largest per-stage count, together with the union of all stages
    /// that reach it (wrapped in `BindingZone::Stage`).
    pub(crate) fn max(&self) -> (BindingZone, u32) {
        let peak = self.vertex.max(self.fragment).max(self.compute);
        let mut stages = wgt::ShaderStage::NONE;
        if self.vertex == peak {
            stages |= wgt::ShaderStage::VERTEX
        }
        if self.fragment == peak {
            stages |= wgt::ShaderStage::FRAGMENT
        }
        if self.compute == peak {
            stages |= wgt::ShaderStage::COMPUTE
        }
        (BindingZone::Stage(stages), peak)
    }

    /// Take, stage by stage, the maximum of this counter and `other`.
    pub(crate) fn merge(&mut self, other: &Self) {
        self.vertex = other.vertex.max(self.vertex);
        self.fragment = other.fragment.max(self.fragment);
        self.compute = other.compute.max(self.compute);
    }

    /// Error with `kind` if any stage's count exceeds `limit`.
    pub(crate) fn validate(
        &self,
        limit: u32,
        kind: BindingTypeMaxCountErrorKind,
    ) -> Result<(), BindingTypeMaxCountError> {
        let (zone, count) = self.max();
        if count <= limit {
            Ok(())
        } else {
            Err(BindingTypeMaxCountError { kind, zone, count })
        }
    }
}
/// Accumulates binding counts for a layout (or, via `merge`, several layouts)
/// so they can be checked against `wgt::Limits` in `validate`. Dynamic buffer
/// counts are pipeline-wide; the others are tracked per shader stage.
#[derive(Debug, Default)]
pub(crate) struct BindingTypeMaxCountValidator {
    dynamic_uniform_buffers: u32,
    dynamic_storage_buffers: u32,
    sampled_textures: PerStageBindingTypeCounter,
    samplers: PerStageBindingTypeCounter,
    storage_buffers: PerStageBindingTypeCounter,
    storage_textures: PerStageBindingTypeCounter,
    uniform_buffers: PerStageBindingTypeCounter,
}
impl BindingTypeMaxCountValidator {
    /// Count `binding` toward the appropriate totals, multiplied by its
    /// array `count` (absent count means a single binding).
    pub(crate) fn add_binding(&mut self, binding: &wgt::BindGroupLayoutEntry) {
        let count = binding.count.map_or(1, |count| count.get());
        match binding.ty {
            wgt::BindingType::UniformBuffer { dynamic, .. } => {
                self.uniform_buffers.add(binding.visibility, count);
                if dynamic {
                    self.dynamic_uniform_buffers += count;
                }
            }
            wgt::BindingType::StorageBuffer { dynamic, .. } => {
                self.storage_buffers.add(binding.visibility, count);
                if dynamic {
                    self.dynamic_storage_buffers += count;
                }
            }
            wgt::BindingType::Sampler { .. } => {
                self.samplers.add(binding.visibility, count);
            }
            wgt::BindingType::SampledTexture { .. } => {
                self.sampled_textures.add(binding.visibility, count);
            }
            wgt::BindingType::StorageTexture { .. } => {
                self.storage_textures.add(binding.visibility, count);
            }
        }
    }

    /// Fold another validator's totals into this one (used when combining
    /// the bind group layouts making up a pipeline layout).
    pub(crate) fn merge(&mut self, other: &Self) {
        self.dynamic_uniform_buffers += other.dynamic_uniform_buffers;
        self.dynamic_storage_buffers += other.dynamic_storage_buffers;
        self.sampled_textures.merge(&other.sampled_textures);
        self.samplers.merge(&other.samplers);
        self.storage_buffers.merge(&other.storage_buffers);
        self.storage_textures.merge(&other.storage_textures);
        self.uniform_buffers.merge(&other.uniform_buffers);
    }

    /// Check every accumulated count against the device `limits`, returning
    /// the first violation encountered.
    pub(crate) fn validate(&self, limits: &wgt::Limits) -> Result<(), BindingTypeMaxCountError> {
        if limits.max_dynamic_uniform_buffers_per_pipeline_layout < self.dynamic_uniform_buffers {
            return Err(BindingTypeMaxCountError {
                kind: BindingTypeMaxCountErrorKind::DynamicUniformBuffers,
                zone: BindingZone::Pipeline,
                count: self.dynamic_uniform_buffers,
            });
        }
        if limits.max_dynamic_storage_buffers_per_pipeline_layout < self.dynamic_storage_buffers {
            return Err(BindingTypeMaxCountError {
                kind: BindingTypeMaxCountErrorKind::DynamicStorageBuffers,
                zone: BindingZone::Pipeline,
                count: self.dynamic_storage_buffers,
            });
        }
        self.sampled_textures.validate(
            limits.max_sampled_textures_per_shader_stage,
            BindingTypeMaxCountErrorKind::SampledTextures,
        )?;
        // Fix: `storage_buffers` was previously validated twice (once here
        // and again after `samplers`); the redundant second, identical check
        // has been removed. Behavior is unchanged — the check is idempotent.
        self.storage_buffers.validate(
            limits.max_storage_buffers_per_shader_stage,
            BindingTypeMaxCountErrorKind::StorageBuffers,
        )?;
        self.samplers.validate(
            limits.max_samplers_per_shader_stage,
            BindingTypeMaxCountErrorKind::Samplers,
        )?;
        self.storage_textures.validate(
            limits.max_storage_textures_per_shader_stage,
            BindingTypeMaxCountErrorKind::StorageTextures,
        )?;
        self.uniform_buffers.validate(
            limits.max_uniform_buffers_per_shader_stage,
            BindingTypeMaxCountErrorKind::UniformBuffers,
        )?;
        Ok(())
    }
}
/// Bindable resource and the slot to bind it to.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupEntry<'a> {
/// Slot for which binding provides resource. Corresponds to an entry of the same
/// binding index in the [`BindGroupLayoutDescriptor`].
pub binding: u32,
/// Resource to attach to the binding
pub resource: BindingResource<'a>,
}
/// Describes a group of bindings and the resources to be bound.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BindGroupDescriptor<'a> {
/// Debug label of the bind group. This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// The [`BindGroupLayout`] that corresponds to this bind group.
pub layout: BindGroupLayoutId,
/// The resources to bind to this bind group.
pub entries: Cow<'a, [BindGroupEntry<'a>]>,
}
/// Describes a [`BindGroupLayout`].
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct BindGroupLayoutDescriptor<'a> {
/// Debug label of the bind group layout. This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// Array of entries in this BindGroupLayout
pub entries: Cow<'a, [wgt::BindGroupLayoutEntry]>,
}
pub(crate) type BindEntryMap = FastHashMap<u32, wgt::BindGroupLayoutEntry>;
@ -33,22 +311,92 @@ pub(crate) type BindEntryMap = FastHashMap<u32, wgt::BindGroupLayoutEntry>;
pub struct BindGroupLayout<B: hal::Backend> {
pub(crate) raw: B::DescriptorSetLayout,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
pub(crate) multi_ref_count: MultiRefCount,
pub(crate) entries: BindEntryMap,
pub(crate) desc_counts: DescriptorCounts,
pub(crate) dynamic_count: usize,
pub(crate) count_validator: BindingTypeMaxCountValidator,
}
#[repr(C)]
#[derive(Debug)]
pub struct PipelineLayoutDescriptor {
pub bind_group_layouts: *const BindGroupLayoutId,
pub bind_group_layouts_length: usize,
/// Reasons creating a pipeline layout can fail.
#[derive(Clone, Debug, Error)]
pub enum CreatePipelineLayoutError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("bind group layout {0:?} is invalid")]
    InvalidBindGroupLayout(BindGroupLayoutId),
    #[error(
        "push constant at index {index} has range bound {bound} not aligned to {}",
        wgt::PUSH_CONSTANT_ALIGNMENT
    )]
    MisalignedPushConstantRange { index: usize, bound: u32 },
    #[error("device does not have required feature: {0:?}")]
    MissingFeature(wgt::Features),
    #[error("push constant range (index {index}) provides for stage(s) {provided:?} but there exists another range that provides stage(s) {intersected:?}. Each stage may only be provided by one range")]
    MoreThanOnePushConstantRangePerStage {
        index: usize,
        provided: wgt::ShaderStage,
        intersected: wgt::ShaderStage,
    },
    #[error("push constant at index {index} has range {}..{} which exceeds device push constant size limit 0..{max}", range.start, range.end)]
    PushConstantRangeTooLarge {
        index: usize,
        range: Range<u32>,
        max: u32,
    },
    #[error(transparent)]
    TooManyBindings(BindingTypeMaxCountError),
    #[error("bind group layout count {actual} exceeds device bind group limit {max}")]
    TooManyGroups { actual: usize, max: usize },
}
#[derive(Clone, Debug)]
pub enum PipelineLayoutError {
TooManyGroups(usize),
/// Reasons a push-constant upload can fail validation against a pipeline
/// layout's declared ranges; produced by
/// `PipelineLayout::validate_push_constant_ranges`.
#[derive(Clone, Debug, Error)]
pub enum PushConstantUploadError {
    #[error("provided push constant with indices {offset}..{end_offset} overruns matching push constant range at index {idx}, with stage(s) {:?} and indices {:?}", range.stages, range.range)]
    TooLarge {
        offset: u32,
        end_offset: u32,
        idx: usize,
        range: wgt::PushConstantRange,
    },
    #[error("provided push constant is for stage(s) {actual:?}, stage with a partial match found at index {idx} with stage(s) {matched:?}, however push constants must be complete matches")]
    PartialRangeMatch {
        actual: wgt::ShaderStage,
        idx: usize,
        matched: wgt::ShaderStage,
    },
    #[error("provided push constant is for stage(s) {actual:?}, but intersects a push constant range (at index {idx}) with stage(s) {missing:?}. Push constants must provide the stages for all ranges they intersect")]
    MissingStages {
        actual: wgt::ShaderStage,
        idx: usize,
        missing: wgt::ShaderStage,
    },
    #[error("provided push constant is for stage(s) {actual:?}, however the pipeline layout has no push constant range for the stage(s) {unmatched:?}")]
    UnmatchedStages {
        actual: wgt::ShaderStage,
        unmatched: wgt::ShaderStage,
    },
    #[error("provided push constant offset {0} does not respect `PUSH_CONSTANT_ALIGNMENT`")]
    Unaligned(u32),
}
/// Describes a pipeline layout.
///
/// A `PipelineLayoutDescriptor` can be used to create a pipeline layout.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct PipelineLayoutDescriptor<'a> {
/// Debug label of the pipeine layout. This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// Bind groups that this pipeline uses. The first entry will provide all the bindings for
/// "set = 0", second entry will provide all the bindings for "set = 1" etc.
pub bind_group_layouts: Cow<'a, [BindGroupLayoutId]>,
/// Set of push constant ranges this pipeline uses. Each shader stage that uses push constants
/// must define the range in push constant memory that corresponds to its single `layout(push_constant)`
/// uniform block.
///
/// If this array is non-empty, the [`Features::PUSH_CONSTANTS`] must be enabled.
pub push_constant_ranges: Cow<'a, [wgt::PushConstantRange]>,
}
#[derive(Debug)]
@ -56,51 +404,173 @@ pub struct PipelineLayout<B: hal::Backend> {
pub(crate) raw: B::PipelineLayout,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
pub(crate) bind_group_layout_ids: ArrayVec<[Stored<BindGroupLayoutId>; MAX_BIND_GROUPS]>,
pub(crate) bind_group_layout_ids: ArrayVec<[Valid<BindGroupLayoutId>; MAX_BIND_GROUPS]>,
pub(crate) push_constant_ranges: ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT]>,
}
impl<B: hal::Backend> PipelineLayout<B> {
    /// Validate push constants match up with expected ranges.
    ///
    /// `stages` is the stage set the caller is uploading for and
    /// `offset..end_offset` the byte range being written; returns `Ok` only
    /// if every touched byte/stage pair is covered by exactly the matching
    /// declared ranges.
    pub(crate) fn validate_push_constant_ranges(
        &self,
        stages: wgt::ShaderStage,
        offset: u32,
        end_offset: u32,
    ) -> Result<(), PushConstantUploadError> {
        // Don't need to validate size against the push constant size limit here,
        // as push constant ranges are already validated to be within bounds,
        // and we validate that they are within the ranges.
        if offset % wgt::PUSH_CONSTANT_ALIGNMENT != 0 {
            return Err(PushConstantUploadError::Unaligned(offset));
        }

        // Push constant validation looks very complicated on the surface, but
        // the problem can be range-reduced pretty well.
        //
        // Push constants require (summarized from the vulkan spec):
        // 1. For each byte in the range and for each shader stage in stageFlags,
        //    there must be a push constant range in the layout that includes that
        //    byte and that stage.
        // 2. For each byte in the range and for each push constant range that overlaps that byte,
        //    `stage` must include all stages in that push constant ranges `stage`.
        //
        // However there are some additional constraints that help us:
        // 3. All push constant ranges are the only range that can access that stage.
        //    i.e. if one range has VERTEX, no other range has VERTEX
        //
        // Therefore we can simplify the checks in the following ways:
        // - Because 3 guarantees that the push constant range has a unique stage,
        //   when we check for 1, we can simply check that our entire updated range
        //   is within a push constant range. i.e. our range for a specific stage cannot
        //   intersect more than one push constant range.
        let mut used_stages = wgt::ShaderStage::NONE;
        for (idx, range) in self.push_constant_ranges.iter().enumerate() {
            // contains not intersects due to 2
            if stages.contains(range.stages) {
                if !(range.range.start <= offset && end_offset <= range.range.end) {
                    return Err(PushConstantUploadError::TooLarge {
                        offset,
                        end_offset,
                        idx,
                        range: range.clone(),
                    });
                }
                used_stages |= range.stages;
            } else if stages.intersects(range.stages) {
                // Will be caught by used stages check below, but we can do this because of 1
                // and is more helpful to the user.
                return Err(PushConstantUploadError::PartialRangeMatch {
                    actual: stages,
                    idx,
                    matched: range.stages,
                });
            }

            // The push constant range intersects range we are uploading
            if offset < range.range.end && range.range.start < end_offset {
                // But requires stages we don't provide
                if !stages.contains(range.stages) {
                    return Err(PushConstantUploadError::MissingStages {
                        actual: stages,
                        idx,
                        // Fix: report the stages the range requires that the
                        // caller did NOT provide. Previously this was
                        // `missing: stages`, i.e. the caller's own stage set,
                        // which contradicts the `MissingStages` error message.
                        missing: range.stages - stages,
                    });
                }
            }
        }
        if used_stages != stages {
            return Err(PushConstantUploadError::UnmatchedStages {
                actual: stages,
                unmatched: stages - used_stages,
            });
        }
        Ok(())
    }
}
#[repr(C)]
#[derive(Debug)]
#[derive(Clone, Debug, Hash, PartialEq)]
#[cfg_attr(feature = "trace", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub struct BufferBinding {
pub buffer: BufferId,
pub buffer_id: BufferId,
pub offset: wgt::BufferAddress,
pub size: wgt::BufferSize,
pub size: Option<wgt::BufferSize>,
}
// Note: Duplicated in wgpu-rs as BindingResource
#[derive(Debug)]
// Note: Duplicated in `wgpu-rs` as `BindingResource`
// They're different enough that it doesn't make sense to share a common type
#[derive(Debug, Clone)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum BindingResource<'a> {
Buffer(BufferBinding),
Sampler(SamplerId),
TextureView(TextureViewId),
TextureViewArray(&'a [TextureViewId]),
TextureViewArray(Cow<'a, [TextureViewId]>),
}
// Note: Duplicated in wgpu-rs as Binding
#[derive(Debug)]
pub struct BindGroupEntry<'a> {
pub binding: u32,
pub resource: BindingResource<'a>,
/// Reasons the dynamic offsets supplied when binding a bind group can be
/// rejected; produced by `BindGroup::validate_dynamic_bindings`.
#[derive(Clone, Debug, Error)]
pub enum BindError {
    #[error("number of dynamic offsets ({actual}) doesn't match the number of dynamic bindings in the bind group layout ({expected})")]
    MismatchedDynamicOffsetCount { actual: usize, expected: usize },
    #[error(
        "dynamic binding at index {idx}: offset {offset} does not respect `BIND_BUFFER_ALIGNMENT`"
    )]
    UnalignedDynamicBinding { idx: usize, offset: u32 },
    #[error("dynamic binding at index {idx} with offset {offset} would overrun the buffer (limit: {max})")]
    DynamicBindingOutOfBounds { idx: usize, offset: u32, max: u64 },
}
// Note: Duplicated in wgpu-rs as BindGroupDescriptor
#[derive(Debug)]
pub struct BindGroupDescriptor<'a> {
pub label: Option<&'a str>,
pub layout: BindGroupLayoutId,
pub bindings: &'a [BindGroupEntry<'a>],
pub struct BindGroupDynamicBindingData {
/// The maximum value the dynamic offset can have before running off the end of the buffer.
pub(crate) maximum_dynamic_offset: wgt::BufferAddress,
}
#[derive(Debug)]
pub struct BindGroup<B: hal::Backend> {
pub(crate) raw: DescriptorSet<B>,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) layout_id: BindGroupLayoutId,
pub(crate) layout_id: Valid<BindGroupLayoutId>,
pub(crate) life_guard: LifeGuard,
pub(crate) used: TrackerSet,
pub(crate) dynamic_count: usize,
pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,
}
impl<B: hal::Backend> BindGroup<B> {
    /// Check the dynamic offsets supplied at bind time against this bind
    /// group's dynamic bindings: the counts must match, each offset must be
    /// `BIND_BUFFER_ALIGNMENT`-aligned, and none may exceed its binding's
    /// recorded `maximum_dynamic_offset`.
    pub(crate) fn validate_dynamic_bindings(
        &self,
        offsets: &[wgt::DynamicOffset],
    ) -> Result<(), BindError> {
        // One offset is required per dynamic binding — no more, no fewer.
        if self.dynamic_binding_info.len() != offsets.len() {
            return Err(BindError::MismatchedDynamicOffsetCount {
                expected: self.dynamic_binding_info.len(),
                actual: offsets.len(),
            });
        }

        for (idx, (info, &offset)) in self
            .dynamic_binding_info
            .iter()
            .zip(offsets.iter())
            .enumerate()
        {
            if offset as wgt::BufferAddress % wgt::BIND_BUFFER_ALIGNMENT != 0 {
                return Err(BindError::UnalignedDynamicBinding { idx, offset });
            }

            // Offsets are widened to BufferAddress before comparing against
            // the 64-bit maximum to avoid any narrowing.
            if offset as wgt::BufferAddress > info.maximum_dynamic_offset {
                return Err(BindError::DynamicBindingOutOfBounds {
                    idx,
                    offset,
                    max: info.maximum_dynamic_offset,
                });
            }
        }

        Ok(())
    }
}
impl<B: hal::Backend> Borrow<RefCount> for BindGroup<B> {
@ -114,3 +584,11 @@ impl<B: hal::Backend> Borrow<()> for BindGroup<B> {
&DUMMY_SELECTOR
}
}
/// Reasons a bind group layout cannot be retrieved from a pipeline.
#[derive(Clone, Debug, Error)]
pub enum GetBindGroupLayoutError {
    #[error("pipeline is invalid")]
    InvalidPipeline,
    #[error("invalid group index {0}")]
    InvalidGroupIndex(u32),
}

Просмотреть файл

@ -4,12 +4,13 @@
use super::CommandBuffer;
use crate::{
hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap, PrivateFeatures, Stored,
SubmissionIndex,
device::DeviceError, hub::GfxBackend, id::DeviceId, track::TrackerSet, FastHashMap,
PrivateFeatures, Stored, SubmissionIndex,
};
use hal::{command::CommandBuffer as _, device::Device as _, pool::CommandPool as _};
use parking_lot::Mutex;
use thiserror::Error;
use std::thread;
@ -28,8 +29,8 @@ impl<B: hal::Backend> CommandPool<B> {
for i in (0..self.pending.len()).rev() {
if self.pending[i].1 <= last_done_index {
let (cmd_buf, index) = self.pending.swap_remove(i);
log::trace!(
"recycling comb submitted in {} when {} is last done",
tracing::trace!(
"recycling cmdbuf submitted in {} when {} is last done",
index,
last_done_index,
);
@ -80,30 +81,37 @@ impl<B: GfxBackend> CommandAllocator<B> {
limits: wgt::Limits,
private_features: PrivateFeatures,
#[cfg(feature = "trace")] enable_tracing: bool,
) -> CommandBuffer<B> {
) -> Result<CommandBuffer<B>, CommandAllocatorError> {
//debug_assert_eq!(device_id.backend(), B::VARIANT);
let thread_id = thread::current().id();
let mut inner = self.inner.lock();
let init = inner
.pools
.entry(thread_id)
.or_insert_with(|| CommandPool {
raw: unsafe {
log::info!("Starting on thread {:?}", thread_id);
device.create_command_pool(
self.queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
}
.unwrap(),
total: 0,
available: Vec::new(),
pending: Vec::new(),
})
.allocate();
use std::collections::hash_map::Entry;
let pool = match inner.pools.entry(thread_id) {
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
tracing::info!("Starting on thread {:?}", thread_id);
let raw = unsafe {
device
.create_command_pool(
self.queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.or(Err(DeviceError::OutOfMemory))?
};
let pool = CommandPool {
raw,
total: 0,
available: Vec::new(),
pending: Vec::new(),
};
e.insert(pool)
}
};
CommandBuffer {
let init = pool.allocate();
Ok(CommandBuffer {
raw: vec![init],
is_recording: true,
recorded_thread_id: thread_id,
@ -118,14 +126,17 @@ impl<B: GfxBackend> CommandAllocator<B> {
} else {
None
},
}
})
}
}
impl<B: hal::Backend> CommandAllocator<B> {
pub fn new(queue_family: hal::queue::QueueFamilyId, device: &B::Device) -> Self {
pub fn new(
queue_family: hal::queue::QueueFamilyId,
device: &B::Device,
) -> Result<Self, CommandAllocatorError> {
let internal_thread_id = thread::current().id();
log::info!("Starting on (internal) thread {:?}", internal_thread_id);
tracing::info!("Starting on (internal) thread {:?}", internal_thread_id);
let mut pools = FastHashMap::default();
pools.insert(
internal_thread_id,
@ -136,18 +147,18 @@ impl<B: hal::Backend> CommandAllocator<B> {
queue_family,
hal::pool::CommandPoolCreateFlags::RESET_INDIVIDUAL,
)
.unwrap()
.or(Err(DeviceError::OutOfMemory))?
},
total: 0,
available: Vec::new(),
pending: Vec::new(),
},
);
CommandAllocator {
Ok(Self {
queue_family,
internal_thread_id,
inner: Mutex::new(Inner { pools }),
}
})
}
fn allocate_for_thread_id(&self, thread_id: thread::ThreadId) -> B::CommandBuffer {
@ -213,7 +224,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
}
}
for thread_id in remove_threads {
log::info!("Removing from thread {:?}", thread_id);
tracing::info!("Removing from thread {:?}", thread_id);
let mut pool = inner.pools.remove(&thread_id).unwrap();
unsafe {
pool.raw.free(pool.available);
@ -229,7 +240,7 @@ impl<B: hal::Backend> CommandAllocator<B> {
pool.recycle(raw);
}
if pool.total != pool.available.len() {
log::error!(
tracing::error!(
"Some command buffers are still recorded, only tracking {} / {}",
pool.available.len(),
pool.total
@ -242,3 +253,9 @@ impl<B: hal::Backend> CommandAllocator<B> {
}
}
}
/// Errors from the command allocator; currently only wraps device-level
/// failures (e.g. out-of-memory when creating a command pool).
#[derive(Clone, Debug, Error)]
pub enum CommandAllocatorError {
    #[error(transparent)]
    Device(#[from] DeviceError),
}

Просмотреть файл

@ -3,29 +3,29 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
binding_model::BindGroup,
hub::GfxBackend,
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId},
Stored,
binding_model::{BindGroup, PipelineLayout},
device::SHADER_STAGE_COUNT,
hub::{GfxBackend, Storage},
id::{BindGroupId, BindGroupLayoutId, PipelineLayoutId, Valid},
Stored, MAX_BIND_GROUPS,
};
use smallvec::{smallvec, SmallVec};
use arrayvec::ArrayVec;
use std::slice;
use wgt::DynamicOffset;
pub const DEFAULT_BIND_GROUPS: usize = 4;
type BindGroupMask = u8;
#[derive(Clone, Debug)]
pub struct BindGroupPair {
layout_id: BindGroupLayoutId,
pub(super) struct BindGroupPair {
layout_id: Valid<BindGroupLayoutId>,
group_id: Stored<BindGroupId>,
}
#[derive(Debug)]
pub enum LayoutChange<'a> {
pub(super) enum LayoutChange<'a> {
Unchanged,
Match(BindGroupId, &'a [DynamicOffset]),
Match(Valid<BindGroupId>, &'a [DynamicOffset]),
Mismatch,
}
@ -36,11 +36,11 @@ pub enum Provision {
}
#[derive(Clone)]
pub struct FollowUpIter<'a> {
pub(super) struct FollowUpIter<'a> {
iter: slice::Iter<'a, BindGroupEntry>,
}
impl<'a> Iterator for FollowUpIter<'a> {
type Item = (BindGroupId, &'a [DynamicOffset]);
type Item = (Valid<BindGroupId>, &'a [DynamicOffset]);
fn next(&mut self) -> Option<Self::Item> {
self.iter
.next()
@ -49,8 +49,8 @@ impl<'a> Iterator for FollowUpIter<'a> {
}
#[derive(Clone, Default, Debug)]
pub struct BindGroupEntry {
expected_layout_id: Option<BindGroupLayoutId>,
pub(super) struct BindGroupEntry {
expected_layout_id: Option<Valid<BindGroupLayoutId>>,
provided: Option<BindGroupPair>,
dynamic_offsets: Vec<DynamicOffset>,
}
@ -58,11 +58,11 @@ pub struct BindGroupEntry {
impl BindGroupEntry {
fn provide<B: GfxBackend>(
&mut self,
bind_group_id: BindGroupId,
bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>,
offsets: &[DynamicOffset],
) -> Provision {
debug_assert_eq!(B::VARIANT, bind_group_id.backend());
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
let was_compatible = match self.provided {
Some(BindGroupPair {
@ -85,14 +85,16 @@ impl BindGroupEntry {
ref_count: bind_group.life_guard.add_ref(),
},
});
//TODO: validate the count of dynamic offsets to match the layout
self.dynamic_offsets.clear();
self.dynamic_offsets.extend_from_slice(offsets);
Provision::Changed { was_compatible }
}
pub fn expect_layout(&mut self, bind_group_layout_id: BindGroupLayoutId) -> LayoutChange {
pub fn expect_layout(
&mut self,
bind_group_layout_id: Valid<BindGroupLayoutId>,
) -> LayoutChange {
let some = Some(bind_group_layout_id);
if self.expected_layout_id != some {
self.expected_layout_id = some;
@ -110,15 +112,16 @@ impl BindGroupEntry {
}
}
fn is_valid(&self) -> bool {
fn is_valid(&self) -> Option<bool> {
match (self.expected_layout_id, self.provided.as_ref()) {
(None, _) => true,
(Some(_), None) => false,
(Some(layout), Some(pair)) => layout == pair.layout_id,
(None, None) => Some(true),
(None, Some(_)) => None,
(Some(_), None) => Some(false),
(Some(layout), Some(pair)) => Some(layout == pair.layout_id),
}
}
fn actual_value(&self) -> Option<BindGroupId> {
fn actual_value(&self) -> Option<Valid<BindGroupId>> {
self.expected_layout_id.and_then(|layout_id| {
self.provided.as_ref().and_then(|pair| {
if pair.layout_id == layout_id {
@ -133,19 +136,44 @@ impl BindGroupEntry {
#[derive(Debug)]
pub struct Binder {
pub(crate) pipeline_layout_id: Option<PipelineLayoutId>, //TODO: strongly `Stored`
pub(crate) entries: SmallVec<[BindGroupEntry; DEFAULT_BIND_GROUPS]>,
pub(super) pipeline_layout_id: Option<Valid<PipelineLayoutId>>, //TODO: strongly `Stored`
pub(super) entries: ArrayVec<[BindGroupEntry; MAX_BIND_GROUPS]>,
}
impl Binder {
pub(crate) fn new(max_bind_groups: u32) -> Self {
pub(super) fn new(max_bind_groups: u32) -> Self {
Self {
pipeline_layout_id: None,
entries: smallvec![Default::default(); max_bind_groups as usize],
entries: (0..max_bind_groups)
.map(|_| BindGroupEntry::default())
.collect(),
}
}
pub(crate) fn reset_expectations(&mut self, length: usize) {
pub(super) fn reset(&mut self) {
self.pipeline_layout_id = None;
self.entries.clear();
}
pub(super) fn change_pipeline_layout<B: GfxBackend>(
&mut self,
guard: &Storage<PipelineLayout<B>, PipelineLayoutId>,
new_id: Valid<PipelineLayoutId>,
) {
let old_id_opt = self.pipeline_layout_id.replace(new_id);
let new = &guard[new_id];
let length = if let Some(old_id) = old_id_opt {
let old = &guard[old_id];
if old.push_constant_ranges == new.push_constant_ranges {
new.bind_group_layout_ids.len()
} else {
0
}
} else {
0
};
for entry in self.entries[length..].iter_mut() {
entry.expected_layout_id = None;
}
@ -156,15 +184,15 @@ impl Binder {
/// (i.e. compatible with current expectations). Also returns an iterator
/// of bind group IDs to be bound with it: those are compatible bind groups
/// that were previously blocked because the current one was incompatible.
pub(crate) fn provide_entry<'a, B: GfxBackend>(
pub(super) fn provide_entry<'a, B: GfxBackend>(
&'a mut self,
index: usize,
bind_group_id: BindGroupId,
bind_group_id: Valid<BindGroupId>,
bind_group: &BindGroup<B>,
offsets: &[DynamicOffset],
) -> Option<(PipelineLayoutId, FollowUpIter<'a>)> {
log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(B::VARIANT, bind_group_id.backend());
) -> Option<(Valid<PipelineLayoutId>, FollowUpIter<'a>)> {
tracing::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(B::VARIANT, bind_group_id.0.backend());
match self.entries[index].provide(bind_group_id, bind_group, offsets) {
Provision::Unchanged => None,
@ -176,7 +204,7 @@ impl Binder {
} else {
self.entries.len()
});
log::trace!("\t\tbinding up to {}", end);
tracing::trace!("\t\tbinding up to {}", end);
Some((
self.pipeline_layout_id?,
FollowUpIter {
@ -184,16 +212,23 @@ impl Binder {
},
))
} else {
log::trace!("\t\tskipping above compatible {}", compatible_count);
tracing::trace!("\t\tskipping above compatible {}", compatible_count);
None
}
}
}
}
pub(crate) fn invalid_mask(&self) -> BindGroupMask {
pub(super) fn list_active(&self) -> impl Iterator<Item = Valid<BindGroupId>> + '_ {
self.entries.iter().filter_map(|e| match e.provided {
Some(ref pair) if e.expected_layout_id.is_some() => Some(pair.group_id.value),
_ => None,
})
}
pub(super) fn invalid_mask(&self) -> BindGroupMask {
self.entries.iter().enumerate().fold(0, |mask, (i, entry)| {
if entry.is_valid() {
if entry.is_valid().unwrap_or(true) {
mask
} else {
mask | 1u8 << i
@ -204,7 +239,57 @@ impl Binder {
fn compatible_count(&self) -> usize {
self.entries
.iter()
.position(|entry| !entry.is_valid())
.position(|entry| !entry.is_valid().unwrap_or(false))
.unwrap_or_else(|| self.entries.len())
}
}
struct PushConstantChange {
stages: wgt::ShaderStage,
offset: u32,
enable: bool,
}
/// Break up possibly overlapping push constant ranges into a set of non-overlapping ranges
/// which contain all the stage flags of the original ranges. This allows us to zero out (or write any value)
/// to every possible value.
pub fn compute_nonoverlapping_ranges(
ranges: &[wgt::PushConstantRange],
) -> ArrayVec<[wgt::PushConstantRange; SHADER_STAGE_COUNT * 2]> {
if ranges.is_empty() {
return ArrayVec::new();
}
debug_assert!(ranges.len() <= SHADER_STAGE_COUNT);
let mut breaks: ArrayVec<[PushConstantChange; SHADER_STAGE_COUNT * 2]> = ArrayVec::new();
for range in ranges {
breaks.push(PushConstantChange {
stages: range.stages,
offset: range.range.start,
enable: true,
});
breaks.push(PushConstantChange {
stages: range.stages,
offset: range.range.end,
enable: false,
});
}
breaks.sort_unstable_by_key(|change| change.offset);
let mut output_ranges = ArrayVec::new();
let mut position = 0_u32;
let mut stages = wgt::ShaderStage::NONE;
for bk in breaks {
if bk.offset - position > 0 && !stages.is_empty() {
output_ranges.push(wgt::PushConstantRange {
stages,
range: position..bk.offset,
})
}
position = bk.offset;
stages.set(bk.stages, bk.enable);
}
output_ranges
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -3,62 +3,98 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
binding_model::{BindError, BindGroup, PushConstantUploadError},
command::{
bind::{Binder, LayoutChange},
CommandBuffer, PhantomSlice,
BasePass, BasePassRef, CommandBuffer, CommandEncoderError, StateChange,
},
device::all_buffer_stages,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id,
resource::BufferUse,
resource::{Buffer, BufferUse, Texture},
span,
track::{TrackerSet, UsageConflict},
validation::{check_buffer_usage, MissingBufferUsageError},
MAX_BIND_GROUPS,
};
use arrayvec::ArrayVec;
use hal::command::CommandBuffer as _;
use peek_poke::{Peek, PeekPoke, Poke};
use wgt::{BufferAddress, BufferUsage, DynamicOffset, BIND_BUFFER_ALIGNMENT};
use thiserror::Error;
use wgt::{BufferAddress, BufferUsage, ShaderStage};
use std::iter;
use std::{fmt, iter, str};
#[derive(Debug, PartialEq)]
enum PipelineState {
Required,
Set,
}
#[derive(Clone, Copy, Debug, PeekPoke)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
any(feature = "serial-pass", feature = "trace"),
derive(serde::Serialize)
)]
#[cfg_attr(
any(feature = "serial-pass", feature = "replay"),
derive(serde::Deserialize)
)]
pub enum ComputeCommand {
SetBindGroup {
index: u8,
num_dynamic_offsets: u8,
bind_group_id: id::BindGroupId,
#[cfg_attr(any(feature = "trace", feature = "replay"), serde(skip))]
phantom_offsets: PhantomSlice<DynamicOffset>,
},
SetPipeline(id::ComputePipelineId),
SetPushConstant {
offset: u32,
size_bytes: u32,
values_offset: u32,
},
Dispatch([u32; 3]),
DispatchIndirect {
buffer_id: id::BufferId,
offset: BufferAddress,
},
End,
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
}
impl Default for ComputeCommand {
fn default() -> Self {
ComputeCommand::End
#[cfg_attr(feature = "serial-pass", derive(serde::Deserialize, serde::Serialize))]
pub struct ComputePass {
base: BasePass<ComputeCommand>,
parent_id: id::CommandEncoderId,
}
impl ComputePass {
pub fn new(parent_id: id::CommandEncoderId) -> Self {
Self {
base: BasePass::new(),
parent_id,
}
}
pub fn parent_id(&self) -> id::CommandEncoderId {
self.parent_id
}
#[cfg(feature = "trace")]
pub fn into_command(self) -> crate::device::trace::Command {
crate::device::trace::Command::RunComputePass { base: self.base }
}
}
impl super::RawPass {
pub unsafe fn new_compute(parent: id::CommandEncoderId) -> Self {
Self::from_vec(Vec::<ComputeCommand>::with_capacity(1), parent)
}
pub unsafe fn finish_compute(mut self) -> (Vec<u8>, id::CommandEncoderId) {
self.finish(ComputeCommand::End);
self.into_vec()
impl fmt::Debug for ComputePass {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"ComputePass {{ encoder_id: {:?}, data: {:?} commands and {:?} dynamic offsets }}",
self.parent_id,
self.base.commands.len(),
self.base.dynamic_offsets.len()
)
}
}
@ -68,131 +104,227 @@ pub struct ComputePassDescriptor {
pub todo: u32,
}
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DispatchError {
#[error("compute pipeline must be set")]
MissingPipeline,
#[error("current compute pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
}
#[derive(Clone, Debug, Error)]
pub enum ComputePassError {
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error("bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("compute pipeline {0:?} is invalid")]
InvalidPipeline(id::ComputePipelineId),
#[error("indirect buffer {0:?} is invalid or destroyed")]
InvalidIndirectBuffer(id::BufferId),
#[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("cannot pop debug group, because number of pushed debug groups is zero")]
InvalidPopDebugGroup,
#[error(transparent)]
Dispatch(#[from] DispatchError),
#[error(transparent)]
Bind(#[from] BindError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
}
#[derive(Debug)]
struct State {
binder: Binder,
pipeline: StateChange<id::ComputePipelineId>,
trackers: TrackerSet,
debug_scope_depth: u32,
}
impl State {
fn is_ready(&self) -> Result<(), DispatchError> {
//TODO: vertex buffers
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
//let (expected, provided) = self.binder.entries[index as usize].info();
return Err(DispatchError::IncompatibleBindGroup {
index: bind_mask.trailing_zeros(),
});
}
if self.pipeline.is_unset() {
return Err(DispatchError::MissingPipeline);
}
Ok(())
}
fn flush_states<B: GfxBackend>(
&mut self,
raw_cmd_buf: &mut B::CommandBuffer,
base_trackers: &mut TrackerSet,
bind_group_guard: &Storage<BindGroup<B>, id::BindGroupId>,
buffer_guard: &Storage<Buffer<B>, id::BufferId>,
texture_guard: &Storage<Texture<B>, id::TextureId>,
) -> Result<(), UsageConflict> {
for id in self.binder.list_active() {
self.trackers.merge_extend(&bind_group_guard[id].used)?;
}
tracing::trace!("Encoding dispatch barriers");
CommandBuffer::insert_barriers(
raw_cmd_buf,
base_trackers,
&self.trackers,
buffer_guard,
texture_guard,
);
self.trackers.clear();
Ok(())
}
}
// Common routines between render/compute
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_run_compute_pass<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
raw_data: &[u8],
) {
pass: &ComputePass,
) -> Result<(), ComputePassError> {
self.command_encoder_run_compute_pass_impl::<B>(encoder_id, pass.base.as_ref())
}
#[doc(hidden)]
pub fn command_encoder_run_compute_pass_impl<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
mut base: BasePassRef<ComputeCommand>,
) -> Result<(), ComputePassError> {
span!(_guard, INFO, "CommandEncoder::run_compute_pass");
let hub = B::hub(self);
let mut token = Token::root();
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
let cmb = &mut cmb_guard[encoder_id];
let raw = cmb.raw.last_mut().unwrap();
let mut binder = Binder::new(cmb.limits.max_bind_groups);
let (mut cmd_buf_guard, mut token) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
let raw = cmd_buf.raw.last_mut().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf.commands {
list.push(crate::device::trace::Command::RunComputePass {
base: BasePass::from_ref(base),
});
}
let (_, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.compute_pipelines.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let mut pipeline_state = PipelineState::Required;
let mut state = State {
binder: Binder::new(cmd_buf.limits.max_bind_groups),
pipeline: StateChange::new(),
trackers: TrackerSet::new(B::VARIANT),
debug_scope_depth: 0,
};
let mut temp_offsets = Vec::new();
let mut peeker = raw_data.as_ptr();
let raw_data_end = unsafe { raw_data.as_ptr().add(raw_data.len()) };
let mut command = ComputeCommand::Dispatch([0; 3]); // dummy
loop {
assert!(unsafe { peeker.add(ComputeCommand::max_size()) } <= raw_data_end);
peeker = unsafe { ComputeCommand::peek_from(peeker, &mut command) };
match command {
for command in base.commands {
match *command {
ComputeCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group_id,
phantom_offsets,
} => {
let (new_peeker, offsets) = unsafe {
phantom_offsets.decode_unaligned(
peeker,
num_dynamic_offsets as usize,
raw_data_end,
)
};
peeker = new_peeker;
if cfg!(debug_assertions) {
for off in offsets {
assert_eq!(
*off as BufferAddress % BIND_BUFFER_ALIGNMENT,
0,
"Misaligned dynamic buffer offset: {} does not align with {}",
off,
BIND_BUFFER_ALIGNMENT
);
}
let max_bind_groups = cmd_buf.limits.max_bind_groups;
if (index as u32) >= max_bind_groups {
return Err(ComputePassError::BindGroupIndexOutOfRange {
index,
max: max_bind_groups,
});
}
let bind_group = cmb
temp_offsets.clear();
temp_offsets
.extend_from_slice(&base.dynamic_offsets[..num_dynamic_offsets as usize]);
base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
let bind_group = cmd_buf
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.unwrap();
assert_eq!(bind_group.dynamic_count, offsets.len());
.map_err(|_| ComputePassError::InvalidBindGroup(bind_group_id))?;
bind_group.validate_dynamic_bindings(&temp_offsets)?;
log::trace!(
"Encoding barriers on binding of {:?} to {:?}",
bind_group_id,
encoder_id
);
CommandBuffer::insert_barriers(
raw,
&mut cmb.trackers,
&bind_group.used,
&*buffer_guard,
&*texture_guard,
);
if let Some((pipeline_layout_id, follow_ups)) =
binder.provide_entry(index as usize, bind_group_id, bind_group, offsets)
{
let bind_groups = iter::once(bind_group.raw.raw()).chain(
follow_ups
.clone()
.map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
);
if let Some((pipeline_layout_id, follow_ups)) = state.binder.provide_entry(
index as usize,
id::Valid(bind_group_id),
bind_group,
&temp_offsets,
) {
let bind_groups = iter::once(bind_group.raw.raw())
.chain(
follow_ups
.clone()
.map(|(bg_id, _)| bind_group_guard[bg_id].raw.raw()),
)
.collect::<ArrayVec<[_; MAX_BIND_GROUPS]>>();
temp_offsets.extend(follow_ups.flat_map(|(_, offsets)| offsets));
unsafe {
raw.bind_compute_descriptor_sets(
&pipeline_layout_guard[pipeline_layout_id].raw,
index as usize,
bind_groups,
offsets
.iter()
.chain(follow_ups.flat_map(|(_, offsets)| offsets))
.cloned(),
&temp_offsets,
);
}
}
}
ComputeCommand::SetPipeline(pipeline_id) => {
pipeline_state = PipelineState::Set;
let pipeline = cmb
if state.pipeline.set_and_check_redundant(pipeline_id) {
continue;
}
let pipeline = cmd_buf
.trackers
.compute_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.unwrap();
.map_err(|_| ComputePassError::InvalidPipeline(pipeline_id))?;
unsafe {
raw.bind_compute_pipeline(&pipeline.raw);
}
// Rebind resources
if binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
if state.binder.pipeline_layout_id != Some(pipeline.layout_id.value) {
let pipeline_layout = &pipeline_layout_guard[pipeline.layout_id.value];
binder.pipeline_layout_id = Some(pipeline.layout_id.value);
binder.reset_expectations(pipeline_layout.bind_group_layout_ids.len());
state.binder.change_pipeline_layout(
&*pipeline_layout_guard,
pipeline.layout_id.value,
);
let mut is_compatible = true;
for (index, (entry, bgl_id)) in binder
for (index, (entry, &bgl_id)) in state
.binder
.entries
.iter_mut()
.zip(&pipeline_layout.bind_group_layout_ids)
.enumerate()
{
match entry.expect_layout(bgl_id.value) {
match entry.expect_layout(bgl_id) {
LayoutChange::Match(bg_id, offsets) if is_compatible => {
let desc_set = bind_group_guard[bg_id].raw.raw();
unsafe {
@ -210,93 +342,128 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
}
// Clear push constant ranges
let non_overlapping = super::bind::compute_nonoverlapping_ranges(
&pipeline_layout.push_constant_ranges,
);
for range in non_overlapping {
let offset = range.range.start;
let size_bytes = range.range.end - offset;
super::push_constant_clear(
offset,
size_bytes,
|clear_offset, clear_data| unsafe {
raw.push_compute_constants(
&pipeline_layout.raw,
clear_offset,
clear_data,
);
},
);
}
}
}
ComputeCommand::SetPushConstant {
offset,
size_bytes,
values_offset,
} => {
let end_offset_bytes = offset + size_bytes;
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice =
&base.push_constant_data[(values_offset as usize)..values_end_offset];
let pipeline_layout_id = state
.binder
.pipeline_layout_id
//TODO: don't error here, lazily update the push constants
.ok_or(ComputePassError::Dispatch(DispatchError::MissingPipeline))?;
let pipeline_layout = &pipeline_layout_guard[pipeline_layout_id];
pipeline_layout
.validate_push_constant_ranges(
ShaderStage::COMPUTE,
offset,
end_offset_bytes,
)
.map_err(ComputePassError::from)?;
unsafe { raw.push_compute_constants(&pipeline_layout.raw, offset, data_slice) }
}
ComputeCommand::Dispatch(groups) => {
assert_eq!(
pipeline_state,
PipelineState::Set,
"Dispatch error: Pipeline is missing"
);
state.is_ready()?;
state.flush_states(
raw,
&mut cmd_buf.trackers,
&*bind_group_guard,
&*buffer_guard,
&*texture_guard,
)?;
unsafe {
raw.dispatch(groups);
}
}
ComputeCommand::DispatchIndirect { buffer_id, offset } => {
assert_eq!(
pipeline_state,
PipelineState::Set,
"Dispatch error: Pipeline is missing"
);
let (src_buffer, src_pending) = cmb.trackers.buffers.use_replace(
state.is_ready()?;
let indirect_buffer = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), BufferUse::INDIRECT)
.map_err(|_| ComputePassError::InvalidIndirectBuffer(buffer_id))?;
check_buffer_usage(indirect_buffer.usage, BufferUsage::INDIRECT)?;
let &(ref buf_raw, _) = indirect_buffer
.raw
.as_ref()
.ok_or(ComputePassError::InvalidIndirectBuffer(buffer_id))?;
state.flush_states(
raw,
&mut cmd_buf.trackers,
&*bind_group_guard,
&*buffer_guard,
buffer_id,
(),
BufferUse::INDIRECT,
);
assert!(src_buffer.usage.contains(BufferUsage::INDIRECT));
let barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
&*texture_guard,
)?;
unsafe {
raw.pipeline_barrier(
all_buffer_stages()..all_buffer_stages(),
hal::memory::Dependencies::empty(),
barriers,
);
raw.dispatch_indirect(&src_buffer.raw, offset);
raw.dispatch_indirect(buf_raw, offset);
}
}
ComputeCommand::End => break,
ComputeCommand::PushDebugGroup { color, len } => {
state.debug_scope_depth += 1;
let label = str::from_utf8(&base.string_data[..len]).unwrap();
unsafe {
raw.begin_debug_marker(label, color);
}
base.string_data = &base.string_data[len..];
}
ComputeCommand::PopDebugGroup => {
if state.debug_scope_depth == 0 {
return Err(ComputePassError::InvalidPopDebugGroup);
}
state.debug_scope_depth -= 1;
unsafe {
raw.end_debug_marker();
}
}
ComputeCommand::InsertDebugMarker { color, len } => {
let label = str::from_utf8(&base.string_data[..len]).unwrap();
unsafe { raw.insert_debug_marker(label, color) }
base.string_data = &base.string_data[len..];
}
}
}
#[cfg(feature = "trace")]
match cmb.commands {
Some(ref mut list) => {
let mut pass_commands = Vec::new();
let mut pass_dynamic_offsets = Vec::new();
peeker = raw_data.as_ptr();
loop {
peeker = unsafe { ComputeCommand::peek_from(peeker, &mut command) };
match command {
ComputeCommand::SetBindGroup {
num_dynamic_offsets,
phantom_offsets,
..
} => {
let (new_peeker, offsets) = unsafe {
phantom_offsets.decode_unaligned(
peeker,
num_dynamic_offsets as usize,
raw_data_end,
)
};
peeker = new_peeker;
pass_dynamic_offsets.extend_from_slice(offsets);
}
ComputeCommand::End => break,
_ => {}
}
pass_commands.push(command);
}
list.push(crate::device::trace::Command::RunComputePass {
commands: pass_commands,
dynamic_offsets: pass_dynamic_offsets,
});
}
None => {}
}
Ok(())
}
}
pub mod compute_ffi {
use super::{
super::{PhantomSlice, RawPass},
ComputeCommand,
};
use crate::{id, RawString};
use std::{convert::TryInto, slice};
use super::{ComputeCommand, ComputePass};
use crate::{id, span, RawString};
use std::{convert::TryInto, ffi, slice};
use wgt::{BufferAddress, DynamicOffset};
/// # Safety
@ -307,73 +474,130 @@ pub mod compute_ffi {
// `RawPass::encode` and `RawPass::encode_slice`.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_set_bind_group(
pass: &mut RawPass,
pass: &mut ComputePass,
index: u32,
bind_group_id: id::BindGroupId,
offsets: *const DynamicOffset,
offset_length: usize,
) {
pass.encode(&ComputeCommand::SetBindGroup {
span!(_guard, DEBUG, "ComputePass::set_bind_group");
pass.base.commands.push(ComputeCommand::SetBindGroup {
index: index.try_into().unwrap(),
num_dynamic_offsets: offset_length.try_into().unwrap(),
bind_group_id,
phantom_offsets: PhantomSlice::default(),
});
pass.encode_slice(slice::from_raw_parts(offsets, offset_length));
pass.base
.dynamic_offsets
.extend_from_slice(slice::from_raw_parts(offsets, offset_length));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_set_pipeline(
pass: &mut RawPass,
pub extern "C" fn wgpu_compute_pass_set_pipeline(
pass: &mut ComputePass,
pipeline_id: id::ComputePipelineId,
) {
pass.encode(&ComputeCommand::SetPipeline(pipeline_id));
span!(_guard, DEBUG, "ComputePass::set_pipeline");
pass.base
.commands
.push(ComputeCommand::SetPipeline(pipeline_id));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_dispatch(
pass: &mut RawPass,
pub unsafe extern "C" fn wgpu_compute_pass_set_push_constant(
pass: &mut ComputePass,
offset: u32,
size_bytes: u32,
data: *const u8,
) {
span!(_guard, DEBUG, "ComputePass::set_push_constant");
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant offset must be aligned to 4 bytes."
);
assert_eq!(
size_bytes & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant size must be aligned to 4 bytes."
);
let data_slice = slice::from_raw_parts(data, size_bytes as usize);
let value_offset = pass.base.push_constant_data.len().try_into().expect(
"Ran out of push constant space. Don't set 4gb of push constants per ComputePass.",
);
pass.base.push_constant_data.extend(
data_slice
.chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
.map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
);
pass.base.commands.push(ComputeCommand::SetPushConstant {
offset,
size_bytes,
values_offset: value_offset,
});
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_dispatch(
pass: &mut ComputePass,
groups_x: u32,
groups_y: u32,
groups_z: u32,
) {
pass.encode(&ComputeCommand::Dispatch([groups_x, groups_y, groups_z]));
span!(_guard, DEBUG, "ComputePass::dispatch");
pass.base
.commands
.push(ComputeCommand::Dispatch([groups_x, groups_y, groups_z]));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_dispatch_indirect(
pass: &mut RawPass,
pub extern "C" fn wgpu_compute_pass_dispatch_indirect(
pass: &mut ComputePass,
buffer_id: id::BufferId,
offset: BufferAddress,
) {
pass.encode(&ComputeCommand::DispatchIndirect { buffer_id, offset });
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_push_debug_group(_pass: &mut RawPass, _label: RawString) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(_pass: &mut RawPass) {
//TODO
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_insert_debug_marker(
_pass: &mut RawPass,
_label: RawString,
) {
//TODO
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_finish(
pass: &mut RawPass,
length: &mut usize,
) -> *const u8 {
pass.finish(ComputeCommand::End);
*length = pass.size();
span!(_guard, DEBUG, "ComputePass::dispatch_indirect");
pass.base
.commands
.push(ComputeCommand::DispatchIndirect { buffer_id, offset });
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_push_debug_group(
pass: &mut ComputePass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "ComputePass::push_debug_group");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
pass.base.commands.push(ComputeCommand::PushDebugGroup {
color,
len: bytes.len(),
});
}
#[no_mangle]
pub extern "C" fn wgpu_compute_pass_pop_debug_group(pass: &mut ComputePass) {
span!(_guard, DEBUG, "ComputePass::pop_debug_group");
pass.base.commands.push(ComputeCommand::PopDebugGroup);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_insert_debug_marker(
pass: &mut ComputePass,
label: RawString,
color: u32,
) {
span!(_guard, DEBUG, "ComputePass::insert_debug_marker");
let bytes = ffi::CStr::from_ptr(label).to_bytes();
pass.base.string_data.extend_from_slice(bytes);
pass.base.commands.push(ComputeCommand::InsertDebugMarker {
color,
len: bytes.len(),
});
}
}

Просмотреть файл

@ -0,0 +1,180 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*! Draw structures - shared between render passes and bundles.
!*/
use crate::{
binding_model::PushConstantUploadError,
id,
resource::BufferUse,
track::UseExtendError,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
use wgt::{BufferAddress, BufferSize, Color};
use std::num::NonZeroU32;
use thiserror::Error;
pub type BufferError = UseExtendError<BufferUse>;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DrawError {
#[error("blend color needs to be set")]
MissingBlendColor,
#[error("render pipeline must be set")]
MissingPipeline,
#[error("current render pipeline has a layout which is incompatible with a currently set bind group, first differing at entry index {index}")]
IncompatibleBindGroup {
index: u32,
//expected: BindGroupLayoutId,
//provided: Option<(BindGroupLayoutId, BindGroupId)>,
},
#[error("vertex {last_vertex} extends beyond limit {vertex_limit}")]
VertexBeyondLimit { last_vertex: u32, vertex_limit: u32 },
#[error("instance {last_instance} extends beyond limit {instance_limit}")]
InstanceBeyondLimit {
last_instance: u32,
instance_limit: u32,
},
#[error("index {last_index} extends beyond limit {index_limit}")]
IndexBeyondLimit { last_index: u32, index_limit: u32 },
}
/// Error encountered when encoding a render command.
/// This is the shared error set between render bundles and passes.
#[derive(Clone, Debug, Error)]
pub enum RenderCommandError {
#[error("bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error("bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u8, max: u32 },
#[error("dynamic buffer offset {0} does not respect `BIND_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(u64),
#[error("number of buffer offsets ({actual}) does not match the number of dynamic bindings ({expected})")]
InvalidDynamicOffsetCount { actual: usize, expected: usize },
#[error("render pipeline {0:?} is invalid")]
InvalidPipeline(id::RenderPipelineId),
#[error("render pipeline is incompatible, {0}")]
IncompatiblePipeline(#[from] crate::device::RenderPassCompatibilityError),
#[error("pipeline is not compatible with the depth-stencil read-only render pass")]
IncompatibleReadOnlyDepthStencil,
#[error("buffer {0:?} is in error {1:?}")]
Buffer(id::BufferId, BufferError),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error(transparent)]
MissingTextureUsage(#[from] MissingTextureUsageError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
#[error("Invalid Viewport parameters")]
InvalidViewport,
#[error("Invalid ScissorRect parameters")]
InvalidScissorRect,
}
#[derive(Clone, Copy, Debug, Default)]
#[cfg_attr(
any(feature = "serial-pass", feature = "trace"),
derive(serde::Serialize)
)]
#[cfg_attr(
any(feature = "serial-pass", feature = "replay"),
derive(serde::Deserialize)
)]
pub struct Rect<T> {
pub x: T,
pub y: T,
pub w: T,
pub h: T,
}
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(
any(feature = "serial-pass", feature = "trace"),
derive(serde::Serialize)
)]
#[cfg_attr(
any(feature = "serial-pass", feature = "replay"),
derive(serde::Deserialize)
)]
pub enum RenderCommand {
SetBindGroup {
index: u8,
num_dynamic_offsets: u8,
bind_group_id: id::BindGroupId,
},
SetPipeline(id::RenderPipelineId),
SetIndexBuffer {
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendColor(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
//TODO: use half-float to reduce the size?
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
SetPushConstant {
stages: wgt::ShaderStage,
offset: u32,
size_bytes: u32,
/// None means there is no data and the data should be an array of zeros.
///
/// Facilitates clears in renderbundles which explicitly do their clears.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer_id: id::BufferId,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer_id: id::BufferId,
offset: BufferAddress,
count_buffer_id: id::BufferId,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
ExecuteBundle(id::RenderBundleId),
}

Просмотреть файл

@ -4,152 +4,36 @@
mod allocator;
mod bind;
mod bundle;
mod compute;
mod draw;
mod render;
mod transfer;
pub(crate) use self::allocator::CommandAllocator;
pub use self::allocator::CommandAllocatorError;
pub use self::bundle::*;
pub use self::compute::*;
pub use self::draw::*;
pub use self::render::*;
pub use self::transfer::*;
use crate::{
device::{all_buffer_stages, all_image_stages, MAX_COLOR_TARGETS},
device::{all_buffer_stages, all_image_stages},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Storage, Token},
id,
resource::{Buffer, Texture},
span,
track::TrackerSet,
PrivateFeatures, Stored,
Label, PrivateFeatures, Stored,
};
use peek_poke::PeekPoke;
use hal::command::CommandBuffer as _;
use thiserror::Error;
use std::{marker::PhantomData, mem, ptr, slice, thread::ThreadId};
use std::thread::ThreadId;
#[derive(Clone, Copy, Debug, PeekPoke)]
pub struct PhantomSlice<T>(PhantomData<T>);
impl<T> Default for PhantomSlice<T> {
fn default() -> Self {
PhantomSlice(PhantomData)
}
}
impl<T> PhantomSlice<T> {
unsafe fn decode_unaligned<'a>(
self,
pointer: *const u8,
count: usize,
bound: *const u8,
) -> (*const u8, &'a [T]) {
let align_offset = pointer.align_offset(mem::align_of::<T>());
let aligned = pointer.add(align_offset);
let size = count * mem::size_of::<T>();
let end = aligned.add(size);
assert!(
end <= bound,
"End of phantom slice ({:?}) exceeds bound ({:?})",
end,
bound
);
(end, slice::from_raw_parts(aligned as *const T, count))
}
}
#[repr(C)]
pub struct RawPass {
data: *mut u8,
base: *mut u8,
capacity: usize,
parent: id::CommandEncoderId,
}
impl RawPass {
    /// Takes ownership of `vec`'s allocation and wraps it as a `RawPass`
    /// whose write cursor starts at the beginning of the buffer.
    fn from_vec<T>(mut vec: Vec<T>, encoder_id: id::CommandEncoderId) -> Self {
        let ptr = vec.as_mut_ptr() as *mut u8;
        // Capacity is tracked in bytes, independent of the element type `T`.
        let capacity = vec.capacity() * mem::size_of::<T>();
        // The allocation now belongs to `RawPass`; it is reconstituted (and
        // eventually freed) by `invalidate`/`into_vec`/`ensure_extra_size`.
        mem::forget(vec);
        RawPass {
            data: ptr,
            base: ptr,
            capacity,
            parent: encoder_id,
        }
    }
    /// Finish encoding a raw pass.
    ///
    /// The last command is provided, yet the encoder
    /// is guaranteed to have exactly `C::max_size()` space for it.
    unsafe fn finish<C: peek_poke::Poke>(&mut self, command: C) {
        self.ensure_extra_size(C::max_size());
        let extended_end = self.data.add(C::max_size());
        let end = command.poke_into(self.data);
        // Zero the slack between the actual encoded size and `max_size()`
        // so the trailing bytes are deterministic.
        ptr::write_bytes(end, 0, extended_end as usize - end as usize);
        self.data = extended_end;
    }
    /// Number of bytes written so far.
    fn size(&self) -> usize {
        self.data as usize - self.base as usize
    }
    /// Recover the data vector of the pass, consuming `self`.
    unsafe fn into_vec(mut self) -> (Vec<u8>, id::CommandEncoderId) {
        (self.invalidate(), self.parent)
    }
    /// Make pass contents invalid, return the contained data.
    ///
    /// Any following access to the pass will result in a crash
    /// for accessing address 0.
    pub unsafe fn invalidate(&mut self) -> Vec<u8> {
        let size = self.size();
        assert!(
            size <= self.capacity,
            "Size of RawPass ({}) exceeds capacity ({})",
            size,
            self.capacity
        );
        // Rebuild the Vec from the raw parts taken in `from_vec`, then null
        // out the pointers so later use faults instead of aliasing the Vec.
        let vec = Vec::from_raw_parts(self.base, size, self.capacity);
        self.data = ptr::null_mut();
        self.base = ptr::null_mut();
        self.capacity = 0;
        vec
    }
    /// Grows the buffer (via a temporary `Vec`) until at least `extra_size`
    /// more bytes can be written past the current cursor.
    unsafe fn ensure_extra_size(&mut self, extra_size: usize) {
        let size = self.size();
        if size + extra_size > self.capacity {
            let mut vec = Vec::from_raw_parts(self.base, size, self.capacity);
            vec.reserve(extra_size);
            //let (data, size, capacity) = vec.into_raw_parts(); //TODO: when stable
            // Re-capture the (possibly moved) allocation and leak it again.
            self.data = vec.as_mut_ptr().add(vec.len());
            self.base = vec.as_mut_ptr();
            self.capacity = vec.capacity();
            mem::forget(vec);
        }
    }
    /// Appends one command, advancing the cursor by its encoded size.
    #[inline]
    pub unsafe fn encode<C: peek_poke::Poke>(&mut self, command: &C) {
        self.ensure_extra_size(C::max_size());
        self.data = command.poke_into(self.data);
    }
    /// Appends a slice of plain-old-data values, first padding the cursor up
    /// to `T`'s alignment so the copy is properly aligned.
    #[inline]
    pub unsafe fn encode_slice<T: Copy>(&mut self, data: &[T]) {
        let align_offset = self.data.align_offset(mem::align_of::<T>());
        let extra = align_offset + mem::size_of::<T>() * data.len();
        self.ensure_extra_size(extra);
        slice::from_raw_parts_mut(self.data.add(align_offset) as *mut T, data.len())
            .copy_from_slice(data);
        self.data = self.data.add(extra);
    }
}
/// A finished recording of render commands, owning its backend command buffer.
pub struct RenderBundle<B: hal::Backend> {
    // Held only to keep the backend command buffer alive; never read here.
    _raw: B::CommandBuffer,
}
/// Zero-filled scratch written in chunks of up to 64 words when clearing
/// push-constant ranges (see `push_constant_clear`).
const PUSH_CONSTANT_CLEAR_ARRAY: &[u32] = &[0_u32; 64];
#[derive(Debug)]
pub struct CommandBuffer<B: hal::Backend> {
@ -166,6 +50,17 @@ pub struct CommandBuffer<B: hal::Backend> {
}
impl<B: GfxBackend> CommandBuffer<B> {
/// Looks up the command buffer for `id` and verifies that it is still
/// open for recording.
///
/// Returns `CommandEncoderError::Invalid` when the id does not resolve,
/// or `CommandEncoderError::NotRecording` when the encoder was already
/// finished.
fn get_encoder(
    storage: &mut Storage<Self, id::CommandEncoderId>,
    id: id::CommandEncoderId,
) -> Result<&mut Self, CommandEncoderError> {
    let cmd_buf = storage
        .get_mut(id)
        .map_err(|_| CommandEncoderError::Invalid)?;
    if cmd_buf.is_recording {
        Ok(cmd_buf)
    } else {
        Err(CommandEncoderError::NotRecording)
    }
}
pub(crate) fn insert_barriers(
raw: &mut B::CommandBuffer,
base: &mut TrackerSet,
@ -193,6 +88,7 @@ impl<B: GfxBackend> CommandBuffer<B> {
.merge_extend(&head.compute_pipes)
.unwrap();
base.render_pipes.merge_extend(&head.render_pipes).unwrap();
base.bundles.merge_extend(&head.bundles).unwrap();
let stages = all_buffer_stages() | all_image_stages();
unsafe {
@ -205,73 +101,194 @@ impl<B: GfxBackend> CommandBuffer<B> {
}
}
#[repr(C)]
#[derive(PeekPoke)]
struct PassComponent<T> {
load_op: wgt::LoadOp,
store_op: wgt::StoreOp,
clear_value: T,
read_only: bool,
#[derive(Copy, Clone, Debug)]
/// Borrowed view of a pass recording: the same four streams as `BasePass`,
/// as slices instead of owned vectors.
pub struct BasePassRef<'a, C> {
    pub commands: &'a [C],
    pub dynamic_offsets: &'a [wgt::DynamicOffset],
    pub string_data: &'a [u8],
    pub push_constant_data: &'a [u32],
}
// required for PeekPoke
impl<T: Default> Default for PassComponent<T> {
fn default() -> Self {
PassComponent {
load_op: wgt::LoadOp::Clear,
store_op: wgt::StoreOp::Clear,
clear_value: T::default(),
read_only: false,
#[doc(hidden)]
#[derive(Debug)]
// Serializable for pass serialization ("serial-pass") and for API tracing;
// deserializable for "serial-pass" and replay.
#[cfg_attr(
    any(feature = "serial-pass", feature = "trace"),
    derive(serde::Serialize)
)]
#[cfg_attr(
    any(feature = "serial-pass", feature = "replay"),
    derive(serde::Deserialize)
)]
/// Owned recording of a pass, split into four parallel streams: the command
/// list plus side buffers for dynamic offsets, debug-marker string bytes,
/// and push-constant words.
pub struct BasePass<C> {
    pub commands: Vec<C>,
    pub dynamic_offsets: Vec<wgt::DynamicOffset>,
    pub string_data: Vec<u8>,
    pub push_constant_data: Vec<u32>,
}
impl<C: Clone> BasePass<C> {
    /// Creates an empty recording with all four streams empty.
    fn new() -> Self {
        Self {
            commands: Vec::new(),
            dynamic_offsets: Vec::new(),
            string_data: Vec::new(),
            push_constant_data: Vec::new(),
        }
    }
    /// Deep-copies a borrowed `BasePassRef` into an owned pass
    /// (only needed when tracing captures a pass).
    #[cfg(feature = "trace")]
    fn from_ref(base: BasePassRef<C>) -> Self {
        Self {
            commands: base.commands.to_vec(),
            dynamic_offsets: base.dynamic_offsets.to_vec(),
            string_data: base.string_data.to_vec(),
            push_constant_data: base.push_constant_data.to_vec(),
        }
    }
    /// Borrows all four streams as a `BasePassRef`.
    pub fn as_ref(&self) -> BasePassRef<C> {
        BasePassRef {
            commands: &self.commands,
            dynamic_offsets: &self.dynamic_offsets,
            string_data: &self.string_data,
            push_constant_data: &self.push_constant_data,
        }
    }
}
#[repr(C)]
#[derive(Default, PeekPoke)]
struct RawRenderPassColorAttachmentDescriptor {
attachment: u64,
resolve_target: u64,
component: PassComponent<wgt::Color>,
}
#[repr(C)]
#[derive(Default, PeekPoke)]
struct RawRenderPassDepthStencilAttachmentDescriptor {
attachment: u64,
depth: PassComponent<f32>,
stencil: PassComponent<u32>,
}
#[repr(C)]
#[derive(Default, PeekPoke)]
struct RawRenderTargets {
colors: [RawRenderPassColorAttachmentDescriptor; MAX_COLOR_TARGETS],
depth_stencil: RawRenderPassDepthStencilAttachmentDescriptor,
#[derive(Clone, Debug, Error)]
pub enum CommandEncoderError {
#[error("command encoder is invalid")]
Invalid,
#[error("command encoder must be active")]
NotRecording,
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn command_encoder_finish<B: GfxBackend>(
&self,
encoder_id: id::CommandEncoderId,
_desc: &wgt::CommandBufferDescriptor,
) -> id::CommandBufferId {
_desc: &wgt::CommandBufferDescriptor<Label>,
) -> Result<id::CommandBufferId, CommandEncoderError> {
span!(_guard, INFO, "CommandEncoder::finish");
let hub = B::hub(self);
let mut token = Token::root();
let (swap_chain_guard, mut token) = hub.swap_chains.read(&mut token);
//TODO: actually close the last recorded command buffer
let (mut comb_guard, _) = hub.command_buffers.write(&mut token);
let comb = &mut comb_guard[encoder_id];
assert!(comb.is_recording, "Command buffer must be recording");
comb.is_recording = false;
let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
cmd_buf.is_recording = false;
// stop tracking the swapchain image, if used
if let Some((ref sc_id, _)) = comb.used_swap_chain {
if let Some((ref sc_id, _)) = cmd_buf.used_swap_chain {
let view_id = swap_chain_guard[sc_id.value]
.acquired_view_id
.as_ref()
.expect("Used swap chain frame has already presented");
comb.trackers.views.remove(view_id.value);
cmd_buf.trackers.views.remove(view_id.value);
}
log::debug!("Command buffer {:?} {:#?}", encoder_id, comb.trackers);
encoder_id
tracing::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
Ok(encoder_id)
}
/// Opens a labeled debug group on the encoder's current backend command
/// buffer (`begin_debug_marker`), to be closed by a matching
/// `command_encoder_pop_debug_group`.
///
/// Errors if the encoder id is invalid or the encoder is not recording.
pub fn command_encoder_push_debug_group<B: GfxBackend>(
    &self,
    encoder_id: id::CommandEncoderId,
    label: &str,
) -> Result<(), CommandEncoderError> {
    span!(_guard, DEBUG, "CommandEncoder::push_debug_group");
    let hub = B::hub(self);
    let mut token = Token::root();
    let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
    let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
    // Record into the most recent raw command buffer of this encoder.
    let cmb_raw = cmd_buf.raw.last_mut().unwrap();
    unsafe {
        cmb_raw.begin_debug_marker(label, 0);
    }
    Ok(())
}
/// Inserts a single labeled debug marker into the encoder's current
/// backend command buffer (no matching pop required).
///
/// Errors if the encoder id is invalid or the encoder is not recording.
pub fn command_encoder_insert_debug_marker<B: GfxBackend>(
    &self,
    encoder_id: id::CommandEncoderId,
    label: &str,
) -> Result<(), CommandEncoderError> {
    span!(_guard, DEBUG, "CommandEncoder::insert_debug_marker");
    let hub = B::hub(self);
    let mut token = Token::root();
    let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
    let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
    // Record into the most recent raw command buffer of this encoder.
    let cmb_raw = cmd_buf.raw.last_mut().unwrap();
    unsafe {
        cmb_raw.insert_debug_marker(label, 0);
    }
    Ok(())
}
/// Closes the debug group most recently opened by
/// `command_encoder_push_debug_group` (`end_debug_marker`).
///
/// Errors if the encoder id is invalid or the encoder is not recording.
pub fn command_encoder_pop_debug_group<B: GfxBackend>(
    &self,
    encoder_id: id::CommandEncoderId,
) -> Result<(), CommandEncoderError> {
    // Span label fixed to match the method name, consistent with the
    // push_debug_group/insert_debug_marker spans above.
    span!(_guard, DEBUG, "CommandEncoder::pop_debug_group");
    let hub = B::hub(self);
    let mut token = Token::root();
    let (mut cmd_buf_guard, _) = hub.command_buffers.write(&mut token);
    let cmd_buf = CommandBuffer::get_encoder(&mut *cmd_buf_guard, encoder_id)?;
    // Record into the most recent raw command buffer of this encoder.
    let cmb_raw = cmd_buf.raw.last_mut().unwrap();
    unsafe {
        cmb_raw.end_debug_marker();
    }
    Ok(())
}
}
/// Invokes `push_fn(byte_offset, words)` repeatedly with zero-filled word
/// slices until `size_bytes` worth of push-constant space (measured in
/// `PUSH_CONSTANT_ALIGNMENT` words) has been covered, chunked by the size
/// of `PUSH_CONSTANT_CLEAR_ARRAY`.
fn push_constant_clear<PushFn>(offset: u32, size_bytes: u32, mut push_fn: PushFn)
where
    PushFn: FnMut(u32, &[u32]),
{
    let total_words = size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT;
    let mut written_words = 0_u32;
    while written_words < total_words {
        // Never hand out more zeros than the scratch array holds.
        let chunk_words = (total_words - written_words).min(PUSH_CONSTANT_CLEAR_ARRAY.len() as u32);
        let byte_offset = written_words * wgt::PUSH_CONSTANT_ALIGNMENT;
        push_fn(
            offset + byte_offset,
            &PUSH_CONSTANT_CLEAR_ARRAY[..chunk_words as usize],
        );
        written_words += chunk_words;
    }
}
/// Remembers the most recently applied state value so that redundant
/// re-application of the same value can be detected and skipped.
#[derive(Debug)]
struct StateChange<T> {
    last_state: Option<T>,
}
impl<T: Copy + PartialEq> StateChange<T> {
    /// Creates a tracker with no state recorded yet.
    fn new() -> Self {
        Self { last_state: None }
    }
    /// Records `new_state` and reports whether it was already the
    /// current state (i.e. the set is redundant).
    fn set_and_check_redundant(&mut self, new_state: T) -> bool {
        let redundant = self.last_state.map_or(false, |prev| prev == new_state);
        self.last_state = Some(new_state);
        redundant
    }
    /// True while no state has been recorded since creation or `reset`.
    fn is_unset(&self) -> bool {
        self.last_state.is_none()
    }
    /// Forgets the recorded state.
    fn reset(&mut self) {
        self.last_state = None;
    }
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -2,7 +2,12 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{resource, PrivateFeatures};
use crate::{
command::{LoadOp, PassChannel, StoreOp},
resource, PrivateFeatures,
};
use std::convert::TryInto;
pub fn map_buffer_usage(usage: wgt::BufferUsage) -> (hal::buffer::Usage, hal::memory::Properties) {
use hal::buffer::Usage as U;
@ -79,13 +84,20 @@ pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::Descri
use hal::pso;
use wgt::BindingType as Bt;
match binding.ty {
Bt::UniformBuffer { dynamic } => pso::DescriptorType::Buffer {
Bt::UniformBuffer {
dynamic,
min_binding_size: _,
} => pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Uniform,
format: pso::BufferDescriptorFormat::Structured {
dynamic_offset: dynamic,
},
},
Bt::StorageBuffer { readonly, dynamic } => pso::DescriptorType::Buffer {
Bt::StorageBuffer {
readonly,
dynamic,
min_binding_size: _,
} => pso::DescriptorType::Buffer {
ty: pso::BufferDescriptorType::Storage {
read_only: readonly,
},
@ -93,7 +105,7 @@ pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::Descri
dynamic_offset: dynamic,
},
},
Bt::Sampler { .. } => pso::DescriptorType::Sampler,
Bt::Sampler { comparison: _ } => pso::DescriptorType::Sampler,
Bt::SampledTexture { .. } => pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled {
with_sampler: false,
@ -104,7 +116,6 @@ pub fn map_binding_type(binding: &wgt::BindGroupLayoutEntry) -> hal::pso::Descri
read_only: readonly,
},
},
_ => unreachable!(),
}
}
@ -231,29 +242,25 @@ pub fn map_depth_stencil_state_descriptor(
desc: &wgt::DepthStencilStateDescriptor,
) -> hal::pso::DepthStencilDesc {
hal::pso::DepthStencilDesc {
depth: if desc.depth_write_enabled || desc.depth_compare != wgt::CompareFunction::Always {
depth: if desc.is_depth_enabled() {
Some(hal::pso::DepthTest {
fun: map_compare_function(desc.depth_compare)
.expect("DepthStencilStateDescriptor has undefined compare function"),
fun: map_compare_function(desc.depth_compare),
write: desc.depth_write_enabled,
})
} else {
None
},
depth_bounds: false, // TODO
stencil: if desc.stencil_read_mask != !0
|| desc.stencil_write_mask != !0
|| desc.stencil_front != wgt::StencilStateFaceDescriptor::IGNORE
|| desc.stencil_back != wgt::StencilStateFaceDescriptor::IGNORE
{
stencil: if desc.stencil.is_enabled() {
let s = &desc.stencil;
Some(hal::pso::StencilTest {
faces: hal::pso::Sided {
front: map_stencil_face(&desc.stencil_front),
back: map_stencil_face(&desc.stencil_back),
front: map_stencil_face(&s.front),
back: map_stencil_face(&s.back),
},
read_masks: hal::pso::State::Static(hal::pso::Sided::new(desc.stencil_read_mask)),
write_masks: hal::pso::State::Static(hal::pso::Sided::new(desc.stencil_write_mask)),
reference_values: if desc.needs_stencil_reference() {
read_masks: hal::pso::State::Static(hal::pso::Sided::new(s.read_mask)),
write_masks: hal::pso::State::Static(hal::pso::Sided::new(s.write_mask)),
reference_values: if s.needs_ref_value() {
hal::pso::State::Dynamic
} else {
hal::pso::State::Static(hal::pso::Sided::new(0))
@ -269,29 +276,25 @@ fn map_stencil_face(
stencil_state_face_desc: &wgt::StencilStateFaceDescriptor,
) -> hal::pso::StencilFace {
hal::pso::StencilFace {
fun: map_compare_function(stencil_state_face_desc.compare)
.expect("StencilStateFaceDescriptor has undefined compare function"),
fun: map_compare_function(stencil_state_face_desc.compare),
op_fail: map_stencil_operation(stencil_state_face_desc.fail_op),
op_depth_fail: map_stencil_operation(stencil_state_face_desc.depth_fail_op),
op_pass: map_stencil_operation(stencil_state_face_desc.pass_op),
}
}
pub fn map_compare_function(
compare_function: wgt::CompareFunction,
) -> Option<hal::pso::Comparison> {
pub fn map_compare_function(compare_function: wgt::CompareFunction) -> hal::pso::Comparison {
use hal::pso::Comparison as H;
use wgt::CompareFunction as Cf;
match compare_function {
Cf::Undefined => None,
Cf::Never => Some(H::Never),
Cf::Less => Some(H::Less),
Cf::Equal => Some(H::Equal),
Cf::LessEqual => Some(H::LessEqual),
Cf::Greater => Some(H::Greater),
Cf::NotEqual => Some(H::NotEqual),
Cf::GreaterEqual => Some(H::GreaterEqual),
Cf::Always => Some(H::Always),
Cf::Never => H::Never,
Cf::Less => H::Less,
Cf::Equal => H::Equal,
Cf::LessEqual => H::LessEqual,
Cf::Greater => H::Greater,
Cf::NotEqual => H::NotEqual,
Cf::GreaterEqual => H::GreaterEqual,
Cf::Always => H::Always,
}
}
@ -367,8 +370,8 @@ pub(crate) fn map_texture_format(
// Depth and stencil formats
Tf::Depth32Float => H::D32Sfloat,
Tf::Depth24Plus => {
if private_features.texture_d24_s8 {
H::D24UnormS8Uint
if private_features.texture_d24 {
H::X8D24Unorm
} else {
H::D32Sfloat
}
@ -380,6 +383,140 @@ pub(crate) fn map_texture_format(
H::D32SfloatS8Uint
}
}
// BCn compressed formats
Tf::Bc1RgbaUnorm => H::Bc1RgbaUnorm,
Tf::Bc1RgbaUnormSrgb => H::Bc1RgbaSrgb,
Tf::Bc2RgbaUnorm => H::Bc2Unorm,
Tf::Bc2RgbaUnormSrgb => H::Bc2Srgb,
Tf::Bc3RgbaUnorm => H::Bc3Unorm,
Tf::Bc3RgbaUnormSrgb => H::Bc3Srgb,
Tf::Bc4RUnorm => H::Bc4Unorm,
Tf::Bc4RSnorm => H::Bc4Snorm,
Tf::Bc5RgUnorm => H::Bc5Unorm,
Tf::Bc5RgSnorm => H::Bc5Snorm,
Tf::Bc6hRgbSfloat => H::Bc6hSfloat,
Tf::Bc6hRgbUfloat => H::Bc6hUfloat,
Tf::Bc7RgbaUnorm => H::Bc7Unorm,
Tf::Bc7RgbaUnormSrgb => H::Bc7Srgb,
}
}
/// Returns the (width, height) in texels of one block of `format`:
/// (1, 1) for all uncompressed color/depth formats, (4, 4) for the
/// block-compressed BC formats.
pub fn texture_block_size(format: wgt::TextureFormat) -> (u32, u32) {
    use wgt::TextureFormat as Tf;
    match format {
        // Uncompressed formats: one texel per block.
        Tf::R8Unorm
        | Tf::R8Snorm
        | Tf::R8Uint
        | Tf::R8Sint
        | Tf::R16Uint
        | Tf::R16Sint
        | Tf::R16Float
        | Tf::Rg8Unorm
        | Tf::Rg8Snorm
        | Tf::Rg8Uint
        | Tf::Rg8Sint
        | Tf::R32Uint
        | Tf::R32Sint
        | Tf::R32Float
        | Tf::Rg16Uint
        | Tf::Rg16Sint
        | Tf::Rg16Float
        | Tf::Rgba8Unorm
        | Tf::Rgba8UnormSrgb
        | Tf::Rgba8Snorm
        | Tf::Rgba8Uint
        | Tf::Rgba8Sint
        | Tf::Bgra8Unorm
        | Tf::Bgra8UnormSrgb
        | Tf::Rgb10a2Unorm
        | Tf::Rg11b10Float
        | Tf::Rg32Uint
        | Tf::Rg32Sint
        | Tf::Rg32Float
        | Tf::Rgba16Uint
        | Tf::Rgba16Sint
        | Tf::Rgba16Float
        | Tf::Rgba32Uint
        | Tf::Rgba32Sint
        | Tf::Rgba32Float
        | Tf::Depth32Float
        | Tf::Depth24Plus
        | Tf::Depth24PlusStencil8 => (1, 1),
        // BC-compressed formats: 4x4 texel blocks.
        Tf::Bc1RgbaUnorm
        | Tf::Bc1RgbaUnormSrgb
        | Tf::Bc2RgbaUnorm
        | Tf::Bc2RgbaUnormSrgb
        | Tf::Bc3RgbaUnorm
        | Tf::Bc3RgbaUnormSrgb
        | Tf::Bc4RUnorm
        | Tf::Bc4RSnorm
        | Tf::Bc5RgUnorm
        | Tf::Bc5RgSnorm
        | Tf::Bc6hRgbUfloat
        | Tf::Bc6hRgbSfloat
        | Tf::Bc7RgbaUnorm
        | Tf::Bc7RgbaUnormSrgb => (4, 4),
    }
}
/// Returns the device `Features` required to use `format`: none for the
/// uncompressed formats, `TEXTURE_COMPRESSION_BC` for the BC formats.
pub fn texture_features(format: wgt::TextureFormat) -> wgt::Features {
    use wgt::TextureFormat as Tf;
    match format {
        // Uncompressed formats are always available.
        Tf::R8Unorm
        | Tf::R8Snorm
        | Tf::R8Uint
        | Tf::R8Sint
        | Tf::R16Uint
        | Tf::R16Sint
        | Tf::R16Float
        | Tf::Rg8Unorm
        | Tf::Rg8Snorm
        | Tf::Rg8Uint
        | Tf::Rg8Sint
        | Tf::R32Uint
        | Tf::R32Sint
        | Tf::R32Float
        | Tf::Rg16Uint
        | Tf::Rg16Sint
        | Tf::Rg16Float
        | Tf::Rgba8Unorm
        | Tf::Rgba8UnormSrgb
        | Tf::Rgba8Snorm
        | Tf::Rgba8Uint
        | Tf::Rgba8Sint
        | Tf::Bgra8Unorm
        | Tf::Bgra8UnormSrgb
        | Tf::Rgb10a2Unorm
        | Tf::Rg11b10Float
        | Tf::Rg32Uint
        | Tf::Rg32Sint
        | Tf::Rg32Float
        | Tf::Rgba16Uint
        | Tf::Rgba16Sint
        | Tf::Rgba16Float
        | Tf::Rgba32Uint
        | Tf::Rgba32Sint
        | Tf::Rgba32Float
        | Tf::Depth32Float
        | Tf::Depth24Plus
        | Tf::Depth24PlusStencil8 => wgt::Features::empty(),
        // BC compression requires the corresponding feature bit.
        Tf::Bc1RgbaUnorm
        | Tf::Bc1RgbaUnormSrgb
        | Tf::Bc2RgbaUnorm
        | Tf::Bc2RgbaUnormSrgb
        | Tf::Bc3RgbaUnorm
        | Tf::Bc3RgbaUnormSrgb
        | Tf::Bc4RUnorm
        | Tf::Bc4RSnorm
        | Tf::Bc5RgUnorm
        | Tf::Bc5RgSnorm
        | Tf::Bc6hRgbUfloat
        | Tf::Bc6hRgbSfloat
        | Tf::Bc7RgbaUnorm
        | Tf::Bc7RgbaUnormSrgb => wgt::Features::TEXTURE_COMPRESSION_BC,
    }
}
@ -420,15 +557,26 @@ pub fn map_vertex_format(vertex_format: wgt::VertexFormat) -> hal::format::Forma
}
}
/// Narrows `value` to `u16`, panicking if it does not fit.
fn checked_u32_as_u16(value: u32) -> u16 {
    assert!(value <= ::std::u16::MAX as u32);
    value as u16
}
/// Returns true if `val` is a power of two (zero is not).
pub fn is_power_of_two(val: u32) -> bool {
    // Delegates to the standard library instead of the hand-rolled
    // `val != 0 && (val & (val - 1)) == 0` bit trick; semantics identical.
    val.is_power_of_two()
}
/// Whether `format` may be the source of a texture copy: all formats
/// except the Depth24Plus family (whose layout is opaque/driver-dependent).
pub fn is_valid_copy_src_texture_format(format: wgt::TextureFormat) -> bool {
    use wgt::TextureFormat as Tf;
    match format {
        Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
        _ => true,
    }
}
/// Whether `format` may be the destination of a texture copy: all formats
/// except the depth formats.
pub fn is_valid_copy_dst_texture_format(format: wgt::TextureFormat) -> bool {
    use wgt::TextureFormat as Tf;
    match format {
        Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
        _ => true,
    }
}
pub fn map_texture_dimension_size(
dimension: wgt::TextureDimension,
wgt::Extent3d {
@ -437,28 +585,49 @@ pub fn map_texture_dimension_size(
depth,
}: wgt::Extent3d,
sample_size: u32,
) -> hal::image::Kind {
) -> Result<hal::image::Kind, resource::TextureDimensionError> {
use hal::image::Kind as H;
use resource::TextureDimensionError as Tde;
use wgt::TextureDimension::*;
match dimension {
let zero_dim = if width == 0 {
Some(resource::TextureErrorDimension::X)
} else if height == 0 {
Some(resource::TextureErrorDimension::Y)
} else if depth == 0 {
Some(resource::TextureErrorDimension::Z)
} else {
None
};
if let Some(dim) = zero_dim {
return Err(resource::TextureDimensionError::Zero(dim));
}
Ok(match dimension {
D1 => {
assert_eq!(height, 1);
assert_eq!(sample_size, 1);
H::D1(width, checked_u32_as_u16(depth))
if height != 1 {
return Err(Tde::InvalidHeight);
}
if sample_size != 1 {
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth.try_into().unwrap_or(!0);
H::D1(width, layers)
}
D2 => {
assert!(
sample_size <= 32 && is_power_of_two(sample_size),
"Invalid sample_count of {}",
sample_size
);
H::D2(width, height, checked_u32_as_u16(depth), sample_size as u8)
if sample_size > 32 || !is_power_of_two(sample_size) {
return Err(Tde::InvalidSampleCount(sample_size));
}
let layers = depth.try_into().unwrap_or(!0);
H::D2(width, height, layers, sample_size as u8)
}
D3 => {
assert_eq!(sample_size, 1);
if sample_size != 1 {
return Err(Tde::InvalidSampleCount(sample_size));
}
H::D3(width, height, depth)
}
}
})
}
pub fn map_texture_view_dimension(dimension: wgt::TextureViewDimension) -> hal::image::ViewKind {
@ -525,11 +694,11 @@ pub(crate) fn map_texture_state(
W::UNINITIALIZED => return (A::empty(), L::Undefined),
W::COPY_SRC => L::TransferSrcOptimal,
W::COPY_DST => L::TransferDstOptimal,
W::SAMPLED => L::ShaderReadOnlyOptimal,
W::SAMPLED if is_color => L::ShaderReadOnlyOptimal,
W::ATTACHMENT_READ | W::ATTACHMENT_WRITE if is_color => L::ColorAttachmentOptimal,
W::ATTACHMENT_READ => L::DepthStencilReadOnlyOptimal,
_ if is_color => L::General,
W::ATTACHMENT_WRITE => L::DepthStencilAttachmentOptimal,
_ => L::General,
_ => L::DepthStencilReadOnlyOptimal,
};
let mut access = A::empty();
@ -566,15 +735,15 @@ pub(crate) fn map_texture_state(
(access, layout)
}
pub fn map_load_store_ops(load: wgt::LoadOp, store: wgt::StoreOp) -> hal::pass::AttachmentOps {
pub fn map_load_store_ops<V>(channel: &PassChannel<V>) -> hal::pass::AttachmentOps {
hal::pass::AttachmentOps {
load: match load {
wgt::LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear,
wgt::LoadOp::Load => hal::pass::AttachmentLoadOp::Load,
load: match channel.load_op {
LoadOp::Clear => hal::pass::AttachmentLoadOp::Clear,
LoadOp::Load => hal::pass::AttachmentLoadOp::Load,
},
store: match store {
wgt::StoreOp::Clear => hal::pass::AttachmentStoreOp::DontCare, //TODO!
wgt::StoreOp::Store => hal::pass::AttachmentStoreOp::Store,
store: match channel.store_op {
StoreOp::Clear => hal::pass::AttachmentStoreOp::DontCare, //TODO!
StoreOp::Store => hal::pass::AttachmentStoreOp::Store,
},
}
}
@ -618,6 +787,7 @@ pub fn map_wrap(address: wgt::AddressMode) -> hal::image::WrapMode {
Am::ClampToEdge => W::Clamp,
Am::Repeat => W::Tile,
Am::MirrorRepeat => W::Mirror,
Am::ClampToBorder => W::Border,
}
}
@ -626,8 +796,12 @@ pub fn map_rasterization_state_descriptor(
) -> hal::pso::Rasterizer {
use hal::pso;
pso::Rasterizer {
depth_clamping: false,
polygon_mode: pso::PolygonMode::Fill,
depth_clamping: desc.clamp_depth,
polygon_mode: match desc.polygon_mode {
wgt::PolygonMode::Fill => pso::PolygonMode::Fill,
wgt::PolygonMode::Line => pso::PolygonMode::Line,
wgt::PolygonMode::Point => pso::PolygonMode::Point,
},
cull_face: match desc.cull_mode {
wgt::CullMode::None => pso::Face::empty(),
wgt::CullMode::Front => pso::Face::FRONT,

Просмотреть файл

@ -5,7 +5,8 @@
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
device::{queue::TempResource, DeviceError},
hub::{GfxBackend, GlobalIdentityHandlerFactory, Hub, Token},
id, resource,
track::TrackerSet,
FastHashMap, RefCount, Stored, SubmissionIndex,
@ -16,6 +17,7 @@ use gfx_descriptor::{DescriptorAllocator, DescriptorSet};
use gfx_memory::{Heaps, MemoryBlock};
use hal::device::Device as _;
use parking_lot::Mutex;
use thiserror::Error;
use std::sync::atomic::Ordering;
@ -24,19 +26,20 @@ const CLEANUP_WAIT_MS: u64 = 5000;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Debug, Default)]
pub struct SuspectedResources {
pub(crate) buffers: Vec<id::BufferId>,
pub(crate) textures: Vec<id::TextureId>,
pub(crate) texture_views: Vec<id::TextureViewId>,
pub(crate) samplers: Vec<id::SamplerId>,
pub(crate) bind_groups: Vec<id::BindGroupId>,
pub(crate) compute_pipelines: Vec<id::ComputePipelineId>,
pub(crate) render_pipelines: Vec<id::RenderPipelineId>,
pub(crate) bind_group_layouts: Vec<Stored<id::BindGroupLayoutId>>,
pub(crate) buffers: Vec<id::Valid<id::BufferId>>,
pub(crate) textures: Vec<id::Valid<id::TextureId>>,
pub(crate) texture_views: Vec<id::Valid<id::TextureViewId>>,
pub(crate) samplers: Vec<id::Valid<id::SamplerId>>,
pub(crate) bind_groups: Vec<id::Valid<id::BindGroupId>>,
pub(crate) compute_pipelines: Vec<id::Valid<id::ComputePipelineId>>,
pub(crate) render_pipelines: Vec<id::Valid<id::RenderPipelineId>>,
pub(crate) bind_group_layouts: Vec<id::Valid<id::BindGroupLayoutId>>,
pub(crate) pipeline_layouts: Vec<Stored<id::PipelineLayoutId>>,
pub(crate) render_bundles: Vec<id::Valid<id::RenderBundleId>>,
}
impl SuspectedResources {
pub fn clear(&mut self) {
pub(crate) fn clear(&mut self) {
self.buffers.clear();
self.textures.clear();
self.texture_views.clear();
@ -46,9 +49,10 @@ impl SuspectedResources {
self.render_pipelines.clear();
self.bind_group_layouts.clear();
self.pipeline_layouts.clear();
self.render_bundles.clear();
}
pub fn extend(&mut self, other: &Self) {
pub(crate) fn extend(&mut self, other: &Self) {
self.buffers.extend_from_slice(&other.buffers);
self.textures.extend_from_slice(&other.textures);
self.texture_views.extend_from_slice(&other.texture_views);
@ -62,6 +66,18 @@ impl SuspectedResources {
.extend_from_slice(&other.bind_group_layouts);
self.pipeline_layouts
.extend_from_slice(&other.pipeline_layouts);
self.render_bundles.extend_from_slice(&other.render_bundles);
}
pub(crate) fn add_trackers(&mut self, trackers: &TrackerSet) {
self.buffers.extend(trackers.buffers.used());
self.textures.extend(trackers.textures.used());
self.texture_views.extend(trackers.views.used());
self.samplers.extend(trackers.samplers.used());
self.bind_groups.extend(trackers.bind_groups.used());
self.compute_pipelines.extend(trackers.compute_pipes.used());
self.render_pipelines.extend(trackers.render_pipes.used());
self.render_bundles.extend(trackers.bundles.used());
}
}
@ -72,7 +88,7 @@ struct NonReferencedResources<B: hal::Backend> {
images: Vec<(B::Image, MemoryBlock<B>)>,
// Note: we keep the associated ID here in order to be able to check
// at any point what resources are used in a submission.
image_views: Vec<(id::TextureViewId, B::ImageView)>,
image_views: Vec<(id::Valid<id::TextureViewId>, B::ImageView)>,
samplers: Vec<B::Sampler>,
framebuffers: Vec<B::Framebuffer>,
desc_sets: Vec<DescriptorSet<B>>,
@ -84,7 +100,7 @@ struct NonReferencedResources<B: hal::Backend> {
impl<B: hal::Backend> NonReferencedResources<B> {
fn new() -> Self {
NonReferencedResources {
Self {
buffers: Vec::new(),
images: Vec::new(),
image_views: Vec::new(),
@ -117,17 +133,15 @@ impl<B: hal::Backend> NonReferencedResources<B> {
heaps_mutex: &Mutex<Heaps<B>>,
descriptor_allocator_mutex: &Mutex<DescriptorAllocator<B>>,
) {
if !self.buffers.is_empty() {
if !self.buffers.is_empty() || !self.images.is_empty() {
let mut heaps = heaps_mutex.lock();
for (raw, memory) in self.buffers.drain(..) {
log::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
tracing::trace!("Buffer {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_buffer(raw);
heaps.free(device, memory);
}
}
if !self.images.is_empty() {
let mut heaps = heaps_mutex.lock();
for (raw, memory) in self.images.drain(..) {
tracing::trace!("Image {:?} is destroyed with memory {:?}", raw, memory);
device.destroy_image(raw);
heaps.free(device, memory);
}
@ -169,7 +183,15 @@ struct ActiveSubmission<B: hal::Backend> {
index: SubmissionIndex,
fence: B::Fence,
last_resources: NonReferencedResources<B>,
mapped: Vec<id::BufferId>,
mapped: Vec<id::Valid<id::BufferId>>,
}
#[derive(Clone, Debug, Error)]
pub enum WaitIdleError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("GPU got stuck :(")]
StuckGpu,
}
/// A struct responsible for tracking resource lifetimes.
@ -181,9 +203,14 @@ struct ActiveSubmission<B: hal::Backend> {
/// 3. When `ActiveSubmission` is retired, the mapped buffers associated with it are moved to `ready_to_map` vector.
/// 4. Finally, `handle_mapping` issues all the callbacks.
#[derive(Debug)]
pub struct LifetimeTracker<B: hal::Backend> {
pub(crate) struct LifetimeTracker<B: hal::Backend> {
/// Resources that the user has requested be mapped, but are still in use.
mapped: Vec<Stored<id::BufferId>>,
/// Buffers can be used in a submission that is yet to be made, by the
/// means of `write_buffer()`, so we have a special place for them.
pub future_suspected_buffers: Vec<Stored<id::BufferId>>,
/// Textures can be used in the upcoming submission by `write_texture`.
pub future_suspected_textures: Vec<Stored<id::TextureId>>,
/// Resources that are suspected for destruction.
pub suspected_resources: SuspectedResources,
/// Resources that are not referenced any more but still used by GPU.
@ -193,13 +220,15 @@ pub struct LifetimeTracker<B: hal::Backend> {
/// Resources that are neither referenced or used, just life_tracker
/// actual deletion.
free_resources: NonReferencedResources<B>,
ready_to_map: Vec<id::BufferId>,
ready_to_map: Vec<id::Valid<id::BufferId>>,
}
impl<B: hal::Backend> LifetimeTracker<B> {
pub fn new() -> Self {
LifetimeTracker {
Self {
mapped: Vec::new(),
future_suspected_buffers: Vec::new(),
future_suspected_textures: Vec::new(),
suspected_resources: SuspectedResources::default(),
active: Vec::new(),
free_resources: NonReferencedResources::new(),
@ -212,11 +241,28 @@ impl<B: hal::Backend> LifetimeTracker<B> {
index: SubmissionIndex,
fence: B::Fence,
new_suspects: &SuspectedResources,
temp_buffers: impl Iterator<Item = (B::Buffer, MemoryBlock<B>)>,
temp_resources: impl Iterator<Item = (TempResource<B>, MemoryBlock<B>)>,
) {
let mut last_resources = NonReferencedResources::new();
last_resources.buffers.extend(temp_buffers);
for (res, memory) in temp_resources {
match res {
TempResource::Buffer(raw) => last_resources.buffers.push((raw, memory)),
TempResource::Image(raw) => last_resources.images.push((raw, memory)),
}
}
self.suspected_resources.buffers.extend(
self.future_suspected_buffers
.drain(..)
.map(|stored| stored.value),
);
self.suspected_resources.textures.extend(
self.future_suspected_textures
.drain(..)
.map(|stored| stored.value),
);
self.suspected_resources.extend(new_suspects);
self.active.alloc().init(ActiveSubmission {
index,
fence,
@ -225,57 +271,56 @@ impl<B: hal::Backend> LifetimeTracker<B> {
});
}
pub fn map(&mut self, buffer: id::BufferId, ref_count: RefCount) {
self.mapped.push(Stored {
value: buffer,
ref_count,
});
pub(crate) fn map(&mut self, value: id::Valid<id::BufferId>, ref_count: RefCount) {
self.mapped.push(Stored { value, ref_count });
}
/// Find the pending entry with the lowest active index. If none can be found that means
/// everything in the allocator can be cleaned up, so std::usize::MAX is correct.
#[cfg(feature = "replay")]
pub fn lowest_active_submission(&self) -> SubmissionIndex {
self.active
.iter()
.fold(std::usize::MAX, |v, active| active.index.min(v))
}
fn wait_idle(&self, device: &B::Device) {
fn wait_idle(&self, device: &B::Device) -> Result<(), WaitIdleError> {
if !self.active.is_empty() {
log::debug!("Waiting for IDLE...");
tracing::debug!("Waiting for IDLE...");
let status = unsafe {
device.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)
device
.wait_for_fences(
self.active.iter().map(|a| &a.fence),
hal::device::WaitFor::All,
CLEANUP_WAIT_MS * 1_000_000,
)
.map_err(DeviceError::from)?
};
log::debug!("...Done");
assert_eq!(status, Ok(true), "GPU got stuck :(");
tracing::debug!("...Done");
if status == false {
// We timed out while waiting for the fences
return Err(WaitIdleError::StuckGpu);
}
}
Ok(())
}
/// Returns the last submission index that is done.
pub fn triage_submissions(&mut self, device: &B::Device, force_wait: bool) -> SubmissionIndex {
pub fn triage_submissions(
&mut self,
device: &B::Device,
force_wait: bool,
) -> Result<SubmissionIndex, WaitIdleError> {
if force_wait {
self.wait_idle(device);
self.wait_idle(device)?;
}
//TODO: enable when `is_sorted_by_key` is stable
//debug_assert!(self.active.is_sorted_by_key(|a| a.index));
let done_count = self
.active
.iter()
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap() })
.position(|a| unsafe { !device.get_fence_status(&a.fence).unwrap_or(false) })
.unwrap_or_else(|| self.active.len());
let last_done = if done_count != 0 {
self.active[done_count - 1].index
} else {
return 0;
return Ok(0);
};
for a in self.active.drain(..done_count) {
log::trace!("Active submission {} is done", a.index);
tracing::trace!("Active submission {} is done", a.index);
self.free_resources.extend(a.last_resources);
self.ready_to_map.extend(a.mapped);
unsafe {
@ -283,7 +328,7 @@ impl<B: hal::Backend> LifetimeTracker<B> {
}
}
last_done
Ok(last_done)
}
pub fn cleanup(
@ -298,50 +343,69 @@ impl<B: hal::Backend> LifetimeTracker<B> {
descriptor_allocator_mutex.lock().cleanup(device);
}
}
pub fn schedule_resource_destruction(
&mut self,
temp_resource: TempResource<B>,
memory: MemoryBlock<B>,
last_submit_index: SubmissionIndex,
) {
let resources = self
.active
.iter_mut()
.find(|a| a.index == last_submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources);
match temp_resource {
TempResource::Buffer(raw) => resources.buffers.push((raw, memory)),
TempResource::Image(raw) => resources.images.push((raw, memory)),
}
}
}
impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
trackers: &Mutex<TrackerSet>,
#[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
token: &mut Token<super::Device<B>>,
) {
let hub = B::hub(global);
if !self.suspected_resources.render_bundles.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.render_bundles.write(token);
while let Some(id) = self.suspected_resources.render_bundles.pop() {
if trackers.bundles.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderBundle(id.0)));
if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
}
}
}
}
if !self.suspected_resources.bind_groups.is_empty() {
let mut trackers = trackers.lock();
let (mut guard, _) = hub.bind_groups.write(token);
for id in self.suspected_resources.bind_groups.drain(..) {
while let Some(id) = self.suspected_resources.bind_groups.pop() {
if trackers.bind_groups.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id)));
hub.bind_groups.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroup(id.0)));
assert!(res.used.bind_groups.is_empty());
self.suspected_resources
.buffers
.extend(res.used.buffers.used());
self.suspected_resources
.textures
.extend(res.used.textures.used());
self.suspected_resources
.texture_views
.extend(res.used.views.used());
self.suspected_resources
.samplers
.extend(res.used.samplers.used());
if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.desc_sets
.push(res.raw);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.desc_sets
.push(res.raw);
}
}
}
}
@ -353,25 +417,25 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.texture_views.drain(..) {
if trackers.views.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id)));
hub.texture_views.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyTextureView(id.0)));
let raw = match res.inner {
resource::TextureViewInner::Native { raw, source_id } => {
self.suspected_resources.textures.push(source_id.value);
raw
}
resource::TextureViewInner::SwapChain { .. } => unreachable!(),
};
if let Some(res) = hub.texture_views.unregister_locked(id.0, &mut *guard) {
let raw = match res.inner {
resource::TextureViewInner::Native { raw, source_id } => {
self.suspected_resources.textures.push(source_id.value);
raw
}
resource::TextureViewInner::SwapChain { .. } => unreachable!(),
};
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.image_views
.push((id, raw));
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.image_views
.push((id, raw));
}
}
}
}
@ -383,17 +447,17 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.textures.drain(..) {
if trackers.textures.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id)));
hub.textures.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyTexture(id.0)));
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images
.push((res.raw, res.memory));
if let Some(res) = hub.textures.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.images
.extend(res.raw);
}
}
}
}
@ -405,17 +469,17 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.samplers.drain(..) {
if trackers.samplers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroySampler(id)));
hub.samplers.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroySampler(id.0)));
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.samplers
.push(res.raw);
if let Some(res) = hub.samplers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.samplers
.push(res.raw);
}
}
}
}
@ -427,18 +491,18 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.buffers.drain(..) {
if trackers.buffers.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id)));
hub.buffers.free_id(id);
let res = guard.remove(id).unwrap();
log::debug!("Buffer {:?} is detached", id);
trace.map(|t| t.lock().add(trace::Action::DestroyBuffer(id.0)));
tracing::debug!("Buffer {:?} is detached", id);
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.buffers
.push((res.raw, res.memory));
if let Some(res) = hub.buffers.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.buffers
.extend(res.raw);
}
}
}
}
@ -450,17 +514,17 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.compute_pipelines.drain(..) {
if trackers.compute_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id)));
hub.compute_pipelines.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyComputePipeline(id.0)));
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.compute_pipes
.push(res.raw);
if let Some(res) = hub.compute_pipelines.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.compute_pipes
.push(res.raw);
}
}
}
}
@ -472,36 +536,17 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for id in self.suspected_resources.render_pipelines.drain(..) {
if trackers.render_pipes.remove_abandoned(id) {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id)));
hub.render_pipelines.free_id(id);
let res = guard.remove(id).unwrap();
trace.map(|t| t.lock().add(trace::Action::DestroyRenderPipeline(id.0)));
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.graphics_pipes
.push(res.raw);
}
}
}
if !self.suspected_resources.bind_group_layouts.is_empty() {
let (mut guard, _) = hub.bind_group_layouts.write(token);
for Stored {
value: id,
ref_count,
} in self.suspected_resources.bind_group_layouts.drain(..)
{
//Note: this has to happen after all the suspected pipelines are destroyed
if ref_count.load() == 1 {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id)));
hub.bind_group_layouts.free_id(id);
let layout = guard.remove(id).unwrap();
self.free_resources.descriptor_set_layouts.push(layout.raw);
if let Some(res) = hub.render_pipelines.unregister_locked(id.0, &mut *guard) {
let submit_index = res.life_guard.submission_index.load(Ordering::Acquire);
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.graphics_pipes
.push(res.raw);
}
}
}
}
@ -517,10 +562,32 @@ impl<B: GfxBackend> LifetimeTracker<B> {
//Note: this has to happen after all the suspected pipelines are destroyed
if ref_count.load() == 1 {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id)));
hub.pipeline_layouts.free_id(id);
let layout = guard.remove(id).unwrap();
self.free_resources.pipeline_layouts.push(layout.raw);
trace.map(|t| t.lock().add(trace::Action::DestroyPipelineLayout(id.0)));
if let Some(lay) = hub.pipeline_layouts.unregister_locked(id.0, &mut *guard) {
self.suspected_resources
.bind_group_layouts
.extend_from_slice(&lay.bind_group_layout_ids);
self.free_resources.pipeline_layouts.push(lay.raw);
}
}
}
}
if !self.suspected_resources.bind_group_layouts.is_empty() {
let (mut guard, _) = hub.bind_group_layouts.write(token);
for id in self.suspected_resources.bind_group_layouts.drain(..) {
//Note: this has to happen after all the suspected pipelines are destroyed
//Note: nothing else can bump the refcount since the guard is locked exclusively
//Note: same BGL can appear multiple times in the list, but only the last
// encounter could drop the refcount to 0.
if guard[id].multi_ref_count.dec_and_check_empty() {
#[cfg(feature = "trace")]
trace.map(|t| t.lock().add(trace::Action::DestroyBindGroupLayout(id.0)));
if let Some(lay) = hub.bind_group_layouts.unregister_locked(id.0, &mut *guard) {
self.free_resources.descriptor_set_layouts.push(lay.raw);
}
}
}
}
@ -528,20 +595,20 @@ impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_mapped<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
token: &mut Token<super::Device<B>>,
) {
if self.mapped.is_empty() {
return;
}
let (buffer_guard, _) = B::hub(global).buffers.read(token);
let (buffer_guard, _) = hub.buffers.read(token);
for stored in self.mapped.drain(..) {
let resource_id = stored.value;
let buf = &buffer_guard[resource_id];
let submit_index = buf.life_guard.submission_index.load(Ordering::Acquire);
log::trace!(
tracing::trace!(
"Mapping of {:?} at submission {:?} gets assigned to active {:?}",
resource_id,
submit_index,
@ -558,11 +625,11 @@ impl<B: GfxBackend> LifetimeTracker<B> {
pub(crate) fn triage_framebuffers<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
framebuffers: &mut FastHashMap<super::FramebufferKey, B::Framebuffer>,
token: &mut Token<super::Device<B>>,
) {
let (texture_view_guard, _) = B::hub(global).texture_views.read(token);
let (texture_view_guard, _) = hub.texture_views.read(token);
let remove_list = framebuffers
.keys()
.filter_map(|key| {
@ -574,7 +641,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
for &at in key.all() {
// If this attachment is still registered, it's still valid
if texture_view_guard.contains(at) {
if texture_view_guard.contains(at.0) {
continue;
}
@ -610,21 +677,23 @@ impl<B: GfxBackend> LifetimeTracker<B> {
})
.collect::<FastHashMap<_, _>>();
log::debug!("Free framebuffers {:?}", remove_list);
for (ref key, submit_index) in remove_list {
let framebuffer = framebuffers.remove(key).unwrap();
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.framebuffers
.push(framebuffer);
if !remove_list.is_empty() {
tracing::debug!("Free framebuffers {:?}", remove_list);
for (ref key, submit_index) in remove_list {
let framebuffer = framebuffers.remove(key).unwrap();
self.active
.iter_mut()
.find(|a| a.index == submit_index)
.map_or(&mut self.free_resources, |a| &mut a.last_resources)
.framebuffers
.push(framebuffer);
}
}
}
pub(crate) fn handle_mapping<G: GlobalIdentityHandlerFactory>(
&mut self,
global: &Global<G>,
hub: &Hub<B, G>,
raw: &B::Device,
trackers: &Mutex<TrackerSet>,
token: &mut Token<super::Device<B>>,
@ -632,8 +701,7 @@ impl<B: GfxBackend> LifetimeTracker<B> {
if self.ready_to_map.is_empty() {
return Vec::new();
}
let hub = B::hub(global);
let (mut buffer_guard, _) = B::hub(global).buffers.write(token);
let (mut buffer_guard, _) = hub.buffers.write(token);
let mut pending_callbacks: Vec<super::BufferMapPendingCallback> =
Vec::with_capacity(self.ready_to_map.len());
let mut trackers = trackers.lock();
@ -642,12 +710,13 @@ impl<B: GfxBackend> LifetimeTracker<B> {
if buffer.life_guard.ref_count.is_none() && trackers.buffers.remove_abandoned(buffer_id)
{
buffer.map_state = resource::BufferMapState::Idle;
log::debug!("Mapping request is dropped because the buffer is destroyed.");
hub.buffers.free_id(buffer_id);
let buffer = buffer_guard.remove(buffer_id).unwrap();
self.free_resources
tracing::debug!("Mapping request is dropped because the buffer is destroyed.");
if let Some(buf) = hub
.buffers
.push((buffer.raw, buffer.memory));
.unregister_locked(buffer_id.0, &mut *buffer_guard)
{
self.free_resources.buffers.extend(buf.raw);
}
} else {
let mapping = match std::mem::replace(
&mut buffer.map_state,
@ -656,21 +725,25 @@ impl<B: GfxBackend> LifetimeTracker<B> {
resource::BufferMapState::Waiting(pending_mapping) => pending_mapping,
_ => panic!("No pending mapping."),
};
log::debug!("Buffer {:?} map state -> Active", buffer_id);
let host = mapping.op.host;
let status = match super::map_buffer(raw, buffer, mapping.sub_range.clone(), host) {
Ok(ptr) => {
buffer.map_state = resource::BufferMapState::Active {
ptr,
sub_range: mapping.sub_range,
host,
};
resource::BufferMapAsyncStatus::Success
}
Err(e) => {
log::error!("Mapping failed {:?}", e);
resource::BufferMapAsyncStatus::Error
let status = if mapping.sub_range.size.map_or(true, |x| x != 0) {
tracing::debug!("Buffer {:?} map state -> Active", buffer_id);
let host = mapping.op.host;
match super::map_buffer(raw, buffer, mapping.sub_range.clone(), host) {
Ok(ptr) => {
buffer.map_state = resource::BufferMapState::Active {
ptr,
sub_range: mapping.sub_range,
host,
};
resource::BufferMapAsyncStatus::Success
}
Err(e) => {
tracing::error!("Mapping failed {:?}", e);
resource::BufferMapAsyncStatus::Error
}
}
} else {
resource::BufferMapAsyncStatus::Success
};
pending_callbacks.push((mapping.op, status));
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -5,82 +5,117 @@
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
command::{CommandAllocator, CommandBuffer, TextureCopyView, BITS_PER_BYTE},
command::{
texture_copy_view_to_hal, validate_linear_texture_data, validate_texture_copy_range,
CommandAllocator, CommandBuffer, CopySide, TextureCopyView, TransferError, BITS_PER_BYTE,
},
conv,
device::{DeviceError, WaitIdleError},
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Token},
id,
resource::{BufferMapState, BufferUse, TextureUse},
resource::{BufferAccessError, BufferMapState, BufferUse, TextureUse},
span, FastHashSet,
};
use gfx_memory::{Block, Heaps, MemoryBlock};
use hal::{command::CommandBuffer as _, device::Device as _, queue::CommandQueue as _};
use smallvec::SmallVec;
use std::{iter, sync::atomic::Ordering};
use std::iter;
use thiserror::Error;
struct StagingData<B: hal::Backend> {
buffer: B::Buffer,
memory: MemoryBlock<B>,
comb: B::CommandBuffer,
cmdbuf: B::CommandBuffer,
}
#[derive(Debug, Default)]
#[derive(Debug)]
pub enum TempResource<B: hal::Backend> {
Buffer(B::Buffer),
Image(B::Image),
}
#[derive(Debug)]
pub(crate) struct PendingWrites<B: hal::Backend> {
pub command_buffer: Option<B::CommandBuffer>,
pub temp_buffers: Vec<(B::Buffer, MemoryBlock<B>)>,
pub temp_resources: Vec<(TempResource<B>, MemoryBlock<B>)>,
pub dst_buffers: FastHashSet<id::BufferId>,
pub dst_textures: FastHashSet<id::TextureId>,
}
impl<B: hal::Backend> PendingWrites<B> {
pub fn new() -> Self {
PendingWrites {
Self {
command_buffer: None,
temp_buffers: Vec::new(),
temp_resources: Vec::new(),
dst_buffers: FastHashSet::default(),
dst_textures: FastHashSet::default(),
}
}
pub fn dispose(
self,
device: &B::Device,
com_allocator: &CommandAllocator<B>,
cmd_allocator: &CommandAllocator<B>,
mem_allocator: &mut Heaps<B>,
) {
if let Some(raw) = self.command_buffer {
com_allocator.discard_internal(raw);
cmd_allocator.discard_internal(raw);
}
for (buffer, memory) in self.temp_buffers {
for (resource, memory) in self.temp_resources {
mem_allocator.free(device, memory);
unsafe {
device.destroy_buffer(buffer);
match resource {
TempResource::Buffer(buffer) => unsafe {
device.destroy_buffer(buffer);
},
TempResource::Image(image) => unsafe {
device.destroy_image(image);
},
}
}
}
pub fn consume_temp(&mut self, buffer: B::Buffer, memory: MemoryBlock<B>) {
self.temp_buffers.push((buffer, memory));
pub fn consume_temp(&mut self, resource: TempResource<B>, memory: MemoryBlock<B>) {
self.temp_resources.push((resource, memory));
}
fn consume(&mut self, stage: StagingData<B>) {
self.temp_buffers.push((stage.buffer, stage.memory));
self.command_buffer = Some(stage.comb);
self.temp_resources
.push((TempResource::Buffer(stage.buffer), stage.memory));
self.command_buffer = Some(stage.cmdbuf);
}
#[must_use]
fn finish(&mut self) -> Option<B::CommandBuffer> {
self.dst_buffers.clear();
self.dst_textures.clear();
self.command_buffer.take().map(|mut cmd_buf| unsafe {
cmd_buf.finish();
cmd_buf
})
}
}
impl<B: hal::Backend> super::Device<B> {
pub fn borrow_pending_writes(&mut self) -> &mut B::CommandBuffer {
if self.pending_writes.command_buffer.is_none() {
let mut comb = self.com_allocator.allocate_internal();
let mut cmdbuf = self.cmd_allocator.allocate_internal();
unsafe {
comb.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
self.pending_writes.command_buffer = Some(comb);
self.pending_writes.command_buffer = Some(cmdbuf);
}
self.pending_writes.command_buffer.as_mut().unwrap()
}
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> StagingData<B> {
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<B>, DeviceError> {
let mut buffer = unsafe {
self.raw
.create_buffer(size, hal::buffer::Usage::TRANSFER_SRC)
.unwrap()
.map_err(|err| match err {
hal::buffer::CreationError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to create staging buffer: {}", err),
})?
};
//TODO: do we need to transition into HOST_WRITE access first?
let requirements = unsafe { self.raw.get_buffer_requirements(&buffer) };
@ -90,38 +125,62 @@ impl<B: hal::Backend> super::Device<B> {
.lock()
.allocate(
&self.raw,
requirements.type_mask as u32,
&requirements,
gfx_memory::MemoryUsage::Staging { read_back: false },
gfx_memory::Kind::Linear,
requirements.size,
requirements.alignment,
)
.unwrap();
.map_err(DeviceError::from_heaps)?;
unsafe {
self.raw.set_buffer_name(&mut buffer, "<write_buffer_temp>");
self.raw
.bind_buffer_memory(memory.memory(), memory.segment().offset, &mut buffer)
.unwrap();
.map_err(DeviceError::from_bind)?;
}
let comb = match self.pending_writes.command_buffer.take() {
Some(comb) => comb,
let cmdbuf = match self.pending_writes.command_buffer.take() {
Some(cmdbuf) => cmdbuf,
None => {
let mut comb = self.com_allocator.allocate_internal();
let mut cmdbuf = self.cmd_allocator.allocate_internal();
unsafe {
comb.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
cmdbuf.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
comb
cmdbuf
}
};
StagingData {
Ok(StagingData {
buffer,
memory,
comb,
}
cmdbuf,
})
}
}
#[derive(Clone, Debug, Error)]
pub enum QueueWriteError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
Transfer(#[from] TransferError),
}
#[derive(Clone, Debug, Error)]
pub enum QueueSubmitError {
#[error(transparent)]
Queue(#[from] DeviceError),
#[error("command buffer {0:?} is invalid")]
InvalidCommandBuffer(id::CommandBufferId),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error("texture {0:?} is destroyed")]
DestroyedTexture(id::TextureId),
#[error(transparent)]
Unmap(#[from] BufferAccessError),
#[error("swap chain output was dropped before the command buffer got submitted")]
SwapChainOutputDropped,
#[error("GPU got stuck :(")]
StuckGpu,
}
//TODO: move out common parts of write_xxx.
impl<G: GlobalIdentityHandlerFactory> Global<G> {
@ -131,82 +190,78 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
buffer_id: id::BufferId,
buffer_offset: wgt::BufferAddress,
data: &[u8],
) {
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_buffer");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (buffer_guard, _) = hub.buffers.read(&mut token);
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteBuffer {
id: buffer_id,
data: data_path,
range: buffer_offset..buffer_offset + data.len() as wgt::BufferAddress,
queued: true,
});
}
None => {}
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteBuffer {
id: buffer_id,
data: data_path,
range: buffer_offset..buffer_offset + data.len() as wgt::BufferAddress,
queued: true,
});
}
let data_size = data.len() as wgt::BufferAddress;
if data_size == 0 {
log::trace!("Ignoring write_buffer of size 0");
return;
tracing::trace!("Ignoring write_buffer of size 0");
return Ok(());
}
let mut stage = device.prepare_stage(data_size);
let mut stage = device.prepare_stage(data_size)?;
{
let mut mapped = stage
.memory
.map(&device.raw, hal::memory::Segment::ALL)
.unwrap();
.map_err(|err| match err {
hal::device::MapError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to map buffer: {}", err),
})?;
unsafe { mapped.write(&device.raw, hal::memory::Segment::ALL) }
.unwrap()
.expect("failed to get writer to mapped staging buffer")
.slice[..data.len()]
.copy_from_slice(data);
}
let mut trackers = device.trackers.lock();
let (dst, transition) =
trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST);
assert!(
dst.usage.contains(wgt::BufferUsage::COPY_DST),
"Write buffer usage {:?} must contain flag COPY_DST",
dst.usage
);
let last_submit_index = device.life_guard.submission_index.load(Ordering::Relaxed);
dst.life_guard.use_at(last_submit_index + 1);
let (dst, transition) = trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), BufferUse::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
if !dst.usage.contains(wgt::BufferUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)?;
}
dst.life_guard.use_at(device.active_submission_index + 1);
assert_eq!(
data_size % wgt::COPY_BUFFER_ALIGNMENT,
0,
"Buffer write size {} must be a multiple of {}",
buffer_offset,
wgt::COPY_BUFFER_ALIGNMENT,
);
assert_eq!(
buffer_offset % wgt::COPY_BUFFER_ALIGNMENT,
0,
"Buffer offset {} must be a multiple of {}",
buffer_offset,
wgt::COPY_BUFFER_ALIGNMENT,
);
let destination_start_offset = buffer_offset;
let destination_end_offset = buffer_offset + data_size;
assert!(
destination_end_offset <= dst.size,
"Write buffer with indices {}..{} overruns destination buffer of size {}",
destination_start_offset,
destination_end_offset,
dst.size
);
if data_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedCopySize(data_size))?
}
if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
Err(TransferError::UnalignedBufferOffset(buffer_offset))?
}
if buffer_offset + data_size > dst.size {
Err(TransferError::BufferOverrun {
start_offset: buffer_offset,
end_offset: buffer_offset + data_size,
buffer_size: dst.size,
side: CopySide::Destination,
})?
}
let region = hal::command::BufferCopy {
src: 0,
@ -214,7 +269,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
size: data.len() as _,
};
unsafe {
stage.comb.pipeline_barrier(
stage.cmdbuf.pipeline_barrier(
super::all_buffer_stages()..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
iter::once(hal::memory::Barrier::Buffer {
@ -226,11 +281,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.chain(transition.map(|pending| pending.into_hal(dst))),
);
stage
.comb
.copy_buffer(&stage.buffer, &dst.raw, iter::once(region));
.cmdbuf
.copy_buffer(&stage.buffer, dst_raw, iter::once(region));
}
device.pending_writes.consume(stage);
device.pending_writes.dst_buffers.insert(buffer_id);
Ok(())
}
pub fn queue_write_texture<B: GfxBackend>(
@ -240,70 +298,116 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
data: &[u8],
data_layout: &wgt::TextureDataLayout,
size: &wgt::Extent3d,
) {
) -> Result<(), QueueWriteError> {
span!(_guard, INFO, "Queue::write_texture");
let hub = B::hub(self);
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let (texture_guard, _) = hub.textures.read(&mut token);
let (image_layers, image_range, image_offset) = destination.to_hal(&*texture_guard);
let (image_layers, image_range, image_offset) =
texture_copy_view_to_hal(destination, size, &*texture_guard)?;
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteTexture {
to: destination.clone(),
data: data_path,
layout: data_layout.clone(),
size: *size,
});
}
None => {}
if let Some(ref trace) = device.trace {
let mut trace = trace.lock();
let data_path = trace.make_binary("bin", data);
trace.add(Action::WriteTexture {
to: destination.clone(),
data: data_path,
layout: data_layout.clone(),
size: *size,
});
}
if size.width == 0 || size.height == 0 || size.width == 0 {
log::trace!("Ignoring write_texture of size 0");
return;
if size.width == 0 || size.height == 0 || size.depth == 0 {
tracing::trace!("Ignoring write_texture of size 0");
return Ok(());
}
let texture_format = texture_guard[destination.texture].format;
let bytes_per_texel = conv::map_texture_format(texture_format, device.private_features)
let texture_format = texture_guard.get(destination.texture).unwrap().format;
let bytes_per_block = conv::map_texture_format(texture_format, device.private_features)
.surface_desc()
.bits as u32
/ BITS_PER_BYTE;
crate::command::validate_linear_texture_data(
validate_linear_texture_data(
data_layout,
texture_format,
data.len() as wgt::BufferAddress,
bytes_per_texel as wgt::BufferAddress,
CopySide::Source,
bytes_per_block as wgt::BufferAddress,
size,
);
)?;
let (block_width, block_height) = conv::texture_block_size(texture_format);
if !conv::is_valid_copy_dst_texture_format(texture_format) {
Err(TransferError::CopyToForbiddenTextureFormat(texture_format))?
}
let width_blocks = size.width / block_width;
let height_blocks = size.height / block_width;
let texel_rows_per_image = data_layout.rows_per_image;
let block_rows_per_image = data_layout.rows_per_image / block_height;
let bytes_per_row_alignment = get_lowest_common_denom(
device.hal_limits.optimal_buffer_copy_pitch_alignment as u32,
bytes_per_texel,
bytes_per_block,
);
let stage_bytes_per_row = align_to(bytes_per_texel * size.width, bytes_per_row_alignment);
let stage_size = stage_bytes_per_row as u64
* ((size.depth - 1) * data_layout.rows_per_image + size.height) as u64;
let mut stage = device.prepare_stage(stage_size);
let stage_bytes_per_row = align_to(bytes_per_block * width_blocks, bytes_per_row_alignment);
let block_rows_in_copy = (size.depth - 1) * block_rows_per_image + height_blocks;
let stage_size = stage_bytes_per_row as u64 * block_rows_in_copy as u64;
let mut stage = device.prepare_stage(stage_size)?;
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.textures
.use_replace(
&*texture_guard,
destination.texture,
image_range,
TextureUse::COPY_DST,
)
.unwrap();
let &(ref dst_raw, _) = dst
.raw
.as_ref()
.ok_or(TransferError::InvalidTexture(destination.texture))?;
if !dst.usage.contains(wgt::TextureUsage::COPY_DST) {
Err(TransferError::MissingCopyDstUsageFlag)?
}
validate_texture_copy_range(
destination,
dst.format,
dst.kind,
CopySide::Destination,
size,
)?;
dst.life_guard.use_at(device.active_submission_index + 1);
{
let mut mapped = stage
.memory
.map(&device.raw, hal::memory::Segment::ALL)
.unwrap();
let mapping = unsafe { mapped.write(&device.raw, hal::memory::Segment::ALL) }.unwrap();
.map_err(|err| match err {
hal::device::MapError::OutOfMemory(_) => DeviceError::OutOfMemory,
_ => panic!("failed to map staging buffer: {}", err),
})?;
let mapping = unsafe { mapped.write(&device.raw, hal::memory::Segment::ALL) }
.expect("failed to get writer to mapped staging buffer");
if stage_bytes_per_row == data_layout.bytes_per_row {
// Unlikely case of the data already being aligned optimally.
// Fast path if the data isalready being aligned optimally.
mapping.slice[..stage_size as usize].copy_from_slice(data);
} else {
// Copy row by row into the optimal alignment.
let copy_bytes_per_row =
stage_bytes_per_row.min(data_layout.bytes_per_row) as usize;
for layer in 0..size.depth {
let rows_offset = layer * data_layout.rows_per_image;
for row in 0..size.height {
let rows_offset = layer * block_rows_per_image;
for row in 0..height_blocks {
let data_offset =
(rows_offset + row) as usize * data_layout.bytes_per_row as usize;
let stage_offset =
@ -315,33 +419,16 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers.textures.use_replace(
&*texture_guard,
destination.texture,
image_range,
TextureUse::COPY_DST,
);
assert!(
dst.usage.contains(wgt::TextureUsage::COPY_DST),
"Write texture usage {:?} must contain flag COPY_DST",
dst.usage
);
crate::command::validate_texture_copy_range(destination, dst.kind, size);
let last_submit_index = device.life_guard.submission_index.load(Ordering::Relaxed);
dst.life_guard.use_at(last_submit_index + 1);
let region = hal::command::BufferImageCopy {
buffer_offset: 0,
buffer_width: stage_bytes_per_row / bytes_per_texel,
buffer_height: data_layout.rows_per_image,
buffer_width: (stage_bytes_per_row / bytes_per_block) * block_width,
buffer_height: texel_rows_per_image,
image_layers,
image_offset,
image_extent: conv::map_extent(size, dst.dimension),
};
unsafe {
stage.comb.pipeline_barrier(
stage.cmdbuf.pipeline_barrier(
super::all_image_stages() | hal::pso::PipelineStage::HOST
..hal::pso::PipelineStage::TRANSFER,
hal::memory::Dependencies::empty(),
@ -353,43 +440,42 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
})
.chain(transition.map(|pending| pending.into_hal(dst))),
);
stage.comb.copy_buffer_to_image(
stage.cmdbuf.copy_buffer_to_image(
&stage.buffer,
&dst.raw,
dst_raw,
hal::image::Layout::TransferDstOptimal,
iter::once(region),
);
}
device.pending_writes.consume(stage);
device
.pending_writes
.dst_textures
.insert(destination.texture);
Ok(())
}
pub fn queue_submit<B: GfxBackend>(
&self,
queue_id: id::QueueId,
command_buffer_ids: &[id::CommandBufferId],
) {
) -> Result<(), QueueSubmitError> {
span!(_guard, INFO, "Queue::submit");
let hub = B::hub(self);
let callbacks = {
let mut token = Token::root();
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let device = &mut device_guard[queue_id];
let pending_write_command_buffer =
device
.pending_writes
.command_buffer
.take()
.map(|mut comb_raw| unsafe {
comb_raw.finish();
comb_raw
});
let device = device_guard
.get_mut(queue_id)
.map_err(|_| DeviceError::Invalid)?;
let pending_write_command_buffer = device.pending_writes.finish();
device.temp_suspected.clear();
let submit_index = 1 + device
.life_guard
.submission_index
.fetch_add(1, Ordering::Relaxed);
device.active_submission_index += 1;
let submit_index = device.active_submission_index;
let fence = {
let mut signal_swapchain_semaphores = SmallVec::<[_; 1]>::new();
@ -414,105 +500,124 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
let comb = &mut command_buffer_guard[cmb_id];
let cmdbuf = command_buffer_guard
.get_mut(cmb_id)
.map_err(|_| QueueSubmitError::InvalidCommandBuffer(cmb_id))?;
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => trace
.lock()
.add(Action::Submit(submit_index, comb.commands.take().unwrap())),
None => (),
};
if let Some(ref trace) = device.trace {
trace.lock().add(Action::Submit(
submit_index,
cmdbuf.commands.take().unwrap(),
));
}
if let Some((sc_id, fbo)) = comb.used_swap_chain.take() {
if let Some((sc_id, fbo)) = cmdbuf.used_swap_chain.take() {
let sc = &mut swap_chain_guard[sc_id.value];
assert!(sc.acquired_view_id.is_some(),
"SwapChainOutput for {:?} was dropped before the respective command buffer {:?} got submitted!",
sc_id.value, cmb_id);
sc.active_submission_index = submit_index;
if sc.acquired_view_id.is_none() {
return Err(QueueSubmitError::SwapChainOutputDropped);
}
// For each swapchain, we only want to have at most 1 signaled semaphore.
if sc.acquired_framebuffers.is_empty() {
// Only add a signal if this is the first time for this swapchain
// to be used in the submission.
signal_swapchain_semaphores.push(sc_id.value);
}
sc.acquired_framebuffers.push(fbo);
}
// optimize the tracked states
comb.trackers.optimize();
cmdbuf.trackers.optimize();
// update submission IDs
for id in comb.trackers.buffers.used() {
if let BufferMapState::Waiting(_) = buffer_guard[id].map_state {
panic!("Buffer has a pending mapping.");
for id in cmdbuf.trackers.buffers.used() {
let buffer = &mut buffer_guard[id];
if buffer.raw.is_none() {
return Err(QueueSubmitError::DestroyedBuffer(id.0))?;
}
if !buffer_guard[id].life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer_guard[id].map_state {
log::warn!("Dropped buffer has a pending mapping.");
super::unmap_buffer(&device.raw, &mut buffer_guard[id]);
if !buffer.life_guard.use_at(submit_index) {
if let BufferMapState::Active { .. } = buffer.map_state {
tracing::warn!("Dropped buffer has a pending mapping.");
super::unmap_buffer(&device.raw, buffer)?;
}
device.temp_suspected.buffers.push(id);
} else {
match buffer.map_state {
BufferMapState::Idle => (),
_ => panic!("Buffer {:?} is still mapped", id),
}
}
}
for id in comb.trackers.textures.used() {
if !texture_guard[id].life_guard.use_at(submit_index) {
for id in cmdbuf.trackers.textures.used() {
let texture = &texture_guard[id];
if texture.raw.is_none() {
return Err(QueueSubmitError::DestroyedTexture(id.0))?;
}
if !texture.life_guard.use_at(submit_index) {
device.temp_suspected.textures.push(id);
}
}
for id in comb.trackers.views.used() {
for id in cmdbuf.trackers.views.used() {
if !texture_view_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.texture_views.push(id);
}
}
for id in comb.trackers.bind_groups.used() {
for id in cmdbuf.trackers.bind_groups.used() {
if !bind_group_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.bind_groups.push(id);
}
}
for id in comb.trackers.samplers.used() {
for id in cmdbuf.trackers.samplers.used() {
if !sampler_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.samplers.push(id);
}
}
for id in comb.trackers.compute_pipes.used() {
for id in cmdbuf.trackers.compute_pipes.used() {
if !compute_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.compute_pipelines.push(id);
}
}
for id in comb.trackers.render_pipes.used() {
for id in cmdbuf.trackers.render_pipes.used() {
if !render_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.render_pipelines.push(id);
}
}
// execute resource transitions
let mut transit = device.com_allocator.extend(comb);
let mut transit = device.cmd_allocator.extend(cmdbuf);
unsafe {
// the last buffer was open, closing now
comb.raw.last_mut().unwrap().finish();
cmdbuf.raw.last_mut().unwrap().finish();
transit
.begin_primary(hal::command::CommandBufferFlags::ONE_TIME_SUBMIT);
}
log::trace!("Stitching command buffer {:?} before submission", cmb_id);
tracing::trace!("Stitching command buffer {:?} before submission", cmb_id);
CommandBuffer::insert_barriers(
&mut transit,
&mut *trackers,
&comb.trackers,
&cmdbuf.trackers,
&*buffer_guard,
&*texture_guard,
);
unsafe {
transit.finish();
}
comb.raw.insert(0, transit);
cmdbuf.raw.insert(0, transit);
}
log::debug!("Device after submission {}: {:#?}", submit_index, trackers);
tracing::trace!("Device after submission {}: {:#?}", submit_index, trackers);
}
// now prepare the GPU submission
let fence = device.raw.create_fence(false).unwrap();
let fence = device
.raw
.create_fence(false)
.or(Err(DeviceError::OutOfMemory))?;
let submission = hal::queue::Submission {
command_buffers: pending_write_command_buffer.as_ref().into_iter().chain(
command_buffer_ids
.iter()
.flat_map(|&cmb_id| &command_buffer_guard[cmb_id].raw),
.flat_map(|&cmb_id| &command_buffer_guard.get(cmb_id).unwrap().raw),
),
wait_semaphores: Vec::new(),
signal_semaphores: signal_swapchain_semaphores
@ -528,28 +633,35 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if let Some(comb_raw) = pending_write_command_buffer {
device
.com_allocator
.cmd_allocator
.after_submit_internal(comb_raw, submit_index);
}
let callbacks = device.maintain(self, false, &mut token);
let callbacks = match device.maintain(&hub, false, &mut token) {
Ok(callbacks) => callbacks,
Err(WaitIdleError::Device(err)) => return Err(QueueSubmitError::Queue(err)),
Err(WaitIdleError::StuckGpu) => return Err(QueueSubmitError::StuckGpu),
};
super::Device::lock_life_internal(&device.life_tracker, &mut token).track_submission(
submit_index,
fence,
&device.temp_suspected,
device.pending_writes.temp_buffers.drain(..),
device.pending_writes.temp_resources.drain(..),
);
// finally, return the command buffers to the allocator
for &cmb_id in command_buffer_ids {
let (cmd_buf, _) = hub.command_buffers.unregister(cmb_id, &mut token);
device.com_allocator.after_submit(cmd_buf, submit_index);
if let (Some(cmd_buf), _) = hub.command_buffers.unregister(cmb_id, &mut token) {
device.cmd_allocator.after_submit(cmd_buf, submit_index);
}
}
callbacks
};
super::fire_map_callbacks(callbacks);
Ok(())
}
}

Просмотреть файл

@ -2,13 +2,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
command::{BufferCopyView, TextureCopyView},
id,
};
#[cfg(feature = "trace")]
use std::io::Write as _;
use crate::id;
use std::ops::Range;
#[cfg(feature = "trace")]
use std::{borrow::Cow, io::Write as _};
//TODO: consider a readable Id that doesn't include the backend
@ -16,153 +13,83 @@ type FileName = String;
pub const FILE_NAME: &str = "trace.ron";
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum BindingResource {
Buffer {
id: id::BufferId,
offset: wgt::BufferAddress,
size: wgt::BufferSize,
},
Sampler(id::SamplerId),
TextureView(id::TextureViewId),
TextureViewArray(Vec<id::TextureViewId>),
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ProgrammableStageDescriptor {
pub module: id::ShaderModuleId,
pub entry_point: String,
}
#[cfg(feature = "trace")]
impl ProgrammableStageDescriptor {
pub fn new(desc: &crate::pipeline::ProgrammableStageDescriptor) -> Self {
ProgrammableStageDescriptor {
module: desc.module,
entry_point: unsafe { std::ffi::CStr::from_ptr(desc.entry_point) }
.to_string_lossy()
.to_string(),
}
pub(crate) fn new_render_bundle_encoder_descriptor<'a>(
label: Option<&'a str>,
context: &'a super::RenderPassContext,
) -> crate::command::RenderBundleEncoderDescriptor<'a> {
crate::command::RenderBundleEncoderDescriptor {
label: label.map(Cow::Borrowed),
color_formats: Cow::Borrowed(&context.attachments.colors),
depth_stencil_format: context.attachments.depth_stencil,
sample_count: context.sample_count as u32,
}
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ComputePipelineDescriptor {
pub layout: id::PipelineLayoutId,
pub compute_stage: ProgrammableStageDescriptor,
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexBufferLayoutDescriptor {
pub array_stride: wgt::BufferAddress,
pub step_mode: wgt::InputStepMode,
pub attributes: Vec<wgt::VertexAttributeDescriptor>,
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexStateDescriptor {
pub index_format: wgt::IndexFormat,
pub vertex_buffers: Vec<VertexBufferLayoutDescriptor>,
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct RenderPipelineDescriptor {
pub layout: id::PipelineLayoutId,
pub vertex_stage: ProgrammableStageDescriptor,
pub fragment_stage: Option<ProgrammableStageDescriptor>,
pub primitive_topology: wgt::PrimitiveTopology,
pub rasterization_state: Option<wgt::RasterizationStateDescriptor>,
pub color_states: Vec<wgt::ColorStateDescriptor>,
pub depth_stencil_state: Option<wgt::DepthStencilStateDescriptor>,
pub vertex_state: VertexStateDescriptor,
pub sample_count: u32,
pub sample_mask: u32,
pub alpha_to_coverage_enabled: bool,
}
#[derive(Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub enum Action {
pub enum Action<'a> {
Init {
desc: wgt::DeviceDescriptor,
backend: wgt::Backend,
},
CreateBuffer {
id: id::BufferId,
desc: wgt::BufferDescriptor<String>,
},
CreateBuffer(id::BufferId, crate::resource::BufferDescriptor<'a>),
FreeBuffer(id::BufferId),
DestroyBuffer(id::BufferId),
CreateTexture {
id: id::TextureId,
desc: wgt::TextureDescriptor<String>,
},
CreateTexture(id::TextureId, crate::resource::TextureDescriptor<'a>),
FreeTexture(id::TextureId),
DestroyTexture(id::TextureId),
CreateTextureView {
id: id::TextureViewId,
parent_id: id::TextureId,
desc: Option<wgt::TextureViewDescriptor<String>>,
desc: crate::resource::TextureViewDescriptor<'a>,
},
DestroyTextureView(id::TextureViewId),
CreateSampler {
id: id::SamplerId,
desc: wgt::SamplerDescriptor<String>,
},
CreateSampler(id::SamplerId, crate::resource::SamplerDescriptor<'a>),
DestroySampler(id::SamplerId),
CreateSwapChain {
id: id::SwapChainId,
desc: wgt::SwapChainDescriptor,
},
CreateSwapChain(id::SwapChainId, wgt::SwapChainDescriptor),
GetSwapChainTexture {
id: Option<id::TextureViewId>,
parent_id: id::SwapChainId,
},
PresentSwapChain(id::SwapChainId),
CreateBindGroupLayout {
id: id::BindGroupLayoutId,
label: String,
entries: Vec<wgt::BindGroupLayoutEntry>,
},
CreateBindGroupLayout(
id::BindGroupLayoutId,
crate::binding_model::BindGroupLayoutDescriptor<'a>,
),
DestroyBindGroupLayout(id::BindGroupLayoutId),
CreatePipelineLayout {
id: id::PipelineLayoutId,
bind_group_layouts: Vec<id::BindGroupLayoutId>,
},
CreatePipelineLayout(
id::PipelineLayoutId,
crate::binding_model::PipelineLayoutDescriptor<'a>,
),
DestroyPipelineLayout(id::PipelineLayoutId),
CreateBindGroup {
id: id::BindGroupId,
label: String,
layout_id: id::BindGroupLayoutId,
entries: std::collections::BTreeMap<u32, BindingResource>,
},
CreateBindGroup(
id::BindGroupId,
crate::binding_model::BindGroupDescriptor<'a>,
),
DestroyBindGroup(id::BindGroupId),
CreateShaderModule {
id: id::ShaderModuleId,
data: FileName,
},
DestroyShaderModule(id::ShaderModuleId),
CreateComputePipeline {
id: id::ComputePipelineId,
desc: ComputePipelineDescriptor,
},
CreateComputePipeline(
id::ComputePipelineId,
crate::pipeline::ComputePipelineDescriptor<'a>,
),
DestroyComputePipeline(id::ComputePipelineId),
CreateRenderPipeline {
id: id::RenderPipelineId,
desc: RenderPipelineDescriptor,
},
CreateRenderPipeline(
id::RenderPipelineId,
crate::pipeline::RenderPipelineDescriptor<'a>,
),
DestroyRenderPipeline(id::RenderPipelineId),
CreateRenderBundle {
id: id::RenderBundleId,
desc: crate::command::RenderBundleEncoderDescriptor<'a>,
base: crate::command::BasePass<crate::command::RenderCommand>,
},
DestroyRenderBundle(id::RenderBundleId),
WriteBuffer {
id: id::BufferId,
data: FileName,
@ -170,7 +97,7 @@ pub enum Action {
queued: bool,
},
WriteTexture {
to: TextureCopyView,
to: crate::command::TextureCopyView,
data: FileName,
layout: wgt::TextureDataLayout,
size: wgt::Extent3d,
@ -190,29 +117,27 @@ pub enum Command {
size: wgt::BufferAddress,
},
CopyBufferToTexture {
src: BufferCopyView,
dst: TextureCopyView,
src: crate::command::BufferCopyView,
dst: crate::command::TextureCopyView,
size: wgt::Extent3d,
},
CopyTextureToBuffer {
src: TextureCopyView,
dst: BufferCopyView,
src: crate::command::TextureCopyView,
dst: crate::command::BufferCopyView,
size: wgt::Extent3d,
},
CopyTextureToTexture {
src: TextureCopyView,
dst: TextureCopyView,
src: crate::command::TextureCopyView,
dst: crate::command::TextureCopyView,
size: wgt::Extent3d,
},
RunComputePass {
commands: Vec<crate::command::ComputeCommand>,
dynamic_offsets: Vec<wgt::DynamicOffset>,
base: crate::command::BasePass<crate::command::ComputeCommand>,
},
RunRenderPass {
target_colors: Vec<crate::command::RenderPassColorAttachmentDescriptor>,
target_depth_stencil: Option<crate::command::RenderPassDepthStencilAttachmentDescriptor>,
commands: Vec<crate::command::RenderCommand>,
dynamic_offsets: Vec<wgt::DynamicOffset>,
base: crate::command::BasePass<crate::command::RenderCommand>,
target_colors: Vec<crate::command::ColorAttachmentDescriptor>,
target_depth_stencil: Option<crate::command::DepthStencilAttachmentDescriptor>,
},
}
@ -228,10 +153,10 @@ pub struct Trace {
#[cfg(feature = "trace")]
impl Trace {
pub fn new(path: &std::path::Path) -> Result<Self, std::io::Error> {
log::info!("Tracing into '{:?}'", path);
tracing::info!("Tracing into '{:?}'", path);
let mut file = std::fs::File::create(path.join(FILE_NAME))?;
file.write(b"[\n")?;
Ok(Trace {
file.write_all(b"[\n")?;
Ok(Self {
path: path.to_path_buf(),
file,
config: ron::ser::PrettyConfig::default(),
@ -252,7 +177,7 @@ impl Trace {
let _ = writeln!(self.file, "{},", string);
}
Err(e) => {
log::warn!("RON serialization failure: {:?}", e);
tracing::warn!("RON serialization failure: {:?}", e);
}
}
}
@ -261,6 +186,6 @@ impl Trace {
#[cfg(feature = "trace")]
impl Drop for Trace {
fn drop(&mut self) {
let _ = self.file.write(b"]");
let _ = self.file.write_all(b"]");
}
}

Просмотреть файл

@ -5,22 +5,22 @@
use crate::{
backend,
binding_model::{BindGroup, BindGroupLayout, PipelineLayout},
command::CommandBuffer,
command::{CommandBuffer, RenderBundle},
device::Device,
id::{
AdapterId, BindGroupId, BindGroupLayoutId, BufferId, CommandBufferId, ComputePipelineId,
DeviceId, PipelineLayoutId, RenderPipelineId, SamplerId, ShaderModuleId, SurfaceId,
SwapChainId, TextureId, TextureViewId, TypedId,
DeviceId, PipelineLayoutId, RenderBundleId, RenderPipelineId, SamplerId, ShaderModuleId,
SurfaceId, SwapChainId, TextureId, TextureViewId, TypedId, Valid,
},
instance::{Adapter, Instance, Surface},
pipeline::{ComputePipeline, RenderPipeline, ShaderModule},
resource::{Buffer, Sampler, Texture, TextureView},
span,
swap_chain::SwapChain,
Epoch, Index,
};
use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use vec_map::VecMap;
use wgt::Backend;
#[cfg(debug_assertions)]
@ -36,7 +36,7 @@ pub struct IdentityManager {
impl Default for IdentityManager {
fn default() -> Self {
IdentityManager {
Self {
free: Default::default(),
epochs: Default::default(),
}
@ -45,7 +45,7 @@ impl Default for IdentityManager {
impl IdentityManager {
pub fn from_index(min_index: u32) -> Self {
IdentityManager {
Self {
free: (0..min_index).collect(),
epochs: vec![1; min_index as usize],
}
@ -76,76 +76,139 @@ impl IdentityManager {
}
}
#[derive(Debug)]
enum Element<T> {
Vacant,
Occupied(T, Epoch),
Error(Epoch),
}
#[derive(Clone, Debug)]
pub(crate) struct InvalidId;
#[derive(Debug)]
pub struct Storage<T, I: TypedId> {
//TODO: consider concurrent hashmap?
map: VecMap<(T, Epoch)>,
map: Vec<Element<T>>,
kind: &'static str,
_phantom: PhantomData<I>,
}
impl<T, I: TypedId> ops::Index<I> for Storage<T, I> {
impl<T, I: TypedId> ops::Index<Valid<I>> for Storage<T, I> {
type Output = T;
fn index(&self, id: I) -> &T {
let (index, epoch, _) = id.unzip();
let (ref value, storage_epoch) = match self.map.get(index as usize) {
Some(v) => v,
None => panic!("{}[{}] does not exist", self.kind, index),
};
assert_eq!(
epoch, *storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
value
fn index(&self, id: Valid<I>) -> &T {
self.get(id.0).unwrap()
}
}
impl<T, I: TypedId> ops::IndexMut<I> for Storage<T, I> {
fn index_mut(&mut self, id: I) -> &mut T {
let (index, epoch, _) = id.unzip();
let (ref mut value, storage_epoch) = match self.map.get_mut(index as usize) {
Some(v) => v,
None => panic!("{}[{}] does not exist", self.kind, index),
};
assert_eq!(
epoch, *storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
value
impl<T, I: TypedId> ops::IndexMut<Valid<I>> for Storage<T, I> {
fn index_mut(&mut self, id: Valid<I>) -> &mut T {
self.get_mut(id.0).unwrap()
}
}
impl<T, I: TypedId> Storage<T, I> {
pub fn contains(&self, id: I) -> bool {
pub(crate) fn contains(&self, id: I) -> bool {
let (index, epoch, _) = id.unzip();
match self.map.get(index as usize) {
Some(&(_, storage_epoch)) => epoch == storage_epoch,
None => false,
match self.map[index as usize] {
Element::Vacant => false,
Element::Occupied(_, storage_epoch) | Element::Error(storage_epoch) => {
epoch == storage_epoch
}
}
}
pub fn insert(&mut self, id: I, value: T) -> Option<T> {
/// Get a reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get(&self, id: I) -> Result<&T, InvalidId> {
let (index, epoch, _) = id.unzip();
let old = self.map.insert(index as usize, (value, epoch));
old.map(|(v, _storage_epoch)| v)
let (result, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref v, epoch) => (Ok(v), epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(epoch) => (Err(InvalidId), epoch),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
result
}
pub fn remove(&mut self, id: I) -> Option<T> {
/// Get a mutable reference to an item behind a potentially invalid ID.
/// Panics if there is an epoch mismatch, or the entry is empty.
pub(crate) fn get_mut(&mut self, id: I) -> Result<&mut T, InvalidId> {
let (index, epoch, _) = id.unzip();
self.map
.remove(index as usize)
.map(|(value, storage_epoch)| {
let (result, storage_epoch) = match self.map[index as usize] {
Element::Occupied(ref mut v, epoch) => (Ok(v), epoch),
Element::Vacant => panic!("{}[{}] does not exist", self.kind, index),
Element::Error(epoch) => (Err(InvalidId), epoch),
};
assert_eq!(
epoch, storage_epoch,
"{}[{}] is no longer alive",
self.kind, index
);
result
}
fn insert_impl(&mut self, index: usize, element: Element<T>) {
if index >= self.map.len() {
self.map.resize_with(index + 1, || Element::Vacant);
}
match std::mem::replace(&mut self.map[index], element) {
Element::Vacant => {}
_ => panic!("Index {:?} is already occupied", index),
}
}
pub(crate) fn insert(&mut self, id: I, value: T) {
let (index, epoch, _) = id.unzip();
self.insert_impl(index as usize, Element::Occupied(value, epoch))
}
pub(crate) fn insert_error(&mut self, id: I) {
let (index, epoch, _) = id.unzip();
self.insert_impl(index as usize, Element::Error(epoch))
}
pub(crate) fn remove(&mut self, id: I) -> Option<T> {
let (index, epoch, _) = id.unzip();
match std::mem::replace(&mut self.map[index as usize], Element::Vacant) {
Element::Occupied(value, storage_epoch) => {
assert_eq!(epoch, storage_epoch);
value
})
Some(value)
}
Element::Error(_) => None,
Element::Vacant => panic!("Cannot remove a vacant resource"),
}
}
pub fn iter(&self, backend: Backend) -> impl Iterator<Item = (I, &T)> {
self.map.iter().map(move |(index, (value, storage_epoch))| {
(I::zip(index as Index, *storage_epoch, backend), value)
})
// Prevents panic on out of range access, allows Vacant elements.
pub(crate) fn try_remove(&mut self, id: I) -> Option<T> {
let (index, epoch, _) = id.unzip();
if index as usize >= self.map.len() {
None
} else if let Element::Occupied(value, storage_epoch) =
std::mem::replace(&mut self.map[index as usize], Element::Vacant)
{
assert_eq!(epoch, storage_epoch);
Some(value)
} else {
None
}
}
pub(crate) fn iter(&self, backend: Backend) -> impl Iterator<Item = (I, &T)> {
self.map
.iter()
.enumerate()
.filter_map(move |(index, x)| match *x {
Element::Occupied(ref value, storage_epoch) => {
Some((I::zip(index as Index, storage_epoch, backend), value))
}
_ => None,
})
.into_iter()
}
}
@ -174,7 +237,7 @@ impl<B: hal::Backend> Access<SwapChain<B>> for Root {}
impl<B: hal::Backend> Access<SwapChain<B>> for Device<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for Root {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for Device<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<PipelineLayout<B>> for RenderBundle {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Root {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for Device<B> {}
impl<B: hal::Backend> Access<BindGroupLayout<B>> for PipelineLayout<B> {}
@ -186,6 +249,8 @@ impl<B: hal::Backend> Access<BindGroup<B>> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Root {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for Device<B> {}
impl<B: hal::Backend> Access<CommandBuffer<B>> for SwapChain<B> {}
impl<B: hal::Backend> Access<RenderBundle> for Device<B> {}
impl<B: hal::Backend> Access<RenderBundle> for CommandBuffer<B> {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for Device<B> {}
impl<B: hal::Backend> Access<ComputePipeline<B>> for BindGroup<B> {}
impl<B: hal::Backend> Access<RenderPipeline<B>> for Device<B> {}
@ -233,7 +298,7 @@ impl<'a, T> Token<'a, T> {
assert_ne!(old, 0, "Root token was dropped");
active.set(old + 1);
});
Token { level: PhantomData }
Self { level: PhantomData }
}
}
@ -244,7 +309,7 @@ impl Token<'static, Root> {
assert_eq!(0, active.replace(1), "Root token is already active");
});
Token { level: PhantomData }
Self { level: PhantomData }
}
}
@ -298,6 +363,7 @@ pub trait GlobalIdentityHandlerFactory:
+ IdentityHandlerFactory<BindGroupLayoutId>
+ IdentityHandlerFactory<BindGroupId>
+ IdentityHandlerFactory<CommandBufferId>
+ IdentityHandlerFactory<RenderBundleId>
+ IdentityHandlerFactory<RenderPipelineId>
+ IdentityHandlerFactory<ComputePipelineId>
+ IdentityHandlerFactory<BufferId>
@ -321,10 +387,10 @@ pub struct Registry<T, I: TypedId, F: IdentityHandlerFactory<I>> {
impl<T, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
fn new(backend: Backend, factory: &F, kind: &'static str) -> Self {
Registry {
Self {
identity: factory.spawn(0),
data: RwLock::new(Storage {
map: VecMap::new(),
map: Vec::new(),
kind,
_phantom: PhantomData,
}),
@ -333,10 +399,10 @@ impl<T, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
}
fn without_backend(factory: &F, kind: &'static str) -> Self {
Registry {
Self {
identity: factory.spawn(1),
data: RwLock::new(Storage {
map: VecMap::new(),
map: Vec::new(),
kind,
_phantom: PhantomData,
}),
@ -348,8 +414,7 @@ impl<T, I: TypedId, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
pub fn register<A: Access<T>>(&self, id: I, value: T, _token: &mut Token<A>) {
debug_assert_eq!(id.unzip().2, self.backend);
let old = self.data.write().insert(id, value);
assert!(old.is_none());
self.data.write().insert(id, value);
}
pub fn read<'a, A: Access<T>>(
@ -365,31 +430,64 @@ impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
) -> (RwLockWriteGuard<'a, Storage<T, I>>, Token<'a, T>) {
(self.data.write(), Token::new())
}
}
impl<T, I: TypedId + Copy, F: IdentityHandlerFactory<I>> Registry<T, I, F> {
pub fn register_identity<A: Access<T>>(
pub(crate) fn register_identity<A: Access<T>>(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
value: T,
token: &mut Token<A>,
) -> I {
) -> Valid<I> {
let id = self.identity.process(id_in, self.backend);
self.register(id, value, token);
Valid(id)
}
pub(crate) fn register_identity_locked(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
value: T,
guard: &mut Storage<T, I>,
) -> Valid<I> {
let id = self.identity.process(id_in, self.backend);
guard.insert(id, value);
Valid(id)
}
pub fn register_error<A: Access<T>>(
&self,
id_in: <F::Filter as IdentityHandler<I>>::Input,
_token: &mut Token<A>,
) -> I {
let id = self.identity.process(id_in, self.backend);
debug_assert_eq!(id.unzip().2, self.backend);
self.data.write().insert_error(id);
id
}
pub fn unregister_locked(&self, id: I, guard: &mut Storage<T, I>) -> Option<T> {
let value = guard.remove(id);
//Note: careful about the order here!
self.identity.free(id);
//Returning None is legal if it's an error ID
value
}
pub fn unregister<'a, A: Access<T>>(
&self,
id: I,
_token: &'a mut Token<A>,
) -> (T, Token<'a, T>) {
let value = self.data.write().remove(id).unwrap();
) -> (Option<T>, Token<'a, T>) {
let value = self.data.write().remove(id);
//Note: careful about the order here!
self.identity.free(id);
//Returning None is legal if it's an error ID
(value, Token::new())
}
pub fn process_id(&self, id_in: <F::Filter as IdentityHandler<I>>::Input) -> I {
self.identity.process(id_in, self.backend)
}
pub fn free_id(&self, id: I) {
self.identity.free(id)
}
@ -405,6 +503,7 @@ pub struct Hub<B: hal::Backend, F: GlobalIdentityHandlerFactory> {
pub bind_group_layouts: Registry<BindGroupLayout<B>, BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<B>, BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<B>, CommandBufferId, F>,
pub render_bundles: Registry<RenderBundle, RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<B>, RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<B>, ComputePipelineId, F>,
pub buffers: Registry<Buffer<B>, BufferId, F>,
@ -415,7 +514,7 @@ pub struct Hub<B: hal::Backend, F: GlobalIdentityHandlerFactory> {
impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
fn new(factory: &F) -> Self {
Hub {
Self {
adapters: Registry::new(B::VARIANT, factory, "Adapter"),
devices: Registry::new(B::VARIANT, factory, "Device"),
swap_chains: Registry::new(B::VARIANT, factory, "SwapChain"),
@ -424,6 +523,7 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
bind_group_layouts: Registry::new(B::VARIANT, factory, "BindGroupLayout"),
bind_groups: Registry::new(B::VARIANT, factory, "BindGroup"),
command_buffers: Registry::new(B::VARIANT, factory, "CommandBuffer"),
render_bundles: Registry::new(B::VARIANT, factory, "RenderBundle"),
render_pipelines: Registry::new(B::VARIANT, factory, "RenderPipeline"),
compute_pipelines: Registry::new(B::VARIANT, factory, "ComputePipeline"),
buffers: Registry::new(B::VARIANT, factory, "Buffer"),
@ -435,129 +535,153 @@ impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
}
impl<B: GfxBackend, F: GlobalIdentityHandlerFactory> Hub<B, F> {
fn clear(&mut self, surface_guard: &mut Storage<Surface, SurfaceId>) {
fn clear(&self, surface_guard: &mut Storage<Surface, SurfaceId>) {
use crate::resource::TextureViewInner;
use hal::{device::Device as _, window::PresentationSurface as _};
let mut devices = self.devices.data.write();
for (device, _) in devices.map.values_mut() {
device.prepare_to_die();
for element in devices.map.iter_mut() {
if let Element::Occupied(device, _) = element {
device.prepare_to_die();
}
}
for (_, (sampler, _)) in self.samplers.data.write().map.drain() {
unsafe {
devices[sampler.device_id.value]
.raw
.destroy_sampler(sampler.raw);
for element in self.samplers.data.write().map.drain(..) {
if let Element::Occupied(sampler, _) = element {
unsafe {
devices[sampler.device_id.value]
.raw
.destroy_sampler(sampler.raw);
}
}
}
{
let textures = self.textures.data.read();
for (_, (texture_view, _)) in self.texture_views.data.write().map.drain() {
match texture_view.inner {
TextureViewInner::Native { raw, source_id } => {
let device = &devices[textures[source_id.value].device_id.value];
unsafe {
device.raw.destroy_image_view(raw);
for element in self.texture_views.data.write().map.drain(..) {
if let Element::Occupied(texture_view, _) = element {
match texture_view.inner {
TextureViewInner::Native { raw, source_id } => {
let device = &devices[textures[source_id.value].device_id.value];
unsafe {
device.raw.destroy_image_view(raw);
}
}
TextureViewInner::SwapChain { .. } => {} //TODO
}
TextureViewInner::SwapChain { .. } => {} //TODO
}
}
}
for (_, (texture, _)) in self.textures.data.write().map.drain() {
devices[texture.device_id.value].destroy_texture(texture);
}
for (_, (buffer, _)) in self.buffers.data.write().map.drain() {
//TODO: unmap if needed
devices[buffer.device_id.value].destroy_buffer(buffer);
}
for (_, (command_buffer, _)) in self.command_buffers.data.write().map.drain() {
devices[command_buffer.device_id.value]
.com_allocator
.after_submit(command_buffer, 0);
}
for (_, (bind_group, _)) in self.bind_groups.data.write().map.drain() {
let device = &devices[bind_group.device_id.value];
device.destroy_bind_group(bind_group);
}
for (_, (module, _)) in self.shader_modules.data.write().map.drain() {
let device = &devices[module.device_id.value];
unsafe {
device.raw.destroy_shader_module(module.raw);
for element in self.textures.data.write().map.drain(..) {
if let Element::Occupied(texture, _) = element {
devices[texture.device_id.value].destroy_texture(texture);
}
}
for (_, (bgl, _)) in self.bind_group_layouts.data.write().map.drain() {
let device = &devices[bgl.device_id.value];
unsafe {
device.raw.destroy_descriptor_set_layout(bgl.raw);
for element in self.buffers.data.write().map.drain(..) {
if let Element::Occupied(buffer, _) = element {
//TODO: unmap if needed
devices[buffer.device_id.value].destroy_buffer(buffer);
}
}
for (_, (pipeline_layout, _)) in self.pipeline_layouts.data.write().map.drain() {
let device = &devices[pipeline_layout.device_id.value];
unsafe {
device.raw.destroy_pipeline_layout(pipeline_layout.raw);
for element in self.command_buffers.data.write().map.drain(..) {
if let Element::Occupied(command_buffer, _) = element {
devices[command_buffer.device_id.value]
.cmd_allocator
.after_submit(command_buffer, 0);
}
}
for (_, (pipeline, _)) in self.compute_pipelines.data.write().map.drain() {
let device = &devices[pipeline.device_id.value];
unsafe {
device.raw.destroy_compute_pipeline(pipeline.raw);
}
}
for (_, (pipeline, _)) in self.render_pipelines.data.write().map.drain() {
let device = &devices[pipeline.device_id.value];
unsafe {
device.raw.destroy_graphics_pipeline(pipeline.raw);
for element in self.bind_groups.data.write().map.drain(..) {
if let Element::Occupied(bind_group, _) = element {
let device = &devices[bind_group.device_id.value];
device.destroy_bind_group(bind_group);
}
}
for (index, (swap_chain, epoch)) in self.swap_chains.data.write().map.drain() {
let device = &devices[swap_chain.device_id.value];
let surface = &mut surface_guard[TypedId::zip(index as Index, epoch, B::VARIANT)];
let suf = B::get_surface_mut(surface);
unsafe {
device.raw.destroy_semaphore(swap_chain.semaphore);
suf.unconfigure_swapchain(&device.raw);
for element in self.shader_modules.data.write().map.drain(..) {
if let Element::Occupied(module, _) = element {
let device = &devices[module.device_id.value];
unsafe {
device.raw.destroy_shader_module(module.raw);
}
}
}
for element in self.bind_group_layouts.data.write().map.drain(..) {
if let Element::Occupied(bgl, _) = element {
let device = &devices[bgl.device_id.value];
unsafe {
device.raw.destroy_descriptor_set_layout(bgl.raw);
}
}
}
for element in self.pipeline_layouts.data.write().map.drain(..) {
if let Element::Occupied(pipeline_layout, _) = element {
let device = &devices[pipeline_layout.device_id.value];
unsafe {
device.raw.destroy_pipeline_layout(pipeline_layout.raw);
}
}
}
for element in self.compute_pipelines.data.write().map.drain(..) {
if let Element::Occupied(pipeline, _) = element {
let device = &devices[pipeline.device_id.value];
unsafe {
device.raw.destroy_compute_pipeline(pipeline.raw);
}
}
}
for element in self.render_pipelines.data.write().map.drain(..) {
if let Element::Occupied(pipeline, _) = element {
let device = &devices[pipeline.device_id.value];
unsafe {
device.raw.destroy_graphics_pipeline(pipeline.raw);
}
}
}
for (_, (device, _)) in devices.map.drain() {
device.dispose();
for (index, element) in self.swap_chains.data.write().map.drain(..).enumerate() {
if let Element::Occupied(swap_chain, epoch) = element {
let device = &devices[swap_chain.device_id.value];
let surface = surface_guard
.get_mut(TypedId::zip(index as Index, epoch, B::VARIANT))
.unwrap();
let suf = B::get_surface_mut(surface);
unsafe {
device.raw.destroy_semaphore(swap_chain.semaphore);
suf.unconfigure_swapchain(&device.raw);
}
}
}
for element in devices.map.drain(..) {
if let Element::Occupied(device, _) = element {
device.dispose();
}
}
}
}
#[derive(Debug)]
pub struct Hubs<F: GlobalIdentityHandlerFactory> {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
#[cfg(vulkan)]
vulkan: Hub<backend::Vulkan, F>,
#[cfg(any(target_os = "ios", target_os = "macos"))]
#[cfg(metal)]
metal: Hub<backend::Metal, F>,
#[cfg(windows)]
#[cfg(dx12)]
dx12: Hub<backend::Dx12, F>,
#[cfg(windows)]
#[cfg(dx11)]
dx11: Hub<backend::Dx11, F>,
}
impl<F: GlobalIdentityHandlerFactory> Hubs<F> {
fn new(factory: &F) -> Self {
Hubs {
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
Self {
#[cfg(vulkan)]
vulkan: Hub::new(factory),
#[cfg(any(target_os = "ios", target_os = "macos"))]
#[cfg(metal)]
metal: Hub::new(factory),
#[cfg(windows)]
#[cfg(dx12)]
dx12: Hub::new(factory),
#[cfg(windows)]
#[cfg(dx11)]
dx11: Hub::new(factory),
}
}
@ -571,35 +695,51 @@ pub struct Global<G: GlobalIdentityHandlerFactory> {
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn new(name: &str, factory: G) -> Self {
Global {
instance: Instance::new(name, 1),
pub fn new(name: &str, factory: G, backends: wgt::BackendBit) -> Self {
span!(_guard, INFO, "Global::new");
Self {
instance: Instance::new(name, 1, backends),
surfaces: Registry::without_backend(&factory, "Surface"),
hubs: Hubs::new(&factory),
}
}
pub fn clear_backend<B: GfxBackend>(&self, _dummy: ()) {
let mut surface_guard = self.surfaces.data.write();
let hub = B::hub(self);
hub.clear(&mut *surface_guard);
}
}
impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
fn drop(&mut self) {
if !thread::panicking() {
log::info!("Dropping Global");
tracing::info!("Dropping Global");
let mut surface_guard = self.surfaces.data.write();
// destroy hubs
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
self.hubs.vulkan.clear(&mut *surface_guard);
#[cfg(any(target_os = "ios", target_os = "macos"))]
self.hubs.metal.clear(&mut *surface_guard);
#[cfg(windows)]
self.hubs.dx12.clear(&mut *surface_guard);
#[cfg(windows)]
self.hubs.dx11.clear(&mut *surface_guard);
#[cfg(vulkan)]
{
self.hubs.vulkan.clear(&mut *surface_guard);
}
#[cfg(metal)]
{
self.hubs.metal.clear(&mut *surface_guard);
}
#[cfg(dx12)]
{
self.hubs.dx12.clear(&mut *surface_guard);
}
#[cfg(dx11)]
{
self.hubs.dx11.clear(&mut *surface_guard);
}
// destroy surfaces
for (_, (surface, _)) in surface_guard.map.drain() {
self.instance.destroy_surface(surface);
for element in surface_guard.map.drain(..) {
if let Element::Occupied(surface, _) = element {
self.instance.destroy_surface(surface);
}
}
}
}
@ -611,10 +751,7 @@ pub trait GfxBackend: hal::Backend {
fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface;
}
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
#[cfg(vulkan)]
impl GfxBackend for backend::Vulkan {
const VARIANT: Backend = Backend::Vulkan;
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
@ -625,18 +762,18 @@ impl GfxBackend for backend::Vulkan {
}
}
#[cfg(any(target_os = "ios", target_os = "macos"))]
#[cfg(metal)]
impl GfxBackend for backend::Metal {
const VARIANT: Backend = Backend::Metal;
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.metal
}
fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
&mut surface.metal
surface.metal.as_mut().unwrap()
}
}
#[cfg(windows)]
#[cfg(dx12)]
impl GfxBackend for backend::Dx12 {
const VARIANT: Backend = Backend::Dx12;
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
@ -647,13 +784,19 @@ impl GfxBackend for backend::Dx12 {
}
}
#[cfg(windows)]
#[cfg(dx11)]
impl GfxBackend for backend::Dx11 {
const VARIANT: Backend = Backend::Dx11;
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.dx11
}
fn get_surface_mut(surface: &mut Surface) -> &mut Self::Surface {
&mut surface.dx11
surface.dx11.as_mut().unwrap()
}
}
#[cfg(test)]
fn _test_send_sync(global: &Global<IdentityManagerFactory>) {
fn test_internal<T: Send + Sync>(_: T) {}
test_internal(global)
}

Просмотреть файл

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{Epoch, Index};
use std::{fmt, marker::PhantomData, mem, num::NonZeroU64};
use std::{cmp::Ordering, fmt, marker::PhantomData, num::NonZeroU64};
use wgt::Backend;
const BACKEND_BITS: usize = 3;
@ -17,6 +17,14 @@ type Dummy = crate::backend::Empty;
derive(serde::Deserialize),
serde(from = "SerialId")
)]
#[cfg_attr(
all(feature = "serde", not(feature = "trace")),
derive(serde::Serialize)
)]
#[cfg_attr(
all(feature = "serde", not(feature = "replay")),
derive(serde::Deserialize)
)]
pub struct Id<T>(NonZeroU64, PhantomData<T>);
// This type represents Id in a more readable (and editable) way.
@ -31,7 +39,7 @@ enum SerialId {
impl<T> From<Id<T>> for SerialId {
fn from(id: Id<T>) -> Self {
let (index, epoch, backend) = id.unzip();
SerialId::Id(index, epoch, backend)
Self::Id(index, epoch, backend)
}
}
#[cfg(feature = "replay")]
@ -43,20 +51,12 @@ impl<T> From<SerialId> for Id<T> {
}
}
// required for PeekPoke
impl<T> Default for Id<T> {
fn default() -> Self {
Id(
// Create an ID that doesn't make sense:
// the high `BACKEND_BITS` are to be set to 0, which matches `Backend::Empty`,
// the other bits are all 1s
unsafe { NonZeroU64::new_unchecked(!0 >> BACKEND_BITS) },
PhantomData,
)
}
}
impl<T> Id<T> {
#[cfg(test)]
pub(crate) fn dummy() -> Valid<Self> {
Valid(Id(NonZeroU64::new(1).unwrap(), PhantomData))
}
pub fn backend(self) -> Backend {
match self.0.get() >> (64 - BACKEND_BITS) as u8 {
0 => Backend::Empty,
@ -68,14 +68,6 @@ impl<T> Id<T> {
_ => unreachable!(),
}
}
pub(crate) fn into_raw(self) -> u64 {
self.0.get()
}
pub(crate) fn from_raw(value: u64) -> Option<Self> {
NonZeroU64::new(value).map(|nz| Id(nz, PhantomData))
}
}
impl<T> Copy for Id<T> {}
@ -106,24 +98,26 @@ impl<T> PartialEq for Id<T> {
impl<T> Eq for Id<T> {}
unsafe impl<T> peek_poke::Poke for Id<T> {
fn max_size() -> usize {
mem::size_of::<u64>()
}
unsafe fn poke_into(&self, data: *mut u8) -> *mut u8 {
self.0.get().poke_into(data)
impl<T> PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.0.partial_cmp(&other.0)
}
}
impl<T> peek_poke::Peek for Id<T> {
unsafe fn peek_from(mut data: *const u8, this: *mut Self) -> *const u8 {
let mut v = 0u64;
data = u64::peek_from(data, &mut v);
(*this).0 = NonZeroU64::new(v).unwrap();
data
impl<T> Ord for Id<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.0.cmp(&other.0)
}
}
/// An internal ID that has been checked to point to
/// a valid object in the storages.
#[repr(transparent)]
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub(crate) struct Valid<I>(pub I);
pub trait TypedId {
fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
fn unzip(self) -> (Index, Epoch, Backend);
@ -164,11 +158,12 @@ pub type ShaderModuleId = Id<crate::pipeline::ShaderModule<Dummy>>;
pub type RenderPipelineId = Id<crate::pipeline::RenderPipeline<Dummy>>;
pub type ComputePipelineId = Id<crate::pipeline::ComputePipeline<Dummy>>;
// Command
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type CommandEncoderId = CommandBufferId;
pub type RenderPassId = *mut crate::command::RawPass;
pub type ComputePassId = *mut crate::command::RawPass;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type RenderPassEncoderId = *mut crate::command::RenderPass;
pub type ComputePassEncoderId = *mut crate::command::ComputePass;
pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
pub type RenderBundleId = Id<crate::command::RenderBundle>;
// Swap chain
pub type SwapChainId = Id<crate::swap_chain::SwapChain<Dummy>>;

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -9,18 +9,19 @@
unused_qualifications
)]
#[macro_use]
mod macros;
pub mod backend {
#[cfg(windows)]
pub use gfx_backend_dx11::Backend as Dx11;
#[cfg(windows)]
pub use gfx_backend_dx12::Backend as Dx12;
pub use gfx_backend_empty::Backend as Empty;
#[cfg(any(target_os = "ios", target_os = "macos"))]
#[cfg(dx11)]
pub use gfx_backend_dx11::Backend as Dx11;
#[cfg(dx12)]
pub use gfx_backend_dx12::Backend as Dx12;
#[cfg(metal)]
pub use gfx_backend_metal::Backend as Metal;
#[cfg(any(
not(any(target_os = "ios", target_os = "macos")),
feature = "gfx-backend-vulkan"
))]
#[cfg(vulkan)]
pub use gfx_backend_vulkan::Backend as Vulkan;
}
@ -32,14 +33,11 @@ pub mod hub;
pub mod id;
pub mod instance;
pub mod pipeline;
pub mod power;
pub mod resource;
pub mod swap_chain;
mod track;
mod validation;
pub use hal::pso::read_spirv;
#[cfg(test)]
use loom::sync::atomic;
#[cfg(not(test))]
@ -47,19 +45,20 @@ use std::sync::atomic;
use atomic::{AtomicUsize, Ordering};
use std::{os::raw::c_char, ptr};
use std::{borrow::Cow, os::raw::c_char, ptr};
const MAX_BIND_GROUPS: usize = 4;
pub const MAX_BIND_GROUPS: usize = 8;
type SubmissionIndex = usize;
type Index = u32;
type Epoch = u32;
pub type RawString = *const c_char;
pub type Label<'a> = Option<Cow<'a, str>>;
//TODO: make it private. Currently used for swapchain creation impl.
/// Reference count object that is 1:1 with each reference.
#[derive(Debug)]
pub struct RefCount(ptr::NonNull<AtomicUsize>);
struct RefCount(ptr::NonNull<AtomicUsize>);
unsafe impl Send for RefCount {}
unsafe impl Sync for RefCount {}
@ -84,7 +83,7 @@ impl RefCount {
/// logic. To use this safely from outside of `Drop::drop`, the calling function must move
/// `Self` into a `ManuallyDrop`.
unsafe fn rich_drop_inner(&mut self) -> bool {
if self.0.as_ref().fetch_sub(1, Ordering::Relaxed) == 1 {
if self.0.as_ref().fetch_sub(1, Ordering::AcqRel) == 1 {
let _ = Box::from_raw(self.0.as_ptr());
true
} else {
@ -95,9 +94,9 @@ impl RefCount {
impl Clone for RefCount {
fn clone(&self) -> Self {
let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::Relaxed);
let old_size = unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
assert!(old_size < Self::MAX);
RefCount(self.0)
Self(self.0)
}
}
@ -131,6 +130,36 @@ fn loom() {
});
}
/// Reference count object that tracks multiple references.
/// Unlike `RefCount`, it's manually inc()/dec() called.
#[derive(Debug)]
struct MultiRefCount(ptr::NonNull<AtomicUsize>);
unsafe impl Send for MultiRefCount {}
unsafe impl Sync for MultiRefCount {}
impl MultiRefCount {
fn new() -> Self {
let bx = Box::new(AtomicUsize::new(1));
let ptr = Box::into_raw(bx);
Self(unsafe { ptr::NonNull::new_unchecked(ptr) })
}
fn inc(&self) {
unsafe { self.0.as_ref() }.fetch_add(1, Ordering::AcqRel);
}
fn dec_and_check_empty(&self) -> bool {
unsafe { self.0.as_ref() }.fetch_sub(1, Ordering::AcqRel) == 1
}
}
impl Drop for MultiRefCount {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.0.as_ptr()) };
}
}
#[derive(Debug)]
struct LifeGuard {
ref_count: Option<RefCount>,
@ -140,7 +169,7 @@ struct LifeGuard {
impl LifeGuard {
fn new() -> Self {
let bx = Box::new(AtomicUsize::new(1));
LifeGuard {
Self {
ref_count: ptr::NonNull::new(Box::into_raw(bx)).map(RefCount),
submission_index: AtomicUsize::new(0),
}
@ -159,43 +188,52 @@ impl LifeGuard {
#[derive(Clone, Debug)]
struct Stored<T> {
value: T,
value: id::Valid<T>,
ref_count: RefCount,
}
#[repr(C)]
#[derive(Debug)]
pub struct U32Array {
pub bytes: *const u32,
pub length: usize,
}
#[derive(Clone, Copy, Debug)]
struct PrivateFeatures {
shader_validation: bool,
anisotropic_filtering: bool,
texture_d24: bool,
texture_d24_s8: bool,
}
#[macro_export]
macro_rules! gfx_select {
($id:expr => $global:ident.$method:ident( $($param:expr),+ )) => {
($id:expr => $global:ident.$method:ident( $($param:expr),* )) => {
match $id.backend() {
#[cfg(any(not(any(target_os = "ios", target_os = "macos")), feature = "gfx-backend-vulkan"))]
wgt::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),+ ),
wgt::Backend::Vulkan => $global.$method::<$crate::backend::Vulkan>( $($param),* ),
#[cfg(any(target_os = "ios", target_os = "macos"))]
wgt::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),+ ),
wgt::Backend::Metal => $global.$method::<$crate::backend::Metal>( $($param),* ),
#[cfg(windows)]
wgt::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),+ ),
wgt::Backend::Dx12 => $global.$method::<$crate::backend::Dx12>( $($param),* ),
#[cfg(windows)]
wgt::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),+ ),
wgt::Backend::Dx11 => $global.$method::<$crate::backend::Dx11>( $($param),* ),
_ => unreachable!()
}
};
}
#[macro_export]
macro_rules! span {
($guard_name:tt, $level:ident, $name:expr, $($fields:tt)*) => {
let span = tracing::span!(tracing::Level::$level, $name, $($fields)*);
let $guard_name = span.enter();
};
($guard_name:tt, $level:ident, $name:expr) => {
let span = tracing::span!(tracing::Level::$level, $name);
let $guard_name = span.enter();
};
}
/// Fast hash map used internally.
type FastHashMap<K, V> =
std::collections::HashMap<K, V, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
/// Fast hash set used internally.
type FastHashSet<K> = std::collections::HashSet<K, std::hash::BuildHasherDefault<fxhash::FxHasher>>;
#[test]
fn test_default_limits() {

Просмотреть файл

@ -0,0 +1,226 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
macro_rules! backends_map {
// one let statement per backend with mapped data
(
let map = |$backend:pat| $map:block;
$(
#[cfg($backend_cfg:meta)] let $pat:pat = map($expr:expr);
)*
) => {
$(
#[cfg($backend_cfg)]
let $pat = {
let $backend = $expr;
$map
};
)*
};
// one block statement per backend with mapped data
(
let map = |$backend:pat| $map:block;
$(
#[cfg($backend_cfg:meta)] map($expr:expr),
)*
) => {
$(
#[cfg($backend_cfg)]
{
let $backend = $expr;
$map
}
)*
};
// a struct constructor with one field per backend with mapped data
(
let map = |$backend:pat| $map:block;
$Struct:ident {
$(
#[cfg($backend_cfg:meta)] $ident:ident : map($expr:expr),
)*
}
) => {
$Struct {
$(
#[cfg($backend_cfg)]
$ident: {
let $backend = $expr;
$map
},
)*
}
};
}
#[test]
fn test_backend_macro() {
struct Foo {
#[cfg(any(
windows,
all(unix, not(any(target_os = "ios", target_os = "macos"))),
feature = "gfx-backend-vulkan",
))]
vulkan: u32,
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: u32,
#[cfg(windows)]
dx12: u32,
#[cfg(windows)]
dx11: u32,
}
// test struct construction
let test_foo: Foo = backends_map! {
let map = |init| { init - 100 };
Foo {
#[cfg(vulkan)] vulkan: map(101),
#[cfg(metal)] metal: map(102),
#[cfg(dx12)] dx12: map(103),
#[cfg(dx11)] dx11: map(104),
}
};
let mut vec = Vec::new();
// test basic statement-per-backend
backends_map! {
let map = |(id, chr)| {
vec.push((id, chr));
};
#[cfg(vulkan)]
map((test_foo.vulkan, 'a')),
#[cfg(metal)]
map((test_foo.metal, 'b')),
#[cfg(dx12)]
map((test_foo.dx12, 'c')),
#[cfg(dx11)]
map((test_foo.dx11, 'd')),
}
#[cfg(any(
windows,
all(unix, not(any(target_os = "ios", target_os = "macos"))),
feature = "gfx-backend-vulkan",
))]
assert!(vec.contains(&(1, 'a')));
#[cfg(any(target_os = "ios", target_os = "macos"))]
assert!(vec.contains(&(2, 'b')));
#[cfg(windows)]
assert!(vec.contains(&(3, 'c')));
#[cfg(windows)]
assert!(vec.contains(&(4, 'd')));
// test complex statement-per-backend
backends_map! {
let map = |(id, pred, code)| {
if pred(id) {
code();
}
};
#[cfg(vulkan)]
map((test_foo.vulkan, |v| v == 1, || println!("vulkan"))),
#[cfg(metal)]
map((test_foo.metal, |v| v == 2, || println!("metal"))),
#[cfg(dx12)]
map((test_foo.dx12, |v| v == 3, || println!("dx12"))),
#[cfg(dx11)]
map((test_foo.dx11, |v| v == 4, || println!("dx11"))),
}
// test struct construction 2
let test_foo_2: Foo = Foo {
#[cfg(vulkan)]
vulkan: 1,
#[cfg(metal)]
metal: 2,
#[cfg(dx12)]
dx12: 3,
#[cfg(dx11)]
dx11: 4,
};
#[cfg(vulkan)]
let var_vulkan = test_foo_2.vulkan;
#[cfg(metal)]
let var_metal = test_foo_2.metal;
#[cfg(dx12)]
let var_dx12 = test_foo_2.dx12;
#[cfg(dx11)]
let var_dx11 = test_foo_2.dx11;
backends_map! {
let map = |(id, chr, var)| { (chr, id, var) };
#[cfg(vulkan)]
let var_vulkan = map((test_foo_2.vulkan, 'a', var_vulkan));
#[cfg(metal)]
let var_metal = map((test_foo_2.metal, 'b', var_metal));
#[cfg(dx12)]
let var_dx12 = map((test_foo_2.dx12, 'c', var_dx12));
#[cfg(dx11)]
let var_dx11 = map((test_foo_2.dx11, 'd', var_dx11));
}
#[cfg(vulkan)]
{
println!("backend int: {:?}", var_vulkan);
}
#[cfg(metal)]
{
println!("backend int: {:?}", var_metal);
}
#[cfg(dx12)]
{
println!("backend int: {:?}", var_dx12);
}
#[cfg(dx11)]
{
println!("backend int: {:?}", var_dx11);
}
#[cfg(any(
windows,
all(unix, not(any(target_os = "ios", target_os = "macos"))),
feature = "gfx-backend-vulkan",
))]
let _ = var_vulkan;
#[cfg(any(target_os = "ios", target_os = "macos"))]
let _ = var_metal;
#[cfg(windows)]
let _ = var_dx12;
#[cfg(windows)]
let _ = var_dx11;
}

Просмотреть файл

@ -3,38 +3,23 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::RenderPassContext,
binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError},
device::{DeviceError, RenderPassContext},
id::{DeviceId, PipelineLayoutId, ShaderModuleId},
validation::StageError,
LifeGuard, RawString, RefCount, Stored, U32Array,
};
use std::borrow::Borrow;
use wgt::{
BufferAddress, ColorStateDescriptor, DepthStencilStateDescriptor, IndexFormat, InputStepMode,
PrimitiveTopology, RasterizationStateDescriptor, VertexAttributeDescriptor,
Label, LifeGuard, RefCount, Stored,
};
use std::borrow::{Borrow, Cow};
use thiserror::Error;
use wgt::{BufferAddress, IndexFormat, InputStepMode};
#[repr(C)]
// Unable to serialize with `naga::Module` in here:
// requires naga serialization feature.
#[derive(Debug)]
pub struct VertexBufferLayoutDescriptor {
pub array_stride: BufferAddress,
pub step_mode: InputStepMode,
pub attributes: *const VertexAttributeDescriptor,
pub attributes_length: usize,
}
#[repr(C)]
#[derive(Debug)]
pub struct VertexStateDescriptor {
pub index_format: IndexFormat,
pub vertex_buffers: *const VertexBufferLayoutDescriptor,
pub vertex_buffers_length: usize,
}
#[repr(C)]
#[derive(Debug)]
pub struct ShaderModuleDescriptor {
pub code: U32Array,
pub enum ShaderModuleSource<'a> {
SpirV(Cow<'a, [u32]>),
Wgsl(Cow<'a, str>),
Naga(naga::Module),
}
#[derive(Debug)]
@ -44,22 +29,62 @@ pub struct ShaderModule<B: hal::Backend> {
pub(crate) module: Option<naga::Module>,
}
#[repr(C)]
#[derive(Debug)]
pub struct ProgrammableStageDescriptor {
pub module: ShaderModuleId,
pub entry_point: RawString,
}
#[repr(C)]
#[derive(Debug)]
pub struct ComputePipelineDescriptor {
pub layout: PipelineLayoutId,
pub compute_stage: ProgrammableStageDescriptor,
#[derive(Clone, Debug, Error)]
pub enum CreateShaderModuleError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
Validation(#[from] naga::proc::ValidationError),
}
/// Describes a programmable pipeline stage.
#[derive(Clone, Debug)]
pub enum ComputePipelineError {
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ProgrammableStageDescriptor<'a> {
/// The compiled shader module for this stage.
pub module: ShaderModuleId,
/// The name of the entry point in the compiled shader. There must be a function that returns
/// void with this name in the shader.
pub entry_point: Cow<'a, str>,
}
/// Number of implicit bind groups derived at pipeline creation.
pub type ImplicitBindGroupCount = u8;
#[derive(Clone, Debug, Error)]
pub enum ImplicitLayoutError {
#[error("missing IDs for deriving {0} bind groups")]
MissingIds(ImplicitBindGroupCount),
#[error("unable to reflect the shader {0:?} interface")]
ReflectionError(wgt::ShaderStage),
#[error(transparent)]
BindGroup(#[from] CreateBindGroupLayoutError),
#[error(transparent)]
Pipeline(#[from] CreatePipelineLayoutError),
}
/// Describes a compute pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct ComputePipelineDescriptor<'a> {
pub label: Label<'a>,
/// The layout of bind groups for this pipeline.
pub layout: Option<PipelineLayoutId>,
/// The compiled compute stage and its entry point.
pub compute_stage: ProgrammableStageDescriptor<'a>,
}
#[derive(Clone, Debug, Error)]
pub enum CreateComputePipelineError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("pipeline layout is invalid")]
InvalidLayout,
#[error("unable to derive an implicit layout")]
Implicit(#[from] ImplicitLayoutError),
#[error(transparent)]
Stage(StageError),
}
@ -77,37 +102,94 @@ impl<B: hal::Backend> Borrow<RefCount> for ComputePipeline<B> {
}
}
#[repr(C)]
#[derive(Debug)]
pub struct RenderPipelineDescriptor {
pub layout: PipelineLayoutId,
pub vertex_stage: ProgrammableStageDescriptor,
pub fragment_stage: *const ProgrammableStageDescriptor,
pub primitive_topology: PrimitiveTopology,
pub rasterization_state: *const RasterizationStateDescriptor,
pub color_states: *const ColorStateDescriptor,
pub color_states_length: usize,
pub depth_stencil_state: *const DepthStencilStateDescriptor,
pub vertex_state: VertexStateDescriptor,
/// Describes how the vertex buffer is interpreted.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexBufferDescriptor<'a> {
/// The stride, in bytes, between elements of this buffer.
pub stride: BufferAddress,
/// How often this vertex buffer is "stepped" forward.
pub step_mode: InputStepMode,
/// The list of attributes which comprise a single vertex.
pub attributes: Cow<'a, [wgt::VertexAttributeDescriptor]>,
}
/// Describes vertex input state for a render pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct VertexStateDescriptor<'a> {
/// The format of any index buffers used with this pipeline.
pub index_format: IndexFormat,
/// The format of any vertex buffers used with this pipeline.
pub vertex_buffers: Cow<'a, [VertexBufferDescriptor<'a>]>,
}
/// Describes a render (graphics) pipeline.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct RenderPipelineDescriptor<'a> {
pub label: Label<'a>,
/// The layout of bind groups for this pipeline.
pub layout: Option<PipelineLayoutId>,
/// The compiled vertex stage and its entry point.
pub vertex_stage: ProgrammableStageDescriptor<'a>,
/// The compiled fragment stage and its entry point, if any.
pub fragment_stage: Option<ProgrammableStageDescriptor<'a>>,
/// The rasterization process for this pipeline.
pub rasterization_state: Option<wgt::RasterizationStateDescriptor>,
/// The primitive topology used to interpret vertices.
pub primitive_topology: wgt::PrimitiveTopology,
/// The effect of draw calls on the color aspect of the output target.
pub color_states: Cow<'a, [wgt::ColorStateDescriptor]>,
/// The effect of draw calls on the depth and stencil aspects of the output target, if any.
pub depth_stencil_state: Option<wgt::DepthStencilStateDescriptor>,
/// The vertex input state for this pipeline.
pub vertex_state: VertexStateDescriptor<'a>,
/// The number of samples calculated per pixel (for MSAA). For non-multisampled textures,
/// this should be `1`
pub sample_count: u32,
/// Bitmask that restricts the samples of a pixel modified by this pipeline. All samples
/// can be enabled using the value `!0`
pub sample_mask: u32,
/// When enabled, produces another sample mask per pixel based on the alpha output value, that
/// is ANDed with the sample_mask and the primitive coverage to restrict the set of samples
/// affected by a primitive.
///
/// The implicit mask produced for alpha of zero is guaranteed to be zero, and for alpha of one
/// is guaranteed to be all 1-s.
pub alpha_to_coverage_enabled: bool,
}
#[derive(Clone, Debug)]
pub enum RenderPipelineError {
#[derive(Clone, Debug, Error)]
pub enum CreateRenderPipelineError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("pipelie layout is invalid")]
InvalidLayout,
#[error("unable to derive an implicit layout")]
Implicit(#[from] ImplicitLayoutError),
#[error("incompatible output format at index {index}")]
IncompatibleOutputFormat { index: u8 },
#[error("invalid sample count {0}")]
InvalidSampleCount(u32),
#[error("vertex buffer {index} stride {stride} does not respect `VERTEX_STRIDE_ALIGNMENT`")]
UnalignedVertexStride { index: u32, stride: BufferAddress },
#[error("vertex attribute at location {location} has invalid offset {offset}")]
InvalidVertexAttributeOffset {
location: wgt::ShaderLocation,
offset: BufferAddress,
},
#[error("missing required device features {0:?}")]
MissingFeature(wgt::Features),
#[error("error in stage {flag:?}")]
Stage {
flag: wgt::ShaderStage,
#[source]
error: StageError,
},
IncompatibleOutputFormat {
index: u8,
},
InvalidSampleCount(u32),
}
bitflags::bitflags! {
@ -115,7 +197,7 @@ bitflags::bitflags! {
pub struct PipelineFlags: u32 {
const BLEND_COLOR = 1;
const STENCIL_REFERENCE = 2;
const DEPTH_STENCIL_READ_ONLY = 4;
const WRITES_DEPTH_STENCIL = 4;
}
}
@ -127,7 +209,6 @@ pub struct RenderPipeline<B: hal::Backend> {
pub(crate) pass_context: RenderPassContext,
pub(crate) flags: PipelineFlags,
pub(crate) index_format: IndexFormat,
pub(crate) sample_count: u8,
pub(crate) vertex_strides: Vec<(BufferAddress, InputStepMode)>,
pub(crate) life_guard: LifeGuard,
}

Просмотреть файл

@ -1,73 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::fmt;
#[derive(Debug)]
pub enum Error {
Unsupported,
Error(Box<dyn std::error::Error>),
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::Unsupported => write!(f, "Battery status is unsupported on this platform"),
Error::Error(err) => write!(f, "Battery status retrieval failed: {}", err),
}
}
}
#[cfg(all(
feature = "battery",
any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
)
))]
mod platform {
use super::Error;
use battery::{self, Manager, State};
impl From<battery::errors::Error> for Error {
fn from(err: battery::errors::Error) -> Error {
// Box the error so that the battery::errors::Error type does
// not leak out of this module.
Error::Error(Box::new(err))
}
}
pub fn is_battery_discharging() -> Result<bool, Error> {
let manager = Manager::new()?;
for battery in manager.batteries()? {
if battery?.state() == State::Discharging {
return Ok(true);
}
}
Ok(false)
}
}
#[cfg(any(
not(feature = "battery"),
not(any(
target_os = "linux",
target_os = "macos",
target_os = "windows",
target_os = "dragonfly",
target_os = "freebsd"
))
))]
mod platform {
use super::Error;
pub fn is_battery_discharging() -> Result<bool, Error> {
Err(Error::Unsupported)
}
}
pub use platform::is_battery_discharging;

Просмотреть файл

@ -3,19 +3,25 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{
device::DeviceError,
id::{DeviceId, SwapChainId, TextureId},
track::DUMMY_SELECTOR,
LifeGuard, RefCount, Stored,
track::{TextureSelector, DUMMY_SELECTOR},
validation::MissingBufferUsageError,
Label, LifeGuard, RefCount, Stored,
};
use gfx_memory::MemoryBlock;
use wgt::{BufferAddress, BufferUsage, TextureFormat, TextureUsage};
use thiserror::Error;
use std::{borrow::Borrow, ptr::NonNull};
use std::{
borrow::Borrow,
num::{NonZeroU32, NonZeroU8},
ptr::NonNull,
};
bitflags::bitflags! {
/// The internal enum mirrored from `BufferUsage`. The values don't have to match!
pub (crate) struct BufferUse: u32 {
pub struct BufferUse: u32 {
const EMPTY = 0;
const MAP_READ = 1;
const MAP_WRITE = 2;
@ -42,7 +48,7 @@ bitflags::bitflags! {
bitflags::bitflags! {
/// The internal enum mirrored from `TextureUsage`. The values don't have to match!
pub(crate) struct TextureUse: u32 {
pub struct TextureUse: u32 {
const EMPTY = 0;
const COPY_SRC = 1;
const COPY_DST = 2;
@ -73,12 +79,13 @@ pub enum BufferMapAsyncStatus {
}
#[derive(Debug)]
pub enum BufferMapState<B: hal::Backend> {
pub(crate) enum BufferMapState<B: hal::Backend> {
/// Mapped at creation.
Init {
ptr: NonNull<u8>,
stage_buffer: B::Buffer,
stage_memory: MemoryBlock<B>,
needs_flush: bool,
},
/// Waiting for GPU to be done before mapping
Waiting(BufferPendingMapping),
@ -111,34 +118,76 @@ unsafe impl Sync for BufferMapOperation {}
impl BufferMapOperation {
pub(crate) fn call_error(self) {
log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
tracing::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
unsafe {
(self.callback)(BufferMapAsyncStatus::Error, self.user_data);
}
}
}
#[derive(Clone, Debug, Error)]
pub enum BufferAccessError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("buffer is invalid")]
Invalid,
#[error("buffer is destroyed")]
Destroyed,
#[error("buffer is already mapped")]
AlreadyMapped,
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("buffer is not mapped")]
NotMapped,
#[error("buffer map range does not respect `COPY_BUFFER_ALIGNMENT`")]
UnalignedRange,
}
impl From<hal::device::MapError> for BufferAccessError {
fn from(error: hal::device::MapError) -> Self {
match error {
hal::device::MapError::OutOfMemory(_) => {
BufferAccessError::Device(DeviceError::OutOfMemory)
}
_ => panic!("failed to map buffer: {}", error),
}
}
}
#[derive(Debug)]
pub struct BufferPendingMapping {
pub(crate) struct BufferPendingMapping {
pub sub_range: hal::buffer::SubRange,
pub op: BufferMapOperation,
// hold the parent alive while the mapping is active
pub parent_ref_count: RefCount,
}
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
#[derive(Debug)]
pub struct Buffer<B: hal::Backend> {
pub(crate) raw: B::Buffer,
pub(crate) raw: Option<(B::Buffer, MemoryBlock<B>)>,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) usage: BufferUsage,
pub(crate) memory: MemoryBlock<B>,
pub(crate) size: BufferAddress,
pub(crate) usage: wgt::BufferUsage,
pub(crate) size: wgt::BufferAddress,
pub(crate) full_range: (),
pub(crate) sync_mapped_writes: Option<hal::memory::Segment>,
pub(crate) life_guard: LifeGuard,
pub(crate) map_state: BufferMapState<B>,
}
#[derive(Clone, Debug, Error)]
pub enum CreateBufferError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("failed to map buffer while creating: {0}")]
AccessError(#[from] BufferAccessError),
#[error("buffers that are mapped at creation have to be aligned to `COPY_BUFFER_ALIGNMENT`")]
UnalignedSize,
#[error("`MAP` usage can only be combined with the opposite `COPY`, requested {0:?}")]
UsageMismatch(wgt::BufferUsage),
}
impl<B: hal::Backend> Borrow<RefCount> for Buffer<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
@ -151,31 +200,93 @@ impl<B: hal::Backend> Borrow<()> for Buffer<B> {
}
}
pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>>;
#[derive(Debug)]
pub struct Texture<B: hal::Backend> {
pub(crate) raw: B::Image,
pub(crate) raw: Option<(B::Image, MemoryBlock<B>)>,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) usage: TextureUsage,
pub(crate) usage: wgt::TextureUsage,
pub(crate) aspects: hal::format::Aspects,
pub(crate) dimension: wgt::TextureDimension,
pub(crate) kind: hal::image::Kind,
pub(crate) format: TextureFormat,
pub(crate) full_range: hal::image::SubresourceRange,
pub(crate) memory: MemoryBlock<B>,
pub(crate) format: wgt::TextureFormat,
pub(crate) full_range: TextureSelector,
pub(crate) life_guard: LifeGuard,
}
#[derive(Clone, Debug)]
pub enum TextureErrorDimension {
X,
Y,
Z,
}
#[derive(Clone, Debug, Error)]
pub enum TextureDimensionError {
#[error("Dimension {0:?} is zero")]
Zero(TextureErrorDimension),
#[error("1D textures must have height set to 1")]
InvalidHeight,
#[error("sample count {0} is invalid")]
InvalidSampleCount(u32),
}
#[derive(Clone, Debug, Error)]
pub enum CreateTextureError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("D24Plus textures cannot be copied")]
CannotCopyD24Plus,
#[error(transparent)]
InvalidDimension(#[from] TextureDimensionError),
#[error("texture descriptor mip level count ({0}) is invalid")]
InvalidMipLevelCount(u32),
#[error("Feature {0:?} must be enabled to create a texture of type {1:?}")]
MissingFeature(wgt::Features, wgt::TextureFormat),
}
impl<B: hal::Backend> Borrow<RefCount> for Texture<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
}
}
impl<B: hal::Backend> Borrow<hal::image::SubresourceRange> for Texture<B> {
fn borrow(&self) -> &hal::image::SubresourceRange {
impl<B: hal::Backend> Borrow<TextureSelector> for Texture<B> {
fn borrow(&self) -> &TextureSelector {
&self.full_range
}
}
/// Describes a [`TextureView`].
#[derive(Clone, Debug, Default, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize), serde(default))]
pub struct TextureViewDescriptor<'a> {
/// Debug label of the texture view. This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// Format of the texture view, or `None` for the same format as the texture itself.
/// At this time, it must be the same the underlying format of the texture.
pub format: Option<wgt::TextureFormat>,
/// The dimension of the texture view. For 1D textures, this must be `1D`. For 2D textures it must be one of
/// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `3D`
pub dimension: Option<wgt::TextureViewDimension>,
/// Aspect of the texture. Color textures must be [`TextureAspect::All`].
pub aspect: wgt::TextureAspect,
/// Base mip level.
pub base_mip_level: u32,
/// Mip level count.
/// If `Some(count)`, `base_mip_level + count` must be less or equal to underlying texture mip count.
/// If `None`, considered to include the rest of the mipmap levels, but at least 1 in total.
pub level_count: Option<NonZeroU32>,
/// Base array layer.
pub base_array_layer: u32,
/// Layer count.
/// If `Some(count)`, `base_array_layer + count` must be less or equal to the underlying array count.
/// If `None`, considered to include the rest of the array layers, but at least 1 in total.
pub array_layer_count: Option<NonZeroU32>,
}
#[derive(Debug)]
pub(crate) enum TextureViewInner<B: hal::Backend> {
Native {
@ -192,13 +303,48 @@ pub(crate) enum TextureViewInner<B: hal::Backend> {
pub struct TextureView<B: hal::Backend> {
pub(crate) inner: TextureViewInner<B>,
//TODO: store device_id for quick access?
pub(crate) format: TextureFormat,
pub(crate) aspects: hal::format::Aspects,
pub(crate) format: wgt::TextureFormat,
pub(crate) extent: hal::image::Extent,
pub(crate) samples: hal::image::NumSamples,
pub(crate) range: hal::image::SubresourceRange,
pub(crate) selector: TextureSelector,
pub(crate) life_guard: LifeGuard,
}
#[derive(Clone, Debug, Error)]
pub enum CreateTextureViewError {
#[error("parent texture is invalid or destroyed")]
InvalidTexture,
#[error("not enough memory left")]
OutOfMemory,
#[error("Invalid texture view dimension `{view:?}` with texture of dimension `{image:?}`")]
InvalidTextureViewDimension {
view: wgt::TextureViewDimension,
image: wgt::TextureDimension,
},
#[error("Invalid texture depth `{depth}` for texture view of dimension `Cubemap`. Cubemap views must use images of size 6.")]
InvalidCubemapTextureDepth { depth: u16 },
#[error("Invalid texture depth `{depth}` for texture view of dimension `CubemapArray`. Cubemap views must use images with sizes which are a multiple of 6.")]
InvalidCubemapArrayTextureDepth { depth: u16 },
#[error(
"TextureView mip level count + base mip level {requested} must be <= Texture mip level count {total}"
)]
InvalidMipLevelCount { requested: u32, total: u8 },
#[error("TextureView array layer count + base array layer {requested} must be <= Texture depth/array layer count {total}")]
InvalidArrayLayerCount { requested: u32, total: u16 },
#[error("Aspect {requested:?} is not in the source texture ({total:?})")]
InvalidAspect {
requested: hal::format::Aspects,
total: hal::format::Aspects,
},
}
#[derive(Clone, Debug, Error)]
pub enum TextureViewDestroyError {
#[error("cannot destroy swap chain image")]
SwapChainImage,
}
impl<B: hal::Backend> Borrow<RefCount> for TextureView<B> {
fn borrow(&self) -> &RefCount {
self.life_guard.ref_count.as_ref().unwrap()
@ -211,11 +357,70 @@ impl<B: hal::Backend> Borrow<()> for TextureView<B> {
}
}
/// Describes a [`Sampler`]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
#[cfg_attr(feature = "replay", derive(serde::Deserialize))]
pub struct SamplerDescriptor<'a> {
/// Debug label of the sampler. This will show up in graphics debuggers for easy identification.
pub label: Label<'a>,
/// How to deal with out of bounds accesses in the u (i.e. x) direction
pub address_modes: [wgt::AddressMode; 3],
/// How to filter the texture when it needs to be magnified (made larger)
pub mag_filter: wgt::FilterMode,
/// How to filter the texture when it needs to be minified (made smaller)
pub min_filter: wgt::FilterMode,
/// How to filter between mip map levels
pub mipmap_filter: wgt::FilterMode,
/// Minimum level of detail (i.e. mip level) to use
pub lod_min_clamp: f32,
/// Maximum level of detail (i.e. mip level) to use
pub lod_max_clamp: f32,
/// If this is enabled, this is a comparison sampler using the given comparison function.
pub compare: Option<wgt::CompareFunction>,
/// Valid values: 1, 2, 4, 8, and 16.
pub anisotropy_clamp: Option<NonZeroU8>,
/// Border color to use when address_mode is [`AddressMode::ClampToBorder`]
pub border_color: Option<wgt::SamplerBorderColor>,
}
impl Default for SamplerDescriptor<'_> {
fn default() -> Self {
Self {
label: None,
address_modes: Default::default(),
mag_filter: Default::default(),
min_filter: Default::default(),
mipmap_filter: Default::default(),
lod_min_clamp: 0.0,
lod_max_clamp: std::f32::MAX,
compare: None,
anisotropy_clamp: None,
border_color: None,
}
}
}
#[derive(Debug)]
pub struct Sampler<B: hal::Backend> {
pub(crate) raw: B::Sampler,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) life_guard: LifeGuard,
/// `true` if this is a comparison sampler
pub(crate) comparison: bool,
}
#[derive(Clone, Debug, Error)]
pub enum CreateSamplerError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("invalid anisotropic clamp {0}, must be one of 1, 2, 4, 8 or 16")]
InvalidClamp(u8),
#[error("cannot create any more samplers")]
TooManyObjects,
/// AddressMode::ClampToBorder requires feature ADDRESS_MODE_CLAMP_TO_BORDER
#[error("Feature {0:?} must be enabled")]
MissingFeature(wgt::Features),
}
impl<B: hal::Backend> Borrow<RefCount> for Sampler<B> {
@ -229,3 +434,11 @@ impl<B: hal::Backend> Borrow<()> for Sampler<B> {
&DUMMY_SELECTOR
}
}
#[derive(Clone, Debug, Error)]
pub enum DestroyError {
#[error("resource is invalid")]
Invalid,
#[error("resource is already destroyed")]
AlreadyDestroyed,
}

Просмотреть файл

@ -36,12 +36,16 @@
use crate::device::trace::Action;
use crate::{
conv,
device::DeviceError,
hub::{GfxBackend, Global, GlobalIdentityHandlerFactory, Input, Token},
id::{DeviceId, SwapChainId, TextureViewId},
resource, LifeGuard, PrivateFeatures, Stored,
id::{DeviceId, SwapChainId, TextureViewId, Valid},
resource, span,
track::TextureSelector,
LifeGuard, PrivateFeatures, Stored, SubmissionIndex,
};
use hal::{self, device::Device as _, queue::CommandQueue as _, window::PresentationSurface as _};
use thiserror::Error;
use wgt::{SwapChainDescriptor, SwapChainStatus};
const FRAME_TIMEOUT_MS: u64 = 1000;
@ -56,6 +60,36 @@ pub struct SwapChain<B: hal::Backend> {
pub(crate) semaphore: B::Semaphore,
pub(crate) acquired_view_id: Option<Stored<TextureViewId>>,
pub(crate) acquired_framebuffers: Vec<B::Framebuffer>,
pub(crate) active_submission_index: SubmissionIndex,
}
#[derive(Clone, Debug, Error)]
pub enum SwapChainError {
#[error("swap chain is invalid")]
Invalid,
#[error("parent surface is invalid")]
InvalidSurface,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("swap chain image is already acquired")]
AlreadyAcquired,
}
#[derive(Clone, Debug, Error)]
pub enum CreateSwapChainError {
#[error(transparent)]
Device(#[from] DeviceError),
#[error("invalid surface")]
InvalidSurface,
#[error("`SwapChainOutput` must be dropped before a new `SwapChain` is made")]
SwapChainOutputExists,
#[error("surface does not support the adapter's queue family")]
UnsupportedQueueFamily,
#[error("requested format {requested:?} is not in list of supported formats: {available:?}")]
UnsupportedFormat {
requested: hal::format::Format,
available: Vec<hal::format::Format>,
},
}
pub(crate) fn swap_chain_descriptor_to_hal(
@ -88,19 +122,25 @@ pub struct SwapChainOutput {
}
impl<G: GlobalIdentityHandlerFactory> Global<G> {
pub fn swap_chain_get_next_texture<B: GfxBackend>(
pub fn swap_chain_get_current_texture_view<B: GfxBackend>(
&self,
swap_chain_id: SwapChainId,
view_id_in: Input<G, TextureViewId>,
) -> SwapChainOutput {
) -> Result<SwapChainOutput, SwapChainError> {
span!(_guard, INFO, "SwapChain::get_next_texture");
let hub = B::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (device_guard, mut token) = hub.devices.read(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
#[cfg_attr(not(feature = "trace"), allow(unused_variables))]
let device = &device_guard[sc.device_id.value];
@ -111,109 +151,136 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(err) => (
None,
match err {
hal::window::AcquireError::OutOfMemory(_) => SwapChainStatus::OutOfMemory,
hal::window::AcquireError::OutOfMemory(_) => Err(DeviceError::OutOfMemory)?,
hal::window::AcquireError::NotReady => unreachable!(), // we always set a timeout
hal::window::AcquireError::Timeout => SwapChainStatus::Timeout,
hal::window::AcquireError::OutOfDate => SwapChainStatus::Outdated,
hal::window::AcquireError::SurfaceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => SwapChainStatus::Lost,
hal::window::AcquireError::DeviceLost(_) => Err(DeviceError::Lost)?,
},
),
};
let view_id = image.map(|image| {
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: swap_chain_id,
ref_count: sc.life_guard.add_ref(),
let view_id = match image {
Some(image) => {
let view = resource::TextureView {
inner: resource::TextureViewInner::SwapChain {
image,
source_id: Stored {
value: Valid(swap_chain_id),
ref_count: sc.life_guard.add_ref(),
},
},
},
format: sc.desc.format,
extent: hal::image::Extent {
width: sc.desc.width,
height: sc.desc.height,
depth: 1,
},
samples: 1,
range: hal::image::SubresourceRange {
aspects: hal::format::Aspects::COLOR,
layers: 0..1,
levels: 0..1,
},
life_guard: LifeGuard::new(),
};
format: sc.desc.format,
extent: hal::image::Extent {
width: sc.desc.width,
height: sc.desc.height,
depth: 1,
},
samples: 1,
selector: TextureSelector {
layers: 0..1,
levels: 0..1,
},
life_guard: LifeGuard::new(),
};
let ref_count = view.life_guard.add_ref();
let id = hub
.texture_views
.register_identity(view_id_in, view, &mut token);
let ref_count = view.life_guard.add_ref();
let id = hub
.texture_views
.register_identity(view_id_in, view, &mut token);
assert!(
sc.acquired_view_id.is_none(),
"Swap chain image is already acquired"
);
if sc.acquired_view_id.is_some() {
return Err(SwapChainError::AlreadyAcquired);
}
sc.acquired_view_id = Some(Stored {
value: id,
ref_count,
});
sc.acquired_view_id = Some(Stored {
value: id,
ref_count,
});
id
});
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => trace.lock().add(Action::GetSwapChainTexture {
id: view_id,
parent_id: swap_chain_id,
}),
None => (),
Some(id.0)
}
None => None,
};
SwapChainOutput { status, view_id }
#[cfg(feature = "trace")]
if let Some(ref trace) = device.trace {
trace.lock().add(Action::GetSwapChainTexture {
id: view_id,
parent_id: swap_chain_id,
});
}
Ok(SwapChainOutput { status, view_id })
}
pub fn swap_chain_present<B: GfxBackend>(&self, swap_chain_id: SwapChainId) {
pub fn swap_chain_present<B: GfxBackend>(
&self,
swap_chain_id: SwapChainId,
) -> Result<SwapChainStatus, SwapChainError> {
span!(_guard, INFO, "SwapChain::present");
let hub = B::hub(self);
let mut token = Token::root();
let (mut surface_guard, mut token) = self.surfaces.write(&mut token);
let surface = &mut surface_guard[swap_chain_id.to_surface_id()];
let surface = surface_guard
.get_mut(swap_chain_id.to_surface_id())
.map_err(|_| SwapChainError::InvalidSurface)?;
let (mut device_guard, mut token) = hub.devices.write(&mut token);
let (mut swap_chain_guard, mut token) = hub.swap_chains.write(&mut token);
let sc = &mut swap_chain_guard[swap_chain_id];
let sc = swap_chain_guard
.get_mut(swap_chain_id)
.map_err(|_| SwapChainError::Invalid)?;
let device = &mut device_guard[sc.device_id.value];
#[cfg(feature = "trace")]
match device.trace {
Some(ref trace) => trace.lock().add(Action::PresentSwapChain(swap_chain_id)),
None => (),
};
if let Some(ref trace) = device.trace {
trace.lock().add(Action::PresentSwapChain(swap_chain_id));
}
let view_id = sc
.acquired_view_id
.take()
.expect("Swap chain image is not acquired");
let (view, _) = hub.texture_views.unregister(view_id.value, &mut token);
.ok_or(SwapChainError::AlreadyAcquired)?;
let (view_maybe, _) = hub.texture_views.unregister(view_id.value.0, &mut token);
let view = view_maybe.ok_or(SwapChainError::Invalid)?;
let image = match view.inner {
resource::TextureViewInner::Native { .. } => unreachable!(),
resource::TextureViewInner::SwapChain { image, .. } => image,
};
let err = unsafe {
let queue = &mut device.queue_group.queues[0];
queue.present_surface(B::get_surface_mut(surface), image, Some(&sc.semaphore))
let sem = if sc.active_submission_index > device.last_completed_submission_index() {
Some(&sc.semaphore)
} else {
None
};
if let Err(e) = err {
log::warn!("present failed: {:?}", e);
}
let queue = &mut device.queue_group.queues[0];
let result = unsafe { queue.present(B::get_surface_mut(surface), image, sem) };
tracing::debug!(trace = true, "Presented. End of Frame");
for fbo in sc.acquired_framebuffers.drain(..) {
unsafe {
device.raw.destroy_framebuffer(fbo);
}
}
match result {
Ok(None) => Ok(SwapChainStatus::Good),
Ok(Some(_)) => Ok(SwapChainStatus::Suboptimal),
Err(err) => match err {
hal::window::PresentError::OutOfMemory(_) => {
Err(SwapChainError::Device(DeviceError::OutOfMemory))
}
hal::window::PresentError::OutOfDate => Ok(SwapChainStatus::Outdated),
hal::window::PresentError::SurfaceLost(_) => Ok(SwapChainStatus::Lost),
hal::window::PresentError::DeviceLost(_) => {
Err(SwapChainError::Device(DeviceError::Lost))
}
},
}
}
}

Просмотреть файл

@ -3,7 +3,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{PendingTransition, ResourceState, Unit};
use crate::{id::BufferId, resource::BufferUse};
use crate::{
id::{BufferId, Valid},
resource::BufferUse,
};
//TODO: store `hal::buffer::State` here to avoid extra conversions
pub(crate) type BufferState = Unit<BufferUse>;
@ -23,7 +26,7 @@ impl PendingTransition<BufferState> {
impl Default for BufferState {
fn default() -> Self {
BufferState {
Self {
first: None,
last: BufferUse::empty(),
}
@ -47,7 +50,7 @@ impl ResourceState for BufferState {
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
_selector: Self::Selector,
usage: Self::Usage,
output: Option<&mut Vec<PendingTransition<Self>>>,
@ -79,9 +82,28 @@ impl ResourceState for BufferState {
Ok(())
}
fn prepend(
&mut self,
id: Valid<Self::Id>,
_selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
match self.first {
Some(old) if old != usage => Err(PendingTransition {
id,
selector: (),
usage: old..usage,
}),
_ => {
self.first = Some(usage);
Ok(())
}
}
}
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {
@ -131,7 +153,7 @@ mod test {
first: None,
last: BufferUse::INDEX,
};
let id = Id::default();
let id = Id::dummy();
assert_eq!(
bs.change(id, (), BufferUse::STORAGE_STORE, None),
Err(PendingTransition {
@ -151,7 +173,7 @@ mod test {
first: None,
last: BufferUse::STORAGE_STORE,
};
let id = Id::default();
let id = Id::dummy();
let mut list = Vec::new();
bs.change(id, (), BufferUse::VERTEX, Some(&mut list))
.unwrap();
@ -190,4 +212,30 @@ mod test {
}
);
}
#[test]
fn prepend() {
let mut bs = Unit {
first: None,
last: BufferUse::VERTEX,
};
let id = Id::dummy();
bs.prepend(id, (), BufferUse::INDEX).unwrap();
bs.prepend(id, (), BufferUse::INDEX).unwrap();
assert_eq!(
bs.prepend(id, (), BufferUse::STORAGE_LOAD),
Err(PendingTransition {
id,
selector: (),
usage: BufferUse::INDEX..BufferUse::STORAGE_LOAD,
})
);
assert_eq!(
bs,
Unit {
first: Some(BufferUse::INDEX),
last: BufferUse::VERTEX,
}
);
}
}

Просмотреть файл

@ -9,16 +9,17 @@ mod texture;
use crate::{
conv,
hub::Storage,
id::{self, TypedId},
id::{self, TypedId, Valid},
resource, Epoch, FastHashMap, Index, RefCount,
};
use std::{
borrow::Borrow, collections::hash_map::Entry, fmt, marker::PhantomData, ops, vec::Drain,
};
use thiserror::Error;
pub(crate) use buffer::BufferState;
pub(crate) use texture::TextureState;
pub(crate) use texture::{TextureSelector, TextureState};
/// A single unit of state tracking. It keeps an initial
/// usage as well as the last/current one, similar to `Range`.
@ -31,7 +32,7 @@ pub struct Unit<U> {
impl<U: Copy> Unit<U> {
/// Create a new unit from a given usage.
fn new(usage: U) -> Self {
Unit {
Self {
first: None,
last: usage,
}
@ -45,7 +46,7 @@ impl<U: Copy> Unit<U> {
/// The main trait that abstracts away the tracking logic of
/// a particular resource type, like a buffer or a texture.
pub trait ResourceState: Clone + Default {
pub(crate) trait ResourceState: Clone + Default {
/// Corresponding `HUB` identifier.
type Id: Copy + fmt::Debug + TypedId;
/// A type specifying the sub-resources.
@ -74,12 +75,20 @@ pub trait ResourceState: Clone + Default {
/// be done for read-only usages.
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>>;
/// Sets up the first usage of the selected sub-resources.
fn prepend(
&mut self,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>>;
/// Merge the state of this resource tracked by a different instance
/// with the current one.
///
@ -90,7 +99,7 @@ pub trait ResourceState: Clone + Default {
/// the error is generated (returning the conflict).
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>>;
@ -112,8 +121,8 @@ struct Resource<S> {
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
#[derive(Debug, PartialEq)]
pub struct PendingTransition<S: ResourceState> {
pub id: S::Id,
pub(crate) struct PendingTransition<S: ResourceState> {
pub id: Valid<S::Id>,
pub selector: S::Selector,
pub usage: ops::Range<S::Usage>,
}
@ -124,11 +133,12 @@ impl PendingTransition<BufferState> {
self,
buf: &'a resource::Buffer<B>,
) -> hal::memory::Barrier<'a, B> {
log::trace!("\tbuffer -> {:?}", self);
tracing::trace!("\tbuffer -> {:?}", self);
let &(ref target, _) = buf.raw.as_ref().expect("Buffer is destroyed");
hal::memory::Barrier::Buffer {
states: conv::map_buffer_state(self.usage.start)
..conv::map_buffer_state(self.usage.end),
target: &buf.raw,
target,
range: hal::buffer::SubRange::WHOLE,
families: None,
}
@ -141,23 +151,35 @@ impl PendingTransition<TextureState> {
self,
tex: &'a resource::Texture<B>,
) -> hal::memory::Barrier<'a, B> {
log::trace!("\ttexture -> {:?}", self);
let aspects = tex.full_range.aspects;
tracing::trace!("\ttexture -> {:?}", self);
let &(ref target, _) = tex.raw.as_ref().expect("Texture is destroyed");
let aspects = tex.aspects;
hal::memory::Barrier::Image {
states: conv::map_texture_state(self.usage.start, aspects)
..conv::map_texture_state(self.usage.end, aspects),
target: &tex.raw,
target,
range: hal::image::SubresourceRange {
aspects,
..self.selector
level_start: self.selector.levels.start,
level_count: Some(self.selector.levels.end - self.selector.levels.start),
layer_start: self.selector.layers.start,
layer_count: Some(self.selector.layers.end - self.selector.layers.start),
},
families: None,
}
}
}
#[derive(Clone, Debug, Error)]
pub enum UseExtendError<U: fmt::Debug> {
#[error("resource is invalid")]
InvalidResource,
#[error("total usage {0:?} is not valid")]
Conflict(U),
}
/// A tracker for all resources of a given type.
pub struct ResourceTracker<S: ResourceState> {
pub(crate) struct ResourceTracker<S: ResourceState> {
/// An association of known resource indices with their tracked states.
map: FastHashMap<Index, Resource<S>>,
/// Temporary storage for collecting transitions.
@ -179,7 +201,7 @@ impl<S: ResourceState + fmt::Debug> fmt::Debug for ResourceTracker<S> {
impl<S: ResourceState> ResourceTracker<S> {
/// Create a new empty tracker.
pub fn new(backend: wgt::Backend) -> Self {
ResourceTracker {
Self {
map: FastHashMap::default(),
temp: Vec::new(),
backend,
@ -187,8 +209,8 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Remove an id from the tracked map.
pub fn remove(&mut self, id: S::Id) -> bool {
let (index, epoch, backend) = id.unzip();
pub(crate) fn remove(&mut self, id: Valid<S::Id>) -> bool {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.remove(&index) {
Some(resource) => {
@ -200,8 +222,8 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Removes the resource from the tracker if we are holding the last reference.
pub fn remove_abandoned(&mut self, id: S::Id) -> bool {
let (index, epoch, backend) = id.unzip();
pub(crate) fn remove_abandoned(&mut self, id: Valid<S::Id>) -> bool {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
Entry::Occupied(e) => {
@ -218,18 +240,18 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Try to optimize the internal representation.
pub fn optimize(&mut self) {
pub(crate) fn optimize(&mut self) {
for resource in self.map.values_mut() {
resource.state.optimize();
}
}
/// Return an iterator over used resources keys.
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = S::Id> {
pub fn used<'a>(&'a self) -> impl 'a + Iterator<Item = Valid<S::Id>> {
let backend = self.backend;
self.map
.iter()
.map(move |(&index, resource)| S::Id::zip(index, resource.epoch, backend))
.map(move |(&index, resource)| Valid(S::Id::zip(index, resource.epoch, backend)))
}
/// Clear the tracked contents.
@ -237,16 +259,16 @@ impl<S: ResourceState> ResourceTracker<S> {
self.map.clear();
}
/// Returns true if the tracker is empty.
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// Initialize a resource to be used.
///
/// Returns false if the resource is already registered.
pub fn init(&mut self, id: S::Id, ref_count: RefCount, state: S) -> Result<(), &S> {
let (index, epoch, backend) = id.unzip();
pub(crate) fn init(
&mut self,
id: Valid<S::Id>,
ref_count: RefCount,
state: S,
) -> Result<(), &S> {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
match self.map.entry(index) {
Entry::Vacant(e) => {
@ -265,8 +287,8 @@ impl<S: ResourceState> ResourceTracker<S> {
///
/// Returns `Some(Usage)` only if this usage is consistent
/// across the given selector.
pub fn query(&self, id: S::Id, selector: S::Selector) -> Option<S::Usage> {
let (index, epoch, backend) = id.unzip();
pub fn query(&self, id: Valid<S::Id>, selector: S::Selector) -> Option<S::Usage> {
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(backend, self.backend);
let res = self.map.get(&index)?;
assert_eq!(res.epoch, epoch);
@ -278,10 +300,10 @@ impl<S: ResourceState> ResourceTracker<S> {
fn get_or_insert<'a>(
self_backend: wgt::Backend,
map: &'a mut FastHashMap<Index, Resource<S>>,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
) -> &'a mut Resource<S> {
let (index, epoch, backend) = id.unzip();
let (index, epoch, backend) = id.0.unzip();
debug_assert_eq!(self_backend, backend);
match map.entry(index) {
Entry::Vacant(e) => e.insert(Resource {
@ -299,9 +321,9 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Extend the usage of a specified resource.
///
/// Returns conflicting transition as an error.
pub fn change_extend(
pub(crate) fn change_extend(
&mut self,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
@ -312,9 +334,9 @@ impl<S: ResourceState> ResourceTracker<S> {
}
/// Replace the usage of a specified resource.
pub fn change_replace(
pub(crate) fn change_replace(
&mut self,
id: S::Id,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
@ -326,9 +348,24 @@ impl<S: ResourceState> ResourceTracker<S> {
self.temp.drain(..)
}
/// Turn the tracking from the "expand" mode into the "replace" one,
/// installing the selected usage as the "first".
/// This is a special operation only used by the render pass attachments.
pub(crate) fn prepend(
&mut self,
id: Valid<S::Id>,
ref_count: &RefCount,
selector: S::Selector,
usage: S::Usage,
) -> Result<(), PendingTransition<S>> {
Self::get_or_insert(self.backend, &mut self.map, id, ref_count)
.state
.prepend(id, selector, usage)
}
/// Merge another tracker into `self` by extending the current states
/// without any transitions.
pub fn merge_extend(&mut self, other: &Self) -> Result<(), PendingTransition<S>> {
pub(crate) fn merge_extend(&mut self, other: &Self) -> Result<(), PendingTransition<S>> {
debug_assert_eq!(self.backend, other.backend);
for (&index, new) in other.map.iter() {
match self.map.entry(index) {
@ -337,7 +374,7 @@ impl<S: ResourceState> ResourceTracker<S> {
}
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch);
let id = S::Id::zip(index, new.epoch, self.backend);
let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut().state.merge(id, &new.state, None)?;
}
}
@ -347,7 +384,7 @@ impl<S: ResourceState> ResourceTracker<S> {
/// Merge another tracker, adding it's transitions to `self`.
/// Transitions the current usage to the new one.
pub fn merge_replace<'a>(&'a mut self, other: &'a Self) -> Drain<PendingTransition<S>> {
pub(crate) fn merge_replace<'a>(&'a mut self, other: &'a Self) -> Drain<PendingTransition<S>> {
for (&index, new) in other.map.iter() {
match self.map.entry(index) {
Entry::Vacant(e) => {
@ -355,7 +392,7 @@ impl<S: ResourceState> ResourceTracker<S> {
}
Entry::Occupied(e) => {
assert_eq!(e.get().epoch, new.epoch);
let id = S::Id::zip(index, new.epoch, self.backend);
let id = Valid(S::Id::zip(index, new.epoch, self.backend));
e.into_mut()
.state
.merge(id, &new.state, Some(&mut self.temp))
@ -371,33 +408,35 @@ impl<S: ResourceState> ResourceTracker<S> {
/// the last read-only usage, if possible.
///
/// Returns the old usage as an error if there is a conflict.
pub fn use_extend<'a, T: 'a + Borrow<RefCount>>(
pub(crate) fn use_extend<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, S::Id>,
id: S::Id,
selector: S::Selector,
usage: S::Usage,
) -> Result<&'a T, S::Usage> {
let item = &storage[id];
self.change_extend(id, item.borrow(), selector, usage)
) -> Result<&'a T, UseExtendError<S::Usage>> {
let item = storage
.get(id)
.map_err(|_| UseExtendError::InvalidResource)?;
self.change_extend(Valid(id), item.borrow(), selector, usage)
.map(|()| item)
.map_err(|pending| pending.usage.start)
.map_err(|pending| UseExtendError::Conflict(pending.usage.end))
}
/// Use a given resource provided by an `Id` with the specified usage.
/// Combines storage access by 'Id' with the transition that replaces
/// the last usage with a new one, returning an iterator over these
/// transitions.
pub fn use_replace<'a, T: 'a + Borrow<RefCount>>(
pub(crate) fn use_replace<'a, T: 'a + Borrow<RefCount>>(
&mut self,
storage: &'a Storage<T, S::Id>,
id: S::Id,
selector: S::Selector,
usage: S::Usage,
) -> (&'a T, Drain<PendingTransition<S>>) {
let item = &storage[id];
let drain = self.change_replace(id, item.borrow(), selector, usage);
(item, drain)
) -> Result<(&'a T, Drain<PendingTransition<S>>), S::Id> {
let item = storage.get(id).map_err(|_| id)?;
let drain = self.change_replace(Valid(id), item.borrow(), selector, usage);
Ok((item, drain))
}
}
@ -412,7 +451,7 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
fn change(
&mut self,
_id: Self::Id,
_id: Valid<Self::Id>,
_selector: Self::Selector,
_usage: Self::Usage,
_output: Option<&mut Vec<PendingTransition<Self>>>,
@ -420,9 +459,18 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
Ok(())
}
fn prepend(
&mut self,
_id: Valid<Self::Id>,
_selector: Self::Selector,
_usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
Ok(())
}
fn merge(
&mut self,
_id: Self::Id,
_id: Valid<Self::Id>,
_other: &Self,
_output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {
@ -434,6 +482,24 @@ impl<I: Copy + fmt::Debug + TypedId> ResourceState for PhantomData<I> {
pub const DUMMY_SELECTOR: () = ();
#[derive(Clone, Debug, Error)]
pub enum UsageConflict {
#[error(
"Attempted to use buffer {id:?} as a combination of {combined_use:?} within a usage scope."
)]
Buffer {
id: id::BufferId,
combined_use: resource::BufferUse,
},
#[error("Attempted to use texture {id:?} mips {mip_levels:?} layers {array_layers:?} as a combination of {combined_use:?} within a usage scope.")]
Texture {
id: id::TextureId,
mip_levels: ops::Range<u32>,
array_layers: ops::Range<u32>,
combined_use: resource::TextureUse,
},
}
/// A set of trackers for all relevant resources.
#[derive(Debug)]
pub(crate) struct TrackerSet {
@ -444,12 +510,13 @@ pub(crate) struct TrackerSet {
pub samplers: ResourceTracker<PhantomData<id::SamplerId>>,
pub compute_pipes: ResourceTracker<PhantomData<id::ComputePipelineId>>,
pub render_pipes: ResourceTracker<PhantomData<id::RenderPipelineId>>,
pub bundles: ResourceTracker<PhantomData<id::RenderBundleId>>,
}
impl TrackerSet {
/// Create an empty set.
pub fn new(backend: wgt::Backend) -> Self {
TrackerSet {
Self {
buffers: ResourceTracker::new(backend),
textures: ResourceTracker::new(backend),
views: ResourceTracker::new(backend),
@ -457,6 +524,7 @@ impl TrackerSet {
samplers: ResourceTracker::new(backend),
compute_pipes: ResourceTracker::new(backend),
render_pipes: ResourceTracker::new(backend),
bundles: ResourceTracker::new(backend),
}
}
@ -469,6 +537,7 @@ impl TrackerSet {
self.samplers.clear();
self.compute_pipes.clear();
self.render_pipes.clear();
self.bundles.clear();
}
/// Try to optimize the tracking representation.
@ -480,13 +549,26 @@ impl TrackerSet {
self.samplers.optimize();
self.compute_pipes.optimize();
self.render_pipes.optimize();
self.bundles.optimize();
}
/// Merge all the trackers of another instance by extending
/// the usage. Panics on a conflict.
pub fn merge_extend(&mut self, other: &Self) {
self.buffers.merge_extend(&other.buffers).unwrap();
self.textures.merge_extend(&other.textures).unwrap();
pub fn merge_extend(&mut self, other: &Self) -> Result<(), UsageConflict> {
self.buffers
.merge_extend(&other.buffers)
.map_err(|e| UsageConflict::Buffer {
id: e.id.0,
combined_use: e.usage.end,
})?;
self.textures
.merge_extend(&other.textures)
.map_err(|e| UsageConflict::Texture {
id: e.id.0,
mip_levels: e.selector.levels.start as u32..e.selector.levels.end as u32,
array_layers: e.selector.layers.start as u32..e.selector.layers.end as u32,
combined_use: e.usage.end,
})?;
self.views.merge_extend(&other.views).unwrap();
self.bind_groups.merge_extend(&other.bind_groups).unwrap();
self.samplers.merge_extend(&other.samplers).unwrap();
@ -494,6 +576,8 @@ impl TrackerSet {
.merge_extend(&other.compute_pipes)
.unwrap();
self.render_pipes.merge_extend(&other.render_pipes).unwrap();
self.bundles.merge_extend(&other.bundles).unwrap();
Ok(())
}
pub fn backend(&self) -> wgt::Backend {

Просмотреть файл

@ -18,13 +18,13 @@ pub struct RangedStates<I, T> {
impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
pub fn empty() -> Self {
RangedStates {
Self {
ranges: SmallVec::new(),
}
}
pub fn from_range(range: Range<I>, value: T) -> Self {
RangedStates {
Self {
ranges: iter::once((range, value)).collect(),
}
}
@ -32,7 +32,7 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
/// Construct a new instance from a slice of ranges.
#[cfg(test)]
pub fn from_slice(values: &[(Range<I>, T)]) -> Self {
RangedStates {
Self {
ranges: values.iter().cloned().collect(),
}
}

Просмотреть файл

@ -3,7 +3,11 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use super::{range::RangedStates, PendingTransition, ResourceState, Unit};
use crate::{device::MAX_MIP_LEVELS, id::TextureId, resource::TextureUse};
use crate::{
device::MAX_MIP_LEVELS,
id::{TextureId, Valid},
resource::TextureUse,
};
use arrayvec::ArrayVec;
@ -12,9 +16,16 @@ use std::{iter, ops::Range};
//TODO: store `hal::image::State` here to avoid extra conversions
type PlaneStates = RangedStates<hal::image::Layer, Unit<TextureUse>>;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct TextureSelector {
//pub aspects: hal::format::Aspects,
pub levels: Range<hal::image::Level>,
pub layers: Range<hal::image::Layer>,
}
#[derive(Clone, Debug, Default, PartialEq)]
pub(crate) struct TextureState {
mips: ArrayVec<[PlaneStates; MAX_MIP_LEVELS]>,
mips: ArrayVec<[PlaneStates; MAX_MIP_LEVELS as usize]>,
/// True if we have the information about all the subresources here
full: bool,
}
@ -33,14 +44,12 @@ impl PendingTransition<TextureState> {
}
impl TextureState {
pub fn with_range(range: &hal::image::SubresourceRange) -> Self {
debug_assert_eq!(range.layers.start, 0);
debug_assert_eq!(range.levels.start, 0);
TextureState {
pub fn new(mip_level_count: hal::image::Level, array_layer_count: hal::image::Layer) -> Self {
Self {
mips: iter::repeat_with(|| {
PlaneStates::from_range(0..range.layers.end, Unit::new(TextureUse::UNINITIALIZED))
PlaneStates::from_range(0..array_layer_count, Unit::new(TextureUse::UNINITIALIZED))
})
.take(range.levels.end as usize)
.take(mip_level_count as usize)
.collect(),
full: true,
}
@ -49,7 +58,7 @@ impl TextureState {
impl ResourceState for TextureState {
type Id = TextureId;
type Selector = hal::image::SubresourceRange;
type Selector = TextureSelector;
type Usage = TextureUse;
fn query(&self, selector: Self::Selector) -> Option<Self::Usage> {
@ -79,7 +88,7 @@ impl ResourceState for TextureState {
fn change(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
mut output: Option<&mut Vec<PendingTransition<Self>>>,
@ -102,13 +111,11 @@ impl ResourceState for TextureState {
continue;
}
// TODO: Can't satisfy clippy here unless we modify
// `hal::image::SubresourceRange` in gfx to use
// `std::ops::RangeBounds`.
// `TextureSelector` to use `std::ops::RangeBounds`.
#[allow(clippy::range_plus_one)]
let pending = PendingTransition {
id,
selector: hal::image::SubresourceRange {
aspects: hal::format::Aspects::empty(),
selector: TextureSelector {
levels: level..level + 1,
layers: range.clone(),
},
@ -136,9 +143,43 @@ impl ResourceState for TextureState {
Ok(())
}
fn prepend(
&mut self,
id: Valid<Self::Id>,
selector: Self::Selector,
usage: Self::Usage,
) -> Result<(), PendingTransition<Self>> {
assert!(self.mips.len() >= selector.levels.end as usize);
for (mip_id, mip) in self.mips[selector.levels.start as usize..selector.levels.end as usize]
.iter_mut()
.enumerate()
{
let level = selector.levels.start + mip_id as hal::image::Level;
let layers = mip.isolate(&selector.layers, Unit::new(usage));
for &mut (ref range, ref mut unit) in layers {
match unit.first {
Some(old) if old != usage => {
return Err(PendingTransition {
id,
selector: TextureSelector {
levels: level..level + 1,
layers: range.clone(),
},
usage: old..usage,
});
}
_ => {
unit.first = Some(usage);
}
}
}
}
Ok(())
}
fn merge(
&mut self,
id: Self::Id,
id: Valid<Self::Id>,
other: &Self,
mut output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {
@ -185,13 +226,11 @@ impl ResourceState for TextureState {
}
} else {
// TODO: Can't satisfy clippy here unless we modify
// `hal::image::SubresourceRange` in gfx to use
// `std::ops::RangeBounds`.
// `TextureSelector` to use `std::ops::RangeBounds`.
#[allow(clippy::range_plus_one)]
let pending = PendingTransition {
id,
selector: hal::image::SubresourceRange {
aspects: hal::format::Aspects::empty(),
selector: TextureSelector {
levels: level..level + 1,
layers: layers.clone(),
},
@ -237,7 +276,6 @@ mod test {
//TODO: change() tests
use super::*;
use crate::id::Id;
use hal::{format::Aspects, image::SubresourceRange};
#[test]
fn query() {
@ -250,8 +288,7 @@ mod test {
]));
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
ts.query(TextureSelector {
levels: 1..2,
layers: 2..5,
}),
@ -259,8 +296,7 @@ mod test {
Some(TextureUse::SAMPLED),
);
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
ts.query(TextureSelector {
levels: 0..2,
layers: 2..5,
}),
@ -268,8 +304,7 @@ mod test {
Some(TextureUse::SAMPLED),
);
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
ts.query(TextureSelector {
levels: 1..2,
layers: 1..5,
}),
@ -277,8 +312,7 @@ mod test {
Some(TextureUse::SAMPLED),
);
assert_eq!(
ts.query(SubresourceRange {
aspects: Aspects::COLOR,
ts.query(TextureSelector {
levels: 1..2,
layers: 4..6,
}),
@ -289,7 +323,7 @@ mod test {
#[test]
fn merge() {
let id = Id::default();
let id = Id::dummy();
let mut ts1 = TextureState::default();
ts1.mips.push(PlaneStates::from_slice(&[(
1..3,
@ -307,7 +341,7 @@ mod test {
Unit::new(TextureUse::COPY_SRC),
)]));
assert_eq!(
ts1.merge(Id::default(), &ts2, None),
ts1.merge(Id::dummy(), &ts2, None),
Ok(()),
"failed to extend a compatible state"
);
@ -322,11 +356,10 @@ mod test {
ts2.mips[0] = PlaneStates::from_slice(&[(1..2, Unit::new(TextureUse::COPY_DST))]);
assert_eq!(
ts1.clone().merge(Id::default(), &ts2, None),
ts1.clone().merge(Id::dummy(), &ts2, None),
Err(PendingTransition {
id,
selector: SubresourceRange {
aspects: Aspects::empty(),
selector: TextureSelector {
levels: 0..1,
layers: 1..2,
},
@ -346,14 +379,13 @@ mod test {
},
),
]);
ts1.merge(Id::default(), &ts2, Some(&mut list)).unwrap();
ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
assert_eq!(
&list,
&[
PendingTransition {
id,
selector: SubresourceRange {
aspects: Aspects::empty(),
selector: TextureSelector {
levels: 0..1,
layers: 1..2,
},
@ -361,8 +393,7 @@ mod test {
},
PendingTransition {
id,
selector: SubresourceRange {
aspects: Aspects::empty(),
selector: TextureSelector {
levels: 0..1,
layers: 2..3,
},
@ -398,7 +429,7 @@ mod test {
last: TextureUse::COPY_SRC,
},
)]);
ts1.merge(Id::default(), &ts2, Some(&mut list)).unwrap();
ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
assert_eq!(&list, &[], "unexpected replacing transition");
list.clear();
@ -409,13 +440,12 @@ mod test {
last: TextureUse::COPY_DST,
},
)]);
ts1.merge(Id::default(), &ts2, Some(&mut list)).unwrap();
ts1.merge(Id::dummy(), &ts2, Some(&mut list)).unwrap();
assert_eq!(
&list,
&[PendingTransition {
id,
selector: SubresourceRange {
aspects: Aspects::empty(),
selector: TextureSelector {
levels: 0..1,
layers: 2..3,
},

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,10 +1,7 @@
[package]
name = "wgpu-types"
version = "0.5.0"
authors = [
"Dzmitry Malyshau <kvark@mozilla.com>",
"Joshua Groves <josh@joshgroves.com>",
]
version = "0.6.0"
authors = ["wgpu developers"]
edition = "2018"
description = "WebGPU types"
homepage = "https://github.com/gfx-rs/wgpu"
@ -21,4 +18,3 @@ replay = ["serde"]
[dependencies]
bitflags = "1.0"
serde = { version = "1.0", features = ["serde_derive"], optional = true }
peek-poke = { version = "0.2", optional = true }

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -17,15 +17,14 @@ default = []
[dependencies.wgc]
path = "../wgpu/wgpu-core"
package = "wgpu-core"
version = "0.5"
features = ["trace"]
features = ["replay", "trace", "serial-pass"]
[dependencies.wgt]
path = "../wgpu/wgpu-types"
package = "wgpu-types"
version = "0.5"
features = ["trace"]
[dependencies]
bincode = "1"
log = "0.4"
parking_lot = { version = "0.11" }
parking_lot = "0.11"
serde = "1"

Просмотреть файл

@ -8,13 +8,17 @@ autogen_warning = """/* DO NOT MODIFY THIS MANUALLY! This file was generated usi
* 2. Run `rustup run nightly cbindgen toolkit/library/rust/ --lockfile Cargo.lock --crate wgpu_bindings -o dom/webgpu/ffi/wgpu_ffi_generated.h`
*/
struct WGPUByteBuf;
typedef uint64_t WGPUNonZeroU64;
typedef uint64_t WGPUOption_BufferSize;
typedef uint32_t WGPUOption_NonZeroU32;
typedef uint8_t WGPUOption_NonZeroU8;
typedef uint64_t WGPUOption_AdapterId;
typedef uint64_t WGPUOption_BufferId;
typedef uint64_t WGPUOption_PipelineLayoutId;
typedef uint64_t WGPUOption_SamplerId;
typedef uint64_t WGPUOption_SurfaceId;
typedef uint64_t WGPUOption_TextureViewId;
typedef char WGPUNonExhaustive[0];
"""
include_version = true
braces = "SameLine"
@ -25,13 +29,16 @@ style = "tag"
[export]
prefix = "WGPU"
include = ["TextureComponentType", "TextureViewDimension"]
exclude = ["NonExhaustive", "Option_AdapterId", "Option_BufferId", "Option_SamplerId", "Option_SurfaceId", "Option_TextureViewId"]
exclude = [
"Option_AdapterId", "Option_BufferId", "Option_PipelineLayoutId", "Option_SamplerId", "Option_SurfaceId", "Option_TextureViewId",
"Option_BufferSize", "Option_NonZeroU32", "Option_NonZeroU8",
]
[export.rename]
"BufferDescriptor_RawString" = "BufferDescriptor"
"CommandBufferDescriptor_RawString" = "CommandBufferDescriptor"
"CommandEncoderDescriptor_RawString" = "CommandEncoderDescriptor"
"TextureDescriptor_RawString" = "TextureDescriptor"
"TextureViewDescriptor_RawString" = "TextureViewDescriptor"
"SamplerDescriptor_RawString" = "SamplerDescriptor"
[parse]

Просмотреть файл

@ -0,0 +1,943 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{cow_label, ByteBuf, CommandEncoderAction, DeviceAction, RawString, TextureAction};
use wgc::{hub::IdentityManager, id};
use wgt::Backend;
pub use wgc::command::{compute_ffi::*, render_ffi::*};
use parking_lot::Mutex;
use std::{
borrow::Cow,
mem,
num::{NonZeroU32, NonZeroU8},
ptr, slice,
};
fn make_byte_buf<T: serde::Serialize>(data: &T) -> ByteBuf {
let vec = bincode::serialize(data).unwrap();
let bb = ByteBuf {
data: vec.as_ptr(),
len: vec.len(),
capacity: vec.capacity(),
};
mem::forget(vec);
bb
}
#[repr(C)]
pub struct ShaderModuleDescriptor {
spirv_words: *const u32,
spirv_words_length: usize,
wgsl_chars: RawString,
}
#[repr(C)]
pub struct ProgrammableStageDescriptor {
module: id::ShaderModuleId,
entry_point: RawString,
}
impl ProgrammableStageDescriptor {
fn to_wgpu(&self) -> wgc::pipeline::ProgrammableStageDescriptor {
wgc::pipeline::ProgrammableStageDescriptor {
module: self.module,
entry_point: cow_label(&self.entry_point).unwrap(),
}
}
}
#[repr(C)]
pub struct ComputePipelineDescriptor {
label: RawString,
layout: Option<id::PipelineLayoutId>,
compute_stage: ProgrammableStageDescriptor,
}
#[repr(C)]
pub struct VertexBufferDescriptor {
stride: wgt::BufferAddress,
step_mode: wgt::InputStepMode,
attributes: *const wgt::VertexAttributeDescriptor,
attributes_length: usize,
}
#[repr(C)]
pub struct VertexStateDescriptor {
index_format: wgt::IndexFormat,
vertex_buffers: *const VertexBufferDescriptor,
vertex_buffers_length: usize,
}
#[repr(C)]
pub struct RenderPipelineDescriptor<'a> {
label: RawString,
layout: Option<id::PipelineLayoutId>,
vertex_stage: &'a ProgrammableStageDescriptor,
fragment_stage: Option<&'a ProgrammableStageDescriptor>,
primitive_topology: wgt::PrimitiveTopology,
rasterization_state: Option<&'a wgt::RasterizationStateDescriptor>,
color_states: *const wgt::ColorStateDescriptor,
color_states_length: usize,
depth_stencil_state: Option<&'a wgt::DepthStencilStateDescriptor>,
vertex_state: VertexStateDescriptor,
sample_count: u32,
sample_mask: u32,
alpha_to_coverage_enabled: bool,
}
#[repr(C)]
pub enum RawBindingType {
UniformBuffer,
StorageBuffer,
ReadonlyStorageBuffer,
Sampler,
ComparisonSampler,
SampledTexture,
ReadonlyStorageTexture,
WriteonlyStorageTexture,
}
#[repr(C)]
pub struct BindGroupLayoutEntry<'a> {
binding: u32,
visibility: wgt::ShaderStage,
ty: RawBindingType,
has_dynamic_offset: bool,
min_binding_size: Option<wgt::BufferSize>,
view_dimension: Option<&'a wgt::TextureViewDimension>,
texture_component_type: Option<&'a wgt::TextureComponentType>,
multisampled: bool,
storage_texture_format: Option<&'a wgt::TextureFormat>,
}
#[repr(C)]
pub struct BindGroupLayoutDescriptor<'a> {
label: RawString,
entries: *const BindGroupLayoutEntry<'a>,
entries_length: usize,
}
#[repr(C)]
#[derive(Debug)]
pub struct BindGroupEntry {
binding: u32,
buffer: Option<id::BufferId>,
offset: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
sampler: Option<id::SamplerId>,
texture_view: Option<id::TextureViewId>,
}
#[repr(C)]
pub struct BindGroupDescriptor {
label: RawString,
layout: id::BindGroupLayoutId,
entries: *const BindGroupEntry,
entries_length: usize,
}
#[repr(C)]
pub struct PipelineLayoutDescriptor {
label: RawString,
bind_group_layouts: *const id::BindGroupLayoutId,
bind_group_layouts_length: usize,
}
#[repr(C)]
pub struct SamplerDescriptor<'a> {
label: RawString,
address_modes: [wgt::AddressMode; 3],
mag_filter: wgt::FilterMode,
min_filter: wgt::FilterMode,
mipmap_filter: wgt::FilterMode,
lod_min_clamp: f32,
lod_max_clamp: f32,
compare: Option<&'a wgt::CompareFunction>,
anisotropy_clamp: Option<NonZeroU8>,
}
#[repr(C)]
pub struct TextureViewDescriptor<'a> {
label: RawString,
format: Option<&'a wgt::TextureFormat>,
dimension: Option<&'a wgt::TextureViewDimension>,
aspect: wgt::TextureAspect,
base_mip_level: u32,
level_count: Option<NonZeroU32>,
base_array_layer: u32,
array_layer_count: Option<NonZeroU32>,
}
#[derive(Debug, Default)]
struct IdentityHub {
adapters: IdentityManager,
devices: IdentityManager,
buffers: IdentityManager,
command_buffers: IdentityManager,
render_bundles: IdentityManager,
bind_group_layouts: IdentityManager,
pipeline_layouts: IdentityManager,
bind_groups: IdentityManager,
shader_modules: IdentityManager,
compute_pipelines: IdentityManager,
render_pipelines: IdentityManager,
textures: IdentityManager,
texture_views: IdentityManager,
samplers: IdentityManager,
}
#[derive(Debug, Default)]
struct Identities {
surfaces: IdentityManager,
vulkan: IdentityHub,
#[cfg(any(target_os = "ios", target_os = "macos"))]
metal: IdentityHub,
#[cfg(windows)]
dx12: IdentityHub,
}
impl Identities {
fn select(&mut self, backend: Backend) -> &mut IdentityHub {
match backend {
Backend::Vulkan => &mut self.vulkan,
#[cfg(any(target_os = "ios", target_os = "macos"))]
Backend::Metal => &mut self.metal,
#[cfg(windows)]
Backend::Dx12 => &mut self.dx12,
_ => panic!("Unexpected backend: {:?}", backend),
}
}
}
#[derive(Debug)]
pub struct Client {
identities: Mutex<Identities>,
}
#[repr(C)]
#[derive(Debug)]
pub struct Infrastructure {
pub client: *mut Client,
pub error: *const u8,
}
#[no_mangle]
pub extern "C" fn wgpu_client_new() -> Infrastructure {
log::info!("Initializing WGPU client");
let client = Box::new(Client {
identities: Mutex::new(Identities::default()),
});
Infrastructure {
client: Box::into_raw(client),
error: ptr::null(),
}
}
/// # Safety
///
/// This function is unsafe because improper use may lead to memory
/// problems. For example, a double-free may occur if the function is called
/// twice on the same raw pointer.
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_delete(client: *mut Client) {
log::info!("Terminating WGPU client");
let _client = Box::from_raw(client);
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `id_length` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_make_adapter_ids(
client: &Client,
ids: *mut id::AdapterId,
id_length: usize,
) -> usize {
let mut identities = client.identities.lock();
assert_ne!(id_length, 0);
let mut ids = slice::from_raw_parts_mut(ids, id_length).iter_mut();
*ids.next().unwrap() = identities.vulkan.adapters.alloc(Backend::Vulkan);
#[cfg(any(target_os = "ios", target_os = "macos"))]
{
*ids.next().unwrap() = identities.metal.adapters.alloc(Backend::Metal);
}
#[cfg(windows)]
{
*ids.next().unwrap() = identities.dx12.adapters.alloc(Backend::Dx12);
}
id_length - ids.len()
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_adapter_id(client: &Client, id: id::AdapterId) {
client
.identities
.lock()
.select(id.backend())
.adapters
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_device_id(
client: &Client,
adapter_id: id::AdapterId,
) -> id::DeviceId {
let backend = adapter_id.backend();
client
.identities
.lock()
.select(backend)
.devices
.alloc(backend)
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: id::DeviceId) {
client
.identities
.lock()
.select(id.backend())
.devices
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_buffer_id(
client: &Client,
device_id: id::DeviceId,
) -> id::BufferId {
let backend = device_id.backend();
client
.identities
.lock()
.select(backend)
.buffers
.alloc(backend)
}
#[no_mangle]
pub extern "C" fn wgpu_client_create_buffer(
client: &Client,
device_id: id::DeviceId,
desc: &wgt::BufferDescriptor<RawString>,
bb: &mut ByteBuf,
) -> id::BufferId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.buffers
.alloc(backend);
let action = DeviceAction::CreateBuffer(id, desc.map_label(cow_label));
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_buffer_id(client: &Client, id: id::BufferId) {
client
.identities
.lock()
.select(id.backend())
.buffers
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_create_texture(
client: &Client,
device_id: id::DeviceId,
desc: &wgt::TextureDescriptor<RawString>,
bb: &mut ByteBuf,
) -> id::TextureId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.textures
.alloc(backend);
let action = DeviceAction::CreateTexture(id, desc.map_label(cow_label));
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_id(client: &Client, id: id::TextureId) {
client
.identities
.lock()
.select(id.backend())
.textures
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_create_texture_view(
client: &Client,
device_id: id::DeviceId,
desc: &TextureViewDescriptor,
bb: &mut ByteBuf,
) -> id::TextureViewId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.texture_views
.alloc(backend);
let wgpu_desc = wgc::resource::TextureViewDescriptor {
label: cow_label(&desc.label),
format: desc.format.cloned(),
dimension: desc.dimension.cloned(),
aspect: desc.aspect,
base_mip_level: desc.base_mip_level,
level_count: desc.level_count,
base_array_layer: desc.base_array_layer,
array_layer_count: desc.array_layer_count,
};
let action = TextureAction::CreateView(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_view_id(client: &Client, id: id::TextureViewId) {
client
.identities
.lock()
.select(id.backend())
.texture_views
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_create_sampler(
client: &Client,
device_id: id::DeviceId,
desc: &SamplerDescriptor,
bb: &mut ByteBuf,
) -> id::SamplerId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.samplers
.alloc(backend);
let wgpu_desc = wgc::resource::SamplerDescriptor {
label: cow_label(&desc.label),
address_modes: desc.address_modes,
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
lod_min_clamp: desc.lod_min_clamp,
lod_max_clamp: desc.lod_max_clamp,
compare: desc.compare.cloned(),
anisotropy_clamp: desc.anisotropy_clamp,
border_color: None,
};
let action = DeviceAction::CreateSampler(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_sampler_id(client: &Client, id: id::SamplerId) {
client
.identities
.lock()
.select(id.backend())
.samplers
.free(id)
}
#[no_mangle]
pub extern "C" fn wgpu_client_make_encoder_id(
client: &Client,
device_id: id::DeviceId,
) -> id::CommandEncoderId {
let backend = device_id.backend();
client
.identities
.lock()
.select(backend)
.command_buffers
.alloc(backend)
}
#[no_mangle]
pub extern "C" fn wgpu_client_create_command_encoder(
client: &Client,
device_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor<RawString>,
bb: &mut ByteBuf,
) -> id::CommandEncoderId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.command_buffers
.alloc(backend);
let action = DeviceAction::CreateCommandEncoder(id, desc.map_label(cow_label));
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_encoder_id(client: &Client, id: id::CommandEncoderId) {
client
.identities
.lock()
.select(id.backend())
.command_buffers
.free(id)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
encoder_id: id::CommandEncoderId,
_desc: Option<&wgc::command::ComputePassDescriptor>,
) -> *mut wgc::command::ComputePass {
let pass = wgc::command::ComputePass::new(encoder_id);
Box::into_raw(Box::new(pass))
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_finish(
pass: *mut wgc::command::ComputePass,
output: &mut ByteBuf,
) {
let command = Box::from_raw(pass).into_command();
*output = make_byte_buf(&command);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_destroy(pass: *mut wgc::command::ComputePass) {
let _ = Box::from_raw(pass);
}
#[repr(C)]
pub struct RenderPassDescriptor {
pub color_attachments: *const wgc::command::ColorAttachmentDescriptor,
pub color_attachments_length: usize,
pub depth_stencil_attachment: *const wgc::command::DepthStencilAttachmentDescriptor,
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
encoder_id: id::CommandEncoderId,
desc: &RenderPassDescriptor,
) -> *mut wgc::command::RenderPass {
let pass = wgc::command::RenderPass::new(
encoder_id,
wgc::command::RenderPassDescriptor {
color_attachments: Cow::Borrowed(slice::from_raw_parts(
desc.color_attachments,
desc.color_attachments_length,
)),
depth_stencil_attachment: desc.depth_stencil_attachment.as_ref(),
},
);
Box::into_raw(Box::new(pass))
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_finish(
pass: *mut wgc::command::RenderPass,
output: &mut ByteBuf,
) {
let command = Box::from_raw(pass).into_command();
*output = make_byte_buf(&command);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_destroy(pass: *mut wgc::command::RenderPass) {
let _ = Box::from_raw(pass);
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_bind_group_layout(
client: &Client,
device_id: id::DeviceId,
desc: &BindGroupLayoutDescriptor,
bb: &mut ByteBuf,
) -> id::BindGroupLayoutId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.bind_group_layouts
.alloc(backend);
let mut entries = Vec::with_capacity(desc.entries_length);
for entry in slice::from_raw_parts(desc.entries, desc.entries_length) {
entries.push(wgt::BindGroupLayoutEntry {
binding: entry.binding,
visibility: entry.visibility,
count: None,
ty: match entry.ty {
RawBindingType::UniformBuffer => wgt::BindingType::UniformBuffer {
dynamic: entry.has_dynamic_offset,
min_binding_size: entry.min_binding_size,
},
RawBindingType::StorageBuffer => wgt::BindingType::StorageBuffer {
dynamic: entry.has_dynamic_offset,
min_binding_size: entry.min_binding_size,
readonly: false,
},
RawBindingType::ReadonlyStorageBuffer => wgt::BindingType::StorageBuffer {
dynamic: entry.has_dynamic_offset,
min_binding_size: entry.min_binding_size,
readonly: true,
},
RawBindingType::Sampler => wgt::BindingType::Sampler { comparison: false },
RawBindingType::ComparisonSampler => wgt::BindingType::Sampler { comparison: true },
RawBindingType::SampledTexture => wgt::BindingType::SampledTexture {
dimension: *entry.view_dimension.unwrap(),
component_type: *entry.texture_component_type.unwrap(),
multisampled: entry.multisampled,
},
RawBindingType::ReadonlyStorageTexture => wgt::BindingType::StorageTexture {
dimension: *entry.view_dimension.unwrap(),
format: *entry.storage_texture_format.unwrap(),
readonly: true,
},
RawBindingType::WriteonlyStorageTexture => wgt::BindingType::StorageTexture {
dimension: *entry.view_dimension.unwrap(),
format: *entry.storage_texture_format.unwrap(),
readonly: false,
},
},
});
}
let wgpu_desc = wgc::binding_model::BindGroupLayoutDescriptor {
label: cow_label(&desc.label),
entries: Cow::Owned(entries),
};
let action = DeviceAction::CreateBindGroupLayout(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_bind_group_layout_id(
client: &Client,
id: id::BindGroupLayoutId,
) {
client
.identities
.lock()
.select(id.backend())
.bind_group_layouts
.free(id)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_pipeline_layout(
client: &Client,
device_id: id::DeviceId,
desc: &PipelineLayoutDescriptor,
bb: &mut ByteBuf,
) -> id::PipelineLayoutId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.pipeline_layouts
.alloc(backend);
let wgpu_desc = wgc::binding_model::PipelineLayoutDescriptor {
label: cow_label(&desc.label),
bind_group_layouts: Cow::Borrowed(slice::from_raw_parts(
desc.bind_group_layouts,
desc.bind_group_layouts_length,
)),
push_constant_ranges: Cow::Borrowed(&[]),
};
let action = DeviceAction::CreatePipelineLayout(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_pipeline_layout_id(client: &Client, id: id::PipelineLayoutId) {
client
.identities
.lock()
.select(id.backend())
.pipeline_layouts
.free(id)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_bind_group(
client: &Client,
device_id: id::DeviceId,
desc: &BindGroupDescriptor,
bb: &mut ByteBuf,
) -> id::BindGroupId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.bind_groups
.alloc(backend);
let mut entries = Vec::with_capacity(desc.entries_length);
for entry in slice::from_raw_parts(desc.entries, desc.entries_length) {
entries.push(wgc::binding_model::BindGroupEntry {
binding: entry.binding,
resource: if let Some(id) = entry.buffer {
wgc::binding_model::BindingResource::Buffer(wgc::binding_model::BufferBinding {
buffer_id: id,
offset: entry.offset,
size: entry.size,
})
} else if let Some(id) = entry.sampler {
wgc::binding_model::BindingResource::Sampler(id)
} else if let Some(id) = entry.texture_view {
wgc::binding_model::BindingResource::TextureView(id)
} else {
panic!("Unexpected binding entry {:?}", entry);
},
});
}
let wgpu_desc = wgc::binding_model::BindGroupDescriptor {
label: cow_label(&desc.label),
layout: desc.layout,
entries: Cow::Owned(entries),
};
let action = DeviceAction::CreateBindGroup(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_bind_group_id(client: &Client, id: id::BindGroupId) {
client
.identities
.lock()
.select(id.backend())
.bind_groups
.free(id)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_shader_module(
client: &Client,
device_id: id::DeviceId,
desc: &ShaderModuleDescriptor,
bb: &mut ByteBuf,
) -> id::ShaderModuleId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.shader_modules
.alloc(backend);
assert!(!desc.spirv_words.is_null());
let data = Cow::Borrowed(slice::from_raw_parts(
desc.spirv_words,
desc.spirv_words_length,
));
let action = DeviceAction::CreateShaderModule(id, data);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_shader_module_id(client: &Client, id: id::ShaderModuleId) {
client
.identities
.lock()
.select(id.backend())
.shader_modules
.free(id)
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_compute_pipeline(
client: &Client,
device_id: id::DeviceId,
desc: &ComputePipelineDescriptor,
bb: &mut ByteBuf,
) -> id::ComputePipelineId {
let backend = device_id.backend();
let id = client
.identities
.lock()
.select(backend)
.compute_pipelines
.alloc(backend);
let wgpu_desc = wgc::pipeline::ComputePipelineDescriptor {
label: cow_label(&desc.label),
layout: desc.layout,
compute_stage: desc.compute_stage.to_wgpu(),
};
let action = DeviceAction::CreateComputePipeline(id, wgpu_desc);
*bb = make_byte_buf(&action);
id
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_compute_pipeline_id(client: &Client, id: id::ComputePipelineId) {
client
.identities
.lock()
.select(id.backend())
.compute_pipelines
.free(id)
}
/// Allocates a render pipeline id and serializes a `CreateRenderPipeline`
/// action into `bb` for the GPU process to replay.
///
/// # Safety
///
/// `desc` must point to a valid FFI descriptor: `color_states`,
/// `vertex_state.vertex_buffers`, and each non-null `attributes` pointer
/// must be valid for their corresponding `*_length` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_create_render_pipeline(
    client: &Client,
    device_id: id::DeviceId,
    desc: &RenderPipelineDescriptor,
    bb: &mut ByteBuf,
) -> id::RenderPipelineId {
    let backend = device_id.backend();
    // Mint the id locally; the server learns about it via the action below.
    let id = client
        .identities
        .lock()
        .select(backend)
        .render_pipelines
        .alloc(backend);
    // Translate the flat FFI descriptor into the serde-serializable wgc form.
    let wgpu_desc = wgc::pipeline::RenderPipelineDescriptor {
        label: cow_label(&desc.label),
        layout: desc.layout,
        vertex_stage: desc.vertex_stage.to_wgpu(),
        fragment_stage: desc
            .fragment_stage
            .map(ProgrammableStageDescriptor::to_wgpu),
        rasterization_state: desc.rasterization_state.cloned(),
        primitive_topology: desc.primitive_topology,
        color_states: Cow::Borrowed(slice::from_raw_parts(
            desc.color_states,
            desc.color_states_length,
        )),
        depth_stencil_state: desc.depth_stencil_state.cloned(),
        vertex_state: wgc::pipeline::VertexStateDescriptor {
            index_format: desc.vertex_state.index_format,
            vertex_buffers: {
                let vbufs = slice::from_raw_parts(
                    desc.vertex_state.vertex_buffers,
                    desc.vertex_state.vertex_buffers_length,
                );
                // Each buffer's attribute pointer may be null (empty list).
                let owned = vbufs
                    .iter()
                    .map(|vb| wgc::pipeline::VertexBufferDescriptor {
                        stride: vb.stride,
                        step_mode: vb.step_mode,
                        attributes: Cow::Borrowed(if vb.attributes.is_null() {
                            &[]
                        } else {
                            slice::from_raw_parts(vb.attributes, vb.attributes_length)
                        }),
                    })
                    .collect();
                Cow::Owned(owned)
            },
        },
        sample_count: desc.sample_count,
        sample_mask: desc.sample_mask,
        alpha_to_coverage_enabled: desc.alpha_to_coverage_enabled,
    };
    let action = DeviceAction::CreateRenderPipeline(id, wgpu_desc);
    *bb = make_byte_buf(&action);
    id
}
/// Returns a client-allocated render pipeline id to the identity pool
/// of the backend it was minted for.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_render_pipeline_id(client: &Client, id: id::RenderPipelineId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).render_pipelines.free(id);
}
/// Serializes a buffer-to-buffer copy command into `bb` for replay in the
/// GPU process.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_buffer(
    src: id::BufferId,
    src_offset: wgt::BufferAddress,
    dst: id::BufferId,
    dst_offset: wgt::BufferAddress,
    size: wgt::BufferAddress,
    bb: &mut ByteBuf,
) {
    *bb = make_byte_buf(&CommandEncoderAction::CopyBufferToBuffer {
        src,
        src_offset,
        dst,
        dst_offset,
        size,
    });
}
/// Serializes a texture-to-buffer copy command into `bb` for replay in the
/// GPU process.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_buffer(
    src: wgc::command::TextureCopyView,
    dst: wgc::command::BufferCopyView,
    size: wgt::Extent3d,
    bb: &mut ByteBuf,
) {
    *bb = make_byte_buf(&CommandEncoderAction::CopyTextureToBuffer { src, dst, size });
}
/// Serializes a buffer-to-texture copy command into `bb` for replay in the
/// GPU process.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_buffer_to_texture(
    src: wgc::command::BufferCopyView,
    dst: wgc::command::TextureCopyView,
    size: wgt::Extent3d,
    bb: &mut ByteBuf,
) {
    *bb = make_byte_buf(&CommandEncoderAction::CopyBufferToTexture { src, dst, size });
}
/// Serializes a texture-to-texture copy command into `bb` for replay in the
/// GPU process.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_copy_texture_to_texture(
    src: wgc::command::TextureCopyView,
    dst: wgc::command::TextureCopyView,
    size: wgt::Extent3d,
    bb: &mut ByteBuf,
) {
    *bb = make_byte_buf(&CommandEncoderAction::CopyTextureToTexture { src, dst, size });
}

Просмотреть файл

@ -39,6 +39,7 @@ pub struct IdentityRecyclerFactory {
free_bind_group_layout: extern "C" fn(id::BindGroupLayoutId, FactoryParam),
free_bind_group: extern "C" fn(id::BindGroupId, FactoryParam),
free_command_buffer: extern "C" fn(id::CommandBufferId, FactoryParam),
free_render_bundle: extern "C" fn(id::RenderBundleId, FactoryParam),
free_render_pipeline: extern "C" fn(id::RenderPipelineId, FactoryParam),
free_compute_pipeline: extern "C" fn(id::ComputePipelineId, FactoryParam),
free_buffer: extern "C" fn(id::BufferId, FactoryParam),
@ -128,6 +129,16 @@ impl wgc::hub::IdentityHandlerFactory<id::CommandBufferId> for IdentityRecyclerF
}
}
}
// Wires render bundle id recycling through the C callback table, matching
// the sibling impls for the other id types.
impl wgc::hub::IdentityHandlerFactory<id::RenderBundleId> for IdentityRecyclerFactory {
    type Filter = IdentityRecycler<id::RenderBundleId>;
    fn spawn(&self, _min_index: u32) -> Self::Filter {
        IdentityRecycler {
            fun: self.free_render_bundle,
            param: self.param,
            kind: "render_bundle",
        }
    }
}
impl wgc::hub::IdentityHandlerFactory<id::RenderPipelineId> for IdentityRecyclerFactory {
type Filter = IdentityRecycler<id::RenderPipelineId>;
fn spawn(&self, _min_index: u32) -> Self::Filter {

Просмотреть файл

@ -2,456 +2,79 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use wgc::{hub::IdentityManager, id};
use wgt::Backend;
use wgc::id;
pub use wgc::command::{compute_ffi::*, render_ffi::*};
use parking_lot::Mutex;
use std::{ptr, slice};
pub mod client;
pub mod identity;
pub mod server;
// In WebIDL the "whole size" semantics is zero.
// Use this function to convert one into another.
#[no_mangle]
pub extern "C" fn make_buffer_size(raw_size: u64) -> wgt::BufferSize {
if raw_size != 0 {
wgt::BufferSize(raw_size)
pub use wgc::device::trace::Command as CommandEncoderAction;
use std::{borrow::Cow, slice};
type RawString = *const std::os::raw::c_char;
//TODO: figure out why 'a and 'b have to be different here
//TODO: remove this
fn cow_label<'a, 'b>(raw: &'a RawString) -> Option<Cow<'b, str>> {
if raw.is_null() {
None
} else {
wgt::BufferSize::WHOLE
let cstr = unsafe { std::ffi::CStr::from_ptr(*raw) };
cstr.to_str().ok().map(Cow::Borrowed)
}
}
/// Per-backend collection of id allocators, one `IdentityManager` per
/// wgpu object kind.
#[derive(Debug, Default)]
struct IdentityHub {
    adapters: IdentityManager,
    devices: IdentityManager,
    buffers: IdentityManager,
    command_buffers: IdentityManager,
    bind_group_layouts: IdentityManager,
    pipeline_layouts: IdentityManager,
    bind_groups: IdentityManager,
    shader_modules: IdentityManager,
    compute_pipelines: IdentityManager,
    render_pipelines: IdentityManager,
    textures: IdentityManager,
    texture_views: IdentityManager,
    samplers: IdentityManager,
}
/// One `IdentityHub` per compiled-in backend, plus the backend-agnostic
/// surface allocator. Backends are cfg-gated per target OS.
#[derive(Debug, Default)]
struct Identities {
    surfaces: IdentityManager,
    vulkan: IdentityHub,
    #[cfg(any(target_os = "ios", target_os = "macos"))]
    metal: IdentityHub,
    #[cfg(windows)]
    dx12: IdentityHub,
}
impl Identities {
    /// Returns the hub for `backend`; panics on a backend this build does
    /// not support (e.g. Metal on non-Apple targets).
    fn select(&mut self, backend: Backend) -> &mut IdentityHub {
        match backend {
            Backend::Vulkan => &mut self.vulkan,
            #[cfg(any(target_os = "ios", target_os = "macos"))]
            Backend::Metal => &mut self.metal,
            #[cfg(windows)]
            Backend::Dx12 => &mut self.dx12,
            _ => panic!("Unexpected backend: {:?}", backend),
        }
    }
}
/// Content-process handle holding all client-side id allocators behind a
/// mutex (FFI entry points may be reached from multiple threads).
#[derive(Debug)]
pub struct Client {
    identities: Mutex<Identities>,
}
#[repr(C)]
#[derive(Debug)]
pub struct Infrastructure {
pub client: *mut Client,
pub error: *const u8,
pub struct ByteBuf {
data: *const u8,
len: usize,
capacity: usize,
}
#[no_mangle]
pub extern "C" fn wgpu_client_new() -> Infrastructure {
log::info!("Initializing WGPU client");
let client = Box::new(Client {
identities: Mutex::new(Identities::default()),
});
Infrastructure {
client: Box::into_raw(client),
error: ptr::null(),
impl ByteBuf {
    /// Views the buffer contents as a byte slice.
    ///
    /// # Safety
    /// `data` must be valid for `len` bytes for the lifetime of the borrow.
    unsafe fn as_slice(&self) -> &[u8] {
        slice::from_raw_parts(self.data, self.len)
    }
}
/// # Safety
///
/// This function is unsafe because improper use may lead to memory
/// problems. For example, a double-free may occur if the function is called
/// twice on the same raw pointer.
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_delete(client: *mut Client) {
log::info!("Terminating WGPU client");
let _client = Box::from_raw(client);
/// Device-scoped resource-creation commands, serialized (via bincode/serde)
/// into a `ByteBuf` on the client and replayed in the GPU process. Each
/// variant carries the client-minted id followed by its descriptor.
#[derive(serde::Serialize, serde::Deserialize)]
enum DeviceAction<'a> {
    CreateBuffer(id::BufferId, wgc::resource::BufferDescriptor<'a>),
    CreateTexture(id::TextureId, wgc::resource::TextureDescriptor<'a>),
    CreateSampler(id::SamplerId, wgc::resource::SamplerDescriptor<'a>),
    CreateBindGroupLayout(
        id::BindGroupLayoutId,
        wgc::binding_model::BindGroupLayoutDescriptor<'a>,
    ),
    CreatePipelineLayout(
        id::PipelineLayoutId,
        wgc::binding_model::PipelineLayoutDescriptor<'a>,
    ),
    CreateBindGroup(id::BindGroupId, wgc::binding_model::BindGroupDescriptor<'a>),
    // Shader source travels as raw SPIR-V words.
    CreateShaderModule(id::ShaderModuleId, Cow<'a, [u32]>),
    CreateComputePipeline(
        id::ComputePipelineId,
        wgc::pipeline::ComputePipelineDescriptor<'a>,
    ),
    CreateRenderPipeline(
        id::RenderPipelineId,
        wgc::pipeline::RenderPipelineDescriptor<'a>,
    ),
    CreateRenderBundle(
        id::RenderBundleId,
        wgc::command::RenderBundleEncoderDescriptor<'a>,
        wgc::command::BasePass<wgc::command::RenderCommand>,
    ),
    CreateCommandEncoder(
        id::CommandEncoderId,
        wgt::CommandEncoderDescriptor<wgc::Label<'a>>,
    ),
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `id_length` elements.
///
/// Fills `ids` with one freshly allocated adapter id per compiled-in
/// backend (Vulkan always; Metal / DX12 cfg-gated) and returns how many
/// were written. `id_length` must be at least the number of backends.
#[no_mangle]
pub unsafe extern "C" fn wgpu_client_make_adapter_ids(
    client: &Client,
    ids: *mut id::AdapterId,
    id_length: usize,
) -> usize {
    let mut identities = client.identities.lock();
    assert_ne!(id_length, 0);
    let mut ids = slice::from_raw_parts_mut(ids, id_length).iter_mut();

    *ids.next().unwrap() = identities.vulkan.adapters.alloc(Backend::Vulkan);

    #[cfg(any(target_os = "ios", target_os = "macos"))]
    {
        *ids.next().unwrap() = identities.metal.adapters.alloc(Backend::Metal);
    }
    #[cfg(windows)]
    {
        *ids.next().unwrap() = identities.dx12.adapters.alloc(Backend::Dx12);
    }

    // Count of slots actually consumed.
    id_length - ids.len()
}
/// Returns a client-allocated adapter id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_adapter_id(client: &Client, id: id::AdapterId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).adapters.free(id);
}
/// Allocates a device id from the identity pool of `adapter_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_device_id(
    client: &Client,
    adapter_id: id::AdapterId,
) -> id::DeviceId {
    let backend = adapter_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).devices.alloc(backend)
}
/// Returns a client-allocated device id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_device_id(client: &Client, id: id::DeviceId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).devices.free(id);
}
/// Allocates a buffer id from the identity pool of `device_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_buffer_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::BufferId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).buffers.alloc(backend)
}
/// Returns a client-allocated buffer id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_buffer_id(client: &Client, id: id::BufferId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).buffers.free(id);
}
/// Allocates a texture id from the identity pool of `device_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_texture_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::TextureId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).textures.alloc(backend)
}
/// Returns a client-allocated texture id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_id(client: &Client, id: id::TextureId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).textures.free(id);
}
/// Allocates a texture view id from the identity pool of `device_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_texture_view_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::TextureViewId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).texture_views.alloc(backend)
}
/// Returns a client-allocated texture view id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_texture_view_id(client: &Client, id: id::TextureViewId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).texture_views.free(id);
}
/// Allocates a sampler id from the identity pool of `device_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_sampler_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::SamplerId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).samplers.alloc(backend)
}
/// Returns a client-allocated sampler id to its backend's identity pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_sampler_id(client: &Client, id: id::SamplerId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).samplers.free(id);
}
/// Allocates a command encoder id from the identity pool of `device_id`'s
/// backend (encoder ids share the command-buffer pool).
#[no_mangle]
pub extern "C" fn wgpu_client_make_encoder_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::CommandEncoderId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).command_buffers.alloc(backend)
}
/// Returns a client-allocated command encoder id to the command-buffer pool
/// of its backend.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_encoder_id(client: &Client, id: id::CommandEncoderId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).command_buffers.free(id);
}
/// Starts recording a compute pass for `encoder_id`; the descriptor is
/// currently unused.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_compute_pass(
    encoder_id: id::CommandEncoderId,
    _desc: Option<&wgc::command::ComputePassDescriptor>,
) -> wgc::command::RawPass {
    wgc::command::RawPass::new_compute(encoder_id)
}
/// Consumes and invalidates a compute pass, releasing its recording storage.
#[no_mangle]
pub unsafe extern "C" fn wgpu_compute_pass_destroy(mut pass: wgc::command::RawPass) {
    let _ = pass.invalidate();
}
/// Starts recording a render pass for `encoder_id` with the given
/// attachment descriptor.
#[no_mangle]
pub unsafe extern "C" fn wgpu_command_encoder_begin_render_pass(
    encoder_id: id::CommandEncoderId,
    desc: &wgc::command::RenderPassDescriptor,
) -> wgc::command::RawPass {
    wgc::command::RawPass::new_render(encoder_id, desc)
}
/// Consumes and invalidates a render pass, releasing its recording storage.
#[no_mangle]
pub unsafe extern "C" fn wgpu_render_pass_destroy(mut pass: wgc::command::RawPass) {
    let _ = pass.invalidate();
}
/// Allocates a bind group layout id from the identity pool of `device_id`'s
/// backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_bind_group_layout_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::BindGroupLayoutId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).bind_group_layouts.alloc(backend)
}
/// Returns a client-allocated bind group layout id to its backend's pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_bind_group_layout_id(
    client: &Client,
    id: id::BindGroupLayoutId,
) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).bind_group_layouts.free(id);
}
/// Allocates a pipeline layout id from the identity pool of `device_id`'s
/// backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_pipeline_layout_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::PipelineLayoutId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).pipeline_layouts.alloc(backend)
}
/// Returns a client-allocated pipeline layout id to its backend's pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_pipeline_layout_id(client: &Client, id: id::PipelineLayoutId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).pipeline_layouts.free(id);
}
/// Allocates a bind group id from the identity pool of `device_id`'s backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_bind_group_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::BindGroupId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).bind_groups.alloc(backend)
}
/// Returns a client-allocated bind group id to its backend's pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_bind_group_id(client: &Client, id: id::BindGroupId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).bind_groups.free(id);
}
/// Allocates a shader module id from the identity pool of `device_id`'s
/// backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_shader_module_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::ShaderModuleId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).shader_modules.alloc(backend)
}
/// Returns a client-allocated shader module id to its backend's pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_shader_module_id(client: &Client, id: id::ShaderModuleId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).shader_modules.free(id);
}
/// Allocates a compute pipeline id from the identity pool of `device_id`'s
/// backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_compute_pipeline_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::ComputePipelineId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).compute_pipelines.alloc(backend)
}
/// Returns a client-allocated compute pipeline id to its backend's pool.
#[no_mangle]
pub extern "C" fn wgpu_client_kill_compute_pipeline_id(client: &Client, id: id::ComputePipelineId) {
    let mut identities = client.identities.lock();
    identities.select(id.backend()).compute_pipelines.free(id);
}
/// Allocates a render pipeline id from the identity pool of `device_id`'s
/// backend.
#[no_mangle]
pub extern "C" fn wgpu_client_make_render_pipeline_id(
    client: &Client,
    device_id: id::DeviceId,
) -> id::RenderPipelineId {
    let backend = device_id.backend();
    let mut identities = client.identities.lock();
    identities.select(backend).render_pipelines.alloc(backend)
}
#[no_mangle]
pub extern "C" fn wgpu_client_kill_render_pipeline_id(client: &Client, id: id::RenderPipelineId) {
client
.identities
.lock()
.select(id.backend())
.render_pipelines
.free(id)
/// Texture-scoped commands serialized to the GPU process; currently only
/// view creation.
#[derive(serde::Serialize, serde::Deserialize)]
enum TextureAction<'a> {
    CreateView(id::TextureViewId, wgc::resource::TextureViewDescriptor<'a>),
}

Просмотреть файл

@ -2,15 +2,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::identity::IdentityRecyclerFactory;
use crate::{
cow_label, identity::IdentityRecyclerFactory, ByteBuf, CommandEncoderAction, DeviceAction,
RawString, TextureAction,
};
use wgc::{gfx_select, id};
use std::{marker::PhantomData, mem, slice};
use std::slice;
// hide wgc's global in private
pub struct Global(wgc::hub::Global<IdentityRecyclerFactory>);
pub type RawString = *const std::os::raw::c_char;
impl std::ops::Deref for Global {
type Target = wgc::hub::Global<IdentityRecyclerFactory>;
@ -19,95 +21,14 @@ impl std::ops::Deref for Global {
}
}
/// FFI-stable tag for the binding kind, expanded server-side into the
/// richer `wgt::BindingType` enum.
#[repr(C)]
pub enum RawBindingType {
    UniformBuffer,
    StorageBuffer,
    ReadonlyStorageBuffer,
    Sampler,
    ComparisonSampler,
    SampledTexture,
    ReadonlyStorageTexture,
    WriteonlyStorageTexture,
}
// FFI-stable Option<T> for 4-byte enums: the value !0 encodes None.
#[repr(transparent)]
#[derive(Clone, Copy)]
pub struct RawEnumOption<T>(u32, PhantomData<T>);
impl<T: Copy> From<Option<T>> for RawEnumOption<T> {
    /// Packs the enum's 4-byte discriminant, or !0 for `None`.
    /// The debug_assert guards the size assumption behind the transmute.
    fn from(option: Option<T>) -> Self {
        debug_assert_eq!(mem::size_of::<T>(), 4);
        let value = match option {
            Some(ref v) => unsafe { *mem::transmute::<*const T, *const u32>(v) },
            None => !0,
        };
        RawEnumOption(value, PhantomData)
    }
}
impl<T: Copy> RawEnumOption<T> {
    /// Recovers the enum value; panics if the sentinel (None) is stored,
    /// mirroring `Option::unwrap`.
    fn unwrap(self) -> T {
        assert_ne!(self.0, !0);
        unsafe { *mem::transmute::<*const u32, *const T>(&self.0) }
    }
}
/// FFI mirror of a bind group layout entry; optional fields travel as
/// `RawEnumOption` and only apply to the matching `ty` variants.
#[repr(C)]
pub struct BindGroupLayoutEntry {
    pub binding: u32,
    pub visibility: wgt::ShaderStage,
    pub ty: RawBindingType,
    pub has_dynamic_offset: bool,
    pub view_dimension: RawEnumOption<wgt::TextureViewDimension>,
    pub texture_component_type: RawEnumOption<wgt::TextureComponentType>,
    pub multisampled: bool,
    pub storage_texture_format: RawEnumOption<wgt::TextureFormat>,
}
/// FFI bind group layout descriptor; `entries` points at `entries_length`
/// elements.
#[repr(C)]
pub struct BindGroupLayoutDescriptor {
    pub label: RawString,
    pub entries: *const BindGroupLayoutEntry,
    pub entries_length: usize,
}
/// FFI bind group entry: exactly one of `buffer`, `sampler`, or
/// `texture_view` is expected to be set; `offset`/`size` accompany `buffer`.
#[repr(C)]
#[derive(Debug)]
pub struct BindGroupEntry {
    pub binding: u32,
    pub buffer: Option<id::BufferId>,
    pub offset: wgt::BufferAddress,
    pub size: wgt::BufferSize,
    pub sampler: Option<id::SamplerId>,
    pub texture_view: Option<id::TextureViewId>,
}
/// FFI bind group descriptor; `entries` points at `entries_length` elements.
#[repr(C)]
pub struct BindGroupDescriptor {
    pub label: RawString,
    pub layout: id::BindGroupLayoutId,
    pub entries: *const BindGroupEntry,
    pub entries_length: usize,
}
/// FFI sampler descriptor; `address_modes` is [u, v, w] order — TODO confirm
/// against the caller.
#[repr(C)]
pub struct SamplerDescriptor<'a> {
    pub label: RawString,
    pub address_modes: [wgt::AddressMode; 3],
    pub mag_filter: wgt::FilterMode,
    pub min_filter: wgt::FilterMode,
    pub mipmap_filter: wgt::FilterMode,
    pub lod_min_clamp: f32,
    pub lod_max_clamp: f32,
    pub compare: Option<&'a wgt::CompareFunction>,
    pub anisotropy_clamp: u8,
}
#[no_mangle]
pub extern "C" fn wgpu_server_new(factory: IdentityRecyclerFactory) -> *mut Global {
log::info!("Initializing WGPU server");
let global = Global(wgc::hub::Global::new("wgpu", factory));
let global = Global(wgc::hub::Global::new(
"wgpu",
factory,
wgt::BackendBit::PRIMARY,
));
Box::into_raw(Box::new(global))
}
@ -124,7 +45,7 @@ pub unsafe extern "C" fn wgpu_server_delete(global: *mut Global) {
#[no_mangle]
pub extern "C" fn wgpu_server_poll_all_devices(global: &Global, force_wait: bool) {
global.poll_all_devices(force_wait);
global.poll_all_devices(force_wait).unwrap();
}
/// Request an adapter according to the specified options.
@ -144,16 +65,23 @@ pub unsafe extern "C" fn wgpu_server_instance_request_adapter(
id_length: usize,
) -> i8 {
let ids = slice::from_raw_parts(ids, id_length);
match global.pick_adapter(
match global.request_adapter(
desc,
wgt::UnsafeExtensions::disallow(),
wgc::instance::AdapterInputs::IdSet(ids, |i| i.backend()),
) {
Some(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
None => -1,
Ok(id) => ids.iter().position(|&i| i == id).unwrap() as i8,
Err(e) => {
log::warn!("request_adapter: {:?}", e);
-1
}
}
}
#[no_mangle]
pub extern "C" fn wgpu_server_fill_default_limits(limits: &mut wgt::Limits) {
*limits = wgt::Limits::default();
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_adapter_request_device(
global: &Global,
@ -165,17 +93,18 @@ pub unsafe extern "C" fn wgpu_server_adapter_request_device(
let trace_path = trace_string
.as_ref()
.map(|string| std::path::Path::new(string.as_str()));
gfx_select!(self_id => global.adapter_request_device(self_id, desc, trace_path, new_id));
gfx_select!(self_id => global.adapter_request_device(self_id, desc, trace_path, new_id))
.unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_adapter_destroy(global: &Global, adapter_id: id::AdapterId) {
pub extern "C" fn wgpu_server_adapter_drop(global: &Global, adapter_id: id::AdapterId) {
gfx_select!(adapter_id => global.adapter_destroy(adapter_id))
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_destroy(global: &Global, self_id: id::DeviceId) {
gfx_select!(self_id => global.device_destroy(self_id))
pub extern "C" fn wgpu_server_device_drop(global: &Global, self_id: id::DeviceId) {
gfx_select!(self_id => global.device_drop(self_id))
}
#[no_mangle]
@ -185,7 +114,8 @@ pub extern "C" fn wgpu_server_device_create_buffer(
desc: &wgt::BufferDescriptor<RawString>,
new_id: id::BufferId,
) {
gfx_select!(self_id => global.device_create_buffer(self_id, desc, new_id));
let desc = desc.map_label(cow_label);
gfx_select!(self_id => global.device_create_buffer(self_id, &desc, new_id)).unwrap();
}
#[no_mangle]
@ -200,7 +130,8 @@ pub extern "C" fn wgpu_server_buffer_map(
buffer_id,
start .. start + size,
operation
));
))
.unwrap();
}
/// # Safety
@ -212,47 +143,204 @@ pub unsafe extern "C" fn wgpu_server_buffer_get_mapped_range(
global: &Global,
buffer_id: id::BufferId,
start: wgt::BufferAddress,
size: wgt::BufferAddress,
size: Option<wgt::BufferSize>,
) -> *mut u8 {
gfx_select!(buffer_id => global.buffer_get_mapped_range(
buffer_id,
start,
wgt::BufferSize(size)
size
))
.unwrap()
}
#[no_mangle]
pub extern "C" fn wgpu_server_buffer_unmap(global: &Global, buffer_id: id::BufferId) {
gfx_select!(buffer_id => global.buffer_unmap(buffer_id));
gfx_select!(buffer_id => global.buffer_unmap(buffer_id)).unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_buffer_destroy(global: &Global, self_id: id::BufferId) {
gfx_select!(self_id => global.buffer_destroy(self_id));
pub extern "C" fn wgpu_server_buffer_drop(global: &Global, self_id: id::BufferId) {
gfx_select!(self_id => global.buffer_drop(self_id, false));
}
// Backend-generic replay entry points, invoked through `gfx_select!` from
// the `wgpu_server_*_action` FFI functions below.
trait GlobalExt {
    fn device_action<B: wgc::hub::GfxBackend>(&self, self_id: id::DeviceId, action: DeviceAction);
    fn texture_action<B: wgc::hub::GfxBackend>(
        &self,
        self_id: id::TextureId,
        action: TextureAction,
    );
    fn command_encoder_action<B: wgc::hub::GfxBackend>(
        &self,
        self_id: id::CommandEncoderId,
        action: CommandEncoderAction,
    );
}
impl GlobalExt for Global {
    /// Replays one deserialized device-scoped action against the wgpu hub.
    /// Every call `.unwrap()`s: a malformed action from the content process
    /// aborts the GPU process rather than being silently ignored.
    fn device_action<B: wgc::hub::GfxBackend>(&self, self_id: id::DeviceId, action: DeviceAction) {
        let implicit_ids = None; //TODO
        match action {
            DeviceAction::CreateBuffer(id, desc) => {
                self.device_create_buffer::<B>(self_id, &desc, id).unwrap();
            }
            DeviceAction::CreateTexture(id, desc) => {
                self.device_create_texture::<B>(self_id, &desc, id).unwrap();
            }
            DeviceAction::CreateSampler(id, desc) => {
                self.device_create_sampler::<B>(self_id, &desc, id).unwrap();
            }
            DeviceAction::CreateBindGroupLayout(id, desc) => {
                self.device_create_bind_group_layout::<B>(self_id, &desc, id)
                    .unwrap();
            }
            DeviceAction::CreatePipelineLayout(id, desc) => {
                self.device_create_pipeline_layout::<B>(self_id, &desc, id)
                    .unwrap();
            }
            DeviceAction::CreateBindGroup(id, desc) => {
                self.device_create_bind_group::<B>(self_id, &desc, id)
                    .unwrap();
            }
            DeviceAction::CreateShaderModule(id, spirv) => {
                self.device_create_shader_module::<B>(
                    self_id,
                    wgc::pipeline::ShaderModuleSource::SpirV(spirv),
                    id,
                )
                .unwrap();
            }
            DeviceAction::CreateComputePipeline(id, desc) => {
                self.device_create_compute_pipeline::<B>(self_id, &desc, id, implicit_ids)
                    .unwrap();
            }
            DeviceAction::CreateRenderPipeline(id, desc) => {
                self.device_create_render_pipeline::<B>(self_id, &desc, id, implicit_ids)
                    .unwrap();
            }
            // NOTE(review): only validates the encoder; the bundle id and
            // recorded pass are discarded — confirm this is intentional.
            DeviceAction::CreateRenderBundle(_id, desc, _base) => {
                wgc::command::RenderBundleEncoder::new(&desc, self_id, None).unwrap();
            }
            DeviceAction::CreateCommandEncoder(id, desc) => {
                self.device_create_command_encoder::<B>(self_id, &desc, id)
                    .unwrap();
            }
        }
    }
    /// Replays one texture-scoped action (view creation).
    fn texture_action<B: wgc::hub::GfxBackend>(
        &self,
        self_id: id::TextureId,
        action: TextureAction,
    ) {
        match action {
            TextureAction::CreateView(id, desc) => {
                self.texture_create_view::<B>(self_id, &desc, id).unwrap();
            }
        }
    }
    /// Replays one recorded command-encoder action (copies and passes).
    fn command_encoder_action<B: wgc::hub::GfxBackend>(
        &self,
        self_id: id::CommandEncoderId,
        action: CommandEncoderAction,
    ) {
        match action {
            CommandEncoderAction::CopyBufferToBuffer {
                src,
                src_offset,
                dst,
                dst_offset,
                size,
            } => self
                .command_encoder_copy_buffer_to_buffer::<B>(
                    self_id, src, src_offset, dst, dst_offset, size,
                )
                .unwrap(),
            CommandEncoderAction::CopyBufferToTexture { src, dst, size } => self
                .command_encoder_copy_buffer_to_texture::<B>(self_id, &src, &dst, &size)
                .unwrap(),
            CommandEncoderAction::CopyTextureToBuffer { src, dst, size } => self
                .command_encoder_copy_texture_to_buffer::<B>(self_id, &src, &dst, &size)
                .unwrap(),
            CommandEncoderAction::CopyTextureToTexture { src, dst, size } => self
                .command_encoder_copy_texture_to_texture::<B>(self_id, &src, &dst, &size)
                .unwrap(),
            CommandEncoderAction::RunComputePass { base } => {
                self.command_encoder_run_compute_pass_impl::<B>(self_id, base.as_ref())
                    .unwrap();
            }
            CommandEncoderAction::RunRenderPass {
                base,
                target_colors,
                target_depth_stencil,
            } => {
                self.command_encoder_run_render_pass_impl::<B>(
                    self_id,
                    base.as_ref(),
                    &target_colors,
                    target_depth_stencil.as_ref(),
                )
                .unwrap();
            }
        }
    }
}
/// Deserializes a `DeviceAction` from `byte_buf` (bincode) and replays it.
///
/// # Safety
/// `byte_buf` must describe a live allocation of `len` bytes.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_device_action(
    global: &Global,
    self_id: id::DeviceId,
    byte_buf: &ByteBuf,
) {
    let action = bincode::deserialize(byte_buf.as_slice()).unwrap();
    gfx_select!(self_id => global.device_action(self_id, action));
}
/// Deserializes a `TextureAction` from `byte_buf` (bincode) and replays it.
///
/// # Safety
/// `byte_buf` must describe a live allocation of `len` bytes.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_texture_action(
    global: &Global,
    self_id: id::TextureId,
    byte_buf: &ByteBuf,
) {
    let action = bincode::deserialize(byte_buf.as_slice()).unwrap();
    gfx_select!(self_id => global.texture_action(self_id, action));
}
/// Deserializes a `CommandEncoderAction` from `byte_buf` (bincode) and
/// replays it.
///
/// # Safety
/// `byte_buf` must describe a live allocation of `len` bytes.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_command_encoder_action(
    global: &Global,
    self_id: id::CommandEncoderId,
    byte_buf: &ByteBuf,
) {
    let action = bincode::deserialize(byte_buf.as_slice()).unwrap();
    gfx_select!(self_id => global.command_encoder_action(self_id, action));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_encoder(
global: &Global,
self_id: id::DeviceId,
desc: &wgt::CommandEncoderDescriptor,
desc: &wgt::CommandEncoderDescriptor<RawString>,
new_id: id::CommandEncoderId,
) {
gfx_select!(self_id => global.device_create_command_encoder(self_id, &desc, new_id));
let desc = desc.map_label(cow_label);
gfx_select!(self_id => global.device_create_command_encoder(self_id, &desc, new_id)).unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_encoder_finish(
global: &Global,
self_id: id::CommandEncoderId,
desc: &wgt::CommandBufferDescriptor,
desc: &wgt::CommandBufferDescriptor<RawString>,
) {
gfx_select!(self_id => global.command_encoder_finish(self_id, desc));
let desc = desc.map_label(cow_label);
gfx_select!(self_id => global.command_encoder_finish(self_id, &desc)).unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_encoder_destroy(global: &Global, self_id: id::CommandEncoderId) {
gfx_select!(self_id => global.command_encoder_destroy(self_id));
pub extern "C" fn wgpu_server_encoder_drop(global: &Global, self_id: id::CommandEncoderId) {
gfx_select!(self_id => global.command_encoder_drop(self_id));
}
/// # Safety
@ -260,24 +348,8 @@ pub extern "C" fn wgpu_server_encoder_destroy(global: &Global, self_id: id::Comm
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `byte_length` elements.
#[no_mangle]
pub extern "C" fn wgpu_server_command_buffer_destroy(
global: &Global,
self_id: id::CommandBufferId,
) {
gfx_select!(self_id => global.command_buffer_destroy(self_id));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encoder_copy_buffer_to_buffer(
global: &Global,
self_id: id::CommandEncoderId,
source_id: id::BufferId,
source_offset: wgt::BufferAddress,
destination_id: id::BufferId,
destination_offset: wgt::BufferAddress,
size: wgt::BufferAddress,
) {
gfx_select!(self_id => global.command_encoder_copy_buffer_to_buffer(self_id, source_id, source_offset, destination_id, destination_offset, size));
pub extern "C" fn wgpu_server_command_buffer_drop(global: &Global, self_id: id::CommandBufferId) {
gfx_select!(self_id => global.command_buffer_drop(self_id));
}
#[no_mangle]
@ -288,61 +360,7 @@ pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_buffer(
destination: &wgc::command::BufferCopyView,
size: &wgt::Extent3d,
) {
gfx_select!(self_id => global.command_encoder_copy_texture_to_buffer(self_id, source, destination, size));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encoder_copy_buffer_to_texture(
global: &Global,
self_id: id::CommandEncoderId,
source: &wgc::command::BufferCopyView,
destination: &wgc::command::TextureCopyView,
size: &wgt::Extent3d,
) {
gfx_select!(self_id => global.command_encoder_copy_buffer_to_texture(self_id, source, destination, size));
}
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encoder_copy_texture_to_texture(
global: &Global,
self_id: id::CommandEncoderId,
source: &wgc::command::TextureCopyView,
destination: &wgc::command::TextureCopyView,
size: &wgt::Extent3d,
) {
gfx_select!(self_id => global.command_encoder_copy_texture_to_texture(self_id, source, destination, size));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointers are
/// valid for `color_attachments_length` and `command_length` elements,
/// respectively.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encode_compute_pass(
global: &Global,
self_id: id::CommandEncoderId,
bytes: *const u8,
byte_length: usize,
) {
let raw_data = slice::from_raw_parts(bytes, byte_length);
gfx_select!(self_id => global.command_encoder_run_compute_pass(self_id, raw_data));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointers are
/// valid for `color_attachments_length` and `command_length` elements,
/// respectively.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_encode_render_pass(
global: &Global,
self_id: id::CommandEncoderId,
commands: *const u8,
command_length: usize,
) {
let raw_pass = slice::from_raw_parts(commands, command_length);
gfx_select!(self_id => global.command_encoder_run_render_pass(self_id, raw_pass));
gfx_select!(self_id => global.command_encoder_copy_texture_to_buffer(self_id, source, destination, size)).unwrap();
}
/// # Safety
@ -357,7 +375,7 @@ pub unsafe extern "C" fn wgpu_server_queue_submit(
command_buffer_id_length: usize,
) {
let command_buffers = slice::from_raw_parts(command_buffer_ids, command_buffer_id_length);
gfx_select!(self_id => global.queue_submit(self_id, command_buffers));
gfx_select!(self_id => global.queue_submit(self_id, command_buffers)).unwrap();
}
/// # Safety
@ -374,7 +392,8 @@ pub unsafe extern "C" fn wgpu_server_queue_write_buffer(
data_length: usize,
) {
let data = slice::from_raw_parts(data, data_length);
gfx_select!(self_id => global.queue_write_buffer(self_id, buffer_id, buffer_offset, data));
gfx_select!(self_id => global.queue_write_buffer(self_id, buffer_id, buffer_offset, data))
.unwrap();
}
/// # Safety
@ -392,248 +411,57 @@ pub unsafe extern "C" fn wgpu_server_queue_write_texture(
extent: &wgt::Extent3d,
) {
let data = slice::from_raw_parts(data, data_length);
gfx_select!(self_id => global.queue_write_texture(self_id, destination, data, layout, extent));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `entries_length` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_device_create_bind_group_layout(
global: &Global,
self_id: id::DeviceId,
desc: &BindGroupLayoutDescriptor,
new_id: id::BindGroupLayoutId,
) {
let entries = slice::from_raw_parts(desc.entries, desc.entries_length);
let bindings = entries
.iter()
.map(|entry| wgt::BindGroupLayoutEntry {
binding: entry.binding,
visibility: entry.visibility,
ty: match entry.ty {
RawBindingType::UniformBuffer => wgt::BindingType::UniformBuffer {
dynamic: entry.has_dynamic_offset,
},
RawBindingType::StorageBuffer => wgt::BindingType::StorageBuffer {
dynamic: entry.has_dynamic_offset,
readonly: false,
},
RawBindingType::ReadonlyStorageBuffer => wgt::BindingType::StorageBuffer {
dynamic: entry.has_dynamic_offset,
readonly: true,
},
RawBindingType::Sampler => wgt::BindingType::Sampler { comparison: false },
RawBindingType::ComparisonSampler => wgt::BindingType::Sampler { comparison: true },
RawBindingType::SampledTexture => wgt::BindingType::SampledTexture {
dimension: entry.view_dimension.unwrap(),
component_type: entry.texture_component_type.unwrap(),
multisampled: entry.multisampled,
},
RawBindingType::ReadonlyStorageTexture => wgt::BindingType::StorageTexture {
dimension: entry.view_dimension.unwrap(),
component_type: entry.texture_component_type.unwrap(),
format: entry.storage_texture_format.unwrap(),
readonly: true,
},
RawBindingType::WriteonlyStorageTexture => wgt::BindingType::StorageTexture {
dimension: entry.view_dimension.unwrap(),
component_type: entry.texture_component_type.unwrap(),
format: entry.storage_texture_format.unwrap(),
readonly: false,
},
},
..Default::default()
})
.collect::<Vec<_>>();
let desc = wgt::BindGroupLayoutDescriptor {
label: None,
bindings: &bindings,
};
gfx_select!(self_id => global.device_create_bind_group_layout(self_id, &desc, new_id)).unwrap();
gfx_select!(self_id => global.queue_write_texture(self_id, destination, data, layout, extent))
.unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_bind_group_layout_destroy(
pub extern "C" fn wgpu_server_bind_group_layout_drop(
global: &Global,
self_id: id::BindGroupLayoutId,
) {
gfx_select!(self_id => global.bind_group_layout_destroy(self_id));
gfx_select!(self_id => global.bind_group_layout_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_pipeline_layout(
global: &Global,
self_id: id::DeviceId,
desc: &wgc::binding_model::PipelineLayoutDescriptor,
new_id: id::PipelineLayoutId,
) {
gfx_select!(self_id => global.device_create_pipeline_layout(self_id, desc, new_id)).unwrap();
pub extern "C" fn wgpu_server_pipeline_layout_drop(global: &Global, self_id: id::PipelineLayoutId) {
gfx_select!(self_id => global.pipeline_layout_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_pipeline_layout_destroy(
global: &Global,
self_id: id::PipelineLayoutId,
) {
gfx_select!(self_id => global.pipeline_layout_destroy(self_id));
}
/// # Safety
///
/// This function is unsafe as there is no guarantee that the given pointer is
/// valid for `entries_length` elements.
#[no_mangle]
pub unsafe extern "C" fn wgpu_server_device_create_bind_group(
global: &Global,
self_id: id::DeviceId,
desc: &BindGroupDescriptor,
new_id: id::BindGroupId,
) {
let entries = slice::from_raw_parts(desc.entries, desc.entries_length);
let bindings = entries
.iter()
.map(|entry| wgc::binding_model::BindGroupEntry {
binding: entry.binding,
resource: if let Some(id) = entry.buffer {
wgc::binding_model::BindingResource::Buffer(wgc::binding_model::BufferBinding {
buffer: id,
offset: entry.offset,
size: entry.size,
})
} else if let Some(id) = entry.sampler {
wgc::binding_model::BindingResource::Sampler(id)
} else if let Some(id) = entry.texture_view {
wgc::binding_model::BindingResource::TextureView(id)
} else {
panic!("Unrecognized binding entry: {:?}", entry);
},
})
.collect::<Vec<_>>();
let desc = wgc::binding_model::BindGroupDescriptor {
label: None,
layout: desc.layout,
bindings: &bindings,
};
gfx_select!(self_id => global.device_create_bind_group(self_id, &desc, new_id));
pub extern "C" fn wgpu_server_bind_group_drop(global: &Global, self_id: id::BindGroupId) {
gfx_select!(self_id => global.bind_group_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_bind_group_destroy(global: &Global, self_id: id::BindGroupId) {
gfx_select!(self_id => global.bind_group_destroy(self_id));
pub extern "C" fn wgpu_server_shader_module_drop(global: &Global, self_id: id::ShaderModuleId) {
gfx_select!(self_id => global.shader_module_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_shader_module(
global: &Global,
self_id: id::DeviceId,
desc: &wgc::pipeline::ShaderModuleDescriptor,
new_id: id::ShaderModuleId,
) {
gfx_select!(self_id => global.device_create_shader_module(self_id, desc, new_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_shader_module_destroy(global: &Global, self_id: id::ShaderModuleId) {
gfx_select!(self_id => global.shader_module_destroy(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_compute_pipeline(
global: &Global,
self_id: id::DeviceId,
desc: &wgc::pipeline::ComputePipelineDescriptor,
new_id: id::ComputePipelineId,
) {
gfx_select!(self_id => global.device_create_compute_pipeline(self_id, desc, new_id)).unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_compute_pipeline_destroy(
pub extern "C" fn wgpu_server_compute_pipeline_drop(
global: &Global,
self_id: id::ComputePipelineId,
) {
gfx_select!(self_id => global.compute_pipeline_destroy(self_id));
gfx_select!(self_id => global.compute_pipeline_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_render_pipeline(
global: &Global,
self_id: id::DeviceId,
desc: &wgc::pipeline::RenderPipelineDescriptor,
new_id: id::RenderPipelineId,
) {
gfx_select!(self_id => global.device_create_render_pipeline(self_id, desc, new_id)).unwrap();
pub extern "C" fn wgpu_server_render_pipeline_drop(global: &Global, self_id: id::RenderPipelineId) {
gfx_select!(self_id => global.render_pipeline_drop(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_render_pipeline_destroy(
global: &Global,
self_id: id::RenderPipelineId,
) {
gfx_select!(self_id => global.render_pipeline_destroy(self_id));
pub extern "C" fn wgpu_server_texture_drop(global: &Global, self_id: id::TextureId) {
gfx_select!(self_id => global.texture_drop(self_id, false));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_texture(
global: &Global,
self_id: id::DeviceId,
desc: &wgt::TextureDescriptor<RawString>,
new_id: id::TextureId,
) {
gfx_select!(self_id => global.device_create_texture(self_id, desc, new_id));
pub extern "C" fn wgpu_server_texture_view_drop(global: &Global, self_id: id::TextureViewId) {
gfx_select!(self_id => global.texture_view_drop(self_id)).unwrap();
}
#[no_mangle]
pub extern "C" fn wgpu_server_texture_create_view(
global: &Global,
self_id: id::TextureId,
desc: Option<&wgt::TextureViewDescriptor<RawString>>,
new_id: id::TextureViewId,
) {
gfx_select!(self_id => global.texture_create_view(self_id, desc, new_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_texture_destroy(global: &Global, self_id: id::TextureId) {
gfx_select!(self_id => global.texture_destroy(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_texture_view_destroy(global: &Global, self_id: id::TextureViewId) {
gfx_select!(self_id => global.texture_view_destroy(self_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_device_create_sampler(
global: &Global,
self_id: id::DeviceId,
desc: &SamplerDescriptor,
new_id: id::SamplerId,
) {
let desc = wgt::SamplerDescriptor {
label: desc.label,
address_mode_u: desc.address_modes[0],
address_mode_v: desc.address_modes[1],
address_mode_w: desc.address_modes[2],
mag_filter: desc.mag_filter,
min_filter: desc.min_filter,
mipmap_filter: desc.mipmap_filter,
lod_min_clamp: desc.lod_min_clamp,
lod_max_clamp: desc.lod_max_clamp,
compare: desc.compare.cloned(),
anisotropy_clamp: if desc.anisotropy_clamp > 1 {
Some(desc.anisotropy_clamp)
} else {
None
},
_non_exhaustive: unsafe { wgt::NonExhaustive::new() },
};
gfx_select!(self_id => global.device_create_sampler(self_id, &desc, new_id));
}
#[no_mangle]
pub extern "C" fn wgpu_server_sampler_destroy(global: &Global, self_id: id::SamplerId) {
gfx_select!(self_id => global.sampler_destroy(self_id));
pub extern "C" fn wgpu_server_sampler_drop(global: &Global, self_id: id::SamplerId) {
gfx_select!(self_id => global.sampler_drop(self_id));
}

Просмотреть файл

@ -14,7 +14,6 @@ namespace ffi {
#define WGPU_INLINE
#define WGPU_FUNC
#define WGPU_DESTRUCTOR_SAFE_FUNC
extern "C" {
#include "wgpu_ffi_generated.h"
@ -22,7 +21,6 @@ extern "C" {
#undef WGPU_INLINE
#undef WGPU_FUNC
#undef WGPU_DESTRUCTOR_SAFE_FUNC
} // namespace ffi
} // namespace webgpu

1
third_party/rust/atom/.cargo-checksum.json поставляемый
Просмотреть файл

@ -1 +0,0 @@
{"files":{"Cargo.toml":"e002ed3dd38dc0551a851c4266742ed6d51b7173556c370b57d1b156b59c7350","LICENSE":"09e8a9bcec8067104652c168685ab0931e7868f9c8284b66f5ae6edae5f1130b","examples/fifo.rs":"f6a1091ecc3061c8c51a5906a93abb2f43853f23fbe56b3b36430ab0bece2e10","examples/simple.rs":"5590003f2775307d0d00ef6bcd2c009a011f71850033fca4ed7d2105e9a88b1c","readme.md":"a91b178c0b0fab0af36854d760e354808c36bbeda1bf11e77a8e02a5e4ad1a9d","src/lib.rs":"7a682b15762ad81e2cbc87add0d7538bc9627ddee5eb60af4d34b7276df0b974","tests/atom.rs":"d94cdd5a1bb9626b21642a4b2345927991e822b2623f1971f053c48e99979db8"},"package":"3c86699c3f02778ec07158376991c8f783dd1f2f95c579ffaf0738dc984b2fe2"}

175
third_party/rust/atom/LICENSE поставляемый
Просмотреть файл

@ -1,175 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

83
third_party/rust/atom/examples/fifo.rs поставляемый
Просмотреть файл

@ -1,83 +0,0 @@
// Copyright 2015 Colin Sherratt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate atom;
use atom::*;
use std::mem;
use std::sync::{Arc, Barrier};
use std::thread;
const THREADS: usize = 100;
#[derive(Debug)]
struct Link {
next: AtomSetOnce<Box<Link>>,
}
impl Drop for Link {
fn drop(&mut self) {
// This is done to avoid a recusive drop of the List
while let Some(mut h) = self.next.atom().take() {
self.next = mem::replace(&mut h.next, AtomSetOnce::empty());
}
}
}
fn main() {
let b = Arc::new(Barrier::new(THREADS + 1));
let head = Arc::new(Link {
next: AtomSetOnce::empty(),
});
for _ in 0..THREADS {
let b = b.clone();
let head = head.clone();
thread::spawn(move || {
let mut hptr = &*head;
for _ in 0..10_000 {
let mut my_awesome_node = Box::new(Link {
next: AtomSetOnce::empty(),
});
loop {
while let Some(h) = hptr.next.get() {
hptr = h;
}
my_awesome_node = match hptr.next.set_if_none(my_awesome_node) {
Some(v) => v,
None => break,
};
}
}
b.wait();
});
}
b.wait();
let mut hptr = &*head;
let mut count = 0;
while let Some(h) = hptr.next.get() {
hptr = h;
count += 1;
}
println!(
"Using {} threads we wrote {} links at the same time!",
THREADS, count
);
}

33
third_party/rust/atom/examples/simple.rs поставляемый
Просмотреть файл

@ -1,33 +0,0 @@
extern crate atom;
use atom::*;
use std::sync::Arc;
use std::thread;
/// Demonstrates racing eight threads to `take` a value out of a shared
/// `Atom`: exactly one thread wins the race, the rest observe `None`.
fn main() {
    // Shared, initially-empty atom; then store the boxed value 75 in it.
    let atom_handle = Arc::new(Atom::empty());
    atom_handle.swap(Box::new(75));

    // Spawn a bunch of threads that will each try to take the value.
    let mut workers = Vec::new();
    for _ in 0..8 {
        let atom_handle = atom_handle.clone();
        workers.push(thread::spawn(move || {
            // Take the contents of the atom; only one thread wins the race.
            match atom_handle.take() {
                Some(v) => println!("I got it: {:?} :D", v),
                None => println!("I did not get it :("),
            }
        }));
    }

    // Join all the worker threads before exiting.
    for worker in workers {
        worker.join().unwrap();
    }
}

101
third_party/rust/atom/readme.md поставляемый
Просмотреть файл

@ -1,101 +0,0 @@
Atom
====
[![Build Status](https://travis-ci.org/slide-rs/atom.svg?branch=master)](https://travis-ci.org/csherratt/atom)
[![Atom](http://meritbadge.herokuapp.com/atom)](https://crates.io/crates/atom)
`Atom` is a simple abstraction around Rust's `AtomicPtr`. It provides a simple, wait-free way to exchange
data between threads safely. `Atom` is built around the principle that an atomic swap can be used to
safely emulate Rust's ownership.
![store](https://raw.githubusercontent.com/csherratt/atom/master/.store.png)
Using [`store`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.store) to set a shared
atomic pointer is unsafe in rust (or any language) because the contents of the pointer can be overwritten at any
point in time causing the contents of the pointer to be lost. This can cause your system to leak memory, and
if you are expecting that memory to do something useful (like wake a sleeping thread), you are in trouble.
![load](https://raw.githubusercontent.com/csherratt/atom/master/.load.png)
Similarly, [`load`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.store)
is unsafe since there is no guarantee that that pointer will live for even a cycle after you have read it. Another
thread may modify the pointer, or free it. For `load` to be safe you need to have some outside contract to preserve
the correct ownership semantics.
![swap](https://raw.githubusercontent.com/csherratt/atom/master/.swap.png)
A [`swap`](https://doc.rust-lang.org/std/sync/atomic/struct.AtomicPtr.html#method.swap) is special as it allows
a reference to be exchanged without the risk of that pointer being freed, or stomped on. When a thread
swaps an `AtomicPtr` the old pointer ownership is moved to the caller, and the `AtomicPtr` takes ownership of the new
pointer.
Using `Atom`
------------
Add atom to your `Cargo.toml`
```
[dependencies]
atom="*"
```
A short example:
```rust
extern crate atom;
use std::sync::Arc;
use std::thread;
use atom::*;
fn main() {
// Create an empty atom
let shared_atom = Arc::new(Atom::empty());
// set the value 75
shared_atom.swap(Box::new(75));
// Spawn a bunch of threads that will try to take the value
let threads: Vec<thread::JoinHandle<()>> = (0..8).map(|_| {
let shared_atom = shared_atom.clone();
thread::spawn(move || {
// Take the contents of the atom, only one will win the race
if let Some(v) = shared_atom.take() {
println!("I got it: {:?} :D", v);
} else {
println!("I did not get it :(");
}
})
}).collect();
// join the threads
for t in threads { t.join().unwrap(); }
```
The result will look something like this:
```
I did not get it :(
I got it: 75 :D
I did not get it :(
I did not get it :(
I did not get it :(
I did not get it :(
I did not get it :(
I did not get it :(
```
Using an `Atom` has some advantages over using a raw `AtomicPtr`. First, you don't need any
unsafe code in order to convert the `Box<T>` to and from a raw pointer — the library handles that for
you. Secondly, `Atom` implements `drop` so you won't accidentally leak a pointer when dropping
your data structure.
AtomSetOnce
-----------
This is an additional bit of abstraction around an Atom. Recall that I said `load` was unsafe
unless you have additional restrictions. `AtomSetOnce`, as the name indicates, may only be
set once, and then it may never be unset. We know that if the `Atom` is set the pointer will be
valid for the lifetime of the `Atom`. This means we can implement `Deref` in a safe way.
Take a look at the `fifo` example to see how this can be used to write a lock-free linked list.

340
third_party/rust/atom/src/lib.rs поставляемый
Просмотреть файл

@ -1,340 +0,0 @@
// Copyright 2015 Colin Sherratt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fmt::{self, Debug, Formatter};
use std::marker::PhantomData;
use std::mem;
use std::ops::Deref;
use std::ptr;
use std::sync::atomic::AtomicPtr;
use std::sync::atomic::Ordering;
use std::sync::Arc;
/// An Atom wraps an AtomicPtr, it allows for safe mutation of an atomic
/// into common Rust Types.
pub struct Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    // Owning pointer stored as a type-erased `*mut ()`; null encodes "empty".
    inner: AtomicPtr<()>,
    // Marks logical ownership of a `P` (drop-check/variance marker only).
    data: PhantomData<P>,
}
impl<P> Debug for Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    // Prints only the raw pointer value; the pointee is never dereferenced,
    // so formatting is safe even while other threads race on the atom.
    fn fmt(&self, f: &mut Formatter) -> Result<(), fmt::Error> {
        write!(f, "atom({:?})", self.inner.load(Ordering::Relaxed))
    }
}
impl<P> Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    /// Create an empty Atom (stores a null pointer).
    pub fn empty() -> Atom<P> {
        Atom {
            inner: AtomicPtr::new(ptr::null_mut()),
            data: PhantomData,
        }
    }

    /// Create a new Atom from pointer P; the Atom takes ownership of `value`.
    pub fn new(value: P) -> Atom<P> {
        Atom {
            inner: AtomicPtr::new(unsafe { value.into_raw() }),
            data: PhantomData,
        }
    }

    /// Swap a new value into the Atom. The old value will be returned
    /// (`None` if the Atom was empty). A single atomic swap cannot lose
    /// either value to a concurrent writer.
    pub fn swap(&self, v: P) -> Option<P> {
        let new = unsafe { v.into_raw() };
        // AcqRel: publish the new value's writes, acquire the old value's.
        let old = self.inner.swap(new, Ordering::AcqRel);
        if !old.is_null() {
            Some(unsafe { FromRawPtr::from_raw(old) })
        } else {
            None
        }
    }

    /// Take the value of the Atom replacing it with null pointer
    /// Returning the contents. If the contents was a `null` pointer the
    /// result will be `None`.
    pub fn take(&self) -> Option<P> {
        let old = self.inner.swap(ptr::null_mut(), Ordering::Acquire);
        if !old.is_null() {
            Some(unsafe { FromRawPtr::from_raw(old) })
        } else {
            None
        }
    }

    /// This will do a `CAS` setting the value only if it is NULL.
    /// Returns `None` if the value was written; otherwise `Some(v)` is
    /// returned, where `v` is the same value that you passed in
    /// (ownership is handed back to the caller on failure).
    pub fn set_if_none(&self, v: P) -> Option<P> {
        let new = unsafe { v.into_raw() };
        // NOTE(review): `compare_and_swap` is deprecated in modern Rust in
        // favour of `compare_exchange`; left as-is in this vendored copy.
        let old = self.inner
            .compare_and_swap(ptr::null_mut(), new, Ordering::Release);
        if !old.is_null() {
            // CAS lost: reconstruct `v` from `new` so the caller regains it.
            Some(unsafe { FromRawPtr::from_raw(new) })
        } else {
            None
        }
    }

    /// Take the current content, write it into P's `next` slot, then CAS
    /// this Atom to point at P. This can be used to create a LIFO.
    ///
    /// Returns true if this set migrated the Atom from null.
    pub fn replace_and_set_next(&self, mut value: P) -> bool
    where
        P: GetNextMut<NextPtr = Option<P>>,
    {
        unsafe {
            let next = value.get_next() as *mut Option<P>;
            let raw = value.into_raw();
            // If next was set to Some(P) we want to ensure that old value
            // is dropped now; the slot is re-initialised via `ptr::write`
            // inside the loop below.
            drop(ptr::read(next));
            loop {
                // Relaxed load is fine: the CAS below re-validates `pcurrent`.
                let pcurrent = self.inner.load(Ordering::Relaxed);
                let current = if pcurrent.is_null() {
                    None
                } else {
                    Some(FromRawPtr::from_raw(pcurrent))
                };
                // Stash the current head into our node's `next` slot.
                ptr::write(next, current);
                let last = self.inner.compare_and_swap(pcurrent, raw, Ordering::AcqRel);
                if last == pcurrent {
                    return last.is_null();
                }
                // CAS lost: retry against the new head.
            }
        }
    }

    /// Check to see if an atom is None
    ///
    /// This only means that the contents was None when it was measured;
    /// another thread may have changed it since.
    pub fn is_none(&self) -> bool {
        self.inner.load(Ordering::Relaxed).is_null()
    }
}
impl<P> Drop for Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    fn drop(&mut self) {
        unsafe {
            // Reclaim ownership of any value still stored so it gets
            // dropped. `&mut self` guarantees exclusive access, so a
            // Relaxed load suffices here.
            let ptr = self.inner.load(Ordering::Relaxed);
            if !ptr.is_null() {
                let _: P = FromRawPtr::from_raw(ptr);
            }
        }
    }
}

// SAFETY(review): these blanket impls place no `Send`/`Sync` bound on `P`,
// so an `Atom` could move a non-Send payload across threads — presumably
// only sound for the `Box`/`Arc` pointer types used in this crate; TODO
// confirm against upstream `atom`.
unsafe impl<P> Send for Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
}

unsafe impl<P> Sync for Atom<P>
where
    P: IntoRawPtr + FromRawPtr,
{
}
/// Convert `self` into a raw, type-erased pointer, transferring ownership
/// to the caller.
pub trait IntoRawPtr {
    unsafe fn into_raw(self) -> *mut ();
}

/// Reconstruct an owned value from a raw pointer previously produced by
/// `IntoRawPtr::into_raw`.
pub trait FromRawPtr {
    unsafe fn from_raw(ptr: *mut ()) -> Self;
}

impl<T> IntoRawPtr for Box<T> {
    #[inline]
    unsafe fn into_raw(self) -> *mut () {
        Box::into_raw(self) as *mut ()
    }
}

impl<T> FromRawPtr for Box<T> {
    #[inline]
    unsafe fn from_raw(ptr: *mut ()) -> Box<T> {
        Box::from_raw(ptr as *mut T)
    }
}

impl<T> IntoRawPtr for Arc<T> {
    #[inline]
    unsafe fn into_raw(self) -> *mut () {
        // Arc::into_raw yields a *const T; cast away constness for storage.
        Arc::into_raw(self) as *mut T as *mut ()
    }
}

impl<T> FromRawPtr for Arc<T> {
    #[inline]
    unsafe fn from_raw(ptr: *mut ()) -> Arc<T> {
        Arc::from_raw(ptr as *const () as *const T)
    }
}
/// Transforms lifetime of the second pointer to match the first.
///
/// SAFETY: the caller must guarantee the referent stays alive for `'a`;
/// `AtomSetOnce::get` relies on its set-once invariant for this.
#[inline]
unsafe fn copy_lifetime<'a, S: ?Sized, T: ?Sized + 'a>(_ptr: &'a S, ptr: &T) -> &'a T {
    mem::transmute(ptr)
}

/// Transforms lifetime of the second pointer to match the first
/// (mutable-reference variant, used by `AtomSetOnce::get_mut`).
#[inline]
unsafe fn copy_mut_lifetime<'a, S: ?Sized, T: ?Sized + 'a>(_ptr: &'a S, ptr: &mut T) -> &'a mut T {
    mem::transmute(ptr)
}
/// This is a restricted version of the Atom. It allows for only
/// `set_if_none` to be called.
///
/// `swap` and `take` can be used only with a mutable reference, meaning
/// that a shared `AtomSetOnce` can never be unset once written — which is
/// the invariant that makes the safe `get` accessor possible.
#[derive(Debug)]
pub struct AtomSetOnce<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    inner: Atom<P>,
}

impl<P> AtomSetOnce<P>
where
    P: IntoRawPtr + FromRawPtr,
{
    /// Create an empty `AtomSetOnce`
    pub fn empty() -> AtomSetOnce<P> {
        AtomSetOnce {
            inner: Atom::empty(),
        }
    }

    /// Create a new `AtomSetOnce` from Pointer P
    pub fn new(value: P) -> AtomSetOnce<P> {
        AtomSetOnce {
            inner: Atom::new(value),
        }
    }

    /// This will do a `CAS` setting the value only if it is NULL.
    /// Returns `None` if the value was written; otherwise `Some(v)` is
    /// returned, where `v` is the same value that you passed into this
    /// function.
    pub fn set_if_none(&self, v: P) -> Option<P> {
        self.inner.set_if_none(v)
    }

    /// Convert an `AtomSetOnce` into an `Atom`
    pub fn into_atom(self) -> Atom<P> {
        self.inner
    }

    /// Allow access to the atom if exclusive access is granted
    /// (a `&mut` receiver proves no other reference exists).
    pub fn atom(&mut self) -> &mut Atom<P> {
        &mut self.inner
    }

    /// Check to see if an atom is None
    ///
    /// This only means that the contents was None when it was measured
    pub fn is_none(&self) -> bool {
        self.inner.is_none()
    }
}
impl<T, P> AtomSetOnce<P>
where
    P: IntoRawPtr + FromRawPtr + Deref<Target = T>,
{
    /// If the Atom is set, get a shared reference to the value.
    pub fn get<'a>(&'a self) -> Option<&'a T> {
        let ptr = self.inner.inner.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            unsafe {
                // This is safe since ptr cannot be changed once it is set
                // which means that this is now a Arc or a Box.
                let v: P = FromRawPtr::from_raw(ptr);
                let out = copy_lifetime(self, &*v);
                // Forget the temporary owner so the allocation/refcount is
                // not released; the Atom still owns the value.
                mem::forget(v);
                Some(out)
            }
        }
    }
}

impl<T> AtomSetOnce<Box<T>> {
    /// If the Atom is set, get an exclusive reference to the value.
    pub fn get_mut<'a>(&'a mut self) -> Option<&'a mut T> {
        let ptr = self.inner.inner.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            unsafe {
                // This is safe since ptr cannot be changed once it is set
                // which means that this is now a Arc or a Box.
                let mut v: Box<T> = FromRawPtr::from_raw(ptr);
                let out = copy_mut_lifetime(self, &mut *v);
                // Forget the temporary Box so the allocation is not freed.
                mem::forget(v);
                Some(out)
            }
        }
    }
}

impl<T> AtomSetOnce<T>
where
    T: Clone + IntoRawPtr + FromRawPtr,
{
    /// Duplicate the inner pointer if it is set
    /// (for `Arc` this bumps the reference count).
    pub fn dup<'a>(&self) -> Option<T> {
        let ptr = self.inner.inner.load(Ordering::Acquire);
        if ptr.is_null() {
            None
        } else {
            unsafe {
                // This is safe since ptr cannot be changed once it is set
                // which means that this is now a Arc or a Box.
                let v: T = FromRawPtr::from_raw(ptr);
                let out = v.clone();
                // Forget `v` so the original stored value stays owned.
                mem::forget(v);
                Some(out)
            }
        }
    }
}

/// This is a utility Trait that fetches the next ptr from
/// an object.
pub trait GetNextMut {
    type NextPtr;
    fn get_next(&mut self) -> &mut Self::NextPtr;
}

189
third_party/rust/atom/tests/atom.rs поставляемый
Просмотреть файл

@ -1,189 +0,0 @@
// Copyright 2015 Colin Sherratt
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate atom;
use atom::*;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::*;
use std::thread;
#[test]
fn swap() {
    // Each swap returns the previously stored value (None when empty).
    let a = Atom::empty();
    assert_eq!(a.swap(Box::new(1u8)), None);
    assert_eq!(a.swap(Box::new(2u8)), Some(Box::new(1u8)));
    assert_eq!(a.swap(Box::new(3u8)), Some(Box::new(2u8)));
}

#[test]
fn take() {
    // `take` empties the atom; a second take observes None.
    let a = Atom::new(Box::new(7u8));
    assert_eq!(a.take(), Some(Box::new(7)));
    assert_eq!(a.take(), None);
}

#[test]
fn set_if_none() {
    // Only the first set succeeds; the loser gets its value handed back.
    let a = Atom::empty();
    assert_eq!(a.set_if_none(Box::new(7u8)), None);
    assert_eq!(a.set_if_none(Box::new(8u8)), Some(Box::new(8u8)));
}

// Drop canary: bumps the shared counter each time an instance is dropped.
#[derive(Clone)]
struct Canary(Arc<AtomicUsize>);

impl Drop for Canary {
    fn drop(&mut self) {
        self.0.fetch_add(1, Ordering::SeqCst);
    }
}

#[test]
fn ensure_drop() {
    // Dropping an Atom must drop the boxed value it still owns.
    let v = Arc::new(AtomicUsize::new(0));
    let a = Box::new(Canary(v.clone()));
    let a = Atom::new(a);
    assert_eq!(v.load(Ordering::SeqCst), 0);
    drop(a);
    assert_eq!(v.load(Ordering::SeqCst), 1);
}

#[test]
fn ensure_drop_arc() {
    // Same as `ensure_drop`, but with an Arc payload.
    let v = Arc::new(AtomicUsize::new(0));
    let a = Arc::new(Canary(v.clone()));
    let a = Atom::new(a);
    assert_eq!(v.load(Ordering::SeqCst), 0);
    drop(a);
    assert_eq!(v.load(Ordering::SeqCst), 1);
}
#[test]
fn ensure_send() {
let atom = Arc::new(Atom::empty());
let wait = Arc::new(Barrier::new(2));
let w = wait.clone();
let a = atom.clone();
thread::spawn(move || {
a.swap(Box::new(7u8));
w.wait();
});
wait.wait();
assert_eq!(atom.take(), Some(Box::new(7u8)));
}
#[test]
fn get() {
    // An empty AtomSetOnce yields None; after the first (and only possible)
    // store, `get` hands out a shared reference to the value.
    let once = Arc::new(AtomSetOnce::empty());
    assert_eq!(once.get(), None);
    assert_eq!(once.set_if_none(Box::new(8u8)), None);
    assert_eq!(once.get(), Some(&8u8));
}
#[test]
fn get_arc() {
    // `get` behaves the same when the payload is an Arc instead of a Box.
    let once = Arc::new(AtomSetOnce::empty());
    assert_eq!(once.get(), None);
    assert_eq!(once.set_if_none(Arc::new(8u8)), None);
    assert_eq!(once.get(), Some(&8u8));
    // With an Arc'd canary stored, interleaved `get` calls must not leak or
    // double-free: the canary drops exactly once, when the AtomSetOnce goes.
    let counter = Arc::new(AtomicUsize::new(0));
    let once = Arc::new(AtomSetOnce::empty());
    once.get();
    once.set_if_none(Arc::new(Canary(counter.clone())));
    once.get();
    drop(once);
    assert_eq!(counter.load(Ordering::SeqCst), 1);
}
/// Minimal singly linked list node used to exercise `replace_and_set_next`.
#[derive(Debug)]
struct Link {
    next: Option<Box<Link>>,
    value: u32,
}
impl Link {
    /// Allocates a detached node (no successor) holding `v`.
    fn new(v: u32) -> Box<Link> {
        Box::new(Link {
            next: None,
            value: v,
        })
    }
}
/// Exposes a mutable handle to the node's `next` pointer so the Atom can
/// thread boxed links into a chain (see the `lifo` test).
impl GetNextMut for Box<Link> {
    type NextPtr = Option<Box<Link>>;
    fn get_next(&mut self) -> &mut Option<Box<Link>> {
        &mut self.next
    }
}
#[test]
fn lifo() {
    let atom = Atom::empty();
    for i in 0..100 {
        // Pushing reports `true` only when the atom was empty, i.e. for the
        // very first push.
        let was_empty = atom.replace_and_set_next(Link::new(99 - i));
        assert_eq!(was_empty, i == 0);
    }
    // Values were pushed 99, 98, .., 0; walking the LIFO chain must yield
    // them back in ascending order 0..100.
    let mut found = Vec::new();
    let mut node = atom.take();
    while let Some(link) = node {
        found.push(link.value);
        node = link.next;
    }
    let expected: Vec<u32> = (0..100).collect();
    assert_eq!(expected, found);
}
/// Linked node whose payload is a drop-counting `Canary`; used to verify
/// that replaced chain tails are dropped, not leaked.
#[allow(dead_code)]
struct LinkCanary {
    next: Option<Box<LinkCanary>>,
    value: Canary,
}
impl LinkCanary {
    /// Allocates a detached node (no successor) holding `v`.
    fn new(v: Canary) -> Box<LinkCanary> {
        Box::new(LinkCanary {
            next: None,
            value: v,
        })
    }
}
/// Exposes a mutable handle to the node's `next` pointer so the Atom can
/// thread canary links into a chain (see the `lifo_drop` test).
impl GetNextMut for Box<LinkCanary> {
    type NextPtr = Option<Box<LinkCanary>>;
    fn get_next(&mut self) -> &mut Option<Box<LinkCanary>> {
        &mut self.next
    }
}
#[test]
fn lifo_drop() {
    // Build a two-node chain, then push it. The push overwrites the head's
    // `next` pointer, so the second canary is dropped immediately (count 1);
    // dropping the atom releases the remaining node (count 2).
    let counter = Arc::new(AtomicUsize::new(0));
    let canary = Canary(counter.clone());
    let mut head = LinkCanary::new(canary.clone());
    head.next = Some(LinkCanary::new(canary.clone()));
    let atom = Atom::empty();
    atom.replace_and_set_next(head);
    assert_eq!(1, counter.load(Ordering::SeqCst));
    drop(atom);
    assert_eq!(2, counter.load(Ordering::SeqCst));
}

1
third_party/rust/bit-set/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"dc9c19dabc65b5388d4118617c6d340adeaac60ec37f1df3244fc5aab3a27799","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"49741b792be0800387a30bf6300d5ad4d306e15b63510301e377670489620f40","deploy-docs.sh":"7b66111b124c1c7e59cb84cf110d98b5cb783bd35a676e970d9b3035e55f7dfd","src/lib.rs":"51809e3f8799d712a740f5bd37b658fbda44a5c7e62bf33a69c255866afa61b1"},"package":"6e11e16035ea35e4e5997b393eacbf6f63983188f7a2ad25bfb13465f5ad59de"}

Просмотреть файл

@ -11,22 +11,22 @@
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "peek-poke"
version = "0.2.0"
authors = ["Dan Glastonbury <dan.glastonbury@gmail.com>"]
description = "A mechanism for serializing and deserializing data into/from byte buffers, for use in WebRender."
name = "bit-set"
version = "0.5.2"
authors = ["Alexis Beingessner <a.beingessner@gmail.com>"]
description = "A set of bits"
homepage = "https://github.com/contain-rs/bit-set"
documentation = "https://contain-rs.github.io/bit-set/bit_set"
readme = "README.md"
keywords = ["data-structures", "bitset"]
license = "MIT/Apache-2.0"
repository = "https://github.com/servo/webrender"
[dependencies.euclid]
version = "0.20.0"
optional = true
[dependencies.peek-poke-derive]
version = "0.2"
optional = true
repository = "https://github.com/contain-rs/bit-set"
[dependencies.bit-vec]
version = "0.6.1"
default-features = false
[dev-dependencies.rand]
version = "0.3"
[features]
default = ["derive"]
derive = ["peek-poke-derive"]
extras = ["derive", "euclid"]
default = ["std"]
std = ["bit-vec/std"]

201
third_party/rust/bit-set/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/bit-set/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2016 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше