Bug 1900038 - Update `wgpu` to revision c7458638d14921c7562e4197ddeefa17be413587. r=webgpu-reviewers,supply-chain-reviewers,nical

Differential Revision: https://phabricator.services.mozilla.com/D212293
This commit is contained in:
Teodor Tanasoaia 2024-06-03 08:21:18 +00:00
Parent 81095edb39
Commit e6f7483570
88 changed files with 3305 additions and 1026 deletions

View file

@ -25,9 +25,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5"]
[source."git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587"]
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
replace-with = "vendored-sources"
[source."git+https://github.com/glandium/mio?rev=9a2ef335c366044ffe73b1c4acabe50a1daefe05"]

26
Cargo.lock generated
View file

@ -1210,7 +1210,7 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
dependencies = [
"bitflags 2.5.0",
"libloading",
@ -3919,7 +3919,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
dependencies = [
"arrayvec",
"bit-set",
@ -5103,9 +5103,9 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.201"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"
checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"
dependencies = [
"serde_derive",
]
@ -5131,9 +5131,9 @@ dependencies = [
[[package]]
name = "serde_derive"
version = "1.0.201"
version = "1.0.203"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"
checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"
dependencies = [
"proc-macro2",
"quote",
@ -5698,18 +5698,18 @@ checksum = "aac81b6fd6beb5884b0cf3321b8117e6e5d47ecb6fc89f414cfdcca8b2fe2dd8"
[[package]]
name = "thiserror"
version = "1.0.59"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa"
checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.59"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"
checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
@ -6638,7 +6638,7 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
dependencies = [
"arrayvec",
"bit-vec",
@ -6663,7 +6663,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
dependencies = [
"android_system_properties",
"arrayvec",
@ -6702,7 +6702,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=18b758e3889bdd6ffa769085de15e2b96a0c1eb5#18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
dependencies = [
"bitflags 2.5.0",
"js-sys",

View file

@ -17,7 +17,7 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
# TODO: remove the replay feature on the next update containing https://github.com/gfx-rs/wgpu/pull/5182
features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"]
@ -26,37 +26,37 @@ features = ["serde", "replay", "trace", "strict_asserts", "wgsl", "api_log_info"
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
features = ["metal"]
# We want the wgpu-core Direct3D backends on Windows.
[target.'cfg(windows)'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
features = ["dx12"]
# We want the wgpu-core Vulkan backend on Linux and Windows.
[target.'cfg(any(windows, all(unix, not(any(target_os = "macos", target_os = "ios")))))'.dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
features = ["vulkan"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
features = ["windows_rs", "oom_panic", "device_lost_panic", "internal_error_panic"]
[target.'cfg(windows)'.dependencies.d3d12]
git = "https://github.com/gfx-rs/wgpu"
rev = "18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
[target.'cfg(windows)'.dependencies]
winapi = "0.3"

View file

@ -20,11 +20,11 @@ origin:
# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: 18b758e3889bdd6ffa769085de15e2b96a0c1eb5 (2024-05-17T20:01:43Z).
release: commit c7458638d14921c7562e4197ddeefa17be413587
# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: 18b758e3889bdd6ffa769085de15e2b96a0c1eb5
revision: c7458638d14921c7562e4197ddeefa17be413587
license: ['MIT', 'Apache-2.0']

View file

@ -69,6 +69,7 @@ impl ProgrammableStageDescriptor {
entry_point: cow_label(&self.entry_point),
constants: Cow::Owned(constants),
zero_initialize_workgroup_memory: true,
vertex_pulling_transform: false,
}
}
}

View file

@ -959,14 +959,29 @@ pub fn replay_compute_pass<A: HalApi>(
global: &Global,
id: CommandEncoderId,
src_pass: &RecordedComputePass,
) -> Result<(), wgc::command::ComputePassError> {
let mut dst_pass = global.command_encoder_create_compute_pass::<A>(
mut error_buf: crate::error::ErrorBuffer,
) {
let (mut dst_pass, err) = global.command_encoder_create_compute_pass::<A>(
id,
&wgc::command::ComputePassDescriptor {
label: src_pass.base.label.as_ref().map(|s| s.as_str().into()),
timestamp_writes: src_pass.timestamp_writes.as_ref(),
},
);
if let Some(err) = err {
error_buf.init(err);
return;
}
if let Err(err) = replay_compute_pass_impl::<A>(global, src_pass, &mut dst_pass) {
error_buf.init(err);
}
}
fn replay_compute_pass_impl<A: HalApi>(
global: &Global,
src_pass: &RecordedComputePass,
dst_pass: &mut wgc::command::ComputePass<A>,
) -> Result<(), wgc::command::ComputePassError> {
let mut dynamic_offsets = src_pass.base.dynamic_offsets.as_slice();
let mut dynamic_offsets = |len| {
let offsets;
@ -987,55 +1002,51 @@ pub fn replay_compute_pass<A: HalApi>(
bind_group_id,
} => {
let offsets = dynamic_offsets(num_dynamic_offsets);
global.compute_pass_set_bind_group(&mut dst_pass, index, bind_group_id, offsets)?;
global.compute_pass_set_bind_group(dst_pass, index, bind_group_id, offsets)?;
}
ComputeCommand::SetPipeline(pipeline_id) => {
global.compute_pass_set_pipeline(&mut dst_pass, pipeline_id)?;
global.compute_pass_set_pipeline(dst_pass, pipeline_id)?;
}
ComputeCommand::Dispatch([x, y, z]) => {
global.compute_pass_dispatch_workgroups(&mut dst_pass, x, y, z);
global.compute_pass_dispatch_workgroups(dst_pass, x, y, z)?;
}
ComputeCommand::DispatchIndirect { buffer_id, offset } => {
global.compute_pass_dispatch_workgroups_indirect(
&mut dst_pass,
buffer_id,
offset,
)?;
global.compute_pass_dispatch_workgroups_indirect(dst_pass, buffer_id, offset)?;
}
ComputeCommand::PushDebugGroup { color, len } => {
let label = strings(len);
let label = std::str::from_utf8(label).unwrap();
global.compute_pass_push_debug_group(&mut dst_pass, label, color);
global.compute_pass_push_debug_group(dst_pass, label, color)?;
}
ComputeCommand::PopDebugGroup => {
global.compute_pass_pop_debug_group(&mut dst_pass);
global.compute_pass_pop_debug_group(dst_pass)?;
}
ComputeCommand::InsertDebugMarker { color, len } => {
let label = strings(len);
let label = std::str::from_utf8(label).unwrap();
global.compute_pass_insert_debug_marker(&mut dst_pass, label, color);
global.compute_pass_insert_debug_marker(dst_pass, label, color)?;
}
ComputeCommand::WriteTimestamp {
query_set_id,
query_index,
} => {
global.compute_pass_write_timestamp(&mut dst_pass, query_set_id, query_index)?;
global.compute_pass_write_timestamp(dst_pass, query_set_id, query_index)?;
}
ComputeCommand::BeginPipelineStatisticsQuery {
query_set_id,
query_index,
} => {
global.compute_pass_begin_pipeline_statistics_query(
&mut dst_pass,
dst_pass,
query_set_id,
query_index,
)?;
}
ComputeCommand::EndPipelineStatisticsQuery => {
global.compute_pass_end_pipeline_statistics_query(&mut dst_pass);
global.compute_pass_end_pipeline_statistics_query(dst_pass)?;
}
}
}
global.command_encoder_run_compute_pass(&dst_pass)
global.compute_pass_end(dst_pass)
}
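
The replay path above no longer returns a `Result`: pass creation now hands back the pass together with an optional creation error, and both that error and any later replay failure are reported through the caller-supplied error buffer. Below is a small, self-contained sketch of that reporting pattern; `ErrorBuffer`, `Pass`, and the helper functions are stand-ins for illustration, not the real `crate::error::ErrorBuffer` or `wgpu-core` types.

```rust
// Sketch only: stand-in types, not the wgpu_bindings implementation.
#[derive(Default)]
struct ErrorBuffer(Option<String>);

impl ErrorBuffer {
    fn init(&mut self, err: impl std::fmt::Display) {
        // Keep only the first error, mirroring how a single failure is
        // reported back to the content process.
        self.0.get_or_insert_with(|| err.to_string());
    }
}

struct Pass;

// Creation always yields a pass object; a failure travels alongside it
// instead of replacing it, so later commands can still be discarded.
fn create_pass(fail: bool) -> (Pass, Option<String>) {
    (Pass, fail.then(|| "invalid encoder".to_string()))
}

fn replay_impl(_pass: &mut Pass) -> Result<(), String> {
    // The fallible body stays an ordinary Result-returning helper.
    Ok(())
}

fn replay(fail: bool, error_buf: &mut ErrorBuffer) {
    let (mut pass, err) = create_pass(fail);
    if let Some(err) = err {
        error_buf.init(err);
        return;
    }
    if let Err(err) = replay_impl(&mut pass) {
        error_buf.init(err);
    }
}

fn main() {
    let mut buf = ErrorBuffer::default();
    replay(true, &mut buf);
    assert_eq!(buf.0.as_deref(), Some("invalid encoder"));
}
```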

View file

@ -952,13 +952,11 @@ impl Global {
base,
timestamp_writes,
} => {
if let Err(err) = self
.command_encoder_run_compute_pass_with_unresolved_commands::<A>(
self_id,
base.as_ref(),
timestamp_writes.as_ref(),
)
{
if let Err(err) = self.compute_pass_end_with_unresolved_commands::<A>(
self_id,
base,
timestamp_writes.as_ref(),
) {
error_buf.init(err);
}
}
@ -997,7 +995,7 @@ impl Global {
timestamp_writes,
occlusion_query_set_id,
} => {
if let Err(err) = self.command_encoder_run_render_pass_impl::<A>(
if let Err(err) = self.render_pass_end_impl::<A>(
self_id,
base.as_ref(),
&target_colors,
@ -1094,7 +1092,7 @@ pub unsafe extern "C" fn wgpu_server_compute_pass(
global: &Global,
encoder_id: id::CommandEncoderId,
byte_buf: &ByteBuf,
mut error_buf: ErrorBuffer,
error_buf: ErrorBuffer,
) {
let src_pass = bincode::deserialize(byte_buf.as_slice()).unwrap();
@ -1103,8 +1101,8 @@ pub unsafe extern "C" fn wgpu_server_compute_pass(
&self,
encoder_id: id::CommandEncoderId,
src_pass: &RecordedComputePass,
) -> Result<(), wgc::command::ComputePassError>
where
error_buf: ErrorBuffer,
) where
A: wgc::hal_api::HalApi;
}
impl ReplayComputePass for Global {
@ -1112,17 +1110,15 @@ pub unsafe extern "C" fn wgpu_server_compute_pass(
&self,
encoder_id: id::CommandEncoderId,
src_pass: &RecordedComputePass,
) -> Result<(), wgc::command::ComputePassError>
where
error_buf: ErrorBuffer,
) where
A: wgc::hal_api::HalApi,
{
crate::command::replay_compute_pass::<A>(self, encoder_id, src_pass)
crate::command::replay_compute_pass::<A>(self, encoder_id, src_pass, error_buf);
}
}
if let Err(err) = gfx_select!(encoder_id => global.replay_compute_pass(encoder_id, &src_pass)) {
error_buf.init(err);
}
gfx_select!(encoder_id => global.replay_compute_pass(encoder_id, &src_pass, error_buf));
}
#[no_mangle]

View file

@ -1434,9 +1434,12 @@ criteria = "safe-to-deploy"
delta = "0.19.0 -> 0.20.0"
[[audits.d3d12]]
who = "Erich Gubler <erichdongubler@gmail.com>"
who = [
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
delta = "0.20.0 -> 0.20.0@git:c7458638d14921c7562e4197ddeefa17be413587"
importable = false
[[audits.darling]]
@ -2877,9 +2880,12 @@ criteria = "safe-to-deploy"
delta = "0.19.2 -> 0.20.0"
[[audits.naga]]
who = "Erich Gubler <erichdongubler@gmail.com>"
who = [
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
delta = "0.20.0 -> 0.20.0@git:c7458638d14921c7562e4197ddeefa17be413587"
importable = false
[[audits.net2]]
@ -4772,9 +4778,12 @@ criteria = "safe-to-deploy"
delta = "0.19.3 -> 0.20.0"
[[audits.wgpu-core]]
who = "Erich Gubler <erichdongubler@gmail.com>"
who = [
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
delta = "0.20.0 -> 0.20.0@git:c7458638d14921c7562e4197ddeefa17be413587"
importable = false
[[audits.wgpu-hal]]
@ -4836,9 +4845,12 @@ criteria = "safe-to-deploy"
delta = "0.19.3 -> 0.20.0"
[[audits.wgpu-hal]]
who = "Erich Gubler <erichdongubler@gmail.com>"
who = [
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
delta = "0.20.0 -> 0.20.0@git:c7458638d14921c7562e4197ddeefa17be413587"
importable = false
[[audits.wgpu-types]]
@ -4900,9 +4912,12 @@ criteria = "safe-to-deploy"
delta = "0.19.2 -> 0.20.0"
[[audits.wgpu-types]]
who = "Erich Gubler <erichdongubler@gmail.com>"
who = [
"Erich Gubler <erichdongubler@gmail.com>",
"Teodor Tanasoaia <ttanasoaia@mozilla.com>",
]
criteria = "safe-to-deploy"
delta = "0.20.0 -> 0.20.0@git:18b758e3889bdd6ffa769085de15e2b96a0c1eb5"
delta = "0.20.0 -> 0.20.0@git:c7458638d14921c7562e4197ddeefa17be413587"
importable = false
[[audits.whatsys]]
@ -5334,7 +5349,7 @@ end = "2024-05-05"
criteria = "safe-to-deploy"
user-id = 3618 # David Tolnay (dtolnay)
start = "2019-03-01"
end = "2024-04-25"
end = "2025-05-31"
[[trusted.serde_bytes]]
criteria = "safe-to-deploy"
@ -5346,7 +5361,7 @@ end = "2024-04-25"
criteria = "safe-to-deploy"
user-id = 3618 # David Tolnay (dtolnay)
start = "2019-03-01"
end = "2024-04-25"
end = "2025-05-31"
[[trusted.serde_json]]
criteria = "safe-to-deploy"
@ -5388,13 +5403,13 @@ end = "2024-05-03"
criteria = "safe-to-deploy"
user-id = 3618 # David Tolnay (dtolnay)
start = "2019-10-09"
end = "2024-04-25"
end = "2025-05-31"
[[trusted.thiserror-impl]]
criteria = "safe-to-deploy"
user-id = 3618 # David Tolnay (dtolnay)
start = "2019-10-09"
end = "2024-04-25"
end = "2025-05-31"
[[trusted.threadbound]]
criteria = "safe-to-deploy"

View file

@ -510,6 +510,13 @@ user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.serde]]
version = "1.0.203"
when = "2024-05-25"
user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.serde_bytes]]
version = "0.11.9"
when = "2023-02-05"
@ -524,6 +531,13 @@ user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.serde_derive]]
version = "1.0.203"
when = "2024-05-25"
user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.serde_json]]
version = "1.0.116"
when = "2024-04-16"
@ -573,6 +587,13 @@ user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.thiserror]]
version = "1.0.61"
when = "2024-05-17"
user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.thiserror-impl]]
version = "1.0.59"
when = "2024-04-20"
@ -580,6 +601,13 @@ user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.thiserror-impl]]
version = "1.0.61"
when = "2024-05-17"
user-id = 3618
user-login = "dtolnay"
user-name = "David Tolnay"
[[publisher.threadbound]]
version = "0.1.5"
when = "2022-12-17"

2
third_party/rust/naga/.cargo-checksum.json vendored

File diff suppressed because one or more lines are too long

1
third_party/rust/naga/CHANGELOG.md vendored
View file

@ -79,6 +79,7 @@ For changelogs after v0.14, see [the wgpu changelog](../CHANGELOG.md).
- Add and fix minimum Metal version checks for optional functionality. ([#2486](https://github.com/gfx-rs/naga/pull/2486)) **@teoxoy**
- Make varyings' struct members unique. ([#2521](https://github.com/gfx-rs/naga/pull/2521)) **@evahop**
- Add experimental vertex pulling transform flag. ([#5254](https://github.com/gfx-rs/wgpu/pull/5254)) **@bradwerth**
#### GLSL-OUT

4
third_party/rust/naga/Cargo.toml vendored
View file

@ -47,7 +47,7 @@ bit-set = "0.5"
bitflags = "2.5"
log = "0.4"
rustc-hash = "1.1.0"
thiserror = "1.0.59"
thiserror = "1.0.61"
[dependencies.arbitrary]
version = "1.3"
@ -74,7 +74,7 @@ version = "0.2.1"
optional = true
[dependencies.serde]
version = "1.0.200"
version = "1.0.202"
features = ["derive"]
optional = true

2
third_party/rust/naga/src/back/hlsl/help.rs vendored
View file

@ -1334,7 +1334,7 @@ impl<'a, W: Write> super::Writer<'a, W> {
/// Parenthesizing the expression like `((float4)0).y` would work... except DXC can't handle
/// cases like:
///
/// ```ignore
/// ```text
/// tests\out\hlsl\access.hlsl:183:41: error: cannot compile this l-value expression yet
/// t_1.am = (__mat4x2[2])((float4x2[2])0);
/// ^

118
third_party/rust/naga/src/back/msl/mod.rs vendored
View file

@ -222,6 +222,113 @@ impl Default for Options {
}
}
/// Corresponds to [WebGPU `GPUVertexFormat`](
/// https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexformat).
#[repr(u32)]
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum VertexFormat {
/// Two unsigned bytes (u8). `vec2<u32>` in shaders.
Uint8x2 = 0,
/// Four unsigned bytes (u8). `vec4<u32>` in shaders.
Uint8x4 = 1,
/// Two signed bytes (i8). `vec2<i32>` in shaders.
Sint8x2 = 2,
/// Four signed bytes (i8). `vec4<i32>` in shaders.
Sint8x4 = 3,
/// Two unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec2<f32>` in shaders.
Unorm8x2 = 4,
/// Four unsigned bytes (u8). [0, 255] converted to float [0, 1] `vec4<f32>` in shaders.
Unorm8x4 = 5,
/// Two signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec2<f32>` in shaders.
Snorm8x2 = 6,
/// Four signed bytes (i8). [-127, 127] converted to float [-1, 1] `vec4<f32>` in shaders.
Snorm8x4 = 7,
/// Two unsigned shorts (u16). `vec2<u32>` in shaders.
Uint16x2 = 8,
/// Four unsigned shorts (u16). `vec4<u32>` in shaders.
Uint16x4 = 9,
/// Two signed shorts (i16). `vec2<i32>` in shaders.
Sint16x2 = 10,
/// Four signed shorts (i16). `vec4<i32>` in shaders.
Sint16x4 = 11,
/// Two unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec2<f32>` in shaders.
Unorm16x2 = 12,
/// Four unsigned shorts (u16). [0, 65535] converted to float [0, 1] `vec4<f32>` in shaders.
Unorm16x4 = 13,
/// Two signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec2<f32>` in shaders.
Snorm16x2 = 14,
/// Four signed shorts (i16). [-32767, 32767] converted to float [-1, 1] `vec4<f32>` in shaders.
Snorm16x4 = 15,
/// Two half-precision floats (no Rust equiv). `vec2<f32>` in shaders.
Float16x2 = 16,
/// Four half-precision floats (no Rust equiv). `vec4<f32>` in shaders.
Float16x4 = 17,
/// One single-precision float (f32). `f32` in shaders.
Float32 = 18,
/// Two single-precision floats (f32). `vec2<f32>` in shaders.
Float32x2 = 19,
/// Three single-precision floats (f32). `vec3<f32>` in shaders.
Float32x3 = 20,
/// Four single-precision floats (f32). `vec4<f32>` in shaders.
Float32x4 = 21,
/// One unsigned int (u32). `u32` in shaders.
Uint32 = 22,
/// Two unsigned ints (u32). `vec2<u32>` in shaders.
Uint32x2 = 23,
/// Three unsigned ints (u32). `vec3<u32>` in shaders.
Uint32x3 = 24,
/// Four unsigned ints (u32). `vec4<u32>` in shaders.
Uint32x4 = 25,
/// One signed int (i32). `i32` in shaders.
Sint32 = 26,
/// Two signed ints (i32). `vec2<i32>` in shaders.
Sint32x2 = 27,
/// Three signed ints (i32). `vec3<i32>` in shaders.
Sint32x3 = 28,
/// Four signed ints (i32). `vec4<i32>` in shaders.
Sint32x4 = 29,
/// Three unsigned 10-bit integers and one 2-bit integer, packed into a 32-bit integer (u32). [0, 1024] converted to float [0, 1] `vec4<f32>` in shaders.
#[cfg_attr(feature = "serde", serde(rename = "unorm10-10-10-2"))]
Unorm10_10_10_2 = 34,
}
/// A mapping of vertex buffers and their attributes to shader
/// locations.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct AttributeMapping {
/// Shader location associated with this attribute
pub shader_location: u32,
/// Offset in bytes from start of vertex buffer structure
pub offset: u32,
/// Format code to help us unpack the attribute into the type
/// used by the shader. Codes correspond to a 0-based index of
/// <https://gpuweb.github.io/gpuweb/#enumdef-gpuvertexformat>.
/// The conversion process is described by
/// <https://gpuweb.github.io/gpuweb/#vertex-processing>.
pub format: VertexFormat,
}
/// A description of a vertex buffer with all the information we
/// need to address the attributes within it.
#[derive(Debug, Default, Clone, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct VertexBufferMapping {
/// Shader location associated with this buffer
pub id: u32,
/// Size of the structure in bytes
pub stride: u32,
/// True if the buffer is indexed by vertex, false if indexed
/// by instance.
pub indexed_by_vertex: bool,
/// Vec of the attributes within the structure
pub attributes: Vec<AttributeMapping>,
}
/// A subset of options that are meant to be changed per pipeline.
#[derive(Debug, Default, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
@ -234,6 +341,17 @@ pub struct PipelineOptions {
///
/// Enable this for vertex shaders with point primitive topologies.
pub allow_and_force_point_size: bool,
/// If set, when generating the Metal vertex shader, transform it
/// to receive the vertex buffers, lengths, and vertex id as args,
/// and bounds-check the vertex id and use the index into the
/// vertex buffers to access attributes, rather than using Metal's
/// [[stage-in]] assembled attribute data.
pub vertex_pulling_transform: bool,
/// vertex_buffer_mappings are used during shader translation to
/// support vertex pulling.
pub vertex_buffer_mappings: Vec<VertexBufferMapping>,
}
impl Options {
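
The new `VertexBufferMapping`/`AttributeMapping` types above describe how the vertex pulling transform locates attributes inside raw vertex buffers, and `PipelineOptions::vertex_pulling_transform` turns the rewrite on per pipeline. A minimal sketch of how a caller might fill these in, assuming the public `naga::back::msl` paths shown in this diff; the buffer index, stride, offset, and format are made-up example values:

```rust
// Sketch only: example values, not taken from wgpu-hal's actual Metal setup.
use naga::back::msl::{AttributeMapping, PipelineOptions, VertexBufferMapping, VertexFormat};

fn example_pipeline_options() -> PipelineOptions {
    // One vertex buffer bound at Metal buffer index 0, stepping per vertex,
    // with a single float32x4 attribute feeding @location(0) at offset 0.
    let mapping = VertexBufferMapping {
        id: 0,
        stride: 16,
        indexed_by_vertex: true,
        attributes: vec![AttributeMapping {
            shader_location: 0,
            offset: 0,
            format: VertexFormat::Float32x4,
        }],
    };

    PipelineOptions {
        // Ask the MSL writer to fetch and bounds-check attributes itself
        // instead of relying on Metal's [[stage_in]] assembly.
        vertex_pulling_transform: true,
        vertex_buffer_mappings: vec![mapping],
        ..Default::default()
    }
}

fn main() {
    let _options = example_pipeline_options();
}
```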

1131
third_party/rust/naga/src/back/msl/writer.rs vendored

File diff suppressed because it is too large

132
third_party/rust/naga/src/front/spv/mod.rs vendored
View file

@ -313,14 +313,14 @@ struct LookupVariable {
type_id: spirv::Word,
}
/// Information about SPIR-V result ids, stored in `Parser::lookup_expression`.
/// Information about SPIR-V result ids, stored in `Frontend::lookup_expression`.
#[derive(Clone, Debug)]
struct LookupExpression {
/// The `Expression` constructed for this result.
///
/// Note that, while a SPIR-V result id can be used in any block dominated
/// by its definition, a Naga `Expression` is only in scope for the rest of
/// its subtree. `Parser::get_expr_handle` takes care of spilling the result
/// its subtree. `Frontend::get_expr_handle` takes care of spilling the result
/// to a `LocalVariable` which can then be used anywhere.
handle: Handle<crate::Expression>,
@ -564,6 +564,20 @@ enum SignAnchor {
Operand,
}
enum AtomicOpInst {
AtomicIIncrement,
}
#[allow(dead_code)]
struct AtomicOp {
instruction: AtomicOpInst,
result_type_id: spirv::Word,
result_id: spirv::Word,
pointer_id: spirv::Word,
scope_id: spirv::Word,
memory_semantics_id: spirv::Word,
}
pub struct Frontend<I> {
data: I,
data_offset: usize,
@ -575,10 +589,11 @@ pub struct Frontend<I> {
future_member_decor: FastHashMap<(spirv::Word, MemberIndex), Decoration>,
lookup_member: FastHashMap<(Handle<crate::Type>, MemberIndex), LookupMember>,
handle_sampling: FastHashMap<Handle<crate::GlobalVariable>, image::SamplingFlags>,
// Used to upgrade types used in atomic ops to atomic types, keyed by pointer id
lookup_atomic: FastHashMap<spirv::Word, AtomicOp>,
lookup_type: FastHashMap<spirv::Word, LookupType>,
lookup_void_type: Option<spirv::Word>,
lookup_storage_buffer_types: FastHashMap<Handle<crate::Type>, crate::StorageAccess>,
// Lookup for samplers and sampled images, storing flags on how they are used.
lookup_constant: FastHashMap<spirv::Word, LookupConstant>,
lookup_variable: FastHashMap<spirv::Word, LookupVariable>,
lookup_expression: FastHashMap<spirv::Word, LookupExpression>,
@ -631,6 +646,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
future_member_decor: FastHashMap::default(),
handle_sampling: FastHashMap::default(),
lookup_member: FastHashMap::default(),
lookup_atomic: FastHashMap::default(),
lookup_type: FastHashMap::default(),
lookup_void_type: None,
lookup_storage_buffer_types: FastHashMap::default(),
@ -3944,7 +3960,81 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
);
emitter.start(ctx.expressions);
}
_ => return Err(Error::UnsupportedInstruction(self.state, inst.op)),
Op::AtomicIIncrement => {
inst.expect(6)?;
let start = self.data_offset;
let span = self.span_from_with_op(start);
let result_type_id = self.next()?;
let result_id = self.next()?;
let pointer_id = self.next()?;
let scope_id = self.next()?;
let memory_semantics_id = self.next()?;
// Store the op for a later pass where we "upgrade" the pointer type
let atomic = AtomicOp {
instruction: AtomicOpInst::AtomicIIncrement,
result_type_id,
result_id,
pointer_id,
scope_id,
memory_semantics_id,
};
self.lookup_atomic.insert(pointer_id, atomic);
log::trace!("\t\t\tlooking up expr {:?}", pointer_id);
let (p_lexp_handle, p_lexp_ty_id) = {
let lexp = self.lookup_expression.lookup(pointer_id)?;
let handle = get_expr_handle!(pointer_id, &lexp);
(handle, lexp.type_id)
};
log::trace!("\t\t\tlooking up type {pointer_id:?}");
let p_ty = self.lookup_type.lookup(p_lexp_ty_id)?;
let p_ty_base_id =
p_ty.base_id.ok_or(Error::InvalidAccessType(p_lexp_ty_id))?;
log::trace!("\t\t\tlooking up base type {p_ty_base_id:?} of {p_ty:?}");
let p_base_ty = self.lookup_type.lookup(p_ty_base_id)?;
// Create an expression for our result
let r_lexp_handle = {
let expr = crate::Expression::AtomicResult {
ty: p_base_ty.handle,
comparison: false,
};
let handle = ctx.expressions.append(expr, span);
self.lookup_expression.insert(
result_id,
LookupExpression {
handle,
type_id: result_type_id,
block_id,
},
);
handle
};
// Create a literal "1" since WGSL lacks an increment operation
let one_lexp_handle = make_index_literal(
ctx,
1,
&mut block,
&mut emitter,
p_base_ty.handle,
p_lexp_ty_id,
span,
)?;
// Create a statement for the op itself
let stmt = crate::Statement::Atomic {
pointer: p_lexp_handle,
fun: crate::AtomicFunction::Add,
value: one_lexp_handle,
result: r_lexp_handle,
};
block.push(stmt, span);
}
_ => {
return Err(Error::UnsupportedInstruction(self.state, inst.op));
}
}
};
@ -5594,4 +5684,38 @@ mod test {
];
let _ = super::parse_u8_slice(&bin, &Default::default()).unwrap();
}
#[test]
fn atomic_i_inc() {
let _ = env_logger::builder()
.is_test(true)
.filter_level(log::LevelFilter::Trace)
.try_init();
let bytes = include_bytes!("../../../tests/in/spv/atomic_i_increment.spv");
let m = super::parse_u8_slice(bytes, &Default::default()).unwrap();
let mut validator = crate::valid::Validator::new(
crate::valid::ValidationFlags::empty(),
Default::default(),
);
let info = validator.validate(&m).unwrap();
let wgsl =
crate::back::wgsl::write_string(&m, &info, crate::back::wgsl::WriterFlags::empty())
.unwrap();
log::info!("atomic_i_increment:\n{wgsl}");
let m = match crate::front::wgsl::parse_str(&wgsl) {
Ok(m) => m,
Err(e) => {
log::error!("{}", e.emit_to_string(&wgsl));
// at this point we know atomics create invalid modules
// so simply bail
return;
}
};
let mut validator =
crate::valid::Validator::new(crate::valid::ValidationFlags::all(), Default::default());
if let Err(e) = validator.validate(&m) {
log::error!("{}", e.emit_to_string(&wgsl));
}
}
}
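
The new `Op::AtomicIIncrement` arm lowers the SPIR-V increment to a Naga `Statement::Atomic` using `AtomicFunction::Add` with a literal `1`, since WGSL has no increment statement. A hedged illustration of the resulting semantics in WGSL terms, assuming a naga build with the `wgsl-in` feature; the binding layout and names are invented for the example:

```rust
// Illustration only: shows the WGSL-equivalent of the lowering, not the
// actual SPIR-V test input used by atomic_i_inc above.
fn main() {
    let wgsl = r#"
        @group(0) @binding(0) var<storage, read_write> counter: atomic<u32>;

        @compute @workgroup_size(1)
        fn main() {
            // OpAtomicIIncrement becomes an atomicAdd of literal 1.
            let old = atomicAdd(&counter, 1u);
        }
    "#;
    let module = naga::front::wgsl::parse_str(wgsl).expect("valid WGSL");
    assert!(!module.entry_points.is_empty());
}
```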

4
third_party/rust/naga/src/valid/analyzer.rs vendored
View file

@ -383,6 +383,10 @@ impl FunctionInfo {
/// refer to a global variable. Those expressions don't contribute
/// any usage to the global themselves; that depends on how other
/// expressions use them.
///
/// [`assignable_global`]: ExpressionInfo::assignable_global
/// [`Access`]: crate::Expression::Access
/// [`AccessIndex`]: crate::Expression::AccessIndex
#[must_use]
fn add_assignable_ref(
&mut self,

42
third_party/rust/naga/src/valid/function.rs vendored
View file

@ -172,6 +172,8 @@ pub enum FunctionError {
WorkgroupUniformLoadInvalidPointer(Handle<crate::Expression>),
#[error("Subgroup operation is invalid")]
InvalidSubgroup(#[from] SubgroupError),
#[error("Emit statement should not cover \"result\" expressions like {0:?}")]
EmitResult(Handle<crate::Expression>),
}
bitflags::bitflags! {
@ -554,7 +556,45 @@ impl super::Validator {
match *statement {
S::Emit(ref range) => {
for handle in range.clone() {
self.emit_expression(handle, context)?;
use crate::Expression as Ex;
match context.expressions[handle] {
Ex::Literal(_)
| Ex::Constant(_)
| Ex::Override(_)
| Ex::ZeroValue(_)
| Ex::Compose { .. }
| Ex::Access { .. }
| Ex::AccessIndex { .. }
| Ex::Splat { .. }
| Ex::Swizzle { .. }
| Ex::FunctionArgument(_)
| Ex::GlobalVariable(_)
| Ex::LocalVariable(_)
| Ex::Load { .. }
| Ex::ImageSample { .. }
| Ex::ImageLoad { .. }
| Ex::ImageQuery { .. }
| Ex::Unary { .. }
| Ex::Binary { .. }
| Ex::Select { .. }
| Ex::Derivative { .. }
| Ex::Relational { .. }
| Ex::Math { .. }
| Ex::As { .. }
| Ex::ArrayLength(_)
| Ex::RayQueryGetIntersection { .. } => {
self.emit_expression(handle, context)?
}
Ex::CallResult(_)
| Ex::AtomicResult { .. }
| Ex::WorkGroupUniformLoadResult { .. }
| Ex::RayQueryProceedResult
| Ex::SubgroupBallotResult
| Ex::SubgroupOperationResult { .. } => {
return Err(FunctionError::EmitResult(handle)
.with_span_handle(handle, context.expressions));
}
}
}
}
S::Block(ref block) => {

2
third_party/rust/serde/.cargo-checksum.json vendored
View file

@ -1 +1 @@
{"files":{"Cargo.toml":"51902891c19dabcfb3d92f39d782c804e68184f48d887798d55be98876c64e73","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","build.rs":"daecc51751de7a6aee619b268674adfba240f42f09405631159cd84520aba28f","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/de/format.rs":"c85071b016df643b161859682d21ce34fa0ebf2a3bdbeeea69859da48f5d934f","src/de/ignored_any.rs":"6480f2b2a83dc4764d01b2eec7309729eef2492eede2e5ee98d23a60b05198eb","src/de/impls.rs":"cd2781405d8f4568701a701e6b9946e5956eb201571118c8a49038928477779a","src/de/mod.rs":"e6250b9d385593a04ad984e7906acadf50cc8e6f5554f6ac0ed18af75512e57a","src/de/seed.rs":"045d890712a04eb33ffc5a021e5d948a63c89402b8ffeea749df2171b7484f8f","src/de/size_hint.rs":"fff83dc39d30e75e8e611991f9c5399188a1aad23a6462dbca2c8b62655cfedb","src/de/value.rs":"0fd511a288c20a1b768718f4baadf9c7d4146d276af6a71ba1d0f7679b28644a","src/integer128.rs":"29ef30b7d94507b34807090e68173767cdc7aff62edccd38affe69e75338dddc","src/lib.rs":"86b0e2240ac3ca56b20cabbb272ab2acef840390e7fe4e406dfa1d5a549ce80c","src/macros.rs":"0d4b392ed6fe529fda2c5439c8547fe9717e64f528bfd01f633bb725f98b53cd","src/private/de.rs":"9255ecf2d5c52f0f52b4e0dbf85bdd8c140bc2c1ac96086ee395589e0521aeb4","src/private/doc.rs":"b222decb40321190155209e1b8a5a52e3adfaa470047e379e664b71e0320655a","src/private/mod.rs":"b8f0c348621d91dd9da3db83d8877e70bc61ad0a2dc2d6fb57c6fc2c2cbafa26","src/private/ser.rs":"73577607e779e1b42713c51b9bc7136f99daccf473b4a641c99ceebe46708d47","src/ser/fmt.rs":"d1cfd9623605413e45a23ef778d97f0ac4da4adaed23739f1f9d7414b30e384b","src/ser/impls.rs":"0eebc159f7d3962c46c323e456afb679747ff57563e796c5fd92c64ed074ae70","src/ser/impossible.rs":"5c325da8e0370ab22abe1e15d8af1dc7a1707b127508f61e720cd7f0caa80593","src/ser/mod.rs":"a92d4ce0a6c68eb22a24a61574a5d2e9b0b463b284ff08e62298b8f7ae1a7464","src/std_error.rs":"25a07149e2e468747ffa5a58051c7f93d7b3c0fa0372f012a96c97ec8ab03b97"},"package":"780f1cebed1629e4753a1a38a3c72d30b97ec044f0aef68cb26650a3c5cf363c"}
{"files":{"Cargo.toml":"99245e2dcef3d674a06780d28d779d902dc95891aeeae9362811146e2dbc23f2","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","build.rs":"fd90532b83c6cf71c8aaf6ef7bedfdab85aac6bc06fc2d578ca7dd99195b8622","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/de/format.rs":"c85071b016df643b161859682d21ce34fa0ebf2a3bdbeeea69859da48f5d934f","src/de/ignored_any.rs":"6480f2b2a83dc4764d01b2eec7309729eef2492eede2e5ee98d23a60b05198eb","src/de/impls.rs":"f1d691a1bd4f0404c590eb217bab1d07c7d56ff9e874da6ab28d4affa7fef94c","src/de/mod.rs":"342de5aa92c54ebfcf30213f86a8626839ab132dc7fd1300a47ab40d5faf0dba","src/de/seed.rs":"045d890712a04eb33ffc5a021e5d948a63c89402b8ffeea749df2171b7484f8f","src/de/size_hint.rs":"fff83dc39d30e75e8e611991f9c5399188a1aad23a6462dbca2c8b62655cfedb","src/de/value.rs":"0c485908b1f755e4750af0aefa2132460dadbcf30919c15c06ca795a92d96430","src/integer128.rs":"29ef30b7d94507b34807090e68173767cdc7aff62edccd38affe69e75338dddc","src/lib.rs":"6e8f95df70fe4c797d86d9596dccf3efa547aa322eddffe3f777822eb7fe29bb","src/macros.rs":"0d4b392ed6fe529fda2c5439c8547fe9717e64f528bfd01f633bb725f98b53cd","src/private/de.rs":"9255ecf2d5c52f0f52b4e0dbf85bdd8c140bc2c1ac96086ee395589e0521aeb4","src/private/doc.rs":"b222decb40321190155209e1b8a5a52e3adfaa470047e379e664b71e0320655a","src/private/mod.rs":"b8f0c348621d91dd9da3db83d8877e70bc61ad0a2dc2d6fb57c6fc2c2cbafa26","src/private/ser.rs":"73577607e779e1b42713c51b9bc7136f99daccf473b4a641c99ceebe46708d47","src/ser/fmt.rs":"d1cfd9623605413e45a23ef778d97f0ac4da4adaed23739f1f9d7414b30e384b","src/ser/impls.rs":"585908d859fc89adcc1c6a7acc8f12467feba7daeb8c44ae28d25fd40d140f2c","src/ser/impossible.rs":"5c325da8e0370ab22abe1e15d8af1dc7a1707b127508f61e720cd7f0caa80593","src/ser/mod.rs":"a92d4ce0a6c68eb22a24a61574a5d2e9b0b463b284ff08e62298b8f7ae1a7464","src/std_error.rs":"25a07149e2e468747ffa5a58051c7f93d7b3c0fa0372f012a96c97ec8ab03b97"},"package":"7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094"}

10
third_party/rust/serde/Cargo.toml vendored
View file

@ -13,7 +13,7 @@
edition = "2018"
rust-version = "1.31"
name = "serde"
version = "1.0.201"
version = "1.0.203"
authors = [
"Erick Tryzelaar <erick.tryzelaar@gmail.com>",
"David Tolnay <dtolnay@gmail.com>",
@ -46,11 +46,7 @@ features = [
"rc",
"unstable",
]
rustdoc-args = [
"--cfg",
"doc_cfg",
"--generate-link-to-definition",
]
rustdoc-args = ["--generate-link-to-definition"]
targets = ["x86_64-unknown-linux-gnu"]
[package.metadata.playground]
@ -80,4 +76,4 @@ std = []
unstable = []
[target."cfg(any())".dependencies.serde_derive]
version = "=1.0.201"
version = "=1.0.203"

1
third_party/rust/serde/build.rs vendored
View file

@ -14,7 +14,6 @@ fn main() {
};
if minor >= 77 {
println!("cargo:rustc-check-cfg=cfg(doc_cfg)");
println!("cargo:rustc-check-cfg=cfg(no_core_cstr)");
println!("cargo:rustc-check-cfg=cfg(no_core_num_saturating)");
println!("cargo:rustc-check-cfg=cfg(no_core_try_from)");

233
third_party/rust/serde/src/de/impls.rs vendored
View file

@ -39,7 +39,7 @@ impl<'de> Deserialize<'de> for () {
}
#[cfg(feature = "unstable")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "unstable")))]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
impl<'de> Deserialize<'de> for ! {
fn deserialize<D>(_deserializer: D) -> Result<Self, D::Error>
where
@ -695,7 +695,7 @@ impl<'a, 'de> Visitor<'de> for StringInPlaceVisitor<'a> {
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de> Deserialize<'de> for String {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -839,7 +839,7 @@ impl<'de> Visitor<'de> for CStringVisitor {
}
#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de> Deserialize<'de> for CString {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -868,7 +868,7 @@ macro_rules! forwarded_impl {
forwarded_impl! {
#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
(), Box<CStr>, CString::into_boxed_c_str
}
@ -1086,7 +1086,7 @@ fn nop_reserve<T>(_seq: T, _n: usize) {}
seq_impl!(
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BinaryHeap<T: Ord>,
seq,
BinaryHeap::clear,
@ -1097,7 +1097,7 @@ seq_impl!(
seq_impl!(
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BTreeSet<T: Eq + Ord>,
seq,
BTreeSet::clear,
@ -1108,7 +1108,7 @@ seq_impl!(
seq_impl!(
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
LinkedList<T>,
seq,
LinkedList::clear,
@ -1119,7 +1119,7 @@ seq_impl!(
seq_impl!(
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
HashSet<T: Eq + Hash, S: BuildHasher + Default>,
seq,
HashSet::clear,
@ -1130,7 +1130,7 @@ seq_impl!(
seq_impl!(
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
VecDeque<T>,
seq,
VecDeque::clear,
@ -1142,7 +1142,7 @@ seq_impl!(
////////////////////////////////////////////////////////////////////////////////
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, T> Deserialize<'de> for Vec<T>
where
T: Deserialize<'de>,
@ -1393,82 +1393,103 @@ array_impls! {
macro_rules! tuple_impls {
($($len:tt => ($($n:tt $name:ident)+))+) => {
$(
impl<'de, $($name: Deserialize<'de>),+> Deserialize<'de> for ($($name,)+) {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct TupleVisitor<$($name,)+> {
marker: PhantomData<($($name,)+)>,
}
impl<'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleVisitor<$($name,)+> {
type Value = ($($name,)+);
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(concat!("a tuple of size ", $len))
}
#[inline]
#[allow(non_snake_case)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
$(
let $name = match tri!(seq.next_element()) {
Some(value) => value,
None => return Err(Error::invalid_length($n, &self)),
};
)+
Ok(($($name,)+))
}
}
deserializer.deserialize_tuple($len, TupleVisitor { marker: PhantomData })
}
#[inline]
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct TupleInPlaceVisitor<'a, $($name: 'a,)+>(&'a mut ($($name,)+));
impl<'a, 'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleInPlaceVisitor<'a, $($name,)+> {
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(concat!("a tuple of size ", $len))
}
#[inline]
#[allow(non_snake_case)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
$(
if tri!(seq.next_element_seed(InPlaceSeed(&mut (self.0).$n))).is_none() {
return Err(Error::invalid_length($n, &self));
}
)+
Ok(())
}
}
deserializer.deserialize_tuple($len, TupleInPlaceVisitor(place))
}
#[cfg_attr(docsrs, doc(hidden))]
impl<'de, $($name),+> Deserialize<'de> for ($($name,)+)
where
$($name: Deserialize<'de>,)+
{
tuple_impl_body!($len => ($($n $name)+));
}
)+
}
};
}
macro_rules! tuple_impl_body {
($len:tt => ($($n:tt $name:ident)+)) => {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct TupleVisitor<$($name,)+> {
marker: PhantomData<($($name,)+)>,
}
impl<'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleVisitor<$($name,)+> {
type Value = ($($name,)+);
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(concat!("a tuple of size ", $len))
}
#[inline]
#[allow(non_snake_case)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
$(
let $name = match tri!(seq.next_element()) {
Some(value) => value,
None => return Err(Error::invalid_length($n, &self)),
};
)+
Ok(($($name,)+))
}
}
deserializer.deserialize_tuple($len, TupleVisitor { marker: PhantomData })
}
#[inline]
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct TupleInPlaceVisitor<'a, $($name: 'a,)+>(&'a mut ($($name,)+));
impl<'a, 'de, $($name: Deserialize<'de>),+> Visitor<'de> for TupleInPlaceVisitor<'a, $($name,)+> {
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str(concat!("a tuple of size ", $len))
}
#[inline]
#[allow(non_snake_case)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
$(
if tri!(seq.next_element_seed(InPlaceSeed(&mut (self.0).$n))).is_none() {
return Err(Error::invalid_length($n, &self));
}
)+
Ok(())
}
}
deserializer.deserialize_tuple($len, TupleInPlaceVisitor(place))
}
};
}
#[cfg_attr(docsrs, doc(fake_variadic))]
#[cfg_attr(
docsrs,
doc = "This trait is implemented for tuples up to 16 items long."
)]
impl<'de, T> Deserialize<'de> for (T,)
where
T: Deserialize<'de>,
{
tuple_impl_body!(1 => (0 T));
}
tuple_impls! {
1 => (0 T0)
2 => (0 T0 1 T1)
3 => (0 T0 1 T1 2 T2)
4 => (0 T0 1 T1 2 T2 3 T3)
@ -1546,7 +1567,7 @@ macro_rules! map_impl {
map_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BTreeMap<K: Ord, V>,
map,
BTreeMap::new(),
@ -1554,7 +1575,7 @@ map_impl! {
map_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
HashMap<K: Eq + Hash, V, S: BuildHasher + Default>,
map,
HashMap::with_capacity_and_hasher(size_hint::cautious::<(K, V)>(map.size_hint()), S::default()),
@ -1696,7 +1717,7 @@ macro_rules! deserialize_enum {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de> Deserialize<'de> for net::IpAddr {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -1717,13 +1738,13 @@ impl<'de> Deserialize<'de> for net::IpAddr {
parse_ip_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
net::Ipv4Addr, "IPv4 address", 4
}
parse_ip_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
net::Ipv6Addr, "IPv6 address", 16
}
@ -1750,7 +1771,7 @@ macro_rules! parse_socket_impl {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de> Deserialize<'de> for net::SocketAddr {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -1771,14 +1792,14 @@ impl<'de> Deserialize<'de> for net::SocketAddr {
parse_socket_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
net::SocketAddrV4, "IPv4 socket address",
|(ip, port)| net::SocketAddrV4::new(ip, port),
}
parse_socket_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
net::SocketAddrV6, "IPv6 socket address",
|(ip, port)| net::SocketAddrV6::new(ip, port, 0, 0),
}
@ -1814,7 +1835,7 @@ impl<'a> Visitor<'a> for PathVisitor {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de: 'a, 'a> Deserialize<'de> for &'a Path {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -1869,7 +1890,7 @@ impl<'de> Visitor<'de> for PathBufVisitor {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de> Deserialize<'de> for PathBuf {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -1881,7 +1902,7 @@ impl<'de> Deserialize<'de> for PathBuf {
forwarded_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
(), Box<Path>, PathBuf::into_boxed_path
}
@ -1943,7 +1964,7 @@ impl<'de> Visitor<'de> for OsStringVisitor {
}
#[cfg(all(feature = "std", any(unix, windows)))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", any(unix, windows)))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))]
impl<'de> Deserialize<'de> for OsString {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -1957,30 +1978,30 @@ impl<'de> Deserialize<'de> for OsString {
forwarded_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
(T), Box<T>, Box::new
}
forwarded_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
(T), Box<[T]>, Vec::into_boxed_slice
}
forwarded_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
(), Box<str>, String::into_boxed_str
}
forwarded_impl! {
#[cfg(all(feature = "std", any(unix, windows)))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", any(unix, windows)))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))]
(), Box<OsStr>, OsString::into_boxed_os_str
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, 'a, T> Deserialize<'de> for Cow<'a, T>
where
T: ?Sized + ToOwned,
@ -2003,7 +2024,7 @@ where
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(
doc_cfg,
docsrs,
doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc"))))
)]
impl<'de, T> Deserialize<'de> for RcWeak<T>
@ -2025,7 +2046,7 @@ where
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(
doc_cfg,
docsrs,
doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc"))))
)]
impl<'de, T> Deserialize<'de> for ArcWeak<T>
@ -2073,7 +2094,7 @@ box_forwarded_impl! {
///
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
Rc
}
@ -2086,7 +2107,7 @@ box_forwarded_impl! {
///
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
Arc
}
@ -2110,13 +2131,13 @@ forwarded_impl! {
forwarded_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
(T), Mutex<T>, Mutex::new
}
forwarded_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
(T), RwLock<T>, RwLock::new
}
@ -2271,7 +2292,7 @@ impl<'de> Deserialize<'de> for Duration {
////////////////////////////////////////////////////////////////////////////////
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de> Deserialize<'de> for SystemTime {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
@ -3107,7 +3128,7 @@ macro_rules! atomic_impl {
($($ty:ident $size:expr)*) => {
$(
#[cfg(any(no_target_has_atomic, target_has_atomic = $size))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", target_has_atomic = $size))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", target_has_atomic = $size))))]
impl<'de> Deserialize<'de> for $ty {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where

4
third_party/rust/serde/src/de/mod.rs vendored
View file

@ -1525,7 +1525,7 @@ pub trait Visitor<'de>: Sized {
/// `String`.
#[inline]
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
where
E: Error,
@ -1584,7 +1584,7 @@ pub trait Visitor<'de>: Sized {
/// The default implementation forwards to `visit_bytes` and then drops the
/// `Vec<u8>`.
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
where
E: Error,

24
third_party/rust/serde/src/de/value.rs vendored
View file

@ -112,7 +112,7 @@ impl Debug for Error {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl error::Error for Error {
fn description(&self) -> &str {
&self.err
@ -185,14 +185,14 @@ impl<E> Debug for UnitDeserializer<E> {
/// A deserializer that cannot be instantiated.
#[cfg(feature = "unstable")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "unstable")))]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub struct NeverDeserializer<E> {
never: !,
marker: PhantomData<E>,
}
#[cfg(feature = "unstable")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "unstable")))]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
impl<'de, E> IntoDeserializer<'de, E> for !
where
E: de::Error,
@ -565,7 +565,7 @@ impl<'de, E> Debug for BorrowedStrDeserializer<'de, E> {
/// A deserializer holding a `String`.
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
pub struct StringDeserializer<E> {
value: String,
marker: PhantomData<E>,
@ -582,7 +582,7 @@ impl<E> Clone for StringDeserializer<E> {
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, E> IntoDeserializer<'de, E> for String
where
E: de::Error,
@ -670,7 +670,7 @@ impl<E> Debug for StringDeserializer<E> {
/// A deserializer holding a `Cow<str>`.
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
pub struct CowStrDeserializer<'a, E> {
value: Cow<'a, str>,
marker: PhantomData<E>,
@ -687,7 +687,7 @@ impl<'a, E> Clone for CowStrDeserializer<'a, E> {
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, 'a, E> IntoDeserializer<'de, E> for Cow<'a, str>
where
E: de::Error,
@ -1006,7 +1006,7 @@ where
////////////////////////////////////////////////////////////////////////////////
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, T, E> IntoDeserializer<'de, E> for Vec<T>
where
T: IntoDeserializer<'de, E>,
@ -1020,7 +1020,7 @@ where
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, T, E> IntoDeserializer<'de, E> for BTreeSet<T>
where
T: IntoDeserializer<'de, E> + Eq + Ord,
@ -1034,7 +1034,7 @@ where
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de, T, S, E> IntoDeserializer<'de, E> for HashSet<T, S>
where
T: IntoDeserializer<'de, E> + Eq + Hash,
@ -1421,7 +1421,7 @@ impl Expected for ExpectedInMap {
////////////////////////////////////////////////////////////////////////////////
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl<'de, K, V, E> IntoDeserializer<'de, E> for BTreeMap<K, V>
where
K: IntoDeserializer<'de, E> + Eq + Ord,
@ -1436,7 +1436,7 @@ where
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<'de, K, V, S, E> IntoDeserializer<'de, E> for HashMap<K, V, S>
where
K: IntoDeserializer<'de, E> + Eq + Hash,

7
third_party/rust/serde/src/lib.rs vendored
View file

@ -95,11 +95,12 @@
////////////////////////////////////////////////////////////////////////////////
// Serde types in rustdoc of other crates get linked to here.
#![doc(html_root_url = "https://docs.rs/serde/1.0.201")]
#![doc(html_root_url = "https://docs.rs/serde/1.0.203")]
// Support using Serde without the standard library!
#![cfg_attr(not(feature = "std"), no_std)]
// Show which crate feature enables conditionally compiled APIs in documentation.
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg, rustdoc_internals))]
#![cfg_attr(docsrs, allow(internal_features))]
// Unstable functionality only if the user asks for it. For tracking and
// discussion of these features please refer to this issue:
//
@ -330,7 +331,7 @@ extern crate serde_derive;
/// Derive macro available if serde is built with `features = ["derive"]`.
#[cfg(feature = "serde_derive")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[cfg_attr(docsrs, doc(cfg(feature = "derive")))]
pub use serde_derive::{Deserialize, Serialize};
#[cfg(all(not(no_serde_derive), any(feature = "std", feature = "alloc")))]

108
third_party/rust/serde/src/ser/impls.rs vendored
View file

@ -48,7 +48,7 @@ impl Serialize for str {
}
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl Serialize for String {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -71,7 +71,7 @@ impl<'a> Serialize for fmt::Arguments<'a> {
////////////////////////////////////////////////////////////////////////////////
#[cfg(any(feature = "std", not(no_core_cstr)))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for CStr {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -83,7 +83,7 @@ impl Serialize for CStr {
}
#[cfg(any(feature = "std", all(not(no_core_cstr), feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
impl Serialize for CString {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -232,37 +232,37 @@ macro_rules! seq_impl {
seq_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BinaryHeap<T: Ord>
}
seq_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BTreeSet<T: Ord>
}
seq_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
HashSet<T: Eq + Hash, H: BuildHasher>
}
seq_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
LinkedList<T>
}
seq_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
Vec<T>
}
seq_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
VecDeque<T>
}
@ -371,7 +371,7 @@ impl Serialize for () {
}
#[cfg(feature = "unstable")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "unstable")))]
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
impl Serialize for ! {
fn serialize<S>(&self, _serializer: S) -> Result<S::Ok, S::Error>
where
@ -386,28 +386,46 @@ impl Serialize for ! {
macro_rules! tuple_impls {
($($len:expr => ($($n:tt $name:ident)+))+) => {
$(
#[cfg_attr(docsrs, doc(hidden))]
impl<$($name),+> Serialize for ($($name,)+)
where
$($name: Serialize,)+
{
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut tuple = tri!(serializer.serialize_tuple($len));
$(
tri!(tuple.serialize_element(&self.$n));
)+
tuple.end()
}
tuple_impl_body!($len => ($($n)+));
}
)+
}
};
}
macro_rules! tuple_impl_body {
($len:expr => ($($n:tt)+)) => {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut tuple = tri!(serializer.serialize_tuple($len));
$(
tri!(tuple.serialize_element(&self.$n));
)+
tuple.end()
}
};
}
#[cfg_attr(docsrs, doc(fake_variadic))]
#[cfg_attr(
docsrs,
doc = "This trait is implemented for tuples up to 16 items long."
)]
impl<T> Serialize for (T,)
where
T: Serialize,
{
tuple_impl_body!(1 => (0));
}
tuple_impls! {
1 => (0 T0)
2 => (0 T0 1 T1)
3 => (0 T0 1 T1 2 T2)
4 => (0 T0 1 T1 2 T2 3 T3)
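
The hunk above splits the shared `serialize` body into a `tuple_impl_body!` helper so the one-element tuple gets a hand-written impl carrying `doc(fake_variadic)`, while longer tuples keep coming from the variadic macro. Below is a minimal standalone sketch of that macro-factoring pattern; the trait, macro names, and bodies are illustrative stand-ins, not serde's vendored code.

// Illustrative stand-in, not serde's code: a shared method body lives in one
// macro so both a hand-written `(T,)` impl and the variadic macro reuse it.
trait Describe {
    fn describe(&self) -> String;
}

macro_rules! describe_body {
    ($len:expr => ($($n:tt)+)) => {
        fn describe(&self) -> String {
            let mut parts = Vec::with_capacity($len);
            $(parts.push(format!("{:?}", self.$n));)+
            parts.join(", ")
        }
    };
}

// Hand-written impl for the 1-tuple, reusing the shared body (this is where a
// `doc(fake_variadic)` attribute sits in the real crate).
impl<T: std::fmt::Debug> Describe for (T,) {
    describe_body!(1 => (0));
}

// Longer tuples are still generated variadically from the same body.
macro_rules! describe_impls {
    ($($len:expr => ($($n:tt $name:ident)+))+) => {
        $(
            impl<$($name: std::fmt::Debug),+> Describe for ($($name,)+) {
                describe_body!($len => ($($n)+));
            }
        )+
    };
}

describe_impls! {
    2 => (0 T0 1 T1)
    3 => (0 T0 1 T1 2 T2)
}

fn main() {
    assert_eq!((1,).describe(), "1");
    assert_eq!((1, "a").describe(), "1, \"a\"");
}
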
@ -476,13 +494,13 @@ macro_rules! map_impl {
map_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
BTreeMap<K: Ord, V>
}
map_impl! {
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
HashMap<K: Eq + Hash, V, H: BuildHasher>
}
@ -516,7 +534,7 @@ deref_impl! {
deref_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
<T> Serialize for Box<T> where T: ?Sized + Serialize
}
@ -530,7 +548,7 @@ deref_impl! {
///
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
<T> Serialize for Rc<T> where T: ?Sized + Serialize
}
@ -544,13 +562,13 @@ deref_impl! {
///
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))))]
<T> Serialize for Arc<T> where T: ?Sized + Serialize
}
deref_impl! {
#[cfg(any(feature = "std", feature = "alloc"))]
#[cfg_attr(doc_cfg, doc(cfg(any(feature = "std", feature = "alloc"))))]
#[cfg_attr(docsrs, doc(cfg(any(feature = "std", feature = "alloc"))))]
<'a, T> Serialize for Cow<'a, T> where T: ?Sized + Serialize + ToOwned
}
@ -561,7 +579,7 @@ deref_impl! {
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(
doc_cfg,
docsrs,
doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc"))))
)]
impl<T> Serialize for RcWeak<T>
@ -581,7 +599,7 @@ where
/// [`"rc"`]: https://serde.rs/feature-flags.html#-features-rc
#[cfg(all(feature = "rc", any(feature = "std", feature = "alloc")))]
#[cfg_attr(
doc_cfg,
docsrs,
doc(cfg(all(feature = "rc", any(feature = "std", feature = "alloc"))))
)]
impl<T> Serialize for ArcWeak<T>
@ -660,7 +678,7 @@ where
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<T> Serialize for Mutex<T>
where
T: ?Sized + Serialize,
@ -677,7 +695,7 @@ where
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl<T> Serialize for RwLock<T>
where
T: ?Sized + Serialize,
@ -731,7 +749,7 @@ impl Serialize for Duration {
////////////////////////////////////////////////////////////////////////////////
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for SystemTime {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -776,7 +794,7 @@ macro_rules! serialize_display_bounded_length {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::IpAddr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -847,7 +865,7 @@ fn test_format_u8() {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::Ipv4Addr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -872,7 +890,7 @@ impl Serialize for net::Ipv4Addr {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::Ipv6Addr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -889,7 +907,7 @@ impl Serialize for net::Ipv6Addr {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::SocketAddr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -914,7 +932,7 @@ impl Serialize for net::SocketAddr {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::SocketAddrV4 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -931,7 +949,7 @@ impl Serialize for net::SocketAddrV4 {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for net::SocketAddrV6 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -953,7 +971,7 @@ impl Serialize for net::SocketAddrV6 {
////////////////////////////////////////////////////////////////////////////////
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for Path {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -967,7 +985,7 @@ impl Serialize for Path {
}
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
impl Serialize for PathBuf {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -978,7 +996,7 @@ impl Serialize for PathBuf {
}
#[cfg(all(feature = "std", any(unix, windows)))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", any(unix, windows)))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))]
impl Serialize for OsStr {
#[cfg(unix)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -1001,7 +1019,7 @@ impl Serialize for OsStr {
}
#[cfg(all(feature = "std", any(unix, windows)))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", any(unix, windows)))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", any(unix, windows)))))]
impl Serialize for OsString {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -1060,7 +1078,7 @@ macro_rules! atomic_impl {
($($ty:ident $size:expr)*) => {
$(
#[cfg(any(no_target_has_atomic, target_has_atomic = $size))]
#[cfg_attr(doc_cfg, doc(cfg(all(feature = "std", target_has_atomic = $size))))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "std", target_has_atomic = $size))))]
impl Serialize for $ty {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"c2999021cf1359abaadff19926f19e0639a5df80fc113115b231e3825c0178c6","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/bound.rs":"6c5c20785ac95af9480f8d0de35a7e844cc36a16012f6468db148acd03cb15c2","src/de.rs":"c221ab2b94a5d80dccff74a37f3448b3d695656552b452595dc289c73b12fb2b","src/dummy.rs":"9533dfee23f20d92ea75734c739022820c2787ded0d54f459feacdeb770ec912","src/fragment.rs":"6757cb4c3131d4300f093572efc273c4ab5a20e3e1efb54a311dcfa52d0bd6eb","src/internals/ast.rs":"7dc997e4090033bbd1d0bdd870e8bb87b096b7f66cfd02047f6b85ebdd569b12","src/internals/attr.rs":"6584c0a02de0d17993877303f3cc2c1bccf235257632220421f98082d82d387a","src/internals/case.rs":"10c8dda2b32d8c6c6b63cf09cdc63d02375af7e95ecefe8fecb34f93b65191bb","src/internals/check.rs":"d842eb9912fd29311060b67f3bc62c438eb7b5d86093355acb4de7eee02a0ef8","src/internals/ctxt.rs":"83a4e6fbe0e439d578478883594407e03f2f340541be479bdf0b04a202633a37","src/internals/mod.rs":"ed021ca635c18132a0e5c3d90f21b7f65def0a61e946421a30200b5b9ab6ad43","src/internals/receiver.rs":"fe8a480669511b5edcfe71f5dd290cf72ccec54c9016ec85f2ac59dce538077f","src/internals/respan.rs":"899753859c58ce5f532a3ec4584796a52f13ed5a0533191e48c953ba5c1b52ff","src/internals/symbol.rs":"d619e88caa3c7a09b03014257f2b349ee922290062d9b97b4dd19d0e64532690","src/lib.rs":"71ef6a5edbfc12df36d009d2eb035de7291a5af079b7e3835aef94cdb4eece1f","src/pretend.rs":"7facc10a5b805564dd95735ae11118ec17ca6adcc49a59764e7c920e27b9fc4a","src/ser.rs":"a5638fa1b42b2a0d23cc12649f9180d4132f4cb7cf9edace1a1caed483f609e9","src/this.rs":"87818dc80cbb521b51938a653d09daf10aafc220bb10425948de82ad670fcb85"},"package":"c5e405930b9796f1c00bee880d03fc7e0bb4b9a11afc776885ffe84320da2865"}
{"files":{"Cargo.toml":"165df5f0c76b7421141def7245508ce2b77fcb469582ba6d02d9c911d502cf43","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/bound.rs":"6c5c20785ac95af9480f8d0de35a7e844cc36a16012f6468db148acd03cb15c2","src/de.rs":"c221ab2b94a5d80dccff74a37f3448b3d695656552b452595dc289c73b12fb2b","src/dummy.rs":"9533dfee23f20d92ea75734c739022820c2787ded0d54f459feacdeb770ec912","src/fragment.rs":"6757cb4c3131d4300f093572efc273c4ab5a20e3e1efb54a311dcfa52d0bd6eb","src/internals/ast.rs":"7dc997e4090033bbd1d0bdd870e8bb87b096b7f66cfd02047f6b85ebdd569b12","src/internals/attr.rs":"50b3fca6e7ae1ea23cbe35b3b26b139f0860aa3201aa6ea6fc12db1c17dffeb2","src/internals/case.rs":"10c8dda2b32d8c6c6b63cf09cdc63d02375af7e95ecefe8fecb34f93b65191bb","src/internals/check.rs":"d842eb9912fd29311060b67f3bc62c438eb7b5d86093355acb4de7eee02a0ef8","src/internals/ctxt.rs":"83a4e6fbe0e439d578478883594407e03f2f340541be479bdf0b04a202633a37","src/internals/mod.rs":"ed021ca635c18132a0e5c3d90f21b7f65def0a61e946421a30200b5b9ab6ad43","src/internals/receiver.rs":"710f875da3bad3e2a7fc1df40ab6805bb5e971b6a2a04c1b643b8a0aa29e8496","src/internals/respan.rs":"899753859c58ce5f532a3ec4584796a52f13ed5a0533191e48c953ba5c1b52ff","src/internals/symbol.rs":"d619e88caa3c7a09b03014257f2b349ee922290062d9b97b4dd19d0e64532690","src/lib.rs":"fd26edb1f03ff548a6cc86dc4e43c0b567c1bc0e5e54bb6d13cdc1b60fdcc26e","src/pretend.rs":"7facc10a5b805564dd95735ae11118ec17ca6adcc49a59764e7c920e27b9fc4a","src/ser.rs":"a5638fa1b42b2a0d23cc12649f9180d4132f4cb7cf9edace1a1caed483f609e9","src/this.rs":"87818dc80cbb521b51938a653d09daf10aafc220bb10425948de82ad670fcb85"},"package":"500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba"}

2
third_party/rust/serde_derive/Cargo.toml vendored
View file

@ -13,7 +13,7 @@
edition = "2015"
rust-version = "1.56"
name = "serde_derive"
version = "1.0.201"
version = "1.0.203"
authors = [
"Erick Tryzelaar <erick.tryzelaar@gmail.com>",
"David Tolnay <dtolnay@gmail.com>",

View file

@ -185,8 +185,8 @@ impl Name {
#[derive(Copy, Clone)]
pub struct RenameAllRules {
serialize: RenameRule,
deserialize: RenameRule,
pub serialize: RenameRule,
pub deserialize: RenameRule,
}
impl RenameAllRules {

View file

@ -84,7 +84,7 @@ impl ReplaceReceiver<'_> {
self.visit_type_mut_impl(ty);
return;
};
*ty = self.self_ty(span).into();
*ty = Type::Path(self.self_ty(span));
}
// `Self::Assoc` -> `<Receiver>::Assoc`

2
third_party/rust/serde_derive/src/lib.rs vendored
View file

@ -13,7 +13,7 @@
//!
//! [https://serde.rs/derive.html]: https://serde.rs/derive.html
#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.201")]
#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.203")]
#![cfg_attr(not(check_cfg), allow(unexpected_cfgs))]
// Ignored clippy lints
#![allow(

View file

@ -1 +1 @@
{"files":{"Cargo.toml":"27f9696d64a97df170dc620366a6b6b0b162917a686511e6b878586b04846226","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","src/ast.rs":"9b6cd6b1553483c99cd7e36aa422d37f4353c99b15da55534d28822f7fa7fd08","src/attr.rs":"99fca8c65a1907ee2f43f7ec28ce465bae22336c529bd7148738f9ce35375101","src/expand.rs":"2736a714372a4b81ac5438783dd2c0f656d624bec5cc4089af96ceecaeee011e","src/fmt.rs":"5d1cefc012403c2d4ff7ab2513c0ec559166df4271d5983a6463939b5ec8c3e1","src/generics.rs":"2076cde22271be355a8131a77add4b93f83ab0af4317cd2df5471fffa4f95c66","src/lib.rs":"5eea86c771e643328ad9bc3b881cce4bf9d50adae1b33e0d07645bdd9044003d","src/prop.rs":"5ba613e38430831259f20b258f33d57dcb783fbaeeb49e5faffa7b2a7be99e67","src/span.rs":"430460a4fa0d1fa9c627c1ddd575d2b101778fea84217591e1a93a5f6a2a0132","src/valid.rs":"ac95253944fd360d3578d0643a7baabb2cfa6bf9fbced7a6ce1f7b0529a3bb98"},"package":"d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66"}
{"files":{"Cargo.toml":"2b8cbe5e37d52d2d639a9506834c3c41044ccddb39420f0b7a30c14a1d26bc5f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","src/ast.rs":"9b6cd6b1553483c99cd7e36aa422d37f4353c99b15da55534d28822f7fa7fd08","src/attr.rs":"99fca8c65a1907ee2f43f7ec28ce465bae22336c529bd7148738f9ce35375101","src/expand.rs":"2736a714372a4b81ac5438783dd2c0f656d624bec5cc4089af96ceecaeee011e","src/fmt.rs":"5d1cefc012403c2d4ff7ab2513c0ec559166df4271d5983a6463939b5ec8c3e1","src/generics.rs":"2076cde22271be355a8131a77add4b93f83ab0af4317cd2df5471fffa4f95c66","src/lib.rs":"5eea86c771e643328ad9bc3b881cce4bf9d50adae1b33e0d07645bdd9044003d","src/prop.rs":"5ba613e38430831259f20b258f33d57dcb783fbaeeb49e5faffa7b2a7be99e67","src/span.rs":"430460a4fa0d1fa9c627c1ddd575d2b101778fea84217591e1a93a5f6a2a0132","src/valid.rs":"ac95253944fd360d3578d0643a7baabb2cfa6bf9fbced7a6ce1f7b0529a3bb98"},"package":"46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"}

10
third_party/rust/thiserror-impl/Cargo.toml vendored
View file

@ -13,9 +13,15 @@
edition = "2021"
rust-version = "1.56"
name = "thiserror-impl"
version = "1.0.59"
version = "1.0.61"
authors = ["David Tolnay <dtolnay@gmail.com>"]
build = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Implementation detail of the `thiserror` crate"
readme = false
license = "MIT OR Apache-2.0"
repository = "https://github.com/dtolnay/thiserror"
@ -24,6 +30,8 @@ rustdoc-args = ["--generate-link-to-definition"]
targets = ["x86_64-unknown-linux-gnu"]
[lib]
name = "thiserror_impl"
path = "src/lib.rs"
proc-macro = true
[dependencies.proc-macro2]

File diff suppressed because one or more lines are too long

65
third_party/rust/thiserror/Cargo.toml vendored
View file

@ -13,8 +13,13 @@
edition = "2021"
rust-version = "1.56"
name = "thiserror"
version = "1.0.59"
version = "1.0.61"
authors = ["David Tolnay <dtolnay@gmail.com>"]
build = "build.rs"
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "derive(Error)"
documentation = "https://docs.rs/thiserror"
readme = "README.md"
@ -31,8 +36,64 @@ repository = "https://github.com/dtolnay/thiserror"
rustdoc-args = ["--generate-link-to-definition"]
targets = ["x86_64-unknown-linux-gnu"]
[lib]
name = "thiserror"
path = "src/lib.rs"
[[test]]
name = "test_error"
path = "tests/test_error.rs"
[[test]]
name = "test_source"
path = "tests/test_source.rs"
[[test]]
name = "test_generics"
path = "tests/test_generics.rs"
[[test]]
name = "test_from"
path = "tests/test_from.rs"
[[test]]
name = "test_lints"
path = "tests/test_lints.rs"
[[test]]
name = "test_transparent"
path = "tests/test_transparent.rs"
[[test]]
name = "test_backtrace"
path = "tests/test_backtrace.rs"
[[test]]
name = "test_path"
path = "tests/test_path.rs"
[[test]]
name = "compiletest"
path = "tests/compiletest.rs"
[[test]]
name = "test_deprecated"
path = "tests/test_deprecated.rs"
[[test]]
name = "test_display"
path = "tests/test_display.rs"
[[test]]
name = "test_option"
path = "tests/test_option.rs"
[[test]]
name = "test_expr"
path = "tests/test_expr.rs"
[dependencies.thiserror-impl]
version = "=1.0.59"
version = "=1.0.61"
[dev-dependencies.anyhow]
version = "1.0.73"

3
third_party/rust/thiserror/build.rs vendored
View file

@ -7,6 +7,9 @@ use std::process::{self, Command, Stdio};
fn main() {
println!("cargo:rerun-if-changed=build/probe.rs");
println!("cargo:rustc-check-cfg=cfg(error_generic_member_access)");
println!("cargo:rustc-check-cfg=cfg(thiserror_nightly_testing)");
let error_generic_member_access;
let consider_rustc_bootstrap;
if compile_probe(false) {
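
For context on the added line: `cargo:rustc-check-cfg=cfg(...)` declares a custom cfg to the compiler so the `unexpected_cfgs` lint accepts it; enabling the cfg is a separate step (for thiserror, presumably via `--cfg` in `RUSTFLAGS` on nightly test runs). A minimal build-script sketch of the mechanism, not the vendored script:

// build.rs sketch: declare the custom cfg so `#[cfg(thiserror_nightly_testing)]`
// does not trip the `unexpected_cfgs` lint in downstream builds.
fn main() {
    println!("cargo:rustc-check-cfg=cfg(thiserror_nightly_testing)");
    // Turning the cfg on is separate, e.g. RUSTFLAGS="--cfg thiserror_nightly_testing".
}
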

2
third_party/rust/thiserror/build/probe.rs vendored
View file

@ -4,8 +4,8 @@
#![feature(error_generic_member_access)]
use core::fmt::{self, Debug, Display};
use std::error::{Error, Request};
use std::fmt::{self, Debug, Display};
struct MyError(Thing);
struct Thing;

2
third_party/rust/thiserror/src/aserror.rs vendored
View file

@ -1,5 +1,5 @@
use core::panic::UnwindSafe;
use std::error::Error;
use std::panic::UnwindSafe;
#[doc(hidden)]
pub trait AsDynError<'a>: Sealed {

2
third_party/rust/thiserror/src/display.rs vendored
View file

@ -1,4 +1,4 @@
use std::fmt::Display;
use core::fmt::Display;
use std::path::{self, Path, PathBuf};
#[doc(hidden)]

6
third_party/rust/thiserror/src/lib.rs vendored
View file

@ -62,7 +62,7 @@
//! which may be arbitrary expressions. For example:
//!
//! ```rust
//! # use std::i32;
//! # use core::i32;
//! # use thiserror::Error;
//! #
//! #[derive(Error, Debug)]
@ -129,7 +129,7 @@
//! std::error::Error` will work as a source.
//!
//! ```rust
//! # use std::fmt::{self, Display};
//! # use core::fmt::{self, Display};
//! # use thiserror::Error;
//! #
//! #[derive(Error, Debug)]
@ -228,7 +228,7 @@
//!
//! [`anyhow`]: https://github.com/dtolnay/anyhow
#![doc(html_root_url = "https://docs.rs/thiserror/1.0.59")]
#![doc(html_root_url = "https://docs.rs/thiserror/1.0.61")]
#![allow(
clippy::module_name_repetitions,
clippy::needless_lifetimes,

View file

@ -1,6 +1,6 @@
#![allow(clippy::needless_raw_string_hashes, clippy::uninlined_format_args)]
use std::fmt::{self, Display};
use core::fmt::{self, Display};
use thiserror::Error;
fn assert<T: Display>(expected: &str, value: T) {

View file

@ -1,6 +1,6 @@
#![allow(dead_code)]
use std::fmt::{self, Display};
use core::fmt::{self, Display};
use std::io;
use thiserror::Error;

View file

@ -1,6 +1,6 @@
#![allow(clippy::iter_cloned_collect, clippy::uninlined_format_args)]
use std::fmt::Display;
use core::fmt::Display;
use thiserror::Error;
// Some of the elaborate cases from the rcc codebase, which is a C compiler in

View file

@ -1,6 +1,6 @@
#![allow(clippy::needless_late_init, clippy::uninlined_format_args)]
use std::fmt::{self, Debug, Display};
use core::fmt::{self, Debug, Display};
use thiserror::Error;
pub struct NoFormat;

View file

@ -1,5 +1,5 @@
use core::fmt::Display;
use ref_cast::RefCast;
use std::fmt::Display;
use std::path::{Path, PathBuf};
use thiserror::Error;

View file

@ -1,4 +1,4 @@
use std::fmt::{self, Display};
use core::fmt::{self, Display};
use thiserror::Error;
#[derive(Error, Debug)]

View file

@ -1,4 +1,4 @@
use std::fmt::Debug;
use core::fmt::Debug;
use thiserror::Error;
#[derive(Error, Debug)]

File diff suppressed because one or more lines are too long

6
third_party/rust/wgpu-core/Cargo.toml vendored
View file

@ -53,13 +53,13 @@ smallvec = "1"
thiserror = "1"
[dependencies.bytemuck]
version = "1.14"
version = "1.16"
optional = true
[dependencies.hal]
version = "0.20.0"
path = "../wgpu-hal"
default_features = false
default-features = false
package = "wgpu-hal"
[dependencies.naga]
@ -93,7 +93,6 @@ cfg_aliases = "0.1"
[features]
api_log_info = []
default = ["link"]
dx12 = ["hal/dx12"]
fragile-send-sync-non-atomic-wasm = [
"hal/fragile-send-sync-non-atomic-wasm",
@ -101,7 +100,6 @@ fragile-send-sync-non-atomic-wasm = [
]
gles = ["hal/gles"]
glsl = ["naga/glsl-in"]
link = ["hal/link"]
metal = ["hal/metal"]
renderdoc = ["hal/renderdoc"]
replay = [

View file

@ -888,7 +888,7 @@ unsafe impl<A: HalApi> Sync for RenderBundle<A> {}
impl<A: HalApi> RenderBundle<A> {
/// Actually encode the contents into a native command buffer.
///
/// This is partially duplicating the logic of `command_encoder_run_render_pass`.
/// This is partially duplicating the logic of `render_pass_end`.
/// However the point of this function is to be lighter, since we already had
/// a chance to go through the commands in `render_bundle_encoder_finish`.
///

View file

@ -26,8 +26,6 @@ use wgt::{math::align_to, BufferAddress, BufferUsages, ImageSubresourceRange, Te
pub enum ClearError {
#[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")]
MissingClearTextureFeature,
#[error("Command encoder {0:?} is invalid")]
InvalidCommandEncoder(CommandEncoderId),
#[error("Device {0:?} is invalid")]
InvalidDevice(DeviceId),
#[error("Buffer {0:?} is invalid or destroyed")]
@ -74,6 +72,8 @@ whereas subesource range specified start {subresource_base_array_layer} and coun
},
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
CommandEncoderError(#[from] super::CommandEncoderError),
}
impl Global {
@ -89,8 +89,7 @@ impl Global {
let hub = A::hub(self);
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)
.map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?;
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
@ -183,8 +182,7 @@ impl Global {
let hub = A::hub(self);
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)
.map_err(|_| ClearError::InvalidCommandEncoder(command_encoder_id))?;
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
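
The net effect of this hunk is that `ClearError` drops its hand-rolled `InvalidCommandEncoder` variant and instead wraps `CommandEncoderError` with `#[error(transparent)]` and `#[from]`, so `get_encoder` failures propagate with a plain `?`. A self-contained sketch of that thiserror pattern follows; the type names mirror the ones above but this is not the vendored code.

use thiserror::Error;

#[derive(Debug, Error)]
#[error("command encoder is locked")]
struct CommandEncoderError;

#[derive(Debug, Error)]
enum ClearError {
    #[error(transparent)]
    CommandEncoderError(#[from] CommandEncoderError),
}

fn get_encoder() -> Result<(), CommandEncoderError> {
    Err(CommandEncoderError)
}

fn clear_buffer() -> Result<(), ClearError> {
    get_encoder()?; // `?` converts via the `From` impl generated by `#[from]`
    Ok(())
}

fn main() {
    assert!(matches!(clear_buffer(), Err(ClearError::CommandEncoderError(_))));
}
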

View file

@ -5,15 +5,15 @@ use crate::{
compute_command::{ArcComputeCommand, ComputeCommand},
end_pipeline_statistics_query,
memory_init::{fixup_discarded_surfaces, SurfacesInDiscardState},
BasePass, BasePassRef, BindGroupStateChange, CommandBuffer, CommandEncoderError,
CommandEncoderStatus, MapPassErr, PassErrorScope, QueryUseError, StateChange,
BasePass, BindGroupStateChange, CommandBuffer, CommandEncoderError, CommandEncoderStatus,
MapPassErr, PassErrorScope, QueryUseError, StateChange,
},
device::{DeviceError, MissingDownlevelFlags, MissingFeatures},
error::{ErrorFormatter, PrettyError},
global::Global,
hal_api::HalApi,
hal_label,
id::{self, DeviceId},
id::{self},
init_tracker::MemoryInitKind,
resource::{self, Resource},
snatch::SnatchGuard,
@ -34,9 +34,20 @@ use wgt::{BufferAddress, DynamicOffset};
use std::sync::Arc;
use std::{fmt, mem, str};
use super::DynComputePass;
pub struct ComputePass<A: HalApi> {
base: BasePass<ArcComputeCommand<A>>,
parent_id: id::CommandEncoderId,
/// All pass data & records is stored here.
///
/// If this is `None`, the pass is in the 'ended' state and can no longer be used.
/// Any attempt to record more commands will result in a validation error.
base: Option<BasePass<ArcComputeCommand<A>>>,
/// Parent command buffer that this pass records commands into.
///
/// If it is none, this pass is invalid and any operation on it will return an error.
parent: Option<Arc<CommandBuffer<A>>>,
timestamp_writes: Option<ComputePassTimestampWrites>,
// Resource binding dedupe state.
@ -45,10 +56,11 @@ pub struct ComputePass<A: HalApi> {
}
impl<A: HalApi> ComputePass<A> {
fn new(parent_id: id::CommandEncoderId, desc: &ComputePassDescriptor) -> Self {
/// If the parent command buffer is invalid, the returned pass will be invalid.
fn new(parent: Option<Arc<CommandBuffer<A>>>, desc: &ComputePassDescriptor) -> Self {
Self {
base: BasePass::<ArcComputeCommand<A>>::new(&desc.label),
parent_id,
base: Some(BasePass::new(&desc.label)),
parent,
timestamp_writes: desc.timestamp_writes.cloned(),
current_bind_groups: BindGroupStateChange::new(),
@ -56,14 +68,30 @@ impl<A: HalApi> ComputePass<A> {
}
}
pub fn parent_id(&self) -> id::CommandEncoderId {
self.parent_id
#[inline]
pub fn parent_id(&self) -> Option<id::CommandBufferId> {
self.parent.as_ref().map(|cmd_buf| cmd_buf.as_info().id())
}
#[inline]
pub fn label(&self) -> Option<&str> {
self.base.as_ref().and_then(|base| base.label.as_deref())
}
fn base_mut<'a>(
&'a mut self,
scope: PassErrorScope,
) -> Result<&'a mut BasePass<ArcComputeCommand<A>>, ComputePassError> {
self.base
.as_mut()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)
}
}
impl<A: HalApi> fmt::Debug for ComputePass<A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ComputePass {{ encoder_id: {:?} }}", self.parent_id)
write!(f, "ComputePass {{ parent: {:?} }}", self.parent_id())
}
}
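
The doc comments above describe the new pass lifecycle: `base` is `Some` while the pass is recordable and is taken when the pass ends, so later recording calls fail with `PassEnded` instead of panicking. A simplified, self-contained model of that `Option`-based state (not the vendored types):

#[derive(Debug)]
enum PassError {
    Ended,
}

#[derive(Default)]
struct BasePass {
    commands: Vec<String>,
}

struct ComputePass {
    base: Option<BasePass>,
}

impl ComputePass {
    fn new() -> Self {
        Self { base: Some(BasePass::default()) }
    }

    // Mirrors `base_mut` above: borrow the recording state or report an ended pass.
    fn base_mut(&mut self) -> Result<&mut BasePass, PassError> {
        self.base.as_mut().ok_or(PassError::Ended)
    }

    fn dispatch(&mut self) -> Result<(), PassError> {
        self.base_mut()?.commands.push("dispatch".to_owned());
        Ok(())
    }

    // Mirrors `compute_pass_end`: taking `base` makes any further recording fail.
    fn end(&mut self) -> Result<BasePass, PassError> {
        self.base.take().ok_or(PassError::Ended)
    }
}

fn main() {
    let mut pass = ComputePass::new();
    pass.dispatch().unwrap();
    let recorded = pass.end().unwrap();
    assert_eq!(recorded.commands, ["dispatch"]);
    assert!(pass.dispatch().is_err()); // recording after `end` is an error, not a panic
}
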
@ -108,10 +136,12 @@ pub enum ComputePassErrorInner {
Device(#[from] DeviceError),
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
#[error("Parent encoder is invalid")]
InvalidParentEncoder,
#[error("Bind group at index {0:?} is invalid")]
InvalidBindGroup(u32),
#[error("Device {0:?} is invalid")]
InvalidDevice(DeviceId),
InvalidDevice(id::DeviceId),
#[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u32, max: u32 },
#[error("Compute pipeline {0:?} is invalid")]
@ -140,12 +170,20 @@ pub enum ComputePassErrorInner {
Bind(#[from] BindError),
#[error(transparent)]
PushConstants(#[from] PushConstantUploadError),
#[error("Push constant offset must be aligned to 4 bytes")]
PushConstantOffsetAlignment,
#[error("Push constant size must be aligned to 4 bytes")]
PushConstantSizeAlignment,
#[error("Ran out of push constant space. Don't set 4gb of push constants per ComputePass.")]
PushConstantOutOfMemory,
#[error(transparent)]
QueryUse(#[from] QueryUseError),
#[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
MissingDownlevelFlags(#[from] MissingDownlevelFlags),
#[error("The compute pass has already been ended and no further commands can be recorded")]
PassEnded,
}
impl PrettyError for ComputePassErrorInner {
@ -263,48 +301,75 @@ impl<'a, A: HalApi> State<'a, A> {
// Running the compute pass.
impl Global {
/// Creates a compute pass.
///
/// If creation fails, an invalid pass is returned.
/// Any operation on an invalid pass will return an error.
///
/// If successful, puts the encoder into the [`CommandEncoderStatus::Locked`] state.
pub fn command_encoder_create_compute_pass<A: HalApi>(
&self,
parent_id: id::CommandEncoderId,
encoder_id: id::CommandEncoderId,
desc: &ComputePassDescriptor,
) -> ComputePass<A> {
ComputePass::new(parent_id, desc)
) -> (ComputePass<A>, Option<CommandEncoderError>) {
let hub = A::hub(self);
match CommandBuffer::lock_encoder(hub, encoder_id) {
Ok(cmd_buf) => (ComputePass::new(Some(cmd_buf), desc), None),
Err(err) => (ComputePass::new(None, desc), Some(err)),
}
}
/// Creates a type erased compute pass.
///
/// If creation fails, an invalid pass is returned.
/// Any operation on an invalid pass will return an error.
pub fn command_encoder_create_compute_pass_dyn<A: HalApi>(
&self,
parent_id: id::CommandEncoderId,
encoder_id: id::CommandEncoderId,
desc: &ComputePassDescriptor,
) -> Box<dyn super::DynComputePass> {
Box::new(ComputePass::<A>::new(parent_id, desc))
) -> (Box<dyn DynComputePass>, Option<CommandEncoderError>) {
let (pass, err) = self.command_encoder_create_compute_pass::<A>(encoder_id, desc);
(Box::new(pass), err)
}
pub fn command_encoder_run_compute_pass<A: HalApi>(
pub fn compute_pass_end<A: HalApi>(
&self,
pass: &ComputePass<A>,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
self.command_encoder_run_compute_pass_impl(
pass.parent_id,
pass.base.as_ref(),
pass.timestamp_writes.as_ref(),
)
let scope = PassErrorScope::Pass(pass.parent_id());
let Some(parent) = pass.parent.as_ref() else {
return Err(ComputePassErrorInner::InvalidParentEncoder).map_pass_err(scope);
};
parent.unlock_encoder().map_pass_err(scope)?;
let base = pass
.base
.take()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?;
self.compute_pass_end_impl(parent, base, pass.timestamp_writes.as_ref())
}
#[doc(hidden)]
pub fn command_encoder_run_compute_pass_with_unresolved_commands<A: HalApi>(
pub fn compute_pass_end_with_unresolved_commands<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<ComputeCommand>,
base: BasePass<ComputeCommand>,
timestamp_writes: Option<&ComputePassTimestampWrites>,
) -> Result<(), ComputePassError> {
let resolved_commands =
ComputeCommand::resolve_compute_command_ids(A::hub(self), base.commands)?;
let hub = A::hub(self);
self.command_encoder_run_compute_pass_impl::<A>(
encoder_id,
BasePassRef {
let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id)
.map_pass_err(PassErrorScope::PassEncoder(encoder_id))?;
let commands = ComputeCommand::resolve_compute_command_ids(A::hub(self), &base.commands)?;
self.compute_pass_end_impl::<A>(
&cmd_buf,
BasePass {
label: base.label,
commands: &resolved_commands,
commands,
dynamic_offsets: base.dynamic_offsets,
string_data: base.string_data,
push_constant_data: base.push_constant_data,
@ -313,19 +378,17 @@ impl Global {
)
}
fn command_encoder_run_compute_pass_impl<A: HalApi>(
fn compute_pass_end_impl<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<ArcComputeCommand<A>>,
cmd_buf: &CommandBuffer<A>,
base: BasePass<ArcComputeCommand<A>>,
timestamp_writes: Option<&ComputePassTimestampWrites>,
) -> Result<(), ComputePassError> {
profiling::scope!("CommandEncoder::run_compute_pass");
let pass_scope = PassErrorScope::Pass(encoder_id);
let pass_scope = PassErrorScope::Pass(Some(cmd_buf.as_info().id()));
let hub = A::hub(self);
let cmd_buf: Arc<CommandBuffer<A>> =
CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(pass_scope)?;
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(ComputePassErrorInner::InvalidDevice(
@ -341,7 +404,7 @@ impl Global {
if let Some(ref mut list) = cmd_buf_data.commands {
list.push(crate::device::trace::Command::RunComputePass {
base: BasePass {
label: base.label.map(str::to_string),
label: base.label.clone(),
commands: base.commands.iter().map(Into::into).collect(),
dynamic_offsets: base.dynamic_offsets.to_vec(),
string_data: base.string_data.to_vec(),
@ -429,7 +492,7 @@ impl Global {
.flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS);
let hal_desc = hal::ComputePassDescriptor {
label: hal_label(base.label, self.instance.flags),
label: hal_label(base.label.as_deref(), self.instance.flags),
timestamp_writes,
};
@ -455,9 +518,9 @@ impl Global {
let scope = PassErrorScope::SetBindGroup(bind_group.as_info().id());
let max_bind_groups = cmd_buf.limits.max_bind_groups;
if index >= &max_bind_groups {
if index >= max_bind_groups {
return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
index: *index,
index,
max: max_bind_groups,
})
.map_pass_err(scope);
@ -470,9 +533,9 @@ impl Global {
);
dynamic_offset_count += num_dynamic_offsets;
let bind_group = tracker.bind_groups.insert_single(bind_group.clone());
let bind_group = tracker.bind_groups.insert_single(bind_group);
bind_group
.validate_dynamic_bindings(*index, &temp_offsets, &cmd_buf.limits)
.validate_dynamic_bindings(index, &temp_offsets, &cmd_buf.limits)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(
@ -494,7 +557,7 @@ impl Global {
let entries =
state
.binder
.assign_group(*index as usize, bind_group, &temp_offsets);
.assign_group(index as usize, bind_group, &temp_offsets);
if !entries.is_empty() && pipeline_layout.is_some() {
let pipeline_layout = pipeline_layout.as_ref().unwrap().raw();
for (i, e) in entries.iter().enumerate() {
@ -521,7 +584,7 @@ impl Global {
state.pipeline = Some(pipeline_id);
tracker.compute_pipelines.insert_single(pipeline.clone());
let pipeline = tracker.compute_pipelines.insert_single(pipeline);
unsafe {
raw.set_compute_pipeline(pipeline.raw());
@ -592,7 +655,7 @@ impl Global {
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice =
&base.push_constant_data[(*values_offset as usize)..values_end_offset];
&base.push_constant_data[(values_offset as usize)..values_end_offset];
let pipeline_layout = state
.binder
@ -607,7 +670,7 @@ impl Global {
pipeline_layout
.validate_push_constant_ranges(
wgt::ShaderStages::COMPUTE,
*offset,
offset,
end_offset_bytes,
)
.map_pass_err(scope)?;
@ -616,7 +679,7 @@ impl Global {
raw.set_push_constants(
pipeline_layout.raw(),
wgt::ShaderStages::COMPUTE,
*offset,
offset,
data_slice,
);
}
@ -640,7 +703,7 @@ impl Global {
{
return Err(ComputePassErrorInner::Dispatch(
DispatchError::InvalidGroupSize {
current: *groups,
current: groups,
limit: groups_size_limit,
},
))
@ -648,7 +711,7 @@ impl Global {
}
unsafe {
raw.dispatch(*groups);
raw.dispatch(groups);
}
}
ArcComputeCommand::DispatchIndirect { buffer, offset } => {
@ -675,7 +738,7 @@ impl Global {
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > buffer.size {
return Err(ComputePassErrorInner::IndirectBufferOverrun {
offset: *offset,
offset,
end_offset,
buffer_size: buffer.size,
})
@ -692,8 +755,8 @@ impl Global {
buffer_memory_init_actions.extend(
buffer.initialization_status.read().create_action(
buffer,
*offset..(*offset + stride),
&buffer,
offset..(offset + stride),
MemoryInitKind::NeedsInitializedMemory,
),
);
@ -707,7 +770,7 @@ impl Global {
)
.map_pass_err(scope)?;
unsafe {
raw.dispatch_indirect(buf_raw, *offset);
raw.dispatch_indirect(buf_raw, offset);
}
}
ArcComputeCommand::PushDebugGroup { color: _, len } => {
@ -756,10 +819,10 @@ impl Global {
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES)
.map_pass_err(scope)?;
let query_set = tracker.query_sets.insert_single(query_set.clone());
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_write_timestamp(raw, query_set_id, *query_index, None)
.validate_and_write_timestamp(raw, query_set_id, query_index, None)
.map_pass_err(scope)?;
}
ArcComputeCommand::BeginPipelineStatisticsQuery {
@ -769,13 +832,13 @@ impl Global {
let query_set_id = query_set.as_info().id();
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = tracker.query_sets.insert_single(query_set.clone());
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_begin_pipeline_statistics_query(
raw,
query_set_id,
*query_index,
query_index,
None,
&mut active_query,
)
@ -834,10 +897,17 @@ impl Global {
bind_group_id: id::BindGroupId,
offsets: &[DynamicOffset],
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::SetBindGroup(bind_group_id);
let base = pass
.base
.as_mut()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?; // Can't use base_mut() utility here because of borrow checker.
let redundant = pass.current_bind_groups.set_and_check_redundant(
bind_group_id,
index,
&mut pass.base.dynamic_offsets,
&mut base.dynamic_offsets,
offsets,
);
@ -850,13 +920,11 @@ impl Global {
.bind_groups
.read()
.get(bind_group_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::SetBindGroup(bind_group_id),
inner: ComputePassErrorInner::InvalidBindGroup(index),
})?
.map_err(|_| ComputePassErrorInner::InvalidBindGroup(index))
.map_pass_err(scope)?
.clone();
pass.base.commands.push(ArcComputeCommand::SetBindGroup {
base.commands.push(ArcComputeCommand::SetBindGroup {
index,
num_dynamic_offsets: offsets.len(),
bind_group,
@ -870,7 +938,13 @@ impl Global {
pass: &mut ComputePass<A>,
pipeline_id: id::ComputePipelineId,
) -> Result<(), ComputePassError> {
if pass.current_pipeline.set_and_check_redundant(pipeline_id) {
let redundant = pass.current_pipeline.set_and_check_redundant(pipeline_id);
let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
let base = pass.base_mut(scope)?;
if redundant {
// Do redundant early-out **after** checking whether the pass is ended or not.
return Ok(());
}
@ -879,15 +953,11 @@ impl Global {
.compute_pipelines
.read()
.get(pipeline_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::SetPipelineCompute(pipeline_id),
inner: ComputePassErrorInner::InvalidPipeline(pipeline_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
.push(ArcComputeCommand::SetPipeline(pipeline));
base.commands.push(ArcComputeCommand::SetPipeline(pipeline));
Ok(())
}
@ -897,33 +967,36 @@ impl Global {
pass: &mut ComputePass<A>,
offset: u32,
data: &[u8],
) {
assert_eq!(
offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant offset must be aligned to 4 bytes."
);
assert_eq!(
data.len() as u32 & (wgt::PUSH_CONSTANT_ALIGNMENT - 1),
0,
"Push constant size must be aligned to 4 bytes."
);
let value_offset = pass.base.push_constant_data.len().try_into().expect(
"Ran out of push constant space. Don't set 4gb of push constants per ComputePass.",
); // TODO: make this an error that can be handled
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::SetPushConstant;
let base = pass.base_mut(scope)?;
pass.base.push_constant_data.extend(
if offset & (wgt::PUSH_CONSTANT_ALIGNMENT - 1) != 0 {
return Err(ComputePassErrorInner::PushConstantOffsetAlignment).map_pass_err(scope);
}
if data.len() as u32 & (wgt::PUSH_CONSTANT_ALIGNMENT - 1) != 0 {
return Err(ComputePassErrorInner::PushConstantSizeAlignment).map_pass_err(scope);
}
let value_offset = base
.push_constant_data
.len()
.try_into()
.map_err(|_| ComputePassErrorInner::PushConstantOutOfMemory)
.map_pass_err(scope)?;
base.push_constant_data.extend(
data.chunks_exact(wgt::PUSH_CONSTANT_ALIGNMENT as usize)
.map(|arr| u32::from_ne_bytes([arr[0], arr[1], arr[2], arr[3]])),
);
pass.base
.commands
.push(ArcComputeCommand::<A>::SetPushConstant {
offset,
size_bytes: data.len() as u32,
values_offset: value_offset,
});
base.commands.push(ArcComputeCommand::<A>::SetPushConstant {
offset,
size_bytes: data.len() as u32,
values_offset: value_offset,
});
Ok(())
}
pub fn compute_pass_dispatch_workgroups<A: HalApi>(
@ -932,10 +1005,18 @@ impl Global {
groups_x: u32,
groups_y: u32,
groups_z: u32,
) {
pass.base.commands.push(ArcComputeCommand::<A>::Dispatch([
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::Dispatch {
indirect: false,
pipeline: pass.current_pipeline.last_state,
};
let base = pass.base_mut(scope)?;
base.commands.push(ArcComputeCommand::<A>::Dispatch([
groups_x, groups_y, groups_z,
]));
Ok(())
}
pub fn compute_pass_dispatch_workgroups_indirect<A: HalApi>(
@ -945,21 +1026,21 @@ impl Global {
offset: BufferAddress,
) -> Result<(), ComputePassError> {
let hub = A::hub(self);
let scope = PassErrorScope::Dispatch {
indirect: true,
pipeline: pass.current_pipeline.last_state,
};
let base = pass.base_mut(scope)?;
let buffer = hub
.buffers
.read()
.get(buffer_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::Dispatch {
indirect: true,
pipeline: pass.current_pipeline.last_state,
},
inner: ComputePassErrorInner::InvalidBuffer(buffer_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidBuffer(buffer_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
base.commands
.push(ArcComputeCommand::<A>::DispatchIndirect { buffer, offset });
Ok(())
@ -970,22 +1051,29 @@ impl Global {
pass: &mut ComputePass<A>,
label: &str,
color: u32,
) {
let bytes = label.as_bytes();
pass.base.string_data.extend_from_slice(bytes);
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::PushDebugGroup)?;
pass.base
.commands
.push(ArcComputeCommand::<A>::PushDebugGroup {
color,
len: bytes.len(),
});
let bytes = label.as_bytes();
base.string_data.extend_from_slice(bytes);
base.commands.push(ArcComputeCommand::<A>::PushDebugGroup {
color,
len: bytes.len(),
});
Ok(())
}
pub fn compute_pass_pop_debug_group<A: HalApi>(&self, pass: &mut ComputePass<A>) {
pass.base
.commands
.push(ArcComputeCommand::<A>::PopDebugGroup);
pub fn compute_pass_pop_debug_group<A: HalApi>(
&self,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::PopDebugGroup)?;
base.commands.push(ArcComputeCommand::<A>::PopDebugGroup);
Ok(())
}
pub fn compute_pass_insert_debug_marker<A: HalApi>(
@ -993,16 +1081,19 @@ impl Global {
pass: &mut ComputePass<A>,
label: &str,
color: u32,
) {
let bytes = label.as_bytes();
pass.base.string_data.extend_from_slice(bytes);
) -> Result<(), ComputePassError> {
let base = pass.base_mut(PassErrorScope::InsertDebugMarker)?;
pass.base
.commands
let bytes = label.as_bytes();
base.string_data.extend_from_slice(bytes);
base.commands
.push(ArcComputeCommand::<A>::InsertDebugMarker {
color,
len: bytes.len(),
});
Ok(())
}
pub fn compute_pass_write_timestamp<A: HalApi>(
@ -1011,18 +1102,19 @@ impl Global {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::WriteTimestamp;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::WriteTimestamp,
inner: ComputePassErrorInner::InvalidQuerySet(query_set_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
pass.base.commands.push(ArcComputeCommand::WriteTimestamp {
base.commands.push(ArcComputeCommand::WriteTimestamp {
query_set,
query_index,
});
@ -1036,19 +1128,19 @@ impl Global {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.map_err(|_| ComputePassError {
scope: PassErrorScope::WriteTimestamp,
inner: ComputePassErrorInner::InvalidQuerySet(query_set_id),
})?
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
pass.base
.commands
base.commands
.push(ArcComputeCommand::BeginPipelineStatisticsQuery {
query_set,
query_index,
@ -1057,9 +1149,15 @@ impl Global {
Ok(())
}
pub fn compute_pass_end_pipeline_statistics_query<A: HalApi>(&self, pass: &mut ComputePass<A>) {
pass.base
.commands
pub fn compute_pass_end_pipeline_statistics_query<A: HalApi>(
&self,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::EndPipelineStatisticsQuery;
let base = pass.base_mut(scope)?;
base.commands
.push(ArcComputeCommand::<A>::EndPipelineStatisticsQuery);
Ok(())
}
}
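
Taken together, the compute-pass entry points on `Global` now all return `Result`, and a pass is finished with `compute_pass_end` rather than a `run`-style call on the encoder. A hedged caller-side sketch follows; the import paths and the validity of `global`, `encoder_id`, `pipeline_id`, and `desc` are assumptions, and only the function signatures shown in this diff are relied on.

use wgpu_core::command::{ComputePassDescriptor, ComputePassError};
use wgpu_core::global::Global;
use wgpu_core::hal_api::HalApi;
use wgpu_core::id;

// Records a trivial compute pass; any failure (including an invalid or locked
// encoder at creation time) surfaces as a ComputePassError from the calls below.
fn record_compute_work<A: HalApi>(
    global: &Global,
    encoder_id: id::CommandEncoderId,
    pipeline_id: id::ComputePipelineId,
    desc: &ComputePassDescriptor,
) -> Result<(), ComputePassError> {
    // If creation fails, the returned pass is invalid and the first recording
    // call below reports the error.
    let (mut pass, _creation_error) =
        global.command_encoder_create_compute_pass::<A>(encoder_id, desc);
    global.compute_pass_set_pipeline(&mut pass, pipeline_id)?;
    global.compute_pass_dispatch_workgroups(&mut pass, 64, 1, 1)?;
    // Ending the pass unlocks the parent command encoder again.
    global.compute_pass_end(&mut pass)
}
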

View file

@ -9,7 +9,6 @@ use super::{ComputePass, ComputePassError};
// Practically speaking this allows us merge gfx_select with type erasure:
// The alternative would be to introduce ComputePassId which then first needs to be looked up and then dispatch via gfx_select.
pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
fn run(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn set_bind_group(
&mut self,
context: &global::Global,
@ -22,23 +21,38 @@ pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
context: &global::Global,
pipeline_id: id::ComputePipelineId,
) -> Result<(), ComputePassError>;
fn set_push_constant(&mut self, context: &global::Global, offset: u32, data: &[u8]);
fn set_push_constant(
&mut self,
context: &global::Global,
offset: u32,
data: &[u8],
) -> Result<(), ComputePassError>;
fn dispatch_workgroups(
&mut self,
context: &global::Global,
groups_x: u32,
groups_y: u32,
groups_z: u32,
);
) -> Result<(), ComputePassError>;
fn dispatch_workgroups_indirect(
&mut self,
context: &global::Global,
buffer_id: id::BufferId,
offset: wgt::BufferAddress,
) -> Result<(), ComputePassError>;
fn push_debug_group(&mut self, context: &global::Global, label: &str, color: u32);
fn pop_debug_group(&mut self, context: &global::Global);
fn insert_debug_marker(&mut self, context: &global::Global, label: &str, color: u32);
fn push_debug_group(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError>;
fn pop_debug_group(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn insert_debug_marker(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError>;
fn write_timestamp(
&mut self,
context: &global::Global,
@ -51,14 +65,16 @@ pub trait DynComputePass: std::fmt::Debug + WasmNotSendSync {
query_set_id: id::QuerySetId,
query_index: u32,
) -> Result<(), ComputePassError>;
fn end_pipeline_statistics_query(&mut self, context: &global::Global);
fn end_pipeline_statistics_query(
&mut self,
context: &global::Global,
) -> Result<(), ComputePassError>;
fn end(&mut self, context: &global::Global) -> Result<(), ComputePassError>;
fn label(&self) -> Option<&str>;
}
impl<A: HalApi> DynComputePass for ComputePass<A> {
fn run(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.command_encoder_run_compute_pass(self)
}
fn set_bind_group(
&mut self,
context: &global::Global,
@ -77,7 +93,12 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_set_pipeline(self, pipeline_id)
}
fn set_push_constant(&mut self, context: &global::Global, offset: u32, data: &[u8]) {
fn set_push_constant(
&mut self,
context: &global::Global,
offset: u32,
data: &[u8],
) -> Result<(), ComputePassError> {
context.compute_pass_set_push_constant(self, offset, data)
}
@ -87,7 +108,7 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
groups_x: u32,
groups_y: u32,
groups_z: u32,
) {
) -> Result<(), ComputePassError> {
context.compute_pass_dispatch_workgroups(self, groups_x, groups_y, groups_z)
}
@ -100,15 +121,25 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_dispatch_workgroups_indirect(self, buffer_id, offset)
}
fn push_debug_group(&mut self, context: &global::Global, label: &str, color: u32) {
fn push_debug_group(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError> {
context.compute_pass_push_debug_group(self, label, color)
}
fn pop_debug_group(&mut self, context: &global::Global) {
fn pop_debug_group(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.compute_pass_pop_debug_group(self)
}
fn insert_debug_marker(&mut self, context: &global::Global, label: &str, color: u32) {
fn insert_debug_marker(
&mut self,
context: &global::Global,
label: &str,
color: u32,
) -> Result<(), ComputePassError> {
context.compute_pass_insert_debug_marker(self, label, color)
}
@ -130,7 +161,18 @@ impl<A: HalApi> DynComputePass for ComputePass<A> {
context.compute_pass_begin_pipeline_statistics_query(self, query_set_id, query_index)
}
fn end_pipeline_statistics_query(&mut self, context: &global::Global) {
fn end_pipeline_statistics_query(
&mut self,
context: &global::Global,
) -> Result<(), ComputePassError> {
context.compute_pass_end_pipeline_statistics_query(self)
}
fn end(&mut self, context: &global::Global) -> Result<(), ComputePassError> {
context.compute_pass_end(self)
}
fn label(&self) -> Option<&str> {
self.label()
}
}

114
third_party/rust/wgpu-core/src/command/mod.rs vendored
View file

@ -25,7 +25,6 @@ use self::memory_init::CommandBufferTextureMemoryActions;
use crate::device::{Device, DeviceError};
use crate::error::{ErrorFormatter, PrettyError};
use crate::hub::Hub;
use crate::id::CommandBufferId;
use crate::lock::{rank, Mutex};
use crate::snatch::SnatchGuard;
@ -48,13 +47,26 @@ pub(crate) enum CommandEncoderStatus {
/// Ready to record commands. An encoder's initial state.
///
/// Command building methods like [`command_encoder_clear_buffer`] and
/// [`command_encoder_run_compute_pass`] require the encoder to be in this
/// [`compute_pass_end`] require the encoder to be in this
/// state.
///
/// This corresponds to WebGPU's "open" state.
/// See <https://www.w3.org/TR/webgpu/#encoder-state-open>
///
/// [`command_encoder_clear_buffer`]: Global::command_encoder_clear_buffer
/// [`command_encoder_run_compute_pass`]: Global::command_encoder_run_compute_pass
/// [`compute_pass_end`]: Global::compute_pass_end
Recording,
/// Locked by a render or compute pass.
///
/// This state is entered when a render/compute pass is created,
/// and exited when the pass is ended.
///
/// As long as the command encoder is locked, any command building operation on it will fail
/// and put the encoder into the [`CommandEncoderStatus::Error`] state.
/// See <https://www.w3.org/TR/webgpu/#encoder-state-locked>
Locked,
/// Command recording is complete, and the buffer is ready for submission.
///
/// [`Global::command_encoder_finish`] transitions a
@ -410,6 +422,38 @@ impl<A: HalApi> CommandBuffer<A> {
}
impl<A: HalApi> CommandBuffer<A> {
fn get_encoder_impl(
hub: &Hub<A>,
id: id::CommandEncoderId,
lock_on_acquire: bool,
) -> Result<Arc<Self>, CommandEncoderError> {
let storage = hub.command_buffers.read();
match storage.get(id.into_command_buffer_id()) {
Ok(cmd_buf) => {
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
match cmd_buf_data.status {
CommandEncoderStatus::Recording => {
if lock_on_acquire {
cmd_buf_data.status = CommandEncoderStatus::Locked;
}
Ok(cmd_buf.clone())
}
CommandEncoderStatus::Locked => {
// Any operation on a locked encoder is required to put it into the invalid/error state.
// See https://www.w3.org/TR/webgpu/#encoder-state-locked
cmd_buf_data.encoder.discard();
cmd_buf_data.status = CommandEncoderStatus::Error;
Err(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
}
Err(_) => Err(CommandEncoderError::Invalid),
}
}
/// Return the [`CommandBuffer`] for `id`, for recording new commands.
///
/// In `wgpu_core`, the [`CommandBuffer`] type serves both as encoder and
@ -420,14 +464,37 @@ impl<A: HalApi> CommandBuffer<A> {
hub: &Hub<A>,
id: id::CommandEncoderId,
) -> Result<Arc<Self>, CommandEncoderError> {
let storage = hub.command_buffers.read();
match storage.get(id.into_command_buffer_id()) {
Ok(cmd_buf) => match cmd_buf.data.lock().as_ref().unwrap().status {
CommandEncoderStatus::Recording => Ok(cmd_buf.clone()),
CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
},
Err(_) => Err(CommandEncoderError::Invalid),
let lock_on_acquire = false;
Self::get_encoder_impl(hub, id, lock_on_acquire)
}
/// Return the [`CommandBuffer`] for `id` and if successful puts it into the [`CommandEncoderStatus::Locked`] state.
///
/// See [`CommandBuffer::get_encoder`].
/// Call [`CommandBuffer::unlock_encoder`] to put the [`CommandBuffer`] back into the [`CommandEncoderStatus::Recording`] state.
fn lock_encoder(
hub: &Hub<A>,
id: id::CommandEncoderId,
) -> Result<Arc<Self>, CommandEncoderError> {
let lock_on_acquire = true;
Self::get_encoder_impl(hub, id, lock_on_acquire)
}
/// Unlocks the [`CommandBuffer`] for `id` and puts it back into the [`CommandEncoderStatus::Recording`] state.
///
/// This function is the counterpart to [`CommandBuffer::lock_encoder`].
/// It is only valid to call this function if the encoder is in the [`CommandEncoderStatus::Locked`] state.
fn unlock_encoder(&self) -> Result<(), CommandEncoderError> {
let mut data_lock = self.data.lock();
let status = &mut data_lock.as_mut().unwrap().status;
match *status {
CommandEncoderStatus::Recording => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Locked => {
*status = CommandEncoderStatus::Recording;
Ok(())
}
CommandEncoderStatus::Finished => Err(CommandEncoderError::Invalid),
CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
}
}
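
These two helpers implement the locked-state bookkeeping described in the `CommandEncoderStatus` comments: creating a pass locks the encoder, ending it unlocks it, and touching a locked encoder invalidates it. Below is a simplified, self-contained model of that state machine; it is not the vendored code, which additionally goes through the hub storage and the data mutex.

#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum CommandEncoderStatus {
    Recording,
    Locked,
    Finished,
    Error,
}

#[derive(Debug, PartialEq)]
enum CommandEncoderError {
    Invalid,
    NotRecording,
    Locked,
}

// Mirrors `get_encoder_impl`: with `lock_on_acquire` the encoder moves
// Recording -> Locked; any use of an already-locked encoder poisons it.
fn acquire(
    status: &mut CommandEncoderStatus,
    lock_on_acquire: bool,
) -> Result<(), CommandEncoderError> {
    match *status {
        CommandEncoderStatus::Recording => {
            if lock_on_acquire {
                *status = CommandEncoderStatus::Locked;
            }
            Ok(())
        }
        CommandEncoderStatus::Locked => {
            *status = CommandEncoderStatus::Error;
            Err(CommandEncoderError::Locked)
        }
        CommandEncoderStatus::Finished => Err(CommandEncoderError::NotRecording),
        CommandEncoderStatus::Error => Err(CommandEncoderError::Invalid),
    }
}

// Mirrors `unlock_encoder`: only a Locked encoder may return to Recording.
fn unlock(status: &mut CommandEncoderStatus) -> Result<(), CommandEncoderError> {
    if *status == CommandEncoderStatus::Locked {
        *status = CommandEncoderStatus::Recording;
        Ok(())
    } else {
        Err(CommandEncoderError::Invalid)
    }
}

fn main() {
    let mut status = CommandEncoderStatus::Recording;
    acquire(&mut status, true).unwrap(); // create a pass: Recording -> Locked
    unlock(&mut status).unwrap();        // end the pass:  Locked -> Recording

    acquire(&mut status, true).unwrap();
    // Recording another command while the pass is open fails and poisons the encoder.
    assert_eq!(acquire(&mut status, false), Err(CommandEncoderError::Locked));
    assert_eq!(status, CommandEncoderStatus::Error);
}
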
@ -564,6 +631,8 @@ pub enum CommandEncoderError {
NotRecording,
#[error(transparent)]
Device(#[from] DeviceError),
#[error("Command encoder is locked by a previously created render/compute pass. Before recording any new commands, the pass must be ended.")]
Locked,
}
impl Global {
@ -571,7 +640,7 @@ impl Global {
&self,
encoder_id: id::CommandEncoderId,
_desc: &wgt::CommandBufferDescriptor<Label>,
) -> (CommandBufferId, Option<CommandEncoderError>) {
) -> (id::CommandBufferId, Option<CommandEncoderError>) {
profiling::scope!("CommandEncoder::finish");
let hub = A::hub(self);
@ -592,6 +661,11 @@ impl Global {
None
}
}
CommandEncoderStatus::Locked => {
cmd_buf_data.encoder.discard();
cmd_buf_data.status = CommandEncoderStatus::Error;
Some(CommandEncoderError::Locked)
}
CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording),
CommandEncoderStatus::Error => {
cmd_buf_data.encoder.discard();
@ -805,7 +879,12 @@ pub enum PassErrorScope {
#[error("In a bundle parameter")]
Bundle,
#[error("In a pass parameter")]
Pass(id::CommandEncoderId),
// TODO: To be removed in favor of `Pass`.
// ComputePass is already operating on command buffer instead,
// same should apply to RenderPass in the future.
PassEncoder(id::CommandEncoderId),
#[error("In a pass parameter")]
Pass(Option<id::CommandBufferId>),
#[error("In a set_bind_group command")]
SetBindGroup(id::BindGroupId),
#[error("In a set_pipeline command")]
@ -847,17 +926,24 @@ pub enum PassErrorScope {
indirect: bool,
pipeline: Option<id::ComputePipelineId>,
},
#[error("In a push_debug_group command")]
PushDebugGroup,
#[error("In a pop_debug_group command")]
PopDebugGroup,
#[error("In an insert_debug_marker command")]
InsertDebugMarker,
}
impl PrettyError for PassErrorScope {
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
// This error is not in the error chain, only notes are needed
match *self {
Self::Pass(id) => {
Self::PassEncoder(id) => {
fmt.command_buffer_label(&id.into_command_buffer_id());
}
Self::Pass(Some(id)) => {
fmt.command_buffer_label(&id);
}
Self::SetBindGroup(id) => {
fmt.bind_group_label(&id);
}


@ -247,10 +247,16 @@ impl RenderPass {
}
}
#[inline]
pub fn parent_id(&self) -> id::CommandEncoderId {
self.parent_id
}
#[inline]
pub fn label(&self) -> Option<&str> {
self.base.label.as_deref()
}
#[cfg(feature = "trace")]
pub fn into_command(self) -> crate::device::trace::Command {
crate::device::trace::Command::RunRenderPass {
@ -1303,13 +1309,9 @@ impl<'a, 'd, A: HalApi> RenderPassInfo<'a, 'd, A> {
// Common routines between render/compute
impl Global {
pub fn command_encoder_run_render_pass<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
pass: &RenderPass,
) -> Result<(), RenderPassError> {
self.command_encoder_run_render_pass_impl::<A>(
encoder_id,
pub fn render_pass_end<A: HalApi>(&self, pass: &RenderPass) -> Result<(), RenderPassError> {
self.render_pass_end_impl::<A>(
pass.parent_id(),
pass.base.as_ref(),
&pass.color_targets,
pass.depth_stencil_target.as_ref(),
@ -1319,7 +1321,7 @@ impl Global {
}
#[doc(hidden)]
pub fn command_encoder_run_render_pass_impl<A: HalApi>(
pub fn render_pass_end_impl<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
base: BasePassRef<RenderCommand>,
@ -1339,7 +1341,7 @@ impl Global {
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS);
let label = hal_label(base.label, self.instance.flags);
let pass_scope = PassErrorScope::Pass(encoder_id);
let pass_scope = PassErrorScope::PassEncoder(encoder_id);
let hub = A::hub(self);
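On the caller side, the rename means a render pass is now ended through the pass object alone, since the pass carries its parent encoder id. A sketch of the new entry point; the wrapper function name is illustrative:

```rust
// Before this change:
//     global.command_encoder_run_render_pass::<A>(encoder_id, &pass)?;
// After it, the pass remembers its parent encoder, so ending it needs only
// the pass itself.
fn finish_render_pass<A: HalApi>(
    global: &Global,
    pass: &RenderPass,
) -> Result<(), RenderPassError> {
    global.render_pass_end::<A>(pass)
}
```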

third_party/rust/wgpu-core/src/device/mod.rs (vendored)

@ -444,3 +444,92 @@ impl ImplicitPipelineIds<'_> {
}
}
}
/// Create a validator with the given validation flags.
pub fn create_validator(
features: wgt::Features,
downlevel: wgt::DownlevelFlags,
flags: naga::valid::ValidationFlags,
) -> naga::valid::Validator {
use naga::valid::Capabilities as Caps;
let mut caps = Caps::empty();
caps.set(
Caps::PUSH_CONSTANT,
features.contains(wgt::Features::PUSH_CONSTANTS),
);
caps.set(Caps::FLOAT64, features.contains(wgt::Features::SHADER_F64));
caps.set(
Caps::PRIMITIVE_INDEX,
features.contains(wgt::Features::SHADER_PRIMITIVE_INDEX),
);
caps.set(
Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
features
.contains(wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
);
caps.set(
Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
features
.contains(wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING),
);
// TODO: This needs a proper wgpu feature
caps.set(
Caps::SAMPLER_NON_UNIFORM_INDEXING,
features
.contains(wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING),
);
caps.set(
Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS,
features.contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM),
);
caps.set(Caps::MULTIVIEW, features.contains(wgt::Features::MULTIVIEW));
caps.set(
Caps::EARLY_DEPTH_TEST,
features.contains(wgt::Features::SHADER_EARLY_DEPTH_TEST),
);
caps.set(
Caps::SHADER_INT64,
features.contains(wgt::Features::SHADER_INT64),
);
caps.set(
Caps::MULTISAMPLED_SHADING,
downlevel.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING),
);
caps.set(
Caps::DUAL_SOURCE_BLENDING,
features.contains(wgt::Features::DUAL_SOURCE_BLENDING),
);
caps.set(
Caps::CUBE_ARRAY_TEXTURES,
downlevel.contains(wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES),
);
caps.set(
Caps::SUBGROUP,
features.intersects(wgt::Features::SUBGROUP | wgt::Features::SUBGROUP_VERTEX),
);
caps.set(
Caps::SUBGROUP_BARRIER,
features.intersects(wgt::Features::SUBGROUP_BARRIER),
);
let mut subgroup_stages = naga::valid::ShaderStages::empty();
subgroup_stages.set(
naga::valid::ShaderStages::COMPUTE | naga::valid::ShaderStages::FRAGMENT,
features.contains(wgt::Features::SUBGROUP),
);
subgroup_stages.set(
naga::valid::ShaderStages::VERTEX,
features.contains(wgt::Features::SUBGROUP_VERTEX),
);
let subgroup_operations = if caps.contains(Caps::SUBGROUP) {
use naga::valid::SubgroupOperationSet as S;
S::BASIC | S::VOTE | S::ARITHMETIC | S::BALLOT | S::SHUFFLE | S::SHUFFLE_RELATIVE
} else {
naga::valid::SubgroupOperationSet::empty()
};
let mut validator = naga::valid::Validator::new(flags, caps);
validator.subgroup_stages(subgroup_stages);
validator.subgroup_operations(subgroup_operations);
validator
}
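Because the helper is now a free function over `wgt::Features` and `wgt::DownlevelFlags`, it can be used without a `Device` in hand. A sketch of one possible use, assuming naga's `wgsl-in` front end is enabled; the function name is illustrative:

```rust
// Sketch: validate a parsed naga module with the same capabilities a device
// with `features`/`downlevel` would report.
fn validate_wgsl(
    features: wgt::Features,
    downlevel: wgt::DownlevelFlags,
    source: &str,
) -> naga::valid::ModuleInfo {
    let module = naga::front::wgsl::parse_str(source).expect("WGSL parse error");
    let mut validator =
        create_validator(features, downlevel, naga::valid::ValidationFlags::all());
    validator.validate(&module).expect("validation error")
}
```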


@ -1499,7 +1499,7 @@ impl Global {
.raw
.as_ref()
.unwrap()
.submit(&refs, &submit_surface_textures, Some((fence, submit_index)))
.submit(&refs, &submit_surface_textures, (fence, submit_index))
.map_err(DeviceError::from)?;
}


@ -4,7 +4,7 @@ use crate::{
binding_model::{self, BindGroup, BindGroupLayout, BindGroupLayoutEntryError},
command, conv,
device::{
bgl,
bgl, create_validator,
life::{LifetimeTracker, WaitIdleError},
queue::PendingWrites,
AttachmentData, DeviceLostInvocation, MissingDownlevelFlags, MissingFeatures,
@ -20,7 +20,7 @@ use crate::{
},
instance::Adapter,
lock::{rank, Mutex, MutexGuard, RwLock},
pipeline::{self},
pipeline,
pool::ResourcePool,
registry::Registry,
resource::{
@ -1485,16 +1485,19 @@ impl<A: HalApi> Device<A> {
None
};
let info = self
.create_validator(naga::valid::ValidationFlags::all())
.validate(&module)
.map_err(|inner| {
pipeline::CreateShaderModuleError::Validation(naga::error::ShaderError {
source,
label: desc.label.as_ref().map(|l| l.to_string()),
inner: Box::new(inner),
})
})?;
let info = create_validator(
self.features,
self.downlevel.flags,
naga::valid::ValidationFlags::all(),
)
.validate(&module)
.map_err(|inner| {
pipeline::CreateShaderModuleError::Validation(naga::error::ShaderError {
source,
label: desc.label.as_ref().map(|l| l.to_string()),
inner: Box::new(inner),
})
})?;
let interface =
validation::Interface::new(&module, &info, self.limits.clone(), self.features);
@ -1536,111 +1539,6 @@ impl<A: HalApi> Device<A> {
})
}
/// Create a validator with the given validation flags.
pub fn create_validator(
self: &Arc<Self>,
flags: naga::valid::ValidationFlags,
) -> naga::valid::Validator {
use naga::valid::Capabilities as Caps;
let mut caps = Caps::empty();
caps.set(
Caps::PUSH_CONSTANT,
self.features.contains(wgt::Features::PUSH_CONSTANTS),
);
caps.set(
Caps::FLOAT64,
self.features.contains(wgt::Features::SHADER_F64),
);
caps.set(
Caps::PRIMITIVE_INDEX,
self.features
.contains(wgt::Features::SHADER_PRIMITIVE_INDEX),
);
caps.set(
Caps::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING,
),
);
// TODO: This needs a proper wgpu feature
caps.set(
Caps::SAMPLER_NON_UNIFORM_INDEXING,
self.features.contains(
wgt::Features::SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING,
),
);
caps.set(
Caps::STORAGE_TEXTURE_16BIT_NORM_FORMATS,
self.features
.contains(wgt::Features::TEXTURE_FORMAT_16BIT_NORM),
);
caps.set(
Caps::MULTIVIEW,
self.features.contains(wgt::Features::MULTIVIEW),
);
caps.set(
Caps::EARLY_DEPTH_TEST,
self.features
.contains(wgt::Features::SHADER_EARLY_DEPTH_TEST),
);
caps.set(
Caps::SHADER_INT64,
self.features.contains(wgt::Features::SHADER_INT64),
);
caps.set(
Caps::MULTISAMPLED_SHADING,
self.downlevel
.flags
.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING),
);
caps.set(
Caps::DUAL_SOURCE_BLENDING,
self.features.contains(wgt::Features::DUAL_SOURCE_BLENDING),
);
caps.set(
Caps::CUBE_ARRAY_TEXTURES,
self.downlevel
.flags
.contains(wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES),
);
caps.set(
Caps::SUBGROUP,
self.features
.intersects(wgt::Features::SUBGROUP | wgt::Features::SUBGROUP_VERTEX),
);
caps.set(
Caps::SUBGROUP_BARRIER,
self.features.intersects(wgt::Features::SUBGROUP_BARRIER),
);
let mut subgroup_stages = naga::valid::ShaderStages::empty();
subgroup_stages.set(
naga::valid::ShaderStages::COMPUTE | naga::valid::ShaderStages::FRAGMENT,
self.features.contains(wgt::Features::SUBGROUP),
);
subgroup_stages.set(
naga::valid::ShaderStages::VERTEX,
self.features.contains(wgt::Features::SUBGROUP_VERTEX),
);
let subgroup_operations = if caps.contains(Caps::SUBGROUP) {
use naga::valid::SubgroupOperationSet as S;
S::BASIC | S::VOTE | S::ARITHMETIC | S::BALLOT | S::SHUFFLE | S::SHUFFLE_RELATIVE
} else {
naga::valid::SubgroupOperationSet::empty()
};
let mut validator = naga::valid::Validator::new(flags, caps);
validator.subgroup_stages(subgroup_stages);
validator.subgroup_operations(subgroup_operations);
validator
}
#[allow(unused_unsafe)]
pub(crate) unsafe fn create_shader_module_spirv<'a>(
self: &Arc<Self>,
@ -2839,6 +2737,7 @@ impl<A: HalApi> Device<A> {
entry_point: final_entry_point_name.as_ref(),
constants: desc.stage.constants.as_ref(),
zero_initialize_workgroup_memory: desc.stage.zero_initialize_workgroup_memory,
vertex_pulling_transform: false,
},
cache: cache.as_ref().and_then(|it| it.raw.as_ref()),
};
@ -3267,6 +3166,7 @@ impl<A: HalApi> Device<A> {
entry_point: &vertex_entry_point_name,
constants: stage_desc.constants.as_ref(),
zero_initialize_workgroup_memory: stage_desc.zero_initialize_workgroup_memory,
vertex_pulling_transform: stage_desc.vertex_pulling_transform,
}
};
@ -3330,6 +3230,7 @@ impl<A: HalApi> Device<A> {
zero_initialize_workgroup_memory: fragment_state
.stage
.zero_initialize_workgroup_memory,
vertex_pulling_transform: false,
})
}
None => None,


@ -61,10 +61,10 @@ impl TextureInitTracker {
&self,
action: &TextureInitTrackerAction<A>,
) -> Option<TextureInitTrackerAction<A>> {
let mut mip_range_start = std::usize::MAX;
let mut mip_range_end = std::usize::MIN;
let mut layer_range_start = std::u32::MAX;
let mut layer_range_end = std::u32::MIN;
let mut mip_range_start = usize::MAX;
let mut mip_range_end = usize::MIN;
let mut layer_range_start = u32::MAX;
let mut layer_range_end = u32::MIN;
for (i, mip_tracker) in self
.mips

third_party/rust/wgpu-core/src/pipeline.rs (vendored)

@ -166,6 +166,8 @@ pub struct ProgrammableStageDescriptor<'a> {
/// This is required by the WebGPU spec, but may have overhead which can be avoided
/// for cross-platform applications
pub zero_initialize_workgroup_memory: bool,
/// Should the pipeline attempt to transform vertex shaders to use vertex pulling.
pub vertex_pulling_transform: bool,
}
/// Number of implicit bind groups derived at pipeline creation.

third_party/rust/wgpu-core/src/present.rs (vendored)

@ -154,17 +154,20 @@ impl Global {
parent_id: surface_id,
});
}
#[cfg(not(feature = "trace"))]
let _ = device;
let fence_guard = device.fence.read();
let fence = fence_guard.as_ref().unwrap();
let suf = A::surface_as_hal(surface.as_ref());
let (texture_id, status) = match unsafe {
suf.unwrap()
.acquire_texture(Some(std::time::Duration::from_millis(
FRAME_TIMEOUT_MS as u64,
)))
suf.unwrap().acquire_texture(
Some(std::time::Duration::from_millis(FRAME_TIMEOUT_MS as u64)),
fence,
)
} {
Ok(Some(ast)) => {
drop(fence_guard);
let texture_desc = wgt::TextureDescriptor {
label: (),
size: wgt::Extent3d {

third_party/rust/wgpu-core/src/registry.rs (vendored)

@ -176,8 +176,14 @@ impl<T: Resource> Registry<T> {
let guard = self.storage.read();
let type_name = guard.kind();
match guard.get(id) {
Ok(res) => {
// Using `get` over `try_get` is fine for the most part.
// However, there are corner cases where a resource still holds an Arc
// to another resource that was already dropped explicitly from the registry.
// That resource is now in an invalid state, likely causing an error that led
// us here, trying to print its label but failing because the id is now vacant.
match guard.try_get(id) {
Ok(Some(res)) => {
let label = res.label();
if label.is_empty() {
format!("<{}-{:?}>", type_name, id.unzip())
@ -185,7 +191,7 @@ impl<T: Resource> Registry<T> {
label.to_owned()
}
}
Err(_) => format!(
_ => format!(
"<Invalid-{} label={}>",
type_name,
guard.label_for_invalid_id(id)

third_party/rust/wgpu-core/src/resource.rs (vendored)

@ -932,6 +932,28 @@ impl<A: HalApi> Texture<A> {
}
impl Global {
/// # Safety
///
/// - The raw buffer handle must not be manually destroyed
pub unsafe fn buffer_as_hal<A: HalApi, F: FnOnce(Option<&A::Buffer>) -> R, R>(
&self,
id: BufferId,
hal_buffer_callback: F,
) -> R {
profiling::scope!("Buffer::as_hal");
let hub = A::hub(self);
let buffer_opt = { hub.buffers.try_get(id).ok().flatten() };
let buffer = buffer_opt.as_ref().unwrap();
let hal_buffer = {
let snatch_guard = buffer.device.snatchable_lock.read();
buffer.raw(&snatch_guard)
};
hal_buffer_callback(hal_buffer)
}
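A sketch of calling the new accessor; the backend choice (Vulkan) and the helper name are illustrative, and the callback here only checks whether the underlying hal buffer still exists:

```rust
// Sketch: assumes wgpu-core is built with the Vulkan backend enabled.
fn buffer_is_alive(global: &Global, buffer_id: BufferId) -> bool {
    unsafe {
        global.buffer_as_hal::<hal::api::Vulkan, _, _>(buffer_id, |hal_buffer| {
            hal_buffer.is_some()
        })
    }
}
```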
/// # Safety
///
/// - The raw texture handle must not be manually destroyed


@ -915,15 +915,6 @@ impl Interface {
class,
},
naga::TypeInner::Sampler { comparison } => ResourceType::Sampler { comparison },
naga::TypeInner::Array { stride, size, .. } => {
let size = match size {
naga::ArraySize::Constant(size) => size.get() * stride,
naga::ArraySize::Dynamic => stride,
};
ResourceType::Buffer {
size: wgt::BufferSize::new(size as u64).unwrap(),
}
}
ref other => ResourceType::Buffer {
size: wgt::BufferSize::new(other.size(module.to_ctx()) as u64).unwrap(),
},

File diffs hidden because one or more lines are too long

third_party/rust/wgpu-hal/Cargo.toml (vendored)

@ -96,7 +96,6 @@ features = ["android-native-activity"]
cfg_aliases = "0.1"
[features]
default = ["link"]
device_lost_panic = []
dx12 = [
"naga/hlsl-out",
@ -124,7 +123,6 @@ gles = [
"winapi/libloaderapi",
]
internal_error_panic = []
link = ["metal/link"]
metal = [
"naga/msl-out",
"dep:block",
@ -245,7 +243,7 @@ features = [
"public-winapi",
]
optional = true
default_features = false
default-features = false
[target."cfg(windows)".dependencies.hassle-rs]
version = "0.11"


@ -22,7 +22,6 @@ const MAX_BUNNIES: usize = 1 << 20;
const BUNNY_SIZE: f32 = 0.15 * 256.0;
const GRAVITY: f32 = -9.8 * 100.0;
const MAX_VELOCITY: f32 = 750.0;
const COMMAND_BUFFER_PER_CONTEXT: usize = 100;
const DESIRED_MAX_LATENCY: u32 = 2;
#[repr(C)]
@ -254,6 +253,7 @@ impl<A: hal::Api> Example<A> {
entry_point: "vs_main",
constants: &constants,
zero_initialize_workgroup_memory: true,
vertex_pulling_transform: false,
},
vertex_buffers: &[],
fragment_stage: Some(hal::ProgrammableStage {
@ -261,6 +261,7 @@ impl<A: hal::Api> Example<A> {
entry_point: "fs_main",
constants: &constants,
zero_initialize_workgroup_memory: true,
vertex_pulling_transform: false,
}),
primitive: wgt::PrimitiveState {
topology: wgt::PrimitiveTopology::TriangleStrip,
@ -496,7 +497,7 @@ impl<A: hal::Api> Example<A> {
let mut fence = device.create_fence().unwrap();
let init_cmd = cmd_encoder.end_encoding().unwrap();
queue
.submit(&[&init_cmd], &[], Some((&mut fence, init_fence_value)))
.submit(&[&init_cmd], &[], (&mut fence, init_fence_value))
.unwrap();
device.wait(&fence, init_fence_value, !0).unwrap();
device.destroy_buffer(staging_buffer);
@ -548,7 +549,7 @@ impl<A: hal::Api> Example<A> {
{
let ctx = &mut self.contexts[self.context_index];
self.queue
.submit(&[], &[], Some((&mut ctx.fence, ctx.fence_value)))
.submit(&[], &[], (&mut ctx.fence, ctx.fence_value))
.unwrap();
}
@ -648,7 +649,13 @@ impl<A: hal::Api> Example<A> {
let ctx = &mut self.contexts[self.context_index];
let surface_tex = unsafe { self.surface.acquire_texture(None).unwrap().unwrap().texture };
let surface_tex = unsafe {
self.surface
.acquire_texture(None, &ctx.fence)
.unwrap()
.unwrap()
.texture
};
let target_barrier0 = hal::TextureBarrier {
texture: surface_tex.borrow(),
@ -716,7 +723,6 @@ impl<A: hal::Api> Example<A> {
}
ctx.frames_recorded += 1;
let do_fence = ctx.frames_recorded > COMMAND_BUFFER_PER_CONTEXT;
let target_barrier1 = hal::TextureBarrier {
texture: surface_tex.borrow(),
@ -730,45 +736,42 @@ impl<A: hal::Api> Example<A> {
unsafe {
let cmd_buf = ctx.encoder.end_encoding().unwrap();
let fence_param = if do_fence {
Some((&mut ctx.fence, ctx.fence_value))
} else {
None
};
self.queue
.submit(&[&cmd_buf], &[&surface_tex], fence_param)
.submit(
&[&cmd_buf],
&[&surface_tex],
(&mut ctx.fence, ctx.fence_value),
)
.unwrap();
self.queue.present(&self.surface, surface_tex).unwrap();
ctx.used_cmd_bufs.push(cmd_buf);
ctx.used_views.push(surface_tex_view);
};
if do_fence {
log::debug!("Context switch from {}", self.context_index);
let old_fence_value = ctx.fence_value;
if self.contexts.len() == 1 {
let hal_desc = hal::CommandEncoderDescriptor {
label: None,
queue: &self.queue,
};
self.contexts.push(unsafe {
ExecutionContext {
encoder: self.device.create_command_encoder(&hal_desc).unwrap(),
fence: self.device.create_fence().unwrap(),
fence_value: 0,
used_views: Vec::new(),
used_cmd_bufs: Vec::new(),
frames_recorded: 0,
}
});
}
self.context_index = (self.context_index + 1) % self.contexts.len();
let next = &mut self.contexts[self.context_index];
unsafe {
next.wait_and_clear(&self.device);
}
next.fence_value = old_fence_value + 1;
log::debug!("Context switch from {}", self.context_index);
let old_fence_value = ctx.fence_value;
if self.contexts.len() == 1 {
let hal_desc = hal::CommandEncoderDescriptor {
label: None,
queue: &self.queue,
};
self.contexts.push(unsafe {
ExecutionContext {
encoder: self.device.create_command_encoder(&hal_desc).unwrap(),
fence: self.device.create_fence().unwrap(),
fence_value: 0,
used_views: Vec::new(),
used_cmd_bufs: Vec::new(),
frames_recorded: 0,
}
});
}
self.context_index = (self.context_index + 1) % self.contexts.len();
let next = &mut self.contexts[self.context_index];
unsafe {
next.wait_and_clear(&self.device);
}
next.fence_value = old_fence_value + 1;
}
}


@ -156,6 +156,7 @@ fn fill_screen(exposed: &hal::ExposedAdapter<hal::api::Gles>, width: u32, height
})
.unwrap()
};
let mut fence = unsafe { od.device.create_fence().unwrap() };
let rp_desc = hal::RenderPassDescriptor {
label: None,
extent: wgt::Extent3d {
@ -183,6 +184,6 @@ fn fill_screen(exposed: &hal::ExposedAdapter<hal::api::Gles>, width: u32, height
encoder.begin_render_pass(&rp_desc);
encoder.end_render_pass();
let cmd_buf = encoder.end_encoding().unwrap();
od.queue.submit(&[&cmd_buf], &[], None).unwrap();
od.queue.submit(&[&cmd_buf], &[], (&mut fence, 0)).unwrap();
}
}


@ -13,7 +13,6 @@ use std::{
};
use winit::window::WindowButtons;
const COMMAND_BUFFER_PER_CONTEXT: usize = 100;
const DESIRED_MAX_LATENCY: u32 = 2;
/// [D3D12_RAYTRACING_INSTANCE_DESC](https://microsoft.github.io/DirectX-Specs/d3d/Raytracing.html#d3d12_raytracing_instance_desc)
@ -373,6 +372,7 @@ impl<A: hal::Api> Example<A> {
entry_point: "main",
constants: &Default::default(),
zero_initialize_workgroup_memory: true,
vertex_pulling_transform: false,
},
cache: None,
})
@ -758,7 +758,7 @@ impl<A: hal::Api> Example<A> {
let mut fence = device.create_fence().unwrap();
let init_cmd = cmd_encoder.end_encoding().unwrap();
queue
.submit(&[&init_cmd], &[], Some((&mut fence, init_fence_value)))
.submit(&[&init_cmd], &[], (&mut fence, init_fence_value))
.unwrap();
device.wait(&fence, init_fence_value, !0).unwrap();
cmd_encoder.reset_all(iter::once(init_cmd));
@ -807,7 +807,13 @@ impl<A: hal::Api> Example<A> {
fn render(&mut self) {
let ctx = &mut self.contexts[self.context_index];
let surface_tex = unsafe { self.surface.acquire_texture(None).unwrap().unwrap().texture };
let surface_tex = unsafe {
self.surface
.acquire_texture(None, &ctx.fence)
.unwrap()
.unwrap()
.texture
};
let target_barrier0 = hal::TextureBarrier {
texture: surface_tex.borrow(),
@ -908,7 +914,6 @@ impl<A: hal::Api> Example<A> {
}
ctx.frames_recorded += 1;
let do_fence = ctx.frames_recorded > COMMAND_BUFFER_PER_CONTEXT;
let target_barrier1 = hal::TextureBarrier {
texture: surface_tex.borrow(),
@ -958,45 +963,42 @@ impl<A: hal::Api> Example<A> {
unsafe {
let cmd_buf = ctx.encoder.end_encoding().unwrap();
let fence_param = if do_fence {
Some((&mut ctx.fence, ctx.fence_value))
} else {
None
};
self.queue
.submit(&[&cmd_buf], &[&surface_tex], fence_param)
.submit(
&[&cmd_buf],
&[&surface_tex],
(&mut ctx.fence, ctx.fence_value),
)
.unwrap();
self.queue.present(&self.surface, surface_tex).unwrap();
ctx.used_cmd_bufs.push(cmd_buf);
ctx.used_views.push(surface_tex_view);
};
if do_fence {
log::info!("Context switch from {}", self.context_index);
let old_fence_value = ctx.fence_value;
if self.contexts.len() == 1 {
let hal_desc = hal::CommandEncoderDescriptor {
label: None,
queue: &self.queue,
};
self.contexts.push(unsafe {
ExecutionContext {
encoder: self.device.create_command_encoder(&hal_desc).unwrap(),
fence: self.device.create_fence().unwrap(),
fence_value: 0,
used_views: Vec::new(),
used_cmd_bufs: Vec::new(),
frames_recorded: 0,
}
});
}
self.context_index = (self.context_index + 1) % self.contexts.len();
let next = &mut self.contexts[self.context_index];
unsafe {
next.wait_and_clear(&self.device);
}
next.fence_value = old_fence_value + 1;
log::info!("Context switch from {}", self.context_index);
let old_fence_value = ctx.fence_value;
if self.contexts.len() == 1 {
let hal_desc = hal::CommandEncoderDescriptor {
label: None,
queue: &self.queue,
};
self.contexts.push(unsafe {
ExecutionContext {
encoder: self.device.create_command_encoder(&hal_desc).unwrap(),
fence: self.device.create_fence().unwrap(),
fence_value: 0,
used_views: Vec::new(),
used_cmd_bufs: Vec::new(),
frames_recorded: 0,
}
});
}
self.context_index = (self.context_index + 1) % self.contexts.len();
let next = &mut self.contexts[self.context_index];
unsafe {
next.wait_and_clear(&self.device);
}
next.fence_value = old_fence_value + 1;
}
fn exit(mut self) {
@ -1004,7 +1006,7 @@ impl<A: hal::Api> Example<A> {
{
let ctx = &mut self.contexts[self.context_index];
self.queue
.submit(&[], &[], Some((&mut ctx.fence, ctx.fence_value)))
.submit(&[], &[], (&mut ctx.fence, ctx.fence_value))
.unwrap();
}

third_party/rust/wgpu-hal/src/dx12/mod.rs (vendored)

@ -857,6 +857,7 @@ impl crate::Surface for Surface {
unsafe fn acquire_texture(
&self,
timeout: Option<std::time::Duration>,
_fence: &Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
let mut swapchain = self.swap_chain.write();
let sc = swapchain.as_mut().unwrap();
@ -895,7 +896,7 @@ impl crate::Queue for Queue {
&self,
command_buffers: &[&CommandBuffer],
_surface_textures: &[&Texture],
signal_fence: Option<(&mut Fence, crate::FenceValue)>,
(signal_fence, signal_value): (&mut Fence, crate::FenceValue),
) -> Result<(), crate::DeviceError> {
let mut temp_lists = self.temp_lists.lock();
temp_lists.clear();
@ -908,11 +909,9 @@ impl crate::Queue for Queue {
self.raw.execute_command_lists(&temp_lists);
}
if let Some((fence, value)) = signal_fence {
self.raw
.signal(&fence.raw, value)
.into_device_result("Signal fence")?;
}
self.raw
.signal(&signal_fence.raw, signal_value)
.into_device_result("Signal fence")?;
// Note the lack of synchronization here between the main Direct queue
// and the dedicated presentation queue. This is automatically handled

third_party/rust/wgpu-hal/src/empty.rs (vendored)

@ -75,6 +75,7 @@ impl crate::Surface for Context {
unsafe fn acquire_texture(
&self,
timeout: Option<std::time::Duration>,
fence: &Resource,
) -> Result<Option<crate::AcquiredSurfaceTexture<Api>>, crate::SurfaceError> {
Ok(None)
}
@ -114,7 +115,7 @@ impl crate::Queue for Context {
&self,
command_buffers: &[&Resource],
surface_textures: &[&Resource],
signal_fence: Option<(&mut Resource, crate::FenceValue)>,
signal_fence: (&mut Resource, crate::FenceValue),
) -> DeviceResult<()> {
Ok(())
}

third_party/rust/wgpu-hal/src/gles/adapter.rs (vendored)

@ -179,33 +179,13 @@ impl super::Adapter {
0
};
let driver;
let driver_info;
if version.starts_with("WebGL ") || version.starts_with("OpenGL ") {
let es_sig = " ES";
match version.find(es_sig) {
Some(pos) => {
driver = version[..pos + es_sig.len()].to_owned();
driver_info = version[pos + es_sig.len() + 1..].to_owned();
}
None => {
let pos = version.find(' ').unwrap();
driver = version[..pos].to_owned();
driver_info = version[pos + 1..].to_owned();
}
}
} else {
driver = "OpenGL".to_owned();
driver_info = version;
}
wgt::AdapterInfo {
name: renderer_orig,
vendor: vendor_id,
device: 0,
device_type: inferred_device_type,
driver,
driver_info,
driver: "".to_owned(),
driver_info: version,
backend: wgt::Backend::Gl,
}
}
@ -797,7 +777,7 @@ impl super::Adapter {
},
max_compute_workgroups_per_dimension,
max_buffer_size: i32::MAX as u64,
max_non_sampler_bindings: std::u32::MAX,
max_non_sampler_bindings: u32::MAX,
};
let mut workarounds = super::Workarounds::empty();

third_party/rust/wgpu-hal/src/gles/egl.rs (vendored)

@ -1226,17 +1226,15 @@ impl crate::Surface for Surface {
let native_window_ptr = match (self.wsi.kind, self.raw_window_handle) {
(WindowKind::Unknown | WindowKind::X11, Rwh::Xlib(handle)) => {
temp_xlib_handle = handle.window;
&mut temp_xlib_handle as *mut _ as *mut std::ffi::c_void
}
(WindowKind::AngleX11, Rwh::Xlib(handle)) => {
handle.window as *mut std::ffi::c_void
&mut temp_xlib_handle as *mut _ as *mut ffi::c_void
}
(WindowKind::AngleX11, Rwh::Xlib(handle)) => handle.window as *mut ffi::c_void,
(WindowKind::Unknown | WindowKind::X11, Rwh::Xcb(handle)) => {
temp_xcb_handle = handle.window;
&mut temp_xcb_handle as *mut _ as *mut std::ffi::c_void
&mut temp_xcb_handle as *mut _ as *mut ffi::c_void
}
(WindowKind::AngleX11, Rwh::Xcb(handle)) => {
handle.window.get() as *mut std::ffi::c_void
handle.window.get() as *mut ffi::c_void
}
(WindowKind::Unknown, Rwh::AndroidNdk(handle)) => {
handle.a_native_window.as_ptr()
@ -1252,9 +1250,9 @@ impl crate::Surface for Surface {
window
}
#[cfg(Emscripten)]
(WindowKind::Unknown, Rwh::Web(handle)) => handle.id as *mut std::ffi::c_void,
(WindowKind::Unknown, Rwh::Web(handle)) => handle.id as *mut ffi::c_void,
(WindowKind::Unknown, Rwh::Win32(handle)) => {
handle.hwnd.get() as *mut std::ffi::c_void
handle.hwnd.get() as *mut ffi::c_void
}
(WindowKind::Unknown, Rwh::AppKit(handle)) => {
#[cfg(not(target_os = "macos"))]
@ -1434,6 +1432,7 @@ impl crate::Surface for Surface {
unsafe fn acquire_texture(
&self,
_timeout_ms: Option<Duration>, //TODO
_fence: &super::Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let swapchain = self.swapchain.read();
let sc = swapchain.as_ref().unwrap();

third_party/rust/wgpu-hal/src/gles/queue.rs (vendored)

@ -1740,7 +1740,7 @@ impl crate::Queue for super::Queue {
&self,
command_buffers: &[&super::CommandBuffer],
_surface_textures: &[&super::Texture],
signal_fence: Option<(&mut super::Fence, crate::FenceValue)>,
(signal_fence, signal_value): (&mut super::Fence, crate::FenceValue),
) -> Result<(), crate::DeviceError> {
let shared = Arc::clone(&self.shared);
let gl = &shared.context.lock();
@ -1774,12 +1774,10 @@ impl crate::Queue for super::Queue {
}
}
if let Some((fence, value)) = signal_fence {
fence.maintain(gl);
let sync = unsafe { gl.fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) }
.map_err(|_| crate::DeviceError::OutOfMemory)?;
fence.pending.push((value, sync));
}
signal_fence.maintain(gl);
let sync = unsafe { gl.fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) }
.map_err(|_| crate::DeviceError::OutOfMemory)?;
signal_fence.pending.push((signal_value, sync));
Ok(())
}

third_party/rust/wgpu-hal/src/gles/web.rs (vendored)

@ -427,6 +427,7 @@ impl crate::Surface for Surface {
unsafe fn acquire_texture(
&self,
_timeout_ms: Option<std::time::Duration>, //TODO
_fence: &super::Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let swapchain = self.swapchain.read();
let sc = swapchain.as_ref().unwrap();

third_party/rust/wgpu-hal/src/gles/wgl.rs (vendored)

@ -798,6 +798,7 @@ impl crate::Surface for Surface {
unsafe fn acquire_texture(
&self,
_timeout_ms: Option<Duration>,
_fence: &super::Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let swapchain = self.swapchain.read();
let sc = swapchain.as_ref().unwrap();

third_party/rust/wgpu-hal/src/lib.rs (vendored)

@ -459,44 +459,101 @@ pub trait Instance: Sized + WasmNotSendSync {
pub trait Surface: WasmNotSendSync {
type A: Api;
/// Configures the surface to use the given device.
/// Configure `self` to use `device`.
///
/// # Safety
///
/// - All gpu work that uses the surface must have been completed.
/// - All GPU work using `self` must have been completed.
/// - All [`AcquiredSurfaceTexture`]s must have been destroyed.
/// - All [`Api::TextureView`]s derived from the [`AcquiredSurfaceTexture`]s must have been destroyed.
/// - All surfaces created using other devices must have been unconfigured before this call.
/// - The surface `self` must not currently be configured to use any other [`Device`].
unsafe fn configure(
&self,
device: &<Self::A as Api>::Device,
config: &SurfaceConfiguration,
) -> Result<(), SurfaceError>;
/// Unconfigures the surface on the given device.
/// Unconfigure `self` on `device`.
///
/// # Safety
///
/// - All gpu work that uses the surface must have been completed.
/// - All GPU work that uses `surface` must have been completed.
/// - All [`AcquiredSurfaceTexture`]s must have been destroyed.
/// - All [`Api::TextureView`]s derived from the [`AcquiredSurfaceTexture`]s must have been destroyed.
/// - The surface must have been configured on the given device.
/// - The surface `self` must have been configured on `device`.
unsafe fn unconfigure(&self, device: &<Self::A as Api>::Device);
/// Returns the next texture to be presented by the swapchain for drawing
/// Return the next texture to be presented by `self`, for the caller to draw on.
///
/// A `timeout` of `None` means to wait indefinitely, with no timeout.
/// On success, return an [`AcquiredSurfaceTexture`] representing the
/// texture into which the caller should draw the image to be displayed on
/// `self`.
///
/// If `timeout` elapses before `self` has a texture ready to be acquired,
/// return `Ok(None)`. If `timeout` is `None`, wait indefinitely, with no
/// timeout.
///
/// # Using an [`AcquiredSurfaceTexture`]
///
/// On success, this function returns an [`AcquiredSurfaceTexture`] whose
/// [`texture`] field is a [`SurfaceTexture`] from which the caller can
/// [`borrow`] a [`Texture`] to draw on. The [`AcquiredSurfaceTexture`] also
/// carries some metadata about that [`SurfaceTexture`].
///
/// All calls to [`Queue::submit`] that draw on that [`Texture`] must also
/// include the [`SurfaceTexture`] in the `surface_textures` argument.
///
/// When you are done drawing on the texture, you can display it on `self`
/// by passing the [`SurfaceTexture`] and `self` to [`Queue::present`].
///
/// If you do not wish to display the texture, you must pass the
/// [`SurfaceTexture`] to [`self.discard_texture`], so that it can be reused
/// by future acquisitions.
///
/// # Portability
///
/// Some backends can't support a timeout when acquiring a texture and
/// the timeout will be ignored.
/// Some backends can't support a timeout when acquiring a texture. On these
/// backends, `timeout` is ignored.
///
/// Returns `None` on timing out.
/// # Safety
///
/// - The surface `self` must currently be configured on some [`Device`].
///
/// - The `fence` argument must be the same [`Fence`] passed to all calls to
/// [`Queue::submit`] that used [`Texture`]s acquired from this surface.
///
/// - You may only have one texture acquired from `self` at a time. When
/// `acquire_texture` returns `Ok(Some(ast))`, you must pass the returned
/// [`SurfaceTexture`] `ast.texture` to either [`Queue::present`] or
/// [`Surface::discard_texture`] before calling `acquire_texture` again.
///
/// [`texture`]: AcquiredSurfaceTexture::texture
/// [`SurfaceTexture`]: Api::SurfaceTexture
/// [`borrow`]: std::borrow::Borrow::borrow
/// [`Texture`]: Api::Texture
/// [`Fence`]: Api::Fence
/// [`self.discard_texture`]: Surface::discard_texture
unsafe fn acquire_texture(
&self,
timeout: Option<std::time::Duration>,
fence: &<Self::A as Api>::Fence,
) -> Result<Option<AcquiredSurfaceTexture<Self::A>>, SurfaceError>;
/// Relinquish an acquired texture without presenting it.
///
/// After this call, the texture underlying [`SurfaceTexture`] may be
/// returned by subsequent calls to [`self.acquire_texture`].
///
/// # Safety
///
/// - The surface `self` must currently be configured on some [`Device`].
///
/// - `texture` must be a [`SurfaceTexture`] returned by a call to
/// [`self.acquire_texture`] that has not yet been passed to
/// [`Queue::present`].
///
/// [`SurfaceTexture`]: Api::SurfaceTexture
/// [`self.acquire_texture`]: Surface::acquire_texture
unsafe fn discard_texture(&self, texture: <Self::A as Api>::SurfaceTexture);
}
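Putting the trait docs together: a frame on a configured surface acquires a texture with the shared fence, submits work that lists the surface texture, and presents it. The sketch below assumes the surface is already configured on `device`, elides the actual render pass, and uses the `hal` alias from the examples in this patch (`use wgpu_hal as hal;`); names and error mapping are illustrative:

```rust
// Sketch of the acquire -> draw -> submit -> present flow described above.
fn present_one_frame<A: hal::Api>(
    surface: &A::Surface,
    device: &A::Device,
    queue: &A::Queue,
    fence: &mut A::Fence,
    fence_value: hal::FenceValue,
) -> Result<(), hal::SurfaceError> {
    use hal::{CommandEncoder as _, Device as _, Queue as _, Surface as _};
    unsafe {
        // `None` waits indefinitely; some backends ignore the timeout anyway.
        let Some(acquired) = surface.acquire_texture(None, fence)? else {
            return Ok(());
        };
        let mut encoder = device
            .create_command_encoder(&hal::CommandEncoderDescriptor { label: None, queue })
            .map_err(hal::SurfaceError::Device)?;
        encoder
            .begin_encoding(None)
            .map_err(hal::SurfaceError::Device)?;
        // ... record a render pass targeting a view of `acquired.texture` ...
        let cmd_buf = encoder.end_encoding().map_err(hal::SurfaceError::Device)?;
        // The surface texture must be listed, and the fence must be the same
        // one passed to acquire_texture.
        queue
            .submit(&[&cmd_buf], &[&acquired.texture], (fence, fence_value))
            .map_err(hal::SurfaceError::Device)?;
        queue.present(surface, acquired.texture)?;
    }
    Ok(())
}
```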
@ -529,6 +586,70 @@ pub trait Adapter: WasmNotSendSync {
unsafe fn get_presentation_timestamp(&self) -> wgt::PresentationTimestamp;
}
/// A connection to a GPU and a pool of resources to use with it.
///
/// A `wgpu-hal` `Device` represents an open connection to a specific graphics
/// processor, controlled via the backend [`Device::A`]. A `Device` is mostly
/// used for creating resources. Each `Device` has an associated [`Queue`] used
/// for command submission.
///
/// On Vulkan a `Device` corresponds to a logical device ([`VkDevice`]). Other
/// backends don't have an exact analog: for example, [`ID3D12Device`]s and
/// [`MTLDevice`]s are owned by the backends' [`wgpu_hal::Adapter`]
/// implementations, and shared by all [`wgpu_hal::Device`]s created from that
/// `Adapter`.
///
/// A `Device`'s life cycle is generally:
///
/// 1) Obtain a `Device` and its associated [`Queue`] by calling
/// [`Adapter::open`].
///
/// Alternatively, the backend-specific types that implement [`Adapter`] often
/// have methods for creating a `wgpu-hal` `Device` from a platform-specific
/// handle. For example, [`vulkan::Adapter::device_from_raw`] can create a
/// [`vulkan::Device`] from an [`ash::Device`].
///
/// 1) Create resources to use on the device by calling methods like
/// [`Device::create_texture`] or [`Device::create_shader_module`].
///
/// 1) Call [`Device::create_command_encoder`] to obtain a [`CommandEncoder`],
/// which you can use to build [`CommandBuffer`]s holding commands to be
/// executed on the GPU.
///
/// 1) Call [`Queue::submit`] on the `Device`'s associated [`Queue`] to submit
/// [`CommandBuffer`]s for execution on the GPU. If needed, call
/// [`Device::wait`] to wait for them to finish execution.
///
/// 1) Free resources with methods like [`Device::destroy_texture`] or
/// [`Device::destroy_shader_module`].
///
/// 1) Shut down the device by calling [`Device::exit`].
///
/// [`VkDevice`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#VkDevice
/// [`ID3D12Device`]: https://learn.microsoft.com/en-us/windows/win32/api/d3d12/nn-d3d12-id3d12device
/// [`MTLDevice`]: https://developer.apple.com/documentation/metal/mtldevice
/// [`wgpu_hal::Adapter`]: Adapter
/// [`wgpu_hal::Device`]: Device
/// [`vulkan::Adapter::device_from_raw`]: vulkan/struct.Adapter.html#method.device_from_raw
/// [`vulkan::Device`]: vulkan/struct.Device.html
/// [`ash::Device`]: https://docs.rs/ash/latest/ash/struct.Device.html
/// [`CommandBuffer`]: Api::CommandBuffer
///
/// # Safety
///
/// As with other `wgpu-hal` APIs, [validation] is the caller's
/// responsibility. Here are the general requirements for all `Device`
/// methods:
///
/// - Any resource passed to a `Device` method must have been created by that
/// `Device`. For example, a [`Texture`] passed to [`Device::destroy_texture`] must
/// have been created with the `Device` passed as `self`.
///
/// - Resources may not be destroyed if they are used by any submitted command
/// buffers that have not yet finished execution.
///
/// [validation]: index.html#validation-is-the-calling-codes-responsibility-not-wgpu-hals
/// [`Texture`]: Api::Texture
pub trait Device: WasmNotSendSync {
type A: Api;
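A sketch of the middle of the life cycle above (create an encoder, record, submit with a fence, wait), mirroring the hal examples elsewhere in this patch (`use wgpu_hal as hal;`); the function name is illustrative and error handling is minimal:

```rust
fn record_and_submit<A: hal::Api>(
    device: &A::Device,
    queue: &A::Queue,
) -> Result<(), hal::DeviceError> {
    use hal::{CommandEncoder as _, Device as _, Queue as _};
    unsafe {
        let mut encoder = device.create_command_encoder(&hal::CommandEncoderDescriptor {
            label: Some("sketch encoder"),
            queue,
        })?;
        encoder.begin_encoding(Some("init"))?;
        // ... record transfers / passes here ...
        let cmd_buf = encoder.end_encoding()?;

        // Submission now always signals a fence (see `Queue::submit` below).
        let mut fence = device.create_fence()?;
        queue.submit(&[&cmd_buf], &[], (&mut fence, 1))?;
        device.wait(&fence, 1, !0)?;
        device.destroy_fence(fence);
    }
    Ok(())
}
```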
@ -698,19 +819,23 @@ pub trait Queue: WasmNotSendSync {
/// Submit `command_buffers` for execution on GPU.
///
/// If `signal_fence` is `Some(fence, value)`, update `fence` to `value`
/// when the operation is complete. See [`Fence`] for details.
/// Update `fence` to `value` when the operation is complete. See
/// [`Fence`] for details.
///
/// If two calls to `submit` on a single `Queue` occur in a particular order
/// (that is, they happen on the same thread, or on two threads that have
/// synchronized to establish an ordering), then the first submission's
/// commands all complete execution before any of the second submission's
/// commands begin. All results produced by one submission are visible to
/// the next.
/// A `wgpu_hal` queue is "single threaded": all command buffers are
/// executed in the order they're submitted, with each buffer able to see
/// previous buffers' results. Specifically:
///
/// Within a submission, command buffers execute in the order in which they
/// appear in `command_buffers`. All results produced by one buffer are
/// visible to the next.
/// - If two calls to `submit` on a single `Queue` occur in a particular
/// order (that is, they happen on the same thread, or on two threads that
/// have synchronized to establish an ordering), then the first
/// submission's commands all complete execution before any of the second
/// submission's commands begin. All results produced by one submission
/// are visible to the next.
///
/// - Within a submission, command buffers execute in the order in which they
/// appear in `command_buffers`. All results produced by one buffer are
/// visible to the next.
///
/// If two calls to `submit` on a single `Queue` from different threads are
/// not synchronized to occur in a particular order, they must pass distinct
@ -721,28 +846,47 @@ pub trait Queue: WasmNotSendSync {
/// themselves are unordered. If each thread uses a separate [`Fence`], this
/// problem does not arise.
///
/// Valid usage:
/// # Safety
///
/// - All of the [`CommandBuffer`][cb]s were created from
/// [`CommandEncoder`][ce]s that are associated with this queue.
/// - Each [`CommandBuffer`][cb] in `command_buffers` must have been created
/// from a [`CommandEncoder`][ce] that was constructed from the
/// [`Device`][d] associated with this [`Queue`].
///
/// - All of those [`CommandBuffer`][cb]s must remain alive until
/// the submitted commands have finished execution. (Since
/// command buffers must not outlive their encoders, this
/// implies that the encoders must remain alive as well.)
/// - Each [`CommandBuffer`][cb] must remain alive until the submitted
/// commands have finished execution. Since command buffers must not
/// outlive their encoders, this implies that the encoders must remain
/// alive as well.
///
/// - All of the [`SurfaceTexture`][st]s that the command buffers
/// write to appear in the `surface_textures` argument.
/// - All resources used by a submitted [`CommandBuffer`][cb]
/// ([`Texture`][t]s, [`BindGroup`][bg]s, [`RenderPipeline`][rp]s, and so
/// on) must remain alive until the command buffer finishes execution.
///
/// - Every [`SurfaceTexture`][st] that any command in `command_buffers`
/// writes to must appear in the `surface_textures` argument.
///
/// - No [`SurfaceTexture`][st] may appear in the `surface_textures`
/// argument more than once.
///
/// - Each [`SurfaceTexture`][st] in `surface_textures` must be configured
/// for use with the [`Device`][d] associated with this [`Queue`],
/// typically by calling [`Surface::configure`].
///
/// - All calls to this function that include a given [`SurfaceTexture`][st]
/// in `surface_textures` must use the same [`Fence`].
///
/// [`Fence`]: Api::Fence
/// [cb]: Api::CommandBuffer
/// [ce]: Api::CommandEncoder
/// [d]: Api::Device
/// [t]: Api::Texture
/// [bg]: Api::BindGroup
/// [rp]: Api::RenderPipeline
/// [st]: Api::SurfaceTexture
unsafe fn submit(
&self,
command_buffers: &[&<Self::A as Api>::CommandBuffer],
surface_textures: &[&<Self::A as Api>::SurfaceTexture],
signal_fence: Option<(&mut <Self::A as Api>::Fence, FenceValue)>,
signal_fence: (&mut <Self::A as Api>::Fence, FenceValue),
) -> Result<(), DeviceError>;
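A sketch of the distinct-fence rule above: each thread that submits without synchronizing with the others keeps its own fence and monotonically increasing value. The `ThreadCtx` type is illustrative, not part of wgpu-hal:

```rust
struct ThreadCtx<A: hal::Api> {
    fence: A::Fence,
    fence_value: hal::FenceValue,
}

fn submit_from_thread<A: hal::Api>(
    queue: &A::Queue,
    ctx: &mut ThreadCtx<A>,
    cmd_buf: &A::CommandBuffer,
) -> Result<(), hal::DeviceError> {
    use hal::Queue as _;
    ctx.fence_value += 1;
    unsafe { queue.submit(&[cmd_buf], &[], (&mut ctx.fence, ctx.fence_value)) }
}
```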
unsafe fn present(
&self,
@ -1637,6 +1781,8 @@ pub struct ProgrammableStage<'a, A: Api> {
/// This is required by the WebGPU spec, but may have overhead which can be avoided
/// for cross-platform applications
pub zero_initialize_workgroup_memory: bool,
/// Should the pipeline attempt to transform vertex shaders to use vertex pulling.
pub vertex_pulling_transform: bool,
}
// Rust gets confused about the impl requirements for `A`
@ -1647,6 +1793,7 @@ impl<A: Api> Clone for ProgrammableStage<'_, A> {
entry_point: self.entry_point,
constants: self.constants,
zero_initialize_workgroup_memory: self.zero_initialize_workgroup_memory,
vertex_pulling_transform: self.vertex_pulling_transform,
}
}
}


@ -82,11 +82,9 @@ impl crate::Adapter for super::Adapter {
// https://developer.apple.com/documentation/metal/mtlreadwritetexturetier/mtlreadwritetexturetier1?language=objc
// https://developer.apple.com/documentation/metal/mtlreadwritetexturetier/mtlreadwritetexturetier2?language=objc
let (read_write_tier1_if, read_write_tier2_if) = match pc.read_write_texture_tier {
metal::MTLReadWriteTextureTier::TierNone => (Tfc::empty(), Tfc::empty()),
metal::MTLReadWriteTextureTier::Tier1 => (Tfc::STORAGE_READ_WRITE, Tfc::empty()),
metal::MTLReadWriteTextureTier::Tier2 => {
(Tfc::STORAGE_READ_WRITE, Tfc::STORAGE_READ_WRITE)
}
MTLReadWriteTextureTier::TierNone => (Tfc::empty(), Tfc::empty()),
MTLReadWriteTextureTier::Tier1 => (Tfc::STORAGE_READ_WRITE, Tfc::empty()),
MTLReadWriteTextureTier::Tier2 => (Tfc::STORAGE_READ_WRITE, Tfc::STORAGE_READ_WRITE),
};
let msaa_count = pc.sample_count_mask;
@ -738,7 +736,9 @@ impl super::PrivateCapabilities {
4
},
// Per https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
max_color_attachment_bytes_per_sample: if device.supports_family(MTLGPUFamily::Apple4) {
max_color_attachment_bytes_per_sample: if family_check
&& device.supports_family(MTLGPUFamily::Apple4)
{
64
} else {
32
@ -981,7 +981,7 @@ impl super::PrivateCapabilities {
max_compute_workgroup_size_z: self.max_threads_per_group,
max_compute_workgroups_per_dimension: 0xFFFF,
max_buffer_size: self.max_buffer_size,
max_non_sampler_bindings: std::u32::MAX,
max_non_sampler_bindings: u32::MAX,
},
alignments: crate::Alignments {
buffer_copy_offset: wgt::BufferSize::new(self.buffer_alignment).unwrap(),


@ -16,6 +16,7 @@ impl Default for super::CommandState {
raw_wg_size: metal::MTLSize::new(0, 0, 0),
stage_infos: Default::default(),
storage_buffer_length_map: Default::default(),
vertex_buffer_size_map: Default::default(),
work_group_memory_sizes: Vec::new(),
push_constants: Vec::new(),
pending_timer_queries: Vec::new(),
@ -137,6 +138,7 @@ impl super::CommandEncoder {
impl super::CommandState {
fn reset(&mut self) {
self.storage_buffer_length_map.clear();
self.vertex_buffer_size_map.clear();
self.stage_infos.vs.clear();
self.stage_infos.fs.clear();
self.stage_infos.cs.clear();
@ -160,6 +162,15 @@ impl super::CommandState {
.unwrap_or_default()
}));
// Extend with the sizes of the mapped vertex buffers, in the order
// they were added to the map.
result_sizes.extend(stage_info.vertex_buffer_mappings.iter().map(|vbm| {
self.vertex_buffer_size_map
.get(&(vbm.id as u64))
.map(|size| u32::try_from(size.get()).unwrap_or(u32::MAX))
.unwrap_or_default()
}));
if !result_sizes.is_empty() {
Some((slot as _, result_sizes))
} else {
@ -927,6 +938,27 @@ impl crate::CommandEncoder for super::CommandEncoder {
let buffer_index = self.shared.private_caps.max_vertex_buffers as u64 - 1 - index as u64;
let encoder = self.state.render.as_ref().unwrap();
encoder.set_vertex_buffer(buffer_index, Some(&binding.buffer.raw), binding.offset);
let buffer_size = binding.resolve_size();
if buffer_size > 0 {
self.state.vertex_buffer_size_map.insert(
buffer_index,
std::num::NonZeroU64::new(buffer_size).unwrap(),
);
} else {
self.state.vertex_buffer_size_map.remove(&buffer_index);
}
if let Some((index, sizes)) = self
.state
.make_sizes_buffer_update(naga::ShaderStage::Vertex, &mut self.temp.binding_sizes)
{
encoder.set_vertex_bytes(
index as _,
(sizes.len() * WORD_SIZE) as u64,
sizes.as_ptr() as _,
);
}
}
unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth_range: Range<f32>) {

third_party/rust/wgpu-hal/src/metal/device.rs (vendored)

@ -59,10 +59,48 @@ fn create_depth_stencil_desc(state: &wgt::DepthStencilState) -> metal::DepthSten
desc
}
const fn convert_vertex_format_to_naga(format: wgt::VertexFormat) -> naga::back::msl::VertexFormat {
match format {
wgt::VertexFormat::Uint8x2 => naga::back::msl::VertexFormat::Uint8x2,
wgt::VertexFormat::Uint8x4 => naga::back::msl::VertexFormat::Uint8x4,
wgt::VertexFormat::Sint8x2 => naga::back::msl::VertexFormat::Sint8x2,
wgt::VertexFormat::Sint8x4 => naga::back::msl::VertexFormat::Sint8x4,
wgt::VertexFormat::Unorm8x2 => naga::back::msl::VertexFormat::Unorm8x2,
wgt::VertexFormat::Unorm8x4 => naga::back::msl::VertexFormat::Unorm8x4,
wgt::VertexFormat::Snorm8x2 => naga::back::msl::VertexFormat::Snorm8x2,
wgt::VertexFormat::Snorm8x4 => naga::back::msl::VertexFormat::Snorm8x4,
wgt::VertexFormat::Uint16x2 => naga::back::msl::VertexFormat::Uint16x2,
wgt::VertexFormat::Uint16x4 => naga::back::msl::VertexFormat::Uint16x4,
wgt::VertexFormat::Sint16x2 => naga::back::msl::VertexFormat::Sint16x2,
wgt::VertexFormat::Sint16x4 => naga::back::msl::VertexFormat::Sint16x4,
wgt::VertexFormat::Unorm16x2 => naga::back::msl::VertexFormat::Unorm16x2,
wgt::VertexFormat::Unorm16x4 => naga::back::msl::VertexFormat::Unorm16x4,
wgt::VertexFormat::Snorm16x2 => naga::back::msl::VertexFormat::Snorm16x2,
wgt::VertexFormat::Snorm16x4 => naga::back::msl::VertexFormat::Snorm16x4,
wgt::VertexFormat::Float16x2 => naga::back::msl::VertexFormat::Float16x2,
wgt::VertexFormat::Float16x4 => naga::back::msl::VertexFormat::Float16x4,
wgt::VertexFormat::Float32 => naga::back::msl::VertexFormat::Float32,
wgt::VertexFormat::Float32x2 => naga::back::msl::VertexFormat::Float32x2,
wgt::VertexFormat::Float32x3 => naga::back::msl::VertexFormat::Float32x3,
wgt::VertexFormat::Float32x4 => naga::back::msl::VertexFormat::Float32x4,
wgt::VertexFormat::Uint32 => naga::back::msl::VertexFormat::Uint32,
wgt::VertexFormat::Uint32x2 => naga::back::msl::VertexFormat::Uint32x2,
wgt::VertexFormat::Uint32x3 => naga::back::msl::VertexFormat::Uint32x3,
wgt::VertexFormat::Uint32x4 => naga::back::msl::VertexFormat::Uint32x4,
wgt::VertexFormat::Sint32 => naga::back::msl::VertexFormat::Sint32,
wgt::VertexFormat::Sint32x2 => naga::back::msl::VertexFormat::Sint32x2,
wgt::VertexFormat::Sint32x3 => naga::back::msl::VertexFormat::Sint32x3,
wgt::VertexFormat::Sint32x4 => naga::back::msl::VertexFormat::Sint32x4,
wgt::VertexFormat::Unorm10_10_10_2 => naga::back::msl::VertexFormat::Unorm10_10_10_2,
_ => unimplemented!(),
}
}
impl super::Device {
fn load_shader(
&self,
stage: &crate::ProgrammableStage<super::Api>,
vertex_buffer_mappings: &[naga::back::msl::VertexBufferMapping],
layout: &super::PipelineLayout,
primitive_class: metal::MTLPrimitiveTopologyClass,
naga_stage: naga::ShaderStage,
@ -120,6 +158,8 @@ impl super::Device {
metal::MTLPrimitiveTopologyClass::Point => true,
_ => false,
},
vertex_pulling_transform: stage.vertex_pulling_transform,
vertex_buffer_mappings: vertex_buffer_mappings.to_vec(),
};
let (source, info) =
@ -548,7 +588,7 @@ impl crate::Device for super::Device {
pc_buffer: Option<super::ResourceIndex>,
pc_limit: u32,
sizes_buffer: Option<super::ResourceIndex>,
sizes_count: u8,
need_sizes_buffer: bool,
resources: naga::back::msl::BindingMap,
}
@ -558,7 +598,7 @@ impl crate::Device for super::Device {
pc_buffer: None,
pc_limit: 0,
sizes_buffer: None,
sizes_count: 0,
need_sizes_buffer: false,
resources: Default::default(),
});
let mut bind_group_infos = arrayvec::ArrayVec::new();
@ -603,7 +643,7 @@ impl crate::Device for super::Device {
{
for info in stage_data.iter_mut() {
if entry.visibility.contains(map_naga_stage(info.stage)) {
info.sizes_count += 1;
info.need_sizes_buffer = true;
}
}
}
@ -661,11 +701,13 @@ impl crate::Device for super::Device {
// Finally, make sure we fit the limits
for info in stage_data.iter_mut() {
// handle the sizes buffer assignment and shader overrides
if info.sizes_count != 0 {
if info.need_sizes_buffer || info.stage == naga::ShaderStage::Vertex {
// Set aside space for the sizes_buffer, which is required
// for variable-length buffers, or to support vertex pulling.
info.sizes_buffer = Some(info.counters.buffers);
info.counters.buffers += 1;
}
if info.counters.buffers > self.shared.private_caps.max_buffers_per_stage
|| info.counters.textures > self.shared.private_caps.max_textures_per_stage
|| info.counters.samplers > self.shared.private_caps.max_samplers_per_stage
@ -832,8 +874,38 @@ impl crate::Device for super::Device {
// Vertex shader
let (vs_lib, vs_info) = {
let mut vertex_buffer_mappings = Vec::<naga::back::msl::VertexBufferMapping>::new();
for (i, vbl) in desc.vertex_buffers.iter().enumerate() {
let mut attributes = Vec::<naga::back::msl::AttributeMapping>::new();
for attribute in vbl.attributes.iter() {
attributes.push(naga::back::msl::AttributeMapping {
shader_location: attribute.shader_location,
offset: attribute.offset as u32,
format: convert_vertex_format_to_naga(attribute.format),
});
}
vertex_buffer_mappings.push(naga::back::msl::VertexBufferMapping {
id: self.shared.private_caps.max_vertex_buffers - 1 - i as u32,
stride: if vbl.array_stride > 0 {
vbl.array_stride.try_into().unwrap()
} else {
vbl.attributes
.iter()
.map(|attribute| attribute.offset + attribute.format.size())
.max()
.unwrap_or(0)
.try_into()
.unwrap()
},
indexed_by_vertex: (vbl.step_mode == wgt::VertexStepMode::Vertex {}),
attributes,
});
}
let vs = self.load_shader(
&desc.vertex_stage,
&vertex_buffer_mappings,
desc.layout,
primitive_class,
naga::ShaderStage::Vertex,
@ -851,6 +923,7 @@ impl crate::Device for super::Device {
push_constants: desc.layout.push_constants_infos.vs,
sizes_slot: desc.layout.per_stage_map.vs.sizes_buffer,
sized_bindings: vs.sized_bindings,
vertex_buffer_mappings,
};
(vs.library, info)
@ -861,6 +934,7 @@ impl crate::Device for super::Device {
Some(ref stage) => {
let fs = self.load_shader(
stage,
&[],
desc.layout,
primitive_class,
naga::ShaderStage::Fragment,
@ -878,6 +952,7 @@ impl crate::Device for super::Device {
push_constants: desc.layout.push_constants_infos.fs,
sizes_slot: desc.layout.per_stage_map.fs.sizes_buffer,
sized_bindings: fs.sized_bindings,
vertex_buffer_mappings: vec![],
};
(Some(fs.library), Some(info))
@ -1053,6 +1128,7 @@ impl crate::Device for super::Device {
let cs = self.load_shader(
&desc.stage,
&[],
desc.layout,
metal::MTLPrimitiveTopologyClass::Unspecified,
naga::ShaderStage::Compute,
@ -1070,6 +1146,7 @@ impl crate::Device for super::Device {
push_constants: desc.layout.push_constants_infos.cs,
sizes_slot: desc.layout.per_stage_map.cs.sizes_buffer,
sized_bindings: cs.sized_bindings,
vertex_buffer_mappings: vec![],
};
if let Some(name) = desc.label {

third_party/rust/wgpu-hal/src/metal/mod.rs (vendored)

@ -377,38 +377,37 @@ impl crate::Queue for Queue {
&self,
command_buffers: &[&CommandBuffer],
_surface_textures: &[&SurfaceTexture],
signal_fence: Option<(&mut Fence, crate::FenceValue)>,
(signal_fence, signal_value): (&mut Fence, crate::FenceValue),
) -> Result<(), crate::DeviceError> {
objc::rc::autoreleasepool(|| {
let extra_command_buffer = match signal_fence {
Some((fence, value)) => {
let completed_value = Arc::clone(&fence.completed_value);
let block = block::ConcreteBlock::new(move |_cmd_buf| {
completed_value.store(value, atomic::Ordering::Release);
})
.copy();
let extra_command_buffer = {
let completed_value = Arc::clone(&signal_fence.completed_value);
let block = block::ConcreteBlock::new(move |_cmd_buf| {
completed_value.store(signal_value, atomic::Ordering::Release);
})
.copy();
let raw = match command_buffers.last() {
Some(&cmd_buf) => cmd_buf.raw.to_owned(),
None => {
let queue = self.raw.lock();
queue
.new_command_buffer_with_unretained_references()
.to_owned()
}
};
raw.set_label("(wgpu internal) Signal");
raw.add_completed_handler(&block);
fence.maintain();
fence.pending_command_buffers.push((value, raw.to_owned()));
// only return an extra one if it's extra
match command_buffers.last() {
Some(_) => None,
None => Some(raw),
let raw = match command_buffers.last() {
Some(&cmd_buf) => cmd_buf.raw.to_owned(),
None => {
let queue = self.raw.lock();
queue
.new_command_buffer_with_unretained_references()
.to_owned()
}
};
raw.set_label("(wgpu internal) Signal");
raw.add_completed_handler(&block);
signal_fence.maintain();
signal_fence
.pending_command_buffers
.push((signal_value, raw.to_owned()));
// only return an extra one if it's extra
match command_buffers.last() {
Some(_) => None,
None => Some(raw),
}
None => None,
};
for cmd_buffer in command_buffers {
@ -466,6 +465,15 @@ impl Buffer {
}
}
impl crate::BufferBinding<'_, Api> {
fn resolve_size(&self) -> wgt::BufferAddress {
match self.size {
Some(size) => size.get(),
None => self.buffer.size - self.offset,
}
}
}
#[derive(Debug)]
pub struct Texture {
raw: metal::Texture,
@ -690,6 +698,9 @@ struct PipelineStageInfo {
///
/// See `device::CompiledShader::sized_bindings` for more details.
sized_bindings: Vec<naga::ResourceBinding>,
/// Info on all bound vertex buffers.
vertex_buffer_mappings: Vec<naga::back::msl::VertexBufferMapping>,
}
impl PipelineStageInfo {
@ -697,6 +708,7 @@ impl PipelineStageInfo {
self.push_constants = None;
self.sizes_slot = None;
self.sized_bindings.clear();
self.vertex_buffer_mappings.clear();
}
fn assign_from(&mut self, other: &Self) {
@ -704,6 +716,9 @@ impl PipelineStageInfo {
self.sizes_slot = other.sizes_slot;
self.sized_bindings.clear();
self.sized_bindings.extend_from_slice(&other.sized_bindings);
self.vertex_buffer_mappings.clear();
self.vertex_buffer_mappings
.extend_from_slice(&other.vertex_buffer_mappings);
}
}
@ -821,6 +836,8 @@ struct CommandState {
/// [`ResourceBinding`]: naga::ResourceBinding
storage_buffer_length_map: rustc_hash::FxHashMap<naga::ResourceBinding, wgt::BufferSize>,
vertex_buffer_size_map: rustc_hash::FxHashMap<u64, wgt::BufferSize>,
work_group_memory_sizes: Vec<u32>,
push_constants: Vec<u32>,


@ -17,7 +17,7 @@ use objc::{
use parking_lot::{Mutex, RwLock};
#[cfg(target_os = "macos")]
#[cfg_attr(feature = "link", link(name = "QuartzCore", kind = "framework"))]
#[link(name = "QuartzCore", kind = "framework")]
extern "C" {
#[allow(non_upper_case_globals)]
static kCAGravityTopLeft: *mut Object;
@ -242,6 +242,7 @@ impl crate::Surface for super::Surface {
unsafe fn acquire_texture(
&self,
_timeout_ms: Option<std::time::Duration>, //TODO
_fence: &super::Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let render_layer = self.render_layer.lock();
let (drawable, texture) = match autoreleasepool(|| {


@ -3,11 +3,7 @@ use super::conv;
use ash::{amd, ext, khr, vk};
use parking_lot::Mutex;
use std::{
collections::BTreeMap,
ffi::CStr,
sync::{atomic::AtomicIsize, Arc},
};
use std::{collections::BTreeMap, ffi::CStr, sync::Arc};
fn depth_stencil_required_flags() -> vk::FormatFeatureFlags {
vk::FormatFeatureFlags::SAMPLED_IMAGE | vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT
@ -1047,7 +1043,7 @@ impl PhysicalDeviceProperties {
max_compute_workgroup_size_z: max_compute_workgroup_sizes[2],
max_compute_workgroups_per_dimension,
max_buffer_size,
max_non_sampler_bindings: std::u32::MAX,
max_non_sampler_bindings: u32::MAX,
}
}
@ -1783,21 +1779,15 @@ impl super::Adapter {
render_passes: Mutex::new(Default::default()),
framebuffers: Mutex::new(Default::default()),
});
let mut relay_semaphores = [vk::Semaphore::null(); 2];
for sem in relay_semaphores.iter_mut() {
unsafe {
*sem = shared
.raw
.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)?
};
}
let relay_semaphores = super::RelaySemaphores::new(&shared)?;
let queue = super::Queue {
raw: raw_queue,
swapchain_fn,
device: Arc::clone(&shared),
family_index,
relay_semaphores,
relay_index: AtomicIsize::new(-1),
relay_semaphores: Mutex::new(relay_semaphores),
};
let mem_allocator = {
@@ -1807,7 +1797,7 @@ impl super::Adapter {
if let Some(maintenance_3) = self.phd_capabilities.maintenance_3 {
maintenance_3.max_memory_allocation_size
} else {
u64::max_value()
u64::MAX
};
let properties = gpu_alloc::DeviceProperties {
max_memory_allocation_count: limits.max_memory_allocation_count,

140
third_party/rust/wgpu-hal/src/vulkan/device.rs (vendored)

@@ -612,17 +612,16 @@ impl super::Device {
let images =
unsafe { functor.get_swapchain_images(raw) }.map_err(crate::DeviceError::from)?;
// NOTE: It's important that we define at least images.len() + 1 wait
// NOTE: It's important that we define at least images.len() wait
// semaphores, since we prospectively need to provide the call to
// acquire the next image with an unsignaled semaphore.
let surface_semaphores = (0..images.len() + 1)
.map(|_| unsafe {
self.shared
.raw
.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
let surface_semaphores = (0..=images.len())
.map(|_| {
super::SwapchainImageSemaphores::new(&self.shared)
.map(Mutex::new)
.map(Arc::new)
})
.collect::<Result<Vec<_>, _>>()
.map_err(crate::DeviceError::from)?;
.collect::<Result<Vec<_>, _>>()?;
Ok(super::Swapchain {
raw,
@@ -633,7 +632,7 @@ impl super::Device {
config: config.clone(),
view_formats: wgt_view_formats,
surface_semaphores,
next_surface_index: 0,
next_semaphore_index: 0,
})
}
@@ -836,9 +835,12 @@ impl crate::Device for super::Device {
unsafe fn exit(self, queue: super::Queue) {
unsafe { self.mem_allocator.into_inner().cleanup(&*self.shared) };
unsafe { self.desc_allocator.into_inner().cleanup(&*self.shared) };
for &sem in queue.relay_semaphores.iter() {
unsafe { self.shared.raw.destroy_semaphore(sem, None) };
}
unsafe {
queue
.relay_semaphores
.into_inner()
.destroy(&self.shared.raw)
};
unsafe { self.shared.free_resources() };
}
@@ -2055,54 +2057,7 @@ impl crate::Device for super::Device {
timeout_ms: u32,
) -> Result<bool, crate::DeviceError> {
let timeout_ns = timeout_ms as u64 * super::MILLIS_TO_NANOS;
match *fence {
super::Fence::TimelineSemaphore(raw) => {
let semaphores = [raw];
let values = [wait_value];
let vk_info = vk::SemaphoreWaitInfo::default()
.semaphores(&semaphores)
.values(&values);
let result = match self.shared.extension_fns.timeline_semaphore {
Some(super::ExtensionFn::Extension(ref ext)) => unsafe {
ext.wait_semaphores(&vk_info, timeout_ns)
},
Some(super::ExtensionFn::Promoted) => unsafe {
self.shared.raw.wait_semaphores(&vk_info, timeout_ns)
},
None => unreachable!(),
};
match result {
Ok(()) => Ok(true),
Err(vk::Result::TIMEOUT) => Ok(false),
Err(other) => Err(other.into()),
}
}
super::Fence::FencePool {
last_completed,
ref active,
free: _,
} => {
if wait_value <= last_completed {
Ok(true)
} else {
match active.iter().find(|&&(value, _)| value >= wait_value) {
Some(&(_, raw)) => {
match unsafe {
self.shared.raw.wait_for_fences(&[raw], true, timeout_ns)
} {
Ok(()) => Ok(true),
Err(vk::Result::TIMEOUT) => Ok(false),
Err(other) => Err(other.into()),
}
}
None => {
log::error!("No signals reached value {}", wait_value);
Err(crate::DeviceError::Lost)
}
}
}
}
}
self.shared.wait_for_fence(fence, wait_value, timeout_ns)
}
unsafe fn start_capture(&self) -> bool {
@@ -2364,6 +2319,71 @@ impl crate::Device for super::Device {
}
}
impl super::DeviceShared {
pub(super) fn new_binary_semaphore(&self) -> Result<vk::Semaphore, crate::DeviceError> {
unsafe {
self.raw
.create_semaphore(&vk::SemaphoreCreateInfo::default(), None)
.map_err(crate::DeviceError::from)
}
}
pub(super) fn wait_for_fence(
&self,
fence: &super::Fence,
wait_value: crate::FenceValue,
timeout_ns: u64,
) -> Result<bool, crate::DeviceError> {
profiling::scope!("Device::wait");
match *fence {
super::Fence::TimelineSemaphore(raw) => {
let semaphores = [raw];
let values = [wait_value];
let vk_info = vk::SemaphoreWaitInfo::default()
.semaphores(&semaphores)
.values(&values);
let result = match self.extension_fns.timeline_semaphore {
Some(super::ExtensionFn::Extension(ref ext)) => unsafe {
ext.wait_semaphores(&vk_info, timeout_ns)
},
Some(super::ExtensionFn::Promoted) => unsafe {
self.raw.wait_semaphores(&vk_info, timeout_ns)
},
None => unreachable!(),
};
match result {
Ok(()) => Ok(true),
Err(vk::Result::TIMEOUT) => Ok(false),
Err(other) => Err(other.into()),
}
}
super::Fence::FencePool {
last_completed,
ref active,
free: _,
} => {
if wait_value <= last_completed {
Ok(true)
} else {
match active.iter().find(|&&(value, _)| value >= wait_value) {
Some(&(_, raw)) => {
match unsafe { self.raw.wait_for_fences(&[raw], true, timeout_ns) } {
Ok(()) => Ok(true),
Err(vk::Result::TIMEOUT) => Ok(false),
Err(other) => Err(other.into()),
}
}
None => {
log::error!("No signals reached value {}", wait_value);
Err(crate::DeviceError::Lost)
}
}
}
}
}
}
}
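// Illustrative sketch, not part of the vendored source: how the `FencePool`
// arm of `wait_for_fence` above chooses what to wait on. `pick`, the fence
// labels, and the values are hypothetical stand-ins for `active` entries.
#[cfg(test)]
mod fence_pool_sketch {
    /// Returns the fence to wait on, `Ok(None)` if `wait` is already reached,
    /// or `Err(())` if no pending submission will ever reach it (device lost).
    fn pick(
        last_completed: u64,
        active: &[(u64, &'static str)],
        wait: u64,
    ) -> Result<Option<&'static str>, ()> {
        if wait <= last_completed {
            return Ok(None);
        }
        match active.iter().find(|&&(value, _)| value >= wait) {
            Some(&(_, fence)) => Ok(Some(fence)),
            None => Err(()),
        }
    }

    #[test]
    fn selection() {
        let active = [(2, "fence_a"), (5, "fence_b")];
        assert_eq!(pick(1, &active, 1), Ok(None)); // value 1 already completed
        assert_eq!(pick(1, &active, 3), Ok(Some("fence_b"))); // first fence signaled at >= 3
        assert_eq!(pick(1, &active, 7), Err(())); // nothing in flight reaches 7
    }
}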
impl From<gpu_alloc::AllocationError> for crate::DeviceError {
fn from(error: gpu_alloc::AllocationError) -> Self {
use gpu_alloc::AllocationError as Ae;


@@ -164,10 +164,14 @@ impl super::Swapchain {
let _ = unsafe { device.device_wait_idle() };
};
// We cannot take this by value, as the function returns `self`.
for semaphore in self.surface_semaphores.drain(..) {
unsafe {
device.destroy_semaphore(semaphore, None);
}
let arc_removed = Arc::into_inner(semaphore).expect(
"Trying to destroy a SurfaceSemaphores that is still in use by a SurfaceTexture",
);
let mutex_removed = arc_removed.into_inner();
unsafe { mutex_removed.destroy(device) };
}
self
@@ -966,9 +970,10 @@ impl crate::Surface for super::Surface {
unsafe fn acquire_texture(
&self,
timeout: Option<std::time::Duration>,
fence: &super::Fence,
) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
let mut swapchain = self.swapchain.write();
let sc = swapchain.as_mut().unwrap();
let swapchain = swapchain.as_mut().unwrap();
let mut timeout_ns = match timeout {
Some(duration) => duration.as_nanos() as u64,
@@ -988,12 +993,40 @@
timeout_ns = u64::MAX;
}
let wait_semaphore = sc.surface_semaphores[sc.next_surface_index];
let swapchain_semaphores_arc = swapchain.get_surface_semaphores();
// Nothing should be using this, so we don't block, but panic if we fail to lock.
let locked_swapchain_semaphores = swapchain_semaphores_arc
.try_lock()
.expect("Failed to lock a SwapchainSemaphores.");
// Wait for all commands writing to the previously acquired image to
// complete.
//
// Almost all the steps in the usual acquire-draw-present flow are
// asynchronous: they get something started on the presentation engine
// or the GPU, but on the CPU, control returns immediately. Without some
// sort of intervention, the CPU could crank out frames much faster than
// the presentation engine can display them.
//
// This is the intervention: if any submissions drew on this image, and
// thus waited for `locked_swapchain_semaphores.acquire`, wait for all
// of them to finish, thus ensuring that it's okay to pass `acquire` to
// `vkAcquireNextImageKHR` again.
swapchain.device.wait_for_fence(
fence,
locked_swapchain_semaphores.previously_used_submission_index,
timeout_ns,
)?;
// will block if no image is available
let (index, suboptimal) = match unsafe {
sc.functor
.acquire_next_image(sc.raw, timeout_ns, wait_semaphore, vk::Fence::null())
profiling::scope!("vkAcquireNextImageKHR");
swapchain.functor.acquire_next_image(
swapchain.raw,
timeout_ns,
locked_swapchain_semaphores.acquire,
vk::Fence::null(),
)
} {
// We treat `VK_SUBOPTIMAL_KHR` as `VK_SUCCESS` on Android.
// See the comment in `Queue::present`.
@@ -1013,16 +1046,18 @@
}
};
sc.next_surface_index += 1;
sc.next_surface_index %= sc.surface_semaphores.len();
drop(locked_swapchain_semaphores);
// We only advance the surface semaphores if we successfully acquired an image, otherwise
// we should try to re-acquire using the same semaphores.
swapchain.advance_surface_semaphores();
// special case for Intel Vulkan returning bizarre values (ugh)
if sc.device.vendor_id == crate::auxil::db::intel::VENDOR && index > 0x100 {
if swapchain.device.vendor_id == crate::auxil::db::intel::VENDOR && index > 0x100 {
return Err(crate::SurfaceError::Outdated);
}
// https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkRenderPassBeginInfo.html#VUID-VkRenderPassBeginInfo-framebuffer-03209
let raw_flags = if sc
let raw_flags = if swapchain
.raw_flags
.contains(vk::SwapchainCreateFlagsKHR::MUTABLE_FORMAT)
{
@@ -1034,20 +1069,20 @@
let texture = super::SurfaceTexture {
index,
texture: super::Texture {
raw: sc.images[index as usize],
raw: swapchain.images[index as usize],
drop_guard: None,
block: None,
usage: sc.config.usage,
format: sc.config.format,
usage: swapchain.config.usage,
format: swapchain.config.format,
raw_flags,
copy_size: crate::CopyExtent {
width: sc.config.extent.width,
height: sc.config.extent.height,
width: swapchain.config.extent.width,
height: swapchain.config.extent.height,
depth: 1,
},
view_formats: sc.view_formats.clone(),
view_formats: swapchain.view_formats.clone(),
},
wait_semaphore,
surface_semaphores: swapchain_semaphores_arc,
};
Ok(Some(crate::AcquiredSurfaceTexture {
texture,

408
third_party/rust/wgpu-hal/src/vulkan/mod.rs (vendored)

@@ -33,13 +33,11 @@ mod instance;
use std::{
borrow::Borrow,
collections::HashSet,
ffi::{CStr, CString},
fmt,
fmt, mem,
num::NonZeroU32,
sync::{
atomic::{AtomicIsize, Ordering},
Arc,
},
sync::Arc,
};
use arrayvec::ArrayVec;
@@ -147,6 +145,173 @@ pub struct Instance {
shared: Arc<InstanceShared>,
}
/// The semaphores needed to use one image in a swapchain.
#[derive(Debug)]
struct SwapchainImageSemaphores {
/// A semaphore that is signaled when this image is safe for us to modify.
///
/// When [`vkAcquireNextImageKHR`] returns the index of the next swapchain
/// image that we should use, that image may actually still be in use by the
/// presentation engine, and is not yet safe to modify. However, that
/// function does accept a semaphore that it will signal when the image is
/// indeed safe to begin messing with.
///
/// This semaphore is:
///
/// - waited for by the first queue submission to operate on this image
/// since it was acquired, and
///
/// - signaled by [`vkAcquireNextImageKHR`] when the acquired image is ready
/// for us to use.
///
/// [`vkAcquireNextImageKHR`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkAcquireNextImageKHR
acquire: vk::Semaphore,
/// True if the next command submission operating on this image should wait
/// for [`acquire`].
///
/// We must wait for `acquire` before drawing to this swapchain image, but
/// because `wgpu-hal` queue submissions are always strongly ordered, only
/// the first submission that works with a swapchain image actually needs to
/// wait. We set this flag when this image is acquired, and clear it the
/// first time it's passed to [`Queue::submit`] as a surface texture.
///
/// [`acquire`]: SwapchainImageSemaphores::acquire
/// [`Queue::submit`]: crate::Queue::submit
should_wait_for_acquire: bool,
/// A pool of semaphores for ordering presentation after drawing.
///
/// The first [`present_index`] semaphores in this vector are:
///
/// - all waited on by the call to [`vkQueuePresentKHR`] that presents this
/// image, and
///
/// - each signaled by some [`vkQueueSubmit`] queue submission that draws to
/// this image, when the submission finishes execution.
///
/// This vector accumulates one semaphore per submission that writes to this
/// image. This is awkward, but hard to avoid: [`vkQueuePresentKHR`]
/// requires a semaphore to order it with respect to drawing commands, and
/// we can't attach new completion semaphores to a command submission after
/// it's been submitted. This means that, at submission time, we must create
/// the semaphore we might need if the caller's next action is to enqueue a
/// presentation of this image.
///
/// An alternative strategy would be for presentation to enqueue an empty
/// submit, ordered relative to other submits in the usual way, and
/// signaling a single presentation semaphore. But we suspect that submits
/// are usually expensive enough, and semaphores usually cheap enough, that
/// performance-sensitive users will avoid making many submits, so that the
/// cost of accumulated semaphores will usually be less than the cost of an
/// additional submit.
///
/// Only the first [`present_index`] semaphores in the vector are actually
/// going to be signalled by submitted commands, and need to be waited for
/// by the next present call. Any semaphores beyond that index were created
/// for prior presents and are simply being retained for recycling.
///
/// [`present_index`]: SwapchainImageSemaphores::present_index
/// [`vkQueuePresentKHR`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueuePresentKHR
/// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
present: Vec<vk::Semaphore>,
/// The number of semaphores in [`present`] to be signalled for this submission.
///
/// [`present`]: SwapchainImageSemaphores::present
present_index: usize,
/// The fence value of the last command submission that wrote to this image.
///
/// The next time we try to acquire this image, we'll block until
/// this submission finishes, proving that [`acquire`] is ready to
/// pass to `vkAcquireNextImageKHR` again.
///
/// [`acquire`]: SwapchainImageSemaphores::acquire
previously_used_submission_index: crate::FenceValue,
}
impl SwapchainImageSemaphores {
fn new(device: &DeviceShared) -> Result<Self, crate::DeviceError> {
Ok(Self {
acquire: device.new_binary_semaphore()?,
should_wait_for_acquire: true,
present: Vec::new(),
present_index: 0,
previously_used_submission_index: 0,
})
}
fn set_used_fence_value(&mut self, value: crate::FenceValue) {
self.previously_used_submission_index = value;
}
/// Return the semaphore that commands drawing to this image should wait for, if any.
///
/// This only returns `Some` once per acquisition; see
/// [`SwapchainImageSemaphores::should_wait_for_acquire`] for details.
fn get_acquire_wait_semaphore(&mut self) -> Option<vk::Semaphore> {
if self.should_wait_for_acquire {
self.should_wait_for_acquire = false;
Some(self.acquire)
} else {
None
}
}
/// Return a semaphore that a submission that writes to this image should
/// signal when it's done.
///
/// See [`SwapchainImageSemaphores::present`] for details.
fn get_submit_signal_semaphore(
&mut self,
device: &DeviceShared,
) -> Result<vk::Semaphore, crate::DeviceError> {
// Try to recycle a semaphore we created for a previous presentation.
let sem = match self.present.get(self.present_index) {
Some(sem) => *sem,
None => {
let sem = device.new_binary_semaphore()?;
self.present.push(sem);
sem
}
};
self.present_index += 1;
Ok(sem)
}
/// Return the semaphores that a presentation of this image should wait on.
///
/// Return a slice of semaphores that the call to [`vkQueueSubmit`] that
/// ends this image's acquisition should wait for. See
/// [`SwapchainImageSemaphores::present`] for details.
///
/// Reset `self` to be ready for the next acquisition cycle.
///
/// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
fn get_present_wait_semaphores(&mut self) -> &[vk::Semaphore] {
let old_index = self.present_index;
// Since this marks the end of this acquire/draw/present cycle, take the
// opportunity to reset `self` in preparation for the next acquisition.
self.present_index = 0;
self.should_wait_for_acquire = true;
&self.present[0..old_index]
}
unsafe fn destroy(&self, device: &ash::Device) {
unsafe {
device.destroy_semaphore(self.acquire, None);
for sem in &self.present {
device.destroy_semaphore(*sem, None);
}
}
}
}
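// Illustrative sketch, not part of the vendored source: the bookkeeping the
// methods above perform over one acquire/submit/submit/present cycle, modeled
// with plain counters instead of real `vk::Semaphore`s. `ImageSync` and its
// fields are hypothetical mirrors of this struct's flag and indices.
#[cfg(test)]
mod swapchain_semaphore_sketch {
    struct ImageSync {
        should_wait_for_acquire: bool,
        present_semaphores: usize, // semaphores created so far, recycled across frames
        present_index: usize,      // how many of them the next present must wait on
    }

    impl ImageSync {
        fn new() -> Self {
            Self {
                should_wait_for_acquire: true,
                present_semaphores: 0,
                present_index: 0,
            }
        }

        /// Mirrors `get_acquire_wait_semaphore` + `get_submit_signal_semaphore`:
        /// returns whether this submission waits on `acquire`, and how many
        /// present semaphores are in use afterwards.
        fn submit(&mut self) -> (bool, usize) {
            let waits_on_acquire = std::mem::take(&mut self.should_wait_for_acquire);
            if self.present_index == self.present_semaphores {
                self.present_semaphores += 1; // allocate a new present semaphore
            }
            self.present_index += 1;
            (waits_on_acquire, self.present_index)
        }

        /// Mirrors `get_present_wait_semaphores`: returns how many semaphores
        /// the present waits on, then resets for the next acquisition.
        fn present(&mut self) -> usize {
            let wait_count = self.present_index;
            self.present_index = 0;
            self.should_wait_for_acquire = true;
            wait_count
        }
    }

    #[test]
    fn two_submissions_one_present() {
        let mut sync = ImageSync::new();
        assert_eq!(sync.submit(), (true, 1)); // first submit waits on `acquire`
        assert_eq!(sync.submit(), (false, 2)); // later submits do not
        assert_eq!(sync.present(), 2); // present waits on both submit signals
        assert_eq!(sync.submit(), (true, 1)); // next frame reuses semaphore 0
        assert_eq!(sync.present_semaphores, 2); // nothing new was allocated
    }
}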
struct Swapchain {
raw: vk::SwapchainKHR,
raw_flags: vk::SwapchainCreateFlagsKHR,
@@ -157,9 +322,25 @@ struct Swapchain {
view_formats: Vec<wgt::TextureFormat>,
/// One wait semaphore per swapchain image. This will be associated with the
/// surface texture, and later collected during submission.
surface_semaphores: Vec<vk::Semaphore>,
/// Current semaphore index to use when acquiring a surface.
next_surface_index: usize,
///
/// We need this to be `Arc<Mutex<>>` because we need to be able to pass this
/// data into the surface texture, so submit/present can use it.
surface_semaphores: Vec<Arc<Mutex<SwapchainImageSemaphores>>>,
/// The index of the next semaphore to use. Ideally we would use the same
/// index as the image index, but we must pass the semaphore as an argument
/// to `acquire_next_image`, and it is that call which tells us which image to use.
next_semaphore_index: usize,
}
impl Swapchain {
fn advance_surface_semaphores(&mut self) {
let semaphore_count = self.surface_semaphores.len();
self.next_semaphore_index = (self.next_semaphore_index + 1) % semaphore_count;
}
fn get_surface_semaphores(&self) -> Arc<Mutex<SwapchainImageSemaphores>> {
self.surface_semaphores[self.next_semaphore_index].clone()
}
}
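// Illustrative sketch, not part of the vendored source: the semaphore index
// rotates round-robin on every successful acquire, independent of whichever
// image index vkAcquireNextImageKHR returns. The counts below are hypothetical
// (a 3-image swapchain gets images.len() + 1 = 4 semaphore sets).
#[cfg(test)]
mod semaphore_rotation_sketch {
    #[test]
    fn index_cycles_over_all_sets() {
        let semaphore_count = 4;
        let mut next_semaphore_index = 0;
        let mut used = Vec::new();
        for _ in 0..6 {
            used.push(next_semaphore_index);
            // Equivalent of `advance_surface_semaphores` after a successful acquire.
            next_semaphore_index = (next_semaphore_index + 1) % semaphore_count;
        }
        assert_eq!(used, [0, 1, 2, 3, 0, 1]);
    }
}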
pub struct Surface {
@@ -173,7 +354,7 @@ pub struct Surface {
pub struct SurfaceTexture {
index: u32,
texture: Texture,
wait_semaphore: vk::Semaphore,
surface_semaphores: Arc<Mutex<SwapchainImageSemaphores>>,
}
impl Borrow<Texture> for SurfaceTexture {
@@ -359,18 +540,87 @@ pub struct Device {
render_doc: crate::auxil::renderdoc::RenderDoc,
}
/// Semaphores for forcing queue submissions to run in order.
///
/// The [`wgpu_hal::Queue`] trait promises that if two calls to [`submit`] are
/// ordered, then the first submission will finish on the GPU before the second
/// submission begins. To get this behavior on Vulkan we need to pass semaphores
/// to [`vkQueueSubmit`] for the commands to wait on before beginning execution,
/// and to signal when their execution is done.
///
/// Normally this can be done with a single semaphore, waited on and then
/// signalled for each submission. At any given time there's exactly one
/// submission that would signal the semaphore, and exactly one waiting on it,
/// as Vulkan requires.
///
/// However, as of Oct 2021, bug [#5508] in the Mesa ANV drivers caused them to
/// hang if we use a single semaphore. The workaround is to alternate between
/// two semaphores. The bug has been fixed in Mesa, but we should probably keep
/// the workaround until, say, Oct 2026.
///
/// [`wgpu_hal::Queue`]: crate::Queue
/// [`submit`]: crate::Queue::submit
/// [`vkQueueSubmit`]: https://registry.khronos.org/vulkan/specs/1.3-extensions/html/vkspec.html#vkQueueSubmit
/// [#5508]: https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508
#[derive(Clone)]
struct RelaySemaphores {
/// The semaphore the next submission should wait on before beginning
/// execution on the GPU. This is `None` for the first submission, which
/// should not wait on anything at all.
wait: Option<vk::Semaphore>,
/// The semaphore the next submission should signal when it has finished
/// execution on the GPU.
signal: vk::Semaphore,
}
impl RelaySemaphores {
fn new(device: &DeviceShared) -> Result<Self, crate::DeviceError> {
Ok(Self {
wait: None,
signal: device.new_binary_semaphore()?,
})
}
/// Advances the semaphores, returning the semaphores that should be used for a submission.
fn advance(&mut self, device: &DeviceShared) -> Result<Self, crate::DeviceError> {
let old = self.clone();
// Build the state for the next submission.
match self.wait {
None => {
// The `old` values describe the first submission to this queue.
// The second submission should wait on `old.signal`, and then
// signal a new semaphore which we'll create now.
self.wait = Some(old.signal);
self.signal = device.new_binary_semaphore()?;
}
Some(ref mut wait) => {
// What this submission signals, the next should wait.
mem::swap(wait, &mut self.signal);
}
};
Ok(old)
}
/// Destroys the semaphores.
unsafe fn destroy(&self, device: &ash::Device) {
unsafe {
if let Some(wait) = self.wait {
device.destroy_semaphore(wait, None);
}
device.destroy_semaphore(self.signal, None);
}
}
}
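// Illustrative sketch, not part of the vendored source: how `advance` above
// chains three consecutive submissions. Semaphores are modeled as small
// integers handed out by a hypothetical counter; in the real code they are
// `vk::Semaphore`s created on demand.
#[cfg(test)]
mod relay_semaphore_sketch {
    #[derive(Clone, PartialEq, Debug)]
    struct Relay {
        wait: Option<u32>,
        signal: u32,
    }

    fn advance(state: &mut Relay, next_id: &mut u32) -> Relay {
        let old = state.clone();
        match state.wait {
            None => {
                // The second submission waits on what the first signals, and
                // signals a freshly created semaphore.
                state.wait = Some(old.signal);
                *next_id += 1;
                state.signal = *next_id;
            }
            Some(ref mut wait) => {
                // From then on, the two semaphores simply alternate roles.
                std::mem::swap(wait, &mut state.signal);
            }
        }
        old
    }

    #[test]
    fn alternates_between_two_semaphores() {
        let mut next_id = 0; // semaphore "0" is created up front, as in `new()`
        let mut state = Relay { wait: None, signal: 0 };
        let s1 = advance(&mut state, &mut next_id);
        let s2 = advance(&mut state, &mut next_id);
        let s3 = advance(&mut state, &mut next_id);
        assert_eq!(s1, Relay { wait: None, signal: 0 }); // first submit waits on nothing
        assert_eq!(s2, Relay { wait: Some(0), signal: 1 }); // second waits on the first's signal
        assert_eq!(s3, Relay { wait: Some(1), signal: 0 }); // then the pair alternates
    }
}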
pub struct Queue {
raw: vk::Queue,
swapchain_fn: khr::swapchain::Device,
device: Arc<DeviceShared>,
family_index: u32,
/// We use a redundant chain of semaphores to pass on the signal
/// from submissions to the last present, since it's required by the
/// specification.
/// It would be correct to use a single semaphore there, but
/// [Intel hangs in `anv_queue_finish`](https://gitlab.freedesktop.org/mesa/mesa/-/issues/5508).
relay_semaphores: [vk::Semaphore; 2],
relay_index: AtomicIsize,
relay_semaphores: Mutex<RelaySemaphores>,
}
#[derive(Debug)]
@@ -702,58 +952,89 @@ impl crate::Queue for Queue {
&self,
command_buffers: &[&CommandBuffer],
surface_textures: &[&SurfaceTexture],
signal_fence: Option<(&mut Fence, crate::FenceValue)>,
(signal_fence, signal_value): (&mut Fence, crate::FenceValue),
) -> Result<(), crate::DeviceError> {
let mut fence_raw = vk::Fence::null();
let mut wait_stage_masks = Vec::new();
let mut wait_semaphores = Vec::new();
let mut signal_semaphores = ArrayVec::<_, 2>::new();
let mut signal_values = ArrayVec::<_, 2>::new();
let mut signal_semaphores = Vec::new();
let mut signal_values = Vec::new();
for &surface_texture in surface_textures {
wait_stage_masks.push(vk::PipelineStageFlags::TOP_OF_PIPE);
wait_semaphores.push(surface_texture.wait_semaphore);
// Double check that the same swapchain image isn't being given to us multiple times,
// as that will deadlock when we try to lock them all.
debug_assert!(
{
let mut check = HashSet::with_capacity(surface_textures.len());
// We compare the Arcs by pointer, as Eq isn't well defined for SurfaceSemaphores.
for st in surface_textures {
check.insert(Arc::as_ptr(&st.surface_semaphores));
}
check.len() == surface_textures.len()
},
"More than one surface texture is being used from the same swapchain. This will cause a deadlock in release."
);
let locked_swapchain_semaphores = surface_textures
.iter()
.map(|st| {
st.surface_semaphores
.try_lock()
.expect("Failed to lock surface semaphore.")
})
.collect::<Vec<_>>();
for mut swapchain_semaphore in locked_swapchain_semaphores {
swapchain_semaphore.set_used_fence_value(signal_value);
// If we're the first submission to operate on this image, wait on
// its acquire semaphore, to make sure the presentation engine is
// done with it.
if let Some(sem) = swapchain_semaphore.get_acquire_wait_semaphore() {
wait_stage_masks.push(vk::PipelineStageFlags::TOP_OF_PIPE);
wait_semaphores.push(sem);
}
// Get a semaphore to signal when we're done writing to this surface
// image. Presentation of this image will wait for this.
let signal_semaphore = swapchain_semaphore.get_submit_signal_semaphore(&self.device)?;
signal_semaphores.push(signal_semaphore);
signal_values.push(!0);
}
let old_index = self.relay_index.load(Ordering::Relaxed);
// In order for submissions to be strictly ordered, we encode a dependency between each submission
// using a pair of semaphores. This adds a wait if it is needed, and signals the next semaphore.
let semaphore_state = self.relay_semaphores.lock().advance(&self.device)?;
let sem_index = if old_index >= 0 {
if let Some(sem) = semaphore_state.wait {
wait_stage_masks.push(vk::PipelineStageFlags::TOP_OF_PIPE);
wait_semaphores.push(self.relay_semaphores[old_index as usize]);
(old_index as usize + 1) % self.relay_semaphores.len()
} else {
0
};
wait_semaphores.push(sem);
}
signal_semaphores.push(self.relay_semaphores[sem_index]);
signal_semaphores.push(semaphore_state.signal);
signal_values.push(!0);
self.relay_index
.store(sem_index as isize, Ordering::Relaxed);
if let Some((fence, value)) = signal_fence {
fence.maintain(&self.device.raw)?;
match *fence {
Fence::TimelineSemaphore(raw) => {
signal_semaphores.push(raw);
signal_values.push(!0);
signal_values.push(value);
}
Fence::FencePool {
ref mut active,
ref mut free,
..
} => {
fence_raw = match free.pop() {
Some(raw) => raw,
None => unsafe {
self.device
.raw
.create_fence(&vk::FenceCreateInfo::default(), None)?
},
};
active.push((value, fence_raw));
}
// We need to signal our wgpu::Fence as well; this adds it to the signal list.
signal_fence.maintain(&self.device.raw)?;
match *signal_fence {
Fence::TimelineSemaphore(raw) => {
signal_semaphores.push(raw);
signal_values.push(signal_value);
}
Fence::FencePool {
ref mut active,
ref mut free,
..
} => {
fence_raw = match free.pop() {
Some(raw) => raw,
None => unsafe {
self.device
.raw
.create_fence(&vk::FenceCreateInfo::default(), None)?
},
};
active.push((signal_value, fence_raw));
}
}
@@ -771,7 +1052,7 @@ impl crate::Queue for Queue {
let mut vk_timeline_info;
if !signal_values.is_empty() {
if self.device.private_caps.timeline_semaphores {
vk_timeline_info =
vk::TimelineSemaphoreSubmitInfo::default().signal_semaphore_values(&signal_values);
vk_info = vk_info.push_next(&mut vk_timeline_info);
@@ -793,19 +1074,14 @@
) -> Result<(), crate::SurfaceError> {
let mut swapchain = surface.swapchain.write();
let ssc = swapchain.as_mut().unwrap();
let mut swapchain_semaphores = texture.surface_semaphores.lock();
let swapchains = [ssc.raw];
let image_indices = [texture.index];
let mut vk_info = vk::PresentInfoKHR::default()
let vk_info = vk::PresentInfoKHR::default()
.swapchains(&swapchains)
.image_indices(&image_indices);
let old_index = self.relay_index.swap(-1, Ordering::Relaxed);
if old_index >= 0 {
vk_info = vk_info.wait_semaphores(
&self.relay_semaphores[old_index as usize..old_index as usize + 1],
);
}
.image_indices(&image_indices)
.wait_semaphores(swapchain_semaphores.get_present_wait_semaphores());
let suboptimal = {
profiling::scope!("vkQueuePresentKHR");


@@ -1 +1 @@
{"files":{"Cargo.toml":"d8f88446d6c1740116442320eca91e06ce9a2f4713179195c1be44e8ab1fc42d","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"7db1fafd8688612e645efd49bdc8c9921a062b9f131cb0d84f9bcb39cafcbbc2","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
{"files":{"Cargo.toml":"d8f88446d6c1740116442320eca91e06ce9a2f4713179195c1be44e8ab1fc42d","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"19d250e0354a4243d5d58673fbece59a052e6a2a217dc27eb7c8c4ed067d25c0","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}

1
third_party/rust/wgpu-types/src/lib.rs (vendored)

@@ -7226,6 +7226,7 @@ mod send_sync {
/// Corresponds to [WebGPU `GPUDeviceLostReason`](https://gpuweb.github.io/gpuweb/#enumdef-gpudevicelostreason).
#[repr(u8)]
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum DeviceLostReason {
/// Triggered by driver
Unknown = 0,