Bug 1878375 - Synchronize vendored Rust libraries with mozilla-central. r=vineet

mozilla-central: ca0abc9ab05e329c1d945ec3b8c1c44458e9ddbe
comm-central: eb9eda28754001430667418d731f1c46c7248f04

Differential Revision: https://phabricator.services.mozilla.com/D215292

--HG--
extra : moz-landing-system : lando
Thunderbird Updatebot 2024-06-29 23:43:39 +00:00
Parent 05aab8ec1f
Commit cffbb672c8
103 changed files with 5271 additions and 3575 deletions


@ -21,9 +21,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"]
[source."git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"]
git = "https://github.com/gfx-rs/wgpu"
rev = "a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
rev = "82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
replace-with = "vendored-sources"
[source."git+https://github.com/glandium/mio?rev=9a2ef335c366044ffe73b1c4acabe50a1daefe05"]

14
rust/Cargo.lock (generated)

@ -1051,7 +1051,7 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067#a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
source = "git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a#82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
dependencies = [
"bitflags 2.5.0",
"libloading",
@ -2969,9 +2969,9 @@ dependencies = [
[[package]]
name = "log"
version = "0.4.21"
version = "0.4.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"
checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"
[[package]]
name = "mach"
@ -3384,7 +3384,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067#a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
source = "git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a#82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
dependencies = [
"arrayvec",
"bit-set",
@ -5682,7 +5682,7 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067#a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
source = "git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a#82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
dependencies = [
"arrayvec",
"bit-vec",
@ -5707,7 +5707,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067#a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
source = "git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a#82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
dependencies = [
"android_system_properties",
"arrayvec",
@ -5746,7 +5746,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067#a2fcd72606f83cbb58c1aca2e7e1ad52a11d2067"
source = "git+https://github.com/gfx-rs/wgpu?rev=82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a#82210e1cdc63cbd5e53f43788f9956bb0d4a2c6a"
dependencies = [
"bitflags 2.5.0",
"js-sys",


@ -1 +1 @@
{"mc_workspace_toml": "4255537d44aabece7b21332aea423d4c093dd381fe07a1248e6719ac75230d24ad536ed0b4a3fc380b1dacce439844a0be9af06844d508224dd8b9f7008620e4", "mc_gkrust_toml": "7fe3345547be910458ce7853a5a918563435ca2b5d33c47065daed541eb1491c82576751d1d5d404e3e6cd7ce63ccb1109dc84e67f8f1b3d758dbf2e497d0570", "mc_cargo_lock": "c6646b3294c3928ff86e46ac5029c5ff3b4da37ea8ad9a83b370d16b99bb5ca1b5e96efb503e9cd37611d8b5d148483c3d6ffc622d4c4a0b997e7ad683a48098"}
{"mc_workspace_toml": "4255537d44aabece7b21332aea423d4c093dd381fe07a1248e6719ac75230d24ad536ed0b4a3fc380b1dacce439844a0be9af06844d508224dd8b9f7008620e4", "mc_gkrust_toml": "7fe3345547be910458ce7853a5a918563435ca2b5d33c47065daed541eb1491c82576751d1d5d404e3e6cd7ce63ccb1109dc84e67f8f1b3d758dbf2e497d0570", "mc_cargo_lock": "b4c2adb4a91d31f0305a85eba5f0fe6c4a80cbaa27e8154a8f7221fbb872b1e61f6d31410208e78286dae63c85c019e6137eb5eba9c72c075f6c4bbebf5c9676"}

2
third_party/rust/log/.cargo-checksum.json (vendored)

@ -1 +1 @@
{"files":{"CHANGELOG.md":"5291aac813a5af29d49cc9e624abcb6f16d49b40cb8b19c171ae05b0f0fb2c51","Cargo.toml":"dff834f0cd9f79ee49305bedabdae4d4c1217f8aee8fb39690ad4bdc2401368c","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"4fcd79a808300bfce99e68749f33f305a446c7bc48789810530a4b3f57a449c6","benches/value.rs":"b613ff353d3cf0ef8cb98e4ca461ea929b8ba553fe299f2eb2942d77a5b1b6a0","src/__private_api.rs":"fe9e5959840af6767e17e6d0e27985156839eef2ee3263f74990e571c640df62","src/kv/error.rs":"6dae12424164c33b93915f5e70bd6d99d616c969c8bfb543806721dd9b423981","src/kv/key.rs":"9439e91c3ab3f9574a6a11a0347c7b63fdf1652384a6b28411136e4373de2970","src/kv/mod.rs":"3521a5bcfd7f92dcfac6c3c948020d686fee696596c566333a27edbbcc8a4ea8","src/kv/source.rs":"911e480fe2719230c13b083717737d42c5d22882844cf0172798f50601442015","src/kv/value.rs":"0aade52b8e3523a17d6114f8b664793862032a94ea1ee2a4f12a20dd729b92d4","src/lib.rs":"6adbad6a28da344fef36b63e81786a94e5cfe7426f7c5eb73df0c467eb078222","src/macros.rs":"9a748a1fb6e097219561e809d30b51d1354478556f507c1d7db3d239cd4a008b","src/serde.rs":"35f520f62fdba0216ccee33e5b66ad8f81dee3af5b65b824f1816180c9350df5","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c"}
{"files":{"CHANGELOG.md":"df7d7ea4256611dd5e3bf160e39bb3f8b665c6805ae47fdbf28acf9f77245ffd","Cargo.toml":"2161251dd0dfbea680a9d5fd762973e343fc5215794681c5ffd641faab9a4e4c","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"a23bbe55ac94081711c081a63df10d324a8a26f4b836952cb3c45c9318a03152","benches/value.rs":"b613ff353d3cf0ef8cb98e4ca461ea929b8ba553fe299f2eb2942d77a5b1b6a0","src/__private_api.rs":"da677f1e29e3cb135c971247031bc0eb20324294ab5c1c74c5118f87e45518ae","src/kv/error.rs":"6dae12424164c33b93915f5e70bd6d99d616c969c8bfb543806721dd9b423981","src/kv/key.rs":"9439e91c3ab3f9574a6a11a0347c7b63fdf1652384a6b28411136e4373de2970","src/kv/mod.rs":"3521a5bcfd7f92dcfac6c3c948020d686fee696596c566333a27edbbcc8a4ea8","src/kv/source.rs":"73fbc180c824072d86f1f41f8c59c014db1d8988a86be38a9128d67d6aab06a5","src/kv/value.rs":"0aade52b8e3523a17d6114f8b664793862032a94ea1ee2a4f12a20dd729b92d4","src/lib.rs":"55c32130cd8b99cde2ea962a403cdade52d20e80088357ba2784ee53b2eb9a2c","src/macros.rs":"dfb98017d5f205fec632069ab857a18661d6d563cf5162eeef64d367cc3ad7f5","src/serde.rs":"35f520f62fdba0216ccee33e5b66ad8f81dee3af5b65b824f1816180c9350df5","triagebot.toml":"a135e10c777cd13459559bdf74fb704c1379af7c9b0f70bc49fa6f5a837daa81"},"package":"a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24"}

20
third_party/rust/log/CHANGELOG.md (vendored)

@ -2,6 +2,26 @@
## [Unreleased]
## [0.4.22] - 2024-06-27
## What's Changed
* Add some clarifications to the library docs by @KodrAus in https://github.com/rust-lang/log/pull/620
* Add links to `colog` crate by @chrivers in https://github.com/rust-lang/log/pull/621
* adding line_number test + updating some testing infrastructure by @DIvkov575 in https://github.com/rust-lang/log/pull/619
* Clarify the actual set of functions that can race in _racy variants by @KodrAus in https://github.com/rust-lang/log/pull/623
* Replace deprecated std::sync::atomic::spin_loop_hint() by @Catamantaloedis in https://github.com/rust-lang/log/pull/625
* Check usage of max_level features by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/627
* Remove unneeded import by @Thomasdezeeuw in https://github.com/rust-lang/log/pull/628
* Loosen orderings for logger initialization in https://github.com/rust-lang/log/pull/632. Originally by @pwoolcoc in https://github.com/rust-lang/log/pull/599
* Use Location::caller() for file and line info in https://github.com/rust-lang/log/pull/633. Originally by @Cassy343 in https://github.com/rust-lang/log/pull/520
## New Contributors
* @chrivers made their first contribution in https://github.com/rust-lang/log/pull/621
* @DIvkov575 made their first contribution in https://github.com/rust-lang/log/pull/619
* @Catamantaloedis made their first contribution in https://github.com/rust-lang/log/pull/625
**Full Changelog**: https://github.com/rust-lang/log/compare/0.4.21...0.4.22
## [0.4.21] - 2024-02-27
## What's Changed

6
third_party/rust/log/Cargo.toml (vendored)

@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.60.0"
name = "log"
version = "0.4.21"
version = "0.4.22"
authors = ["The Rust Project Developers"]
exclude = ["rfcs/**/*"]
description = """
@ -36,8 +36,8 @@ features = [
]
[[test]]
name = "filters"
path = "tests/filters.rs"
name = "integration"
path = "tests/integration.rs"
harness = false
[[test]]

1
third_party/rust/log/README.md (vendored)

@ -62,6 +62,7 @@ There are many available implementations to choose from, here are some options:
* Simple minimal loggers:
* [`env_logger`](https://docs.rs/env_logger/*/env_logger/)
* [`colog`](https://docs.rs/colog/*/colog/)
* [`simple_logger`](https://docs.rs/simple_logger/*/simple_logger/)
* [`simplelog`](https://docs.rs/simplelog/*/simplelog/)
* [`pretty_env_logger`](https://docs.rs/pretty_env_logger/*/pretty_env_logger/)

26
third_party/rust/log/src/__private_api.rs (vendored)

@ -3,7 +3,8 @@
use self::sealed::KVs;
use crate::{Level, Metadata, Record};
use std::fmt::Arguments;
pub use std::{file, format_args, line, module_path, stringify};
use std::panic::Location;
pub use std::{format_args, module_path, stringify};
#[cfg(not(feature = "kv"))]
pub type Value<'a> = &'a str;
@ -36,8 +37,7 @@ impl<'a> KVs<'a> for () {
fn log_impl(
args: Arguments,
level: Level,
&(target, module_path, file): &(&str, &'static str, &'static str),
line: u32,
&(target, module_path, loc): &(&str, &'static str, &'static Location),
kvs: Option<&[(&str, Value)]>,
) {
#[cfg(not(feature = "kv"))]
@ -52,8 +52,8 @@ fn log_impl(
.level(level)
.target(target)
.module_path_static(Some(module_path))
.file_static(Some(file))
.line(Some(line));
.file_static(Some(loc.file()))
.line(Some(loc.line()));
#[cfg(feature = "kv")]
builder.key_values(&kvs);
@ -64,25 +64,23 @@ fn log_impl(
pub fn log<'a, K>(
args: Arguments,
level: Level,
target_module_path_and_file: &(&str, &'static str, &'static str),
line: u32,
target_module_path_and_loc: &(&str, &'static str, &'static Location),
kvs: K,
) where
K: KVs<'a>,
{
log_impl(
args,
level,
target_module_path_and_file,
line,
kvs.into_kvs(),
)
log_impl(args, level, target_module_path_and_loc, kvs.into_kvs())
}
pub fn enabled(level: Level, target: &str) -> bool {
crate::logger().enabled(&Metadata::builder().level(level).target(target).build())
}
#[track_caller]
pub fn loc() -> &'static Location<'static> {
Location::caller()
}
#[cfg(feature = "kv")]
mod kv_support {
use crate::kv;

2
third_party/rust/log/src/kv/source.rs (vendored)

@ -406,8 +406,6 @@ mod std_support {
#[cfg(test)]
mod tests {
use std::collections::{BTreeMap, HashMap};
use crate::kv::value;
use super::*;

89
third_party/rust/log/src/lib.rs (vendored)

@ -41,6 +41,8 @@
//! [`trace!`]: ./macro.trace.html
//! [`println!`]: https://doc.rust-lang.org/stable/std/macro.println.html
//!
//! Avoid writing expressions with side-effects in log statements. They may not be evaluated.
//!
//! ## In libraries
//!
//! Libraries should link only to the `log` crate, and use the provided
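The new doc line above about side effects follows from how the macros filter by level before evaluating their arguments; a small sketch (assuming no logger or max level has been set, so the default filter is Off):

use log::debug;

fn main() {
    let mut hits = 0;
    // With the default LevelFilter::Off, the argument expression below
    // is never evaluated, so `hits` stays 0.
    debug!("expensive = {}", { hits += 1; hits });
    assert_eq!(hits, 0);
}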
@ -133,6 +135,7 @@
//!
//! * Simple minimal loggers:
//! * [env_logger]
//! * [colog]
//! * [simple_logger]
//! * [simplelog]
//! * [pretty_env_logger]
@ -308,6 +311,7 @@
//! [`try_set_logger_raw`]: fn.try_set_logger_raw.html
//! [`shutdown_logger_raw`]: fn.shutdown_logger_raw.html
//! [env_logger]: https://docs.rs/env_logger/*/env_logger/
//! [colog]: https://docs.rs/colog/*/colog/
//! [simple_logger]: https://github.com/borntyping/rust-simple_logger
//! [simplelog]: https://github.com/drakulix/simplelog.rs
//! [pretty_env_logger]: https://docs.rs/pretty_env_logger/*/pretty_env_logger/
@ -332,15 +336,50 @@
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://docs.rs/log/0.4.21"
html_root_url = "https://docs.rs/log/0.4.22"
)]
#![warn(missing_docs)]
#![deny(missing_debug_implementations, unconditional_recursion)]
#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
// When compiled for the rustc compiler itself we want to make sure that this is
// an unstable crate
#![cfg_attr(rustbuild, feature(staged_api, rustc_private))]
#![cfg_attr(rustbuild, unstable(feature = "rustc_private", issue = "27812"))]
#[cfg(any(
all(feature = "max_level_off", feature = "max_level_error"),
all(feature = "max_level_off", feature = "max_level_warn"),
all(feature = "max_level_off", feature = "max_level_info"),
all(feature = "max_level_off", feature = "max_level_debug"),
all(feature = "max_level_off", feature = "max_level_trace"),
all(feature = "max_level_error", feature = "max_level_warn"),
all(feature = "max_level_error", feature = "max_level_info"),
all(feature = "max_level_error", feature = "max_level_debug"),
all(feature = "max_level_error", feature = "max_level_trace"),
all(feature = "max_level_warn", feature = "max_level_info"),
all(feature = "max_level_warn", feature = "max_level_debug"),
all(feature = "max_level_warn", feature = "max_level_trace"),
all(feature = "max_level_info", feature = "max_level_debug"),
all(feature = "max_level_info", feature = "max_level_trace"),
all(feature = "max_level_debug", feature = "max_level_trace"),
))]
compile_error!("multiple max_level_* features set");
#[rustfmt::skip]
#[cfg(any(
all(feature = "release_max_level_off", feature = "release_max_level_error"),
all(feature = "release_max_level_off", feature = "release_max_level_warn"),
all(feature = "release_max_level_off", feature = "release_max_level_info"),
all(feature = "release_max_level_off", feature = "release_max_level_debug"),
all(feature = "release_max_level_off", feature = "release_max_level_trace"),
all(feature = "release_max_level_error", feature = "release_max_level_warn"),
all(feature = "release_max_level_error", feature = "release_max_level_info"),
all(feature = "release_max_level_error", feature = "release_max_level_debug"),
all(feature = "release_max_level_error", feature = "release_max_level_trace"),
all(feature = "release_max_level_warn", feature = "release_max_level_info"),
all(feature = "release_max_level_warn", feature = "release_max_level_debug"),
all(feature = "release_max_level_warn", feature = "release_max_level_trace"),
all(feature = "release_max_level_info", feature = "release_max_level_debug"),
all(feature = "release_max_level_info", feature = "release_max_level_trace"),
all(feature = "release_max_level_debug", feature = "release_max_level_trace"),
))]
compile_error!("multiple release_max_level_* features set");
#[cfg(all(not(feature = "std"), not(test)))]
extern crate core as std;
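The guards added above turn conflicting Cargo features into a hard compile error instead of a silent misconfiguration. The pattern in isolation (feature names are illustrative):

// With `[features] fast = [] small = []` in Cargo.toml, building with
// `--features fast,small` aborts compilation with this message:
#[cfg(all(feature = "fast", feature = "small"))]
compile_error!("features `fast` and `small` are mutually exclusive");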
@ -1149,6 +1188,11 @@ pub trait Log: Sync + Send {
fn log(&self, record: &Record);
/// Flushes any buffered records.
///
/// # For implementors
///
/// This method isn't called automatically by the `log!` macros.
/// It can be called manually on shut-down to ensure any in-flight records are flushed.
fn flush(&self);
}
@ -1234,13 +1278,13 @@ pub fn set_max_level(level: LevelFilter) {
///
/// # Safety
///
/// This function is only safe to call when no other level setting function is
/// called while this function still executes.
/// This function is only safe to call when it cannot race with any other
/// calls to `set_max_level` or `set_max_level_racy`.
///
/// This can be upheld by (for example) making sure that **there are no other
/// threads**, and (on embedded) that **interrupts are disabled**.
///
/// Is is safe to use all other logging functions while this function runs
/// It is safe to use all other logging functions while this function runs
/// (including all logging macros).
///
/// [`set_max_level`]: fn.set_max_level.html
@ -1357,27 +1401,22 @@ fn set_logger_inner<F>(make_logger: F) -> Result<(), SetLoggerError>
where
F: FnOnce() -> &'static dyn Log,
{
let old_state = match STATE.compare_exchange(
match STATE.compare_exchange(
UNINITIALIZED,
INITIALIZING,
Ordering::SeqCst,
Ordering::SeqCst,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(s) | Err(s) => s,
};
match old_state {
UNINITIALIZED => {
Ok(UNINITIALIZED) => {
unsafe {
LOGGER = make_logger();
}
STATE.store(INITIALIZED, Ordering::SeqCst);
STATE.store(INITIALIZED, Ordering::Release);
Ok(())
}
INITIALIZING => {
while STATE.load(Ordering::SeqCst) == INITIALIZING {
// TODO: replace with `hint::spin_loop` once MSRV is 1.49.0.
#[allow(deprecated)]
std::sync::atomic::spin_loop_hint();
Err(INITIALIZING) => {
while STATE.load(Ordering::Relaxed) == INITIALIZING {
std::hint::spin_loop();
}
Err(SetLoggerError(()))
}
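The loosened orderings are sound because of a release/acquire pairing: the `Release` store of `INITIALIZED` happens after the `LOGGER` write, and any thread whose `Acquire` load observes `INITIALIZED` also observes that write. A reduced sketch of the pattern (not the crate's actual code):

use std::sync::atomic::{AtomicUsize, Ordering};

static STATE: AtomicUsize = AtomicUsize::new(0); // 0 = empty, 1 = ready
static mut SLOT: usize = 0; // mirrors the crate's `static mut LOGGER`

fn publish(value: usize) {
    unsafe { SLOT = value };
    STATE.store(1, Ordering::Release); // makes the SLOT write visible...
}

fn read() -> Option<usize> {
    if STATE.load(Ordering::Acquire) == 1 {
        // ...to any thread whose Acquire load sees the Release store.
        Some(unsafe { SLOT })
    } else {
        None
    }
}

fn main() {
    publish(42);
    assert_eq!(read(), Some(42));
}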
@ -1394,8 +1433,8 @@ where
///
/// # Safety
///
/// This function is only safe to call when no other logger initialization
/// function is called while this function still executes.
/// This function is only safe to call when it cannot race with any other
/// calls to `set_logger` or `set_logger_racy`.
///
/// This can be upheld by (for example) making sure that **there are no other
/// threads**, and (on embedded) that **interrupts are disabled**.
@ -1405,10 +1444,10 @@ where
///
/// [`set_logger`]: fn.set_logger.html
pub unsafe fn set_logger_racy(logger: &'static dyn Log) -> Result<(), SetLoggerError> {
match STATE.load(Ordering::SeqCst) {
match STATE.load(Ordering::Acquire) {
UNINITIALIZED => {
LOGGER = logger;
STATE.store(INITIALIZED, Ordering::SeqCst);
STATE.store(INITIALIZED, Ordering::Release);
Ok(())
}
INITIALIZING => {

6
third_party/rust/log/src/macros.rs (vendored)

@ -36,8 +36,7 @@ macro_rules! log {
$crate::__private_api::log::<&_>(
$crate::__private_api::format_args!($($arg)+),
lvl,
&($target, $crate::__private_api::module_path!(), $crate::__private_api::file!()),
$crate::__private_api::line!(),
&($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()),
&[$(($crate::__log_key!($key), $crate::__log_value!($key $(:$capture)* = $($value)*))),+]
);
}
@ -50,8 +49,7 @@ macro_rules! log {
$crate::__private_api::log(
$crate::__private_api::format_args!($($arg)+),
lvl,
&($target, $crate::__private_api::module_path!(), $crate::__private_api::file!()),
$crate::__private_api::line!(),
&($target, $crate::__private_api::module_path!(), $crate::__private_api::loc()),
(),
);
}
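User-facing call sites are unchanged by the new expansion; only the hidden arguments differ. For example (env_logger is an arbitrary backend choice, not implied by this diff):

use log::info;

fn main() {
    env_logger::init();
    // The expansion now passes one &'static Location instead of a
    // separate file!() string and line!() number.
    info!(target: "app", "started");
}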

2
third_party/rust/naga/.cargo-checksum.json (vendored)

File diff suppressed because one or more lines are too long

785
third_party/rust/naga/src/arena.rs (vendored)

@ -1,785 +0,0 @@
use std::{cmp::Ordering, fmt, hash, marker::PhantomData, num::NonZeroU32, ops};
/// A unique index in the arena array that a handle points to.
/// The "non-zero" part ensures that an `Option<Handle<T>>` has
/// the same size and representation as `Handle<T>`.
type Index = NonZeroU32;
use crate::{FastIndexSet, Span};
#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq)]
#[error("Handle {index} of {kind} is either not present, or inaccessible yet")]
pub struct BadHandle {
pub kind: &'static str,
pub index: usize,
}
impl BadHandle {
fn new<T>(handle: Handle<T>) -> Self {
Self {
kind: std::any::type_name::<T>(),
index: handle.index(),
}
}
}
/// A strongly typed reference to an arena item.
///
/// A `Handle` value can be used as an index into an [`Arena`] or [`UniqueArena`].
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(
any(feature = "serialize", feature = "deserialize"),
serde(transparent)
)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct Handle<T> {
index: Index,
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(skip))]
marker: PhantomData<T>,
}
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.index == other.index
}
}
impl<T> Eq for Handle<T> {}
impl<T> PartialOrd for Handle<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Handle<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.index.cmp(&other.index)
}
}
impl<T> fmt::Debug for Handle<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}]", self.index)
}
}
impl<T> hash::Hash for Handle<T> {
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
self.index.hash(hasher)
}
}
impl<T> Handle<T> {
#[cfg(test)]
pub const DUMMY: Self = Handle {
index: unsafe { NonZeroU32::new_unchecked(u32::MAX) },
marker: PhantomData,
};
pub(crate) const fn new(index: Index) -> Self {
Handle {
index,
marker: PhantomData,
}
}
/// Returns the zero-based index of this handle.
pub const fn index(self) -> usize {
let index = self.index.get() - 1;
index as usize
}
/// Convert a `usize` index into a `Handle<T>`.
fn from_usize(index: usize) -> Self {
let handle_index = u32::try_from(index + 1)
.ok()
.and_then(Index::new)
.expect("Failed to insert into arena. Handle overflows");
Handle::new(handle_index)
}
/// Convert a `usize` index into a `Handle<T>`, without range checks.
const unsafe fn from_usize_unchecked(index: usize) -> Self {
Handle::new(Index::new_unchecked((index + 1) as u32))
}
}
/// A strongly typed range of handles.
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(
any(feature = "serialize", feature = "deserialize"),
serde(transparent)
)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[cfg_attr(test, derive(PartialEq))]
pub struct Range<T> {
inner: ops::Range<u32>,
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(skip))]
marker: PhantomData<T>,
}
impl<T> Range<T> {
pub(crate) const fn erase_type(self) -> Range<()> {
let Self { inner, marker: _ } = self;
Range {
inner,
marker: PhantomData,
}
}
}
// NOTE: Keep this diagnostic in sync with that of [`BadHandle`].
#[derive(Clone, Debug, thiserror::Error)]
#[cfg_attr(test, derive(PartialEq))]
#[error("Handle range {range:?} of {kind} is either not present, or inaccessible yet")]
pub struct BadRangeError {
// This error is used for many `Handle` types, but there's no point in making this generic, so
// we just flatten them all to `Handle<()>` here.
kind: &'static str,
range: Range<()>,
}
impl BadRangeError {
pub fn new<T>(range: Range<T>) -> Self {
Self {
kind: std::any::type_name::<T>(),
range: range.erase_type(),
}
}
}
impl<T> Clone for Range<T> {
fn clone(&self) -> Self {
Range {
inner: self.inner.clone(),
marker: self.marker,
}
}
}
impl<T> fmt::Debug for Range<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}..{}]", self.inner.start + 1, self.inner.end)
}
}
impl<T> Iterator for Range<T> {
type Item = Handle<T>;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.start < self.inner.end {
self.inner.start += 1;
Some(Handle {
index: NonZeroU32::new(self.inner.start).unwrap(),
marker: self.marker,
})
} else {
None
}
}
}
impl<T> Range<T> {
/// Return a range enclosing handles `first` through `last`, inclusive.
pub fn new_from_bounds(first: Handle<T>, last: Handle<T>) -> Self {
Self {
inner: (first.index() as u32)..(last.index() as u32 + 1),
marker: Default::default(),
}
}
/// return the first and last handles included in `self`.
///
/// If `self` is an empty range, there are no handles included, so
/// return `None`.
pub fn first_and_last(&self) -> Option<(Handle<T>, Handle<T>)> {
if self.inner.start < self.inner.end {
Some((
// `Range::new_from_bounds` expects a 1-based, start- and
// end-inclusive range, but `self.inner` is a zero-based,
// end-exclusive range.
Handle::new(Index::new(self.inner.start + 1).unwrap()),
Handle::new(Index::new(self.inner.end).unwrap()),
))
} else {
None
}
}
/// Return the zero-based index range covered by `self`.
pub fn zero_based_index_range(&self) -> ops::Range<u32> {
self.inner.clone()
}
/// Construct a `Range` that covers the zero-based indices in `inner`.
pub fn from_zero_based_index_range(inner: ops::Range<u32>, arena: &Arena<T>) -> Self {
// Since `inner` is a `Range<u32>`, we only need to check that
// the start and end are well-ordered, and that the end fits
// within `arena`.
assert!(inner.start <= inner.end);
assert!(inner.end as usize <= arena.len());
Self {
inner,
marker: Default::default(),
}
}
}
/// An arena holding some kind of component (e.g., type, constant,
/// instruction, etc.) that can be referenced.
///
/// Adding new items to the arena produces a strongly-typed [`Handle`].
/// The arena can be indexed using the given handle to obtain
/// a reference to the stored item.
#[derive(Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "serialize", serde(transparent))]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[cfg_attr(test, derive(PartialEq))]
pub struct Arena<T> {
/// Values of this arena.
data: Vec<T>,
#[cfg_attr(feature = "serialize", serde(skip))]
span_info: Vec<Span>,
}
impl<T> Default for Arena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug> fmt::Debug for Arena<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<T> Arena<T> {
/// Create a new arena with no initial capacity allocated.
pub const fn new() -> Self {
Arena {
data: Vec::new(),
span_info: Vec::new(),
}
}
/// Extracts the inner vector.
#[allow(clippy::missing_const_for_fn)] // ignore due to requirement of #![feature(const_precise_live_drops)]
pub fn into_inner(self) -> Vec<T> {
self.data
}
/// Returns the current number of items stored in this arena.
pub fn len(&self) -> usize {
self.data.len()
}
/// Returns `true` if the arena contains no elements.
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Returns an iterator over the items stored in this arena, returning both
/// the item's handle and a reference to it.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
self.data
.iter()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
}
/// Drains the arena, returning an iterator over the items stored.
pub fn drain(&mut self) -> impl DoubleEndedIterator<Item = (Handle<T>, T, Span)> {
let arena = std::mem::take(self);
arena
.data
.into_iter()
.zip(arena.span_info)
.enumerate()
.map(|(i, (v, span))| unsafe { (Handle::from_usize_unchecked(i), v, span) })
}
/// Returns an iterator over the items stored in this arena,
/// returning both the item's handle and a mutable reference to it.
pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = (Handle<T>, &mut T)> {
self.data
.iter_mut()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
}
/// Adds a new value to the arena, returning a typed handle.
pub fn append(&mut self, value: T, span: Span) -> Handle<T> {
let index = self.data.len();
self.data.push(value);
self.span_info.push(span);
Handle::from_usize(index)
}
/// Fetch a handle to an existing type.
pub fn fetch_if<F: Fn(&T) -> bool>(&self, fun: F) -> Option<Handle<T>> {
self.data
.iter()
.position(fun)
.map(|index| unsafe { Handle::from_usize_unchecked(index) })
}
/// Adds a value with a custom check for uniqueness:
/// returns a handle pointing to
/// an existing element if the check succeeds, or adds a new
/// element otherwise.
pub fn fetch_if_or_append<F: Fn(&T, &T) -> bool>(
&mut self,
value: T,
span: Span,
fun: F,
) -> Handle<T> {
if let Some(index) = self.data.iter().position(|d| fun(d, &value)) {
unsafe { Handle::from_usize_unchecked(index) }
} else {
self.append(value, span)
}
}
/// Adds a value with a check for uniqueness, where the check is plain comparison.
pub fn fetch_or_append(&mut self, value: T, span: Span) -> Handle<T>
where
T: PartialEq,
{
self.fetch_if_or_append(value, span, T::eq)
}
pub fn try_get(&self, handle: Handle<T>) -> Result<&T, BadHandle> {
self.data
.get(handle.index())
.ok_or_else(|| BadHandle::new(handle))
}
/// Get a mutable reference to an element in the arena.
pub fn get_mut(&mut self, handle: Handle<T>) -> &mut T {
self.data.get_mut(handle.index()).unwrap()
}
/// Get the range of handles from a particular number of elements to the end.
pub fn range_from(&self, old_length: usize) -> Range<T> {
Range {
inner: old_length as u32..self.data.len() as u32,
marker: PhantomData,
}
}
/// Clears the arena keeping all allocations
pub fn clear(&mut self) {
self.data.clear()
}
pub fn get_span(&self, handle: Handle<T>) -> Span {
*self
.span_info
.get(handle.index())
.unwrap_or(&Span::default())
}
/// Assert that `handle` is valid for this arena.
pub fn check_contains_handle(&self, handle: Handle<T>) -> Result<(), BadHandle> {
if handle.index() < self.data.len() {
Ok(())
} else {
Err(BadHandle::new(handle))
}
}
/// Assert that `range` is valid for this arena.
pub fn check_contains_range(&self, range: &Range<T>) -> Result<(), BadRangeError> {
// Since `range.inner` is a `Range<u32>`, we only need to check that the
// start precedes the end, and that the end is in range.
if range.inner.start > range.inner.end {
return Err(BadRangeError::new(range.clone()));
}
// Empty ranges are tolerated: they can be produced by compaction.
if range.inner.start == range.inner.end {
return Ok(());
}
// `range.inner` is zero-based, but end-exclusive, so `range.inner.end`
// is actually the right one-based index for the last handle within the
// range.
let last_handle = Handle::new(range.inner.end.try_into().unwrap());
if self.check_contains_handle(last_handle).is_err() {
return Err(BadRangeError::new(range.clone()));
}
Ok(())
}
#[cfg(feature = "compact")]
pub(crate) fn retain_mut<P>(&mut self, mut predicate: P)
where
P: FnMut(Handle<T>, &mut T) -> bool,
{
let mut index = 0;
let mut retained = 0;
self.data.retain_mut(|elt| {
let handle = Handle::new(Index::new(index as u32 + 1).unwrap());
let keep = predicate(handle, elt);
// Since `predicate` needs mutable access to each element,
// we can't feasibly call it twice, so we have to compact
// spans by hand in parallel as part of this iteration.
if keep {
self.span_info[retained] = self.span_info[index];
retained += 1;
}
index += 1;
keep
});
self.span_info.truncate(retained);
}
}
#[cfg(feature = "deserialize")]
impl<'de, T> serde::Deserialize<'de> for Arena<T>
where
T: serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let data = Vec::deserialize(deserializer)?;
let span_info = std::iter::repeat(Span::default())
.take(data.len())
.collect();
Ok(Self { data, span_info })
}
}
impl<T> ops::Index<Handle<T>> for Arena<T> {
type Output = T;
fn index(&self, handle: Handle<T>) -> &T {
&self.data[handle.index()]
}
}
impl<T> ops::IndexMut<Handle<T>> for Arena<T> {
fn index_mut(&mut self, handle: Handle<T>) -> &mut T {
&mut self.data[handle.index()]
}
}
impl<T> ops::Index<Range<T>> for Arena<T> {
type Output = [T];
fn index(&self, range: Range<T>) -> &[T] {
&self.data[range.inner.start as usize..range.inner.end as usize]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn append_non_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.append(0, Default::default());
let t2 = arena.append(0, Default::default());
assert!(t1 != t2);
assert!(arena[t1] == arena[t2]);
}
#[test]
fn append_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.append(0, Default::default());
let t2 = arena.append(1, Default::default());
assert!(t1 != t2);
assert!(arena[t1] != arena[t2]);
}
#[test]
fn fetch_or_append_non_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.fetch_or_append(0, Default::default());
let t2 = arena.fetch_or_append(0, Default::default());
assert!(t1 == t2);
assert!(arena[t1] == arena[t2])
}
#[test]
fn fetch_or_append_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.fetch_or_append(0, Default::default());
let t2 = arena.fetch_or_append(1, Default::default());
assert!(t1 != t2);
assert!(arena[t1] != arena[t2]);
}
}
/// An arena whose elements are guaranteed to be unique.
///
/// A `UniqueArena` holds a set of unique values of type `T`, each with an
/// associated [`Span`]. Inserting a value returns a `Handle<T>`, which can be
/// used to index the `UniqueArena` and obtain shared access to the `T` element.
/// Access via a `Handle` is an array lookup - no hash lookup is necessary.
///
/// The element type must implement `Eq` and `Hash`. Insertions of equivalent
/// elements, according to `Eq`, all return the same `Handle`.
///
/// Once inserted, elements may not be mutated.
///
/// `UniqueArena` is similar to [`Arena`]: If `Arena` is vector-like,
/// `UniqueArena` is `HashSet`-like.
#[derive(Clone)]
pub struct UniqueArena<T> {
set: FastIndexSet<T>,
/// Spans for the elements, indexed by handle.
///
/// The length of this vector is always equal to `set.len()`. `FastIndexSet`
/// promises that its elements "are indexed in a compact range, without
/// holes in the range 0..set.len()", so we can always use the indices
/// returned by insertion as indices into this vector.
span_info: Vec<Span>,
}
impl<T> UniqueArena<T> {
/// Create a new arena with no initial capacity allocated.
pub fn new() -> Self {
UniqueArena {
set: FastIndexSet::default(),
span_info: Vec::new(),
}
}
/// Return the current number of items stored in this arena.
pub fn len(&self) -> usize {
self.set.len()
}
/// Return `true` if the arena contains no elements.
pub fn is_empty(&self) -> bool {
self.set.is_empty()
}
/// Clears the arena, keeping all allocations.
pub fn clear(&mut self) {
self.set.clear();
self.span_info.clear();
}
/// Return the span associated with `handle`.
///
/// If a value has been inserted multiple times, the span returned is the
/// one provided with the first insertion.
pub fn get_span(&self, handle: Handle<T>) -> Span {
*self
.span_info
.get(handle.index())
.unwrap_or(&Span::default())
}
#[cfg(feature = "compact")]
pub(crate) fn drain_all(&mut self) -> UniqueArenaDrain<T> {
UniqueArenaDrain {
inner_elts: self.set.drain(..),
inner_spans: self.span_info.drain(..),
index: Index::new(1).unwrap(),
}
}
}
#[cfg(feature = "compact")]
pub(crate) struct UniqueArenaDrain<'a, T> {
inner_elts: indexmap::set::Drain<'a, T>,
inner_spans: std::vec::Drain<'a, Span>,
index: Index,
}
#[cfg(feature = "compact")]
impl<'a, T> Iterator for UniqueArenaDrain<'a, T> {
type Item = (Handle<T>, T, Span);
fn next(&mut self) -> Option<Self::Item> {
match self.inner_elts.next() {
Some(elt) => {
let handle = Handle::new(self.index);
self.index = self.index.checked_add(1).unwrap();
let span = self.inner_spans.next().unwrap();
Some((handle, elt, span))
}
None => None,
}
}
}
impl<T: Eq + hash::Hash> UniqueArena<T> {
/// Returns an iterator over the items stored in this arena, returning both
/// the item's handle and a reference to it.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
self.set.iter().enumerate().map(|(i, v)| {
let position = i + 1;
let index = unsafe { Index::new_unchecked(position as u32) };
(Handle::new(index), v)
})
}
/// Insert a new value into the arena.
///
/// Return a [`Handle<T>`], which can be used to index this arena to get a
/// shared reference to the element.
///
/// If this arena already contains an element that is `Eq` to `value`,
/// return a `Handle` to the existing element, and drop `value`.
///
/// If `value` is inserted into the arena, associate `span` with
/// it. An element's span can be retrieved with the [`get_span`]
/// method.
///
/// [`Handle<T>`]: Handle
/// [`get_span`]: UniqueArena::get_span
pub fn insert(&mut self, value: T, span: Span) -> Handle<T> {
let (index, added) = self.set.insert_full(value);
if added {
debug_assert!(index == self.span_info.len());
self.span_info.push(span);
}
debug_assert!(self.set.len() == self.span_info.len());
Handle::from_usize(index)
}
/// Replace an old value with a new value.
///
/// # Panics
///
/// - if the old value is not in the arena
/// - if the new value already exists in the arena
pub fn replace(&mut self, old: Handle<T>, new: T) {
let (index, added) = self.set.insert_full(new);
assert!(added && index == self.set.len() - 1);
self.set.swap_remove_index(old.index()).unwrap();
}
/// Return this arena's handle for `value`, if present.
///
/// If this arena already contains an element equal to `value`,
/// return its handle. Otherwise, return `None`.
pub fn get(&self, value: &T) -> Option<Handle<T>> {
self.set
.get_index_of(value)
.map(|index| unsafe { Handle::from_usize_unchecked(index) })
}
/// Return this arena's value at `handle`, if that is a valid handle.
pub fn get_handle(&self, handle: Handle<T>) -> Result<&T, BadHandle> {
self.set
.get_index(handle.index())
.ok_or_else(|| BadHandle::new(handle))
}
/// Assert that `handle` is valid for this arena.
pub fn check_contains_handle(&self, handle: Handle<T>) -> Result<(), BadHandle> {
if handle.index() < self.set.len() {
Ok(())
} else {
Err(BadHandle::new(handle))
}
}
}
impl<T> Default for UniqueArena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug + Eq + hash::Hash> fmt::Debug for UniqueArena<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<T> ops::Index<Handle<T>> for UniqueArena<T> {
type Output = T;
fn index(&self, handle: Handle<T>) -> &T {
&self.set[handle.index()]
}
}
#[cfg(feature = "serialize")]
impl<T> serde::Serialize for UniqueArena<T>
where
T: Eq + hash::Hash + serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.set.serialize(serializer)
}
}
#[cfg(feature = "deserialize")]
impl<'de, T> serde::Deserialize<'de> for UniqueArena<T>
where
T: Eq + hash::Hash + serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let set = FastIndexSet::deserialize(deserializer)?;
let span_info = std::iter::repeat(Span::default()).take(set.len()).collect();
Ok(Self { set, span_info })
}
}
//Note: largely borrowed from `HashSet` implementation
#[cfg(feature = "arbitrary")]
impl<'a, T> arbitrary::Arbitrary<'a> for UniqueArena<T>
where
T: Eq + hash::Hash + arbitrary::Arbitrary<'a>,
{
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let mut arena = Self::default();
for elem in u.arbitrary_iter()? {
arena.set.insert(elem?);
arena.span_info.push(Span::UNDEFINED);
}
Ok(arena)
}
fn arbitrary_take_rest(u: arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let mut arena = Self::default();
for elem in u.arbitrary_take_rest_iter()? {
arena.set.insert(elem?);
arena.span_info.push(Span::UNDEFINED);
}
Ok(arena)
}
#[inline]
fn size_hint(depth: usize) -> (usize, Option<usize>) {
let depth_hint = <usize as arbitrary::Arbitrary>::size_hint(depth);
arbitrary::size_hint::and(depth_hint, (0, None))
}
}

126
third_party/rust/naga/src/arena/handle.rs (vendored, new file)

@ -0,0 +1,126 @@
//! Well-typed indices into [`Arena`]s and [`UniqueArena`]s.
//!
//! This module defines [`Handle`] and related types.
//!
//! [`Arena`]: super::Arena
//! [`UniqueArena`]: super::UniqueArena
use std::{cmp::Ordering, fmt, hash, marker::PhantomData};
/// A unique index in the arena array that a handle points to.
/// The "non-max" part ensures that an `Option<Handle<T>>` has
/// the same size and representation as `Handle<T>`.
pub type Index = crate::non_max_u32::NonMaxU32;
#[derive(Clone, Copy, Debug, thiserror::Error, PartialEq)]
#[error("Handle {index} of {kind} is either not present, or inaccessible yet")]
pub struct BadHandle {
pub kind: &'static str,
pub index: usize,
}
impl BadHandle {
pub fn new<T>(handle: Handle<T>) -> Self {
Self {
kind: std::any::type_name::<T>(),
index: handle.index(),
}
}
}
/// A strongly typed reference to an arena item.
///
/// A `Handle` value can be used as an index into an [`Arena`] or [`UniqueArena`].
///
/// [`Arena`]: super::Arena
/// [`UniqueArena`]: super::UniqueArena
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(
any(feature = "serialize", feature = "deserialize"),
serde(transparent)
)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct Handle<T> {
index: Index,
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(skip))]
marker: PhantomData<T>,
}
impl<T> Clone for Handle<T> {
fn clone(&self) -> Self {
*self
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.index == other.index
}
}
impl<T> Eq for Handle<T> {}
impl<T> PartialOrd for Handle<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<T> Ord for Handle<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.index.cmp(&other.index)
}
}
impl<T> fmt::Debug for Handle<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}]", self.index)
}
}
impl<T> hash::Hash for Handle<T> {
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
self.index.hash(hasher)
}
}
impl<T> Handle<T> {
pub(crate) const fn new(index: Index) -> Self {
Handle {
index,
marker: PhantomData,
}
}
/// Returns the index of this handle.
pub const fn index(self) -> usize {
self.index.get() as usize
}
/// Convert a `usize` index into a `Handle<T>`.
pub(super) fn from_usize(index: usize) -> Self {
let handle_index = u32::try_from(index)
.ok()
.and_then(Index::new)
.expect("Failed to insert into arena. Handle overflows");
Handle::new(handle_index)
}
/// Convert a `usize` index into a `Handle<T>`, without range checks.
pub(super) const unsafe fn from_usize_unchecked(index: usize) -> Self {
Handle::new(Index::new_unchecked(index as u32))
}
/// Write this handle's index to `formatter`, preceded by `prefix`.
pub fn write_prefixed(
&self,
formatter: &mut fmt::Formatter,
prefix: &'static str,
) -> fmt::Result {
formatter.write_str(prefix)?;
<usize as fmt::Display>::fmt(&self.index(), formatter)
}
}
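The size claim in the module comment rests on a niche optimization: because the index type forbids one value (`u32::MAX` for `NonMaxU32`), `Option<Handle<T>>` costs no extra space. The same effect is demonstrable with std's `NonZeroU32`:

use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // The forbidden value doubles as Option's None, so no extra
    // discriminant is needed.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<NonZeroU32>());
    assert_eq!(size_of::<Option<u32>>(), 8); // no niche available here
}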

100
third_party/rust/naga/src/arena/handle_set.rs (vendored, new file)

@ -0,0 +1,100 @@
//! The [`HandleSet`] type and associated definitions.
use crate::arena::{Arena, Handle, UniqueArena};
/// A set of `Handle<T>` values.
#[derive(Debug)]
pub struct HandleSet<T> {
/// Bound on indexes of handles stored in this set.
len: usize,
/// `members[i]` is true if the handle with index `i` is a member.
members: bit_set::BitSet,
/// This type is indexed by values of type `T`.
as_keys: std::marker::PhantomData<T>,
}
impl<T> HandleSet<T> {
/// Return a new, empty `HandleSet`.
pub fn new() -> Self {
Self {
len: 0,
members: bit_set::BitSet::new(),
as_keys: std::marker::PhantomData,
}
}
/// Return a new, empty `HandleSet`, sized to hold handles from `arena`.
pub fn for_arena(arena: &impl ArenaType<T>) -> Self {
let len = arena.len();
Self {
len,
members: bit_set::BitSet::with_capacity(len),
as_keys: std::marker::PhantomData,
}
}
/// Remove all members from `self`.
pub fn clear(&mut self) {
self.members.clear();
}
/// Remove all members from `self`, and reserve space to hold handles from `arena`.
pub fn clear_for_arena(&mut self, arena: &impl ArenaType<T>) {
self.members.clear();
self.members.reserve_len(arena.len());
}
/// Return an iterator over all handles that could be made members
/// of this set.
pub fn all_possible(&self) -> impl Iterator<Item = Handle<T>> {
super::Range::full_range_from_size(self.len)
}
/// Add `handle` to the set.
///
/// Return `true` if `handle` was not already present in the set.
pub fn insert(&mut self, handle: Handle<T>) -> bool {
self.members.insert(handle.index())
}
/// Remove `handle` from the set.
///
/// Returns `true` if `handle` was present in the set.
pub fn remove(&mut self, handle: Handle<T>) -> bool {
self.members.remove(handle.index())
}
/// Add handles from `iter` to the set.
pub fn insert_iter(&mut self, iter: impl IntoIterator<Item = Handle<T>>) {
for handle in iter {
self.insert(handle);
}
}
pub fn contains(&self, handle: Handle<T>) -> bool {
self.members.contains(handle.index())
}
/// Return an iterator over all handles in `self`.
pub fn iter(&self) -> impl '_ + Iterator<Item = Handle<T>> {
self.members.iter().map(Handle::from_usize)
}
}
pub trait ArenaType<T> {
fn len(&self) -> usize;
}
impl<T> ArenaType<T> for Arena<T> {
fn len(&self) -> usize {
self.len()
}
}
impl<T: std::hash::Hash + Eq> ArenaType<T> for UniqueArena<T> {
fn len(&self) -> usize {
self.len()
}
}
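`HandleSet` is crate-internal, so here is a standalone sketch of the underlying pattern it implements, a typed wrapper over a bit set (illustrative, not naga's code):

use std::marker::PhantomData;

struct TypedBitSet<T> {
    members: Vec<bool>, // bit_set::BitSet in the real implementation
    _keys: PhantomData<T>,
}

impl<T> TypedBitSet<T> {
    fn with_len(len: usize) -> Self {
        Self { members: vec![false; len], _keys: PhantomData }
    }
    /// Returns true if the index was not already a member.
    fn insert(&mut self, index: usize) -> bool {
        !std::mem::replace(&mut self.members[index], true)
    }
    fn contains(&self, index: usize) -> bool {
        self.members[index]
    }
}

fn main() {
    let mut set: TypedBitSet<char> = TypedBitSet::with_len(8);
    assert!(set.insert(3));
    assert!(!set.insert(3)); // second insertion reports "already present"
    assert!(set.contains(3));
}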

105
third_party/rust/naga/src/arena/handlevec.rs (vendored, new file)

@ -0,0 +1,105 @@
//! The [`HandleVec`] type and associated definitions.
use super::handle::Handle;
use std::marker::PhantomData;
use std::ops;
/// A [`Vec`] indexed by [`Handle`]s.
///
/// A `HandleVec<T, U>` is a [`Vec<U>`] indexed by values of type `Handle<T>`,
/// rather than `usize`.
///
/// Rather than a `push` method, `HandleVec` has an [`insert`] method, analogous
/// to [`HashMap::insert`], that requires you to provide the handle at which the
/// new value should appear. However, since `HandleVec` only supports insertion
/// at the end, the given handle's index must be equal to the `HandleVec`'s
/// current length; otherwise, the insertion will panic.
///
/// [`insert`]: HandleVec::insert
/// [`HashMap::insert`]: std::collections::HashMap::insert
#[derive(Debug)]
pub(crate) struct HandleVec<T, U> {
inner: Vec<U>,
as_keys: PhantomData<T>,
}
impl<T, U> Default for HandleVec<T, U> {
fn default() -> Self {
Self {
inner: vec![],
as_keys: PhantomData,
}
}
}
#[allow(dead_code)]
impl<T, U> HandleVec<T, U> {
pub(crate) const fn new() -> Self {
Self {
inner: vec![],
as_keys: PhantomData,
}
}
pub(crate) fn with_capacity(capacity: usize) -> Self {
Self {
inner: Vec::with_capacity(capacity),
as_keys: PhantomData,
}
}
pub(crate) fn len(&self) -> usize {
self.inner.len()
}
/// Insert a mapping from `handle` to `value`.
///
/// Unlike a [`HashMap`], a `HandleVec` can only have new entries inserted at
/// the end, like [`Vec::push`]. So the index of `handle` must equal
/// [`self.len()`].
///
/// [`HashMap`]: std::collections::HashMap
/// [`self.len()`]: HandleVec::len
pub(crate) fn insert(&mut self, handle: Handle<T>, value: U) {
assert_eq!(handle.index(), self.inner.len());
self.inner.push(value);
}
pub(crate) fn get(&self, handle: Handle<T>) -> Option<&U> {
self.inner.get(handle.index())
}
pub(crate) fn clear(&mut self) {
self.inner.clear()
}
pub(crate) fn resize(&mut self, len: usize, fill: U)
where
U: Clone,
{
self.inner.resize(len, fill);
}
pub(crate) fn iter(&self) -> impl Iterator<Item = &U> {
self.inner.iter()
}
pub(crate) fn iter_mut(&mut self) -> impl Iterator<Item = &mut U> {
self.inner.iter_mut()
}
}
impl<T, U> ops::Index<Handle<T>> for HandleVec<T, U> {
type Output = U;
fn index(&self, handle: Handle<T>) -> &Self::Output {
&self.inner[handle.index()]
}
}
impl<T, U> ops::IndexMut<Handle<T>> for HandleVec<T, U> {
fn index_mut(&mut self, handle: Handle<T>) -> &mut Self::Output {
&mut self.inner[handle.index()]
}
}
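The whole contract of `HandleVec` is the append-only assertion in `insert`; a compact standalone rendering of that invariant (illustrative):

struct DenseMap<U> {
    inner: Vec<U>,
}

impl<U> DenseMap<U> {
    /// Like HandleVec::insert: the key must equal the current length,
    /// so entries can only be added in handle order, at the end.
    fn insert(&mut self, index: usize, value: U) {
        assert_eq!(index, self.inner.len());
        self.inner.push(value);
    }
}

fn main() {
    let mut map = DenseMap { inner: Vec::new() };
    map.insert(0, "first");
    map.insert(1, "second");
    // map.insert(5, "gap"); // would panic: insertion only at the end
}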

329
third_party/rust/naga/src/arena/mod.rs (vendored, new file)

@ -0,0 +1,329 @@
/*! The [`Arena`], [`UniqueArena`], and [`Handle`] types.
To improve translator performance and reduce memory usage, most structures are
stored in an [`Arena`]. An `Arena<T>` stores a series of `T` values, indexed by
[`Handle<T>`](Handle) values, which are just wrappers around integer indexes.
For example, a `Function`'s expressions are stored in an `Arena<Expression>`,
and compound expressions refer to their sub-expressions via `Handle<Expression>`
values. (When examining the serialized form of a `Module`, note that the first
element of an `Arena` has an index of 1, not 0.)
A [`UniqueArena`] is just like an `Arena`, except that it stores only a single
instance of each value. The value type must implement `Eq` and `Hash`. Like an
`Arena`, inserting a value into a `UniqueArena` returns a `Handle` which can be
used to efficiently access the value, without a hash lookup. Inserting a value
multiple times returns the same `Handle`.
If the `span` feature is enabled, both `Arena` and `UniqueArena` can associate a
source code span with each element.
[`Handle<T>`]: Handle
*/
mod handle;
mod handle_set;
mod handlevec;
mod range;
mod unique_arena;
pub use handle::{BadHandle, Handle};
pub(crate) use handle_set::HandleSet;
pub(crate) use handlevec::HandleVec;
pub use range::{BadRangeError, Range};
pub use unique_arena::UniqueArena;
use crate::Span;
use handle::Index;
use std::{fmt, ops};
/// An arena holding some kind of component (e.g., type, constant,
/// instruction, etc.) that can be referenced.
///
/// Adding new items to the arena produces a strongly-typed [`Handle`].
/// The arena can be indexed using the given handle to obtain
/// a reference to the stored item.
#[derive(Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "serialize", serde(transparent))]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[cfg_attr(test, derive(PartialEq))]
pub struct Arena<T> {
/// Values of this arena.
data: Vec<T>,
#[cfg_attr(feature = "serialize", serde(skip))]
span_info: Vec<Span>,
}
impl<T> Default for Arena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug> fmt::Debug for Arena<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<T> Arena<T> {
/// Create a new arena with no initial capacity allocated.
pub const fn new() -> Self {
Arena {
data: Vec::new(),
span_info: Vec::new(),
}
}
/// Extracts the inner vector.
#[allow(clippy::missing_const_for_fn)] // ignore due to requirement of #![feature(const_precise_live_drops)]
pub fn into_inner(self) -> Vec<T> {
self.data
}
/// Returns the current number of items stored in this arena.
pub fn len(&self) -> usize {
self.data.len()
}
/// Returns `true` if the arena contains no elements.
pub fn is_empty(&self) -> bool {
self.data.is_empty()
}
/// Returns an iterator over the items stored in this arena, returning both
/// the item's handle and a reference to it.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
self.data
.iter()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
}
/// Drains the arena, returning an iterator over the items stored.
pub fn drain(&mut self) -> impl DoubleEndedIterator<Item = (Handle<T>, T, Span)> {
let arena = std::mem::take(self);
arena
.data
.into_iter()
.zip(arena.span_info)
.enumerate()
.map(|(i, (v, span))| unsafe { (Handle::from_usize_unchecked(i), v, span) })
}
/// Returns an iterator over the items stored in this arena,
/// returning both the item's handle and a mutable reference to it.
pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = (Handle<T>, &mut T)> {
self.data
.iter_mut()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
}
/// Adds a new value to the arena, returning a typed handle.
pub fn append(&mut self, value: T, span: Span) -> Handle<T> {
let index = self.data.len();
self.data.push(value);
self.span_info.push(span);
Handle::from_usize(index)
}
/// Fetch a handle to an existing type.
pub fn fetch_if<F: Fn(&T) -> bool>(&self, fun: F) -> Option<Handle<T>> {
self.data
.iter()
.position(fun)
.map(|index| unsafe { Handle::from_usize_unchecked(index) })
}
/// Adds a value with a custom check for uniqueness:
/// returns a handle pointing to
/// an existing element if the check succeeds, or adds a new
/// element otherwise.
pub fn fetch_if_or_append<F: Fn(&T, &T) -> bool>(
&mut self,
value: T,
span: Span,
fun: F,
) -> Handle<T> {
if let Some(index) = self.data.iter().position(|d| fun(d, &value)) {
unsafe { Handle::from_usize_unchecked(index) }
} else {
self.append(value, span)
}
}
/// Adds a value with a check for uniqueness, where the check is plain comparison.
pub fn fetch_or_append(&mut self, value: T, span: Span) -> Handle<T>
where
T: PartialEq,
{
self.fetch_if_or_append(value, span, T::eq)
}
pub fn try_get(&self, handle: Handle<T>) -> Result<&T, BadHandle> {
self.data
.get(handle.index())
.ok_or_else(|| BadHandle::new(handle))
}
/// Get a mutable reference to an element in the arena.
pub fn get_mut(&mut self, handle: Handle<T>) -> &mut T {
self.data.get_mut(handle.index()).unwrap()
}
/// Get the range of handles from a particular number of elements to the end.
pub fn range_from(&self, old_length: usize) -> Range<T> {
let range = old_length as u32..self.data.len() as u32;
Range::from_index_range(range, self)
}
/// Clears the arena keeping all allocations
pub fn clear(&mut self) {
self.data.clear()
}
pub fn get_span(&self, handle: Handle<T>) -> Span {
*self
.span_info
.get(handle.index())
.unwrap_or(&Span::default())
}
/// Assert that `handle` is valid for this arena.
pub fn check_contains_handle(&self, handle: Handle<T>) -> Result<(), BadHandle> {
if handle.index() < self.data.len() {
Ok(())
} else {
Err(BadHandle::new(handle))
}
}
/// Assert that `range` is valid for this arena.
pub fn check_contains_range(&self, range: &Range<T>) -> Result<(), BadRangeError> {
// Since `range.inner` is a `Range<u32>`, we only need to check that the
// start precedes the end, and that the end is in range.
if range.inner.start > range.inner.end {
return Err(BadRangeError::new(range.clone()));
}
// Empty ranges are tolerated: they can be produced by compaction.
if range.inner.start == range.inner.end {
return Ok(());
}
let last_handle = Handle::new(Index::new(range.inner.end - 1).unwrap());
if self.check_contains_handle(last_handle).is_err() {
return Err(BadRangeError::new(range.clone()));
}
Ok(())
}
#[cfg(feature = "compact")]
pub(crate) fn retain_mut<P>(&mut self, mut predicate: P)
where
P: FnMut(Handle<T>, &mut T) -> bool,
{
let mut index = 0;
let mut retained = 0;
self.data.retain_mut(|elt| {
let handle = Handle::from_usize(index);
let keep = predicate(handle, elt);
// Since `predicate` needs mutable access to each element,
// we can't feasibly call it twice, so we have to compact
// spans by hand in parallel as part of this iteration.
if keep {
self.span_info[retained] = self.span_info[index];
retained += 1;
}
index += 1;
keep
});
self.span_info.truncate(retained);
}
}
#[cfg(feature = "deserialize")]
impl<'de, T> serde::Deserialize<'de> for Arena<T>
where
T: serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let data = Vec::deserialize(deserializer)?;
let span_info = std::iter::repeat(Span::default())
.take(data.len())
.collect();
Ok(Self { data, span_info })
}
}
impl<T> ops::Index<Handle<T>> for Arena<T> {
type Output = T;
fn index(&self, handle: Handle<T>) -> &T {
&self.data[handle.index()]
}
}
impl<T> ops::IndexMut<Handle<T>> for Arena<T> {
fn index_mut(&mut self, handle: Handle<T>) -> &mut T {
&mut self.data[handle.index()]
}
}
impl<T> ops::Index<Range<T>> for Arena<T> {
type Output = [T];
fn index(&self, range: Range<T>) -> &[T] {
&self.data[range.inner.start as usize..range.inner.end as usize]
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn append_non_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.append(0, Default::default());
let t2 = arena.append(0, Default::default());
assert!(t1 != t2);
assert!(arena[t1] == arena[t2]);
}
#[test]
fn append_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.append(0, Default::default());
let t2 = arena.append(1, Default::default());
assert!(t1 != t2);
assert!(arena[t1] != arena[t2]);
}
#[test]
fn fetch_or_append_non_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.fetch_or_append(0, Default::default());
let t2 = arena.fetch_or_append(0, Default::default());
assert!(t1 == t2);
assert!(arena[t1] == arena[t2])
}
#[test]
fn fetch_or_append_unique() {
let mut arena: Arena<u8> = Arena::new();
let t1 = arena.fetch_or_append(0, Default::default());
let t2 = arena.fetch_or_append(1, Default::default());
assert!(t1 != t2);
assert!(arena[t1] != arena[t2]);
}
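    // Illustrative sketch (not from the vendored source): how the
    // `append` / `range_from` / `check_contains_range` APIs above
    // compose. Assumes `Arena::len` from earlier in this file.
    #[test]
    fn range_from_sketch() {
        let mut arena: Arena<u8> = Arena::new();
        let start = arena.len();
        let a = arena.append(1, Default::default());
        let b = arena.append(2, Default::default());
        // `range_from` covers everything appended since `start` was taken.
        let range = arena.range_from(start);
        assert!(arena.check_contains_range(&range).is_ok());
        // `Range<T>` iterates over the handles it covers, in order.
        let handles: Vec<Handle<u8>> = range.collect();
        assert!(handles == vec![a, b]);
    }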
}

139 third_party/rust/naga/src/arena/range.rs vendored Normal file

@ -0,0 +1,139 @@
//! Well-typed ranges of [`Arena`]s.
//!
//! This module defines the [`Range`] type, representing a contiguous range of
//! entries in an [`Arena`].
//!
//! [`Arena`]: super::Arena
use super::{
handle::{Handle, Index},
Arena,
};
use std::{fmt, marker::PhantomData, ops};
/// A strongly typed range of handles.
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(
any(feature = "serialize", feature = "deserialize"),
serde(transparent)
)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
#[cfg_attr(test, derive(PartialEq))]
pub struct Range<T> {
pub(super) inner: ops::Range<u32>,
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(skip))]
marker: PhantomData<T>,
}
impl<T> Range<T> {
pub(crate) const fn erase_type(self) -> Range<()> {
let Self { inner, marker: _ } = self;
Range {
inner,
marker: PhantomData,
}
}
}
// NOTE: Keep this diagnostic in sync with that of [`BadHandle`].
#[derive(Clone, Debug, thiserror::Error)]
#[cfg_attr(test, derive(PartialEq))]
#[error("Handle range {range:?} of {kind} is either not present, or inaccessible yet")]
pub struct BadRangeError {
// This error is used for many `Handle` types, but there's no point in making this generic, so
// we just flatten them all to `Handle<()>` here.
kind: &'static str,
range: Range<()>,
}
impl BadRangeError {
pub fn new<T>(range: Range<T>) -> Self {
Self {
kind: std::any::type_name::<T>(),
range: range.erase_type(),
}
}
}
impl<T> Clone for Range<T> {
fn clone(&self) -> Self {
Range {
inner: self.inner.clone(),
marker: self.marker,
}
}
}
impl<T> fmt::Debug for Range<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}..{}]", self.inner.start, self.inner.end)
}
}
impl<T> Iterator for Range<T> {
type Item = Handle<T>;
fn next(&mut self) -> Option<Self::Item> {
if self.inner.start < self.inner.end {
let next = self.inner.start;
self.inner.start += 1;
Some(Handle::new(Index::new(next).unwrap()))
} else {
None
}
}
}
impl<T> Range<T> {
/// Return a range enclosing handles `first` through `last`, inclusive.
pub fn new_from_bounds(first: Handle<T>, last: Handle<T>) -> Self {
Self {
inner: (first.index() as u32)..(last.index() as u32 + 1),
marker: Default::default(),
}
}
/// Return a range covering all handles with indices from `0` to `size`.
pub(super) fn full_range_from_size(size: usize) -> Self {
Self {
inner: 0..size as u32,
marker: Default::default(),
}
}
/// Return the first and last handles included in `self`.
///
/// If `self` is an empty range, there are no handles included, so
/// return `None`.
pub fn first_and_last(&self) -> Option<(Handle<T>, Handle<T>)> {
if self.inner.start < self.inner.end {
Some((
// `Range::new_from_bounds` expects a start- and end-inclusive
// range, but `self.inner` is an end-exclusive range.
Handle::new(Index::new(self.inner.start).unwrap()),
Handle::new(Index::new(self.inner.end - 1).unwrap()),
))
} else {
None
}
}
/// Return the index range covered by `self`.
pub fn index_range(&self) -> ops::Range<u32> {
self.inner.clone()
}
/// Construct a `Range` that covers the indices in `inner`.
pub fn from_index_range(inner: ops::Range<u32>, arena: &Arena<T>) -> Self {
// Since `inner` is a `Range<u32>`, we only need to check that
// the start and end are well-ordered, and that the end fits
// within `arena`.
assert!(inner.start <= inner.end);
assert!(inner.end as usize <= arena.len());
Self {
inner,
marker: Default::default(),
}
}
}
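// Illustrative sketch (not from the vendored source): `first_and_last`
// and `new_from_bounds` are inverses on non-empty ranges, since the
// former reports inclusive bounds and the latter consumes them.
#[cfg(test)]
fn bounds_round_trip<T>(range: Range<T>) {
    if let Some((first, last)) = range.first_and_last() {
        let rebuilt = Range::new_from_bounds(first, last);
        // Rebuilding from the inclusive bounds restores the same
        // end-exclusive index range.
        assert!(rebuilt.index_range() == range.index_range());
    }
}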

262 third_party/rust/naga/src/arena/unique_arena.rs vendored Normal file

@ -0,0 +1,262 @@
//! The [`UniqueArena`] type and supporting definitions.
use crate::{FastIndexSet, Span};
use super::handle::{BadHandle, Handle, Index};
use std::{fmt, hash, ops};
/// An arena whose elements are guaranteed to be unique.
///
/// A `UniqueArena` holds a set of unique values of type `T`, each with an
/// associated [`Span`]. Inserting a value returns a `Handle<T>`, which can be
/// used to index the `UniqueArena` and obtain shared access to the `T` element.
/// Access via a `Handle` is an array lookup - no hash lookup is necessary.
///
/// The element type must implement `Eq` and `Hash`. Insertions of equivalent
/// elements, according to `Eq`, all return the same `Handle`.
///
/// Once inserted, elements may not be mutated.
///
/// `UniqueArena` is similar to [`Arena`]: If `Arena` is vector-like,
/// `UniqueArena` is `HashSet`-like.
///
/// [`Arena`]: super::Arena
#[derive(Clone)]
pub struct UniqueArena<T> {
set: FastIndexSet<T>,
/// Spans for the elements, indexed by handle.
///
/// The length of this vector is always equal to `set.len()`. `FastIndexSet`
/// promises that its elements "are indexed in a compact range, without
/// holes in the range 0..set.len()", so we can always use the indices
/// returned by insertion as indices into this vector.
span_info: Vec<Span>,
}
impl<T> UniqueArena<T> {
/// Create a new arena with no initial capacity allocated.
pub fn new() -> Self {
UniqueArena {
set: FastIndexSet::default(),
span_info: Vec::new(),
}
}
/// Return the current number of items stored in this arena.
pub fn len(&self) -> usize {
self.set.len()
}
/// Return `true` if the arena contains no elements.
pub fn is_empty(&self) -> bool {
self.set.is_empty()
}
/// Clears the arena, keeping all allocations.
pub fn clear(&mut self) {
self.set.clear();
self.span_info.clear();
}
/// Return the span associated with `handle`.
///
/// If a value has been inserted multiple times, the span returned is the
/// one provided with the first insertion.
pub fn get_span(&self, handle: Handle<T>) -> Span {
*self
.span_info
.get(handle.index())
.unwrap_or(&Span::default())
}
#[cfg(feature = "compact")]
pub(crate) fn drain_all(&mut self) -> UniqueArenaDrain<T> {
UniqueArenaDrain {
inner_elts: self.set.drain(..),
inner_spans: self.span_info.drain(..),
index: Index::new(0).unwrap(),
}
}
}
#[cfg(feature = "compact")]
pub struct UniqueArenaDrain<'a, T> {
inner_elts: indexmap::set::Drain<'a, T>,
inner_spans: std::vec::Drain<'a, Span>,
index: Index,
}
#[cfg(feature = "compact")]
impl<'a, T> Iterator for UniqueArenaDrain<'a, T> {
type Item = (Handle<T>, T, Span);
fn next(&mut self) -> Option<Self::Item> {
match self.inner_elts.next() {
Some(elt) => {
let handle = Handle::new(self.index);
self.index = self.index.checked_add(1).unwrap();
let span = self.inner_spans.next().unwrap();
Some((handle, elt, span))
}
None => None,
}
}
}
impl<T: Eq + hash::Hash> UniqueArena<T> {
/// Returns an iterator over the items stored in this arena, returning both
/// the item's handle and a reference to it.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
self.set.iter().enumerate().map(|(i, v)| {
let index = unsafe { Index::new_unchecked(i as u32) };
(Handle::new(index), v)
})
}
/// Insert a new value into the arena.
///
/// Return a [`Handle<T>`], which can be used to index this arena to get a
/// shared reference to the element.
///
/// If this arena already contains an element that is `Eq` to `value`,
/// return a `Handle` to the existing element, and drop `value`.
///
/// If `value` is inserted into the arena, associate `span` with
/// it. An element's span can be retrieved with the [`get_span`]
/// method.
///
/// [`Handle<T>`]: Handle
/// [`get_span`]: UniqueArena::get_span
pub fn insert(&mut self, value: T, span: Span) -> Handle<T> {
let (index, added) = self.set.insert_full(value);
if added {
debug_assert!(index == self.span_info.len());
self.span_info.push(span);
}
debug_assert!(self.set.len() == self.span_info.len());
Handle::from_usize(index)
}
/// Replace an old value with a new value.
///
/// # Panics
///
/// - if the old value is not in the arena
/// - if the new value already exists in the arena
pub fn replace(&mut self, old: Handle<T>, new: T) {
let (index, added) = self.set.insert_full(new);
assert!(added && index == self.set.len() - 1);
self.set.swap_remove_index(old.index()).unwrap();
}
/// Return this arena's handle for `value`, if present.
///
/// If this arena already contains an element equal to `value`,
/// return its handle. Otherwise, return `None`.
pub fn get(&self, value: &T) -> Option<Handle<T>> {
self.set
.get_index_of(value)
.map(|index| unsafe { Handle::from_usize_unchecked(index) })
}
/// Return this arena's value at `handle`, if that is a valid handle.
pub fn get_handle(&self, handle: Handle<T>) -> Result<&T, BadHandle> {
self.set
.get_index(handle.index())
.ok_or_else(|| BadHandle::new(handle))
}
/// Assert that `handle` is valid for this arena.
pub fn check_contains_handle(&self, handle: Handle<T>) -> Result<(), BadHandle> {
if handle.index() < self.set.len() {
Ok(())
} else {
Err(BadHandle::new(handle))
}
}
}
impl<T> Default for UniqueArena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug + Eq + hash::Hash> fmt::Debug for UniqueArena<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<T> ops::Index<Handle<T>> for UniqueArena<T> {
type Output = T;
fn index(&self, handle: Handle<T>) -> &T {
&self.set[handle.index()]
}
}
#[cfg(feature = "serialize")]
impl<T> serde::Serialize for UniqueArena<T>
where
T: Eq + hash::Hash + serde::Serialize,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
self.set.serialize(serializer)
}
}
#[cfg(feature = "deserialize")]
impl<'de, T> serde::Deserialize<'de> for UniqueArena<T>
where
T: Eq + hash::Hash + serde::Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let set = FastIndexSet::deserialize(deserializer)?;
let span_info = std::iter::repeat(Span::default()).take(set.len()).collect();
Ok(Self { set, span_info })
}
}
// Note: largely borrowed from the `HashSet` implementation
#[cfg(feature = "arbitrary")]
impl<'a, T> arbitrary::Arbitrary<'a> for UniqueArena<T>
where
T: Eq + hash::Hash + arbitrary::Arbitrary<'a>,
{
fn arbitrary(u: &mut arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let mut arena = Self::default();
for elem in u.arbitrary_iter()? {
arena.set.insert(elem?);
arena.span_info.push(Span::UNDEFINED);
}
Ok(arena)
}
fn arbitrary_take_rest(u: arbitrary::Unstructured<'a>) -> arbitrary::Result<Self> {
let mut arena = Self::default();
for elem in u.arbitrary_take_rest_iter()? {
arena.set.insert(elem?);
arena.span_info.push(Span::UNDEFINED);
}
Ok(arena)
}
#[inline]
fn size_hint(depth: usize) -> (usize, Option<usize>) {
let depth_hint = <usize as arbitrary::Arbitrary>::size_hint(depth);
arbitrary::size_hint::and(depth_hint, (0, None))
}
}
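// Illustrative sketch (not from the vendored source): the deduplicating
// contract documented on `insert` above, written as a small test.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn insert_deduplicates() {
        let mut arena: UniqueArena<u32> = UniqueArena::new();
        let a = arena.insert(7, Span::default());
        let b = arena.insert(7, Span::default());
        // Equal values map to the same handle and are stored once.
        assert!(a == b);
        assert!(arena.len() == 1);
        // `get` recovers the handle for an existing value.
        assert!(arena.get(&7) == Some(a));
        assert!(arena[a] == 7);
    }
}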

75 third_party/rust/naga/src/back/dot/mod.rs vendored

@ -392,6 +392,32 @@ const COLORS: &[&str] = &[
"#d9d9d9",
];
struct Prefixed<T>(Handle<T>);
impl std::fmt::Display for Prefixed<crate::Expression> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "e")
}
}
impl std::fmt::Display for Prefixed<crate::LocalVariable> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "l")
}
}
impl std::fmt::Display for Prefixed<crate::GlobalVariable> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "g")
}
}
impl std::fmt::Display for Prefixed<crate::Function> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "f")
}
}
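// Illustrative note (not from the vendored source): with the impls
// above, a handle whose index is 3 renders as "e3", "l3", "g3", or
// "f3" depending on the wrapped type, e.g.
//
//     let label = Prefixed(expr_handle).to_string(); // e.g. "e3"
//
// assuming `Handle::write_prefixed` writes the prefix followed by the
// handle's index.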
fn write_fun(
output: &mut String,
prefix: String,
@ -405,9 +431,9 @@ fn write_fun(
for (handle, var) in fun.local_variables.iter() {
writeln!(
output,
"\t\t{}_l{} [ shape=hexagon label=\"{:?} '{}'\" ]",
"\t\t{}_{} [ shape=hexagon label=\"{:?} '{}'\" ]",
prefix,
handle.index(),
Prefixed(handle),
handle,
name(&var.name),
)?;
@ -442,9 +468,9 @@ fn write_fun(
for (to, expr, label) in sg.dependencies {
writeln!(
output,
"\t\t{}_e{} -> {}_s{} [ label=\"{}\" ]",
"\t\t{}_{} -> {}_s{} [ label=\"{}\" ]",
prefix,
expr.index(),
Prefixed(expr),
prefix,
to,
label,
@ -453,22 +479,23 @@ fn write_fun(
for (from, to) in sg.emits {
writeln!(
output,
"\t\t{}_s{} -> {}_e{} [ style=dotted ]",
"\t\t{}_s{} -> {}_{} [ style=dotted ]",
prefix,
from,
prefix,
to.index(),
Prefixed(to),
)?;
}
}
assert!(sg.calls.is_empty());
for (from, function) in sg.calls {
writeln!(
output,
"\t\t{}_s{} -> f{}_s0",
"\t\t{}_s{} -> {}_s0",
prefix,
from,
function.index(),
Prefixed(function),
)?;
}
@ -688,9 +715,9 @@ fn write_function_expressions(
};
writeln!(
output,
"\t\t{}_e{} [ {}=\"{}\" label=\"{:?} {}\" ]",
"\t\t{}_{} [ {}=\"{}\" label=\"{:?} {}\" ]",
prefix,
handle.index(),
Prefixed(handle),
color_attr,
COLORS[color_id],
handle,
@ -700,11 +727,11 @@ fn write_function_expressions(
for (key, edge) in edges.drain() {
writeln!(
output,
"\t\t{}_e{} -> {}_e{} [ label=\"{}\" ]",
"\t\t{}_{} -> {}_{} [ label=\"{}\" ]",
prefix,
edge.index(),
Prefixed(edge),
prefix,
handle.index(),
Prefixed(handle),
key,
)?;
}
@ -712,27 +739,27 @@ fn write_function_expressions(
Some(Payload::Arguments(list)) => {
write!(output, "\t\t{{")?;
for &comp in list {
write!(output, " {}_e{}", prefix, comp.index())?;
write!(output, " {}_{}", prefix, Prefixed(comp))?;
}
writeln!(output, " }} -> {}_e{}", prefix, handle.index())?;
writeln!(output, " }} -> {}_{}", prefix, Prefixed(handle))?;
}
Some(Payload::Local(h)) => {
writeln!(
output,
"\t\t{}_l{} -> {}_e{}",
"\t\t{}_{} -> {}_{}",
prefix,
h.index(),
Prefixed(h),
prefix,
handle.index(),
Prefixed(handle),
)?;
}
Some(Payload::Global(h)) => {
writeln!(
output,
"\t\tg{} -> {}_e{} [fillcolor=gray]",
h.index(),
"\t\t{} -> {}_{} [fillcolor=gray]",
Prefixed(h),
prefix,
handle.index(),
Prefixed(handle),
)?;
}
None => {}
@ -759,8 +786,8 @@ pub fn write(
for (handle, var) in module.global_variables.iter() {
writeln!(
output,
"\t\tg{} [ shape=hexagon label=\"{:?} {:?}/'{}'\" ]",
handle.index(),
"\t\t{} [ shape=hexagon label=\"{:?} {:?}/'{}'\" ]",
Prefixed(handle),
handle,
var.space,
name(&var.name),
@ -770,7 +797,7 @@ pub fn write(
}
for (handle, fun) in module.functions.iter() {
let prefix = format!("f{}", handle.index());
let prefix = Prefixed(handle).to_string();
writeln!(output, "\tsubgraph cluster_{prefix} {{")?;
writeln!(
output,


@ -250,6 +250,14 @@ pub const RESERVED_KEYWORDS: &[&str] = &[
"namespace",
"using",
"sampler3DRect",
// Reserved keywords that were unreserved in GLSL 4.2
"image1DArrayShadow",
"image1DShadow",
"image2DArrayShadow",
"image2DShadow",
// Reserved keywords that were unreserved in GLSL 4.4
"packed",
"row_major",
//
// GLSL 4.6 Built-In Functions, from https://github.com/KhronosGroup/OpenGL-Registry/blob/d00e11dc1a1ffba581d633f21f70202051248d5c/specs/gl/GLSLangSpec.4.60.html#L13314
//

37 third_party/rust/naga/src/back/glsl/mod.rs vendored

@ -46,7 +46,7 @@ to output a [`Module`](crate::Module) into glsl
pub use features::Features;
use crate::{
back,
back::{self, Baked},
proc::{self, NameKey},
valid, Handle, ShaderStage, TypeInner,
};
@ -1982,7 +1982,7 @@ impl<'a, W: Write> Writer<'a, W> {
// Also, we use sanitized names! This prevents the backend from generating variables whose names collide with reserved keywords.
Some(self.namer.call(name))
} else if self.need_bake_expressions.contains(&handle) {
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
Some(Baked(handle).to_string())
} else {
None
};
@ -2310,7 +2310,7 @@ impl<'a, W: Write> Writer<'a, W> {
// This is done in `Emit` by never emitting a variable name for pointer variables
self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
let result_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let result_name = Baked(result).to_string();
write!(self.out, "{level}")?;
// Expressions cannot have side effects, so just writing the expression here is fine.
self.write_named_expr(pointer, result_name, result, ctx)?;
@ -2335,7 +2335,7 @@ impl<'a, W: Write> Writer<'a, W> {
} => {
write!(self.out, "{level}")?;
if let Some(expr) = result {
let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
let name = Baked(expr).to_string();
let result = self.module.functions[function].result.as_ref().unwrap();
self.write_type(result.ty)?;
write!(self.out, " {name}")?;
@ -2369,7 +2369,7 @@ impl<'a, W: Write> Writer<'a, W> {
} => {
write!(self.out, "{level}")?;
if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
let res_ty = ctx.resolve_type(result, &self.module.types);
self.write_value_type(res_ty)?;
write!(self.out, " {res_name} = ")?;
@ -2399,7 +2399,7 @@ impl<'a, W: Write> Writer<'a, W> {
Statement::RayQuery { .. } => unreachable!(),
Statement::SubgroupBallot { result, predicate } => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
self.write_value_type(res_ty)?;
write!(self.out, " {res_name} = ")?;
@ -2419,7 +2419,7 @@ impl<'a, W: Write> Writer<'a, W> {
result,
} => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
self.write_value_type(res_ty)?;
write!(self.out, " {res_name} = ")?;
@ -2476,7 +2476,7 @@ impl<'a, W: Write> Writer<'a, W> {
result,
} => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
self.write_value_type(res_ty)?;
write!(self.out, " {res_name} = ")?;
@ -3865,9 +3865,8 @@ impl<'a, W: Write> Writer<'a, W> {
// Define our local and start a call to `clamp`
write!(
self.out,
"int {}{}{} = clamp(",
back::BAKE_PREFIX,
expr.index(),
"int {}{} = clamp(",
Baked(expr),
CLAMPED_LOD_SUFFIX
)?;
// Write the lod that will be clamped
@ -4205,13 +4204,7 @@ impl<'a, W: Write> Writer<'a, W> {
// `textureSize` call, but this needs to be the clamped lod, this should
// have been generated earlier and put in a local.
if class.is_mipmapped() {
write!(
self.out,
", {}{}{}",
back::BAKE_PREFIX,
handle.index(),
CLAMPED_LOD_SUFFIX
)?;
write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
}
// Close the `textureSize` call
write!(self.out, ")")?;
@ -4229,13 +4222,7 @@ impl<'a, W: Write> Writer<'a, W> {
// Add the clamped lod (if present) as the second argument to the
// image load function.
if level.is_some() {
write!(
self.out,
", {}{}{}",
back::BAKE_PREFIX,
handle.index(),
CLAMPED_LOD_SUFFIX
)?;
write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
}
// If a sample argument is needed we need to clamp it between 0 and

16 third_party/rust/naga/src/back/hlsl/writer.rs vendored

@ -7,7 +7,7 @@ use super::{
BackendResult, Error, Options,
};
use crate::{
back,
back::{self, Baked},
proc::{self, NameKey},
valid, Handle, Module, ScalarKind, ShaderStage, TypeInner,
};
@ -1410,7 +1410,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// Also, we use sanitized names! This prevents the backend from generating variables whose names collide with reserved keywords.
Some(self.namer.call(name))
} else if self.need_bake_expressions.contains(&handle) {
Some(format!("_expr{}", handle.index()))
Some(Baked(handle).to_string())
} else {
None
};
@ -1891,7 +1891,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, "{level}")?;
if let Some(expr) = result {
write!(self.out, "const ")?;
let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
let name = Baked(expr).to_string();
let expr_ty = &func_ctx.info[expr].ty;
match *expr_ty {
proc::TypeResolution::Handle(handle) => self.write_type(module, handle)?,
@ -1922,7 +1922,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
let res_name = match result {
None => None,
Some(result) => {
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
let name = Baked(result).to_string();
match func_ctx.info[result].ty {
proc::TypeResolution::Handle(handle) => {
self.write_type(module, handle)?
@ -1992,7 +1992,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Statement::WorkGroupUniformLoad { pointer, result } => {
self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
write!(self.out, "{level}")?;
let name = format!("_expr{}", result.index());
let name = Baked(result).to_string();
self.write_named_expr(module, pointer, name, result, func_ctx)?;
self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
@ -2099,7 +2099,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Statement::RayQuery { .. } => unreachable!(),
Statement::SubgroupBallot { result, predicate } => {
write!(self.out, "{level}")?;
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
let name = Baked(result).to_string();
write!(self.out, "const uint4 {name} = ")?;
self.named_expressions.insert(result, name);
@ -2118,7 +2118,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
} => {
write!(self.out, "{level}")?;
write!(self.out, "const ")?;
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
let name = Baked(result).to_string();
match func_ctx.info[result].ty {
proc::TypeResolution::Handle(handle) => self.write_type(module, handle)?,
proc::TypeResolution::Value(ref value) => {
@ -2182,7 +2182,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
} => {
write!(self.out, "{level}")?;
write!(self.out, "const ")?;
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
let name = Baked(result).to_string();
match func_ctx.info[result].ty {
proc::TypeResolution::Handle(handle) => self.write_type(module, handle)?,
proc::TypeResolution::Value(ref value) => {

18 third_party/rust/naga/src/back/mod.rs vendored

@ -28,12 +28,26 @@ pub mod pipeline_constants;
pub const COMPONENTS: &[char] = &['x', 'y', 'z', 'w'];
/// Indent for backends.
pub const INDENT: &str = " ";
/// Prefix used for baking.
pub const BAKE_PREFIX: &str = "_e";
/// Expressions that need baking.
pub type NeedBakeExpressions = crate::FastHashSet<crate::Handle<crate::Expression>>;
/// A type for displaying expression handles as baking identifiers.
///
/// Given an [`Expression`] [`Handle`] `h`, `Baked(h)` implements
/// [`std::fmt::Display`], showing the handle's index prefixed by
/// `_e`.
///
/// [`Expression`]: crate::Expression
/// [`Handle`]: crate::Handle
struct Baked(crate::Handle<crate::Expression>);
impl std::fmt::Display for Baked {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "_e")
}
}
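// Illustrative note (not from the vendored source): backends bake an
// expression into a named temporary by formatting its handle, e.g.
//
//     let name = Baked(handle).to_string(); // "_e42" for index 42
//
// which replaces the older `format!("{}{}", BAKE_PREFIX, handle.index())`
// pattern seen throughout this commit. (Index 42 is illustrative.)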
/// Specifies the values of pipeline-overridable constants in the shader module.
///
/// If an `@id` attribute was specified on the declaration,

110 third_party/rust/naga/src/back/msl/writer.rs vendored

@ -1,12 +1,11 @@
use super::{sampler as sm, Error, LocationMode, Options, PipelineOptions, TranslationInfo};
use crate::{
arena::Handle,
back,
arena::{Handle, HandleSet},
back::{self, Baked},
proc::index,
proc::{self, NameKey, TypeResolution},
valid, FastHashMap, FastHashSet,
};
use bit_set::BitSet;
use std::{
fmt::{Display, Error as FmtError, Formatter, Write},
iter,
@ -86,6 +85,41 @@ const fn scalar_is_int(scalar: crate::Scalar) -> bool {
/// Prefix for cached clamped level-of-detail values for `ImageLoad` expressions.
const CLAMPED_LOD_LOAD_PREFIX: &str = "clamped_lod_e";
/// Wrapper for identifier names for clamped level-of-detail values
///
/// Values of this type implement [`std::fmt::Display`], formatting as
/// the name of the variable used to hold the cached clamped
/// level-of-detail value for an `ImageLoad` expression.
struct ClampedLod(Handle<crate::Expression>);
impl Display for ClampedLod {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, CLAMPED_LOD_LOAD_PREFIX)
}
}
/// Wrapper for generating `struct _mslBufferSizes` member names for
/// runtime-sized array lengths.
///
/// On Metal, `wgpu_hal` passes the element counts for all runtime-sized arrays
/// as an argument to the entry point. This argument's type in the MSL is
/// `struct _mslBufferSizes`, a Naga-synthesized struct with a `uint` member for
/// each global variable containing a runtime-sized array.
///
/// If `global` is a [`Handle`] for a [`GlobalVariable`] that contains a
/// runtime-sized array, then the value `ArraySize(global)` implements
/// [`std::fmt::Display`], formatting as the name of the struct member carrying
/// the number of elements in that runtime-sized array.
///
/// [`GlobalVariable`]: crate::GlobalVariable
struct ArraySizeMember(Handle<crate::GlobalVariable>);
impl Display for ArraySizeMember {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
self.0.write_prefixed(f, "size")
}
}
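// Illustrative note (not from the vendored source): for a global whose
// handle index is 3, `ArraySizeMember(global)` formats as "size3", so
// the synthesized struct and the length computation below agree on the
// member name:
//
//     struct _mslBufferSizes {
//         uint size3;
//     };
//     // ... (_buffer_sizes.size3 - offset - size) / stride ...
//
// (The index 3 is illustrative only.)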
struct TypeContext<'a> {
handle: Handle<crate::Type>,
gctx: proc::GlobalCtx<'a>,
@ -549,11 +583,10 @@ struct ExpressionContext<'a> {
lang_version: (u8, u8),
policies: index::BoundsCheckPolicies,
/// A bitset containing the `Expression` handle indexes of expressions used
/// as indices in `ReadZeroSkipWrite`-policy accesses. These may need to be
/// cached in temporary variables. See `index::find_checked_indexes` for
/// details.
guarded_indices: BitSet,
/// The set of expressions used as indices in `ReadZeroSkipWrite`-policy
/// accesses. These may need to be cached in temporary variables. See
/// `index::find_checked_indexes` for details.
guarded_indices: HandleSet<crate::Expression>,
}
impl<'a> ExpressionContext<'a> {
@ -677,9 +710,7 @@ impl<W: Write> Writer<W> {
) -> BackendResult {
match level {
LevelOfDetail::Direct(expr) => self.put_expression(expr, context, true)?,
LevelOfDetail::Restricted(load) => {
write!(self.out, "{}{}", CLAMPED_LOD_LOAD_PREFIX, load.index())?
}
LevelOfDetail::Restricted(load) => write!(self.out, "{}", ClampedLod(load))?,
}
Ok(())
}
@ -1146,8 +1177,8 @@ impl<W: Write> Writer<W> {
// prevent that.
write!(
self.out,
"(_buffer_sizes.size{idx} - {offset} - {size}) / {stride}",
idx = handle.index(),
"(_buffer_sizes.{member} - {offset} - {size}) / {stride}",
member = ArraySizeMember(handle),
offset = offset,
size = size,
stride = stride,
@ -2778,13 +2809,7 @@ impl<W: Write> Writer<W> {
return Ok(());
}
write!(
self.out,
"{}uint {}{} = ",
indent,
CLAMPED_LOD_LOAD_PREFIX,
load.index(),
)?;
write!(self.out, "{}uint {} = ", indent, ClampedLod(load),)?;
self.put_restricted_scalar_image_index(
image,
level_of_detail,
@ -2846,15 +2871,14 @@ impl<W: Write> Writer<W> {
// If this expression is an index that we're going to first compare
// against a limit, and then actually use as an index, then we may
// want to cache it in a temporary, to avoid evaluating it twice.
let bake =
if context.expression.guarded_indices.contains(handle.index()) {
true
} else {
self.need_bake_expressions.contains(&handle)
};
let bake = if context.expression.guarded_indices.contains(handle) {
true
} else {
self.need_bake_expressions.contains(&handle)
};
if bake {
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
Some(Baked(handle).to_string())
} else {
None
}
@ -3009,7 +3033,7 @@ impl<W: Write> Writer<W> {
} => {
write!(self.out, "{level}")?;
if let Some(expr) = result {
let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
let name = Baked(expr).to_string();
self.start_baking_expression(expr, &context.expression, &name)?;
self.named_expressions.insert(expr, name);
}
@ -3064,7 +3088,7 @@ impl<W: Write> Writer<W> {
// operating on a 64-bit value, `result` is `None`.
write!(self.out, "{level}")?;
let fun_str = if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_baking_expression(result, &context.expression, &res_name)?;
self.named_expressions.insert(result, res_name);
fun.to_msl()?
@ -3170,7 +3194,7 @@ impl<W: Write> Writer<W> {
}
crate::RayQueryFunction::Proceed { result } => {
write!(self.out, "{level}")?;
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
let name = Baked(result).to_string();
self.start_baking_expression(result, &context.expression, &name)?;
self.named_expressions.insert(result, name);
self.put_expression(query, &context.expression, true)?;
@ -3444,24 +3468,30 @@ impl<W: Write> Writer<W> {
writeln!(self.out)?;
{
let mut indices = vec![];
for (handle, var) in module.global_variables.iter() {
if needs_array_length(var.ty, &module.types) {
let idx = handle.index();
indices.push(idx);
}
}
// Make a `Vec` of all the `GlobalVariable`s that contain
// runtime-sized arrays.
let globals: Vec<Handle<crate::GlobalVariable>> = module
.global_variables
.iter()
.filter(|&(_, var)| needs_array_length(var.ty, &module.types))
.map(|(handle, _)| handle)
.collect();
let mut buffer_indices = vec![];
for vbm in &pipeline_options.vertex_buffer_mappings {
buffer_indices.push(vbm.id);
}
if !indices.is_empty() || !buffer_indices.is_empty() {
if !globals.is_empty() || !buffer_indices.is_empty() {
writeln!(self.out, "struct _mslBufferSizes {{")?;
for idx in indices {
writeln!(self.out, "{}uint size{};", back::INDENT, idx)?;
for global in globals {
writeln!(
self.out,
"{}uint {};",
back::INDENT,
ArraySizeMember(global)
)?;
}
for idx in buffer_indices {


@ -1,5 +1,6 @@
use super::PipelineConstants;
use crate::{
arena::HandleVec,
proc::{ConstantEvaluator, ConstantEvaluatorError, Emitter},
valid::{Capabilities, ModuleInfo, ValidationError, ValidationFlags, Validator},
Arena, Block, Constant, Expression, Function, Handle, Literal, Module, Override, Range, Scalar,
@ -49,11 +50,11 @@ pub fn process_overrides<'a>(
// A map from override handles to the handles of the constants
// we've replaced them with.
let mut override_map = Vec::with_capacity(module.overrides.len());
let mut override_map = HandleVec::with_capacity(module.overrides.len());
// A map from `module`'s original global expression handles to
// handles in the new, simplified global expression arena.
let mut adjusted_global_expressions = Vec::with_capacity(module.global_expressions.len());
let mut adjusted_global_expressions = HandleVec::with_capacity(module.global_expressions.len());
// The set of constants whose initializer handles we've already
// updated to refer to the newly built global expression arena.
@ -105,7 +106,7 @@ pub fn process_overrides<'a>(
for (old_h, expr, span) in module.global_expressions.drain() {
let mut expr = match expr {
Expression::Override(h) => {
let c_h = if let Some(new_h) = override_map.get(h.index()) {
let c_h = if let Some(new_h) = override_map.get(h) {
*new_h
} else {
let mut new_h = None;
@ -131,7 +132,7 @@ pub fn process_overrides<'a>(
Expression::Constant(c_h) => {
if adjusted_constant_initializers.insert(c_h) {
let init = &mut module.constants[c_h].init;
*init = adjusted_global_expressions[init.index()];
*init = adjusted_global_expressions[*init];
}
expr
}
@ -144,8 +145,7 @@ pub fn process_overrides<'a>(
);
adjust_expr(&adjusted_global_expressions, &mut expr);
let h = evaluator.try_eval_and_append(expr, span)?;
debug_assert_eq!(old_h.index(), adjusted_global_expressions.len());
adjusted_global_expressions.push(h);
adjusted_global_expressions.insert(old_h, h);
}
// Finish processing any overrides we didn't visit in the loop above.
@ -169,12 +169,12 @@ pub fn process_overrides<'a>(
.iter_mut()
.filter(|&(c_h, _)| !adjusted_constant_initializers.contains(&c_h))
{
c.init = adjusted_global_expressions[c.init.index()];
c.init = adjusted_global_expressions[c.init];
}
for (_, v) in module.global_variables.iter_mut() {
if let Some(ref mut init) = v.init {
*init = adjusted_global_expressions[init.index()];
*init = adjusted_global_expressions[*init];
}
}
@ -206,8 +206,8 @@ fn process_override(
(old_h, override_, span): (Handle<Override>, Override, Span),
pipeline_constants: &PipelineConstants,
module: &mut Module,
override_map: &mut Vec<Handle<Constant>>,
adjusted_global_expressions: &[Handle<Expression>],
override_map: &mut HandleVec<Override, Handle<Constant>>,
adjusted_global_expressions: &HandleVec<Expression, Handle<Expression>>,
adjusted_constant_initializers: &mut HashSet<Handle<Constant>>,
global_expression_kind_tracker: &mut crate::proc::ExpressionKindTracker,
) -> Result<Handle<Constant>, PipelineConstantError> {
@ -234,7 +234,7 @@ fn process_override(
global_expression_kind_tracker.insert(expr, crate::proc::ExpressionKind::Const);
expr
} else if let Some(init) = override_.init {
adjusted_global_expressions[init.index()]
adjusted_global_expressions[init]
} else {
return Err(PipelineConstantError::MissingValue(key.to_string()));
};
@ -246,8 +246,7 @@ fn process_override(
init,
};
let h = module.constants.append(constant, span);
debug_assert_eq!(old_h.index(), override_map.len());
override_map.push(h);
override_map.insert(old_h, h);
adjusted_constant_initializers.insert(h);
Ok(h)
}
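// Illustrative note (not from the vendored source): `HandleVec<K, V>`
// acts like a `Vec<V>` indexed by `Handle<K>` rather than `usize`,
// which is how `override_map` is filled and consulted above:
//
//     let mut map: HandleVec<Override, Handle<Constant>> =
//         HandleVec::with_capacity(n);
//     map.insert(old_h, new_h); // inserted in handle order, as here
//     let new_h = map[old_h];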
@ -259,16 +258,16 @@ fn process_override(
/// Replace any expressions whose values are now known with their fully
/// evaluated form.
///
/// If `h` is a `Handle<Override>`, then `override_map[h.index()]` is the
/// If `h` is a `Handle<Override>`, then `override_map[h]` is the
/// `Handle<Constant>` for the override's final value.
fn process_function(
module: &mut Module,
override_map: &[Handle<Constant>],
override_map: &HandleVec<Override, Handle<Constant>>,
function: &mut Function,
) -> Result<(), ConstantEvaluatorError> {
// A map from original local expression handles to
// handles in the new, local expression arena.
let mut adjusted_local_expressions = Vec::with_capacity(function.expressions.len());
let mut adjusted_local_expressions = HandleVec::with_capacity(function.expressions.len());
let mut local_expression_kind_tracker = crate::proc::ExpressionKindTracker::new();
@ -294,12 +293,11 @@ fn process_function(
for (old_h, mut expr, span) in expressions.drain() {
if let Expression::Override(h) = expr {
expr = Expression::Constant(override_map[h.index()]);
expr = Expression::Constant(override_map[h]);
}
adjust_expr(&adjusted_local_expressions, &mut expr);
let h = evaluator.try_eval_and_append(expr, span)?;
debug_assert_eq!(old_h.index(), adjusted_local_expressions.len());
adjusted_local_expressions.push(h);
adjusted_local_expressions.insert(old_h, h);
}
adjust_block(&adjusted_local_expressions, &mut function.body);
@ -309,7 +307,7 @@ fn process_function(
// Update local expression initializers.
for (_, local) in function.local_variables.iter_mut() {
if let &mut Some(ref mut init) = &mut local.init {
*init = adjusted_local_expressions[init.index()];
*init = adjusted_local_expressions[*init];
}
}
@ -319,7 +317,7 @@ fn process_function(
for (expr_h, name) in named_expressions {
function
.named_expressions
.insert(adjusted_local_expressions[expr_h.index()], name);
.insert(adjusted_local_expressions[expr_h], name);
}
Ok(())
@ -327,9 +325,9 @@ fn process_function(
/// Replace every expression handle in `expr` with its counterpart
/// given by `new_pos`.
fn adjust_expr(new_pos: &[Handle<Expression>], expr: &mut Expression) {
fn adjust_expr(new_pos: &HandleVec<Expression, Handle<Expression>>, expr: &mut Expression) {
let adjust = |expr: &mut Handle<Expression>| {
*expr = new_pos[expr.index()];
*expr = new_pos[*expr];
};
match *expr {
Expression::Compose {
@ -532,7 +530,7 @@ fn adjust_expr(new_pos: &[Handle<Expression>], expr: &mut Expression) {
/// Replace every expression handle in `block` with its counterpart
/// given by `new_pos`.
fn adjust_block(new_pos: &[Handle<Expression>], block: &mut Block) {
fn adjust_block(new_pos: &HandleVec<Expression, Handle<Expression>>, block: &mut Block) {
for stmt in block.iter_mut() {
adjust_stmt(new_pos, stmt);
}
@ -540,9 +538,9 @@ fn adjust_block(new_pos: &[Handle<Expression>], block: &mut Block) {
/// Replace every expression handle in `stmt` with its counterpart
/// given by `new_pos`.
fn adjust_stmt(new_pos: &[Handle<Expression>], stmt: &mut Statement) {
fn adjust_stmt(new_pos: &HandleVec<Expression, Handle<Expression>>, stmt: &mut Statement) {
let adjust = |expr: &mut Handle<Expression>| {
*expr = new_pos[expr.index()];
*expr = new_pos[*expr];
};
match *stmt {
Statement::Emit(ref mut range) => {

8 third_party/rust/naga/src/back/spv/block.rs vendored

@ -213,7 +213,7 @@ impl<'w> BlockContext<'w> {
// The chain rule: if this `Access...`'s `base` operand was
// previously omitted, then omit this one, too.
_ => self.cached.ids[expr_handle.index()] == 0,
_ => self.cached.ids[expr_handle] == 0,
}
}
@ -237,7 +237,7 @@ impl<'w> BlockContext<'w> {
crate::Expression::Literal(literal) => self.writer.get_constant_scalar(literal),
crate::Expression::Constant(handle) => {
let init = self.ir_module.constants[handle].init;
self.writer.constant_ids[init.index()]
self.writer.constant_ids[init]
}
crate::Expression::Override(_) => return Err(Error::Override),
crate::Expression::ZeroValue(_) => self.writer.get_constant_null(result_type_id),
@ -430,7 +430,7 @@ impl<'w> BlockContext<'w> {
}
}
crate::Expression::GlobalVariable(handle) => {
self.writer.global_variables[handle.index()].access_id
self.writer.global_variables[handle].access_id
}
crate::Expression::Swizzle {
size,
@ -1830,7 +1830,7 @@ impl<'w> BlockContext<'w> {
base
}
crate::Expression::GlobalVariable(handle) => {
let gv = &self.writer.global_variables[handle.index()];
let gv = &self.writer.global_variables[handle];
break gv.access_id;
}
crate::Expression::LocalVariable(variable) => {

4 third_party/rust/naga/src/back/spv/image.rs vendored

@ -381,7 +381,7 @@ impl<'w> BlockContext<'w> {
pub(super) fn get_handle_id(&mut self, expr_handle: Handle<crate::Expression>) -> Word {
let id = match self.ir_function.expressions[expr_handle] {
crate::Expression::GlobalVariable(handle) => {
self.writer.global_variables[handle.index()].handle_id
self.writer.global_variables[handle].handle_id
}
crate::Expression::FunctionArgument(i) => {
self.function.parameters[i as usize].handle_id
@ -974,7 +974,7 @@ impl<'w> BlockContext<'w> {
};
if let Some(offset_const) = offset {
let offset_id = self.writer.constant_ids[offset_const.index()];
let offset_id = self.writer.constant_ids[offset_const];
main_instruction.add_operand(offset_id);
}

2 third_party/rust/naga/src/back/spv/index.rs vendored

@ -116,7 +116,7 @@ impl<'w> BlockContext<'w> {
_ => return Err(Error::Validation("array length expression case-4")),
};
let gvar = self.writer.global_variables[global_handle.index()].clone();
let gvar = self.writer.global_variables[global_handle].clone();
let global = &self.ir_module.global_variables[global_handle];
let (last_member_index, gvar_id) = match opt_last_member_index {
Some(index) => (index, gvar.access_id),

24 third_party/rust/naga/src/back/spv/mod.rs vendored

@ -18,7 +18,7 @@ mod writer;
pub use spirv::Capability;
use crate::arena::Handle;
use crate::arena::{Handle, HandleVec};
use crate::proc::{BoundsCheckPolicies, TypeResolution};
use spirv::Word;
@ -420,7 +420,7 @@ enum Dimension {
/// [emit]: index.html#expression-evaluation-time-and-scope
#[derive(Default)]
struct CachedExpressions {
ids: Vec<Word>,
ids: HandleVec<crate::Expression, Word>,
}
impl CachedExpressions {
fn reset(&mut self, length: usize) {
@ -431,7 +431,7 @@ impl CachedExpressions {
impl ops::Index<Handle<crate::Expression>> for CachedExpressions {
type Output = Word;
fn index(&self, h: Handle<crate::Expression>) -> &Word {
let id = &self.ids[h.index()];
let id = &self.ids[h];
if *id == 0 {
unreachable!("Expression {:?} is not cached!", h);
}
@ -440,7 +440,7 @@ impl ops::Index<Handle<crate::Expression>> for CachedExpressions {
}
impl ops::IndexMut<Handle<crate::Expression>> for CachedExpressions {
fn index_mut(&mut self, h: Handle<crate::Expression>) -> &mut Word {
let id = &mut self.ids[h.index()];
let id = &mut self.ids[h];
if *id != 0 {
unreachable!("Expression {:?} is already cached!", h);
}
@ -537,32 +537,32 @@ struct FunctionArgument {
/// - OpConstantComposite
/// - OpConstantNull
struct ExpressionConstnessTracker {
inner: bit_set::BitSet,
inner: crate::arena::HandleSet<crate::Expression>,
}
impl ExpressionConstnessTracker {
fn from_arena(arena: &crate::Arena<crate::Expression>) -> Self {
let mut inner = bit_set::BitSet::new();
let mut inner = crate::arena::HandleSet::for_arena(arena);
for (handle, expr) in arena.iter() {
let insert = match *expr {
crate::Expression::Literal(_)
| crate::Expression::ZeroValue(_)
| crate::Expression::Constant(_) => true,
crate::Expression::Compose { ref components, .. } => {
components.iter().all(|h| inner.contains(h.index()))
components.iter().all(|&h| inner.contains(h))
}
crate::Expression::Splat { value, .. } => inner.contains(value.index()),
crate::Expression::Splat { value, .. } => inner.contains(value),
_ => false,
};
if insert {
inner.insert(handle.index());
inner.insert(handle);
}
}
Self { inner }
}
fn is_const(&self, value: Handle<crate::Expression>) -> bool {
self.inner.contains(value.index())
self.inner.contains(value)
}
}
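// Illustrative note (not from the vendored source): the constness rule
// above marks literals, zero-values, and named constants as const, and
// `Compose`/`Splat` as const exactly when their operands are, e.g.
//
//     e0 = Literal(1.0)                       // const
//     e1 = Splat { value: e0, .. }            // const: e0 is const
//     e2 = FunctionArgument(0)                // not const
//     e3 = Compose { components: [e0, e2] }   // not const: e2 isn't
//
// so `is_const` is true for e0 and e1 only.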
@ -662,9 +662,9 @@ pub struct Writer {
lookup_function: crate::FastHashMap<Handle<crate::Function>, Word>,
lookup_function_type: crate::FastHashMap<LookupFunctionType, Word>,
/// Indexed by const-expression handle indexes
constant_ids: Vec<Word>,
constant_ids: HandleVec<crate::Expression, Word>,
cached_constants: crate::FastHashMap<CachedConstant, Word>,
global_variables: Vec<GlobalVariable>,
global_variables: HandleVec<crate::GlobalVariable, GlobalVariable>,
binding_map: BindingMap,
// Cached expressions are only meaningful within a BlockContext, but we


@ -65,3 +65,10 @@ impl<K: Ord, V> Recyclable for std::collections::BTreeMap<K, V> {
self
}
}
impl<K, V> Recyclable for crate::arena::HandleVec<K, V> {
fn recycle(mut self) -> Self {
self.clear();
self
}
}
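// Illustrative note (not from the vendored source): recycling clears
// the contents but keeps the allocation, so per-function scratch maps
// can be reused across functions, e.g.
//
//     let mut ids: HandleVec<crate::Expression, Word> = HandleVec::new();
//     // ... fill while writing one function ...
//     ids = ids.recycle(); // empty again, capacity retained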

26 third_party/rust/naga/src/back/spv/writer.rs vendored

@ -7,7 +7,7 @@ use super::{
PhysicalLayout, PipelineOptions, ResultMember, Writer, WriterFlags, BITS_PER_BYTE,
};
use crate::{
arena::{Handle, UniqueArena},
arena::{Handle, HandleVec, UniqueArena},
back::spv::BindingInfo,
proc::{Alignment, TypeResolution},
valid::{FunctionInfo, ModuleInfo},
@ -71,9 +71,9 @@ impl Writer {
lookup_type: crate::FastHashMap::default(),
lookup_function: crate::FastHashMap::default(),
lookup_function_type: crate::FastHashMap::default(),
constant_ids: Vec::new(),
constant_ids: HandleVec::new(),
cached_constants: crate::FastHashMap::default(),
global_variables: Vec::new(),
global_variables: HandleVec::new(),
binding_map: options.binding_map.clone(),
saved_cached: CachedExpressions::default(),
gl450_ext_inst_id,
@ -554,7 +554,7 @@ impl Writer {
continue;
}
let mut gv = self.global_variables[handle.index()].clone();
let mut gv = self.global_variables[handle].clone();
if let Some(ref mut iface) = interface {
// Have to include global variables in the interface
if self.physical_layout.version >= 0x10400 {
@ -599,7 +599,7 @@ impl Writer {
}
// work around borrow checking in the presence of `self.xxx()` calls
self.global_variables[handle.index()] = gv;
self.global_variables[handle] = gv;
}
// Create a `BlockContext` for generating SPIR-V for the function's
@ -1266,7 +1266,7 @@ impl Writer {
crate::Expression::Literal(literal) => self.get_constant_scalar(literal),
crate::Expression::Constant(constant) => {
let constant = &ir_module.constants[constant];
self.constant_ids[constant.init.index()]
self.constant_ids[constant.init]
}
crate::Expression::ZeroValue(ty) => {
let type_id = self.get_type_id(LookupType::Handle(ty));
@ -1279,12 +1279,12 @@ impl Writer {
&ir_module.global_expressions,
&ir_module.types,
)
.map(|component| self.constant_ids[component.index()])
.map(|component| self.constant_ids[component])
.collect();
self.get_constant_composite(LookupType::Handle(ty), component_ids.as_slice())
}
crate::Expression::Splat { size, value } => {
let value_id = self.constant_ids[value.index()];
let value_id = self.constant_ids[value];
let component_ids = &[value_id; 4][..size as usize];
let ty = self.get_expression_lookup_type(&mod_info[handle]);
@ -1294,7 +1294,7 @@ impl Writer {
_ => unreachable!(),
};
self.constant_ids[handle.index()] = id;
self.constant_ids[handle] = id;
Ok(id)
}
@ -1347,7 +1347,7 @@ impl Writer {
// It's safe to use `var_id` here, not `access_id`, because only
// variables in the `Uniform` and `StorageBuffer` address spaces
// get wrapped, and we're initializing `WorkGroup` variables.
let var_id = self.global_variables[handle.index()].var_id;
let var_id = self.global_variables[handle].var_id;
let var_type_id = self.get_type_id(LookupType::Handle(var.ty));
let init_word = self.get_constant_null(var_type_id);
Instruction::store(var_id, init_word, None)
@ -1728,7 +1728,7 @@ impl Writer {
let init_word = global_variable
.init
.map(|constant| self.constant_ids[constant.index()]);
.map(|constant| self.constant_ids[constant]);
let inner_type_id = self.get_type_id(
substitute_inner_type_lookup.unwrap_or(LookupType::Handle(global_variable.ty)),
);
@ -1986,7 +1986,7 @@ impl Writer {
if self.flags.contains(WriterFlags::DEBUG) {
for (_, constant) in ir_module.constants.iter() {
if let Some(ref name) = constant.name {
let id = self.constant_ids[constant.init.index()];
let id = self.constant_ids[constant.init];
self.debugs.push(Instruction::name(id, name));
}
}
@ -2006,7 +2006,7 @@ impl Writer {
GlobalVariable::new(id)
}
};
self.global_variables.push(gvar);
self.global_variables.insert(handle, gvar);
}
// write all functions

16 third_party/rust/naga/src/back/wgsl/writer.rs vendored

@ -1,6 +1,6 @@
use super::Error;
use crate::{
back,
back::{self, Baked},
proc::{self, NameKey},
valid, Handle, Module, ShaderStage, TypeInner,
};
@ -641,7 +641,7 @@ impl<W: Write> Writer<W> {
_ => false,
};
if min_ref_count <= info.ref_count || required_baking_expr {
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
Some(Baked(handle).to_string())
} else {
None
}
@ -733,7 +733,7 @@ impl<W: Write> Writer<W> {
} => {
write!(self.out, "{level}")?;
if let Some(expr) = result {
let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
let name = Baked(expr).to_string();
self.start_named_expr(module, expr, func_ctx, &name)?;
self.named_expressions.insert(expr, name);
}
@ -755,7 +755,7 @@ impl<W: Write> Writer<W> {
} => {
write!(self.out, "{level}")?;
if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
}
@ -774,7 +774,7 @@ impl<W: Write> Writer<W> {
Statement::WorkGroupUniformLoad { pointer, result } => {
write!(self.out, "{level}")?;
// TODO: Obey named expressions here.
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
write!(self.out, "workgroupUniformLoad(")?;
@ -934,7 +934,7 @@ impl<W: Write> Writer<W> {
Statement::RayQuery { .. } => unreachable!(),
Statement::SubgroupBallot { result, predicate } => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
@ -951,7 +951,7 @@ impl<W: Write> Writer<W> {
result,
} => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
@ -1006,7 +1006,7 @@ impl<W: Write> Writer<W> {
result,
} => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = Baked(result).to_string();
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);


@ -88,28 +88,38 @@ impl<'tracer> ExpressionTracer<'tracer> {
match self.global_expressions_used {
Some(ref mut used) => used.insert(init),
None => self.expressions_used.insert(init),
}
};
}
Ex::Override(_) => {
// All overrides are considered used by definition. We mark
// their types and initialization expressions as used in
// `compact::compact`, so we have no more work to do here.
}
Ex::ZeroValue(ty) => self.types_used.insert(ty),
Ex::ZeroValue(ty) => {
self.types_used.insert(ty);
}
Ex::Compose { ty, ref components } => {
self.types_used.insert(ty);
self.expressions_used
.insert_iter(components.iter().cloned());
}
Ex::Access { base, index } => self.expressions_used.insert_iter([base, index]),
Ex::AccessIndex { base, index: _ } => self.expressions_used.insert(base),
Ex::Splat { size: _, value } => self.expressions_used.insert(value),
Ex::AccessIndex { base, index: _ } => {
self.expressions_used.insert(base);
}
Ex::Splat { size: _, value } => {
self.expressions_used.insert(value);
}
Ex::Swizzle {
size: _,
vector,
pattern: _,
} => self.expressions_used.insert(vector),
Ex::Load { pointer } => self.expressions_used.insert(pointer),
} => {
self.expressions_used.insert(vector);
}
Ex::Load { pointer } => {
self.expressions_used.insert(pointer);
}
Ex::ImageSample {
image,
sampler,
@ -130,7 +140,9 @@ impl<'tracer> ExpressionTracer<'tracer> {
use crate::SampleLevel as Sl;
match *level {
Sl::Auto | Sl::Zero => {}
Sl::Exact(expr) | Sl::Bias(expr) => self.expressions_used.insert(expr),
Sl::Exact(expr) | Sl::Bias(expr) => {
self.expressions_used.insert(expr);
}
Sl::Gradient { x, y } => self.expressions_used.insert_iter([x, y]),
}
self.expressions_used.insert_iter(depth_ref);
@ -156,7 +168,9 @@ impl<'tracer> ExpressionTracer<'tracer> {
Iq::NumLevels | Iq::NumLayers | Iq::NumSamples => {}
}
}
Ex::Unary { op: _, expr } => self.expressions_used.insert(expr),
Ex::Unary { op: _, expr } => {
self.expressions_used.insert(expr);
}
Ex::Binary { op: _, left, right } => {
self.expressions_used.insert_iter([left, right]);
}
@ -171,8 +185,12 @@ impl<'tracer> ExpressionTracer<'tracer> {
axis: _,
ctrl: _,
expr,
} => self.expressions_used.insert(expr),
Ex::Relational { fun: _, argument } => self.expressions_used.insert(argument),
} => {
self.expressions_used.insert(expr);
}
Ex::Relational { fun: _, argument } => {
self.expressions_used.insert(argument);
}
Ex::Math {
fun: _,
arg,
@ -189,15 +207,23 @@ impl<'tracer> ExpressionTracer<'tracer> {
expr,
kind: _,
convert: _,
} => self.expressions_used.insert(expr),
Ex::AtomicResult { ty, comparison: _ } => self.types_used.insert(ty),
Ex::WorkGroupUniformLoadResult { ty } => self.types_used.insert(ty),
Ex::ArrayLength(expr) => self.expressions_used.insert(expr),
Ex::SubgroupOperationResult { ty } => self.types_used.insert(ty),
} => {
self.expressions_used.insert(expr);
}
Ex::ArrayLength(expr) => {
self.expressions_used.insert(expr);
}
Ex::AtomicResult { ty, comparison: _ }
| Ex::WorkGroupUniformLoadResult { ty }
| Ex::SubgroupOperationResult { ty } => {
self.types_used.insert(ty);
}
Ex::RayQueryGetIntersection {
query,
committed: _,
} => self.expressions_used.insert(query),
} => {
self.expressions_used.insert(query);
}
}
}
}


@ -1,4 +1,4 @@
use super::handle_set_map::HandleSet;
use super::arena::HandleSet;
use super::{FunctionMap, ModuleMap};
pub struct FunctionTracer<'a> {


@ -1,75 +1,14 @@
use crate::arena::{Arena, Handle, Range, UniqueArena};
use crate::arena::{Arena, Handle, HandleSet, Range};
type Index = std::num::NonZeroU32;
/// A set of `Handle<T>` values.
pub struct HandleSet<T> {
/// Bound on zero-based indexes of handles stored in this set.
len: usize,
/// `members[i]` is true if the handle with zero-based index `i`
/// is a member.
members: bit_set::BitSet,
/// This type is indexed by values of type `T`.
as_keys: std::marker::PhantomData<T>,
}
impl<T> HandleSet<T> {
pub fn for_arena(arena: &impl ArenaType<T>) -> Self {
let len = arena.len();
Self {
len,
members: bit_set::BitSet::with_capacity(len),
as_keys: std::marker::PhantomData,
}
}
/// Add `handle` to the set.
pub fn insert(&mut self, handle: Handle<T>) {
// Note that, oddly, `Handle::index` does not return a 1-based
// `Index`, but rather a zero-based `usize`.
self.members.insert(handle.index());
}
/// Add handles from `iter` to the set.
pub fn insert_iter(&mut self, iter: impl IntoIterator<Item = Handle<T>>) {
for handle in iter {
self.insert(handle);
}
}
pub fn contains(&self, handle: Handle<T>) -> bool {
// Note that, oddly, `Handle::index` does not return a 1-based
// `Index`, but rather a zero-based `usize`.
self.members.contains(handle.index())
}
}
pub trait ArenaType<T> {
fn len(&self) -> usize;
}
impl<T> ArenaType<T> for Arena<T> {
fn len(&self) -> usize {
self.len()
}
}
impl<T: std::hash::Hash + Eq> ArenaType<T> for UniqueArena<T> {
fn len(&self) -> usize {
self.len()
}
}
type Index = crate::non_max_u32::NonMaxU32;
/// A map from old handle indices to new, compressed handle indices.
pub struct HandleMap<T> {
/// The indices assigned to handles in the compacted module.
///
/// If `new_index[i]` is `Some(n)`, then `n` is the 1-based
/// `Index` of the compacted `Handle` corresponding to the
/// pre-compacted `Handle` whose zero-based index is `i`. ("Clear
/// as mud.")
/// If `new_index[i]` is `Some(n)`, then `n` is the `Index` of the
/// compacted `Handle` corresponding to the pre-compacted `Handle`
/// whose index is `i`.
new_index: Vec<Option<Index>>,
/// This type is indexed by values of type `T`.
@ -78,11 +17,12 @@ pub struct HandleMap<T> {
impl<T: 'static> HandleMap<T> {
pub fn from_set(set: HandleSet<T>) -> Self {
let mut next_index = Index::new(1).unwrap();
let mut next_index = Index::new(0).unwrap();
Self {
new_index: (0..set.len)
.map(|zero_based_index| {
if set.members.contains(zero_based_index) {
new_index: set
.all_possible()
.map(|handle| {
if set.contains(handle) {
// This handle will be retained in the compacted version,
// so assign it a new index.
let this = next_index;
@ -111,11 +51,9 @@ impl<T: 'static> HandleMap<T> {
log::trace!(
"adjusting {} handle [{}] -> [{:?}]",
std::any::type_name::<T>(),
old.index() + 1,
old.index(),
self.new_index[old.index()]
);
// Note that `Handle::index` returns a zero-based index,
// but `Handle::new` accepts a 1-based `Index`.
self.new_index[old.index()].map(Handle::new)
}
@ -145,26 +83,24 @@ impl<T: 'static> HandleMap<T> {
///
/// Use `compacted_arena` to bounds-check the result.
pub fn adjust_range(&self, range: &mut Range<T>, compacted_arena: &Arena<T>) {
let mut index_range = range.zero_based_index_range();
let mut index_range = range.index_range();
let compacted;
// Remember that the indices we retrieve from `new_index` are 1-based
// compacted indices, but the index range we're computing is zero-based
// compacted indices.
if let Some(first1) = index_range.find_map(|i| self.new_index[i as usize]) {
if let Some(first) = index_range.find_map(|i| self.new_index[i as usize]) {
// The first call to `find_map` mutated `index_range` to hold the
// remainder of original range, which is exactly the range we need
// to search for the new last handle.
if let Some(last1) = index_range.rev().find_map(|i| self.new_index[i as usize]) {
// Build a zero-based end-exclusive range, given one-based handle indices.
compacted = first1.get() - 1..last1.get();
if let Some(last) = index_range.rev().find_map(|i| self.new_index[i as usize]) {
// Build an end-exclusive range, given the two included indices
// `first` and `last`.
compacted = first.get()..last.get() + 1;
} else {
// The range contains only a single live handle, which
// we identified with the first `find_map` call.
compacted = first1.get() - 1..first1.get();
compacted = first.get()..first.get() + 1;
}
} else {
compacted = 0..0;
};
*range = Range::from_zero_based_index_range(compacted, compacted_arena);
*range = Range::from_index_range(compacted, compacted_arena);
}
}
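// Illustrative sketch (not from the vendored source): the mapping
// built by `from_set` above, modeled with a plain `Vec<Option<u32>>`.
// Live entries receive consecutive compacted indices; dead ones `None`.
#[cfg(test)]
fn sketch_new_index(live: &[bool]) -> Vec<Option<u32>> {
    let mut next = 0u32;
    live.iter()
        .map(|&keep| {
            if keep {
                let this = next;
                next += 1;
                Some(this)
            } else {
                None
            }
        })
        .collect()
}
// sketch_new_index(&[true, false, true]) yields [Some(0), None, Some(1)].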

3 third_party/rust/naga/src/compact/mod.rs vendored

@ -4,8 +4,9 @@ mod handle_set_map;
mod statements;
mod types;
use crate::arena::HandleSet;
use crate::{arena, compact::functions::FunctionTracer};
use handle_set_map::{HandleMap, HandleSet};
use handle_set_map::HandleMap;
/// Remove unused types, expressions, and constants from `module`.
///


@ -101,9 +101,9 @@ impl FunctionTracer<'_> {
}
St::SubgroupBallot { result, predicate } => {
if let Some(predicate) = predicate {
self.expressions_used.insert(predicate)
self.expressions_used.insert(predicate);
}
self.expressions_used.insert(result)
self.expressions_used.insert(result);
}
St::SubgroupCollectiveOperation {
op: _,
@ -112,7 +112,7 @@ impl FunctionTracer<'_> {
result,
} => {
self.expressions_used.insert(argument);
self.expressions_used.insert(result)
self.expressions_used.insert(result);
}
St::SubgroupGather {
mode,
@ -126,11 +126,11 @@ impl FunctionTracer<'_> {
| crate::GatherMode::ShuffleDown(index)
| crate::GatherMode::ShuffleUp(index)
| crate::GatherMode::ShuffleXor(index) => {
self.expressions_used.insert(index)
self.expressions_used.insert(index);
}
}
self.expressions_used.insert(argument);
self.expressions_used.insert(result)
self.expressions_used.insert(result);
}
// Trivial statements.

third_party/rust/naga/src/compact/types.rs vendored

@ -44,7 +44,9 @@ impl<'a> TypeTracer<'a> {
size: _,
stride: _,
}
| Ti::BindingArray { base, size: _ } => self.types_used.insert(base),
| Ti::BindingArray { base, size: _ } => {
self.types_used.insert(base);
}
Ti::Struct {
ref members,
span: _,

third_party/rust/naga/src/front/atomic_upgrade.rs vendored (new file)

@ -0,0 +1,214 @@
//! Upgrade the types of scalars observed to be accessed as atomics to [`Atomic`] types.
//!
//! In SPIR-V, atomic operations can be applied to any scalar value, but in Naga
//! IR atomic operations can only be applied to values of type [`Atomic`]. Naga
//! IR's restriction matches Metal Shading Language and WGSL, so we don't want
//! to relax that. Instead, when the SPIR-V front end observes a value being
//! accessed using atomic instructions, it promotes the value's type from
//! [`Scalar`] to [`Atomic`]. This module implements `Module::upgrade_atomics`,
//! the function that makes that change.
//!
//! Atomics can only appear in global variables in the [`Storage`] and
//! [`Workgroup`] address spaces. These variables can either have `Atomic` types
//! themselves, or be [`Array`]s of such, or be [`Struct`]s containing such.
//! So we only need to change the types of globals and struct fields.
//!
//! Naga IR [`Load`] expressions and [`Store`] statements can operate directly
//! on [`Atomic`] values, retrieving and depositing ordinary [`Scalar`] values,
//! so changing the types doesn't have much effect on the code that operates on
//! those values.
//!
//! Future work:
//!
//! - Atomics in structs are not implemented yet.
//!
//! - The GLSL front end could use this transformation as well.
//!
//! [`Atomic`]: TypeInner::Atomic
//! [`Scalar`]: TypeInner::Scalar
//! [`Storage`]: crate::AddressSpace::Storage
//! [`Workgroup`]: crate::AddressSpace::WorkGroup
//! [`Array`]: TypeInner::Array
//! [`Struct`]: TypeInner::Struct
//! [`Load`]: crate::Expression::Load
//! [`Store`]: crate::Statement::Store
use std::sync::{atomic::AtomicUsize, Arc};
use crate::{GlobalVariable, Handle, Module, Type, TypeInner};
#[derive(Clone, Debug, thiserror::Error)]
pub enum Error {
#[error("encountered an unsupported expression")]
Unsupported,
#[error("upgrading structs of more than one member is not yet implemented")]
MultiMemberStruct,
#[error("encountered unsupported global initializer in an atomic variable")]
GlobalInitUnsupported,
}
impl From<Error> for crate::front::spv::Error {
fn from(source: Error) -> Self {
crate::front::spv::Error::AtomicUpgradeError(source)
}
}
#[derive(Clone, Default)]
struct Padding(Arc<AtomicUsize>);
impl std::fmt::Display for Padding {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
for _ in 0..self.0.load(std::sync::atomic::Ordering::Relaxed) {
f.write_str(" ")?;
}
Ok(())
}
}
impl Drop for Padding {
fn drop(&mut self) {
let _ = self.0.fetch_sub(1, std::sync::atomic::Ordering::Relaxed);
}
}
impl Padding {
fn trace(&self, msg: impl std::fmt::Display, t: impl std::fmt::Debug) {
format!("{msg} {t:#?}")
.split('\n')
.for_each(|ln| log::trace!("{self}{ln}"));
}
fn debug(&self, msg: impl std::fmt::Display, t: impl std::fmt::Debug) {
format!("{msg} {t:#?}")
.split('\n')
.for_each(|ln| log::debug!("{self}{ln}"));
}
fn inc_padding(&self) -> Padding {
let _ = self.0.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
self.clone()
}
}
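
A sketch of how the `Padding` helper is meant to be used; `walk_nested` is a hypothetical caller, not part of the module. Each `inc_padding` bumps the shared counter and returns a guard whose `Drop` undoes the bump, so nested scopes log at increasing indentation:

fn walk_nested(padding: &Padding) {
    let pad = padding.inc_padding();
    pad.trace("entered scope:", "outer");
    {
        let inner = pad.inc_padding();
        inner.trace("entered scope:", "inner"); // logged one level deeper
    } // `inner` drops here, so the indentation shrinks back by one
}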
struct UpgradeState<'a> {
padding: Padding,
module: &'a mut Module,
}
impl<'a> UpgradeState<'a> {
fn inc_padding(&self) -> Padding {
self.padding.inc_padding()
}
/// Upgrade the type, recursing until we reach the leaves.
/// At the leaves, replace scalars with atomic scalars.
fn upgrade_type(&mut self, ty: Handle<Type>) -> Result<Handle<Type>, Error> {
let padding = self.inc_padding();
padding.trace("upgrading type: ", ty);
let inner = match self.module.types[ty].inner {
TypeInner::Scalar(scalar) => {
log::trace!("{padding}hit the scalar leaf, replacing with an atomic");
TypeInner::Atomic(scalar)
}
TypeInner::Pointer { base, space } => TypeInner::Pointer {
base: self.upgrade_type(base)?,
space,
},
TypeInner::Array { base, size, stride } => TypeInner::Array {
base: self.upgrade_type(base)?,
size,
stride,
},
TypeInner::Struct { ref members, span } => {
// In the future we will have to figure out which member needs
// upgrading, but for now we'll only cover the single-member
// case.
let &[crate::StructMember {
ref name,
ty,
ref binding,
offset,
}] = &members[..]
else {
return Err(Error::MultiMemberStruct);
};
// Take our own clones of these values now, so that
// `upgrade_type` can mutate the module.
let name = name.clone();
let binding = binding.clone();
let upgraded_member_type = self.upgrade_type(ty)?;
TypeInner::Struct {
members: vec![crate::StructMember {
name,
ty: upgraded_member_type,
binding,
offset,
}],
span,
}
}
TypeInner::BindingArray { base, size } => TypeInner::BindingArray {
base: self.upgrade_type(base)?,
size,
},
_ => return Ok(ty),
};
// Now that we've upgraded any subtypes, re-borrow our type and
// build an upgraded copy of it with the new `inner`.
let r#type = &self.module.types[ty];
let span = self.module.types.get_span(ty);
let new_type = Type {
name: r#type.name.clone(),
inner,
};
padding.debug("ty: ", ty);
padding.debug("from: ", r#type);
padding.debug("to: ", &new_type);
let new_handle = self.module.types.insert(new_type, span);
Ok(new_handle)
}
fn upgrade_global_variable(&mut self, handle: Handle<GlobalVariable>) -> Result<(), Error> {
let padding = self.inc_padding();
padding.trace("upgrading global variable: ", handle);
let var = &self.module.global_variables[handle];
if var.init.is_some() {
return Err(Error::GlobalInitUnsupported);
}
let var_ty = var.ty;
let new_ty = self.upgrade_type(var.ty)?;
if new_ty != var_ty {
padding.debug("upgrading global variable: ", handle);
padding.debug("from ty: ", var_ty);
padding.debug("to ty: ", new_ty);
self.module.global_variables[handle].ty = new_ty;
}
Ok(())
}
}
impl Module {
/// Upgrade `global_var_handles` to have [`Atomic`] leaf types.
///
/// [`Atomic`]: TypeInner::Atomic
pub(crate) fn upgrade_atomics(
&mut self,
global_var_handles: impl IntoIterator<Item = Handle<GlobalVariable>>,
) -> Result<(), Error> {
let mut state = UpgradeState {
padding: Default::default(),
module: self,
};
for handle in global_var_handles {
state.upgrade_global_variable(handle)?;
}
Ok(())
}
}
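
A toy model of the leaf-replacement recursion in `upgrade_type`, using a made-up `Ty` enum rather than naga's IR: composite types are walked and `Scalar` leaves are swapped for `Atomic`, while everything else is left untouched.

#[derive(Debug, PartialEq)]
enum Ty {
    Scalar,
    Atomic,
    Array(Box<Ty>),
}

fn upgrade(ty: Ty) -> Ty {
    match ty {
        Ty::Scalar => Ty::Atomic, // the leaf case
        Ty::Array(base) => Ty::Array(Box::new(upgrade(*base))),
        other => other, // already atomic: nothing to do
    }
}

#[test]
fn upgrades_nested_scalars() {
    let t = Ty::Array(Box::new(Ty::Scalar));
    assert_eq!(upgrade(t), Ty::Array(Box::new(Ty::Atomic)));
}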

third_party/rust/naga/src/front/mod.rs vendored

@ -5,6 +5,8 @@ Frontend parsers that consume binary and text shaders and load them into [`Module`]s.
mod interpolator;
mod type_gen;
#[cfg(feature = "spv-in")]
pub mod atomic_upgrade;
#[cfg(feature = "glsl-in")]
pub mod glsl;
#[cfg(feature = "spv-in")]
@ -13,7 +15,7 @@ pub mod spv;
pub mod wgsl;
use crate::{
arena::{Arena, Handle, UniqueArena},
arena::{Arena, Handle, HandleVec, UniqueArena},
proc::{ResolveContext, ResolveError, TypeResolution},
FastHashMap,
};
@ -50,13 +52,13 @@ use std::ops;
/// [`LocalVariable`]: crate::LocalVariable
#[derive(Debug, Default)]
pub struct Typifier {
resolutions: Vec<TypeResolution>,
resolutions: HandleVec<crate::Expression, TypeResolution>,
}
impl Typifier {
pub const fn new() -> Self {
Typifier {
resolutions: Vec::new(),
resolutions: HandleVec::new(),
}
}
@ -69,7 +71,7 @@ impl Typifier {
expr_handle: Handle<crate::Expression>,
types: &'a UniqueArena<crate::Type>,
) -> &'a crate::TypeInner {
self.resolutions[expr_handle.index()].inner_with(types)
self.resolutions[expr_handle].inner_with(types)
}
/// Add an expression's type to an `Arena<Type>`.
@ -109,9 +111,9 @@ impl Typifier {
if self.resolutions.len() <= expr_handle.index() {
for (eh, expr) in expressions.iter().skip(self.resolutions.len()) {
//Note: the closure can't `Err` by construction
let resolution = ctx.resolve(expr, |h| Ok(&self.resolutions[h.index()]))?;
let resolution = ctx.resolve(expr, |h| Ok(&self.resolutions[h]))?;
log::debug!("Resolving {:?} = {:?} : {:?}", eh, expr, resolution);
self.resolutions.push(resolution);
self.resolutions.insert(eh, resolution);
}
}
Ok(())
@ -135,8 +137,8 @@ impl Typifier {
} else {
let expr = &expressions[expr_handle];
//Note: the closure can't `Err` by construction
let resolution = ctx.resolve(expr, |h| Ok(&self.resolutions[h.index()]))?;
self.resolutions[expr_handle.index()] = resolution;
let resolution = ctx.resolve(expr, |h| Ok(&self.resolutions[h]))?;
self.resolutions[expr_handle] = resolution;
Ok(())
}
}
@ -145,7 +147,7 @@ impl Typifier {
impl ops::Index<Handle<crate::Expression>> for Typifier {
type Output = TypeResolution;
fn index(&self, handle: Handle<crate::Expression>) -> &Self::Output {
&self.resolutions[handle.index()]
&self.resolutions[handle]
}
}
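
`HandleVec` itself does not appear in this diff; a rough sketch of the idea it captures (a `Vec` keyed by typed handles instead of raw `usize`s, which is what lets the `.index()` calls above disappear), assuming a handle is just a typed wrapper around an index:

use std::marker::PhantomData;

struct H<T>(usize, PhantomData<T>);

struct HandleVec<T, U> {
    inner: Vec<U>,
    _marker: PhantomData<T>,
}

impl<T, U> std::ops::Index<H<T>> for HandleVec<T, U> {
    type Output = U;
    fn index(&self, handle: H<T>) -> &U {
        // The only place a raw index escapes the handle wrapper, so a
        // handle for one arena can't index a differently-typed one.
        &self.inner[handle.0]
    }
}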


@ -1,5 +1,5 @@
use super::ModuleState;
use crate::arena::Handle;
use crate::{arena::Handle, front::atomic_upgrade};
use codespan_reporting::diagnostic::Diagnostic;
use codespan_reporting::files::SimpleFile;
use codespan_reporting::term;
@ -134,6 +134,9 @@ pub enum Error {
NonBindingArrayOfImageOrSamplers,
#[error("naga only supports specialization constant IDs up to 65535 but was given {0}")]
SpecIdTooHigh(u32),
#[error("atomic upgrade error: {0}")]
AtomicUpgradeError(atomic_upgrade::Error),
}
impl Error {

third_party/rust/naga/src/front/spv/mod.rs vendored

@ -36,6 +36,7 @@ mod null;
use convert::*;
pub use error::Error;
use function::*;
use indexmap::IndexSet;
use crate::{
arena::{Arena, Handle, UniqueArena},
@ -560,25 +561,44 @@ struct BlockContext<'function> {
parameter_sampling: &'function mut [image::SamplingFlags],
}
impl<'a> BlockContext<'a> {
/// Descend into the expression with the given handle, locating a contained
/// global variable.
///
/// This is used to track atomic upgrades.
fn get_contained_global_variable(
&self,
mut handle: Handle<crate::Expression>,
) -> Option<Handle<crate::GlobalVariable>> {
log::debug!("\t\tlocating global variable in {handle:?}");
loop {
match self.expressions[handle] {
crate::Expression::Access { base, index: _ } => {
handle = base;
log::debug!("\t\t access {handle:?}");
}
crate::Expression::AccessIndex { base, index: _ } => {
handle = base;
log::debug!("\t\t access index {handle:?}");
}
crate::Expression::GlobalVariable(h) => {
log::debug!("\t\t found {h:?}");
return Some(h);
}
_ => {
break;
}
}
}
None
}
}
enum SignAnchor {
Result,
Operand,
}
enum AtomicOpInst {
AtomicIIncrement,
}
#[allow(dead_code)]
struct AtomicOp {
instruction: AtomicOpInst,
result_type_id: spirv::Word,
result_id: spirv::Word,
pointer_id: spirv::Word,
scope_id: spirv::Word,
memory_semantics_id: spirv::Word,
}
pub struct Frontend<I> {
data: I,
data_offset: usize,
@ -590,8 +610,12 @@ pub struct Frontend<I> {
future_member_decor: FastHashMap<(spirv::Word, MemberIndex), Decoration>,
lookup_member: FastHashMap<(Handle<crate::Type>, MemberIndex), LookupMember>,
handle_sampling: FastHashMap<Handle<crate::GlobalVariable>, image::SamplingFlags>,
// Used to upgrade types used in atomic ops to atomic types, keyed by pointer id
lookup_atomic: FastHashMap<spirv::Word, AtomicOp>,
/// The set of all global variables accessed by [`Atomic`] statements we've
/// generated, so we can upgrade the types of their operands.
///
/// [`Atomic`]: crate::Statement::Atomic
upgrade_atomics: IndexSet<Handle<crate::GlobalVariable>>,
lookup_type: FastHashMap<spirv::Word, LookupType>,
lookup_void_type: Option<spirv::Word>,
lookup_storage_buffer_types: FastHashMap<Handle<crate::Type>, crate::StorageAccess>,
@ -647,7 +671,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
future_member_decor: FastHashMap::default(),
handle_sampling: FastHashMap::default(),
lookup_member: FastHashMap::default(),
lookup_atomic: FastHashMap::default(),
upgrade_atomics: Default::default(),
lookup_type: FastHashMap::default(),
lookup_void_type: None,
lookup_storage_buffer_types: FastHashMap::default(),
@ -3968,30 +3992,21 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
let result_type_id = self.next()?;
let result_id = self.next()?;
let pointer_id = self.next()?;
let scope_id = self.next()?;
let memory_semantics_id = self.next()?;
// Store the op for a later pass where we "upgrade" the pointer type
let atomic = AtomicOp {
instruction: AtomicOpInst::AtomicIIncrement,
result_type_id,
result_id,
pointer_id,
scope_id,
memory_semantics_id,
};
self.lookup_atomic.insert(pointer_id, atomic);
let _scope_id = self.next()?;
let _memory_semantics_id = self.next()?;
log::trace!("\t\t\tlooking up expr {:?}", pointer_id);
let (p_lexp_handle, p_lexp_ty_id) = {
let lexp = self.lookup_expression.lookup(pointer_id)?;
let handle = get_expr_handle!(pointer_id, &lexp);
(handle, lexp.type_id)
};
log::trace!("\t\t\tlooking up type {pointer_id:?}");
let p_ty = self.lookup_type.lookup(p_lexp_ty_id)?;
let p_ty_base_id =
p_ty.base_id.ok_or(Error::InvalidAccessType(p_lexp_ty_id))?;
log::trace!("\t\t\tlooking up base type {p_ty_base_id:?} of {p_ty:?}");
let p_base_ty = self.lookup_type.lookup(p_ty_base_id)?;
@ -4032,6 +4047,10 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
result: Some(r_lexp_handle),
};
block.push(stmt, span);
// Store any associated global variables so we can upgrade their types later
self.upgrade_atomics
.extend(ctx.get_contained_global_variable(p_lexp_handle));
}
_ => {
return Err(Error::UnsupportedInstruction(self.state, inst.op));
@ -4314,6 +4333,11 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
}?;
}
if !self.upgrade_atomics.is_empty() {
log::info!("Upgrading atomic pointers...");
module.upgrade_atomics(std::mem::take(&mut self.upgrade_atomics))?;
}
// Do entry point specific processing after all functions are parsed so that we can
// cull unused problematic builtins of gl_PerVertex.
for (ep, fun_id) in mem::take(&mut self.deferred_entry_points) {
@ -5689,17 +5713,20 @@ mod test {
#[cfg(all(feature = "wgsl-in", feature = "wgsl-out"))]
#[test]
fn atomic_i_inc() {
let _ = env_logger::builder()
.is_test(true)
.filter_level(log::LevelFilter::Trace)
.try_init();
let _ = env_logger::builder().is_test(true).try_init();
let bytes = include_bytes!("../../../tests/in/spv/atomic_i_increment.spv");
let m = super::parse_u8_slice(bytes, &Default::default()).unwrap();
let mut validator = crate::valid::Validator::new(
crate::valid::ValidationFlags::empty(),
Default::default(),
);
let info = validator.validate(&m).unwrap();
let info = match validator.validate(&m) {
Err(e) => {
log::error!("{}", e.emit_to_string(""));
return;
}
Ok(i) => i,
};
let wgsl =
crate::back::wgsl::write_string(&m, &info, crate::back::wgsl::WriterFlags::empty())
.unwrap();
@ -5709,15 +5736,14 @@ mod test {
Ok(m) => m,
Err(e) => {
log::error!("{}", e.emit_to_string(&wgsl));
// at this point we know atomics create invalid modules
// so simply bail
return;
panic!("invalid module");
}
};
let mut validator =
crate::valid::Validator::new(crate::valid::ValidationFlags::all(), Default::default());
if let Err(e) = validator.validate(&m) {
log::error!("{}", e.emit_to_string(&wgsl));
panic!("invalid generated wgsl");
}
}
}

third_party/rust/naga/src/lib.rs vendored

@ -34,25 +34,6 @@ with optional span info, representing a series of statements executed in order.
`EntryPoint`s or `Function` is a `Block`, and `Statement` has a
[`Block`][Statement::Block] variant.
## Arenas
To improve translator performance and reduce memory usage, most structures are
stored in an [`Arena`]. An `Arena<T>` stores a series of `T` values, indexed by
[`Handle<T>`](Handle) values, which are just wrappers around integer indexes.
For example, a `Function`'s expressions are stored in an `Arena<Expression>`,
and compound expressions refer to their sub-expressions via `Handle<Expression>`
values. (When examining the serialized form of a `Module`, note that the first
element of an `Arena` has an index of 1, not 0.)
A [`UniqueArena`] is just like an `Arena`, except that it stores only a single
instance of each value. The value type must implement `Eq` and `Hash`. Like an
`Arena`, inserting a value into a `UniqueArena` returns a `Handle` which can be
used to efficiently access the value, without a hash lookup. Inserting a value
multiple times returns the same `Handle`.
If the `span` feature is enabled, both `Arena` and `UniqueArena` can associate a
source code span with each element.
## Function Calls
Naga's representation of function calls is unusual. Most languages treat
@ -277,6 +258,7 @@ pub mod compact;
pub mod error;
pub mod front;
pub mod keywords;
mod non_max_u32;
pub mod proc;
mod span;
pub mod valid;

third_party/rust/naga/src/non_max_u32.rs vendored (new file)

@ -0,0 +1,145 @@
//! [`NonMaxU32`], a 32-bit type that can represent any value except [`u32::MAX`].
//!
//! Naga would like `Option<Handle<T>>` to be a 32-bit value, which means we
//! need to exclude some index value for use in representing [`None`]. We could
//! have [`Handle`] store a [`NonZeroU32`], but zero is a very useful value for
//! indexing. We could have a [`Handle`] store a value one greater than its index,
//! but it turns out that it's not uncommon to want to work with [`Handle`]s'
//! indices, so that bias of 1 becomes more visible than one would like.
//!
//! This module defines the type [`NonMaxU32`], for which `Option<NonMaxU32>` is
//! still a 32-bit value, but which is directly usable as a [`Handle`] index
//! type. It still uses a bias of 1 under the hood, but that fact is isolated
//! within the implementation.
//!
//! [`Handle`]: crate::arena::Handle
//! [`NonZeroU32`]: std::num::NonZeroU32
#![allow(dead_code)]
use std::num::NonZeroU32;
/// An unsigned 32-bit value known not to be [`u32::MAX`].
///
/// A `NonMaxU32` value can represent any value in the range `0 ..= u32::MAX -
/// 1`, and an `Option<NonMaxU32>` is still a 32-bit value. In other words,
/// `NonMaxU32` is just like [`NonZeroU32`], except that a different value is
/// missing from the full `u32` range.
///
/// Since zero is a very useful value in indexing, `NonMaxU32` is more useful
/// for representing indices than [`NonZeroU32`].
///
/// `NonMaxU32` values and `Option<NonMaxU32>` values both occupy 32 bits.
///
/// # Serialization and Deserialization
///
/// When the appropriate Cargo features are enabled, `NonMaxU32` implements
/// [`serde::Serialize`] and [`serde::Deserialize`] in the natural way, as the
/// integer value it represents. For example, serializing
/// `NonMaxU32::new(0).unwrap()` as JSON or RON yields the string `"0"`. This is
/// the case despite `NonMaxU32`'s implementation, described below.
///
/// # Implementation
///
/// Although this should not be observable to its users, a `NonMaxU32` whose
/// value is `n` is a newtype around a [`NonZeroU32`] whose value is `n + 1`.
/// This way, the range of values that `NonMaxU32` can represent, `0..=u32::MAX
/// - 1`, is mapped to the range `1..=u32::MAX`, which is the range that
/// [`NonZeroU32`] can represent. (And conversely, since [`u32`] addition wraps
/// around, the value unrepresentable in `NonMaxU32`, [`u32::MAX`], becomes the
/// value unrepresentable in [`NonZeroU32`], `0`.)
///
/// [`NonZeroU32`]: std::num::NonZeroU32
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)]
#[cfg_attr(feature = "arbitrary", derive(arbitrary::Arbitrary))]
pub struct NonMaxU32(NonZeroU32);
impl NonMaxU32 {
/// Construct a [`NonMaxU32`] whose value is `n`, if possible.
pub const fn new(n: u32) -> Option<Self> {
// If `n` is `u32::MAX`, then `n.wrapping_add(1)` is `0`,
// so `NonZeroU32::new` returns `None` in exactly the case
// where we must return `None`.
match NonZeroU32::new(n.wrapping_add(1)) {
Some(non_zero) => Some(NonMaxU32(non_zero)),
None => None,
}
}
/// Return the value of `self` as a [`u32`].
pub const fn get(self) -> u32 {
self.0.get() - 1
}
/// Construct a [`NonMaxU32`] whose value is `n`.
///
/// # Safety
///
/// The value of `n` must not be [`u32::MAX`].
pub const unsafe fn new_unchecked(n: u32) -> NonMaxU32 {
NonMaxU32(unsafe { NonZeroU32::new_unchecked(n + 1) })
}
/// Construct a [`NonMaxU32`] whose value is `index`.
///
/// # Safety
///
/// - The value of `index` must be strictly less than [`u32::MAX`].
pub const unsafe fn from_usize_unchecked(index: usize) -> Self {
NonMaxU32(unsafe { NonZeroU32::new_unchecked(index as u32 + 1) })
}
pub fn checked_add(self, n: u32) -> Option<Self> {
// Adding `n` to `self` produces `u32::MAX` if and only if
// adding `n` to `self.0` produces `0`. So we can simply
// call `NonZeroU32::checked_add` and let its check for zero
// determine whether our add would have produced `u32::MAX`.
Some(NonMaxU32(self.0.checked_add(n)?))
}
}
impl std::fmt::Debug for NonMaxU32 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.get().fmt(f)
}
}
impl std::fmt::Display for NonMaxU32 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.get().fmt(f)
}
}
#[cfg(feature = "serialize")]
impl serde::Serialize for NonMaxU32 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_u32(self.get())
}
}
#[cfg(feature = "deserialize")]
impl<'de> serde::Deserialize<'de> for NonMaxU32 {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
// Defer to `u32`'s `Deserialize` implementation.
let n = <u32 as serde::Deserialize>::deserialize(deserializer)?;
// Constrain the range of the value further.
NonMaxU32::new(n).ok_or_else(|| {
<D::Error as serde::de::Error>::invalid_value(
serde::de::Unexpected::Unsigned(n as u64),
&"a value no less than 0 and no greater than 4294967294 (2^32 - 2)",
)
})
}
}
#[test]
fn size() {
use core::mem::size_of;
assert_eq!(size_of::<Option<NonMaxU32>>(), size_of::<u32>());
}
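
A quick usage sketch of `NonMaxU32` (a hypothetical extra test, assuming the items above are in scope): the bias of 1 stays invisible to callers, and the niche keeps `Option<NonMaxU32>` at 32 bits.

#[test]
fn non_max_round_trip() {
    let zero = NonMaxU32::new(0).unwrap();
    assert_eq!(zero.get(), 0); // zero is representable
    assert_eq!(NonMaxU32::new(u32::MAX), None); // u32::MAX is the niche
    let big = NonMaxU32::new(u32::MAX - 1).unwrap();
    assert_eq!(big.checked_add(1), None); // would land on u32::MAX
    assert_eq!(zero.checked_add(5).unwrap().get(), 5);
}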


@ -3,7 +3,7 @@ use std::iter;
use arrayvec::ArrayVec;
use crate::{
arena::{Arena, Handle, UniqueArena},
arena::{Arena, Handle, HandleVec, UniqueArena},
ArraySize, BinaryOperator, Constant, Expression, Literal, Override, ScalarKind, Span, Type,
TypeInner, UnaryOperator,
};
@ -352,22 +352,23 @@ pub enum ExpressionKind {
#[derive(Debug)]
pub struct ExpressionKindTracker {
inner: Vec<ExpressionKind>,
inner: HandleVec<Expression, ExpressionKind>,
}
impl ExpressionKindTracker {
pub const fn new() -> Self {
Self { inner: Vec::new() }
Self {
inner: HandleVec::new(),
}
}
/// Forces the expression to not be const
pub fn force_non_const(&mut self, value: Handle<Expression>) {
self.inner[value.index()] = ExpressionKind::Runtime;
self.inner[value] = ExpressionKind::Runtime;
}
pub fn insert(&mut self, value: Handle<Expression>, expr_type: ExpressionKind) {
assert_eq!(self.inner.len(), value.index());
self.inner.push(expr_type);
self.inner.insert(value, expr_type);
}
pub fn is_const(&self, h: Handle<Expression>) -> bool {
matches!(self.type_of(h), ExpressionKind::Const)
@ -381,15 +382,17 @@ impl ExpressionKindTracker {
}
fn type_of(&self, value: Handle<Expression>) -> ExpressionKind {
self.inner[value.index()]
self.inner[value]
}
pub fn from_arena(arena: &Arena<Expression>) -> Self {
let mut tracker = Self {
inner: Vec::with_capacity(arena.len()),
inner: HandleVec::with_capacity(arena.len()),
};
for (_, expr) in arena.iter() {
tracker.inner.push(tracker.type_of_with_expr(expr));
for (handle, expr) in arena.iter() {
tracker
.inner
.insert(handle, tracker.type_of_with_expr(expr));
}
tracker
}

third_party/rust/naga/src/proc/index.rs vendored

@ -2,8 +2,8 @@
Definitions for index bounds checking.
*/
use crate::{valid, Handle, UniqueArena};
use bit_set::BitSet;
use crate::arena::{Handle, HandleSet, UniqueArena};
use crate::valid;
/// How should code generated by Naga do bounds checks?
///
@ -196,7 +196,7 @@ pub enum GuardedIndex {
/// Build a set of expressions used as indices, to cache in temporary variables when
/// emitted.
///
/// Given the bounds-check policies `policies`, construct a `BitSet` containing the handle
/// Given the bounds-check policies `policies`, construct a `HandleSet` containing the handle
/// indices of all the expressions in `function` that are ever used as guarded indices
/// under the [`ReadZeroSkipWrite`] policy. The `module` argument must be the module to
/// which `function` belongs, and `info` should be that function's analysis results.
@ -241,10 +241,10 @@ pub fn find_checked_indexes(
function: &crate::Function,
info: &valid::FunctionInfo,
policies: BoundsCheckPolicies,
) -> BitSet {
) -> HandleSet<crate::Expression> {
use crate::Expression as Ex;
let mut guarded_indices = BitSet::new();
let mut guarded_indices = HandleSet::for_arena(&function.expressions);
// Don't bother scanning if we never need `ReadZeroSkipWrite`.
if policies.contains(BoundsCheckPolicy::ReadZeroSkipWrite) {
@ -264,7 +264,7 @@ pub fn find_checked_indexes(
)
.is_some()
{
guarded_indices.insert(index.index());
guarded_indices.insert(index);
}
}
Ex::ImageLoad {
@ -275,15 +275,15 @@ pub fn find_checked_indexes(
..
} => {
if policies.image_load == BoundsCheckPolicy::ReadZeroSkipWrite {
guarded_indices.insert(coordinate.index());
guarded_indices.insert(coordinate);
if let Some(array_index) = array_index {
guarded_indices.insert(array_index.index());
guarded_indices.insert(array_index);
}
if let Some(sample) = sample {
guarded_indices.insert(sample.index());
guarded_indices.insert(sample);
}
if let Some(level) = level {
guarded_indices.insert(level.index());
guarded_indices.insert(level);
}
}
}

third_party/rust/naga/src/proc/layouter.rs vendored

@ -1,4 +1,4 @@
use crate::arena::Handle;
use crate::arena::{Handle, HandleVec};
use std::{fmt::Display, num::NonZeroU32, ops};
/// A newtype struct whose only valid values are powers of 2
@ -108,17 +108,15 @@ impl TypeLayout {
///
/// [WGSL §4.3.7, "Memory Layout"](https://gpuweb.github.io/gpuweb/wgsl/#memory-layouts)
#[derive(Debug, Default)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct Layouter {
/// Layouts for types in an arena, indexed by `Handle` index.
layouts: Vec<TypeLayout>,
/// Layouts for types in an arena.
layouts: HandleVec<crate::Type, TypeLayout>,
}
impl ops::Index<Handle<crate::Type>> for Layouter {
type Output = TypeLayout;
fn index(&self, handle: Handle<crate::Type>) -> &TypeLayout {
&self.layouts[handle.index()]
&self.layouts[handle]
}
}
@ -243,7 +241,7 @@ impl Layouter {
},
};
debug_assert!(size <= layout.size);
self.layouts.push(layout);
self.layouts.insert(ty_handle, layout);
}
Ok(())

third_party/rust/naga/src/valid/function.rs vendored

@ -1,5 +1,5 @@
use crate::arena::Handle;
use crate::arena::{Arena, UniqueArena};
use crate::arena::{Handle, HandleSet};
use super::validate_atomic_compare_exchange_struct;
@ -10,8 +10,6 @@ use super::{
use crate::span::WithSpan;
use crate::span::{AddSpan as _, MapErrWithSpan as _};
use bit_set::BitSet;
#[derive(Clone, Debug, thiserror::Error)]
#[cfg_attr(test, derive(PartialEq))]
pub enum CallError {
@ -257,9 +255,9 @@ impl<'a> BlockContext<'a> {
fn resolve_type_impl(
&self,
handle: Handle<crate::Expression>,
valid_expressions: &BitSet,
valid_expressions: &HandleSet<crate::Expression>,
) -> Result<&crate::TypeInner, WithSpan<ExpressionError>> {
if !valid_expressions.contains(handle.index()) {
if !valid_expressions.contains(handle) {
Err(ExpressionError::NotInScope.with_span_handle(handle, self.expressions))
} else {
Ok(self.info[handle].ty.inner_with(self.types))
@ -269,7 +267,7 @@ impl<'a> BlockContext<'a> {
fn resolve_type(
&self,
handle: Handle<crate::Expression>,
valid_expressions: &BitSet,
valid_expressions: &HandleSet<crate::Expression>,
) -> Result<&crate::TypeInner, WithSpan<FunctionError>> {
self.resolve_type_impl(handle, valid_expressions)
.map_err_inner(|source| FunctionError::Expression { handle, source }.with_span())
@ -315,7 +313,7 @@ impl super::Validator {
}
if let Some(expr) = result {
if self.valid_expression_set.insert(expr.index()) {
if self.valid_expression_set.insert(expr) {
self.valid_expression_list.push(expr);
} else {
return Err(CallError::ResultAlreadyInScope(expr)
@ -325,7 +323,7 @@ impl super::Validator {
crate::Expression::CallResult(callee)
if fun.result.is_some() && callee == function =>
{
if !self.needs_visit.remove(expr.index()) {
if !self.needs_visit.remove(expr) {
return Err(CallError::ResultAlreadyPopulated(expr)
.with_span_handle(expr, context.expressions));
}
@ -348,7 +346,7 @@ impl super::Validator {
handle: Handle<crate::Expression>,
context: &BlockContext,
) -> Result<(), WithSpan<FunctionError>> {
if self.valid_expression_set.insert(handle.index()) {
if self.valid_expression_set.insert(handle) {
self.valid_expression_list.push(handle);
Ok(())
} else {
@ -464,7 +462,7 @@ impl super::Validator {
// Note that this expression has been visited by the proper kind
// of statement.
if !self.needs_visit.remove(result.index()) {
if !self.needs_visit.remove(result) {
return Err(AtomicError::ResultAlreadyPopulated(result)
.with_span_handle(result, context.expressions)
.into_other());
@ -864,7 +862,7 @@ impl super::Validator {
}
for handle in self.valid_expression_list.drain(base_expression_count..) {
self.valid_expression_set.remove(handle.index());
self.valid_expression_set.remove(handle);
}
}
S::Break => {
@ -1321,7 +1319,7 @@ impl super::Validator {
let base_expression_count = self.valid_expression_list.len();
let info = self.validate_block_impl(statements, context)?;
for handle in self.valid_expression_list.drain(base_expression_count..) {
self.valid_expression_set.remove(handle.index());
self.valid_expression_set.remove(handle);
}
Ok(info)
}
@ -1429,12 +1427,12 @@ impl super::Validator {
}
}
self.valid_expression_set.clear();
self.valid_expression_set.clear_for_arena(&fun.expressions);
self.valid_expression_list.clear();
self.needs_visit.clear();
self.needs_visit.clear_for_arena(&fun.expressions);
for (handle, expr) in fun.expressions.iter() {
if expr.needs_pre_emit() {
self.valid_expression_set.insert(handle.index());
self.valid_expression_set.insert(handle);
}
if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
// Mark expressions that need to be visited by a particular kind of
@ -1442,7 +1440,7 @@ impl super::Validator {
if let crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } =
*expr
{
self.needs_visit.insert(handle.index());
self.needs_visit.insert(handle);
}
match self.validate_expression(
@ -1473,9 +1471,7 @@ impl super::Validator {
info.available_stages &= stages;
if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
if let Some(unvisited) = self.needs_visit.iter().next() {
let index = std::num::NonZeroU32::new(unvisited as u32 + 1).unwrap();
let handle = Handle::new(index);
if let Some(handle) = self.needs_visit.iter().next() {
return Err(FunctionError::UnvisitedExpression(handle)
.with_span_handle(handle, &fun.expressions));
}

third_party/rust/naga/src/valid/handles.rs vendored

@ -5,11 +5,12 @@ use crate::{
Handle,
};
use crate::non_max_u32::NonMaxU32;
use crate::{Arena, UniqueArena};
use super::ValidationError;
use std::{convert::TryInto, hash::Hash, num::NonZeroU32};
use std::{convert::TryInto, hash::Hash};
impl super::Validator {
/// Validates that all handles within `module` are:
@ -688,7 +689,7 @@ impl<T> Handle<T> {
Ok(self)
} else {
let erase_handle_type = |handle: Handle<_>| {
Handle::new(NonZeroU32::new((handle.index() + 1).try_into().unwrap()).unwrap())
Handle::new(NonMaxU32::new((handle.index()).try_into().unwrap()).unwrap())
};
Err(FwdDepError {
subject: erase_handle_type(self),

third_party/rust/naga/src/valid/mod.rs vendored

@ -11,7 +11,7 @@ mod interface;
mod r#type;
use crate::{
arena::Handle,
arena::{Handle, HandleSet},
proc::{ExpressionKindTracker, LayoutError, Layouter, TypeResolution},
FastHashSet,
};
@ -124,9 +124,13 @@ bitflags::bitflags! {
/// Support for 64-bit signed and unsigned integers.
const SHADER_INT64 = 0x8000;
/// Support for subgroup operations.
/// Implies support for subgroup operations in both fragment and compute stages,
/// but not necessarily in the vertex stage, which requires [`Capabilities::SUBGROUP_VERTEX_STAGE`].
const SUBGROUP = 0x10000;
/// Support for subgroup barriers.
const SUBGROUP_BARRIER = 0x20000;
/// Support for subgroup operations in the vertex stage.
const SUBGROUP_VERTEX_STAGE = 0x40000;
/// Support for [`AtomicFunction::Min`] and [`AtomicFunction::Max`] on
/// 64-bit integers in the [`Storage`] address space, when the return
/// value is not used.
@ -136,9 +140,9 @@ bitflags::bitflags! {
/// [`AtomicFunction::Min`]: crate::AtomicFunction::Min
/// [`AtomicFunction::Max`]: crate::AtomicFunction::Max
/// [`Storage`]: crate::AddressSpace::Storage
const SHADER_INT64_ATOMIC_MIN_MAX = 0x40000;
const SHADER_INT64_ATOMIC_MIN_MAX = 0x80000;
/// Support for all atomic operations on 64-bit integers.
const SHADER_INT64_ATOMIC_ALL_OPS = 0x80000;
const SHADER_INT64_ATOMIC_ALL_OPS = 0x100000;
}
}
@ -255,7 +259,7 @@ pub struct Validator {
#[allow(dead_code)]
switch_values: FastHashSet<crate::SwitchValue>,
valid_expression_list: Vec<Handle<crate::Expression>>,
valid_expression_set: BitSet,
valid_expression_set: HandleSet<crate::Expression>,
override_ids: FastHashSet<u16>,
allow_overrides: bool,
@ -277,7 +281,7 @@ pub struct Validator {
/// [`Atomic`]: crate::Statement::Atomic
/// [`Expression`]: crate::Expression
/// [`Statement`]: crate::Statement
needs_visit: BitSet,
needs_visit: HandleSet<crate::Expression>,
}
#[derive(Clone, Debug, thiserror::Error)]
@ -416,21 +420,38 @@ impl crate::TypeInner {
impl Validator {
/// Construct a new validator instance.
pub fn new(flags: ValidationFlags, capabilities: Capabilities) -> Self {
let subgroup_operations = if capabilities.contains(Capabilities::SUBGROUP) {
use SubgroupOperationSet as S;
S::BASIC | S::VOTE | S::ARITHMETIC | S::BALLOT | S::SHUFFLE | S::SHUFFLE_RELATIVE
} else {
SubgroupOperationSet::empty()
};
let subgroup_stages = {
let mut stages = ShaderStages::empty();
if capabilities.contains(Capabilities::SUBGROUP_VERTEX_STAGE) {
stages |= ShaderStages::VERTEX;
}
if capabilities.contains(Capabilities::SUBGROUP) {
stages |= ShaderStages::FRAGMENT | ShaderStages::COMPUTE;
}
stages
};
Validator {
flags,
capabilities,
subgroup_stages: ShaderStages::empty(),
subgroup_operations: SubgroupOperationSet::empty(),
subgroup_stages,
subgroup_operations,
types: Vec::new(),
layouter: Layouter::default(),
location_mask: BitSet::new(),
ep_resource_bindings: FastHashSet::default(),
switch_values: FastHashSet::default(),
valid_expression_list: Vec::new(),
valid_expression_set: BitSet::new(),
valid_expression_set: HandleSet::new(),
override_ids: FastHashSet::default(),
allow_overrides: true,
needs_visit: BitSet::new(),
needs_visit: HandleSet::new(),
}
}
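
A hypothetical in-module test (not in the source) pinning down the stage derivation above; it assumes visibility of the private `subgroup_stages` field, i.e. that it lives alongside `Validator` in `valid/mod.rs`:

#[test]
fn subgroup_capabilities_map_to_stages() {
    // SUBGROUP alone enables the fragment and compute stages only.
    let v = Validator::new(ValidationFlags::all(), Capabilities::SUBGROUP);
    assert_eq!(
        v.subgroup_stages,
        ShaderStages::FRAGMENT | ShaderStages::COMPUTE
    );
    // SUBGROUP_VERTEX_STAGE adds the vertex stage on top.
    let v = Validator::new(
        ValidationFlags::all(),
        Capabilities::SUBGROUP | Capabilities::SUBGROUP_VERTEX_STAGE,
    );
    assert!(v.subgroup_stages.contains(ShaderStages::VERTEX));
}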

Some file diffs are hidden because one or more lines are too long

third_party/rust/wgpu-core/Cargo.toml vendored

@ -93,6 +93,7 @@ cfg_aliases = "0.1"
[features]
api_log_info = []
counters = ["wgt/counters"]
dx12 = ["hal/dx12"]
fragile-send-sync-non-atomic-wasm = [
"hal/fragile-send-sync-non-atomic-wasm",


@ -1,18 +1,18 @@
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
device::{
bgl, Device, DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT,
},
error::{ErrorFormatter, PrettyError},
hal_api::HalApi,
id::{BindGroupLayoutId, BufferId, SamplerId, TextureId, TextureViewId},
id::{BindGroupLayoutId, BufferId, SamplerId, TextureViewId},
init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction},
resource::{Resource, ResourceInfo, ResourceType},
resource::{
DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError, ParentDevice,
Resource, ResourceInfo, ResourceType,
},
resource_log,
snatch::{SnatchGuard, Snatchable},
track::{BindGroupStates, UsageConflict},
validation::{MissingBufferUsageError, MissingTextureUsageError},
track::{BindGroupStates, ResourceUsageCompatibilityError},
Label,
};
@ -76,14 +76,14 @@ pub enum CreateBindGroupError {
Device(#[from] DeviceError),
#[error("Bind group layout is invalid")]
InvalidLayout,
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(BufferId),
#[error("Texture view {0:?} is invalid")]
InvalidTextureView(TextureViewId),
#[error("Texture {0:?} is invalid")]
InvalidTexture(TextureId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(BufferId),
#[error("Texture view Id {0:?} is invalid")]
InvalidTextureViewId(TextureViewId),
#[error("Sampler {0:?} is invalid")]
InvalidSampler(SamplerId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error(
"Binding count declared with at most {expected} items, but {actual} items were provided"
)]
@ -182,7 +182,7 @@ pub enum CreateBindGroupError {
#[error("The adapter does not support read access for storages texture of format {0:?}")]
StorageReadNotSupported(wgt::TextureFormat),
#[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict),
ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError),
}
impl PrettyError for CreateBindGroupError {
@ -198,10 +198,7 @@ impl PrettyError for CreateBindGroupError {
Self::BindingSizeTooSmall { buffer, .. } => {
fmt.buffer_label(&buffer);
}
Self::InvalidBuffer(id) => {
fmt.buffer_label(&id);
}
Self::InvalidTextureView(id) => {
Self::InvalidTextureViewId(id) => {
fmt.texture_view_label(&id);
}
Self::InvalidSampler(id) => {
@ -478,7 +475,6 @@ pub struct BindGroupLayout<A: HalApi> {
#[allow(unused)]
pub(crate) binding_count_validator: BindingTypeMaxCountValidator,
pub(crate) info: ResourceInfo<BindGroupLayout<A>>,
pub(crate) label: String,
}
impl<A: HalApi> Drop for BindGroupLayout<A> {
@ -487,12 +483,7 @@ impl<A: HalApi> Drop for BindGroupLayout<A> {
self.device.bgl_pool.remove(&self.entries);
}
if let Some(raw) = self.raw.take() {
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBindGroupLayout(self.info.id()));
}
resource_log!("Destroy raw BindGroupLayout {:?}", self.info.label());
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_bind_group_layout(raw);
@ -513,11 +504,14 @@ impl<A: HalApi> Resource for BindGroupLayout<A> {
fn as_info_mut(&mut self) -> &mut ResourceInfo<Self> {
&mut self.info
}
}
fn label(&self) -> &str {
&self.label
impl<A: HalApi> ParentDevice<A> for BindGroupLayout<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
impl<A: HalApi> BindGroupLayout<A> {
pub(crate) fn raw(&self) -> &A::BindGroupLayout {
self.raw.as_ref().unwrap()
@ -631,13 +625,7 @@ pub struct PipelineLayout<A: HalApi> {
impl<A: HalApi> Drop for PipelineLayout<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw PipelineLayout {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyPipelineLayout(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_pipeline_layout(raw);
@ -751,6 +739,12 @@ impl<A: HalApi> Resource for PipelineLayout<A> {
}
}
impl<A: HalApi> ParentDevice<A> for PipelineLayout<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
#[repr(C)]
#[derive(Clone, Debug, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
@ -866,13 +860,7 @@ pub struct BindGroup<A: HalApi> {
impl<A: HalApi> Drop for BindGroup<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw BindGroup {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBindGroup(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_bind_group(raw);
@ -882,17 +870,24 @@ impl<A: HalApi> Drop for BindGroup<A> {
}
impl<A: HalApi> BindGroup<A> {
pub(crate) fn raw(&self, guard: &SnatchGuard) -> Option<&A::BindGroup> {
pub(crate) fn try_raw<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&A::BindGroup, DestroyedResourceError> {
// Clippy insists on writing it this way. The idea is to return an error
// if any of the raw buffers is no longer valid.
for buffer in &self.used_buffer_ranges {
let _ = buffer.buffer.raw(guard)?;
buffer.buffer.try_raw(guard)?;
}
for texture in &self.used_texture_ranges {
let _ = texture.texture.raw(guard)?;
texture.texture.try_raw(guard)?;
}
self.raw.get(guard)
self.raw
.get(guard)
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
pub(crate) fn validate_dynamic_bindings(
&self,
bind_group_index: u32,
@ -956,6 +951,12 @@ impl<A: HalApi> Resource for BindGroup<A> {
}
}
impl<A: HalApi> ParentDevice<A> for BindGroup<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum GetBindGroupLayoutError {


@ -63,28 +63,18 @@ mod compat {
bgl::Origin::Derived => "implicit",
bgl::Origin::Pool => "explicit",
};
let expected_label = expected_bgl.label();
diff.push(format!(
"Should be compatible an with an {expected_bgl_type} bind group layout {}",
if expected_label.is_empty() {
"without label".to_string()
} else {
format!("with label = `{}`", expected_label)
}
"Should be compatible an with an {expected_bgl_type} {}",
expected_bgl.error_ident()
));
if let Some(assigned_bgl) = self.assigned.as_ref() {
let assigned_bgl_type = match assigned_bgl.origin {
bgl::Origin::Derived => "implicit",
bgl::Origin::Pool => "explicit",
};
let assigned_label = assigned_bgl.label();
diff.push(format!(
"Assigned {assigned_bgl_type} bind group layout {}",
if assigned_label.is_empty() {
"without label".to_string()
} else {
format!("with label = `{}`", assigned_label)
}
"Assigned {assigned_bgl_type} {}",
assigned_bgl.error_ident()
));
for (id, e_entry) in expected_bgl.entries.iter() {
if let Some(a_entry) = assigned_bgl.entries.get(*id) {
@ -325,9 +315,7 @@ impl<A: HalApi> Binder<A> {
bind_group: &Arc<BindGroup<A>>,
offsets: &[wgt::DynamicOffset],
) -> &'a [EntryPayload<A>] {
let bind_group_id = bind_group.as_info().id();
log::trace!("\tBinding [{}] = group {:?}", index, bind_group_id);
debug_assert_eq!(A::VARIANT, bind_group_id.backend());
log::trace!("\tBinding [{}] = group {}", index, bind_group.error_ident());
let payload = &mut self.payloads[index];
payload.group = Some(bind_group.clone());

Просмотреть файл

@ -48,7 +48,7 @@ To create a render bundle:
3) Call [`Global::render_bundle_encoder_finish`][Grbef], which analyzes and cleans up
the command stream and returns a `RenderBundleId`.
4) Then, any number of times, call [`wgpu_render_pass_execute_bundles`][wrpeb] to
4) Then, any number of times, call [`render_pass_execute_bundles`][wrpeb] to
execute the bundle as part of some render pass.
## Implementation
@ -73,18 +73,16 @@ index format changes.
[Gdcrbe]: crate::global::Global::device_create_render_bundle_encoder
[Grbef]: crate::global::Global::render_bundle_encoder_finish
[wrpeb]: crate::command::render::render_commands::wgpu_render_pass_execute_bundles
[wrpeb]: crate::global::Global::render_pass_execute_bundles
!*/
#![allow(clippy::reversed_empty_ranges)]
#[cfg(feature = "trace")]
use crate::device::trace;
use crate::{
binding_model::{buffer_binding_type_alignment, BindGroup, BindGroupLayout, PipelineLayout},
command::{
BasePass, BindGroupStateChange, ColorAttachmentError, DrawError, MapPassErr,
PassErrorScope, RenderCommand, RenderCommandError, StateChange,
PassErrorScope, RenderCommandError, StateChange,
},
conv,
device::{
@ -97,12 +95,13 @@ use crate::{
id,
init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction},
pipeline::{PipelineFlags, RenderPipeline, VertexStep},
resource::{Buffer, Resource, ResourceInfo, ResourceType},
resource::{
Buffer, DestroyedResourceError, ParentDevice, Resource, ResourceInfo, ResourceType,
},
resource_log,
snatch::SnatchGuard,
track::RenderBundleScope,
validation::check_buffer_usage,
Label, LabelHelpers,
Label,
};
use arrayvec::ArrayVec;
@ -111,7 +110,10 @@ use thiserror::Error;
use hal::CommandEncoder as _;
use super::ArcRenderCommand;
use super::{
render_command::{ArcRenderCommand, RenderCommand},
DrawKind,
};
/// <https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-draw>
fn validate_draw<A: HalApi>(
@ -326,7 +328,7 @@ impl RenderBundleEncoder {
#[cfg(feature = "trace")]
pub(crate) fn to_base_pass(&self) -> BasePass<RenderCommand> {
BasePass::from_ref(self.base.as_ref())
self.base.clone()
}
pub fn parent(&self) -> id::DeviceId {
@ -349,6 +351,10 @@ impl RenderBundleEncoder {
device: &Arc<Device<A>>,
hub: &Hub<A>,
) -> Result<RenderBundle<A>, RenderBundleError> {
let scope = PassErrorScope::Bundle;
device.check_is_valid().map_pass_err(scope)?;
let bind_group_guard = hub.bind_groups.read();
let pipeline_guard = hub.render_pipelines.read();
let buffer_guard = hub.buffers.read();
@ -393,10 +399,11 @@ impl RenderBundleEncoder {
let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new();
let base = self.base.as_ref();
let mut next_dynamic_offset = 0;
for &command in base.commands {
let base = &self.base;
for &command in &base.commands {
match command {
RenderCommand::SetBindGroup {
index,
@ -405,15 +412,18 @@ impl RenderBundleEncoder {
} => {
let scope = PassErrorScope::SetBindGroup(bind_group_id);
let bind_group = state
let bind_group = bind_group_guard
.get(bind_group_id)
.map_err(|_| RenderCommandError::InvalidBindGroupId(bind_group_id))
.map_pass_err(scope)?;
state
.trackers
.bind_groups
.write()
.add_single(&*bind_group_guard, bind_group_id)
.ok_or(RenderCommandError::InvalidBindGroup(bind_group_id))
.map_pass_err(scope)?;
self.check_valid_to_use(bind_group.device.info.id())
.map_pass_err(scope)?;
.add_single(bind_group);
bind_group.same_device(device).map_pass_err(scope)?;
let max_bind_groups = device.limits.max_bind_groups;
if index >= max_bind_groups {
@ -470,15 +480,18 @@ impl RenderBundleEncoder {
RenderCommand::SetPipeline(pipeline_id) => {
let scope = PassErrorScope::SetPipelineRender(pipeline_id);
let pipeline = state
let pipeline = pipeline_guard
.get(pipeline_id)
.map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
state
.trackers
.render_pipelines
.write()
.add_single(&*pipeline_guard, pipeline_id)
.ok_or(RenderCommandError::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
self.check_valid_to_use(pipeline.device.info.id())
.map_pass_err(scope)?;
.add_single(pipeline);
pipeline.same_device(device).map_pass_err(scope)?;
self.context
.check_compatible(&pipeline.pass_context, RenderPassCompatibilityCheckType::RenderPipeline)
@ -494,7 +507,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope);
}
let pipeline_state = PipelineState::new(pipeline);
let pipeline_state = PipelineState::new(pipeline, pipeline_id);
commands.push(ArcRenderCommand::SetPipeline(pipeline.clone()));
@ -513,17 +526,22 @@ impl RenderBundleEncoder {
size,
} => {
let scope = PassErrorScope::SetIndexBuffer(buffer_id);
let buffer = state
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))
.map_pass_err(scope)?;
state
.trackers
.buffers
.write()
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDEX)
.merge_single(buffer, hal::BufferUses::INDEX)
.map_pass_err(scope)?;
buffer.same_device(device).map_pass_err(scope)?;
buffer.check_usage(wgt::BufferUsages::INDEX).map_pass_err(scope)?;
let end = match size {
Some(s) => offset + s.get(),
None => buffer.size,
@ -552,17 +570,20 @@ impl RenderBundleEncoder {
.map_pass_err(scope);
}
let buffer = state
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))
.map_pass_err(scope)?;
state
.trackers
.buffers
.write()
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::VERTEX)
.buffers.write()
.merge_single(buffer, hal::BufferUses::VERTEX)
.map_pass_err(scope)?;
buffer.same_device(device).map_pass_err(scope)?;
buffer.check_usage(wgt::BufferUsages::VERTEX).map_pass_err(scope)?;
let end = match size {
Some(s) => offset + s.get(),
None => buffer.size,
@ -598,8 +619,8 @@ impl RenderBundleEncoder {
first_instance,
} => {
let scope = PassErrorScope::Draw {
kind: DrawKind::Draw,
indexed: false,
indirect: false,
pipeline: state.pipeline_id(),
};
let pipeline = state.pipeline(scope)?;
@ -616,7 +637,7 @@ impl RenderBundleEncoder {
if instance_count > 0 && vertex_count > 0 {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.extend(state.flush_binds(used_bind_groups, &base.dynamic_offsets));
commands.push(ArcRenderCommand::Draw {
vertex_count,
instance_count,
@ -633,8 +654,8 @@ impl RenderBundleEncoder {
first_instance,
} => {
let scope = PassErrorScope::Draw {
kind: DrawKind::Draw,
indexed: true,
indirect: false,
pipeline: state.pipeline_id(),
};
let pipeline = state.pipeline(scope)?;
@ -657,7 +678,7 @@ impl RenderBundleEncoder {
if instance_count > 0 && index_count > 0 {
commands.extend(state.flush_index());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.extend(state.flush_binds(used_bind_groups, &base.dynamic_offsets));
commands.push(ArcRenderCommand::DrawIndexed { index_count, instance_count, first_index, base_vertex, first_instance });
}
}
@ -668,8 +689,8 @@ impl RenderBundleEncoder {
indexed: false,
} => {
let scope = PassErrorScope::Draw {
kind: DrawKind::DrawIndirect,
indexed: false,
indirect: true,
pipeline: state.pipeline_id(),
};
device
@ -679,17 +700,20 @@ impl RenderBundleEncoder {
let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let buffer = state
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))
.map_pass_err(scope)?;
state
.trackers
.buffers
.write()
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.buffers.write()
.merge_single(buffer, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
buffer.same_device(device).map_pass_err(scope)?;
buffer.check_usage(wgt::BufferUsages::INDIRECT).map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
buffer,
offset..(offset + mem::size_of::<wgt::DrawIndirectArgs>() as u64),
@ -697,7 +721,7 @@ impl RenderBundleEncoder {
));
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.extend(state.flush_binds(used_bind_groups, &base.dynamic_offsets));
commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: false });
}
RenderCommand::MultiDrawIndirect {
@ -707,8 +731,8 @@ impl RenderBundleEncoder {
indexed: true,
} => {
let scope = PassErrorScope::Draw {
kind: DrawKind::DrawIndirect,
indexed: true,
indirect: true,
pipeline: state.pipeline_id(),
};
device
@ -718,17 +742,20 @@ impl RenderBundleEncoder {
let pipeline = state.pipeline(scope)?;
let used_bind_groups = pipeline.used_bind_groups;
let buffer = state
let buffer = buffer_guard
.get(buffer_id)
.map_err(|_| RenderCommandError::InvalidBufferId(buffer_id))
.map_pass_err(scope)?;
state
.trackers
.buffers
.write()
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.buffers.write()
.merge_single(buffer, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
buffer.same_device(device).map_pass_err(scope)?;
buffer.check_usage(wgt::BufferUsages::INDIRECT).map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
buffer,
offset..(offset + mem::size_of::<wgt::DrawIndirectArgs>() as u64),
@ -742,7 +769,7 @@ impl RenderBundleEncoder {
commands.extend(index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
commands.extend(state.flush_binds(used_bind_groups, &base.dynamic_offsets));
commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: true });
}
RenderCommand::MultiDrawIndirect { .. }
@ -778,24 +805,13 @@ impl RenderBundleEncoder {
buffer_memory_init_actions,
texture_memory_init_actions,
context: self.context,
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(device.tracker_indices.bundles.clone()),
),
info: ResourceInfo::new(&desc.label, Some(device.tracker_indices.bundles.clone())),
discard_hal_labels: device
.instance_flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS),
})
}
fn check_valid_to_use(&self, device_id: id::DeviceId) -> Result<(), RenderBundleErrorInner> {
if device_id != self.parent_id {
return Err(RenderBundleErrorInner::NotValidToUse);
}
Ok(())
}
pub fn set_index_buffer(
&mut self,
buffer_id: id::BufferId,
@ -826,25 +842,14 @@ pub enum CreateRenderBundleError {
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum ExecutionError {
#[error("Buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error("BindGroup {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("Using {0} in a render bundle is not implemented")]
Unimplemented(&'static str),
}
impl PrettyError for ExecutionError {
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
fmt.error(self);
match *self {
Self::DestroyedBuffer(id) => {
fmt.buffer_label(&id);
}
Self::InvalidBindGroup(id) => {
fmt.bind_group_label(&id);
}
Self::Unimplemented(_reason) => {}
};
}
}
@ -871,12 +876,7 @@ pub struct RenderBundle<A: HalApi> {
impl<A: HalApi> Drop for RenderBundle<A> {
fn drop(&mut self) {
resource_log!("Destroy raw RenderBundle {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyRenderBundle(self.info.id()));
}
resource_log!("Drop {}", self.error_ident());
}
}
@ -916,9 +916,7 @@ impl<A: HalApi> RenderBundle<A> {
num_dynamic_offsets,
bind_group,
} => {
let raw_bg = bind_group
.raw(snatch_guard)
.ok_or(ExecutionError::InvalidBindGroup(bind_group.info.id()))?;
let raw_bg = bind_group.try_raw(snatch_guard)?;
unsafe {
raw.set_bind_group(
pipeline_layout.as_ref().unwrap().raw(),
@ -940,9 +938,7 @@ impl<A: HalApi> RenderBundle<A> {
offset,
size,
} => {
let buffer: &A::Buffer = buffer
.raw(snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let buffer: &A::Buffer = buffer.try_raw(snatch_guard)?;
let bb = hal::BufferBinding {
buffer,
offset: *offset,
@ -956,9 +952,7 @@ impl<A: HalApi> RenderBundle<A> {
offset,
size,
} => {
let buffer = buffer
.raw(snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let buffer = buffer.try_raw(snatch_guard)?;
let bb = hal::BufferBinding {
buffer,
offset: *offset,
@ -1043,9 +1037,7 @@ impl<A: HalApi> RenderBundle<A> {
count: None,
indexed: false,
} => {
let buffer = buffer
.raw(snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let buffer = buffer.try_raw(snatch_guard)?;
unsafe { raw.draw_indirect(buffer, *offset, 1) };
}
Cmd::MultiDrawIndirect {
@ -1054,9 +1046,7 @@ impl<A: HalApi> RenderBundle<A> {
count: None,
indexed: true,
} => {
let buffer = buffer
.raw(snatch_guard)
.ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let buffer = buffer.try_raw(snatch_guard)?;
unsafe { raw.draw_indexed_indirect(buffer, *offset, 1) };
}
Cmd::MultiDrawIndirect { .. } | Cmd::MultiDrawIndirectCount { .. } => {
@ -1104,6 +1094,12 @@ impl<A: HalApi> Resource for RenderBundle<A> {
}
}
impl<A: HalApi> ParentDevice<A> for RenderBundle<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
/// A render bundle's current index buffer state.
///
/// [`RenderBundleEncoder::finish`] records the currently set index buffer here,
@ -1213,6 +1209,8 @@ struct PipelineState<A: HalApi> {
/// The pipeline
pipeline: Arc<RenderPipeline<A>>,
pipeline_id: id::RenderPipelineId,
/// How this pipeline's vertex shader traverses each vertex buffer, indexed
/// by vertex buffer slot number.
steps: Vec<VertexStep>,
@ -1226,9 +1224,10 @@ struct PipelineState<A: HalApi> {
}
impl<A: HalApi> PipelineState<A> {
fn new(pipeline: &Arc<RenderPipeline<A>>) -> Self {
fn new(pipeline: &Arc<RenderPipeline<A>>, pipeline_id: id::RenderPipelineId) -> Self {
Self {
pipeline: pipeline.clone(),
pipeline_id,
steps: pipeline.vertex_steps.to_vec(),
push_constant_ranges: pipeline
.layout
@ -1302,7 +1301,7 @@ struct State<A: HalApi> {
impl<A: HalApi> State<A> {
/// Return the id of the current pipeline, if any.
fn pipeline_id(&self) -> Option<id::RenderPipelineId> {
self.pipeline.as_ref().map(|p| p.pipeline.as_info().id())
self.pipeline.as_ref().map(|p| p.pipeline_id)
}
/// Return the current pipeline state. Return an error if none is set.
@ -1404,7 +1403,7 @@ impl<A: HalApi> State<A> {
) {
match self.index {
Some(ref current)
if Arc::ptr_eq(&current.buffer, &buffer)
if current.buffer.is_equal(&buffer)
&& current.format == format
&& current.range == range =>
{
@ -1473,8 +1472,6 @@ impl<A: HalApi> State<A> {
/// Error encountered when finishing recording a render bundle.
#[derive(Clone, Debug, Error)]
pub(super) enum RenderBundleErrorInner {
#[error("Resource is not valid to use with this render bundle because the resource and the bundle come from different devices")]
NotValidToUse,
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
@ -1504,10 +1501,12 @@ pub struct RenderBundleError {
}
impl RenderBundleError {
pub(crate) const INVALID_DEVICE: Self = RenderBundleError {
scope: PassErrorScope::Bundle,
inner: RenderBundleErrorInner::Device(DeviceError::Invalid),
};
pub fn from_device_error(e: DeviceError) -> Self {
Self {
scope: PassErrorScope::Bundle,
inner: e.into(),
}
}
}
impl PrettyError for RenderBundleError {
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {


@ -9,9 +9,12 @@ use crate::{
get_lowest_common_denom,
global::Global,
hal_api::HalApi,
id::{BufferId, CommandEncoderId, DeviceId, TextureId},
id::{BufferId, CommandEncoderId, TextureId},
init_tracker::{MemoryInitKind, TextureInitRange},
resource::{Resource, Texture, TextureClearMode},
resource::{
DestroyedResourceError, ParentDevice, Resource, ResourceErrorIdent, Texture,
TextureClearMode,
},
snatch::SnatchGuard,
track::{TextureSelector, TextureTracker},
};
@ -26,14 +29,14 @@ use wgt::{math::align_to, BufferAddress, BufferUsages, ImageSubresourceRange, Te
pub enum ClearError {
#[error("To use clear_texture the CLEAR_TEXTURE feature needs to be enabled")]
MissingClearTextureFeature,
#[error("Device {0:?} is invalid")]
InvalidDevice(DeviceId),
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(BufferId),
#[error("Texture {0:?} is invalid or destroyed")]
InvalidTexture(TextureId),
#[error("Texture {0:?} can not be cleared")]
NoValidTextureClearMode(TextureId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(BufferId),
#[error("TextureId {0:?} is invalid")]
InvalidTextureId(TextureId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("{0} can not be cleared")]
NoValidTextureClearMode(ResourceErrorIdent),
#[error("Buffer clear size {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
UnalignedFillSize(BufferAddress),
#[error("Buffer offset {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
@ -98,27 +101,20 @@ impl Global {
list.push(TraceCommand::ClearBuffer { dst, offset, size });
}
let (dst_buffer, dst_pending) = {
let buffer_guard = hub.buffers.read();
let dst_buffer = buffer_guard
.get(dst)
.map_err(|_| ClearError::InvalidBuffer(dst))?;
let dst_buffer = hub
.buffers
.get(dst)
.map_err(|_| ClearError::InvalidBufferId(dst))?;
if dst_buffer.device.as_info().id() != cmd_buf.device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
dst_buffer.same_device_as(cmd_buf.as_ref())?;
let dst_pending = cmd_buf_data
.trackers
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
cmd_buf_data
.trackers
.buffers
.set_single(dst_buffer, hal::BufferUses::COPY_DST)
.ok_or(ClearError::InvalidBuffer(dst))?
};
let snatch_guard = dst_buffer.device.snatchable_lock.read();
let dst_raw = dst_buffer
.raw
.get(&snatch_guard)
.ok_or(ClearError::InvalidBuffer(dst))?;
let dst_raw = dst_buffer.try_raw(&snatch_guard)?;
if !dst_buffer.usage.contains(BufferUsages::COPY_DST) {
return Err(ClearError::MissingCopyDstUsageFlag(Some(dst), None));
}
@ -201,11 +197,9 @@ impl Global {
let dst_texture = hub
.textures
.get(dst)
.map_err(|_| ClearError::InvalidTexture(dst))?;
.map_err(|_| ClearError::InvalidTextureId(dst))?;
if dst_texture.device.as_info().id() != cmd_buf.device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
dst_texture.same_device_as(cmd_buf.as_ref())?;
// Check if subresource aspects are valid.
let clear_aspects =
@ -242,9 +236,7 @@ impl Global {
}
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(ClearError::InvalidDevice(cmd_buf.device.as_info().id()));
}
device.check_is_valid()?;
let (encoder, tracker) = cmd_buf_data.open_encoder_and_tracker()?;
let snatch_guard = device.snatchable_lock.read();
@ -272,9 +264,7 @@ pub(crate) fn clear_texture<A: HalApi>(
zero_buffer: &A::Buffer,
snatch_guard: &SnatchGuard<'_>,
) -> Result<(), ClearError> {
let dst_raw = dst_texture
.raw(snatch_guard)
.ok_or_else(|| ClearError::InvalidTexture(dst_texture.as_info().id()))?;
let dst_raw = dst_texture.try_raw(snatch_guard)?;
// Issue the right barrier.
let clear_usage = match *dst_texture.clear_mode.read() {
@ -287,7 +277,7 @@ pub(crate) fn clear_texture<A: HalApi>(
}
TextureClearMode::None => {
return Err(ClearError::NoValidTextureClearMode(
dst_texture.as_info().id(),
dst_texture.error_ident(),
));
}
};
@ -312,7 +302,6 @@ pub(crate) fn clear_texture<A: HalApi>(
// change_replace_tracked whenever possible.
let dst_barrier = texture_tracker
.set_single(dst_texture, selector, clear_usage)
.unwrap()
.map(|pending| pending.into_hal(dst_raw));
unsafe {
encoder.transition_textures(dst_barrier.into_iter());
@ -329,14 +318,14 @@ pub(crate) fn clear_texture<A: HalApi>(
dst_raw,
),
TextureClearMode::Surface { .. } => {
clear_texture_via_render_passes(dst_texture, range, true, encoder)?
clear_texture_via_render_passes(dst_texture, range, true, encoder)
}
TextureClearMode::RenderPass { is_color, .. } => {
clear_texture_via_render_passes(dst_texture, range, is_color, encoder)?
clear_texture_via_render_passes(dst_texture, range, is_color, encoder)
}
TextureClearMode::None => {
return Err(ClearError::NoValidTextureClearMode(
dst_texture.as_info().id(),
dst_texture.error_ident(),
));
}
}
@ -442,7 +431,7 @@ fn clear_texture_via_render_passes<A: HalApi>(
range: TextureInitRange,
is_color: bool,
encoder: &mut A::CommandEncoder,
) -> Result<(), ClearError> {
) {
assert_eq!(dst_texture.desc.dimension, wgt::TextureDimension::D2);
let extent_base = wgt::Extent3d {
@ -506,5 +495,4 @@ fn clear_texture_via_render_passes<A: HalApi>(
}
}
}
Ok(())
}


@ -15,10 +15,9 @@ use crate::{
hal_api::HalApi,
hal_label, id,
init_tracker::MemoryInitKind,
resource::{self, Resource},
resource::{self, DestroyedResourceError, MissingBufferUsageError, ParentDevice, Resource},
snatch::SnatchGuard,
track::{Tracker, TrackerIndex, UsageConflict, UsageScope},
validation::{check_buffer_usage, MissingBufferUsageError},
track::{ResourceUsageCompatibilityError, Tracker, TrackerIndex, UsageScope},
Label,
};
@ -53,11 +52,6 @@ pub struct ComputePass<A: HalApi> {
// Resource binding dedupe state.
current_bind_groups: BindGroupStateChange,
current_pipeline: StateChange<id::ComputePipelineId>,
/// The device that this pass is associated with.
///
/// Used for quick validation during recording.
device_id: id::DeviceId,
}
impl<A: HalApi> ComputePass<A> {
@ -68,10 +62,6 @@ impl<A: HalApi> ComputePass<A> {
timestamp_writes,
} = desc;
let device_id = parent
.as_ref()
.map_or(id::DeviceId::dummy(0), |p| p.device.as_info().id());
Self {
base: Some(BasePass::new(label)),
parent,
@ -79,16 +69,9 @@ impl<A: HalApi> ComputePass<A> {
current_bind_groups: BindGroupStateChange::new(),
current_pipeline: StateChange::new(),
device_id,
}
}
#[inline]
pub fn parent_id(&self) -> Option<id::CommandBufferId> {
self.parent.as_ref().map(|cmd_buf| cmd_buf.as_info().id())
}
#[inline]
pub fn label(&self) -> Option<&str> {
self.base.as_ref().and_then(|base| base.label.as_deref())
@ -107,7 +90,10 @@ impl<A: HalApi> ComputePass<A> {
impl<A: HalApi> fmt::Debug for ComputePass<A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "ComputePass {{ parent: {:?} }}", self.parent_id())
match self.parent {
Some(ref cmd_buf) => write!(f, "ComputePass {{ parent: {} }}", cmd_buf.error_ident()),
None => write!(f, "ComputePass {{ parent: None }}"),
}
}
}
@ -170,28 +156,26 @@ pub enum ComputePassErrorInner {
Encoder(#[from] CommandEncoderError),
#[error("Parent encoder is invalid")]
InvalidParentEncoder,
#[error("Bind group at index {0:?} is invalid")]
InvalidBindGroup(u32),
#[error("Device {0:?} is invalid")]
InvalidDevice(id::DeviceId),
#[error("BindGroupId {0:?} is invalid")]
InvalidBindGroupId(id::BindGroupId),
#[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
BindGroupIndexOutOfRange { index: u32, max: u32 },
#[error("Compute pipeline {0:?} is invalid")]
InvalidPipeline(id::ComputePipelineId),
#[error("QuerySet {0:?} is invalid")]
InvalidQuerySet(id::QuerySetId),
#[error("Indirect buffer {0:?} is invalid or destroyed")]
InvalidIndirectBuffer(id::BufferId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("Indirect buffer uses bytes {offset}..{end_offset} which overruns indirect buffer of size {buffer_size}")]
IndirectBufferOverrun {
offset: u64,
end_offset: u64,
buffer_size: u64,
},
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(id::BufferId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(id::BufferId),
#[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict),
ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error("Cannot pop debug group, because number of pushed debug groups is zero")]
@ -225,9 +209,6 @@ impl PrettyError for ComputePassErrorInner {
Self::InvalidPipeline(id) => {
fmt.compute_pipeline_label(&id);
}
Self::InvalidIndirectBuffer(id) => {
fmt.buffer_label(&id);
}
Self::Dispatch(DispatchError::IncompatibleBindGroup { ref diff, .. }) => {
for d in diff {
fmt.note(&d);
@ -302,7 +283,7 @@ impl<'a, A: HalApi> State<'a, A> {
base_trackers: &mut Tracker<A>,
indirect_buffer: Option<TrackerIndex>,
snatch_guard: &SnatchGuard,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
for bind_group in self.binder.list_active() {
unsafe { self.scope.merge_bind_group(&bind_group.used)? };
// Note: stateless trackers are not merged: the lifetime reference
@ -361,11 +342,8 @@ impl Global {
);
};
if query_set.device.as_info().id() != cmd_buf.device.as_info().id() {
return (
ComputePass::new(None, arc_desc),
Some(CommandEncoderError::WrongDeviceForTimestampWritesQuerySet),
);
if let Err(e) = query_set.same_device_as(cmd_buf.as_ref()) {
return (ComputePass::new(None, arc_desc), Some(e.into()));
}
Some(ArcComputePassTimestampWrites {
@ -400,19 +378,22 @@ impl Global {
&self,
pass: &mut ComputePass<A>,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::Pass(pass.parent_id());
let Some(parent) = pass.parent.as_ref() else {
return Err(ComputePassErrorInner::InvalidParentEncoder).map_pass_err(scope);
};
let cmd_buf = pass
.parent
.as_ref()
.ok_or(ComputePassErrorInner::InvalidParentEncoder)
.map_pass_err(PassErrorScope::Pass(None))?;
parent.unlock_encoder().map_pass_err(scope)?;
let scope = PassErrorScope::Pass(Some(cmd_buf.as_info().id()));
cmd_buf.unlock_encoder().map_pass_err(scope)?;
let base = pass
.base
.take()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?;
self.compute_pass_end_impl(parent, base, pass.timestamp_writes.take())
self.compute_pass_end_impl(cmd_buf, base, pass.timestamp_writes.take())
}
#[doc(hidden)]
@ -466,12 +447,7 @@ impl Global {
let pass_scope = PassErrorScope::Pass(Some(cmd_buf.as_info().id()));
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(ComputePassErrorInner::InvalidDevice(
cmd_buf.device.as_info().id(),
))
.map_pass_err(pass_scope);
}
device.check_is_valid().map_pass_err(pass_scope)?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
@ -593,6 +569,8 @@ impl Global {
} => {
let scope = PassErrorScope::SetBindGroup(bind_group.as_info().id());
bind_group.same_device_as(cmd_buf).map_pass_err(scope)?;
let max_bind_groups = cmd_buf.limits.max_bind_groups;
if index >= max_bind_groups {
return Err(ComputePassErrorInner::BindGroupIndexOutOfRange {
@ -638,10 +616,7 @@ impl Global {
let pipeline_layout = pipeline_layout.as_ref().unwrap().raw();
for (i, e) in entries.iter().enumerate() {
if let Some(group) = e.group.as_ref() {
let raw_bg = group
.raw(&snatch_guard)
.ok_or(ComputePassErrorInner::InvalidBindGroup(i as u32))
.map_pass_err(scope)?;
let raw_bg = group.try_raw(&snatch_guard).map_pass_err(scope)?;
unsafe {
raw.set_bind_group(
pipeline_layout,
@ -658,6 +633,8 @@ impl Global {
let pipeline_id = pipeline.as_info().id();
let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
pipeline.same_device_as(cmd_buf).map_pass_err(scope)?;
state.pipeline = Some(pipeline_id);
let pipeline = tracker.compute_pipelines.insert_single(pipeline);
@ -682,10 +659,8 @@ impl Global {
if !entries.is_empty() {
for (i, e) in entries.iter().enumerate() {
if let Some(group) = e.group.as_ref() {
let raw_bg = group
.raw(&snatch_guard)
.ok_or(ComputePassErrorInner::InvalidBindGroup(i as u32))
.map_pass_err(scope)?;
let raw_bg =
group.try_raw(&snatch_guard).map_pass_err(scope)?;
unsafe {
raw.set_bind_group(
pipeline.layout.raw(),
@ -791,12 +766,13 @@ impl Global {
}
}
ArcComputeCommand::DispatchIndirect { buffer, offset } => {
let buffer_id = buffer.as_info().id();
let scope = PassErrorScope::Dispatch {
indirect: true,
pipeline: state.pipeline,
};
buffer.same_device_as(cmd_buf).map_pass_err(scope)?;
state.is_ready().map_pass_err(scope)?;
device
@ -806,9 +782,10 @@ impl Global {
state
.scope
.buffers
.insert_merge_single(buffer.clone(), hal::BufferUses::INDIRECT)
.merge_single(&buffer, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
buffer
.check_usage(wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
@ -821,11 +798,7 @@ impl Global {
.map_pass_err(scope);
}
let buf_raw = buffer
.raw
.get(&snatch_guard)
.ok_or(ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
.map_pass_err(scope)?;
let buf_raw = buffer.try_raw(&snatch_guard).map_pass_err(scope)?;
let stride = 3 * 4; // 3 integers, x/y/z group size
@ -890,6 +863,8 @@ impl Global {
} => {
let scope = PassErrorScope::WriteTimestamp;
query_set.same_device_as(cmd_buf).map_pass_err(scope)?;
device
.require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES)
.map_pass_err(scope)?;
@ -906,6 +881,8 @@ impl Global {
} => {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
query_set.same_device_as(cmd_buf).map_pass_err(scope)?;
let query_set = tracker.query_sets.insert_single(query_set);
validate_and_begin_pipeline_statistics_query(
@ -991,13 +968,9 @@ impl Global {
.bind_groups
.read()
.get_owned(bind_group_id)
.map_err(|_| ComputePassErrorInner::InvalidBindGroup(index))
.map_err(|_| ComputePassErrorInner::InvalidBindGroupId(bind_group_id))
.map_pass_err(scope)?;
if bind_group.device.as_info().id() != pass.device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::SetBindGroup {
index,
num_dynamic_offsets: offsets.len(),
@ -1016,7 +989,6 @@ impl Global {
let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
if redundant {
// Do redundant early-out **after** checking whether the pass is ended or not.
@ -1031,10 +1003,6 @@ impl Global {
.map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
if pipeline.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::SetPipeline(pipeline));
Ok(())
@ -1108,20 +1076,14 @@ impl Global {
indirect: true,
pipeline: pass.current_pipeline.last_state,
};
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let buffer = hub
.buffers
.read()
.get_owned(buffer_id)
.map_err(|_| ComputePassErrorInner::InvalidBuffer(buffer_id))
.get(buffer_id)
.map_err(|_| ComputePassErrorInner::InvalidBufferId(buffer_id))
.map_pass_err(scope)?;
if buffer.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands
.push(ArcComputeCommand::<A>::DispatchIndirect { buffer, offset });
@ -1185,7 +1147,6 @@ impl Global {
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::WriteTimestamp;
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
@ -1196,10 +1157,6 @@ impl Global {
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
if query_set.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::WriteTimestamp {
query_set,
query_index,
@ -1215,7 +1172,6 @@ impl Global {
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
@ -1226,10 +1182,6 @@ impl Global {
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
if query_set.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands
.push(ArcComputeCommand::BeginPipelineStatisticsQuery {
query_set,


@ -72,7 +72,7 @@ pub enum ComputeCommand {
impl ComputeCommand {
/// Resolves all ids in a list of commands into the corresponding resource Arc.
///
//
// TODO: Once resolving is done on-the-fly during recording, this function should be only needed with the replay feature:
// #[cfg(feature = "replay")]
pub fn resolve_compute_command_ids<A: HalApi>(
@ -98,7 +98,7 @@ impl ComputeCommand {
bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| {
ComputePassError {
scope: PassErrorScope::SetBindGroup(bind_group_id),
inner: ComputePassErrorInner::InvalidBindGroup(index),
inner: ComputePassErrorInner::InvalidBindGroupId(bind_group_id),
}
})?,
},
@ -132,7 +132,7 @@ impl ComputeCommand {
indirect: true,
pipeline: None, // TODO: not used right now, but once we do the resolve during recording we can use this again.
},
inner: ComputePassErrorInner::InvalidBuffer(buffer_id),
inner: ComputePassErrorInner::InvalidBufferId(buffer_id),
}
})?,
offset,

261 third_party/rust/wgpu-core/src/command/draw.rs (vendored)

@ -1,23 +1,14 @@
/*! Draw structures - shared between render passes and bundles.
!*/
use crate::{
binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError},
binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
error::ErrorFormatter,
hal_api::HalApi,
id,
pipeline::RenderPipeline,
resource::{Buffer, QuerySet},
track::UsageConflict,
validation::{MissingBufferUsageError, MissingTextureUsageError},
resource::{DestroyedResourceError, MissingBufferUsageError, MissingTextureUsageError},
track::ResourceUsageCompatibilityError,
};
use wgt::{BufferAddress, BufferSize, Color, VertexStepMode};
use wgt::VertexStepMode;
use std::{num::NonZeroU32, sync::Arc};
use thiserror::Error;
use super::RenderBundle;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
@ -69,8 +60,10 @@ pub enum DrawError {
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum RenderCommandError {
#[error("Bind group {0:?} is invalid")]
InvalidBindGroup(id::BindGroupId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(id::BufferId),
#[error("BindGroupId {0:?} is invalid")]
InvalidBindGroupId(id::BindGroupId),
#[error("Render bundle {0:?} is invalid")]
InvalidRenderBundle(id::RenderBundleId),
#[error("Bind group index {index} is greater than the device's requested `max_bind_group` limit {max}")]
@ -90,9 +83,9 @@ pub enum RenderCommandError {
#[error("Pipeline writes to depth/stencil, while the pass has read-only depth/stencil")]
IncompatiblePipelineRods,
#[error(transparent)]
UsageConflict(#[from] UsageConflict),
#[error("Buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
ResourceUsageCompatibility(#[from] ResourceUsageCompatibilityError),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error(transparent)]
MissingBufferUsage(#[from] MissingBufferUsageError),
#[error(transparent)]
@ -112,19 +105,12 @@ impl crate::error::PrettyError for RenderCommandError {
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
fmt.error(self);
match *self {
Self::InvalidBindGroup(id) => {
Self::InvalidBindGroupId(id) => {
fmt.bind_group_label(&id);
}
Self::InvalidPipeline(id) => {
fmt.render_pipeline_label(&id);
}
Self::UsageConflict(UsageConflict::TextureInvalid { id }) => {
fmt.texture_label(&id);
}
Self::UsageConflict(UsageConflict::BufferInvalid { id })
| Self::DestroyedBuffer(id) => {
fmt.buffer_label(&id);
}
_ => {}
};
}
@ -138,226 +124,3 @@ pub struct Rect<T> {
pub w: T,
pub h: T,
}
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum RenderCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group_id: id::BindGroupId,
},
SetPipeline(id::RenderPipelineId),
SetIndexBuffer {
buffer_id: id::BufferId,
index_format: wgt::IndexFormat,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendConstant(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
//TODO: use half-float to reduce the size?
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer_id: id::BufferId,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer_id: id::BufferId,
offset: BufferAddress,
count_buffer_id: id::BufferId,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
WriteTimestamp {
query_set_id: id::QuerySetId,
query_index: u32,
},
BeginOcclusionQuery {
query_index: u32,
},
EndOcclusionQuery,
BeginPipelineStatisticsQuery {
query_set_id: id::QuerySetId,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId),
}
/// Equivalent to `RenderCommand` with the Ids resolved into resource Arcs.
#[doc(hidden)]
#[derive(Clone, Debug)]
pub enum ArcRenderCommand<A: HalApi> {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup<A>>,
},
SetPipeline(Arc<RenderPipeline<A>>),
SetIndexBuffer {
buffer: Arc<Buffer<A>>,
index_format: wgt::IndexFormat,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendConstant(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
count_buffer: Arc<Buffer<A>>,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
WriteTimestamp {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
BeginOcclusionQuery {
query_index: u32,
},
EndOcclusionQuery,
BeginPipelineStatisticsQuery {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(Arc<RenderBundle<A>>),
}


@ -6,15 +6,13 @@ use crate::{
device::Device,
hal_api::HalApi,
init_tracker::*,
resource::{Resource, Texture},
resource::{DestroyedResourceError, Resource, Texture},
snatch::SnatchGuard,
track::{TextureTracker, Tracker},
FastHashMap,
};
use super::{
clear::clear_texture, BakedCommands, ClearError, DestroyedBufferError, DestroyedTextureError,
};
use super::{clear::clear_texture, BakedCommands, ClearError};
/// Surface that was discarded by `StoreOp::Discard` of a preceding renderpass.
/// Any read access to this surface needs to be preceded by a texture initialization.
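The struct this comment documents is elided by the hunk below, but from the fields it touches (`texture`, `layer`, and a mip range) the record is roughly shaped like this sketch; field names beyond the referenced ones are assumptions:

struct TextureSurfaceDiscard<A: HalApi> {
    texture: Arc<Texture<A>>,
    mip_level: u32,
    layer: u32,
}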
@ -85,7 +83,7 @@ impl<A: HalApi> CommandBufferTextureMemoryActions<A> {
// self.discards is empty!)
let init_actions = &mut self.init_actions;
self.discards.retain(|discarded_surface| {
if discarded_surface.texture.as_info().id() == action.texture.as_info().id()
if discarded_surface.texture.is_equal(&action.texture)
&& action.range.layer_range.contains(&discarded_surface.layer)
&& action
.range
@ -171,7 +169,7 @@ impl<A: HalApi> BakedCommands<A> {
&mut self,
device_tracker: &mut Tracker<A>,
snatch_guard: &SnatchGuard<'_>,
) -> Result<(), DestroyedBufferError> {
) -> Result<(), DestroyedResourceError> {
profiling::scope!("initialize_buffer_memory");
// Gather init ranges for each buffer so we can collapse them.
@ -193,7 +191,9 @@ impl<A: HalApi> BakedCommands<A> {
match buffer_use.kind {
MemoryInitKind::ImplicitlyInitialized => {}
MemoryInitKind::NeedsInitializedMemory => {
match uninitialized_ranges_per_buffer.entry(buffer_use.buffer.as_info().id()) {
match uninitialized_ranges_per_buffer
.entry(buffer_use.buffer.as_info().tracker_index())
{
Entry::Vacant(e) => {
e.insert((
buffer_use.buffer.clone(),
@ -208,7 +208,7 @@ impl<A: HalApi> BakedCommands<A> {
}
}
for (buffer_id, (buffer, mut ranges)) in uninitialized_ranges_per_buffer {
for (buffer, mut ranges) in uninitialized_ranges_per_buffer.into_values() {
// Collapse touching ranges.
ranges.sort_by_key(|r| r.start);
for i in (1..ranges.len()).rev() {
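
The loop body is cut off by the hunk boundary here; the collapse step it begins amounts to merging sorted, touching ranges, as in this self-contained sketch:

fn collapse_touching(ranges: &mut Vec<std::ops::Range<u64>>) {
    ranges.sort_by_key(|r| r.start);
    // Walk backwards so removals never shift the indices still to be visited.
    for i in (1..ranges.len()).rev() {
        if ranges[i - 1].end >= ranges[i].start {
            ranges[i - 1].end = ranges[i - 1].end.max(ranges[i].end);
            ranges.remove(i);
        }
    }
}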
@ -227,14 +227,9 @@ impl<A: HalApi> BakedCommands<A> {
// must already know about it.
let transition = device_tracker
.buffers
.set_single(&buffer, hal::BufferUses::COPY_DST)
.unwrap()
.1;
.set_single(&buffer, hal::BufferUses::COPY_DST);
let raw_buf = buffer
.raw
.get(snatch_guard)
.ok_or(DestroyedBufferError(buffer_id))?;
let raw_buf = buffer.try_raw(snatch_guard)?;
unsafe {
self.encoder.transition_buffers(
@ -277,7 +272,7 @@ impl<A: HalApi> BakedCommands<A> {
device_tracker: &mut Tracker<A>,
device: &Device<A>,
snatch_guard: &SnatchGuard<'_>,
) -> Result<(), DestroyedTextureError> {
) -> Result<(), DestroyedResourceError> {
profiling::scope!("initialize_texture_memory");
let mut ranges: Vec<TextureInitRange> = Vec::new();
@ -324,8 +319,8 @@ impl<A: HalApi> BakedCommands<A> {
// A Texture can be destroyed between the command recording
// and now, this is out of our control so we have to handle
// it gracefully.
if let Err(ClearError::InvalidTexture(id)) = clear_result {
return Err(DestroyedTextureError(id));
if let Err(ClearError::DestroyedResource(e)) = clear_result {
return Err(e);
}
// Other errors are unexpected.

86 third_party/rust/wgpu-core/src/command/mod.rs (vendored)

@ -9,6 +9,7 @@ mod dyn_compute_pass;
mod memory_init;
mod query;
mod render;
mod render_command;
mod transfer;
use std::sync::Arc;
@ -16,7 +17,8 @@ use std::sync::Arc;
pub(crate) use self::clear::clear_texture;
pub use self::{
bundle::*, clear::ClearError, compute::*, compute_command::ComputeCommand, draw::*,
dyn_compute_pass::DynComputePass, query::*, render::*, transfer::*,
dyn_compute_pass::DynComputePass, query::*, render::*, render_command::RenderCommand,
transfer::*,
};
pub(crate) use allocator::CommandAllocator;
@ -29,8 +31,9 @@ use crate::lock::{rank, Mutex};
use crate::snatch::SnatchGuard;
use crate::init_tracker::BufferInitTrackerAction;
use crate::resource::{Resource, ResourceInfo, ResourceType};
use crate::resource::{ParentDevice, Resource, ResourceInfo, ResourceType};
use crate::track::{Tracker, UsageScope};
use crate::LabelHelpers;
use crate::{api_log, global::Global, hal_api::HalApi, id, resource_log, Label};
use hal::CommandEncoder as _;
@ -140,7 +143,7 @@ pub(crate) struct CommandEncoder<A: HalApi> {
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
is_open: bool,
label: Option<String>,
hal_label: Option<String>,
}
//TODO: handle errors better
@ -216,8 +219,8 @@ impl<A: HalApi> CommandEncoder<A> {
pub(crate) fn open(&mut self) -> Result<&mut A::CommandEncoder, DeviceError> {
if !self.is_open {
self.is_open = true;
let label = self.label.as_deref();
unsafe { self.raw.begin_encoding(label)? };
let hal_label = self.hal_label.as_deref();
unsafe { self.raw.begin_encoding(hal_label)? };
}
Ok(&mut self.raw)
@ -227,9 +230,9 @@ impl<A: HalApi> CommandEncoder<A> {
/// its own label.
///
/// The underlying hal encoder is put in the "recording" state.
fn open_pass(&mut self, label: Option<&str>) -> Result<(), DeviceError> {
fn open_pass(&mut self, hal_label: Option<&str>) -> Result<(), DeviceError> {
self.is_open = true;
unsafe { self.raw.begin_encoding(label)? };
unsafe { self.raw.begin_encoding(hal_label)? };
Ok(())
}
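
The `label` → `hal_label` rename pairs with the constructor change further down, where the stored string comes from `label.to_hal(device.instance_flags)`. What that gating presumably does, sketched here as a free function (the real helper is a method on `Label`):

fn to_hal(label: Option<&str>, flags: wgt::InstanceFlags) -> Option<&str> {
    // Withhold user-supplied labels from the hal layer when the instance
    // was created with DISCARD_HAL_LABELS.
    if flags.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS) {
        None
    } else {
        label
    }
}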
@ -243,9 +246,6 @@ pub(crate) struct BakedCommands<A: HalApi> {
texture_memory_actions: CommandBufferTextureMemoryActions<A>,
}
pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
/// The mutable state of a [`CommandBuffer`].
pub struct CommandBufferMutable<A: HalApi> {
/// The [`wgpu_hal::Api::CommandBuffer`]s we've built so far, and the encoder
@ -320,10 +320,10 @@ pub struct CommandBuffer<A: HalApi> {
impl<A: HalApi> Drop for CommandBuffer<A> {
fn drop(&mut self) {
resource_log!("Drop {}", self.error_ident());
if self.data.lock().is_none() {
return;
}
resource_log!("resource::CommandBuffer::drop {:?}", self.info.label());
let mut baked = self.extract_baked_commands();
unsafe {
baked.encoder.reset_all(baked.list.into_iter());
@ -340,13 +340,13 @@ impl<A: HalApi> CommandBuffer<A> {
encoder: A::CommandEncoder,
device: &Arc<Device<A>>,
#[cfg(feature = "trace")] enable_tracing: bool,
label: Option<String>,
label: &Label,
) -> Self {
CommandBuffer {
device: device.clone(),
limits: device.limits.clone(),
support_clear_texture: device.features.contains(wgt::Features::CLEAR_TEXTURE),
info: ResourceInfo::new(label.as_deref().unwrap_or("<CommandBuffer>"), None),
info: ResourceInfo::new(label, None),
data: Mutex::new(
rank::COMMAND_BUFFER_DATA,
Some(CommandBufferMutable {
@ -354,7 +354,7 @@ impl<A: HalApi> CommandBuffer<A> {
raw: encoder,
is_open: false,
list: Vec::new(),
label,
hal_label: label.to_hal(device.instance_flags).map(str::to_owned),
},
status: CommandEncoderStatus::Recording,
trackers: Tracker::new(),
@ -506,10 +506,7 @@ impl<A: HalApi> CommandBuffer<A> {
}
pub(crate) fn extract_baked_commands(&mut self) -> BakedCommands<A> {
log::trace!(
"Extracting BakedCommands from CommandBuffer {:?}",
self.info.label()
);
log::trace!("Extracting BakedCommands from {}", self.error_ident());
let data = self.data.lock().take().unwrap();
BakedCommands {
encoder: data.encoder.raw,
@ -541,13 +538,10 @@ impl<A: HalApi> Resource for CommandBuffer<A> {
}
}
#[derive(Copy, Clone, Debug)]
pub struct BasePassRef<'a, C> {
pub label: Option<&'a str>,
pub commands: &'a [C],
pub dynamic_offsets: &'a [wgt::DynamicOffset],
pub string_data: &'a [u8],
pub push_constant_data: &'a [u32],
impl<A: HalApi> ParentDevice<A> for CommandBuffer<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
/// A stream of commands for a render pass or compute pass.
@ -562,7 +556,7 @@ pub struct BasePassRef<'a, C> {
/// [`SetBindGroup`]: RenderCommand::SetBindGroup
/// [`InsertDebugMarker`]: RenderCommand::InsertDebugMarker
#[doc(hidden)]
#[derive(Debug)]
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct BasePass<C> {
pub label: Option<String>,
@ -599,27 +593,6 @@ impl<C: Clone> BasePass<C> {
push_constant_data: Vec::new(),
}
}
#[cfg(feature = "trace")]
fn from_ref(base: BasePassRef<C>) -> Self {
Self {
label: base.label.map(str::to_string),
commands: base.commands.to_vec(),
dynamic_offsets: base.dynamic_offsets.to_vec(),
string_data: base.string_data.to_vec(),
push_constant_data: base.push_constant_data.to_vec(),
}
}
pub fn as_ref(&self) -> BasePassRef<C> {
BasePassRef {
label: self.label.as_deref(),
commands: &self.commands,
dynamic_offsets: &self.dynamic_offsets,
string_data: &self.string_data,
push_constant_data: &self.push_constant_data,
}
}
}
#[derive(Clone, Debug, Error)]
@ -633,11 +606,8 @@ pub enum CommandEncoderError {
Device(#[from] DeviceError),
#[error("Command encoder is locked by a previously created render/compute pass. Before recording any new commands, the pass must be ended.")]
Locked,
#[error("QuerySet provided for pass timestamp writes is invalid.")]
InvalidTimestampWritesQuerySetId,
#[error("QuerySet provided for pass timestamp writes that was created by a different device.")]
WrongDeviceForTimestampWritesQuerySet,
}
impl Global {
@ -879,6 +849,14 @@ trait MapPassErr<T, O> {
fn map_pass_err(self, scope: PassErrorScope) -> Result<T, O>;
}
#[derive(Clone, Copy, Debug)]
pub enum DrawKind {
Draw,
DrawIndirect,
MultiDrawIndirect,
MultiDrawIndirectCount,
}
#[derive(Clone, Copy, Debug, Error)]
pub enum PassErrorScope {
#[error("In a bundle parameter")]
@ -902,14 +880,18 @@ pub enum PassErrorScope {
SetVertexBuffer(id::BufferId),
#[error("In a set_index_buffer command")]
SetIndexBuffer(id::BufferId),
#[error("In a set_blend_constant command")]
SetBlendConstant,
#[error("In a set_stencil_reference command")]
SetStencilReference,
#[error("In a set_viewport command")]
SetViewport,
#[error("In a set_scissor_rect command")]
SetScissorRect,
#[error("In a draw command, indexed:{indexed} indirect:{indirect}")]
#[error("In a draw command, kind: {kind:?}")]
Draw {
kind: DrawKind,
indexed: bool,
indirect: bool,
pipeline: Option<id::RenderPipelineId>,
},
#[error("While resetting queries after the renderpass was ran")]


@ -7,11 +7,11 @@ use crate::{
device::{DeviceError, MissingFeatures},
global::Global,
hal_api::HalApi,
id::{self, Id},
id,
init_tracker::MemoryInitKind,
resource::{QuerySet, Resource},
storage::Storage,
Epoch, FastHashMap, Index,
resource::{DestroyedResourceError, ParentDevice, QuerySet},
track::TrackerIndex,
FastHashMap,
};
use std::{iter, marker::PhantomData, sync::Arc};
use thiserror::Error;
@ -19,7 +19,7 @@ use wgt::BufferAddress;
#[derive(Debug)]
pub(crate) struct QueryResetMap<A: HalApi> {
map: FastHashMap<Index, (Vec<bool>, Epoch)>,
map: FastHashMap<TrackerIndex, (Vec<bool>, Arc<QuerySet<A>>)>,
_phantom: PhantomData<A>,
}
impl<A: HalApi> QueryResetMap<A> {
@ -30,31 +30,22 @@ impl<A: HalApi> QueryResetMap<A> {
}
}
pub fn use_query_set(
&mut self,
id: id::QuerySetId,
query_set: &QuerySet<A>,
query: u32,
) -> bool {
let (index, epoch, _) = id.unzip();
pub fn use_query_set(&mut self, query_set: &Arc<QuerySet<A>>, query: u32) -> bool {
let vec_pair = self
.map
.entry(index)
.or_insert_with(|| (vec![false; query_set.desc.count as usize], epoch));
.entry(query_set.info.tracker_index())
.or_insert_with(|| {
(
vec![false; query_set.desc.count as usize],
query_set.clone(),
)
});
std::mem::replace(&mut vec_pair.0[query as usize], true)
}
pub fn reset_queries(
&mut self,
raw_encoder: &mut A::CommandEncoder,
query_set_storage: &Storage<QuerySet<A>>,
backend: wgt::Backend,
) -> Result<(), id::QuerySetId> {
for (query_set_id, (state, epoch)) in self.map.drain() {
let id = Id::zip(query_set_id, epoch, backend);
let query_set = query_set_storage.get(id).map_err(|_| id)?;
pub fn reset_queries(&mut self, raw_encoder: &mut A::CommandEncoder) {
for (_, (state, query_set)) in self.map.drain() {
debug_assert_eq!(state.len(), query_set.desc.count as usize);
// Need to find all "runs" of values which need resets. If the state vector is:
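The worked example that originally followed this comment is elided by the hunk; the computation is plain run-length grouping over the bool vector, as in this self-contained sketch (each returned range would become one reset call on the raw encoder):

fn runs_of_set_queries(state: &[bool]) -> Vec<std::ops::Range<u32>> {
    let mut runs = Vec::new();
    let mut start = None;
    for (i, &used) in state.iter().enumerate() {
        match (used, start) {
            (true, None) => start = Some(i as u32),
            (false, Some(s)) => {
                runs.push(s..i as u32);
                start = None;
            }
            _ => {}
        }
    }
    if let Some(s) = start {
        runs.push(s..state.len() as u32);
    }
    runs
}

// e.g. [true, true, false, true] yields [0..2, 3..4].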
@ -79,8 +70,6 @@ impl<A: HalApi> QueryResetMap<A> {
}
}
}
Ok(())
}
}
@ -114,8 +103,10 @@ pub enum QueryError {
Use(#[from] QueryUseError),
#[error("Error encountered while trying to resolve a query")]
Resolve(#[from] ResolveError),
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(id::BufferId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(id::BufferId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("QuerySet {0:?} is invalid or destroyed")]
InvalidQuerySet(id::QuerySetId),
}
@ -123,11 +114,8 @@ pub enum QueryError {
impl crate::error::PrettyError for QueryError {
fn fmt_pretty(&self, fmt: &mut crate::error::ErrorFormatter) {
fmt.error(self);
match *self {
Self::InvalidBuffer(id) => fmt.buffer_label(&id),
Self::InvalidQuerySet(id) => fmt.query_set_label(&id),
_ => {}
if let Self::InvalidQuerySet(id) = *self {
fmt.query_set_label(&id)
}
}
}
@ -184,7 +172,7 @@ pub enum ResolveError {
impl<A: HalApi> QuerySet<A> {
fn validate_query(
&self,
self: &Arc<Self>,
query_type: SimplifiedQueryType,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
@ -192,7 +180,7 @@ impl<A: HalApi> QuerySet<A> {
// We need to defer our resets because we are in a renderpass,
// add the usage to the reset map.
if let Some(reset) = reset_state {
let used = reset.use_query_set(self.info.id(), self, query_index);
let used = reset.use_query_set(self, query_index);
if used {
return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index });
}
@ -217,7 +205,7 @@ impl<A: HalApi> QuerySet<A> {
}
pub(super) fn validate_and_write_timestamp(
&self,
self: &Arc<Self>,
raw_encoder: &mut A::CommandEncoder,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
@ -355,10 +343,11 @@ impl Global {
let raw_encoder = encoder.open()?;
let query_set_guard = hub.query_sets.read();
let query_set = tracker
.query_sets
.add_single(&*query_set_guard, query_set_id)
.ok_or(QueryError::InvalidQuerySet(query_set_id))?;
let query_set = query_set_guard
.get(query_set_id)
.map_err(|_| QueryError::InvalidQuerySet(query_set_id))?;
tracker.query_sets.add_single(query_set);
query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?;
@ -399,31 +388,26 @@ impl Global {
if destination_offset % wgt::QUERY_RESOLVE_BUFFER_ALIGNMENT != 0 {
return Err(QueryError::Resolve(ResolveError::BufferOffsetAlignment));
}
let query_set_guard = hub.query_sets.read();
let query_set = tracker
.query_sets
.add_single(&*query_set_guard, query_set_id)
.ok_or(QueryError::InvalidQuerySet(query_set_id))?;
let query_set = query_set_guard
.get(query_set_id)
.map_err(|_| QueryError::InvalidQuerySet(query_set_id))?;
if query_set.device.as_info().id() != cmd_buf.device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
tracker.query_sets.add_single(query_set);
let (dst_buffer, dst_pending) = {
let buffer_guard = hub.buffers.read();
let dst_buffer = buffer_guard
.get(destination)
.map_err(|_| QueryError::InvalidBuffer(destination))?;
query_set.same_device_as(cmd_buf.as_ref())?;
if dst_buffer.device.as_info().id() != cmd_buf.device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
let dst_buffer = hub
.buffers
.get(destination)
.map_err(|_| QueryError::InvalidBufferId(destination))?;
tracker
.buffers
.set_single(dst_buffer, hal::BufferUses::COPY_DST)
.ok_or(QueryError::InvalidBuffer(destination))?
};
dst_buffer.same_device_as(cmd_buf.as_ref())?;
let dst_pending = tracker
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
let snatch_guard = dst_buffer.device.snatchable_lock.read();
@ -473,9 +457,7 @@ impl Global {
MemoryInitKind::ImplicitlyInitialized,
));
let raw_dst_buffer = dst_buffer
.raw(&snatch_guard)
.ok_or(QueryError::InvalidBuffer(destination))?;
let raw_dst_buffer = dst_buffer.try_raw(&snatch_guard)?;
unsafe {
raw_encoder.transition_buffers(dst_barrier.into_iter());

1026 third_party/rust/wgpu-core/src/command/render.rs (vendored)

The diff for this file is not shown because of its large size.

668 third_party/rust/wgpu-core/src/command/render_command.rs (vendored, new file)

@ -0,0 +1,668 @@
use crate::{
binding_model::BindGroup,
hal_api::HalApi,
id,
pipeline::RenderPipeline,
resource::{Buffer, QuerySet},
};
use wgt::{BufferAddress, BufferSize, Color};
use std::{num::NonZeroU32, sync::Arc};
use super::{
DrawKind, PassErrorScope, Rect, RenderBundle, RenderCommandError, RenderPassError,
RenderPassErrorInner,
};
#[doc(hidden)]
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum RenderCommand {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group_id: id::BindGroupId,
},
SetPipeline(id::RenderPipelineId),
SetIndexBuffer {
buffer_id: id::BufferId,
index_format: wgt::IndexFormat,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendConstant(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
//TODO: use half-float to reduce the size?
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer_id: id::BufferId,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer_id: id::BufferId,
offset: BufferAddress,
count_buffer_id: id::BufferId,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
WriteTimestamp {
query_set_id: id::QuerySetId,
query_index: u32,
},
BeginOcclusionQuery {
query_index: u32,
},
EndOcclusionQuery,
BeginPipelineStatisticsQuery {
query_set_id: id::QuerySetId,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId),
}
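
One subtlety in the `SetPushConstant` docs above is that `offset` and `size_bytes` count bytes while `values_offset` indexes `u32` elements. A hypothetical helper, name and shape illustrative only, that materializes the values (assumes the range was validated at record time):

fn push_constant_words(
    push_constant_data: &[u32],
    size_bytes: u32,
    values_offset: Option<u32>,
) -> Vec<u32> {
    let count = (size_bytes / 4) as usize;
    match values_offset {
        Some(idx) => {
            let start = idx as usize;
            push_constant_data[start..start + count].to_vec()
        }
        // `None` means "write zeros", as render bundles do to clear state.
        None => vec![0; count],
    }
}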
impl RenderCommand {
/// Resolves all ids in a list of commands into the corresponding resource Arc.
//
// TODO: Once resolving is done on-the-fly during recording, this function should be only needed with the replay feature:
// #[cfg(feature = "replay")]
pub fn resolve_render_command_ids<A: HalApi>(
hub: &crate::hub::Hub<A>,
commands: &[RenderCommand],
) -> Result<Vec<ArcRenderCommand<A>>, RenderPassError> {
let buffers_guard = hub.buffers.read();
let bind_group_guard = hub.bind_groups.read();
let query_set_guard = hub.query_sets.read();
let pipelines_guard = hub.render_pipelines.read();
let resolved_commands: Vec<ArcRenderCommand<A>> = commands
.iter()
.map(|c| -> Result<ArcRenderCommand<A>, RenderPassError> {
Ok(match *c {
RenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group_id,
} => ArcRenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group: bind_group_guard.get_owned(bind_group_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::SetBindGroup(bind_group_id),
inner: RenderPassErrorInner::InvalidBindGroup(index),
}
})?,
},
RenderCommand::SetPipeline(pipeline_id) => ArcRenderCommand::SetPipeline(
pipelines_guard
.get_owned(pipeline_id)
.map_err(|_| RenderPassError {
scope: PassErrorScope::SetPipelineRender(pipeline_id),
inner: RenderCommandError::InvalidPipeline(pipeline_id).into(),
})?,
),
RenderCommand::SetPushConstant {
offset,
size_bytes,
values_offset,
stages,
} => ArcRenderCommand::SetPushConstant {
offset,
size_bytes,
values_offset,
stages,
},
RenderCommand::PushDebugGroup { color, len } => {
ArcRenderCommand::PushDebugGroup { color, len }
}
RenderCommand::PopDebugGroup => ArcRenderCommand::PopDebugGroup,
RenderCommand::InsertDebugMarker { color, len } => {
ArcRenderCommand::InsertDebugMarker { color, len }
}
RenderCommand::WriteTimestamp {
query_set_id,
query_index,
} => ArcRenderCommand::WriteTimestamp {
query_set: query_set_guard.get_owned(query_set_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::WriteTimestamp,
inner: RenderPassErrorInner::InvalidQuerySet(query_set_id),
}
})?,
query_index,
},
RenderCommand::BeginPipelineStatisticsQuery {
query_set_id,
query_index,
} => ArcRenderCommand::BeginPipelineStatisticsQuery {
query_set: query_set_guard.get_owned(query_set_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::BeginPipelineStatisticsQuery,
inner: RenderPassErrorInner::InvalidQuerySet(query_set_id),
}
})?,
query_index,
},
RenderCommand::EndPipelineStatisticsQuery => {
ArcRenderCommand::EndPipelineStatisticsQuery
}
RenderCommand::SetIndexBuffer {
buffer_id,
index_format,
offset,
size,
} => ArcRenderCommand::SetIndexBuffer {
buffer: buffers_guard.get_owned(buffer_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::SetIndexBuffer(buffer_id),
inner: RenderCommandError::InvalidBufferId(buffer_id).into(),
}
})?,
index_format,
offset,
size,
},
RenderCommand::SetVertexBuffer {
slot,
buffer_id,
offset,
size,
} => ArcRenderCommand::SetVertexBuffer {
slot,
buffer: buffers_guard.get_owned(buffer_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::SetVertexBuffer(buffer_id),
inner: RenderCommandError::InvalidBufferId(buffer_id).into(),
}
})?,
offset,
size,
},
RenderCommand::SetBlendConstant(color) => {
ArcRenderCommand::SetBlendConstant(color)
}
RenderCommand::SetStencilReference(reference) => {
ArcRenderCommand::SetStencilReference(reference)
}
RenderCommand::SetViewport {
rect,
depth_min,
depth_max,
} => ArcRenderCommand::SetViewport {
rect,
depth_min,
depth_max,
},
RenderCommand::SetScissor(scissor) => ArcRenderCommand::SetScissor(scissor),
RenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => ArcRenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
},
RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
} => ArcRenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
},
RenderCommand::MultiDrawIndirect {
buffer_id,
offset,
count,
indexed,
} => ArcRenderCommand::MultiDrawIndirect {
buffer: buffers_guard.get_owned(buffer_id).map_err(|_| {
RenderPassError {
scope: PassErrorScope::Draw {
kind: if count.is_some() {
DrawKind::MultiDrawIndirect
} else {
DrawKind::DrawIndirect
},
indexed,
pipeline: None,
},
inner: RenderCommandError::InvalidBufferId(buffer_id).into(),
}
})?,
offset,
count,
indexed,
},
RenderCommand::MultiDrawIndirectCount {
buffer_id,
offset,
count_buffer_id,
count_buffer_offset,
max_count,
indexed,
} => {
let scope = PassErrorScope::Draw {
kind: DrawKind::MultiDrawIndirectCount,
indexed,
pipeline: None,
};
ArcRenderCommand::MultiDrawIndirectCount {
buffer: buffers_guard.get_owned(buffer_id).map_err(|_| {
RenderPassError {
scope,
inner: RenderCommandError::InvalidBufferId(buffer_id).into(),
}
})?,
offset,
count_buffer: buffers_guard.get_owned(count_buffer_id).map_err(
|_| RenderPassError {
scope,
inner: RenderCommandError::InvalidBufferId(count_buffer_id)
.into(),
},
)?,
count_buffer_offset,
max_count,
indexed,
}
}
RenderCommand::BeginOcclusionQuery { query_index } => {
ArcRenderCommand::BeginOcclusionQuery { query_index }
}
RenderCommand::EndOcclusionQuery => ArcRenderCommand::EndOcclusionQuery,
RenderCommand::ExecuteBundle(bundle) => ArcRenderCommand::ExecuteBundle(
hub.render_bundles.read().get_owned(bundle).map_err(|_| {
RenderPassError {
scope: PassErrorScope::ExecuteBundle,
inner: RenderCommandError::InvalidRenderBundle(bundle).into(),
}
})?,
),
})
})
.collect::<Result<Vec<_>, RenderPassError>>()?;
Ok(resolved_commands)
}
}
/// Equivalent to `RenderCommand` with the Ids resolved into resource Arcs.
#[doc(hidden)]
#[derive(Clone, Debug)]
pub enum ArcRenderCommand<A: HalApi> {
SetBindGroup {
index: u32,
num_dynamic_offsets: usize,
bind_group: Arc<BindGroup<A>>,
},
SetPipeline(Arc<RenderPipeline<A>>),
SetIndexBuffer {
buffer: Arc<Buffer<A>>,
index_format: wgt::IndexFormat,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetVertexBuffer {
slot: u32,
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
size: Option<BufferSize>,
},
SetBlendConstant(Color),
SetStencilReference(u32),
SetViewport {
rect: Rect<f32>,
depth_min: f32,
depth_max: f32,
},
SetScissor(Rect<u32>),
/// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
///
/// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
/// of the restrictions these commands must satisfy.
SetPushConstant {
/// Which stages we are setting push constant values for.
stages: wgt::ShaderStages,
/// The byte offset within the push constant storage to write to. This
/// must be a multiple of four.
offset: u32,
/// The number of bytes to write. This must be a multiple of four.
size_bytes: u32,
/// Index in [`BasePass::push_constant_data`] of the start of the data
/// to be written.
///
/// Note: this is not a byte offset like `offset`. Rather, it is the
/// index of the first `u32` element in `push_constant_data` to read.
///
/// `None` means zeros should be written to the destination range, and
/// there is no corresponding data in `push_constant_data`. This is used
/// by render bundles, which explicitly clear out any state that
/// post-bundle code might see.
values_offset: Option<u32>,
},
Draw {
vertex_count: u32,
instance_count: u32,
first_vertex: u32,
first_instance: u32,
},
DrawIndexed {
index_count: u32,
instance_count: u32,
first_index: u32,
base_vertex: i32,
first_instance: u32,
},
MultiDrawIndirect {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
/// Count of `None` represents a non-multi call.
count: Option<NonZeroU32>,
indexed: bool,
},
MultiDrawIndirectCount {
buffer: Arc<Buffer<A>>,
offset: BufferAddress,
count_buffer: Arc<Buffer<A>>,
count_buffer_offset: BufferAddress,
max_count: u32,
indexed: bool,
},
PushDebugGroup {
color: u32,
len: usize,
},
PopDebugGroup,
InsertDebugMarker {
color: u32,
len: usize,
},
WriteTimestamp {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
BeginOcclusionQuery {
query_index: u32,
},
EndOcclusionQuery,
BeginPipelineStatisticsQuery {
query_set: Arc<QuerySet<A>>,
query_index: u32,
},
EndPipelineStatisticsQuery,
ExecuteBundle(Arc<RenderBundle<A>>),
}
#[cfg(feature = "trace")]
impl<A: HalApi> From<&ArcRenderCommand<A>> for RenderCommand {
fn from(value: &ArcRenderCommand<A>) -> Self {
use crate::resource::Resource as _;
match value {
ArcRenderCommand::SetBindGroup {
index,
num_dynamic_offsets,
bind_group,
} => RenderCommand::SetBindGroup {
index: *index,
num_dynamic_offsets: *num_dynamic_offsets,
bind_group_id: bind_group.as_info().id(),
},
ArcRenderCommand::SetPipeline(pipeline) => {
RenderCommand::SetPipeline(pipeline.as_info().id())
}
ArcRenderCommand::SetPushConstant {
offset,
size_bytes,
values_offset,
stages,
} => RenderCommand::SetPushConstant {
offset: *offset,
size_bytes: *size_bytes,
values_offset: *values_offset,
stages: *stages,
},
ArcRenderCommand::PushDebugGroup { color, len } => RenderCommand::PushDebugGroup {
color: *color,
len: *len,
},
ArcRenderCommand::PopDebugGroup => RenderCommand::PopDebugGroup,
ArcRenderCommand::InsertDebugMarker { color, len } => {
RenderCommand::InsertDebugMarker {
color: *color,
len: *len,
}
}
ArcRenderCommand::WriteTimestamp {
query_set,
query_index,
} => RenderCommand::WriteTimestamp {
query_set_id: query_set.as_info().id(),
query_index: *query_index,
},
ArcRenderCommand::BeginPipelineStatisticsQuery {
query_set,
query_index,
} => RenderCommand::BeginPipelineStatisticsQuery {
query_set_id: query_set.as_info().id(),
query_index: *query_index,
},
ArcRenderCommand::EndPipelineStatisticsQuery => {
RenderCommand::EndPipelineStatisticsQuery
}
ArcRenderCommand::SetIndexBuffer {
buffer,
index_format,
offset,
size,
} => RenderCommand::SetIndexBuffer {
buffer_id: buffer.as_info().id(),
index_format: *index_format,
offset: *offset,
size: *size,
},
ArcRenderCommand::SetVertexBuffer {
slot,
buffer,
offset,
size,
} => RenderCommand::SetVertexBuffer {
slot: *slot,
buffer_id: buffer.as_info().id(),
offset: *offset,
size: *size,
},
ArcRenderCommand::SetBlendConstant(color) => RenderCommand::SetBlendConstant(*color),
ArcRenderCommand::SetStencilReference(reference) => {
RenderCommand::SetStencilReference(*reference)
}
ArcRenderCommand::SetViewport {
rect,
depth_min,
depth_max,
} => RenderCommand::SetViewport {
rect: *rect,
depth_min: *depth_min,
depth_max: *depth_max,
},
ArcRenderCommand::SetScissor(scissor) => RenderCommand::SetScissor(*scissor),
ArcRenderCommand::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => RenderCommand::Draw {
vertex_count: *vertex_count,
instance_count: *instance_count,
first_vertex: *first_vertex,
first_instance: *first_instance,
},
ArcRenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
base_vertex,
first_instance,
} => RenderCommand::DrawIndexed {
index_count: *index_count,
instance_count: *instance_count,
first_index: *first_index,
base_vertex: *base_vertex,
first_instance: *first_instance,
},
ArcRenderCommand::MultiDrawIndirect {
buffer,
offset,
count,
indexed,
} => RenderCommand::MultiDrawIndirect {
buffer_id: buffer.as_info().id(),
offset: *offset,
count: *count,
indexed: *indexed,
},
ArcRenderCommand::MultiDrawIndirectCount {
buffer,
offset,
count_buffer,
count_buffer_offset,
max_count,
indexed,
} => RenderCommand::MultiDrawIndirectCount {
buffer_id: buffer.as_info().id(),
offset: *offset,
count_buffer_id: count_buffer.as_info().id(),
count_buffer_offset: *count_buffer_offset,
max_count: *max_count,
indexed: *indexed,
},
ArcRenderCommand::BeginOcclusionQuery { query_index } => {
RenderCommand::BeginOcclusionQuery {
query_index: *query_index,
}
}
ArcRenderCommand::EndOcclusionQuery => RenderCommand::EndOcclusionQuery,
ArcRenderCommand::ExecuteBundle(bundle) => {
RenderCommand::ExecuteBundle(bundle.as_info().id())
}
}
}
}
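This From impl exists only for the trace feature: the live command stream holds Arcs so resources outlive the encoder, while a replay trace can only store plain ids. A minimal sketch of the same Arc-to-id conversion, using hypothetical stand-in types rather than wgpu-core's real ones (in the real impl, `as_info().id()` plays the role of the `id` field):

use std::sync::Arc;

#[derive(Clone, Copy, Debug, PartialEq)]
struct Id(u64);

struct BindGroup {
    id: Id,
}

// Live commands keep the resource alive for as long as the encoder does.
enum LiveCommand {
    SetBindGroup { index: u32, bind_group: Arc<BindGroup> },
}

// Traced commands carry only ids, which is all a replay file can hold.
enum TracedCommand {
    SetBindGroup { index: u32, bind_group_id: Id },
}

impl From<&LiveCommand> for TracedCommand {
    fn from(value: &LiveCommand) -> Self {
        match value {
            LiveCommand::SetBindGroup { index, bind_group } => TracedCommand::SetBindGroup {
                index: *index,
                bind_group_id: bind_group.id,
            },
        }
    }
}

fn main() {
    let live = LiveCommand::SetBindGroup {
        index: 0,
        bind_group: Arc::new(BindGroup { id: Id(7) }),
    };
    let TracedCommand::SetBindGroup { bind_group_id, .. } = TracedCommand::from(&live);
    assert_eq!(bind_group_id, Id(7));
}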


@ -8,12 +8,12 @@ use crate::{
error::{ErrorFormatter, PrettyError},
global::Global,
hal_api::HalApi,
id::{BufferId, CommandEncoderId, DeviceId, TextureId},
id::{BufferId, CommandEncoderId, TextureId},
init_tracker::{
has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange,
TextureInitTrackerAction,
},
resource::{Resource, Texture, TextureErrorDimension},
resource::{DestroyedResourceError, ParentDevice, Texture, TextureErrorDimension},
snatch::SnatchGuard,
track::{TextureSelector, Tracker},
};
@ -41,12 +41,10 @@ pub enum CopySide {
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum TransferError {
#[error("Device {0:?} is invalid")]
InvalidDevice(DeviceId),
#[error("Buffer {0:?} is invalid or destroyed")]
InvalidBuffer(BufferId),
#[error("Texture {0:?} is invalid or destroyed")]
InvalidTexture(TextureId),
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(BufferId),
#[error("TextureId {0:?} is invalid")]
InvalidTextureId(TextureId),
#[error("Source and destination cannot be the same buffer")]
SameSourceDestinationBuffer,
#[error("Source buffer/texture is missing the `COPY_SRC` usage flag")]
@ -145,33 +143,14 @@ pub enum TransferError {
impl PrettyError for TransferError {
fn fmt_pretty(&self, fmt: &mut ErrorFormatter) {
fmt.error(self);
match *self {
Self::InvalidBuffer(id) => {
fmt.buffer_label(&id);
if let Self::MissingCopyDstUsageFlag(buf_opt, tex_opt) = *self {
if let Some(buf) = buf_opt {
fmt.buffer_label_with_key(&buf, "destination");
}
Self::InvalidTexture(id) => {
fmt.texture_label(&id);
if let Some(tex) = tex_opt {
fmt.texture_label_with_key(&tex, "destination");
}
// Self::MissingCopySrcUsageFlag(buf_opt, tex_opt) => {
// if let Some(buf) = buf_opt {
// let name = crate::gfx_select!(buf => global.buffer_label(buf));
// ret.push_str(&format_label_line("source", &name));
// }
// if let Some(tex) = tex_opt {
// let name = crate::gfx_select!(tex => global.texture_label(tex));
// ret.push_str(&format_label_line("source", &name));
// }
// }
Self::MissingCopyDstUsageFlag(buf_opt, tex_opt) => {
if let Some(buf) = buf_opt {
fmt.buffer_label_with_key(&buf, "destination");
}
if let Some(tex) = tex_opt {
fmt.texture_label_with_key(&tex, "destination");
}
}
_ => {}
};
}
}
}
/// Error encountered while attempting to do a copy on a command encoder.
@ -182,6 +161,8 @@ pub enum CopyError {
Encoder(#[from] CommandEncoderError),
#[error("Copy error")]
Transfer(#[from] TransferError),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
}
impl From<DeviceError> for CopyError {
@ -579,9 +560,7 @@ impl Global {
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into());
}
device.check_is_valid()?;
#[cfg(feature = "trace")]
if let Some(ref mut list) = cmd_buf_data.commands {
@ -596,52 +575,38 @@ impl Global {
let snatch_guard = device.snatchable_lock.read();
let (src_buffer, src_pending) = {
let buffer_guard = hub.buffers.read();
let src_buffer = buffer_guard
.get(source)
.map_err(|_| TransferError::InvalidBuffer(source))?;
let src_buffer = hub
.buffers
.get(source)
.map_err(|_| TransferError::InvalidBufferId(source))?;
if src_buffer.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
src_buffer.same_device_as(cmd_buf.as_ref())?;
cmd_buf_data
.trackers
.buffers
.set_single(src_buffer, hal::BufferUses::COPY_SRC)
.ok_or(TransferError::InvalidBuffer(source))?
};
let src_raw = src_buffer
.raw
.get(&snatch_guard)
.ok_or(TransferError::InvalidBuffer(source))?;
let src_pending = cmd_buf_data
.trackers
.buffers
.set_single(&src_buffer, hal::BufferUses::COPY_SRC);
let src_raw = src_buffer.try_raw(&snatch_guard)?;
if !src_buffer.usage.contains(BufferUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
// expecting only a single barrier
let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard));
let (dst_buffer, dst_pending) = {
let buffer_guard = hub.buffers.read();
let dst_buffer = buffer_guard
.get(destination)
.map_err(|_| TransferError::InvalidBuffer(destination))?;
let dst_buffer = hub
.buffers
.get(destination)
.map_err(|_| TransferError::InvalidBufferId(destination))?;
if dst_buffer.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
dst_buffer.same_device_as(cmd_buf.as_ref())?;
cmd_buf_data
.trackers
.buffers
.set_single(dst_buffer, hal::BufferUses::COPY_DST)
.ok_or(TransferError::InvalidBuffer(destination))?
};
let dst_raw = dst_buffer
.raw
.get(&snatch_guard)
.ok_or(TransferError::InvalidBuffer(destination))?;
let dst_pending = cmd_buf_data
.trackers
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
let dst_raw = dst_buffer.try_raw(&snatch_guard)?;
if !dst_buffer.usage.contains(BufferUsages::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into());
}
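This hunk shows the rewrite repeated throughout transfer.rs: the manual device-id comparison becomes same_device_as, set_single no longer returns an Option, and the raw.get(&snatch_guard).ok_or(InvalidBuffer(id)) dance collapses into try_raw. A sketch of what a try_raw-style accessor looks like, with stand-in types rather than wgpu-core's actual definitions:

struct SnatchGuard;
struct RawBuffer;

#[derive(Debug)]
struct DestroyedResourceError;

struct Buffer {
    // Becomes None once the buffer is destroyed and its hal object snatched.
    raw: Option<RawBuffer>,
}

impl Buffer {
    // One fallible accessor replaces per-call-site `.ok_or(InvalidBuffer(id))`,
    // and the error no longer needs the public id threaded through.
    fn try_raw(&self, _guard: &SnatchGuard) -> Result<&RawBuffer, DestroyedResourceError> {
        self.raw.as_ref().ok_or(DestroyedResourceError)
    }
}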
@ -750,9 +715,7 @@ impl Global {
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into());
}
device.check_is_valid()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
@ -779,11 +742,9 @@ impl Global {
let dst_texture = hub
.textures
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(destination.texture))?;
.map_err(|_| TransferError::InvalidTextureId(destination.texture))?;
if dst_texture.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
dst_texture.same_device_as(cmd_buf.as_ref())?;
let (hal_copy_size, array_layer_count) = validate_texture_copy_range(
destination,
@ -810,37 +771,28 @@ impl Global {
&snatch_guard,
)?;
let (src_buffer, src_pending) = {
let buffer_guard = hub.buffers.read();
let src_buffer = buffer_guard
.get(source.buffer)
.map_err(|_| TransferError::InvalidBuffer(source.buffer))?;
let src_buffer = hub
.buffers
.get(source.buffer)
.map_err(|_| TransferError::InvalidBufferId(source.buffer))?;
if src_buffer.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
src_buffer.same_device_as(cmd_buf.as_ref())?;
tracker
.buffers
.set_single(src_buffer, hal::BufferUses::COPY_SRC)
.ok_or(TransferError::InvalidBuffer(source.buffer))?
};
let src_raw = src_buffer
.raw
.get(&snatch_guard)
.ok_or(TransferError::InvalidBuffer(source.buffer))?;
let src_pending = tracker
.buffers
.set_single(&src_buffer, hal::BufferUses::COPY_SRC);
let src_raw = src_buffer.try_raw(&snatch_guard)?;
if !src_buffer.usage.contains(BufferUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barrier = src_pending.map(|pending| pending.into_hal(&src_buffer, &snatch_guard));
let dst_pending = tracker
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst_texture
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_pending =
tracker
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST);
let dst_raw = dst_texture.try_raw(&snatch_guard)?;
if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
@ -921,9 +873,7 @@ impl Global {
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into());
}
device.check_is_valid()?;
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
@ -949,11 +899,9 @@ impl Global {
let src_texture = hub
.textures
.get(source.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
.map_err(|_| TransferError::InvalidTextureId(source.texture))?;
if src_texture.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
src_texture.same_device_as(cmd_buf.as_ref())?;
let (hal_copy_size, array_layer_count) =
validate_texture_copy_range(source, &src_texture.desc, CopySide::Source, copy_size)?;
@ -976,13 +924,11 @@ impl Global {
&snatch_guard,
)?;
let src_pending = tracker
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_raw = src_texture
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_pending =
tracker
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC);
let src_raw = src_texture.try_raw(&snatch_guard)?;
if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
@ -1001,25 +947,18 @@ impl Global {
}
let src_barrier = src_pending.map(|pending| pending.into_hal(src_raw));
let (dst_buffer, dst_pending) = {
let buffer_guard = hub.buffers.read();
let dst_buffer = buffer_guard
.get(destination.buffer)
.map_err(|_| TransferError::InvalidBuffer(destination.buffer))?;
let dst_buffer = hub
.buffers
.get(destination.buffer)
.map_err(|_| TransferError::InvalidBufferId(destination.buffer))?;
if dst_buffer.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
dst_buffer.same_device_as(cmd_buf.as_ref())?;
tracker
.buffers
.set_single(dst_buffer, hal::BufferUses::COPY_DST)
.ok_or(TransferError::InvalidBuffer(destination.buffer))?
};
let dst_raw = dst_buffer
.raw
.get(&snatch_guard)
.ok_or(TransferError::InvalidBuffer(destination.buffer))?;
let dst_pending = tracker
.buffers
.set_single(&dst_buffer, hal::BufferUses::COPY_DST);
let dst_raw = dst_buffer.try_raw(&snatch_guard)?;
if !dst_buffer.usage.contains(BufferUsages::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(),
@ -1104,9 +1043,7 @@ impl Global {
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(TransferError::InvalidDevice(cmd_buf.device.as_info().id()).into());
}
device.check_is_valid()?;
let snatch_guard = device.snatchable_lock.read();
@ -1133,18 +1070,14 @@ impl Global {
let src_texture = hub
.textures
.get(source.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
.map_err(|_| TransferError::InvalidTextureId(source.texture))?;
let dst_texture = hub
.textures
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
.map_err(|_| TransferError::InvalidTextureId(source.texture))?;
if src_texture.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
if dst_texture.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
src_texture.same_device_as(cmd_buf.as_ref())?;
dst_texture.same_device_as(cmd_buf.as_ref())?;
// src and dst texture format must be copy-compatible
// https://gpuweb.github.io/gpuweb/#copy-compatible
@ -1203,14 +1136,12 @@ impl Global {
&snatch_guard,
)?;
let src_pending = cmd_buf_data
.trackers
.textures
.set_single(&src_texture, src_range, hal::TextureUses::COPY_SRC)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_raw = src_texture
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_pending = cmd_buf_data.trackers.textures.set_single(
&src_texture,
src_range,
hal::TextureUses::COPY_SRC,
);
let src_raw = src_texture.try_raw(&snatch_guard)?;
if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
@ -1221,14 +1152,12 @@ impl Global {
.map(|pending| pending.into_hal(src_raw))
.collect();
let dst_pending = cmd_buf_data
.trackers
.textures
.set_single(&dst_texture, dst_range, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst_texture
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_pending = cmd_buf_data.trackers.textures.set_single(
&dst_texture,
dst_range,
hal::TextureUses::COPY_DST,
);
let dst_raw = dst_texture.try_raw(&snatch_guard)?;
if !dst_texture.desc.usage.contains(TextureUsages::COPY_DST) {
return Err(
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),


@ -15,9 +15,7 @@ use crate::{
pipeline, present,
resource::{
self, BufferAccessError, BufferAccessResult, BufferMapOperation, CreateBufferError,
Resource,
},
validation::check_buffer_usage,
Label, LabelHelpers as _,
};
@ -32,7 +30,7 @@ use std::{
sync::{atomic::Ordering, Arc},
};
use super::{ImplicitPipelineIds, InvalidDevice, UserClosures};
use super::{ImplicitPipelineIds, UserClosures};
impl Global {
pub fn adapter_is_surface_supported<A: HalApi>(
@ -102,13 +100,13 @@ impl Global {
pub fn device_features<A: HalApi>(
&self,
device_id: DeviceId,
) -> Result<wgt::Features, InvalidDevice> {
) -> Result<wgt::Features, DeviceError> {
let hub = A::hub(self);
let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?;
if !device.is_valid() {
return Err(InvalidDevice);
}
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::InvalidDeviceId)?;
Ok(device.features)
}
@ -116,13 +114,13 @@ impl Global {
pub fn device_limits<A: HalApi>(
&self,
device_id: DeviceId,
) -> Result<wgt::Limits, InvalidDevice> {
) -> Result<wgt::Limits, DeviceError> {
let hub = A::hub(self);
let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?;
if !device.is_valid() {
return Err(InvalidDevice);
}
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::InvalidDeviceId)?;
Ok(device.limits.clone())
}
@ -130,13 +128,13 @@ impl Global {
pub fn device_downlevel_properties<A: HalApi>(
&self,
device_id: DeviceId,
) -> Result<wgt::DownlevelCapabilities, InvalidDevice> {
) -> Result<wgt::DownlevelCapabilities, DeviceError> {
let hub = A::hub(self);
let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?;
if !device.is_valid() {
return Err(InvalidDevice);
}
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::InvalidDeviceId)?;
Ok(device.downlevel.clone())
}
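These accessors now distinguish a bad id (DeviceError::InvalidDeviceId) from a lost device, with the latter check moved behind device.check_is_valid(). A plausible shape for that helper — an assumption about its implementation, not wgpu-core's code:

use std::sync::atomic::{AtomicBool, Ordering};

#[derive(Debug)]
enum DeviceError {
    InvalidDeviceId,
    Lost,
}

struct Device {
    valid: AtomicBool,
}

impl Device {
    // Centralizing the check turns every `if !device.is_valid() { return ... }`
    // block into a single `device.check_is_valid()?;` line.
    fn check_is_valid(&self) -> Result<(), DeviceError> {
        if self.valid.load(Ordering::Acquire) {
            Ok(())
        } else {
            Err(DeviceError::Lost)
        }
    }
}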
@ -157,17 +155,9 @@ impl Global {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => {
break 'error DeviceError::Invalid.into();
break 'error DeviceError::InvalidDeviceId.into();
}
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
if desc.usage.is_empty() {
// Per spec, `usage` must not be zero.
break 'error CreateBufferError::InvalidUsage(desc.usage);
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -179,6 +169,11 @@ impl Global {
trace.add(trace::Action::CreateBuffer(fid.id(), desc));
}
if desc.usage.is_empty() {
// Per spec, `usage` must not be zero.
break 'error CreateBufferError::InvalidUsage(desc.usage);
}
let buffer = match device.create_buffer(desc, false) {
Ok(buffer) => buffer,
Err(e) => {
@ -260,7 +255,15 @@ impl Global {
};
let (id, resource) = fid.assign(Arc::new(buffer));
api_log!("Device::create_buffer({desc:?}) -> {id:?}");
api_log!(
"Device::create_buffer({:?}{}) -> {id:?}",
desc.label.as_deref().unwrap_or(""),
if desc.mapped_at_creation {
", mapped_at_creation"
} else {
""
}
);
device
.trackers
@ -358,7 +361,7 @@ impl Global {
hub.devices
.get(device_id)
.map_err(|_| DeviceError::Invalid)?
.map_err(|_| DeviceError::InvalidDeviceId)?
.wait_for_submit(last_submission)
}
@ -377,18 +380,12 @@ impl Global {
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
let snatch_guard = device.snatchable_lock.read();
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
.map_err(|_| DeviceError::InvalidDeviceId)?;
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU);
.map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?;
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -401,9 +398,12 @@ impl Global {
});
}
let raw_buf = buffer
.raw(&snatch_guard)
.ok_or(BufferAccessError::Destroyed)?;
device.check_is_valid()?;
buffer.check_usage(wgt::BufferUsages::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU);
let snatch_guard = device.snatchable_lock.read();
let raw_buf = buffer.try_raw(&snatch_guard)?;
unsafe {
let mapping = device
.raw()
@ -439,23 +439,19 @@ impl Global {
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
.map_err(|_| DeviceError::InvalidDeviceId)?;
device.check_is_valid()?;
let snatch_guard = device.snatchable_lock.read();
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_READ)?;
.map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?;
buffer.check_usage(wgt::BufferUsages::MAP_READ)?;
//assert!(buffer isn't used by the GPU);
let raw_buf = buffer
.raw(&snatch_guard)
.ok_or(BufferAccessError::Destroyed)?;
let raw_buf = buffer.try_raw(&snatch_guard)?;
unsafe {
let mapping = device
.raw()
@ -495,7 +491,15 @@ impl Global {
.get(buffer_id)
.map_err(|_| resource::DestroyError::Invalid)?;
let _ = buffer.unmap();
#[cfg(feature = "trace")]
if let Some(trace) = buffer.device.trace.lock().as_mut() {
trace.add(trace::Action::FreeBuffer(buffer_id));
}
let _ = buffer.unmap(
#[cfg(feature = "trace")]
buffer_id,
);
buffer.destroy()
}
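Buffer::unmap now takes the buffer id only when tracing is compiled in, since the id is needed solely to record the trace action. The cfg-on-parameter pattern as a self-contained sketch; record_unmap and the types here are illustrative, not wgpu-core's:

#[derive(Clone, Copy)]
struct BufferId(u64);

struct Buffer;

#[cfg(feature = "trace")]
fn record_unmap(id: BufferId) {
    println!("trace: unmap buffer {}", id.0);
}

impl Buffer {
    // The parameter exists only under cfg(feature = "trace"); call sites
    // pass it under the same cfg, exactly as the call sites in this diff do.
    fn unmap(&self, #[cfg(feature = "trace")] buffer_id: BufferId) {
        #[cfg(feature = "trace")]
        record_unmap(buffer_id);
    }
}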
@ -513,7 +517,15 @@ impl Global {
}
};
let _ = buffer.unmap();
#[cfg(feature = "trace")]
if let Some(t) = buffer.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBuffer(buffer_id));
}
let _ = buffer.unmap(
#[cfg(feature = "trace")]
buffer_id,
);
let last_submit_index = buffer.info.submission_index();
@ -524,8 +536,7 @@ impl Global {
.lock()
.as_ref()
.unwrap()
.dst_buffers
.contains_key(&buffer_id)
.contains_buffer(&buffer)
{
device.lock_life().future_suspected_buffers.push(buffer);
} else {
@ -559,11 +570,9 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::CreateTexture(fid.id(), desc.clone()));
@ -613,11 +622,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
// NB: Any change done through the raw texture handle will not be
// recorded in the replay
@ -688,11 +694,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
// NB: Any change done through the raw buffer handle will not be
// recorded in the replay
@ -739,6 +742,11 @@ impl Global {
.get(texture_id)
.map_err(|_| resource::DestroyError::Invalid)?;
#[cfg(feature = "trace")]
if let Some(trace) = texture.device.trace.lock().as_mut() {
trace.add(trace::Action::FreeTexture(texture_id));
}
texture.destroy()
}
@ -749,6 +757,11 @@ impl Global {
let hub = A::hub(self);
if let Some(texture) = hub.textures.unregister(texture_id) {
#[cfg(feature = "trace")]
if let Some(t) = texture.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyTexture(texture_id));
}
let last_submit_index = texture.info.submission_index();
let device = &texture.device;
@ -758,8 +771,7 @@ impl Global {
.lock()
.as_ref()
.unwrap()
.dst_textures
.contains_key(&texture_id)
.contains_texture(&texture)
{
device
.lock_life()
@ -783,7 +795,6 @@ impl Global {
}
}
#[allow(unused_unsafe)]
pub fn texture_create_view<A: HalApi>(
&self,
texture_id: id::TextureId,
@ -799,15 +810,12 @@ impl Global {
let error = 'error: {
let texture = match hub.textures.get(texture_id) {
Ok(texture) => texture,
Err(_) => break 'error resource::CreateTextureViewError::InvalidTexture,
Err(_) => {
break 'error resource::CreateTextureViewError::InvalidTextureId(texture_id)
}
};
let device = &texture.device;
{
let snatch_guard = device.snatchable_lock.read();
if texture.is_destroyed(&snatch_guard) {
break 'error resource::CreateTextureViewError::InvalidTexture;
}
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::CreateTextureView {
@ -817,7 +825,14 @@ impl Global {
});
}
let view = match unsafe { device.create_texture_view(&texture, desc) } {
{
let snatch_guard = device.snatchable_lock.read();
if let Err(e) = texture.check_destroyed(&snatch_guard) {
break 'error e.into();
}
}
let view = match device.create_texture_view(&texture, desc) {
Ok(view) => view,
Err(e) => break 'error e,
};
@ -826,6 +841,10 @@ impl Global {
{
let mut views = texture.views.lock();
// Remove stale weak references
views.retain(|view| view.strong_count() > 0);
views.push(Arc::downgrade(&resource));
}
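Rather than pruning stale backlinks during lifetime triage (the life.rs hunk below removes exactly that code), the vector is now cleaned at the moment a new weak reference is pushed, keeping it bounded by the number of live views. The pattern in isolation:

use std::sync::{Arc, Mutex, Weak};

struct TextureView;

fn register_view(views: &Mutex<Vec<Weak<TextureView>>>, view: &Arc<TextureView>) {
    let mut views = views.lock().unwrap();
    // Drop entries whose referents are gone before adding the new backlink.
    views.retain(|v| v.strong_count() > 0);
    views.push(Arc::downgrade(view));
}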
@ -854,6 +873,11 @@ impl Global {
let hub = A::hub(self);
if let Some(view) = hub.texture_views.unregister(texture_view_id) {
#[cfg(feature = "trace")]
if let Some(t) = view.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyTextureView(texture_view_id));
}
let last_submit_index = view.info.submission_index();
view.device
@ -888,11 +912,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -926,6 +947,11 @@ impl Global {
let hub = A::hub(self);
if let Some(sampler) = hub.samplers.unregister(sampler_id) {
#[cfg(feature = "trace")]
if let Some(t) = sampler.device.trace.lock().as_mut() {
t.add(trace::Action::DestroySampler(sampler_id));
}
sampler
.device
.lock_life()
@ -952,17 +978,19 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::CreateBindGroupLayout(fid.id(), desc.clone()));
}
// this check can't go in the body of `create_bind_group_layout` since the closure might not get called
if let Err(e) = device.check_is_valid() {
break 'error e.into();
}
let entry_map = match bgl::EntryMap::from_entries(&device.limits, &desc.entries) {
Ok(map) => map,
Err(e) => break 'error e,
@ -1025,6 +1053,11 @@ impl Global {
let hub = A::hub(self);
if let Some(layout) = hub.bind_group_layouts.unregister(bind_group_layout_id) {
#[cfg(feature = "trace")]
if let Some(t) = layout.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBindGroupLayout(bind_group_layout_id));
}
layout
.device
.lock_life()
@ -1051,11 +1084,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1086,6 +1116,11 @@ impl Global {
let hub = A::hub(self);
if let Some(layout) = hub.pipeline_layouts.unregister(pipeline_layout_id) {
#[cfg(feature = "trace")]
if let Some(t) = layout.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyPipelineLayout(pipeline_layout_id));
}
layout
.device
.lock_life()
@ -1109,11 +1144,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1125,10 +1157,6 @@ impl Global {
Err(..) => break 'error binding_model::CreateBindGroupError::InvalidLayout,
};
if bind_group_layout.device.as_info().id() != device.as_info().id() {
break 'error DeviceError::WrongDevice.into();
}
let bind_group = match device.create_bind_group(&bind_group_layout, desc, hub) {
Ok(bind_group) => bind_group,
Err(e) => break 'error e,
@ -1138,10 +1166,20 @@ impl Global {
let weak_ref = Arc::downgrade(&resource);
for range in &resource.used_texture_ranges {
range.texture.bind_groups.lock().push(weak_ref.clone());
let mut bind_groups = range.texture.bind_groups.lock();
// Remove stale weak references
bind_groups.retain(|bg| bg.strong_count() > 0);
bind_groups.push(weak_ref.clone());
}
for range in &resource.used_buffer_ranges {
range.buffer.bind_groups.lock().push(weak_ref.clone());
let mut bind_groups = range.buffer.bind_groups.lock();
// Remove stale weak references
bind_groups.retain(|bg| bg.strong_count() > 0);
bind_groups.push(weak_ref.clone());
}
api_log!("Device::create_bind_group -> {id:?}");
@ -1165,6 +1203,11 @@ impl Global {
let hub = A::hub(self);
if let Some(bind_group) = hub.bind_groups.unregister(bind_group_id) {
#[cfg(feature = "trace")]
if let Some(t) = bind_group.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBindGroup(bind_group_id));
}
bind_group
.device
.lock_life()
@ -1206,11 +1249,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1284,11 +1324,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1326,7 +1363,14 @@ impl Global {
api_log!("ShaderModule::drop {shader_module_id:?}");
let hub = A::hub(self);
hub.shader_modules.unregister(shader_module_id);
if let Some(shader_module) = hub.shader_modules.unregister(shader_module_id) {
#[cfg(feature = "trace")]
if let Some(t) = shader_module.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyShaderModule(shader_module_id));
}
drop(shader_module)
}
}
pub fn device_create_command_encoder<A: HalApi>(
@ -1345,28 +1389,13 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid,
Err(_) => break 'error DeviceError::InvalidDeviceId,
};
if !device.is_valid() {
break 'error DeviceError::Lost;
}
let Some(queue) = device.get_queue() else {
break 'error DeviceError::InvalidQueueId;
let command_buffer = match device.create_command_encoder(&desc.label) {
Ok(command_buffer) => command_buffer,
Err(e) => break 'error e,
};
let encoder = match device
.command_allocator
.acquire_encoder(device.raw(), queue.raw.as_ref().unwrap())
{
Ok(raw) => raw,
Err(_) => break 'error DeviceError::OutOfMemory,
};
let command_buffer = command::CommandBuffer::new(
encoder,
&device,
#[cfg(feature = "trace")]
device.trace.lock().is_some(),
desc.label.to_hal(device.instance_flags).map(str::to_owned),
);
let (id, _) = fid.assign(Arc::new(command_buffer));
api_log!("Device::create_command_encoder -> {id:?}");
@ -1436,11 +1465,12 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(bundle_encoder.parent()) {
Ok(device) => device,
Err(_) => break 'error command::RenderBundleError::INVALID_DEVICE,
Err(_) => {
break 'error command::RenderBundleError::from_device_error(
DeviceError::InvalidDeviceId,
);
}
};
if !device.is_valid() {
break 'error command::RenderBundleError::INVALID_DEVICE;
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1482,6 +1512,11 @@ impl Global {
let hub = A::hub(self);
if let Some(bundle) = hub.render_bundles.unregister(render_bundle_id) {
#[cfg(feature = "trace")]
if let Some(t) = bundle.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyRenderBundle(render_bundle_id));
}
bundle
.device
.lock_life()
@ -1505,11 +1540,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1545,7 +1577,7 @@ impl Global {
let device = &query_set.device;
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
if let Some(trace) = device.trace.lock().as_mut() {
trace.add(trace::Action::DestroyQuerySet(query_set_id));
}
@ -1582,11 +1614,9 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::CreateRenderPipeline {
@ -1684,6 +1714,12 @@ impl Global {
if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) {
let device = &pipeline.device;
#[cfg(feature = "trace")]
if let Some(t) = pipeline.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyRenderPipeline(render_pipeline_id));
}
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
@ -1718,11 +1754,8 @@ impl Global {
let error = 'error: {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@ -1732,6 +1765,7 @@ impl Global {
implicit_context: implicit_context.clone(),
});
}
let pipeline = match device.create_compute_pipeline(desc, implicit_context, hub) {
Ok(pair) => pair,
Err(e) => break 'error e,
@ -1817,6 +1851,12 @@ impl Global {
if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) {
let device = &pipeline.device;
#[cfg(feature = "trace")]
if let Some(t) = device.trace.lock().as_mut() {
t.add(trace::Action::DestroyComputePipeline(compute_pipeline_id));
}
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
@ -1850,11 +1890,9 @@ impl Global {
let device = match hub.devices.get(device_id) {
Ok(device) => device,
// TODO: Handle error properly
Err(crate::storage::InvalidId) => break 'error DeviceError::Invalid.into(),
Err(crate::storage::InvalidId) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::CreatePipelineCache {
@ -1862,6 +1900,7 @@ impl Global {
desc: desc.clone(),
});
}
let cache = unsafe { device.create_pipeline_cache(desc) };
match cache {
Ok(cache) => {
@ -1885,6 +1924,10 @@ impl Global {
let hub = A::hub(self);
if let Some(cache) = hub.pipeline_caches.unregister(pipeline_cache_id) {
#[cfg(feature = "trace")]
if let Some(t) = cache.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyPipelineCache(pipeline_cache_id));
}
drop(cache)
}
}
@ -2020,17 +2063,18 @@ impl Global {
let device = match device_guard.get(device_id) {
Ok(device) => device,
Err(_) => break 'error DeviceError::Invalid.into(),
Err(_) => break 'error DeviceError::InvalidDeviceId.into(),
};
if !device.is_valid() {
break 'error DeviceError::Lost.into();
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::ConfigureSurface(surface_id, config.clone()));
}
if let Err(e) = device.check_is_valid() {
break 'error e.into();
}
let surface = match surface_guard.get(surface_id) {
Ok(surface) => surface,
Err(_) => break 'error E::InvalidSurface,
@ -2159,13 +2203,16 @@ impl Global {
#[cfg(feature = "replay")]
/// Only triage suspected resource IDs. This helps us to avoid ID collisions
/// upon creating new resources when re-playing a trace.
pub fn device_maintain_ids<A: HalApi>(&self, device_id: DeviceId) -> Result<(), InvalidDevice> {
pub fn device_maintain_ids<A: HalApi>(&self, device_id: DeviceId) -> Result<(), DeviceError> {
let hub = A::hub(self);
let device = hub.devices.get(device_id).map_err(|_| InvalidDevice)?;
if !device.is_valid() {
return Err(InvalidDevice);
}
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::InvalidDeviceId)?;
device.check_is_valid()?;
device.lock_life().triage_suspected(&device.trackers);
Ok(())
}
@ -2178,13 +2225,13 @@ impl Global {
device_id: DeviceId,
maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
) -> Result<bool, WaitIdleError> {
api_log!("Device::poll");
api_log!("Device::poll {maintain:?}");
let hub = A::hub(self);
let device = hub
.devices
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
.map_err(|_| DeviceError::InvalidDeviceId)?;
if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
if submission_index.queue_id != device_id.into_queue_id() {
@ -2458,6 +2505,21 @@ impl Global {
}
}
pub fn device_get_internal_counters<A: HalApi>(
&self,
device_id: DeviceId,
) -> wgt::InternalCounters {
let hub = A::hub(self);
if let Ok(device) = hub.devices.get(device_id) {
wgt::InternalCounters {
hal: device.get_hal_counters(),
core: wgt::CoreCounters {},
}
} else {
Default::default()
}
}
pub fn queue_drop<A: HalApi>(&self, queue_id: QueueId) {
profiling::scope!("Queue::drop");
api_log!("Queue::drop {queue_id:?}");
@ -2475,14 +2537,24 @@ impl Global {
size: Option<BufferAddress>,
op: BufferMapOperation,
) -> BufferAccessResult {
profiling::scope!("Buffer::map_async");
api_log!("Buffer::map_async {buffer_id:?} offset {offset:?} size {size:?} op: {op:?}");
// User callbacks must not be called while holding buffer_map_async_inner's locks, so we
let hub = A::hub(self);
let op_and_err = 'error: {
let buffer = match hub.buffers.get(buffer_id) {
Ok(buffer) => buffer,
Err(_) => break 'error Some((op, BufferAccessError::InvalidBufferId(buffer_id))),
};
buffer.map_async(offset, size, op).err()
};
// User callbacks must not be called while holding `buffer.map_async`'s locks, so we
// defer the error callback if it needs to be called immediately (typically when running
// into errors).
if let Err((mut operation, err)) =
self.buffer_map_async_inner::<A>(buffer_id, offset, size, op)
{
if let Some((mut operation, err)) = op_and_err {
if let Some(callback) = operation.callback.take() {
callback.call(Err(err.clone()));
}
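The old buffer_map_async_inner (deleted below) is folded into Buffer::map_async, which hands the operation back on failure; this shell then fires the error callback only once every lock is released. The control flow, sketched with simplified types:

struct MapError;

struct MapOperation {
    callback: Option<Box<dyn FnOnce(Result<(), MapError>)>>,
}

fn map_async(
    op: MapOperation,
    inner: impl FnOnce(MapOperation) -> Result<(), (MapOperation, MapError)>,
) {
    // `inner` owns the operation and hands it back on failure, mirroring
    // how Buffer::map_async returns the op so its callback can fire here.
    let op_and_err = inner(op).err();

    // No locks are held at this point, so a user callback that re-enters
    // the API cannot deadlock.
    if let Some((mut op, err)) = op_and_err {
        if let Some(cb) = op.callback.take() {
            cb(Err(err));
        }
    }
}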
@ -2493,129 +2565,6 @@ impl Global {
Ok(())
}
// Returns the mapping callback in case of error so that the callback can be fired outside
// of the locks that are held in this function.
fn buffer_map_async_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
offset: BufferAddress,
size: Option<BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
profiling::scope!("Buffer::map_async");
let hub = A::hub(self);
let (pub_usage, internal_use) = match op.host {
HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
let buffer = {
let buffer = hub.buffers.get(buffer_id);
let buffer = match buffer {
Ok(b) => b,
Err(_) => {
return Err((op, BufferAccessError::Invalid));
}
};
{
let snatch_guard = buffer.device.snatchable_lock.read();
if buffer.is_destroyed(&snatch_guard) {
return Err((op, BufferAccessError::Destroyed));
}
}
let range_size = if let Some(size) = size {
size
} else if offset > buffer.size {
0
} else {
buffer.size - offset
};
if offset % wgt::MAP_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedOffset { offset }));
}
if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRangeSize { range_size }));
}
let range = offset..(offset + range_size);
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
{
return Err((op, BufferAccessError::UnalignedRange));
}
let device = &buffer.device;
if !device.is_valid() {
return Err((op, DeviceError::Lost.into()));
}
if let Err(e) = check_buffer_usage(buffer.info.id(), buffer.usage, pub_usage) {
return Err((op, e.into()));
}
if range.start > range.end {
return Err((
op,
BufferAccessError::NegativeRange {
start: range.start,
end: range.end,
},
));
}
if range.end > buffer.size {
return Err((
op,
BufferAccessError::OutOfBoundsOverrun {
index: range.end,
max: buffer.size,
},
));
}
{
let map_state = &mut *buffer.map_state.lock();
*map_state = match *map_state {
resource::BufferMapState::Init { .. }
| resource::BufferMapState::Active { .. } => {
return Err((op, BufferAccessError::AlreadyMapped));
}
resource::BufferMapState::Waiting(_) => {
return Err((op, BufferAccessError::MapAlreadyPending));
}
resource::BufferMapState::Idle => {
resource::BufferMapState::Waiting(resource::BufferPendingMapping {
range,
op,
_parent_buffer: buffer.clone(),
})
}
};
}
let snatch_guard = buffer.device.snatchable_lock.read();
{
let mut trackers = buffer.device.as_ref().trackers.lock();
trackers.buffers.set_single(&buffer, internal_use);
//TODO: Check if draining ALL buffers is correct!
let _ = trackers.buffers.drain_transitions(&snatch_guard);
}
drop(snatch_guard);
buffer
};
buffer.device.lock_life().map(&buffer);
Ok(())
}
pub fn buffer_get_mapped_range<A: HalApi>(
&self,
buffer_id: id::BufferId,
@ -2630,13 +2579,11 @@ impl Global {
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
.map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?;
{
let snatch_guard = buffer.device.snatchable_lock.read();
if buffer.is_destroyed(&snatch_guard) {
return Err(BufferAccessError::Destroyed);
}
buffer.check_destroyed(&snatch_guard)?;
}
let range_size = if let Some(size) = size {
@ -2699,19 +2646,17 @@ impl Global {
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
.map_err(|_| BufferAccessError::InvalidBufferId(buffer_id))?;
let snatch_guard = buffer.device.snatchable_lock.read();
if buffer.is_destroyed(&snatch_guard) {
return Err(BufferAccessError::Destroyed);
}
buffer.check_destroyed(&snatch_guard)?;
drop(snatch_guard);
if !buffer.device.is_valid() {
return Err(DeviceError::Lost.into());
}
buffer.unmap()
buffer.device.check_is_valid()?;
buffer.unmap(
#[cfg(feature = "trace")]
buffer_id,
)
}
}

24 third_party/rust/wgpu-core/src/device/life.rs (vendored)

@ -619,18 +619,6 @@ impl<A: HalApi> LifetimeTracker<A> {
&mut trackers.textures,
|maps| &mut maps.textures,
);
// We may have been suspected because a texture view or bind group
// referring to us was dropped. Remove stale weak references, so that
// the backlink table doesn't grow without bound.
for texture in self.suspected_resources.textures.values() {
texture.views.lock().retain(|view| view.strong_count() > 0);
texture
.bind_groups
.lock()
.retain(|bg| bg.strong_count() > 0);
}
self
}
@ -655,14 +643,6 @@ impl<A: HalApi> LifetimeTracker<A> {
&mut trackers.buffers,
|maps| &mut maps.buffers,
);
// We may have been suspected because a bind group referring to us was
// dropped. Remove stale weak references, so that the backlink table
// doesn't grow without bound.
for buffer in self.suspected_resources.buffers.values() {
buffer.bind_groups.lock().retain(|bg| bg.strong_count() > 0);
}
self
}
@ -835,8 +815,8 @@ impl<A: HalApi> LifetimeTracker<A> {
for buffer in self.mapped.drain(..) {
let submit_index = buffer.info.submission_index();
log::trace!(
"Mapping of {:?} at submission {:?} gets assigned to active {:?}",
buffer.info.id(),
"Mapping of {} at submission {:?} gets assigned to active {:?}",
buffer.error_ident(),
submit_index,
self.active.iter().position(|a| a.index == submit_index)
);
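Log lines switch from raw ids (buffer.info.id()) to buffer.error_ident(). Judging from the ResourceErrorIdent type imported in the queue.rs hunk below, this is a Display-able identifier built from resource kind and label; a hypothetical shape:

use std::fmt;

struct ResourceErrorIdent {
    kind: &'static str,
    label: String,
}

impl fmt::Display for ResourceErrorIdent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // e.g. "Buffer with 'staging ring' label"
        write!(f, "{} with '{}' label", self.kind, self.label)
    }
}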

67 third_party/rust/wgpu-core/src/device/mod.rs (vendored)

@ -3,7 +3,9 @@ use crate::{
hal_api::HalApi,
hub::Hub,
id::{BindGroupLayoutId, PipelineLayoutId},
resource::{Buffer, BufferAccessError, BufferAccessResult, BufferMapOperation},
resource::{
Buffer, BufferAccessError, BufferAccessResult, BufferMapOperation, ResourceErrorIdent,
},
snatch::SnatchGuard,
Label, DOWNLEVEL_ERROR_MESSAGE,
};
@ -321,9 +323,7 @@ fn map_buffer<A: HalApi>(
kind: HostMap,
snatch_guard: &SnatchGuard,
) -> Result<ptr::NonNull<u8>, BufferAccessError> {
let raw_buffer = buffer
.raw(snatch_guard)
.ok_or(BufferAccessError::Destroyed)?;
let raw_buffer = buffer.try_raw(snatch_guard)?;
let mapping = unsafe {
raw.map_buffer(raw_buffer, offset..offset + size)
.map_err(DeviceError::from)?
@ -378,25 +378,45 @@ fn map_buffer<A: HalApi>(
Ok(mapping.ptr)
}
#[derive(Clone, Debug, Error)]
#[error("Device is invalid")]
pub struct InvalidDevice;
#[derive(Clone, Debug)]
pub struct DeviceMismatch {
pub(super) res: ResourceErrorIdent,
pub(super) res_device: ResourceErrorIdent,
pub(super) target: Option<ResourceErrorIdent>,
pub(super) target_device: ResourceErrorIdent,
}
impl std::fmt::Display for DeviceMismatch {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(
f,
"{} of {} doesn't match {}",
self.res_device, self.res, self.target_device
)?;
if let Some(target) = self.target.as_ref() {
write!(f, " of {target}")?;
}
Ok(())
}
}
impl std::error::Error for DeviceMismatch {}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum DeviceError {
#[error("Parent device is invalid.")]
Invalid,
#[error("{0} is invalid.")]
Invalid(ResourceErrorIdent),
#[error("Parent device is lost")]
Lost,
#[error("Not enough memory left.")]
OutOfMemory,
#[error("Creation of a resource failed for a reason other than running out of memory.")]
ResourceCreationFailed,
#[error("QueueId is invalid")]
InvalidQueueId,
#[error("Attempt to use a resource with a different device from the one that created it")]
WrongDevice,
#[error("DeviceId is invalid")]
InvalidDeviceId,
#[error(transparent)]
DeviceMismatch(#[from] Box<DeviceMismatch>),
}
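DeviceMismatch enters the enum boxed, plausibly to keep DeviceError — and every Result that carries it — small despite the four-identifier payload. The boxed-variant pattern with thiserror 1.x:

use thiserror::Error;

#[derive(Debug)]
struct DeviceMismatch {
    res: String,
    res_device: String,
    target_device: String,
}

impl std::fmt::Display for DeviceMismatch {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{} of {} doesn't match {}",
            self.res_device, self.res, self.target_device
        )
    }
}

impl std::error::Error for DeviceMismatch {}

#[derive(Debug, Error)]
enum DeviceError {
    // #[error(transparent)] defers Display to the boxed payload, and
    // #[from] lets `?` convert a Box<DeviceMismatch> automatically.
    #[error(transparent)]
    DeviceMismatch(#[from] Box<DeviceMismatch>),
}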
impl From<hal::DeviceError> for DeviceError {
@ -521,25 +541,10 @@ pub fn create_validator(
Caps::SUBGROUP_BARRIER,
features.intersects(wgt::Features::SUBGROUP_BARRIER),
);
let mut subgroup_stages = naga::valid::ShaderStages::empty();
subgroup_stages.set(
naga::valid::ShaderStages::COMPUTE | naga::valid::ShaderStages::FRAGMENT,
features.contains(wgt::Features::SUBGROUP),
);
subgroup_stages.set(
naga::valid::ShaderStages::VERTEX,
caps.set(
Caps::SUBGROUP_VERTEX_STAGE,
features.contains(wgt::Features::SUBGROUP_VERTEX),
);
let subgroup_operations = if caps.contains(Caps::SUBGROUP) {
use naga::valid::SubgroupOperationSet as S;
S::BASIC | S::VOTE | S::ARITHMETIC | S::BALLOT | S::SHUFFLE | S::SHUFFLE_RELATIVE
} else {
naga::valid::SubgroupOperationSet::empty()
};
let mut validator = naga::valid::Validator::new(flags, caps);
validator.subgroup_stages(subgroup_stages);
validator.subgroup_operations(subgroup_operations);
validator
naga::valid::Validator::new(flags, caps)
}
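The explicit subgroup_stages/subgroup_operations plumbing disappears; given the new Caps::SUBGROUP_VERTEX_STAGE bit above, naga appears to derive subgroup support from the capability flags themselves, leaving a bare constructor call (naga's actual Validator::new signature, trivially wrapped):

fn build_validator(
    flags: naga::valid::ValidationFlags,
    caps: naga::valid::Capabilities,
) -> naga::valid::Validator {
    // Capabilities alone now drive subgroup validation; no per-stage setters.
    naga::valid::Validator::new(flags, caps)
}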

230 third_party/rust/wgpu-core/src/device/queue.rs (vendored)

@ -12,14 +12,17 @@ use crate::{
global::Global,
hal_api::HalApi,
hal_label,
id::{self, DeviceId, QueueId},
id::{self, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
lock::{rank, Mutex, RwLockWriteGuard},
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
ResourceInfo, ResourceType, StagingBuffer, Texture, TextureInner,
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedResourceError,
DestroyedTexture, ParentDevice, Resource, ResourceErrorIdent, ResourceInfo, ResourceType,
StagingBuffer, Texture, TextureInner,
},
resource_log, track, FastHashMap, SubmissionIndex,
resource_log,
track::{self, TrackerIndex},
FastHashMap, SubmissionIndex,
};
use hal::{CommandEncoder as _, Device as _, Queue as _};
@ -53,8 +56,15 @@ impl<A: HalApi> Resource for Queue<A> {
}
}
impl<A: HalApi> ParentDevice<A> for Queue<A> {
fn device(&self) -> &Arc<Device<A>> {
self.device.as_ref().unwrap()
}
}
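Queue now implements ParentDevice, the trait behind the same_device_as calls that replace manual id comparisons throughout this commit. A guess at the trait's shape, comparing devices by Arc identity:

use std::sync::Arc;

struct Device;

trait ParentDevice {
    fn device(&self) -> &Arc<Device>;

    // A default method gives every resource the same-device check for free.
    fn same_device_as<O: ParentDevice>(&self, other: &O) -> Result<(), String> {
        if Arc::ptr_eq(self.device(), other.device()) {
            Ok(())
        } else {
            Err("resources belong to different devices".to_string())
        }
    }
}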
impl<A: HalApi> Drop for Queue<A> {
fn drop(&mut self) {
resource_log!("Drop {}", self.error_ident());
let queue = self.raw.take().unwrap();
self.device.as_ref().unwrap().release_queue(queue);
}
@ -204,9 +214,9 @@ pub(crate) struct PendingWrites<A: HalApi> {
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub is_recording: bool,
pub temp_resources: Vec<TempResource<A>>,
pub dst_buffers: FastHashMap<id::BufferId, Arc<Buffer<A>>>,
pub dst_textures: FastHashMap<id::TextureId, Arc<Texture<A>>>,
temp_resources: Vec<TempResource<A>>,
dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer<A>>>,
dst_textures: FastHashMap<TrackerIndex, Arc<Texture<A>>>,
/// All command buffers allocated from `command_encoder`.
pub executing_command_buffers: Vec<A::CommandBuffer>,
@ -237,6 +247,25 @@ impl<A: HalApi> PendingWrites<A> {
self.temp_resources.clear();
}
pub fn insert_buffer(&mut self, buffer: &Arc<Buffer<A>>) {
self.dst_buffers
.insert(buffer.info.tracker_index(), buffer.clone());
}
pub fn insert_texture(&mut self, texture: &Arc<Texture<A>>) {
self.dst_textures
.insert(texture.info.tracker_index(), texture.clone());
}
pub fn contains_buffer(&self, buffer: &Arc<Buffer<A>>) -> bool {
self.dst_buffers.contains_key(&buffer.info.tracker_index())
}
pub fn contains_texture(&self, texture: &Arc<Texture<A>>) -> bool {
self.dst_textures
.contains_key(&texture.info.tracker_index())
}
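PendingWrites now keys its destination maps by TrackerIndex instead of public id and hides them behind accessors — presumably because public ids can be recycled after unregistration, while a tracker index stays unique for the resource's lifetime. The encapsulation in miniature, with stand-in types:

use std::collections::HashMap;
use std::sync::Arc;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct TrackerIndex(usize);

struct Buffer {
    tracker_index: TrackerIndex,
}

#[derive(Default)]
struct PendingWrites {
    dst_buffers: HashMap<TrackerIndex, Arc<Buffer>>,
}

impl PendingWrites {
    fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers.insert(buffer.tracker_index, buffer.clone());
    }

    fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index)
    }
}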
pub fn consume_temp(&mut self, resource: TempResource<A>) {
self.temp_resources.push(resource);
}
@ -320,10 +349,7 @@ fn prepare_staging_buffer<A: HalApi>(
raw: Mutex::new(rank::STAGING_BUFFER_RAW, Some(buffer)),
device: device.clone(),
size,
info: ResourceInfo::new(
"<StagingBuffer>",
Some(device.tracker_indices.staging_buffers.clone()),
),
info: ResourceInfo::new(&None, Some(device.tracker_indices.staging_buffers.clone())),
is_coherent: mapping.is_coherent,
};
@ -352,36 +378,31 @@ pub struct InvalidQueue;
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
#[error(
"Device of queue ({:?}) does not match device of write recipient ({:?})",
queue_device_id,
target_device_id
)]
DeviceMismatch {
queue_device_id: DeviceId,
target_device_id: DeviceId,
},
#[error("QueueId is invalid")]
InvalidQueueId,
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
Transfer(#[from] TransferError),
#[error(transparent)]
MemoryInitFailure(#[from] ClearError),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
#[error("QueueId is invalid")]
InvalidQueueId,
#[error(transparent)]
Queue(#[from] DeviceError),
#[error("Buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error("Texture {0:?} is destroyed")]
DestroyedTexture(id::TextureId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error(transparent)]
Unmap(#[from] BufferAccessError),
#[error("Buffer {0:?} is still mapped")]
BufferStillMapped(id::BufferId),
#[error("{0} is still mapped")]
BufferStillMapped(ResourceErrorIdent),
#[error("Surface output was dropped before the command buffer got submitted")]
SurfaceOutputDropped,
#[error("Surface was unconfigured before the command buffer got submitted")]
@ -405,31 +426,18 @@ impl Global {
let hub = A::hub(self);
let buffer_device_id = hub
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| TransferError::InvalidBuffer(buffer_id))?
.device
.as_info()
.id();
.map_err(|_| TransferError::InvalidBufferId(buffer_id))?;
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueWriteError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
{
let queue_device_id = device.as_info().id();
if buffer_device_id != queue_device_id {
return Err(QueueWriteError::DeviceMismatch {
queue_device_id,
target_device_id: buffer_device_id,
});
}
}
let data_size = data.len() as wgt::BufferAddress;
#[cfg(feature = "trace")]
@ -443,6 +451,8 @@ impl Global {
});
}
buffer.same_device_as(queue.as_ref())?;
if data_size == 0 {
log::trace!("Ignoring write_buffer of size 0");
return Ok(());
@ -469,6 +479,7 @@ impl Global {
}
let result = self.queue_write_staging_buffer_impl(
&queue,
device,
pending_writes,
&staging_buffer,
@ -492,7 +503,7 @@ impl Global {
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueWriteError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
@ -519,13 +530,13 @@ impl Global {
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueWriteError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
let staging_buffer = hub.staging_buffers.unregister(staging_buffer_id);
if staging_buffer.is_none() {
return Err(QueueWriteError::Transfer(TransferError::InvalidBuffer(
return Err(QueueWriteError::Transfer(TransferError::InvalidBufferId(
buffer_id,
)));
}
@ -543,6 +554,7 @@ impl Global {
}
let result = self.queue_write_staging_buffer_impl(
&queue,
device,
pending_writes,
&staging_buffer,
@ -567,7 +579,7 @@ impl Global {
let buffer = hub
.buffers
.get(buffer_id)
.map_err(|_| TransferError::InvalidBuffer(buffer_id))?;
.map_err(|_| TransferError::InvalidBufferId(buffer_id))?;
self.queue_validate_write_buffer_impl(&buffer, buffer_id, buffer_offset, buffer_size)?;
@ -607,7 +619,8 @@ impl Global {
fn queue_write_staging_buffer_impl<A: HalApi>(
&self,
device: &Device<A>,
queue: &Arc<Queue<A>>,
device: &Arc<Device<A>>,
pending_writes: &mut PendingWrites<A>,
staging_buffer: &StagingBuffer<A>,
buffer_id: id::BufferId,
@ -615,26 +628,20 @@ impl Global {
) -> Result<(), QueueWriteError> {
let hub = A::hub(self);
let (dst, transition) = {
let buffer_guard = hub.buffers.read();
let dst = buffer_guard
.get(buffer_id)
.map_err(|_| TransferError::InvalidBuffer(buffer_id))?;
let mut trackers = device.trackers.lock();
trackers
.buffers
.set_single(dst, hal::BufferUses::COPY_DST)
.ok_or(TransferError::InvalidBuffer(buffer_id))?
};
let snatch_guard = device.snatchable_lock.read();
let dst_raw = dst
.raw
.get(&snatch_guard)
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
let dst = hub
.buffers
.get(buffer_id)
.map_err(|_| TransferError::InvalidBufferId(buffer_id))?;
if dst.device.as_info().id() != device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
let transition = {
let mut trackers = device.trackers.lock();
trackers.buffers.set_single(&dst, hal::BufferUses::COPY_DST)
};
let snatch_guard = device.snatchable_lock.read();
let dst_raw = dst.try_raw(&snatch_guard)?;
dst.same_device_as(queue.as_ref())?;
let src_buffer_size = staging_buffer.size;
self.queue_validate_write_buffer_impl(&dst, buffer_id, buffer_offset, src_buffer_size)?;
@ -662,8 +669,8 @@ impl Global {
region.into_iter(),
);
}
let dst = hub.buffers.get(buffer_id).unwrap();
pending_writes.dst_buffers.insert(buffer_id, dst.clone());
pending_writes.insert_buffer(&dst);
// Ensure the overwritten bytes are marked as initialized so
// they don't need to be nulled prior to mapping or binding.
@ -692,7 +699,7 @@ impl Global {
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueWriteError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
@ -715,11 +722,9 @@ impl Global {
let dst = hub
.textures
.get(destination.texture)
.map_err(|_| TransferError::InvalidTexture(destination.texture))?;
.map_err(|_| TransferError::InvalidTextureId(destination.texture))?;
if dst.device.as_info().id().into_queue_id() != queue_id {
return Err(DeviceError::WrongDevice.into());
}
dst.same_device_as(queue.as_ref())?;
if !dst.desc.usage.contains(wgt::TextureUsages::COPY_DST) {
return Err(
@ -845,9 +850,7 @@ impl Global {
dst.info
.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let dst_raw = dst
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst.try_raw(&snatch_guard)?;
let bytes_per_row = data_layout
.bytes_per_row
@ -924,10 +927,10 @@ impl Global {
};
let mut trackers = device.trackers.lock();
let transition = trackers
.textures
.set_single(&dst, selector, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let transition =
trackers
.textures
.set_single(&dst, selector, hal::TextureUses::COPY_DST);
unsafe {
encoder.transition_textures(transition.map(|pending| pending.into_hal(dst_raw)));
encoder.transition_buffers(iter::once(barrier));
@ -936,9 +939,7 @@ impl Global {
}
pending_writes.consume(staging_buffer);
pending_writes
.dst_textures
.insert(destination.texture, dst.clone());
pending_writes.insert_texture(&dst);
Ok(())
}
@ -958,7 +959,7 @@ impl Global {
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueWriteError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
@ -1108,9 +1109,7 @@ impl Global {
.use_at(device.active_submission_index.load(Ordering::Relaxed) + 1);
let snatch_guard = device.snatchable_lock.read();
let dst_raw = dst
.raw(&snatch_guard)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst.try_raw(&snatch_guard)?;
let regions = hal::TextureCopy {
src_base: hal::TextureCopyBase {
@ -1125,10 +1124,10 @@ impl Global {
unsafe {
let mut trackers = device.trackers.lock();
let transitions = trackers
.textures
.set_single(&dst, selector, hal::TextureUses::COPY_DST)
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let transitions =
trackers
.textures
.set_single(&dst, selector, hal::TextureUses::COPY_DST);
encoder.transition_textures(transitions.map(|pending| pending.into_hal(dst_raw)));
encoder.copy_external_image_to_texture(
source,
@ -1155,7 +1154,7 @@ impl Global {
let queue = hub
.queues
.get(queue_id)
.map_err(|_| DeviceError::InvalidQueueId)?;
.map_err(|_| QueueSubmitError::InvalidQueueId)?;
let device = queue.device.as_ref().unwrap();
@ -1200,10 +1199,6 @@ impl Global {
Err(_) => continue,
};
if cmdbuf.device.as_info().id().into_queue_id() != queue_id {
return Err(DeviceError::WrongDevice.into());
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(Action::Submit(
@ -1218,6 +1213,9 @@ impl Global {
.unwrap(),
));
}
cmdbuf.same_device_as(queue.as_ref())?;
if !cmdbuf.is_finished() {
let cmdbuf = Arc::into_inner(cmdbuf).expect(
"Command buffer cannot be destroyed because is still in use",
@ -1236,18 +1234,14 @@ impl Global {
{
profiling::scope!("buffers");
for buffer in cmd_buf_trackers.buffers.used_resources() {
if buffer.raw.get(&snatch_guard).is_none() {
return Err(QueueSubmitError::DestroyedBuffer(
buffer.info.id(),
));
}
buffer.check_destroyed(&snatch_guard)?;
buffer.info.use_at(submit_index);
match *buffer.map_state.lock() {
BufferMapState::Idle => (),
_ => {
return Err(QueueSubmitError::BufferStillMapped(
buffer.info.id(),
buffer.error_ident(),
))
}
}
@ -1256,14 +1250,9 @@ impl Global {
{
profiling::scope!("textures");
for texture in cmd_buf_trackers.textures.used_resources() {
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(
texture.info.id(),
));
}
Some(TextureInner::Native { .. }) => false,
Some(TextureInner::Surface { ref raw, .. }) => {
let should_extend = match texture.try_inner(&snatch_guard)? {
TextureInner::Native { .. } => false,
TextureInner::Surface { ref raw, .. } => {
if raw.is_some() {
// Compare the Arcs by pointer as Textures don't implement Eq.
submit_surface_textures_owned
@ -1367,12 +1356,8 @@ impl Global {
//Note: locking the trackers has to be done after the storages
let mut trackers = device.trackers.lock();
baked
.initialize_buffer_memory(&mut *trackers, &snatch_guard)
.map_err(|err| QueueSubmitError::DestroyedBuffer(err.0))?;
baked
.initialize_texture_memory(&mut *trackers, device, &snatch_guard)
.map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?;
baked.initialize_buffer_memory(&mut *trackers, &snatch_guard)?;
baked.initialize_texture_memory(&mut *trackers, device, &snatch_guard)?;
//Note: stateless trackers are not merged:
// device already knows these resources exist.
CommandBuffer::insert_barriers_from_tracker(
@ -1438,13 +1423,10 @@ impl Global {
{
used_surface_textures.set_size(hub.textures.read().len());
for (&id, texture) in pending_writes.dst_textures.iter() {
match texture.inner.get(&snatch_guard) {
None => {
return Err(QueueSubmitError::DestroyedTexture(id));
}
Some(TextureInner::Native { .. }) => {}
Some(TextureInner::Surface { ref raw, .. }) => {
for texture in pending_writes.dst_textures.values() {
match texture.try_inner(&snatch_guard)? {
TextureInner::Native { .. } => {}
TextureInner::Surface { ref raw, .. } => {
if raw.is_some() {
// Compare the Arcs by pointer as Textures don't implement Eq
submit_surface_textures_owned
@ -1550,6 +1532,8 @@ impl Global {
// the closures should execute with nothing locked!
callbacks.fire();
api_log!("Queue::submit to {queue_id:?} returned submit index {submit_index}");
Ok(WrappedSubmissionIndex {
queue_id,
index: submit_index,

third_party/rust/wgpu-core/src/device/resource.rs (vendored)

@ -24,8 +24,8 @@ use crate::{
pool::ResourcePool,
registry::Registry,
resource::{
self, Buffer, QuerySet, Resource, ResourceInfo, ResourceType, Sampler, Texture,
TextureView, TextureViewNotRenderableReason,
self, Buffer, ParentDevice, QuerySet, Resource, ResourceInfo, ResourceType, Sampler,
Texture, TextureView, TextureViewNotRenderableReason,
},
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
@ -34,9 +34,7 @@ use crate::{
BindGroupStates, TextureSelector, Tracker, TrackerIndexAllocators, UsageScope,
UsageScopePool,
},
validation::{
self, check_buffer_usage, check_texture_usage, validate_color_attachment_bytes_per_sample,
},
validation::{self, validate_color_attachment_bytes_per_sample},
FastHashMap, LabelHelpers as _, SubmissionIndex,
};
@ -153,7 +151,7 @@ pub(crate) enum DeferredDestroy<A: HalApi> {
impl<A: HalApi> std::fmt::Debug for Device<A> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Device")
.field("adapter", &self.adapter.info.label())
.field("label", &self.label())
.field("limits", &self.limits)
.field("features", &self.features)
.field("downlevel", &self.downlevel)
@ -163,7 +161,7 @@ impl<A: HalApi> std::fmt::Debug for Device<A> {
impl<A: HalApi> Drop for Device<A> {
fn drop(&mut self) {
resource_log!("Destroy raw Device {:?}", self.info.label());
resource_log!("Drop {}", self.error_ident());
let raw = self.raw.take().unwrap();
let pending_writes = self.pending_writes.lock().take().unwrap();
pending_writes.dispose(&raw);
@ -270,7 +268,7 @@ impl<A: HalApi> Device<A> {
queue: OnceCell::new(),
queue_to_drop: OnceCell::new(),
zero_buffer: Some(zero_buffer),
info: ResourceInfo::new("<device>", None),
info: ResourceInfo::new(&desc.label, None),
command_allocator,
active_submission_index: AtomicU64::new(0),
fence: RwLock::new(rank::DEVICE_FENCE, Some(fence)),
@ -313,6 +311,14 @@ impl<A: HalApi> Device<A> {
self.valid.load(Ordering::Acquire)
}
pub fn check_is_valid(&self) -> Result<(), DeviceError> {
if self.is_valid() {
Ok(())
} else {
Err(DeviceError::Invalid(self.error_ident()))
}
}
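The new `check_is_valid` helper folds the scattered `is_valid` checks into a single `?`-able call, as the later hunks in this diff show at each creation entry point. A minimal self-contained sketch of the pattern, using toy types rather than the wgpu-core definitions:

use std::sync::atomic::{AtomicBool, Ordering};

#[derive(Debug)]
struct DeviceInvalid;

struct Device {
    valid: AtomicBool,
}

impl Device {
    fn is_valid(&self) -> bool {
        self.valid.load(Ordering::Acquire)
    }

    // Turns the boolean check into a Result so call sites can use `?`.
    fn check_is_valid(&self) -> Result<(), DeviceInvalid> {
        if self.is_valid() {
            Ok(())
        } else {
            Err(DeviceInvalid)
        }
    }

    fn create_sampler(&self) -> Result<(), DeviceInvalid> {
        self.check_is_valid()?; // replaces `if !device.is_valid() { return Err(...); }`
        Ok(())
    }
}

fn main() {
    let device = Device { valid: AtomicBool::new(true) };
    assert!(device.create_sampler().is_ok());
    device.valid.store(false, Ordering::Release);
    assert!(device.create_sampler().is_err());
}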
pub(crate) fn release_queue(&self, queue: A::Queue) {
assert!(self.queue_to_drop.set(queue).is_ok());
}
@ -339,11 +345,8 @@ impl<A: HalApi> Device<A> {
continue;
};
resource_log!("Destroy raw TextureView (destroyed) {:?}", view.label());
#[cfg(feature = "trace")]
if let Some(t) = self.trace.lock().as_mut() {
t.add(trace::Action::DestroyTextureView(view.info.id()));
}
resource_log!("Destroy raw {}", view.error_ident());
unsafe {
use hal::Device;
self.raw().destroy_texture_view(raw_view);
@ -358,11 +361,8 @@ impl<A: HalApi> Device<A> {
continue;
};
resource_log!("Destroy raw BindGroup (destroyed) {:?}", bind_group.label());
#[cfg(feature = "trace")]
if let Some(t) = self.trace.lock().as_mut() {
t.add(trace::Action::DestroyBindGroup(bind_group.info.id()));
}
resource_log!("Destroy raw {}", bind_group.error_ident());
unsafe {
use hal::Device;
self.raw().destroy_bind_group(raw_bind_group);
@ -400,6 +400,7 @@ impl<A: HalApi> Device<A> {
snatch_guard: SnatchGuard,
) -> Result<(UserClosures, bool), WaitIdleError> {
profiling::scope!("Device::maintain");
let fence = fence_guard.as_ref().unwrap();
let last_done_index = if maintain.is_wait() {
let index_to_wait_for = match maintain {
@ -427,6 +428,7 @@ impl<A: HalApi> Device<A> {
.map_err(DeviceError::from)?
}
};
log::info!("Device::maintain: last done index {last_done_index}");
let mut life_tracker = self.lock_life();
let submission_closures =
@ -562,7 +564,7 @@ impl<A: HalApi> Device<A> {
desc: &resource::BufferDescriptor,
transient: bool,
) -> Result<Buffer<A>, resource::CreateBufferError> {
debug_assert_eq!(self.as_info().id().backend(), A::VARIANT);
self.check_is_valid()?;
if desc.size > self.limits.max_buffer_size {
return Err(resource::CreateBufferError::MaxBufferSize {
@ -654,10 +656,7 @@ impl<A: HalApi> Device<A> {
),
sync_mapped_writes: Mutex::new(rank::BUFFER_SYNC_MAPPED_WRITES, None),
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.buffers.clone())),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
})
}
@ -670,8 +669,6 @@ impl<A: HalApi> Device<A> {
format_features: wgt::TextureFormatFeatures,
clear_mode: resource::TextureClearMode<A>,
) -> Texture<A> {
debug_assert_eq!(self.as_info().id().backend(), A::VARIANT);
Texture {
inner: Snatchable::new(resource::TextureInner::Native { raw: hal_texture }),
device: self.clone(),
@ -686,10 +683,7 @@ impl<A: HalApi> Device<A> {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.textures.clone()),
),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.textures.clone())),
clear_mode: RwLock::new(rank::TEXTURE_CLEAR_MODE, clear_mode),
views: Mutex::new(rank::TEXTURE_VIEWS, Vec::new()),
bind_groups: Mutex::new(rank::TEXTURE_BIND_GROUPS, Vec::new()),
@ -701,8 +695,6 @@ impl<A: HalApi> Device<A> {
hal_buffer: A::Buffer,
desc: &resource::BufferDescriptor,
) -> Buffer<A> {
debug_assert_eq!(self.as_info().id().backend(), A::VARIANT);
Buffer {
raw: Snatchable::new(hal_buffer),
device: self.clone(),
@ -714,10 +706,7 @@ impl<A: HalApi> Device<A> {
),
sync_mapped_writes: Mutex::new(rank::BUFFER_SYNC_MAPPED_WRITES, None),
map_state: Mutex::new(rank::BUFFER_MAP_STATE, resource::BufferMapState::Idle),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.buffers.clone()),
),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.buffers.clone())),
bind_groups: Mutex::new(rank::BUFFER_BIND_GROUPS, Vec::new()),
}
}
@ -729,6 +718,8 @@ impl<A: HalApi> Device<A> {
) -> Result<Texture<A>, resource::CreateTextureError> {
use resource::{CreateTextureError, TextureDimensionError};
self.check_is_valid()?;
if desc.usage.is_empty() || desc.usage.contains_invalid_bits() {
return Err(CreateTextureError::InvalidUsage(desc.usage));
}
@ -1005,9 +996,7 @@ impl<A: HalApi> Device<A> {
) -> Result<TextureView<A>, resource::CreateTextureViewError> {
let snatch_guard = texture.device.snatchable_lock.read();
let texture_raw = texture
.raw(&snatch_guard)
.ok_or(resource::CreateTextureViewError::InvalidTexture)?;
let texture_raw = texture.try_raw(&snatch_guard)?;
// resolve TextureViewDescriptor defaults
// https://gpuweb.github.io/gpuweb/#abstract-opdef-resolving-gputextureviewdescriptor-defaults
@ -1238,8 +1227,8 @@ impl<A: HalApi> Device<A> {
};
log::debug!(
"Create view for texture {:?} filters usages to {:?}",
texture.as_info().id(),
"Create view for {} filters usages to {:?}",
texture.error_ident(),
usage
);
@ -1294,7 +1283,7 @@ impl<A: HalApi> Device<A> {
samples: texture.desc.sample_count,
selector,
info: ResourceInfo::new(
desc.label.borrow_or_default(),
&desc.label,
Some(self.tracker_indices.texture_views.clone()),
),
})
@ -1304,6 +1293,8 @@ impl<A: HalApi> Device<A> {
self: &Arc<Self>,
desc: &resource::SamplerDescriptor,
) -> Result<Sampler<A>, resource::CreateSamplerError> {
self.check_is_valid()?;
if desc
.address_modes
.iter()
@ -1400,10 +1391,7 @@ impl<A: HalApi> Device<A> {
Ok(Sampler {
raw: Some(raw),
device: self.clone(),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.samplers.clone()),
),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.samplers.clone())),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
@ -1415,6 +1403,8 @@ impl<A: HalApi> Device<A> {
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: pipeline::ShaderModuleSource<'a>,
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
self.check_is_valid()?;
let (module, source) = match source {
#[cfg(feature = "wgsl")]
pipeline::ShaderModuleSource::Wgsl(code) => {
@ -1534,8 +1524,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: Some(interface),
info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
info: ResourceInfo::new(&desc.label, None),
})
}
@ -1545,6 +1534,8 @@ impl<A: HalApi> Device<A> {
desc: &pipeline::ShaderModuleDescriptor<'a>,
source: &'a [u32],
) -> Result<pipeline::ShaderModule<A>, pipeline::CreateShaderModuleError> {
self.check_is_valid()?;
self.require_features(wgt::Features::SPIRV_SHADER_PASSTHROUGH)?;
let hal_desc = hal::ShaderModuleDescriptor {
label: desc.label.to_hal(self.instance_flags),
@ -1575,11 +1566,31 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: None,
info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
info: ResourceInfo::new(&desc.label, None),
})
}
pub(crate) fn create_command_encoder(
self: &Arc<Self>,
label: &crate::Label,
) -> Result<command::CommandBuffer<A>, DeviceError> {
self.check_is_valid()?;
let queue = self.get_queue().unwrap();
let encoder = self
.command_allocator
.acquire_encoder(self.raw(), queue.raw.as_ref().unwrap())?;
Ok(command::CommandBuffer::new(
encoder,
self,
#[cfg(feature = "trace")]
self.trace.lock().is_some(),
label,
))
}
/// Generate information about late-validated buffer bindings for pipelines.
//TODO: should this be combined with `get_introspection_bind_group_layouts` in some way?
pub(crate) fn make_late_sized_buffer_groups(
@ -1798,9 +1809,8 @@ impl<A: HalApi> Device<A> {
let bgl_flags = conv::bind_group_layout_flags(self.features);
let hal_bindings = entry_map.values().copied().collect::<Vec<_>>();
let label = label.to_hal(self.instance_flags);
let hal_desc = hal::BindGroupLayoutDescriptor {
label,
label: label.to_hal(self.instance_flags),
flags: bgl_flags,
entries: &hal_bindings,
};
@ -1828,15 +1838,12 @@ impl<A: HalApi> Device<A> {
entries: entry_map,
origin,
binding_count_validator: count_validator,
info: ResourceInfo::new(
label.unwrap_or("<BindGroupLayout>"),
Some(self.tracker_indices.bind_group_layouts.clone()),
),
label: label.unwrap_or_default().to_string(),
info: ResourceInfo::new(label, Some(self.tracker_indices.bind_group_layouts.clone())),
})
}
pub(crate) fn create_buffer_binding<'a>(
self: &Arc<Self>,
bb: &binding_model::BufferBinding,
binding: u32,
decl: &wgt::BindGroupLayoutEntry,
@ -1846,7 +1853,6 @@ impl<A: HalApi> Device<A> {
used: &mut BindGroupStates<A>,
storage: &'a Storage<Buffer<A>>,
limits: &wgt::Limits,
device_id: id::Id<id::markers::Device>,
snatch_guard: &'a SnatchGuard<'a>,
) -> Result<hal::BufferBinding<'a, A>, binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
@ -1893,20 +1899,16 @@ impl<A: HalApi> Device<A> {
));
}
let buffer = used
.buffers
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
let buffer = storage
.get(bb.buffer_id)
.map_err(|_| Error::InvalidBufferId(bb.buffer_id))?;
if buffer.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice.into());
}
used.buffers.add_single(buffer, internal_use);
check_buffer_usage(bb.buffer_id, buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.get(snatch_guard)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
buffer.same_device(self)?;
buffer.check_usage(pub_usage)?;
let raw_buffer = buffer.try_raw(snatch_guard)?;
let (bind_size, bind_end) = match bb.size {
Some(size) => {
@ -1981,21 +1983,17 @@ impl<A: HalApi> Device<A> {
}
fn create_sampler_binding<'a>(
self: &Arc<Self>,
used: &BindGroupStates<A>,
storage: &'a Storage<Sampler<A>>,
id: id::Id<id::markers::Sampler>,
device_id: id::Id<id::markers::Device>,
) -> Result<&'a Sampler<A>, binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
let sampler = used
.samplers
.add_single(storage, id)
.ok_or(Error::InvalidSampler(id))?;
let sampler = storage.get(id).map_err(|_| Error::InvalidSampler(id))?;
used.samplers.add_single(sampler);
if sampler.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice.into());
}
sampler.same_device(self)?;
Ok(sampler)
}
@ -2012,14 +2010,12 @@ impl<A: HalApi> Device<A> {
) -> Result<hal::TextureBinding<'a, A>, binding_model::CreateBindGroupError> {
use crate::binding_model::CreateBindGroupError as Error;
let view = used
.views
.add_single(storage, id)
.ok_or(Error::InvalidTextureView(id))?;
let view = storage
.get(id)
.map_err(|_| Error::InvalidTextureViewId(id))?;
used.views.add_single(view);
if view.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
view.same_device(self)?;
let (pub_usage, internal_use) = self.texture_use_parameters(
binding,
@ -2028,21 +2024,14 @@ impl<A: HalApi> Device<A> {
"SampledTexture, ReadonlyStorageTexture or WriteonlyStorageTexture",
)?;
let texture = &view.parent;
let texture_id = texture.as_info().id();
// Careful here: the texture may no longer have its own ref count,
// if it was deleted by the user.
let texture = used
.textures
.add_single(texture, Some(view.selector.clone()), internal_use)
.ok_or(binding_model::CreateBindGroupError::InvalidTexture(
texture_id,
))?;
used.textures
.add_single(texture, Some(view.selector.clone()), internal_use);
if texture.device.as_info().id() != view.device.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
texture.same_device_as(view.as_ref())?;
check_texture_usage(texture.desc.usage, pub_usage)?;
texture.check_usage(pub_usage)?;
used_texture_ranges.push(TextureInitTrackerAction {
texture: texture.clone(),
@ -2057,9 +2046,7 @@ impl<A: HalApi> Device<A> {
});
Ok(hal::TextureBinding {
view: view
.raw(snatch_guard)
.ok_or(Error::InvalidTextureView(id))?,
view: view.try_raw(snatch_guard)?,
usage: internal_use,
})
}
@ -2073,6 +2060,10 @@ impl<A: HalApi> Device<A> {
hub: &Hub<A>,
) -> Result<BindGroup<A>, binding_model::CreateBindGroupError> {
use crate::binding_model::{BindingResource as Br, CreateBindGroupError as Error};
self.check_is_valid()?;
layout.same_device(self)?;
{
// Check that the number of entries in the descriptor matches
// the number of entries in the layout.
@ -2113,7 +2104,7 @@ impl<A: HalApi> Device<A> {
.ok_or(Error::MissingBindingDeclaration(binding))?;
let (res_index, count) = match entry.resource {
Br::Buffer(ref bb) => {
let bb = Self::create_buffer_binding(
let bb = self.create_buffer_binding(
bb,
binding,
decl,
@ -2123,7 +2114,6 @@ impl<A: HalApi> Device<A> {
&mut used,
&*buffer_guard,
&self.limits,
self.as_info().id(),
&snatch_guard,
)?;
@ -2137,7 +2127,7 @@ impl<A: HalApi> Device<A> {
let res_index = hal_buffers.len();
for bb in bindings_array.iter() {
let bb = Self::create_buffer_binding(
let bb = self.create_buffer_binding(
bb,
binding,
decl,
@ -2147,7 +2137,6 @@ impl<A: HalApi> Device<A> {
&mut used,
&*buffer_guard,
&self.limits,
self.as_info().id(),
&snatch_guard,
)?;
hal_buffers.push(bb);
@ -2156,12 +2145,7 @@ impl<A: HalApi> Device<A> {
}
Br::Sampler(id) => match decl.ty {
wgt::BindingType::Sampler(ty) => {
let sampler = Self::create_sampler_binding(
&used,
&sampler_guard,
id,
self.as_info().id(),
)?;
let sampler = self.create_sampler_binding(&used, &sampler_guard, id)?;
let (allowed_filtering, allowed_comparison) = match ty {
wgt::SamplerBindingType::Filtering => (None, false),
@ -2203,12 +2187,7 @@ impl<A: HalApi> Device<A> {
let res_index = hal_samplers.len();
for &id in bindings_array.iter() {
let sampler = Self::create_sampler_binding(
&used,
&sampler_guard,
id,
self.as_info().id(),
)?;
let sampler = self.create_sampler_binding(&used, &sampler_guard, id)?;
hal_samplers.push(sampler.raw());
}
@ -2288,10 +2267,7 @@ impl<A: HalApi> Device<A> {
raw: Snatchable::new(raw),
device: self.clone(),
layout: layout.clone(),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
Some(self.tracker_indices.bind_groups.clone()),
),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.bind_groups.clone())),
used,
used_buffer_ranges,
used_texture_ranges,
@ -2476,6 +2452,8 @@ impl<A: HalApi> Device<A> {
) -> Result<binding_model::PipelineLayout<A>, binding_model::CreatePipelineLayoutError> {
use crate::binding_model::CreatePipelineLayoutError as Error;
self.check_is_valid()?;
let bind_group_layouts_count = desc.bind_group_layouts.len();
let device_max_bind_groups = self.limits.max_bind_groups as usize;
if bind_group_layouts_count > device_max_bind_groups {
@ -2537,9 +2515,7 @@ impl<A: HalApi> Device<A> {
// Validate total resource counts and check for a matching device
for bgl in &bind_group_layouts {
if bgl.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
bgl.same_device(self)?;
count_validator.merge(&bgl.binding_count_validator);
}
@ -2574,7 +2550,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
&desc.label,
Some(self.tracker_indices.pipeline_layouts.clone()),
),
bind_group_layouts,
@ -2629,6 +2605,8 @@ impl<A: HalApi> Device<A> {
implicit_context: Option<ImplicitPipelineContext>,
hub: &Hub<A>,
) -> Result<pipeline::ComputePipeline<A>, pipeline::CreateComputePipelineError> {
self.check_is_valid()?;
// This has to be done first, or otherwise the IDs may be pointing to entries
// that are not even in the storage.
if let Some(ref ids) = implicit_context {
@ -2647,9 +2625,7 @@ impl<A: HalApi> Device<A> {
.get(desc.stage.module)
.map_err(|_| validation::StageError::InvalidModule)?;
if shader_module.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
shader_module.same_device(self)?;
// Get the pipeline layout from the desc if it is provided.
let pipeline_layout = match desc.layout {
@ -2659,9 +2635,7 @@ impl<A: HalApi> Device<A> {
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateComputePipelineError::InvalidLayout)?;
if pipeline_layout.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
pipeline_layout.same_device(self)?;
Some(pipeline_layout)
}
@ -2723,9 +2697,7 @@ impl<A: HalApi> Device<A> {
break 'cache None;
};
if cache.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
cache.same_device(self)?;
Some(cache)
};
@ -2767,7 +2739,7 @@ impl<A: HalApi> Device<A> {
_shader_module: shader_module,
late_sized_buffer_groups,
info: ResourceInfo::new(
desc.label.borrow_or_default(),
&desc.label,
Some(self.tracker_indices.compute_pipelines.clone()),
),
};
@ -2783,6 +2755,8 @@ impl<A: HalApi> Device<A> {
) -> Result<pipeline::RenderPipeline<A>, pipeline::CreateRenderPipelineError> {
use wgt::TextureFormatFeatureFlags as Tfff;
self.check_is_valid()?;
// This has to be done first, or otherwise the IDs may be pointing to entries
// that are not even in the storage.
if let Some(ref ids) = implicit_context {
@ -3103,9 +3077,7 @@ impl<A: HalApi> Device<A> {
.get(pipeline_layout_id)
.map_err(|_| pipeline::CreateRenderPipelineError::InvalidLayout)?;
if pipeline_layout.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
pipeline_layout.same_device(self)?;
Some(pipeline_layout)
}
@ -3140,9 +3112,7 @@ impl<A: HalApi> Device<A> {
error: validation::StageError::InvalidModule,
}
})?;
if vertex_shader_module.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
vertex_shader_module.same_device(self)?;
let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
@ -3334,9 +3304,7 @@ impl<A: HalApi> Device<A> {
break 'cache None;
};
if cache.device.as_info().id() != self.as_info().id() {
return Err(DeviceError::WrongDevice.into());
}
cache.same_device(self)?;
Some(cache)
};
@ -3425,7 +3393,7 @@ impl<A: HalApi> Device<A> {
vertex_steps,
late_sized_buffer_groups,
info: ResourceInfo::new(
desc.label.borrow_or_default(),
&desc.label,
Some(self.tracker_indices.render_pipelines.clone()),
),
};
@ -3439,6 +3407,9 @@ impl<A: HalApi> Device<A> {
desc: &pipeline::PipelineCacheDescriptor,
) -> Result<pipeline::PipelineCache<A>, pipeline::CreatePipelineCacheError> {
use crate::pipeline_cache;
self.check_is_valid()?;
self.require_features(wgt::Features::PIPELINE_CACHE)?;
let data = if let Some((data, validation_key)) = desc
.data
@ -3470,7 +3441,7 @@ impl<A: HalApi> Device<A> {
let cache = pipeline::PipelineCache {
device: self.clone(),
info: ResourceInfo::new(
desc.label.borrow_or_default(),
&desc.label,
Some(self.tracker_indices.pipeline_caches.clone()),
),
// This would be none in the error condition, which we don't implement yet
@ -3561,6 +3532,8 @@ impl<A: HalApi> Device<A> {
) -> Result<QuerySet<A>, resource::CreateQuerySetError> {
use resource::CreateQuerySetError as Error;
self.check_is_valid()?;
match desc.ty {
wgt::QueryType::Occlusion => {}
wgt::QueryType::Timestamp => {
@ -3586,7 +3559,7 @@ impl<A: HalApi> Device<A> {
Ok(QuerySet {
raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }),
device: self.clone(),
info: ResourceInfo::new("", Some(self.tracker_indices.query_sets.clone())),
info: ResourceInfo::new(&desc.label, Some(self.tracker_indices.query_sets.clone())),
desc: desc.map_label(|_| ()),
})
}
@ -3643,6 +3616,13 @@ impl<A: HalApi> Device<A> {
pub(crate) fn new_usage_scope(&self) -> UsageScope<'_, A> {
UsageScope::new_pooled(&self.usage_scopes, &self.tracker_indices)
}
pub fn get_hal_counters(&self) -> wgt::HalCounters {
self.raw
.as_ref()
.map(|raw| raw.get_internal_counters())
.unwrap_or_default()
}
}
impl<A: HalApi> Device<A> {

third_party/rust/wgpu-core/src/error.rs (vendored)

@ -138,7 +138,8 @@ pub fn format_pretty_any(
if let Some(pretty_err) = error.downcast_ref::<crate::command::PassErrorScope>() {
return pretty_err.fmt_pretty(&mut fmt);
}
if let Some(pretty_err) = error.downcast_ref::<crate::track::UsageConflict>() {
if let Some(pretty_err) = error.downcast_ref::<crate::track::ResourceUsageCompatibilityError>()
{
return pretty_err.fmt_pretty(&mut fmt);
}
if let Some(pretty_err) = error.downcast_ref::<crate::command::QueryError>() {

third_party/rust/wgpu-core/src/instance.rs (vendored)

@ -158,10 +158,6 @@ impl Resource for Surface {
fn as_info_mut(&mut self) -> &mut ResourceInfo<Self> {
&mut self.info
}
fn label(&self) -> &str {
"<Surface>"
}
}
impl Surface {
@ -204,7 +200,7 @@ impl<A: HalApi> Adapter<A> {
Self {
raw,
info: ResourceInfo::new("<Adapter>", None),
info: ResourceInfo::new(&None, None),
}
}
@ -309,7 +305,7 @@ impl<A: HalApi> Adapter<A> {
let queue = Queue {
device: None,
raw: Some(hal_device.queue),
info: ResourceInfo::new("<Queue>", None),
info: ResourceInfo::new(&None, None),
};
return Ok((device, queue));
}
@ -532,7 +528,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
info: ResourceInfo::new(&None, None),
#[cfg(vulkan)]
vulkan: init::<hal::api::Vulkan>(
@ -596,7 +592,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
info: ResourceInfo::new(&None, None),
metal: Some(self.instance.metal.as_ref().map_or(
Err(CreateSurfaceError::BackendNotEnabled(Backend::Metal)),
|inst| {
@ -625,7 +621,7 @@ impl Global {
) -> Result<SurfaceId, CreateSurfaceError> {
let surface = Surface {
presentation: Mutex::new(rank::SURFACE_PRESENTATION, None),
info: ResourceInfo::new("<Surface>", None),
info: ResourceInfo::new(&None, None),
dx12: Some(create_surface_func(
self.instance
.dx12

third_party/rust/wgpu-core/src/pipeline.rs (vendored)

@ -1,5 +1,3 @@
#[cfg(feature = "trace")]
use crate::device::trace;
pub use crate::pipeline_cache::PipelineCacheValidationError;
use crate::{
binding_model::{CreateBindGroupLayoutError, CreatePipelineLayoutError, PipelineLayout},
@ -7,7 +5,7 @@ use crate::{
device::{Device, DeviceError, MissingDownlevelFlags, MissingFeatures, RenderPassContext},
hal_api::HalApi,
id::{PipelineCacheId, PipelineLayoutId, ShaderModuleId},
resource::{Resource, ResourceInfo, ResourceType},
resource::{ParentDevice, Resource, ResourceInfo, ResourceType},
resource_log, validation, Label,
};
use arrayvec::ArrayVec;
@ -53,17 +51,12 @@ pub struct ShaderModule<A: HalApi> {
pub(crate) device: Arc<Device<A>>,
pub(crate) interface: Option<validation::Interface>,
pub(crate) info: ResourceInfo<ShaderModule<A>>,
pub(crate) label: String,
}
impl<A: HalApi> Drop for ShaderModule<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw ShaderModule {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyShaderModule(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_shader_module(raw);
@ -84,9 +77,11 @@ impl<A: HalApi> Resource for ShaderModule<A> {
fn as_info_mut(&mut self) -> &mut ResourceInfo<Self> {
&mut self.info
}
}
fn label(&self) -> &str {
&self.label
impl<A: HalApi> ParentDevice<A> for ShaderModule<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
@ -229,13 +224,7 @@ pub struct ComputePipeline<A: HalApi> {
impl<A: HalApi> Drop for ComputePipeline<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw ComputePipeline {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyComputePipeline(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_compute_pipeline(raw);
@ -258,6 +247,12 @@ impl<A: HalApi> Resource for ComputePipeline<A> {
}
}
impl<A: HalApi> ParentDevice<A> for ComputePipeline<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
impl<A: HalApi> ComputePipeline<A> {
pub(crate) fn raw(&self) -> &A::ComputePipeline {
self.raw.as_ref().unwrap()
@ -297,13 +292,7 @@ pub struct PipelineCache<A: HalApi> {
impl<A: HalApi> Drop for PipelineCache<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw PipelineCache {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyPipelineCache(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_pipeline_cache(raw);
@ -326,6 +315,12 @@ impl<A: HalApi> Resource for PipelineCache<A> {
}
}
impl<A: HalApi> ParentDevice<A> for PipelineCache<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
/// Describes how the vertex buffer is interpreted.
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
@ -556,13 +551,7 @@ pub struct RenderPipeline<A: HalApi> {
impl<A: HalApi> Drop for RenderPipeline<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw RenderPipeline {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyRenderPipeline(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_render_pipeline(raw);
@ -585,6 +574,12 @@ impl<A: HalApi> Resource for RenderPipeline<A> {
}
}
impl<A: HalApi> ParentDevice<A> for RenderPipeline<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
impl<A: HalApi> RenderPipeline<A> {
pub(crate) fn raw(&self) -> &A::RenderPipeline {
self.raw.as_ref().unwrap()

third_party/rust/wgpu-core/src/pipeline_cache.rs (vendored)

@ -16,7 +16,7 @@ pub enum PipelineCacheValidationError {
#[error("The pipeline cacha data was out of date and so cannot be safely used")]
Outdated,
#[error("The cache data was created for a different device")]
WrongDevice,
DeviceMismatch,
#[error("Pipeline cacha data was created for a future version of wgpu")]
Unsupported,
}
@ -26,7 +26,7 @@ impl PipelineCacheValidationError {
/// That is, is there a mistake in user code interacting with the cache
pub fn was_avoidable(&self) -> bool {
match self {
PipelineCacheValidationError::WrongDevice => true,
PipelineCacheValidationError::DeviceMismatch => true,
PipelineCacheValidationError::Truncated
| PipelineCacheValidationError::Unsupported
| PipelineCacheValidationError::Extended
@ -57,10 +57,10 @@ pub fn validate_pipeline_cache<'d>(
return Err(PipelineCacheValidationError::Outdated);
}
if header.backend != adapter.backend as u8 {
return Err(PipelineCacheValidationError::WrongDevice);
return Err(PipelineCacheValidationError::DeviceMismatch);
}
if header.adapter_key != adapter_key {
return Err(PipelineCacheValidationError::WrongDevice);
return Err(PipelineCacheValidationError::DeviceMismatch);
}
if header.validation_key != validation_key {
// If the validation key is wrong, that means that this device has changed
@ -420,7 +420,7 @@ mod tests {
];
let cache = cache.into_iter().flatten().collect::<Vec<u8>>();
let validation_result = super::validate_pipeline_cache(&cache, &ADAPTER, VALIDATION_KEY);
assert_eq!(validation_result, Err(E::WrongDevice));
assert_eq!(validation_result, Err(E::DeviceMismatch));
}
#[test]
fn wrong_adapter() {
@ -436,7 +436,7 @@ mod tests {
];
let cache = cache.into_iter().flatten().collect::<Vec<u8>>();
let validation_result = super::validate_pipeline_cache(&cache, &ADAPTER, VALIDATION_KEY);
assert_eq!(validation_result, Err(E::WrongDevice));
assert_eq!(validation_result, Err(E::DeviceMismatch));
}
#[test]
fn wrong_validation() {

third_party/rust/wgpu-core/src/present.rs (vendored)

@ -9,7 +9,10 @@ When this texture is presented, we remove it from the device tracker as well as
extract it from the hub.
!*/
use std::{borrow::Borrow, sync::Arc};
use std::{
borrow::{Borrow, Cow},
sync::Arc,
};
#[cfg(feature = "trace")]
use crate::device::trace::Action;
@ -136,9 +139,7 @@ impl Global {
let (device, config) = if let Some(ref present) = *surface.presentation.lock() {
match present.device.downcast_clone::<A>() {
Some(device) => {
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
device.check_is_valid()?;
(device, present.config.clone())
}
None => return Err(SurfaceError::NotConfigured),
@ -227,7 +228,7 @@ impl Global {
mips: 0..1,
},
info: ResourceInfo::new(
"<Surface Texture>",
&Some(Cow::Borrowed("<Surface Texture>")),
Some(device.tracker_indices.textures.clone()),
),
clear_mode: RwLock::new(
@ -303,16 +304,15 @@ impl Global {
};
let device = present.device.downcast_ref::<A>().unwrap();
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
let queue = device.get_queue().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(Action::Present(surface_id));
}
device.check_is_valid()?;
let queue = device.get_queue().unwrap();
let result = {
let texture_id = present
.acquired_texture
@ -397,15 +397,14 @@ impl Global {
};
let device = present.device.downcast_ref::<A>().unwrap();
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(Action::DiscardSurfaceTexture(surface_id));
}
device.check_is_valid()?;
{
let texture_id = present
.acquired_texture

third_party/rust/wgpu-core/src/registry.rs (vendored)

@ -255,7 +255,7 @@ mod tests {
s.spawn(|| {
for _ in 0..1000 {
let value = Arc::new(TestData {
info: ResourceInfo::new("Test data", None),
info: ResourceInfo::new(&None, None),
});
let new_id = registry.prepare(None);
let (id, _) = new_id.assign(value);

third_party/rust/wgpu-core/src/resource.rs (vendored)

@ -3,8 +3,8 @@ use crate::device::trace;
use crate::{
binding_model::BindGroup,
device::{
queue, resource::DeferredDestroy, BufferMapPendingClosure, Device, DeviceError, HostMap,
MissingDownlevelFlags, MissingFeatures,
queue, resource::DeferredDestroy, BufferMapPendingClosure, Device, DeviceError,
DeviceMismatch, HostMap, MissingDownlevelFlags, MissingFeatures,
},
global::Global,
hal_api::HalApi,
@ -17,7 +17,6 @@ use crate::{
resource_log,
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
validation::MissingBufferUsageError,
Label, SubmissionIndex,
};
@ -72,7 +71,7 @@ pub(crate) struct ResourceInfo<T: Resource> {
submission_index: AtomicUsize,
/// The `label` from the descriptor used to create the resource.
pub(crate) label: String,
label: String,
}
impl<T: Resource> Drop for ResourceInfo<T> {
@ -84,10 +83,8 @@ impl<T: Resource> Drop for ResourceInfo<T> {
}
impl<T: Resource> ResourceInfo<T> {
// Note: Abstractly, this function should take `label: String` to minimize string cloning.
// But as actually used, every input is a literal or borrowed `&str`, so this is convenient.
pub(crate) fn new(
label: &str,
label: &Label,
tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
) -> Self {
let tracker_index = tracker_indices
@ -99,25 +96,13 @@ impl<T: Resource> ResourceInfo<T> {
tracker_index,
tracker_indices,
submission_index: AtomicUsize::new(0),
label: label.to_string(),
label: label
.as_ref()
.map(|label| label.to_string())
.unwrap_or_default(),
}
}
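`ResourceInfo::new` now takes the descriptor's `Label` directly instead of a pre-borrowed `&str`, and an absent label becomes the empty string. A small sketch of that conversion, assuming wgpu-core's `Label` alias (`Option<Cow<str>>`):

use std::borrow::Cow;

type Label<'a> = Option<Cow<'a, str>>;

// Mirrors the `.as_ref().map(...).unwrap_or_default()` chain above.
fn label_to_string(label: &Label) -> String {
    label
        .as_ref()
        .map(|label| label.to_string())
        .unwrap_or_default()
}

fn main() {
    assert_eq!(label_to_string(&Some(Cow::Borrowed("vertices"))), "vertices");
    assert_eq!(label_to_string(&None), "");
}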
pub(crate) fn label(&self) -> &dyn Debug
where
Id<T::Marker>: Debug,
{
if !self.label.is_empty() {
return &self.label;
}
if let Some(id) = &self.id {
return id;
}
&""
}
pub(crate) fn id(&self) -> Id<T::Marker> {
self.id.unwrap()
}
@ -143,6 +128,48 @@ impl<T: Resource> ResourceInfo<T> {
}
}
#[derive(Clone, Debug)]
pub struct ResourceErrorIdent {
r#type: ResourceType,
label: String,
}
impl std::fmt::Display for ResourceErrorIdent {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
write!(f, "{} with '{}' label", self.r#type, self.label)
}
}
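A quick illustration of how `ResourceErrorIdent` renders inside error messages (a toy copy of the type above; the "vertices" label is hypothetical):

use std::fmt;

struct ResourceErrorIdent {
    r#type: &'static str,
    label: String,
}

impl fmt::Display for ResourceErrorIdent {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{} with '{}' label", self.r#type, self.label)
    }
}

fn main() {
    let ident = ResourceErrorIdent { r#type: "Buffer", label: "vertices".into() };
    // Errors embed this, e.g. "Buffer with 'vertices' label has been destroyed".
    assert_eq!(ident.to_string(), "Buffer with 'vertices' label");
}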
pub(crate) trait ParentDevice<A: HalApi>: Resource {
fn device(&self) -> &Arc<Device<A>>;
fn same_device_as<O: ParentDevice<A>>(&self, other: &O) -> Result<(), DeviceError> {
if self.device().is_equal(other.device()) {
Ok(())
} else {
Err(DeviceError::DeviceMismatch(Box::new(DeviceMismatch {
res: self.error_ident(),
res_device: self.device().error_ident(),
target: Some(other.error_ident()),
target_device: other.device().error_ident(),
})))
}
}
fn same_device(&self, device: &Arc<Device<A>>) -> Result<(), DeviceError> {
if self.device().is_equal(device) {
Ok(())
} else {
Err(DeviceError::DeviceMismatch(Box::new(DeviceMismatch {
res: self.error_ident(),
res_device: self.device().error_ident(),
target: None,
target_device: device.error_ident(),
})))
}
}
}
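`ParentDevice` replaces the old id-based `WrongDevice` checks: resources now compare their parent `Arc<Device>` by pointer identity (see the `is_equal` change below, which switches to `Arc::ptr_eq`). A self-contained toy model of the check:

use std::sync::Arc;

struct Device;

#[derive(Debug)]
struct DeviceMismatch;

trait ParentDevice {
    fn device(&self) -> &Arc<Device>;

    // Two resources belong together iff their parent `Arc<Device>`s point to
    // the same allocation -- pointer identity, not value equality.
    fn same_device_as<O: ParentDevice>(&self, other: &O) -> Result<(), DeviceMismatch> {
        if Arc::ptr_eq(self.device(), other.device()) {
            Ok(())
        } else {
            Err(DeviceMismatch)
        }
    }
}

struct Buffer { device: Arc<Device> }
struct Texture { device: Arc<Device> }

impl ParentDevice for Buffer { fn device(&self) -> &Arc<Device> { &self.device } }
impl ParentDevice for Texture { fn device(&self) -> &Arc<Device> { &self.device } }

fn main() {
    let dev_a = Arc::new(Device);
    let dev_b = Arc::new(Device);
    let buf = Buffer { device: dev_a.clone() };
    let tex_same = Texture { device: dev_a.clone() };
    let tex_other = Texture { device: dev_b };
    assert!(buf.same_device_as(&tex_same).is_ok());
    assert!(buf.same_device_as(&tex_other).is_err());
}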
pub(crate) type ResourceType = &'static str;
pub(crate) trait Resource: 'static + Sized + WasmNotSendSync {
@ -166,8 +193,14 @@ pub(crate) trait Resource: 'static + Sized + WasmNotSendSync {
fn is_unique(self: &Arc<Self>) -> bool {
self.ref_count() == 1
}
fn is_equal(&self, other: &Self) -> bool {
self.as_info().id().unzip() == other.as_info().id().unzip()
fn is_equal(self: &Arc<Self>, other: &Arc<Self>) -> bool {
Arc::ptr_eq(self, other)
}
fn error_ident(&self) -> ResourceErrorIdent {
ResourceErrorIdent {
r#type: Self::TYPE,
label: self.label().to_owned(),
}
}
}
@ -294,9 +327,8 @@ impl BufferMapCallback {
let status = match result {
Ok(()) => BufferMapAsyncStatus::Success,
Err(BufferAccessError::Device(_)) => BufferMapAsyncStatus::ContextLost,
Err(BufferAccessError::Invalid) | Err(BufferAccessError::Destroyed) => {
BufferMapAsyncStatus::Invalid
}
Err(BufferAccessError::InvalidBufferId(_))
| Err(BufferAccessError::DestroyedResource(_)) => BufferMapAsyncStatus::Invalid,
Err(BufferAccessError::AlreadyMapped) => BufferMapAsyncStatus::AlreadyMapped,
Err(BufferAccessError::MapAlreadyPending) => {
BufferMapAsyncStatus::MapAlreadyPending
@ -336,10 +368,10 @@ pub enum BufferAccessError {
Device(#[from] DeviceError),
#[error("Buffer map failed")]
Failed,
#[error("Buffer is invalid")]
Invalid,
#[error("Buffer is destroyed")]
Destroyed,
#[error("BufferId {0:?} is invalid")]
InvalidBufferId(BufferId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("Buffer is already mapped")]
AlreadyMapped,
#[error("Buffer map is pending")]
@ -377,6 +409,26 @@ pub enum BufferAccessError {
MapAborted,
}
#[derive(Clone, Debug, Error)]
#[error("Usage flags {actual:?} of {res} do not contain required usage flags {expected:?}")]
pub struct MissingBufferUsageError {
pub(crate) res: ResourceErrorIdent,
pub(crate) actual: wgt::BufferUsages,
pub(crate) expected: wgt::BufferUsages,
}
#[derive(Clone, Debug, Error)]
#[error("Usage flags {actual:?} of {res} do not contain required usage flags {expected:?}")]
pub struct MissingTextureUsageError {
pub(crate) res: ResourceErrorIdent,
pub(crate) actual: wgt::TextureUsages,
pub(crate) expected: wgt::TextureUsages,
}
#[derive(Clone, Debug, Error)]
#[error("{0} has been destroyed")]
pub struct DestroyedResourceError(pub ResourceErrorIdent);
pub type BufferAccessResult = Result<(), BufferAccessError>;
#[derive(Debug)]
@ -405,13 +457,7 @@ pub struct Buffer<A: HalApi> {
impl<A: HalApi> Drop for Buffer<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw Buffer (dropped) {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBuffer(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_buffer(raw);
@ -425,13 +471,151 @@ impl<A: HalApi> Buffer<A> {
self.raw.get(guard)
}
pub(crate) fn is_destroyed(&self, guard: &SnatchGuard) -> bool {
self.raw.get(guard).is_none()
pub(crate) fn try_raw<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&A::Buffer, DestroyedResourceError> {
self.raw
.get(guard)
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
pub(crate) fn check_destroyed<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<(), DestroyedResourceError> {
self.raw
.get(guard)
.map(|_| ())
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
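`try_raw` and `check_destroyed` replace the old `raw(...).ok_or(...)` call sites: the `Option` held in the snatchable slot is converted into a `DestroyedResourceError` that names the resource. A toy sketch of the pattern, with a plain `Option` standing in for the snatchable handle:

#[derive(Debug)]
struct DestroyedResourceError(String);

struct Buffer {
    label: String,
    raw: Option<u64>, // stand-in for the HAL buffer handle
}

impl Buffer {
    // Absent handle means the buffer was destroyed; name it in the error.
    fn try_raw(&self) -> Result<&u64, DestroyedResourceError> {
        self.raw
            .as_ref()
            .ok_or_else(|| DestroyedResourceError(format!("Buffer with '{}' label", self.label)))
    }
}

fn main() {
    let live = Buffer { label: "staging".into(), raw: Some(7) };
    let dead = Buffer { label: "staging".into(), raw: None };
    assert!(live.try_raw().is_ok());
    assert!(dead.try_raw().is_err());
}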
/// Checks that the given buffer usage contains the required buffer usage,
/// returns an error otherwise.
pub(crate) fn check_usage(
self: &Arc<Self>,
expected: wgt::BufferUsages,
) -> Result<(), MissingBufferUsageError> {
if self.usage.contains(expected) {
Ok(())
} else {
Err(MissingBufferUsageError {
res: self.error_ident(),
actual: self.usage,
expected,
})
}
}
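`check_usage` moves usage validation onto the buffer itself and reports both the actual and the required flags. A toy version of the contains-check, with a plain bitmask in place of `wgt::BufferUsages`:

#[derive(Clone, Copy, PartialEq, Debug)]
struct Usages(u32);

impl Usages {
    const MAP_READ: Usages = Usages(1 << 0);
    const COPY_DST: Usages = Usages(1 << 1);

    // True iff every bit in `other` is set in `self`.
    fn contains(self, other: Usages) -> bool {
        self.0 & other.0 == other.0
    }
}

#[derive(Debug)]
struct MissingUsage { actual: Usages, expected: Usages }

fn check_usage(actual: Usages, expected: Usages) -> Result<(), MissingUsage> {
    if actual.contains(expected) {
        Ok(())
    } else {
        Err(MissingUsage { actual, expected })
    }
}

fn main() {
    let actual = Usages::COPY_DST;
    assert!(check_usage(actual, Usages::COPY_DST).is_ok());
    assert!(check_usage(actual, Usages::MAP_READ).is_err());
}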
/// Returns the mapping callback in case of error so that the callback can be fired outside
/// of the locks that are held in this function.
pub(crate) fn map_async(
self: &Arc<Self>,
offset: wgt::BufferAddress,
size: Option<wgt::BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
let range_size = if let Some(size) = size {
size
} else if offset > self.size {
0
} else {
self.size - offset
};
if offset % wgt::MAP_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedOffset { offset }));
}
if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRangeSize { range_size }));
}
let range = offset..(offset + range_size);
if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err((op, BufferAccessError::UnalignedRange));
}
let (pub_usage, internal_use) = match op.host {
HostMap::Read => (wgt::BufferUsages::MAP_READ, hal::BufferUses::MAP_READ),
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
if let Err(e) = self.check_usage(pub_usage) {
return Err((op, e.into()));
}
if range.start > range.end {
return Err((
op,
BufferAccessError::NegativeRange {
start: range.start,
end: range.end,
},
));
}
if range.end > self.size {
return Err((
op,
BufferAccessError::OutOfBoundsOverrun {
index: range.end,
max: self.size,
},
));
}
let device = &self.device;
if let Err(e) = device.check_is_valid() {
return Err((op, e.into()));
}
{
let snatch_guard = device.snatchable_lock.read();
if let Err(e) = self.check_destroyed(&snatch_guard) {
return Err((op, e.into()));
}
}
{
let map_state = &mut *self.map_state.lock();
*map_state = match *map_state {
BufferMapState::Init { .. } | BufferMapState::Active { .. } => {
return Err((op, BufferAccessError::AlreadyMapped));
}
BufferMapState::Waiting(_) => {
return Err((op, BufferAccessError::MapAlreadyPending));
}
BufferMapState::Idle => BufferMapState::Waiting(BufferPendingMapping {
range,
op,
_parent_buffer: self.clone(),
}),
};
}
let snatch_guard = device.snatchable_lock.read();
{
let mut trackers = device.as_ref().trackers.lock();
trackers.buffers.set_single(self, internal_use);
//TODO: Check if draining ALL buffers is correct!
let _ = trackers.buffers.drain_transitions(&snatch_guard);
}
drop(snatch_guard);
device.lock_life().map(self);
Ok(())
}
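`map_async` now performs its validation on the buffer before touching the trackers. A self-contained sketch of just the alignment and bounds rules (the constants mirror `wgt::MAP_ALIGNMENT` = 8 and `wgt::COPY_BUFFER_ALIGNMENT` = 4; error strings here are illustrative):

const MAP_ALIGNMENT: u64 = 8;
const COPY_BUFFER_ALIGNMENT: u64 = 4;

fn validate_map_range(offset: u64, size: u64, buffer_size: u64) -> Result<(), String> {
    if offset % MAP_ALIGNMENT != 0 {
        return Err(format!("unaligned offset {offset}"));
    }
    if size % COPY_BUFFER_ALIGNMENT != 0 {
        return Err(format!("unaligned range size {size}"));
    }
    if offset + size > buffer_size {
        return Err(format!("range end {} out of bounds (max {buffer_size})", offset + size));
    }
    Ok(())
}

fn main() {
    assert!(validate_map_range(0, 256, 1024).is_ok());
    assert!(validate_map_range(4, 256, 1024).is_err()); // offset not 8-aligned
    assert!(validate_map_range(8, 3, 1024).is_err());   // size not 4-aligned
}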
// Note: This must not be called while holding a lock.
pub(crate) fn unmap(self: &Arc<Self>) -> Result<(), BufferAccessError> {
if let Some((mut operation, status)) = self.unmap_inner()? {
pub(crate) fn unmap(
self: &Arc<Self>,
#[cfg(feature = "trace")] buffer_id: BufferId,
) -> Result<(), BufferAccessError> {
if let Some((mut operation, status)) = self.unmap_inner(
#[cfg(feature = "trace")]
buffer_id,
)? {
if let Some(callback) = operation.callback.take() {
callback.call(status);
}
@ -440,16 +624,16 @@ impl<A: HalApi> Buffer<A> {
Ok(())
}
fn unmap_inner(self: &Arc<Self>) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
fn unmap_inner(
self: &Arc<Self>,
#[cfg(feature = "trace")] buffer_id: BufferId,
) -> Result<Option<BufferMapPendingClosure>, BufferAccessError> {
use hal::Device;
let device = &self.device;
let snatch_guard = device.snatchable_lock.read();
let raw_buf = self
.raw(&snatch_guard)
.ok_or(BufferAccessError::Destroyed)?;
let buffer_id = self.info.id();
log::debug!("Buffer {:?} map state -> Idle", buffer_id);
let raw_buf = self.try_raw(&snatch_guard)?;
log::debug!("{} map state -> Idle", self.error_ident());
match mem::replace(&mut *self.map_state.lock(), BufferMapState::Idle) {
BufferMapState::Init {
ptr,
@ -509,7 +693,7 @@ impl<A: HalApi> Buffer<A> {
}
}
pending_writes.consume_temp(queue::TempResource::Buffer(stage_buffer));
pending_writes.dst_buffers.insert(buffer_id, self.clone());
pending_writes.insert_buffer(self);
}
BufferMapState::Idle => {
return Err(BufferAccessError::NotMapped);
@ -547,12 +731,6 @@ impl<A: HalApi> Buffer<A> {
pub(crate) fn destroy(self: &Arc<Self>) -> Result<(), DestroyError> {
let device = &self.device;
let buffer_id = self.info.id();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::FreeBuffer(buffer_id));
}
let temp = {
let snatch_guard = device.snatchable_lock.write();
@ -572,17 +750,16 @@ impl<A: HalApi> Buffer<A> {
raw: Some(raw),
device: Arc::clone(&self.device),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
tracker_index: self.info.tracker_index(),
label: self.info.label.clone(),
label: self.label().to_owned(),
bind_groups,
}))
};
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if pending_writes.dst_buffers.contains_key(&buffer_id) {
pending_writes.temp_resources.push(temp);
if pending_writes.contains_buffer(self) {
pending_writes.consume_temp(temp);
} else {
let last_submit_index = self.info.submission_index();
device
@ -627,13 +804,18 @@ impl<A: HalApi> Resource for Buffer<A> {
}
}
impl<A: HalApi> ParentDevice<A> for Buffer<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
/// A buffer that has been marked as destroyed and is staged for actual deletion soon.
#[derive(Debug)]
pub struct DestroyedBuffer<A: HalApi> {
raw: Option<A::Buffer>,
device: Arc<Device<A>>,
label: String,
pub(crate) id: BufferId,
pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
bind_groups: Vec<Weak<BindGroup<A>>>,
@ -641,11 +823,7 @@ pub struct DestroyedBuffer<A: HalApi> {
impl<A: HalApi> DestroyedBuffer<A> {
pub fn label(&self) -> &dyn Debug {
if !self.label.is_empty() {
return &self.label;
}
&self.id
&self.label
}
}
@ -660,11 +838,6 @@ impl<A: HalApi> Drop for DestroyedBuffer<A> {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw Buffer (destroyed) {:?}", self.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyBuffer(self.id));
}
unsafe {
use hal::Device;
self.device.raw().destroy_buffer(raw);
@ -704,7 +877,7 @@ pub struct StagingBuffer<A: HalApi> {
impl<A: HalApi> Drop for StagingBuffer<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.lock().take() {
resource_log!("Destroy raw StagingBuffer {:?}", self.info.label());
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_buffer(raw);
@ -725,9 +898,11 @@ impl<A: HalApi> Resource for StagingBuffer<A> {
fn as_info_mut(&mut self) -> &mut ResourceInfo<Self> {
&mut self.info
}
}
fn label(&self) -> &str {
"<StagingBuffer>"
impl<A: HalApi> ParentDevice<A> for StagingBuffer<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
@ -785,9 +960,27 @@ pub struct Texture<A: HalApi> {
pub(crate) bind_groups: Mutex<Vec<Weak<BindGroup<A>>>>,
}
impl<A: HalApi> Texture<A> {
/// Checks that the given texture usage contains the required texture usage,
/// returns an error otherwise.
pub(crate) fn check_usage(
&self,
expected: wgt::TextureUsages,
) -> Result<(), MissingTextureUsageError> {
if self.desc.usage.contains(expected) {
Ok(())
} else {
Err(MissingTextureUsageError {
res: self.error_ident(),
actual: self.desc.usage,
expected,
})
}
}
}
impl<A: HalApi> Drop for Texture<A> {
fn drop(&mut self) {
resource_log!("Destroy raw Texture {:?}", self.info.label());
use hal::Device;
let mut clear_mode = self.clear_mode.write();
let clear_mode = &mut *clear_mode;
@ -817,11 +1010,7 @@ impl<A: HalApi> Drop for Texture<A> {
};
if let Some(TextureInner::Native { raw }) = self.inner.take() {
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyTexture(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
self.device.raw().destroy_texture(raw);
}
@ -830,12 +1019,37 @@ impl<A: HalApi> Drop for Texture<A> {
}
impl<A: HalApi> Texture<A> {
pub(crate) fn try_inner<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&'a TextureInner<A>, DestroyedResourceError> {
self.inner
.get(guard)
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
pub(crate) fn raw<'a>(&'a self, snatch_guard: &'a SnatchGuard) -> Option<&'a A::Texture> {
self.inner.get(snatch_guard)?.raw()
}
pub(crate) fn is_destroyed(&self, guard: &SnatchGuard) -> bool {
self.inner.get(guard).is_none()
pub(crate) fn try_raw<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&'a A::Texture, DestroyedResourceError> {
self.inner
.get(guard)
.and_then(|t| t.raw())
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
pub(crate) fn check_destroyed<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<(), DestroyedResourceError> {
self.inner
.get(guard)
.map(|_| ())
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
pub(crate) fn inner_mut<'a>(
@ -875,12 +1089,6 @@ impl<A: HalApi> Texture<A> {
pub(crate) fn destroy(self: &Arc<Self>) -> Result<(), DestroyError> {
let device = &self.device;
let texture_id = self.info.id();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
trace.add(trace::Action::FreeTexture(texture_id));
}
let temp = {
let snatch_guard = device.snatchable_lock.write();
@ -911,15 +1119,14 @@ impl<A: HalApi> Texture<A> {
device: Arc::clone(&self.device),
tracker_index: self.info.tracker_index(),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
label: self.info.label.clone(),
label: self.label().to_owned(),
}))
};
let mut pending_writes = device.pending_writes.lock();
let pending_writes = pending_writes.as_mut().unwrap();
if pending_writes.dst_textures.contains_key(&texture_id) {
pending_writes.temp_resources.push(temp);
if pending_writes.contains_texture(self) {
pending_writes.consume_temp(temp);
} else {
let last_submit_index = self.info.submission_index();
device
@ -1095,18 +1302,13 @@ pub struct DestroyedTexture<A: HalApi> {
bind_groups: Vec<Weak<BindGroup<A>>>,
device: Arc<Device<A>>,
label: String,
pub(crate) id: TextureId,
pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
}
impl<A: HalApi> DestroyedTexture<A> {
pub fn label(&self) -> &dyn Debug {
if !self.label.is_empty() {
return &self.label;
}
&self.id
&self.label
}
}
@ -1126,11 +1328,6 @@ impl<A: HalApi> Drop for DestroyedTexture<A> {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw Texture (destroyed) {:?}", self.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyTexture(self.id));
}
unsafe {
use hal::Device;
self.device.raw().destroy_texture(raw);
@ -1245,6 +1442,12 @@ impl<A: HalApi> Resource for Texture<A> {
}
}
impl<A: HalApi> ParentDevice<A> for Texture<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
impl<A: HalApi> Borrow<TextureSelector> for Texture<A> {
fn borrow(&self) -> &TextureSelector {
&self.full_range
@ -1311,7 +1514,6 @@ pub struct TextureView<A: HalApi> {
// if it's a surface texture - it's none
pub(crate) parent: Arc<Texture<A>>,
pub(crate) device: Arc<Device<A>>,
//TODO: store device_id for quick access?
pub(crate) desc: HalTextureViewDescriptor,
pub(crate) format_features: wgt::TextureFormatFeatures,
/// This is `Err` only if the texture view is not renderable
@ -1324,13 +1526,7 @@ pub struct TextureView<A: HalApi> {
impl<A: HalApi> Drop for TextureView<A> {
fn drop(&mut self) {
if let Some(raw) = self.raw.take() {
resource_log!("Destroy raw TextureView {:?}", self.info.label());
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyTextureView(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_texture_view(raw);
@ -1343,13 +1539,24 @@ impl<A: HalApi> TextureView<A> {
pub(crate) fn raw<'a>(&'a self, snatch_guard: &'a SnatchGuard) -> Option<&'a A::TextureView> {
self.raw.get(snatch_guard)
}
pub(crate) fn try_raw<'a>(
&'a self,
guard: &'a SnatchGuard,
) -> Result<&A::TextureView, DestroyedResourceError> {
self.raw
.get(guard)
.ok_or_else(|| DestroyedResourceError(self.error_ident()))
}
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateTextureViewError {
#[error("Parent texture is invalid or destroyed")]
InvalidTexture,
#[error("TextureId {0:?} is invalid")]
InvalidTextureId(TextureId),
#[error(transparent)]
DestroyedResource(#[from] DestroyedResourceError),
#[error("Not enough memory left to create texture view")]
OutOfMemory,
#[error("Invalid texture view dimension `{view:?}` with texture of dimension `{texture:?}`")]
@ -1410,6 +1617,12 @@ impl<A: HalApi> Resource for TextureView<A> {
}
}
impl<A: HalApi> ParentDevice<A> for TextureView<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
/// Describes a [`Sampler`]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
@ -1452,13 +1665,8 @@ pub struct Sampler<A: HalApi> {
impl<A: HalApi> Drop for Sampler<A> {
fn drop(&mut self) {
resource_log!("Destroy raw Sampler {:?}", self.info.label());
if let Some(raw) = self.raw.take() {
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroySampler(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_sampler(raw);
@ -1531,6 +1739,12 @@ impl<A: HalApi> Resource for Sampler<A> {
}
}
impl<A: HalApi> ParentDevice<A> for Sampler<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum CreateQuerySetError {
@ -1556,13 +1770,8 @@ pub struct QuerySet<A: HalApi> {
impl<A: HalApi> Drop for QuerySet<A> {
fn drop(&mut self) {
resource_log!("Destroy raw QuerySet {:?}", self.info.label());
if let Some(raw) = self.raw.take() {
#[cfg(feature = "trace")]
if let Some(t) = self.device.trace.lock().as_mut() {
t.add(trace::Action::DestroyQuerySet(self.info.id()));
}
resource_log!("Destroy raw {}", self.error_ident());
unsafe {
use hal::Device;
self.device.raw().destroy_query_set(raw);
@ -1571,6 +1780,12 @@ impl<A: HalApi> Drop for QuerySet<A> {
}
}
impl<A: HalApi> ParentDevice<A> for QuerySet<A> {
fn device(&self) -> &Arc<Device<A>> {
&self.device
}
}
impl<A: HalApi> Resource for QuerySet<A> {
const TYPE: ResourceType = "QuerySet";

third_party/rust/wgpu-core/src/track/buffer.rs (vendored)

@ -10,14 +10,13 @@ use std::{borrow::Cow, marker::PhantomData, sync::Arc};
use super::{PendingTransition, ResourceTracker, TrackerIndex};
use crate::{
hal_api::HalApi,
id::BufferId,
lock::{rank, Mutex},
resource::{Buffer, Resource},
resource_log,
snatch::SnatchGuard,
storage::Storage,
track::{
invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
ResourceUses, UsageConflict,
ResourceUsageCompatibilityError, ResourceUses,
},
};
use hal::{BufferBarrier, BufferUses};
@ -89,18 +88,9 @@ impl<A: HalApi> BufferBindGroupState<A> {
}
/// Adds the given resource with the given state.
pub fn add_single<'a>(
&self,
storage: &'a Storage<Buffer<A>>,
id: BufferId,
state: BufferUses,
) -> Option<&'a Arc<Buffer<A>>> {
let buffer = storage.get(id).ok()?;
pub fn add_single(&self, buffer: &Arc<Buffer<A>>, state: BufferUses) {
let mut buffers = self.buffers.lock();
buffers.push((buffer.clone(), state));
Some(buffer)
}
}
@ -168,7 +158,7 @@ impl<A: HalApi> BufferUsageScope<A> {
pub unsafe fn merge_bind_group(
&mut self,
bind_group: &BufferBindGroupState<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let buffers = bind_group.buffers.lock();
for &(ref resource, state) in &*buffers {
let index = resource.as_info().tracker_index().as_usize();
@ -198,7 +188,10 @@ impl<A: HalApi> BufferUsageScope<A> {
///
/// If the given tracker uses IDs higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
pub fn merge_usage_scope(
&mut self,
scope: &Self,
) -> Result<(), ResourceUsageCompatibilityError> {
let incoming_size = scope.state.len();
if incoming_size > self.state.len() {
self.set_size(incoming_size);
@ -235,32 +228,11 @@ impl<A: HalApi> BufferUsageScope<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn merge_single<'a>(
pub fn merge_single(
&mut self,
storage: &'a Storage<Buffer<A>>,
id: BufferId,
buffer: &Arc<Buffer<A>>,
new_state: BufferUses,
) -> Result<&'a Arc<Buffer<A>>, UsageConflict> {
let buffer = storage
.get(id)
.map_err(|_| UsageConflict::BufferInvalid { id })?;
self.insert_merge_single(buffer.clone(), new_state)
.map(|_| buffer)
}
/// Merge a single state into the UsageScope, using an already resolved buffer.
///
/// If the resulting state is invalid, returns a usage
/// conflict with the details of the invalid state.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_merge_single(
&mut self,
buffer: Arc<Buffer<A>>,
new_state: BufferUses,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let index = buffer.info.tracker_index().as_usize();
self.allow_index(index);
@ -276,7 +248,7 @@ impl<A: HalApi> BufferUsageScope<A> {
index,
BufferStateProvider::Direct { state: new_state },
ResourceMetadataProvider::Direct {
resource: Cow::Owned(buffer),
resource: Cow::Owned(buffer.clone()),
},
)?;
}
@ -285,9 +257,6 @@ impl<A: HalApi> BufferUsageScope<A> {
}
}
pub(crate) type SetSingleResult<A> =
Option<(Arc<Buffer<A>>, Option<PendingTransition<BufferUses>>)>;
/// Stores all buffer state within a command buffer or device.
pub(crate) struct BufferTracker<A: HalApi> {
start: Vec<BufferUses>,
@ -334,13 +303,27 @@ impl<A: HalApi> ResourceTracker for BufferTracker<A> {
//RefCount 2 means that the resource is held just by the DeviceTracker and this suspected resource itself,
//so it has already been released by the user and is not inside the Registry/Storage
if existing_ref_count <= 2 {
resource_log!(
"BufferTracker::remove_abandoned: removing {}",
self.metadata.get_resource_unchecked(index).error_ident()
);
self.metadata.remove(index);
return true;
}
resource_log!(
"BufferTracker::remove_abandoned: not removing {}, ref count {}",
self.metadata.get_resource_unchecked(index).error_ident(),
existing_ref_count
);
return false;
}
}
resource_log!("BufferTracker::remove_abandoned: does not contain index {index:?}",);
true
}
}
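The `existing_ref_count <= 2` check relies on `Arc::strong_count`: when only the device tracker and the metadata entry itself still hold the resource, every user handle is gone and the entry can be dropped. A self-contained illustration of that invariant using plain `Arc`s:

use std::sync::Arc;

fn main() {
    // One Arc held by the "metadata" slot, one by the "device tracker".
    let metadata_entry = Arc::new("buffer");
    let device_tracker = metadata_entry.clone();

    // A user handle pushes the count above 2: not abandoned yet.
    let user_handle = metadata_entry.clone();
    assert_eq!(Arc::strong_count(&metadata_entry), 3);

    // Once the user drops their handle, the count falls back to 2 and
    // the tracker may remove (and thereby free) the resource.
    drop(user_handle);
    assert_eq!(Arc::strong_count(&metadata_entry), 2);
    let abandoned = Arc::strong_count(&metadata_entry) <= 2;
    assert!(abandoned);
    drop(device_tracker);
}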
@ -439,7 +422,11 @@ impl<A: HalApi> BufferTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_single(&mut self, buffer: &Arc<Buffer<A>>, state: BufferUses) -> SetSingleResult<A> {
pub fn set_single(
&mut self,
buffer: &Arc<Buffer<A>>,
state: BufferUses,
) -> Option<PendingTransition<BufferUses>> {
let index: usize = buffer.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -463,7 +450,7 @@ impl<A: HalApi> BufferTracker<A> {
strict_assert!(self.temp.len() <= 1);
Some((buffer.clone(), self.temp.pop()))
self.temp.pop()
}
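With `SetSingleResult` gone, `set_single` hands back at most one `PendingTransition` and no longer re-yields the buffer `Arc` the caller already holds. A simplified, runnable sketch of the new calling convention (the state and transition types below are stand-ins):

// Simplified stand-ins for the tracker API after this change.
#[derive(Debug, PartialEq)]
struct PendingTransition { from: u32, to: u32 }

struct BufferTracker { current: u32 }

impl BufferTracker {
    // Returns Some(transition) only when the tracked state changes.
    fn set_single(&mut self, new_state: u32) -> Option<PendingTransition> {
        if self.current == new_state {
            None
        } else {
            let t = PendingTransition { from: self.current, to: new_state };
            self.current = new_state;
            Some(t)
        }
    }
}

fn main() {
    let mut tracker = BufferTracker { current: 0 };
    // At most one barrier to record; no Arc is handed back any more.
    if let Some(transition) = tracker.set_single(2) {
        println!("record barrier {transition:?}");
    }
    assert_eq!(tracker.set_single(2), None); // no state change, no barrier
}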
/// Sets the given state for all buffers in the given tracker.
@ -657,7 +644,7 @@ unsafe fn insert_or_merge<A: HalApi>(
index: usize,
state_provider: BufferStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
if !currently_owned {
@ -764,8 +751,8 @@ unsafe fn insert<A: HalApi>(
}
*current_states.get_unchecked_mut(index) = new_end_state;
let resource = metadata_provider.get_own(index);
resource_metadata.insert(index, resource);
let resource = metadata_provider.get(index);
resource_metadata.insert(index, resource.clone());
}
}
@ -776,15 +763,15 @@ unsafe fn merge<A: HalApi>(
index: usize,
state_provider: BufferStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, Buffer<A>>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let current_state = unsafe { current_states.get_unchecked_mut(index) };
let new_state = unsafe { state_provider.get_state(index) };
let merged_state = *current_state | new_state;
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_buffer(
unsafe { metadata_provider.get_own(index).info.id() },
return Err(ResourceUsageCompatibilityError::from_buffer(
unsafe { metadata_provider.get(index) },
*current_state,
new_state,
));


@ -182,21 +182,20 @@ pub(super) enum ResourceMetadataProvider<'a, T: Resource> {
Indirect { metadata: &'a ResourceMetadata<T> },
}
impl<T: Resource> ResourceMetadataProvider<'_, T> {
/// Get the epoch and an owned refcount from this.
/// Get a reference to the resource from this.
///
/// # Safety
///
/// - The index must be in bounds of the metadata tracker if this uses an indirect source.
/// - info must be Some if this uses a Resource source.
#[inline(always)]
pub(super) unsafe fn get_own(self, index: usize) -> Arc<T> {
pub(super) unsafe fn get(&self, index: usize) -> &Arc<T> {
match self {
ResourceMetadataProvider::Direct { resource } => resource.into_owned(),
ResourceMetadataProvider::Direct { resource } => resource,
ResourceMetadataProvider::Indirect { metadata } => {
metadata.tracker_assert_in_bounds(index);
{
let resource = unsafe { metadata.resources.get_unchecked(index) };
unsafe { resource.clone().unwrap_unchecked() }
let resource = unsafe { metadata.resources.get_unchecked(index) }.as_ref();
unsafe { resource.unwrap_unchecked() }
}
}
}
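Switching from `get_own(self) -> Arc<T>` to `get(&self) -> &Arc<T>` means error-reporting paths can borrow the resource without bumping its refcount, and only the insert path clones explicitly. A small illustration of the difference:

use std::sync::Arc;

// Old shape: consumes the provider and hands out an owned Arc.
fn get_own(resource: Arc<String>) -> Arc<String> {
    resource
}

// New shape: borrows, so no refcount traffic on error paths.
fn get(resource: &Arc<String>) -> &Arc<String> {
    resource
}

fn main() {
    let res = Arc::new(String::from("texture"));
    let before = Arc::strong_count(&res);

    // Borrowing for an error message leaves the count alone...
    let borrowed = get(&res);
    assert_eq!(Arc::strong_count(borrowed), before);

    // ...and the caller clones only when it actually stores the resource.
    let stored = borrowed.clone();
    assert_eq!(Arc::strong_count(&stored), before + 1);

    // The old accessor would have moved the provider's Arc out instead.
    let owned = get_own(stored);
    assert_eq!(Arc::strong_count(&owned), before + 1);
}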

third_party/rust/wgpu-core/src/track/mod.rs (vendored)

@ -104,9 +104,9 @@ mod texture;
use crate::{
binding_model, command, conv,
hal_api::HalApi,
id,
lock::{rank, Mutex, RwLock},
pipeline, resource,
pipeline,
resource::{self, Resource, ResourceErrorIdent},
snatch::SnatchGuard,
};
@ -270,7 +270,7 @@ impl PendingTransition<hal::BufferUses> {
buf: &'a resource::Buffer<A>,
snatch_guard: &'a SnatchGuard<'a>,
) -> hal::BufferBarrier<'a, A> {
let buffer = buf.raw.get(snatch_guard).expect("Buffer is destroyed");
let buffer = buf.raw(snatch_guard).expect("Buffer is destroyed");
hal::BufferBarrier {
buffer,
usage: self.usage,
@ -338,34 +338,32 @@ fn skip_barrier<T: ResourceUses>(old_state: T, new_state: T) -> bool {
old_state == new_state && old_state.all_ordered()
}
#[derive(Clone, Debug, Error, Eq, PartialEq)]
pub enum UsageConflict {
#[error("Attempted to use invalid buffer")]
BufferInvalid { id: id::BufferId },
#[error("Attempted to use invalid texture")]
TextureInvalid { id: id::TextureId },
#[error("Attempted to use buffer with {invalid_use}.")]
#[derive(Clone, Debug, Error)]
pub enum ResourceUsageCompatibilityError {
#[error("Attempted to use {res} with {invalid_use}.")]
Buffer {
id: id::BufferId,
res: ResourceErrorIdent,
invalid_use: InvalidUse<hal::BufferUses>,
},
#[error("Attempted to use a texture (mips {mip_levels:?} layers {array_layers:?}) with {invalid_use}.")]
#[error(
"Attempted to use {res} (mips {mip_levels:?} layers {array_layers:?}) with {invalid_use}."
)]
Texture {
id: id::TextureId,
res: ResourceErrorIdent,
mip_levels: ops::Range<u32>,
array_layers: ops::Range<u32>,
invalid_use: InvalidUse<hal::TextureUses>,
},
}
impl UsageConflict {
fn from_buffer(
id: id::BufferId,
impl ResourceUsageCompatibilityError {
fn from_buffer<A: HalApi>(
buffer: &resource::Buffer<A>,
current_state: hal::BufferUses,
new_state: hal::BufferUses,
) -> Self {
Self::Buffer {
id,
res: buffer.error_ident(),
invalid_use: InvalidUse {
current_state,
new_state,
@ -373,14 +371,14 @@ impl UsageConflict {
}
}
fn from_texture(
id: id::TextureId,
fn from_texture<A: HalApi>(
texture: &resource::Texture<A>,
selector: TextureSelector,
current_state: hal::TextureUses,
new_state: hal::TextureUses,
) -> Self {
Self::Texture {
id,
res: texture.error_ident(),
mip_levels: selector.mips,
array_layers: selector.layers,
invalid_use: InvalidUse {
@ -391,23 +389,9 @@ impl UsageConflict {
}
}
impl crate::error::PrettyError for UsageConflict {
impl crate::error::PrettyError for ResourceUsageCompatibilityError {
fn fmt_pretty(&self, fmt: &mut crate::error::ErrorFormatter) {
fmt.error(self);
match *self {
Self::BufferInvalid { id } => {
fmt.buffer_label(&id);
}
Self::TextureInvalid { id } => {
fmt.texture_label(&id);
}
Self::Buffer { id, .. } => {
fmt.buffer_label(&id);
}
Self::Texture { id, .. } => {
fmt.texture_label(&id);
}
}
}
}
@ -525,7 +509,7 @@ impl<A: HalApi> RenderBundleScope<A> {
pub unsafe fn merge_bind_group(
&mut self,
bind_group: &BindGroupStates<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
unsafe { self.buffers.write().merge_bind_group(&bind_group.buffers)? };
unsafe {
self.textures
@ -595,7 +579,7 @@ impl<'a, A: HalApi> UsageScope<'a, A> {
pub unsafe fn merge_bind_group(
&mut self,
bind_group: &BindGroupStates<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
unsafe {
self.buffers.merge_bind_group(&bind_group.buffers)?;
self.textures.merge_bind_group(&bind_group.textures)?;
@ -616,7 +600,7 @@ impl<'a, A: HalApi> UsageScope<'a, A> {
pub unsafe fn merge_render_bundle(
&mut self,
render_bundle: &RenderBundleScope<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
self.buffers
.merge_usage_scope(&*render_bundle.buffers.read())?;
self.textures
@ -707,7 +691,7 @@ impl<A: HalApi> Tracker<A> {
pub unsafe fn add_from_render_bundle(
&mut self,
render_bundle: &RenderBundleScope<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
self.bind_groups
.add_from_tracker(&*render_bundle.bind_groups.read());
self.render_pipelines


@ -7,23 +7,18 @@
use std::sync::Arc;
use crate::{
id::Id,
lock::{rank, Mutex},
resource::Resource,
resource_log,
storage::Storage,
track::ResourceMetadata,
};
use super::{ResourceTracker, TrackerIndex};
/// Satisfy clippy.
type Pair<T> = (Id<<T as Resource>::Marker>, Arc<T>);
/// Stores all the resources that a bind group stores.
#[derive(Debug)]
pub(crate) struct StatelessBindGroupState<T: Resource> {
resources: Mutex<Vec<Pair<T>>>,
resources: Mutex<Vec<Arc<T>>>,
}
impl<T: Resource> StatelessBindGroupState<T> {
@ -39,37 +34,25 @@ impl<T: Resource> StatelessBindGroupState<T> {
/// accesses will be in a constant ascending order.
pub(crate) fn optimize(&self) {
let mut resources = self.resources.lock();
resources.sort_unstable_by_key(|&(id, _)| id.unzip().0);
resources.sort_unstable_by_key(|resource| resource.as_info().tracker_index());
}
/// Returns a list of all resources tracked. May contain duplicates.
pub fn used_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
let resources = self.resources.lock();
resources
.iter()
.map(|(_, resource)| resource.clone())
.collect::<Vec<_>>()
.into_iter()
resources.iter().cloned().collect::<Vec<_>>().into_iter()
}
/// Returns a list of all resources tracked. May contain duplicates.
pub fn drain_resources(&self) -> impl Iterator<Item = Arc<T>> + '_ {
let mut resources = self.resources.lock();
resources
.drain(..)
.map(|(_, r)| r)
.collect::<Vec<_>>()
.into_iter()
resources.drain(..).collect::<Vec<_>>().into_iter()
}
/// Adds the given resource.
pub fn add_single<'a>(&self, storage: &'a Storage<T>, id: Id<T::Marker>) -> Option<&'a T> {
let resource = storage.get(id).ok()?;
pub fn add_single(&self, resource: &Arc<T>) {
let mut resources = self.resources.lock();
resources.push((id, resource.clone()));
Some(resource)
resources.push(resource.clone());
}
}
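`optimize` now sorts by `tracker_index` rather than by the unzipped id, so iterating a bind group's resources touches the tracker's metadata slots in ascending order. A runnable sketch of that sort, with a stand-in resource type:

use std::sync::Arc;

// Stand-in resource carrying the tracker slot it occupies.
struct Resource { tracker_index: usize }

fn optimize(resources: &mut Vec<Arc<Resource>>) {
    // Sorting by tracker index makes later per-slot accesses a forward
    // scan over the metadata vectors (no id unzipping needed).
    resources.sort_unstable_by_key(|r| r.tracker_index);
}

fn main() {
    let mut resources = vec![
        Arc::new(Resource { tracker_index: 7 }),
        Arc::new(Resource { tracker_index: 2 }),
        Arc::new(Resource { tracker_index: 5 }),
    ];
    optimize(&mut resources);
    let order: Vec<usize> = resources.iter().map(|r| r.tracker_index).collect();
    assert_eq!(order, vec![2, 5, 7]);
}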
@ -94,8 +77,6 @@ impl<T: Resource> ResourceTracker for StatelessTracker<T> {
return false;
}
resource_log!("StatelessTracker::remove_abandoned {index:?}");
self.tracker_assert_in_bounds(index);
unsafe {
@ -104,13 +85,32 @@ impl<T: Resource> ResourceTracker for StatelessTracker<T> {
//RefCount 2 means that the resource is held just by the DeviceTracker and this suspected resource itself,
//so it has already been released by the user and is not inside the Registry/Storage
if existing_ref_count <= 2 {
resource_log!(
"StatelessTracker<{}>::remove_abandoned: removing {}",
T::TYPE,
self.metadata.get_resource_unchecked(index).error_ident()
);
self.metadata.remove(index);
return true;
}
resource_log!(
"StatelessTracker<{}>::remove_abandoned: not removing {}, ref count {}",
T::TYPE,
self.metadata.get_resource_unchecked(index).error_ident(),
existing_ref_count
);
return false;
}
}
resource_log!(
"StatelessTracker<{}>::remove_abandoned: does not contain index {index:?}",
T::TYPE,
);
true
}
}
@ -175,13 +175,7 @@ impl<T: Resource> StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn add_single<'a>(
&mut self,
storage: &'a Storage<T>,
id: Id<T::Marker>,
) -> Option<&'a Arc<T>> {
let resource = storage.get(id).ok()?;
pub fn add_single(&mut self, resource: &Arc<T>) {
let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -191,8 +185,6 @@ impl<T: Resource> StatelessTracker<T> {
unsafe {
self.metadata.insert(index, resource.clone());
}
Some(resource)
}
/// Adds the given resources from the given tracker.


@ -26,10 +26,11 @@ use crate::{
hal_api::HalApi,
lock::{rank, Mutex},
resource::{Resource, Texture, TextureInner},
resource_log,
snatch::SnatchGuard,
track::{
invalid_resource_state, skip_barrier, ResourceMetadata, ResourceMetadataProvider,
ResourceUses, UsageConflict,
ResourceUsageCompatibilityError, ResourceUses,
},
};
use hal::TextureUses;
@ -188,19 +189,18 @@ impl<A: HalApi> TextureBindGroupState<A> {
}
/// Adds the given resource with the given state.
pub fn add_single<'a>(
pub fn add_single(
&self,
texture: &'a Arc<Texture<A>>,
texture: &Arc<Texture<A>>,
selector: Option<TextureSelector>,
state: TextureUses,
) -> Option<&'a Arc<Texture<A>>> {
) {
let mut textures = self.textures.lock();
textures.push(TextureBindGroupStateData {
selector,
texture: texture.clone(),
usage: state,
});
Some(texture)
}
}
@ -295,7 +295,10 @@ impl<A: HalApi> TextureUsageScope<A> {
///
/// If the given tracker uses IDs higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
pub fn merge_usage_scope(
&mut self,
scope: &Self,
) -> Result<(), ResourceUsageCompatibilityError> {
let incoming_size = scope.set.simple.len();
if incoming_size > self.set.simple.len() {
self.set_size(incoming_size);
@ -339,7 +342,7 @@ impl<A: HalApi> TextureUsageScope<A> {
pub unsafe fn merge_bind_group(
&mut self,
bind_group: &TextureBindGroupState<A>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let textures = bind_group.textures.lock();
for t in &*textures {
unsafe { self.merge_single(&t.texture, t.selector.clone(), t.usage)? };
@ -366,7 +369,7 @@ impl<A: HalApi> TextureUsageScope<A> {
texture: &Arc<Texture<A>>,
selector: Option<TextureSelector>,
new_state: TextureUses,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let index = texture.as_info().tracker_index().as_usize();
self.tracker_assert_in_bounds(index);
@ -424,15 +427,29 @@ impl<A: HalApi> ResourceTracker for TextureTracker<A> {
//RefCount 2 means that the resource is held just by the DeviceTracker and this suspected resource itself,
//so it has already been released by the user and is not inside the Registry/Storage
if existing_ref_count <= 2 {
resource_log!(
"TextureTracker::remove_abandoned: removing {}",
self.metadata.get_resource_unchecked(index).error_ident()
);
self.start_set.complex.remove(&index);
self.end_set.complex.remove(&index);
self.metadata.remove(index);
return true;
}
resource_log!(
"TextureTracker::remove_abandoned: not removing {}, ref count {}",
self.metadata.get_resource_unchecked(index).error_ident(),
existing_ref_count
);
return false;
}
}
resource_log!("TextureTracker::remove_abandoned: does not contain index {index:?}",);
true
}
}
@ -561,7 +578,7 @@ impl<A: HalApi> TextureTracker<A> {
texture: &Arc<Texture<A>>,
selector: TextureSelector,
new_state: TextureUses,
) -> Option<Drain<'_, PendingTransition<TextureUses>>> {
) -> Drain<'_, PendingTransition<TextureUses>> {
let index = texture.as_info().tracker_index().as_usize();
self.allow_index(index);
@ -587,7 +604,7 @@ impl<A: HalApi> TextureTracker<A> {
)
}
Some(self.temp.drain(..))
self.temp.drain(..)
}
/// Sets the given state for all texture in the given tracker.
@ -869,7 +886,7 @@ unsafe fn insert_or_merge<A: HalApi>(
index: usize,
state_provider: TextureStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let currently_owned = unsafe { resource_metadata.contains_unchecked(index) };
if !currently_owned {
@ -1050,8 +1067,8 @@ unsafe fn insert<A: HalApi>(
}
unsafe {
let resource = metadata_provider.get_own(index);
resource_metadata.insert(index, resource);
let resource = metadata_provider.get(index);
resource_metadata.insert(index, resource.clone());
}
}
@ -1062,7 +1079,7 @@ unsafe fn merge<A: HalApi>(
index: usize,
state_provider: TextureStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, Texture<A>>,
) -> Result<(), UsageConflict> {
) -> Result<(), ResourceUsageCompatibilityError> {
let current_simple = unsafe { current_state_set.simple.get_unchecked_mut(index) };
let current_state = if *current_simple == TextureUses::COMPLEX {
SingleOrManyStates::Many(unsafe {
@ -1081,8 +1098,8 @@ unsafe fn merge<A: HalApi>(
log::trace!("\ttex {index}: merge simple {current_simple:?} + {new_simple:?}");
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
unsafe { metadata_provider.get_own(index).info.id() },
return Err(ResourceUsageCompatibilityError::from_texture(
unsafe { metadata_provider.get(index) },
texture_selector.clone(),
*current_simple,
new_simple,
@ -1108,8 +1125,8 @@ unsafe fn merge<A: HalApi>(
log::trace!("\ttex {index}: merge {selector:?} {current_simple:?} + {new_state:?}");
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
unsafe { metadata_provider.get_own(index).info.id() },
return Err(ResourceUsageCompatibilityError::from_texture(
unsafe { metadata_provider.get(index) },
selector,
*current_simple,
new_state,
@ -1149,8 +1166,8 @@ unsafe fn merge<A: HalApi>(
);
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
unsafe { metadata_provider.get_own(index).info.id() },
return Err(ResourceUsageCompatibilityError::from_texture(
unsafe { metadata_provider.get(index) },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
@ -1190,8 +1207,8 @@ unsafe fn merge<A: HalApi>(
);
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
unsafe { metadata_provider.get_own(index).info.id() },
return Err(ResourceUsageCompatibilityError::from_texture(
unsafe { metadata_provider.get(index) },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),

third_party/rust/wgpu-core/src/validation.rs (vendored)

@ -1,8 +1,4 @@
use crate::{
device::bgl,
id::{markers::Buffer, Id},
FastHashMap, FastHashSet,
};
use crate::{device::bgl, FastHashMap, FastHashSet};
use arrayvec::ArrayVec;
use std::{collections::hash_map::Entry, fmt};
use thiserror::Error;
@ -137,54 +133,6 @@ pub struct Interface {
entry_points: FastHashMap<(naga::ShaderStage, String), EntryPoint>,
}
#[derive(Clone, Debug, Error)]
#[error(
"Usage flags {actual:?} for buffer {id:?} do not contain required usage flags {expected:?}"
)]
pub struct MissingBufferUsageError {
pub(crate) id: Id<Buffer>,
pub(crate) actual: wgt::BufferUsages,
pub(crate) expected: wgt::BufferUsages,
}
/// Checks that the given buffer usage contains the required buffer usage,
/// returns an error otherwise.
pub fn check_buffer_usage(
id: Id<Buffer>,
actual: wgt::BufferUsages,
expected: wgt::BufferUsages,
) -> Result<(), MissingBufferUsageError> {
if !actual.contains(expected) {
Err(MissingBufferUsageError {
id,
actual,
expected,
})
} else {
Ok(())
}
}
#[derive(Clone, Debug, Error)]
#[error("Texture usage is {actual:?} which does not contain required usage {expected:?}")]
pub struct MissingTextureUsageError {
pub(crate) actual: wgt::TextureUsages,
pub(crate) expected: wgt::TextureUsages,
}
/// Checks that the given texture usage contains the required texture usage,
/// returns an error otherwise.
pub fn check_texture_usage(
actual: wgt::TextureUsages,
expected: wgt::TextureUsages,
) -> Result<(), MissingTextureUsageError> {
if !actual.contains(expected) {
Err(MissingTextureUsageError { actual, expected })
} else {
Ok(())
}
}
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum BindingError {

Some file diffs are hidden because one or more lines are too long.

third_party/rust/wgpu-hal/src/dx12/device.rs (vendored)

@ -181,6 +181,7 @@ impl super::Device {
null_rtv_handle,
mem_allocator,
dxc_container,
counters: Default::default(),
})
}
@ -377,6 +378,8 @@ impl crate::Device for super::Device {
unsafe { resource.SetName(cwstr.as_ptr()) };
}
self.counters.buffers.add(1);
Ok(super::Buffer {
resource,
size,
@ -388,11 +391,14 @@ impl crate::Device for super::Device {
// Only happens when it's using the windows_rs feature and there's an allocation
if let Some(alloc) = buffer.allocation.take() {
super::suballocation::free_buffer_allocation(
self,
alloc,
// SAFETY: for allocations to exist, the allocator must exist
unsafe { self.mem_allocator.as_ref().unwrap_unchecked() },
);
}
self.counters.buffers.sub(1);
}
unsafe fn map_buffer(
@ -459,6 +465,8 @@ impl crate::Device for super::Device {
unsafe { resource.SetName(cwstr.as_ptr()) };
}
self.counters.textures.add(1);
Ok(super::Texture {
resource,
format: desc.format,
@ -473,11 +481,14 @@ impl crate::Device for super::Device {
unsafe fn destroy_texture(&self, mut texture: super::Texture) {
if let Some(alloc) = texture.allocation.take() {
super::suballocation::free_texture_allocation(
self,
alloc,
// SAFETY: for allocations to exist, the allocator must exist
unsafe { self.mem_allocator.as_ref().unwrap_unchecked() },
);
}
self.counters.textures.sub(1);
}
unsafe fn create_texture_view(
@ -487,6 +498,8 @@ impl crate::Device for super::Device {
) -> Result<super::TextureView, DeviceError> {
let view_desc = desc.to_internal(texture);
self.counters.texture_views.add(1);
Ok(super::TextureView {
raw_format: view_desc.rtv_dsv_format,
aspects: view_desc.aspects,
@ -583,6 +596,7 @@ impl crate::Device for super::Device {
},
})
}
unsafe fn destroy_texture_view(&self, view: super::TextureView) {
if view.handle_srv.is_some() || view.handle_uav.is_some() {
let mut pool = self.srv_uav_pool.lock();
@ -605,6 +619,8 @@ impl crate::Device for super::Device {
pool.free_handle(handle);
}
}
self.counters.texture_views.sub(1);
}
unsafe fn create_sampler(
@ -643,10 +659,14 @@ impl crate::Device for super::Device {
desc.lod_clamp.clone(),
);
self.counters.samplers.add(1);
Ok(super::Sampler { handle })
}
unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
self.sampler_pool.lock().free_handle(sampler.handle);
self.counters.samplers.sub(1);
}
unsafe fn create_command_encoder(
@ -663,6 +683,8 @@ impl crate::Device for super::Device {
unsafe { allocator.SetName(cwstr.as_ptr()) };
}
self.counters.command_encoders.add(1);
Ok(super::CommandEncoder {
allocator,
device: self.raw.clone(),
@ -675,7 +697,10 @@ impl crate::Device for super::Device {
end_of_pass_timer_query: None,
})
}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {
self.counters.command_encoders.sub(1);
}
unsafe fn create_bind_group_layout(
&self,
@ -698,6 +723,8 @@ impl crate::Device for super::Device {
}
}
self.counters.bind_group_layouts.add(1);
let num_views = num_buffer_views + num_texture_views;
Ok(super::BindGroupLayout {
entries: desc.entries.to_vec(),
@ -724,7 +751,10 @@ impl crate::Device for super::Device {
copy_counts: vec![1; num_views.max(num_samplers) as usize],
})
}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
self.counters.bind_group_layouts.sub(1);
}
unsafe fn create_pipeline_layout(
&self,
@ -1063,6 +1093,8 @@ impl crate::Device for super::Device {
unsafe { raw.SetName(cwstr.as_ptr()) };
}
self.counters.pipeline_layouts.add(1);
Ok(super::PipelineLayout {
shared: super::PipelineLayoutShared {
signature: raw,
@ -1081,7 +1113,10 @@ impl crate::Device for super::Device {
},
})
}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
self.counters.pipeline_layouts.sub(1);
}
unsafe fn create_bind_group(
&self,
@ -1253,12 +1288,15 @@ impl crate::Device for super::Device {
None => None,
};
self.counters.bind_groups.add(1);
Ok(super::BindGroup {
handle_views,
handle_samplers,
dynamic_buffers,
})
}
unsafe fn destroy_bind_group(&self, group: super::BindGroup) {
if let Some(dual) = group.handle_views {
self.shared.heap_views.free_slice(dual);
@ -1266,6 +1304,8 @@ impl crate::Device for super::Device {
if let Some(dual) = group.handle_samplers {
self.shared.heap_samplers.free_slice(dual);
}
self.counters.bind_groups.sub(1);
}
unsafe fn create_shader_module(
@ -1273,6 +1313,8 @@ impl crate::Device for super::Device {
desc: &crate::ShaderModuleDescriptor,
shader: crate::ShaderInput,
) -> Result<super::ShaderModule, crate::ShaderError> {
self.counters.shader_modules.add(1);
let raw_name = desc.label.and_then(|label| ffi::CString::new(label).ok());
match shader {
crate::ShaderInput::Naga(naga) => Ok(super::ShaderModule { naga, raw_name }),
@ -1282,6 +1324,7 @@ impl crate::Device for super::Device {
}
}
unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
self.counters.shader_modules.sub(1);
// just drop
}
@ -1463,6 +1506,8 @@ impl crate::Device for super::Device {
unsafe { raw.SetName(cwstr.as_ptr()) };
}
self.counters.render_pipelines.add(1);
Ok(super::RenderPipeline {
raw,
layout: desc.layout.shared.clone(),
@ -1470,7 +1515,9 @@ impl crate::Device for super::Device {
vertex_strides,
})
}
unsafe fn destroy_render_pipeline(&self, _pipeline: super::RenderPipeline) {}
unsafe fn destroy_render_pipeline(&self, _pipeline: super::RenderPipeline) {
self.counters.render_pipelines.sub(1);
}
unsafe fn create_compute_pipeline(
&self,
@ -1502,12 +1549,17 @@ impl crate::Device for super::Device {
unsafe { raw.SetName(cwstr.as_ptr()) };
}
self.counters.compute_pipelines.add(1);
Ok(super::ComputePipeline {
raw,
layout: desc.layout.shared.clone(),
})
}
unsafe fn destroy_compute_pipeline(&self, _pipeline: super::ComputePipeline) {}
unsafe fn destroy_compute_pipeline(&self, _pipeline: super::ComputePipeline) {
self.counters.compute_pipelines.sub(1);
}
unsafe fn create_pipeline_cache(
&self,
@ -1548,9 +1600,14 @@ impl crate::Device for super::Device {
unsafe { raw.SetName(cwstr.as_ptr()) };
}
self.counters.query_sets.add(1);
Ok(super::QuerySet { raw, raw_ty })
}
unsafe fn destroy_query_set(&self, _set: super::QuerySet) {}
unsafe fn destroy_query_set(&self, _set: super::QuerySet) {
self.counters.query_sets.sub(1);
}
unsafe fn create_fence(&self) -> Result<super::Fence, DeviceError> {
let mut raw = d3d12::Fence::null();
@ -1565,9 +1622,14 @@ impl crate::Device for super::Device {
hr.into_device_result("Fence creation")?;
null_comptr_check(&raw)?;
self.counters.fences.add(1);
Ok(super::Fence { raw })
}
unsafe fn destroy_fence(&self, _fence: super::Fence) {}
unsafe fn destroy_fence(&self, _fence: super::Fence) {
self.counters.fences.sub(1);
}
unsafe fn get_fence_value(
&self,
fence: &super::Fence,
@ -1711,4 +1773,8 @@ impl crate::Device for super::Device {
// Destroy a D3D12 resource as per-usual.
todo!()
}
fn get_internal_counters(&self) -> wgt::HalCounters {
self.counters.clone()
}
}

third_party/rust/wgpu-hal/src/dx12/mod.rs (vendored)

@ -261,6 +261,7 @@ pub struct Device {
null_rtv_handle: descriptor::Handle,
mem_allocator: Option<Mutex<suballocation::GpuAllocatorWrapper>>,
dxc_container: Option<Arc<shader_compilation::DxcContainer>>,
counters: wgt::HalCounters,
}
unsafe impl Send for Device {}
@ -761,8 +762,8 @@ impl crate::Surface for Surface {
};
match &self.target {
&SurfaceTarget::WndHandle(_) | &SurfaceTarget::SurfaceHandle(_) => {}
&SurfaceTarget::Visual(ref visual) => {
SurfaceTarget::WndHandle(_) | SurfaceTarget::SurfaceHandle(_) => {}
SurfaceTarget::Visual(visual) => {
if let Err(err) =
unsafe { visual.SetContent(swap_chain1.as_unknown()) }.into_result()
{
@ -772,7 +773,7 @@ impl crate::Surface for Surface {
));
}
}
&SurfaceTarget::SwapChainPanel(ref swap_chain_panel) => {
SurfaceTarget::SwapChainPanel(swap_chain_panel) => {
if let Err(err) =
unsafe { swap_chain_panel.SetSwapChain(swap_chain1.as_ptr()) }
.into_result()


@ -118,6 +118,11 @@ mod placed {
null_comptr_check(resource)?;
device
.counters
.buffer_memory
.add(allocation.size() as isize);
Ok((hr, Some(AllocationWrapper { allocation })))
}
@ -167,13 +172,23 @@ mod placed {
null_comptr_check(resource)?;
device
.counters
.texture_memory
.add(allocation.size() as isize);
Ok((hr, Some(AllocationWrapper { allocation })))
}
pub(crate) fn free_buffer_allocation(
device: &crate::dx12::Device,
allocation: AllocationWrapper,
allocator: &Mutex<GpuAllocatorWrapper>,
) {
device
.counters
.buffer_memory
.sub(allocation.allocation.size() as isize);
match allocator.lock().allocator.free(allocation.allocation) {
Ok(_) => (),
// TODO: Don't panic here
@ -182,9 +197,14 @@ mod placed {
}
pub(crate) fn free_texture_allocation(
device: &crate::dx12::Device,
allocation: AllocationWrapper,
allocator: &Mutex<GpuAllocatorWrapper>,
) {
device
.counters
.texture_memory
.sub(allocation.allocation.size() as isize);
match allocator.lock().allocator.free(allocation.allocation) {
Ok(_) => (),
// TODO: Don't panic here
@ -352,6 +372,7 @@ mod committed {
#[allow(unused)]
pub(crate) fn free_buffer_allocation(
_device: &crate::dx12::Device,
_allocation: AllocationWrapper,
_allocator: &Mutex<GpuAllocatorWrapper>,
) {
@ -360,6 +381,7 @@ mod committed {
#[allow(unused)]
pub(crate) fn free_texture_allocation(
_device: &crate::dx12::Device,
_allocation: AllocationWrapper,
_allocator: &Mutex<GpuAllocatorWrapper>,
) {

third_party/rust/wgpu-hal/src/empty.rs (vendored)

@ -276,6 +276,10 @@ impl crate::Device for Context {
Default::default()
}
unsafe fn destroy_acceleration_structure(&self, _acceleration_structure: Resource) {}
fn get_internal_counters(&self) -> wgt::HalCounters {
Default::default()
}
}
impl crate::CommandEncoder for Encoder {


@ -967,6 +967,7 @@ impl crate::Adapter for super::Adapter {
main_vao,
#[cfg(all(native, feature = "renderdoc"))]
render_doc: Default::default(),
counters: Default::default(),
},
queue: super::Queue {
shared: Arc::clone(&self.shared),

third_party/rust/wgpu-hal/src/gles/device.rs (vendored)

@ -632,6 +632,8 @@ impl crate::Device for super::Device {
None
};
self.counters.buffers.add(1);
Ok(super::Buffer {
raw,
target,
@ -640,11 +642,14 @@ impl crate::Device for super::Device {
data,
})
}
unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
if let Some(raw) = buffer.raw {
let gl = &self.shared.context.lock();
unsafe { gl.delete_buffer(raw) };
}
self.counters.buffers.sub(1);
}
unsafe fn map_buffer(
@ -941,6 +946,8 @@ impl crate::Device for super::Device {
super::TextureInner::Texture { raw, target }
};
self.counters.textures.add(1);
Ok(super::Texture {
inner,
drop_guard: None,
@ -951,6 +958,7 @@ impl crate::Device for super::Device {
copy_size: desc.copy_extent(),
})
}
unsafe fn destroy_texture(&self, texture: super::Texture) {
if texture.drop_guard.is_none() {
let gl = &self.shared.context.lock();
@ -970,6 +978,8 @@ impl crate::Device for super::Device {
// For clarity, we explicitly drop the drop guard. Although this has no real semantic effect as the
// end of the scope will drop the drop guard since this function takes ownership of the texture.
drop(texture.drop_guard);
self.counters.textures.sub(1);
}
unsafe fn create_texture_view(
@ -977,6 +987,7 @@ impl crate::Device for super::Device {
texture: &super::Texture,
desc: &crate::TextureViewDescriptor,
) -> Result<super::TextureView, crate::DeviceError> {
self.counters.texture_views.add(1);
Ok(super::TextureView {
//TODO: use `conv::map_view_dimension(desc.dimension)`?
inner: texture.inner.clone(),
@ -986,7 +997,10 @@ impl crate::Device for super::Device {
format: texture.format,
})
}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
self.counters.texture_views.sub(1);
}
unsafe fn create_sampler(
&self,
@ -1080,34 +1094,47 @@ impl crate::Device for super::Device {
}
}
self.counters.samplers.add(1);
Ok(super::Sampler { raw })
}
unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
let gl = &self.shared.context.lock();
unsafe { gl.delete_sampler(sampler.raw) };
self.counters.samplers.sub(1);
}
unsafe fn create_command_encoder(
&self,
_desc: &crate::CommandEncoderDescriptor<super::Api>,
) -> Result<super::CommandEncoder, crate::DeviceError> {
self.counters.command_encoders.add(1);
Ok(super::CommandEncoder {
cmd_buffer: super::CommandBuffer::default(),
state: Default::default(),
private_caps: self.shared.private_caps,
})
}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {
self.counters.command_encoders.sub(1);
}
unsafe fn create_bind_group_layout(
&self,
desc: &crate::BindGroupLayoutDescriptor,
) -> Result<super::BindGroupLayout, crate::DeviceError> {
self.counters.bind_group_layouts.add(1);
Ok(super::BindGroupLayout {
entries: Arc::from(desc.entries),
})
}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
self.counters.bind_group_layouts.sub(1);
}
unsafe fn create_pipeline_layout(
&self,
@ -1184,6 +1211,8 @@ impl crate::Device for super::Device {
});
}
self.counters.pipeline_layouts.add(1);
Ok(super::PipelineLayout {
group_infos: group_infos.into_boxed_slice(),
naga_options: glsl::Options {
@ -1194,7 +1223,10 @@ impl crate::Device for super::Device {
},
})
}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
self.counters.pipeline_layouts.sub(1);
}
unsafe fn create_bind_group(
&self,
@ -1270,17 +1302,24 @@ impl crate::Device for super::Device {
contents.push(binding);
}
self.counters.bind_groups.add(1);
Ok(super::BindGroup {
contents: contents.into_boxed_slice(),
})
}
unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}
unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
self.counters.bind_groups.sub(1);
}
unsafe fn create_shader_module(
&self,
desc: &crate::ShaderModuleDescriptor,
shader: crate::ShaderInput,
) -> Result<super::ShaderModule, crate::ShaderError> {
self.counters.shader_modules.add(1);
Ok(super::ShaderModule {
naga: match shader {
crate::ShaderInput::SpirV(_) => {
@ -1292,7 +1331,10 @@ impl crate::Device for super::Device {
id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
})
}
unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {}
unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
self.counters.shader_modules.sub(1);
}
unsafe fn create_render_pipeline(
&self,
@ -1341,6 +1383,8 @@ impl crate::Device for super::Device {
targets.into_boxed_slice()
};
self.counters.render_pipelines.add(1);
Ok(super::RenderPipeline {
inner,
primitive: desc.primitive,
@ -1363,20 +1407,23 @@ impl crate::Device for super::Device {
alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
})
}
unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
let mut program_cache = self.shared.program_cache.lock();
// If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`
// This is safe to assume as long as:
// - `RenderPipeline` can't be cloned
// - The only place that we can get a new reference is during `program_cache.lock()`
if Arc::strong_count(&pipeline.inner) == 2 {
let gl = &self.shared.context.lock();
let mut program_cache = self.shared.program_cache.lock();
program_cache.retain(|_, v| match *v {
Ok(ref p) => p.program != pipeline.inner.program,
Err(_) => false,
});
let gl = &self.shared.context.lock();
unsafe { gl.delete_program(pipeline.inner.program) };
}
self.counters.render_pipelines.sub(1);
}
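Moving `program_cache.lock()` above the `strong_count` check closes the race the comment describes: since the cache is the only place a new reference can be created, holding its lock means a count of 2 cannot change underneath the check. A simplified, runnable sketch of that ordering (the cache and program types are stand-ins, and the actual GL deletion is elided):

use std::sync::{Arc, Mutex};

// Stand-in for the shared program cache.
struct Shared {
    program_cache: Mutex<Vec<Arc<u32>>>, // Arc<u32> stands in for a linked program
}

fn destroy_pipeline(shared: &Shared, inner: Arc<u32>) {
    // Taking the cache lock *before* the strong_count check matters:
    // the cache is the only source of new references, so while we hold
    // the lock, a count of 2 (ours + the cache's) cannot change.
    let mut cache = shared.program_cache.lock().unwrap();
    if Arc::strong_count(&inner) == 2 {
        cache.retain(|p| !Arc::ptr_eq(p, &inner));
        // here the real code would delete the GL program
    }
}

fn main() {
    let program = Arc::new(7u32);
    let shared = Shared { program_cache: Mutex::new(vec![program.clone()]) };
    destroy_pipeline(&shared, program);
    assert!(shared.program_cache.lock().unwrap().is_empty());
}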
unsafe fn create_compute_pipeline(
@ -1388,22 +1435,27 @@ impl crate::Device for super::Device {
shaders.push((naga::ShaderStage::Compute, &desc.stage));
let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;
self.counters.compute_pipelines.add(1);
Ok(super::ComputePipeline { inner })
}
unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
let mut program_cache = self.shared.program_cache.lock();
// If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`
// This is safe to assume as long as:
// - `ComputePipeline` can't be cloned
// - The only place that we can get a new reference is during `program_cache.lock()`
if Arc::strong_count(&pipeline.inner) == 2 {
let gl = &self.shared.context.lock();
let mut program_cache = self.shared.program_cache.lock();
program_cache.retain(|_, v| match *v {
Ok(ref p) => p.program != pipeline.inner.program,
Err(_) => false,
});
let gl = &self.shared.context.lock();
unsafe { gl.delete_program(pipeline.inner.program) };
}
self.counters.compute_pipelines.sub(1);
}
unsafe fn create_pipeline_cache(
@ -1437,6 +1489,8 @@ impl crate::Device for super::Device {
queries.push(query);
}
self.counters.query_sets.add(1);
Ok(super::QuerySet {
queries: queries.into_boxed_slice(),
target: match desc.ty {
@ -1446,24 +1500,31 @@ impl crate::Device for super::Device {
},
})
}
unsafe fn destroy_query_set(&self, set: super::QuerySet) {
let gl = &self.shared.context.lock();
for &query in set.queries.iter() {
unsafe { gl.delete_query(query) };
}
self.counters.query_sets.sub(1);
}
unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
self.counters.fences.add(1);
Ok(super::Fence {
last_completed: 0,
pending: Vec::new(),
})
}
unsafe fn destroy_fence(&self, fence: super::Fence) {
let gl = &self.shared.context.lock();
for (_, sync) in fence.pending {
unsafe { gl.delete_sync(sync) };
}
self.counters.fences.sub(1);
}
unsafe fn get_fence_value(
&self,
fence: &super::Fence,
@ -1542,6 +1603,10 @@ impl crate::Device for super::Device {
unimplemented!()
}
unsafe fn destroy_acceleration_structure(&self, _acceleration_structure: ()) {}
fn get_internal_counters(&self) -> wgt::HalCounters {
self.counters.clone()
}
}
#[cfg(send_sync)]

third_party/rust/wgpu-hal/src/gles/mod.rs (vendored)

@ -268,6 +268,7 @@ pub struct Device {
main_vao: glow::VertexArray,
#[cfg(all(native, feature = "renderdoc"))]
render_doc: crate::auxil::renderdoc::RenderDoc,
counters: wgt::HalCounters,
}
pub struct ShaderClearProgram {

third_party/rust/wgpu-hal/src/lib.rs (vendored)

@ -227,17 +227,15 @@
clippy::non_send_fields_in_send_ty,
// TODO!
clippy::missing_safety_doc,
// Clashes with clippy::pattern_type_mismatch
clippy::needless_borrowed_reference,
// It gets in the way a lot and does not prevent bugs in practice.
clippy::pattern_type_mismatch,
)]
#![warn(
trivial_casts,
trivial_numeric_casts,
unsafe_op_in_unsafe_fn,
unused_extern_crates,
unused_qualifications,
// We don't match on a reference, unless required.
clippy::pattern_type_mismatch,
unused_qualifications
)]
/// DirectX12 API internals.
@ -884,6 +882,8 @@ pub trait Device: WasmNotSendSync {
&self,
acceleration_structure: <Self::A as Api>::AccelerationStructure,
);
fn get_internal_counters(&self) -> wgt::HalCounters;
}
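`get_internal_counters` is the only addition to the `Device` trait here; `wgt::HalCounters` itself lives in the new `counters.rs` visible in the wgpu-types checksum change at the end of this patch, and its definition is not part of this diff. A self-contained sketch of the counter shape the backends appear to rely on, with atomic `add`/`sub` and a snapshotting `Clone`; the field set shown is illustrative, not the real struct:

use std::sync::atomic::{AtomicIsize, Ordering};

// Assumed shape: a counter usable through a shared reference.
#[derive(Default)]
pub struct InternalCounter {
    value: AtomicIsize,
}

impl InternalCounter {
    pub fn add(&self, n: isize) { self.value.fetch_add(n, Ordering::Relaxed); }
    pub fn sub(&self, n: isize) { self.value.fetch_sub(n, Ordering::Relaxed); }
    pub fn read(&self) -> isize { self.value.load(Ordering::Relaxed) }
}

impl Clone for InternalCounter {
    // Cloning snapshots the current value into an independent counter.
    fn clone(&self) -> Self {
        InternalCounter { value: AtomicIsize::new(self.read()) }
    }
}

// Illustrative subset of the counters the backends touch in this diff.
#[derive(Default, Clone)]
pub struct HalCounters {
    pub buffers: InternalCounter,
    pub buffer_memory: InternalCounter,
}

fn main() {
    let counters = HalCounters::default();
    counters.buffers.add(1);          // create_buffer
    counters.buffer_memory.add(4096); // suballocated size in bytes
    let snapshot = counters.clone();  // what get_internal_counters returns
    counters.buffers.sub(1);          // destroy_buffer
    assert_eq!(snapshot.buffers.read(), 1);
    assert_eq!(counters.buffers.read(), 0);
}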
pub trait Queue: WasmNotSendSync {


@ -62,6 +62,7 @@ impl crate::Adapter for super::Adapter {
device: super::Device {
shared: Arc::clone(&self.shared),
features,
counters: Default::default(),
},
queue: super::Queue {
raw: Arc::new(Mutex::new(queue)),

third_party/rust/wgpu-hal/src/metal/device.rs (vendored)

@ -305,6 +305,7 @@ impl super::Device {
super::Device {
shared: Arc::new(super::AdapterShared::new(raw)),
features,
counters: Default::default(),
}
}
@ -345,13 +346,16 @@ impl crate::Device for super::Device {
if let Some(label) = desc.label {
raw.set_label(label);
}
self.counters.buffers.add(1);
Ok(super::Buffer {
raw,
size: desc.size,
})
})
}
unsafe fn destroy_buffer(&self, _buffer: super::Buffer) {}
unsafe fn destroy_buffer(&self, _buffer: super::Buffer) {
self.counters.buffers.sub(1);
}
unsafe fn map_buffer(
&self,
@ -418,6 +422,8 @@ impl crate::Device for super::Device {
raw.set_label(label);
}
self.counters.textures.add(1);
Ok(super::Texture {
raw,
format: desc.format,
@ -429,7 +435,9 @@ impl crate::Device for super::Device {
})
}
unsafe fn destroy_texture(&self, _texture: super::Texture) {}
unsafe fn destroy_texture(&self, _texture: super::Texture) {
self.counters.textures.sub(1);
}
unsafe fn create_texture_view(
&self,
@ -489,9 +497,14 @@ impl crate::Device for super::Device {
})
};
self.counters.texture_views.add(1);
Ok(super::TextureView { raw, aspects })
}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}
unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
self.counters.texture_views.sub(1);
}
unsafe fn create_sampler(
&self,
@ -548,15 +561,20 @@ impl crate::Device for super::Device {
}
let raw = self.shared.device.lock().new_sampler(&descriptor);
self.counters.samplers.add(1);
Ok(super::Sampler { raw })
})
}
unsafe fn destroy_sampler(&self, _sampler: super::Sampler) {}
unsafe fn destroy_sampler(&self, _sampler: super::Sampler) {
self.counters.samplers.sub(1);
}
unsafe fn create_command_encoder(
&self,
desc: &crate::CommandEncoderDescriptor<super::Api>,
) -> Result<super::CommandEncoder, crate::DeviceError> {
self.counters.command_encoders.add(1);
Ok(super::CommandEncoder {
shared: Arc::clone(&self.shared),
raw_queue: Arc::clone(&desc.queue.raw),
@ -565,17 +583,25 @@ impl crate::Device for super::Device {
temp: super::Temp::default(),
})
}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {
self.counters.command_encoders.sub(1);
}
unsafe fn create_bind_group_layout(
&self,
desc: &crate::BindGroupLayoutDescriptor,
) -> DeviceResult<super::BindGroupLayout> {
self.counters.bind_group_layouts.add(1);
Ok(super::BindGroupLayout {
entries: Arc::from(desc.entries),
})
}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}
unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
self.counters.bind_group_layouts.sub(1);
}
unsafe fn create_pipeline_layout(
&self,
@ -736,6 +762,8 @@ impl crate::Device for super::Device {
resources: info.resources,
});
self.counters.pipeline_layouts.add(1);
Ok(super::PipelineLayout {
bind_group_infos,
push_constants_infos,
@ -744,7 +772,10 @@ impl crate::Device for super::Device {
per_stage_map,
})
}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}
unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
self.counters.pipeline_layouts.sub(1);
}
unsafe fn create_bind_group(
&self,
@ -831,16 +862,22 @@ impl crate::Device for super::Device {
}
}
self.counters.bind_groups.add(1);
Ok(bg)
}
unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}
unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
self.counters.bind_groups.sub(1);
}
unsafe fn create_shader_module(
&self,
desc: &crate::ShaderModuleDescriptor,
shader: crate::ShaderInput,
) -> Result<super::ShaderModule, crate::ShaderError> {
self.counters.shader_modules.add(1);
match shader {
crate::ShaderInput::Naga(naga) => Ok(super::ShaderModule {
naga,
@ -851,7 +888,10 @@ impl crate::Device for super::Device {
}
}
}
unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {}
unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
self.counters.shader_modules.sub(1);
}
unsafe fn create_render_pipeline(
&self,
@ -1094,6 +1134,8 @@ impl crate::Device for super::Device {
)
})?;
self.counters.render_pipelines.add(1);
Ok(super::RenderPipeline {
raw,
vs_lib,
@ -1117,7 +1159,10 @@ impl crate::Device for super::Device {
})
})
}
unsafe fn destroy_render_pipeline(&self, _pipeline: super::RenderPipeline) {}
unsafe fn destroy_render_pipeline(&self, _pipeline: super::RenderPipeline) {
self.counters.render_pipelines.sub(1);
}
unsafe fn create_compute_pipeline(
&self,
@ -1165,6 +1210,8 @@ impl crate::Device for super::Device {
)
})?;
self.counters.compute_pipelines.add(1);
Ok(super::ComputePipeline {
raw,
cs_info,
@ -1174,7 +1221,10 @@ impl crate::Device for super::Device {
})
})
}
unsafe fn destroy_compute_pipeline(&self, _pipeline: super::ComputePipeline) {}
unsafe fn destroy_compute_pipeline(&self, _pipeline: super::ComputePipeline) {
self.counters.compute_pipelines.sub(1);
}
unsafe fn create_pipeline_cache(
&self,
@ -1237,6 +1287,8 @@ impl crate::Device for super::Device {
}
};
self.counters.query_sets.add(1);
Ok(super::QuerySet {
raw_buffer: destination_buffer,
counter_sample_buffer: Some(counter_sample_buffer),
@ -1249,15 +1301,23 @@ impl crate::Device for super::Device {
}
})
}
unsafe fn destroy_query_set(&self, _set: super::QuerySet) {}
unsafe fn destroy_query_set(&self, _set: super::QuerySet) {
self.counters.query_sets.sub(1);
}
unsafe fn create_fence(&self) -> DeviceResult<super::Fence> {
self.counters.fences.add(1);
Ok(super::Fence {
completed_value: Arc::new(atomic::AtomicU64::new(0)),
pending_command_buffers: Vec::new(),
})
}
unsafe fn destroy_fence(&self, _fence: super::Fence) {}
unsafe fn destroy_fence(&self, _fence: super::Fence) {
self.counters.fences.sub(1);
}
unsafe fn get_fence_value(&self, fence: &super::Fence) -> DeviceResult<crate::FenceValue> {
let mut max_value = fence.completed_value.load(atomic::Ordering::Acquire);
for &(value, ref cmd_buf) in fence.pending_command_buffers.iter() {
@ -1282,7 +1342,7 @@ impl crate::Device for super::Device {
.iter()
.find(|&&(value, _)| value >= wait_value)
{
Some(&(_, ref cmd_buf)) => cmd_buf,
Some((_, cmd_buf)) => cmd_buf,
None => {
log::error!("No active command buffers for fence value {}", wait_value);
return Err(crate::DeviceError::Lost);
@ -1348,4 +1408,8 @@ impl crate::Device for super::Device {
) {
unimplemented!()
}
fn get_internal_counters(&self) -> wgt::HalCounters {
self.counters.clone()
}
}

third_party/rust/wgpu-hal/src/metal/mod.rs (vendored)

@ -339,6 +339,7 @@ impl Queue {
pub struct Device {
shared: Arc<AdapterShared>,
features: wgt::Features,
counters: wgt::HalCounters,
}
pub struct Surface {


@ -1819,6 +1819,7 @@ impl super::Adapter {
workarounds: self.workarounds,
render_passes: Mutex::new(Default::default()),
framebuffers: Mutex::new(Default::default()),
memory_allocations_counter: Default::default(),
});
let relay_semaphores = super::RelaySemaphores::new(&shared)?;
@ -1881,6 +1882,7 @@ impl super::Adapter {
naga_options,
#[cfg(feature = "renderdoc")]
render_doc: Default::default(),
counters: Default::default(),
};
Ok(crate::OpenDevice { device, queue })


@ -312,7 +312,10 @@ impl gpu_alloc::MemoryDevice<vk::DeviceMemory> for super::DeviceShared {
}
match unsafe { self.raw.allocate_memory(&info, None) } {
Ok(memory) => Ok(memory),
Ok(memory) => {
self.memory_allocations_counter.add(1);
Ok(memory)
}
Err(vk::Result::ERROR_OUT_OF_DEVICE_MEMORY) => {
Err(gpu_alloc::OutOfMemory::OutOfDeviceMemory)
}
@ -325,6 +328,8 @@ impl gpu_alloc::MemoryDevice<vk::DeviceMemory> for super::DeviceShared {
}
unsafe fn deallocate_memory(&self, memory: vk::DeviceMemory) {
self.memory_allocations_counter.sub(1);
unsafe { self.raw.free_memory(memory, None) };
}
@ -910,6 +915,9 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.buffer_memory.add(block.size() as isize);
self.counters.buffers.add(1);
Ok(super::Buffer {
raw,
block: Some(Mutex::new(block)),
@ -918,12 +926,12 @@ impl crate::Device for super::Device {
unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
unsafe { self.shared.raw.destroy_buffer(buffer.raw, None) };
if let Some(block) = buffer.block {
unsafe {
self.mem_allocator
.lock()
.dealloc(&*self.shared, block.into_inner())
};
let block = block.into_inner();
self.counters.buffer_memory.sub(block.size() as isize);
unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) };
}
self.counters.buffers.sub(1);
}
unsafe fn map_buffer(
@ -1049,6 +1057,8 @@ impl crate::Device for super::Device {
)?
};
self.counters.texture_memory.add(block.size() as isize);
unsafe {
self.shared
.raw
@ -1059,6 +1069,8 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.textures.add(1);
Ok(super::Texture {
raw,
drop_guard: None,
@ -1075,8 +1087,12 @@ impl crate::Device for super::Device {
unsafe { self.shared.raw.destroy_image(texture.raw, None) };
}
if let Some(block) = texture.block {
self.counters.texture_memory.sub(block.size() as isize);
unsafe { self.mem_allocator.lock().dealloc(&*self.shared, block) };
}
self.counters.textures.sub(1);
}
unsafe fn create_texture_view(
@ -1126,6 +1142,8 @@ impl crate::Device for super::Device {
.collect(),
};
self.counters.texture_views.add(1);
Ok(super::TextureView {
raw,
layers,
@ -1143,6 +1161,8 @@ impl crate::Device for super::Device {
fbuf_lock.retain(|key, _| !key.attachments.iter().any(|at| at.raw == view.raw));
}
unsafe { self.shared.raw.destroy_image_view(view.raw, None) };
self.counters.texture_views.sub(1);
}
unsafe fn create_sampler(
@ -1184,10 +1204,14 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.samplers.add(1);
Ok(super::Sampler { raw })
}
unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
unsafe { self.shared.raw.destroy_sampler(sampler.raw, None) };
self.counters.samplers.sub(1);
}
unsafe fn create_command_encoder(
@ -1199,6 +1223,8 @@ impl crate::Device for super::Device {
.flags(vk::CommandPoolCreateFlags::TRANSIENT);
let raw = unsafe { self.shared.raw.create_command_pool(&vk_info, None)? };
self.counters.command_encoders.add(1);
Ok(super::CommandEncoder {
raw,
device: Arc::clone(&self.shared),
@ -1219,6 +1245,8 @@ impl crate::Device for super::Device {
// fields.
self.shared.raw.destroy_command_pool(cmd_encoder.raw, None);
}
self.counters.command_encoders.sub(1);
}
unsafe fn create_bind_group_layout(
@ -1339,6 +1367,8 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.bind_group_layouts.add(1);
Ok(super::BindGroupLayout {
raw,
desc_count,
@ -1352,6 +1382,8 @@ impl crate::Device for super::Device {
.raw
.destroy_descriptor_set_layout(bg_layout.raw, None)
};
self.counters.bind_group_layouts.sub(1);
}
unsafe fn create_pipeline_layout(
@ -1403,6 +1435,8 @@ impl crate::Device for super::Device {
}
}
self.counters.pipeline_layouts.add(1);
Ok(super::PipelineLayout {
raw,
binding_arrays,
@ -1414,6 +1448,8 @@ impl crate::Device for super::Device {
.raw
.destroy_pipeline_layout(pipeline_layout.raw, None)
};
self.counters.pipeline_layouts.sub(1);
}
unsafe fn create_bind_group(
@ -1596,14 +1632,20 @@ impl crate::Device for super::Device {
}
unsafe { self.shared.raw.update_descriptor_sets(&writes, &[]) };
self.counters.bind_groups.add(1);
Ok(super::BindGroup { set })
}
unsafe fn destroy_bind_group(&self, group: super::BindGroup) {
unsafe {
self.desc_allocator
.lock()
.free(&*self.shared, Some(group.set))
};
self.counters.bind_groups.sub(1);
}
unsafe fn create_shader_module(
@ -1661,8 +1703,11 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.shader_modules.add(1);
Ok(super::ShaderModule::Raw(raw))
}
unsafe fn destroy_shader_module(&self, module: super::ShaderModule) {
match module {
super::ShaderModule::Raw(raw) => {
@ -1670,6 +1715,8 @@ impl crate::Device for super::Device {
}
super::ShaderModule::Intermediate { .. } => {}
}
self.counters.shader_modules.sub(1);
}
unsafe fn create_render_pipeline(
@ -1900,10 +1947,14 @@ impl crate::Device for super::Device {
unsafe { self.shared.raw.destroy_shader_module(raw_module, None) };
}
self.counters.render_pipelines.add(1);
Ok(super::RenderPipeline { raw })
}
unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) };
self.counters.render_pipelines.sub(1);
}
unsafe fn create_compute_pipeline(
@ -1946,10 +1997,15 @@ impl crate::Device for super::Device {
unsafe { self.shared.raw.destroy_shader_module(raw_module, None) };
}
self.counters.compute_pipelines.add(1);
Ok(super::ComputePipeline { raw })
}
unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
unsafe { self.shared.raw.destroy_pipeline(pipeline.raw, None) };
self.counters.compute_pipelines.sub(1);
}
unsafe fn create_pipeline_cache(
@ -2001,18 +2057,26 @@ impl crate::Device for super::Device {
unsafe { self.shared.set_object_name(raw, label) };
}
self.counters.query_sets.add(1);
Ok(super::QuerySet { raw })
}
unsafe fn destroy_query_set(&self, set: super::QuerySet) {
unsafe { self.shared.raw.destroy_query_pool(set.raw, None) };
self.counters.query_sets.sub(1);
}
unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
self.counters.fences.add(1);
Ok(if self.shared.private_caps.timeline_semaphores {
let mut sem_type_info =
vk::SemaphoreTypeCreateInfo::default().semaphore_type(vk::SemaphoreType::TIMELINE);
let vk_info = vk::SemaphoreCreateInfo::default().push_next(&mut sem_type_info);
let raw = unsafe { self.shared.raw.create_semaphore(&vk_info, None) }?;
super::Fence::TimelineSemaphore(raw)
} else {
super::Fence::FencePool {
@ -2040,6 +2104,8 @@ impl crate::Device for super::Device {
}
}
}
self.counters.fences.sub(1);
}
unsafe fn get_fence_value(
&self,
@ -2320,6 +2386,14 @@ impl crate::Device for super::Device {
.dealloc(&*self.shared, acceleration_structure.block.into_inner());
}
}
fn get_internal_counters(&self) -> wgt::HalCounters {
self.counters
.memory_allocations
.set(self.shared.memory_allocations_counter.read());
self.counters.clone()
}
}
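In the Vulkan backend the allocation count lives on `DeviceShared` (where `allocate_memory` and `deallocate_memory` run), so `get_internal_counters` folds it into the device-side snapshot at read time via a `set` on the counter. A simplified, runnable sketch of that read-time fold (all types here are stand-ins, and `set` is assumed from this call site):

use std::sync::atomic::{AtomicIsize, Ordering};

#[derive(Default)]
struct InternalCounter(AtomicIsize);

impl InternalCounter {
    fn add(&self, n: isize) { self.0.fetch_add(n, Ordering::Relaxed); }
    fn read(&self) -> isize { self.0.load(Ordering::Relaxed) }
    // Assumed from the call site above: overwrite with an absolute value.
    fn set(&self, n: isize) { self.0.store(n, Ordering::Relaxed); }
}

// Shared state owns the allocation counter; the device owns the rest.
#[derive(Default)]
struct DeviceShared { memory_allocations_counter: InternalCounter }
#[derive(Default)]
struct Device { shared: DeviceShared, memory_allocations: InternalCounter }

impl Device {
    // Mirrors the shape of get_internal_counters: fold the shared
    // counter into the device-side snapshot at read time.
    fn get_internal_counters(&self) -> isize {
        self.memory_allocations.set(self.shared.memory_allocations_counter.read());
        self.memory_allocations.read()
    }
}

fn main() {
    let device = Device::default();
    device.shared.memory_allocations_counter.add(3); // allocate_memory x3
    assert_eq!(device.get_internal_counters(), 3);
}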
impl super::DeviceShared {

third_party/rust/wgpu-hal/src/vulkan/mod.rs (vendored)

@ -43,6 +43,7 @@ use std::{
use arrayvec::ArrayVec;
use ash::{ext, khr, vk};
use parking_lot::{Mutex, RwLock};
use wgt::InternalCounter;
const MILLIS_TO_NANOS: u64 = 1_000_000;
const MAX_TOTAL_ATTACHMENTS: usize = crate::MAX_COLOR_ATTACHMENTS * 2 + 1;
@ -527,6 +528,7 @@ struct DeviceShared {
features: wgt::Features,
render_passes: Mutex<rustc_hash::FxHashMap<RenderPassKey, vk::RenderPass>>,
framebuffers: Mutex<rustc_hash::FxHashMap<FramebufferKey, vk::Framebuffer>>,
memory_allocations_counter: InternalCounter,
}
pub struct Device {
@ -538,6 +540,7 @@ pub struct Device {
naga_options: naga::back::spv::Options<'static>,
#[cfg(feature = "renderdoc")]
render_doc: crate::auxil::renderdoc::RenderDoc,
counters: wgt::HalCounters,
}
/// Semaphores for forcing queue submissions to run in order.


@ -1 +1 @@
{"files":{"Cargo.toml":"d8f88446d6c1740116442320eca91e06ce9a2f4713179195c1be44e8ab1fc42d","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"24aba7b2a5d879fd0619f545c618c89926a41b37a4bf417f63cbb55bb4bb71c6","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
{"files":{"Cargo.toml":"5f424ad5726d69bb19d446f3957b0d53db50870f916d4fa350699b66ebe9b9e6","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/counters.rs":"b61d1655be009945132fe6e20c5f2593602e5c6028cb9c1b69e467217d31df4f","src/lib.rs":"7c94f081f8400dbef5d34be2c18de22dafefe77f7ee706ff49b620ec3fa73fc2","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}

Some files were not shown because too many files changed in this diff.