Bug 1772004: Update `wgpu` to 32af4f56 (2022-5-31) r=jgilbert

Minor changes are needed to the `mapAsync` implementation due to:
https://github.com/gfx-rs/wgpu/pull/2698
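
For context, that PR splits the C-side map operation in two: the host-map mode is now an explicit parameter of `wgpu_server_buffer_map`, and only the callback crosses the FFI boundary as a struct. A minimal, self-contained sketch of the new shape (the `HostMap` and `BufferMapCallbackC` types below are simplified stand-ins for the wgpu-core types of the same names; the real glue appears in the diff further down):

```rust
// Self-contained sketch: `HostMap` and `BufferMapCallbackC` are simplified
// stand-ins for the wgpu-core types of the same names used in this commit.
#[derive(Clone, Copy)]
pub enum HostMap {
    Read,
    Write,
}

#[repr(C)]
pub struct BufferMapCallbackC {
    pub callback: unsafe extern "C" fn(user_data: *mut u8),
    pub user_data: *mut u8,
}

unsafe extern "C" fn on_mapped(_user_data: *mut u8) {
    // In WebGPUParent, MapCallback resolves the pending mapAsync promise here.
}

// Old shape: one WGPUBufferMapOperation carried the mode and the callback.
// New shape: the mode travels as its own parameter and the Rust side rebuilds
// the BufferMapOperation before forwarding to buffer_map_async.
pub fn buffer_map_sketch(map_mode: HostMap, callback: BufferMapCallbackC) {
    let _operation = (map_mode, callback); // stands in for BufferMapOperation
}

fn main() {
    let callback = BufferMapCallbackC {
        callback: on_mapped,
        user_data: std::ptr::null_mut(),
    };
    buffer_map_sketch(HostMap::Read, callback);
}
```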

Differential Revision: https://phabricator.services.mozilla.com/D147805
This commit is contained in:
Jim Blandy 2022-06-05 20:30:13 +00:00
Parent 1271c9aedb
Commit 6c9ae4cd83
102 changed files with 6549 additions and 2839 deletions


@ -85,12 +85,12 @@ rev = "3484d3e3ebdc8931493aa5df4d7ee9360a90e76b"
[source."https://github.com/gfx-rs/wgpu"]
git = "https://github.com/gfx-rs/wgpu"
replace-with = "vendored-sources"
rev = "b51fd851"
rev = "32af4f56"
[source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources"
rev = "1aa91549"
rev = "571302e"
[source."https://github.com/gfx-rs/metal-rs"]
git = "https://github.com/gfx-rs/metal-rs"

11
Cargo.lock generated

@ -3456,7 +3456,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.8.0"
source = "git+https://github.com/gfx-rs/naga?rev=1aa91549#1aa9154964238af8c692cf521ff90e1f2395e147"
source = "git+https://github.com/gfx-rs/naga?rev=571302e#571302e3ff09cb856f63a3683da308159872b7cc"
dependencies = [
"bit-set",
"bitflags",
@ -5913,9 +5913,10 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.12.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=b51fd851#b51fd851be51cfe40c937ef789a44244e7dc2971"
source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
dependencies = [
"arrayvec",
"arrayvec 0.7.2",
"bit-vec",
"bitflags",
"cfg_aliases",
"codespan-reporting",
@ -5936,7 +5937,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.12.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=b51fd851#b51fd851be51cfe40c937ef789a44244e7dc2971"
source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
dependencies = [
"arrayvec",
"ash",
@ -5973,7 +5974,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.12.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=b51fd851#b51fd851be51cfe40c937ef789a44244e7dc2971"
source = "git+https://github.com/gfx-rs/wgpu?rev=32af4f56#32af4f56079fd2203c46c9c452cfe33fd60a5721"
dependencies = [
"bitflags",
"bitflags_serde_shim",


@ -383,10 +383,10 @@ ipc::IPCResult WebGPUParent::RecvBufferMap(RawId aSelfId,
auto* request = new MapRequest(mContext.get(), aSelfId, aHostMap, aOffset,
std::move(shmem), std::move(aResolver));
ffi::WGPUBufferMapOperation mapOperation = {
aHostMap, &MapCallback, reinterpret_cast<uint8_t*>(request)};
ffi::wgpu_server_buffer_map(mContext.get(), aSelfId, aOffset, aSize,
mapOperation);
ffi::WGPUBufferMapCallbackC callback = {&MapCallback,
reinterpret_cast<uint8_t*>(request)};
ffi::wgpu_server_buffer_map(mContext.get(), aSelfId, aOffset, aSize, aHostMap,
callback);
return IPC_OK();
}
@ -823,11 +823,10 @@ ipc::IPCResult WebGPUParent::RecvSwapChainPresent(
data,
};
ffi::WGPUBufferMapOperation mapOperation = {
ffi::WGPUHostMap_Read, &PresentCallback,
reinterpret_cast<uint8_t*>(presentRequest)};
ffi::WGPUBufferMapCallbackC callback = {
&PresentCallback, reinterpret_cast<uint8_t*>(presentRequest)};
ffi::wgpu_server_buffer_map(mContext.get(), bufferId, 0, bufferSize,
mapOperation);
ffi::WGPUHostMap_Read, callback);
return IPC_OK();
}


@ -17,7 +17,7 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "b51fd851"
rev = "32af4f56"
#Note: "replay" shouldn't ideally be needed,
# but it allows us to serialize everything across IPC.
features = ["replay", "trace", "serial-pass"]
@ -25,12 +25,12 @@ features = ["replay", "trace", "serial-pass"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "b51fd851"
rev = "32af4f56"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "b51fd851"
rev = "32af4f56"
[dependencies]
bincode = "1"


@ -20,11 +20,11 @@ origin:
# Human-readable identifier for this version/release
# Generally "version NNN", "tag SSS", "bookmark SSS"
release: commit 0b61a191
release: commit 32af4f56
# Revision to pull in
# Must be a long or short commit SHA (long preferred)
revision: 0b61a191
revision: 32af4f56
license: ['MIT', 'Apache-2.0']


@ -222,14 +222,23 @@ pub extern "C" fn wgpu_server_device_create_buffer(
}
}
/// # Safety
///
/// Callers are responsible for ensuring `callback` is well-formed.
#[no_mangle]
pub extern "C" fn wgpu_server_buffer_map(
pub unsafe extern "C" fn wgpu_server_buffer_map(
global: &Global,
buffer_id: id::BufferId,
start: wgt::BufferAddress,
size: wgt::BufferAddress,
operation: wgc::resource::BufferMapOperation,
map_mode: wgc::device::HostMap,
callback: wgc::resource::BufferMapCallbackC,
) {
let callback = wgc::resource::BufferMapCallback::from_c(callback);
let operation = wgc::resource::BufferMapOperation {
host: map_mode,
callback
};
gfx_select!(buffer_id => global.buffer_map_async(
buffer_id,
start .. start + size,

2
third_party/rust/naga/.cargo-checksum.json vendored

File diff suppressed because one or more lines are too long

5
third_party/rust/naga/Cargo.toml vendored

@ -61,10 +61,7 @@ serde = { version = "1.0.103", features = ["derive"], optional = true }
petgraph = { version ="0.6", optional = true }
pp-rs = { version = "0.2.1", optional = true }
hexf-parse = { version = "0.2.1", optional = true }
# update unicode-xid to the next version since it has been updated to unicode v14
# (but has no release that includes it yet)
# https://github.com/unicode-rs/unicode-xid/pull/27
unicode-xid = { version = "0.2.2", optional = true }
unicode-xid = { version = "0.2.3", optional = true }
[dev-dependencies]
bincode = "1"

33
third_party/rust/naga/README.md vendored

@ -7,12 +7,10 @@
![MSRV](https://img.shields.io/badge/rustc-1.56+-blue.svg)
[![codecov.io](https://codecov.io/gh/gfx-rs/naga/branch/master/graph/badge.svg?token=9VOKYO8BM2)](https://codecov.io/gh/gfx-rs/naga)
The shader translation library for the needs of [wgpu](https://github.com/gfx-rs/wgpu) and [gfx-rs](https://github.com/gfx-rs/gfx) projects.
The shader translation library for the needs of [wgpu](https://github.com/gfx-rs/wgpu).
## Supported end-points
Everything is still work-in-progress, but some end-points are usable:
Front-end | Status | Feature | Notes |
--------------- | ------------------ | ------- | ----- |
SPIR-V (binary) | :white_check_mark: | spv-in | |
@ -35,12 +33,31 @@ DOT (GraphViz) | :ok: | dot-out | Not a shading language |
## Conversion tool
Naga includes a default binary target, which allows to test the conversion of different code paths.
Naga can be used as a CLI, which allows you to test the conversion of different code paths.
First, install `naga-cli` from crates.io or directly from GitHub.
```bash
cargo run my_shader.wgsl # validate only
cargo run my_shader.spv my_shader.txt # dump the IR module into a file
cargo run my_shader.spv my_shader.metal --flow-dir flow-dir # convert the SPV to Metal, also dump the SPIR-V flow graph to `flow-dir`
cargo run my_shader.wgsl my_shader.vert --profile es310 # convert the WGSL to GLSL vertex stage under ES 3.20 profile
# release version
cargo install naga-cli
# development version
cargo install naga-cli --git https://github.com/gfx-rs/naga.git
```
Then, you can run the `naga` command.
```bash
naga my_shader.wgsl # validate only
naga my_shader.spv my_shader.txt # dump the IR module into a file
naga my_shader.spv my_shader.metal --flow-dir flow-dir # convert the SPV to Metal, also dump the SPIR-V flow graph to `flow-dir`
naga my_shader.wgsl my_shader.vert --profile es310 # convert the WGSL to GLSL vertex stage under ES 3.20 profile
```
As naga includes a default binary target, you can also use `cargo run` without installation. This is useful when you develop naga itself, or investigate the behavior of naga at a specific commit (e.g. [wgpu](https://github.com/gfx-rs/wgpu) might pin a different version of naga than the `HEAD` of this repository).
```bash
cargo run my_shader.wgsl
```
## Development workflow

4
third_party/rust/naga/src/arena.rs vendored

@ -77,7 +77,7 @@ impl<T> hash::Hash for Handle<T> {
impl<T> Handle<T> {
#[cfg(test)]
pub const DUMMY: Self = Handle {
index: unsafe { NonZeroU32::new_unchecked(!0) },
index: unsafe { NonZeroU32::new_unchecked(u32::MAX) },
marker: PhantomData,
};
@ -101,7 +101,7 @@ impl<T> Handle<T> {
let handle_index = u32::try_from(index + 1)
.ok()
.and_then(Index::new)
.expect("Failed to insert into UniqueArena. Handle overflows");
.expect("Failed to insert into arena. Handle overflows");
Handle::new(handle_index)
}


@ -36,6 +36,8 @@ bitflags::bitflags! {
const MULTI_VIEW = 1 << 17;
/// Fused multiply-add.
const FMA = 1 << 18;
/// Texture samples query
const TEXTURE_SAMPLES = 1 << 19;
}
}
@ -101,7 +103,10 @@ impl FeaturesManager {
check_feature!(SAMPLE_VARIABLES, 400, 300);
check_feature!(DYNAMIC_ARRAY_SIZE, 430, 310);
check_feature!(MULTI_VIEW, 140, 310);
check_feature!(FMA, 400, 310);
// Only available on glsl core; this means that opengl es can't query the number
// of samples in an image, nor do bounds checks on the sample argument
// of texelFetch
check_feature!(TEXTURE_SAMPLES, 150);
// Return an error if there are missing features
if missing.is_empty() {
@ -205,11 +210,19 @@ impl FeaturesManager {
writeln!(out, "#extension GL_EXT_multiview : require")?;
}
if self.0.contains(Features::FMA) && version.is_es() {
if self.0.contains(Features::FMA) && version >= Version::Embedded(310) {
// https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_gpu_shader5.txt
writeln!(out, "#extension GL_EXT_gpu_shader5 : require")?;
}
if self.0.contains(Features::TEXTURE_SAMPLES) {
// https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_shader_texture_image_samples.txt
writeln!(
out,
"#extension GL_ARB_shader_texture_image_samples : require"
)?;
}
Ok(())
}
}
@ -363,24 +376,26 @@ impl<'a, W> Writer<'a, W> {
}
}
if self.options.version.supports_fma_function() {
let has_fma = self
.module
.functions
.iter()
.flat_map(|(_, f)| f.expressions.iter())
.chain(
self.module
.entry_points
.iter()
.flat_map(|e| e.function.expressions.iter()),
)
.any(|(_, e)| match *e {
Expression::Math { fun, .. } if fun == MathFunction::Fma => true,
_ => false,
});
if has_fma {
self.features.request(Features::FMA);
// Loop through all expressions in both functions and entry points
// to check for needed features
for (_, expr) in self
.module
.functions
.iter()
.flat_map(|(_, f)| f.expressions.iter())
.chain(self.entry_point.function.expressions.iter())
{
match *expr {
// Check for fused multiply add use
Expression::Math { fun, .. } if fun == MathFunction::Fma => {
self.features.request(Features::FMA)
}
// Check for samples query
Expression::ImageQuery {
query: crate::ImageQuery::NumSamples,
..
} => self.features.request(Features::TEXTURE_SAMPLES),
_ => {}
}
}

50
third_party/rust/naga/src/back/glsl/mod.rs vendored

@ -1629,6 +1629,8 @@ impl<'a, W: Write> Writer<'a, W> {
Some(self.namer.call(name))
} else if self.need_bake_expressions.contains(&handle) {
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
} else if info.ref_count == 0 {
Some(self.namer.call(""))
} else {
None
};
@ -2433,7 +2435,6 @@ impl<'a, W: Write> Writer<'a, W> {
}
}
crate::ImageQuery::NumSamples => {
// assumes ARB_shader_texture_image_samples
let fun_name = match class {
ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
"textureSamples"
@ -2581,6 +2582,18 @@ impl<'a, W: Write> Writer<'a, W> {
write!(self.out, ")")?;
}
// TODO: handle undefined behavior of BinaryOperator::Modulo
//
// sint:
// if right == 0 return 0
// if left == min(type_of(left)) && right == -1 return 0
// if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
//
// uint:
// if right == 0 return 0
//
// float:
// if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
BinaryOperation::Modulo => {
write!(self.out, "(")?;
@ -2806,6 +2819,30 @@ impl<'a, W: Write> Writer<'a, W> {
let extract_bits = fun == Mf::ExtractBits;
let insert_bits = fun == Mf::InsertBits;
// we might need to cast to unsigned integers since
// GLSL's findLSB / findMSB always return signed integers
let need_extra_paren = {
(fun == Mf::FindLsb || fun == Mf::FindMsb || fun == Mf::CountOneBits)
&& match *ctx.info[arg].ty.inner_with(&self.module.types) {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
..
} => {
write!(self.out, "uint(")?;
true
}
crate::TypeInner::Vector {
kind: crate::ScalarKind::Uint,
size,
..
} => {
write!(self.out, "uvec{}(", size as u8)?;
true
}
_ => false,
}
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(arg, ctx)?;
if let Some(arg) = arg1 {
@ -2838,7 +2875,11 @@ impl<'a, W: Write> Writer<'a, W> {
self.write_expr(arg, ctx)?;
}
}
write!(self.out, ")")?
write!(self.out, ")")?;
if need_extra_paren {
write!(self.out, ")")?
}
}
// `As` is always a call.
// If `convert` is true the function name is the type
@ -2854,6 +2895,11 @@ impl<'a, W: Write> Writer<'a, W> {
// this is similar to `write_type`, but with the target kind
let scalar = glsl_scalar(target_kind, width)?;
match *inner {
TypeInner::Matrix { columns, rows, .. } => write!(
self.out,
"{}mat{}x{}",
scalar.prefix, columns as u8, rows as u8
)?,
TypeInner::Vector { size, .. } => {
write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
}

64
third_party/rust/naga/src/back/hlsl/help.rs vendored

@ -375,7 +375,19 @@ impl<'a, W: Write> super::Writer<'a, W> {
const RETURN_VARIABLE_NAME: &str = "ret";
// Write function return type and name
self.write_type(module, constructor.ty)?;
if let crate::TypeInner::Array { base, size, .. } = module.types[constructor.ty].inner {
write!(self.out, "typedef ")?;
self.write_type(module, constructor.ty)?;
write!(self.out, " ret_")?;
self.write_wrapped_constructor_function_name(module, constructor)?;
self.write_array_size(module, base, size)?;
writeln!(self.out, ";")?;
write!(self.out, "ret_")?;
self.write_wrapped_constructor_function_name(module, constructor)?;
} else {
self.write_type(module, constructor.ty)?;
}
write!(self.out, " ")?;
self.write_wrapped_constructor_function_name(module, constructor)?;
@ -415,10 +427,6 @@ impl<'a, W: Write> super::Writer<'a, W> {
write!(self.out, ")")?;
if let crate::TypeInner::Array { base, size, .. } = module.types[constructor.ty].inner {
self.write_array_size(module, base, size)?;
}
// Write function body
writeln!(self.out, " {{")?;
@ -427,8 +435,8 @@ impl<'a, W: Write> super::Writer<'a, W> {
let struct_name = &self.names[&NameKey::Type(constructor.ty)];
writeln!(
self.out,
"{}{} {};",
INDENT, struct_name, RETURN_VARIABLE_NAME
"{}{} {} = ({})0;",
INDENT, struct_name, RETURN_VARIABLE_NAME, struct_name
)?;
for (i, member) in members.iter().enumerate() {
let field_name = &self.names[&NameKey::StructMember(constructor.ty, i as u32)];
@ -880,6 +888,48 @@ impl<'a, W: Write> super::Writer<'a, W> {
self.wrapped.image_queries.insert(wiq);
}
}
// Write `WrappedConstructor` for structs that are loaded from `AddressSpace::Storage`
// since they will later be used by the fn `write_storage_load`
crate::Expression::Load { pointer } => {
let pointer_space = func_ctx.info[pointer]
.ty
.inner_with(&module.types)
.pointer_space();
if let Some(crate::AddressSpace::Storage { .. }) = pointer_space {
if let Some(ty) = func_ctx.info[handle].ty.handle() {
write_wrapped_constructor(self, ty, module, func_ctx)?;
}
}
fn write_wrapped_constructor<W: Write>(
writer: &mut super::Writer<'_, W>,
ty: Handle<crate::Type>,
module: &crate::Module,
func_ctx: &FunctionCtx,
) -> BackendResult {
match module.types[ty].inner {
crate::TypeInner::Struct { ref members, .. } => {
for member in members {
write_wrapped_constructor(writer, member.ty, module, func_ctx)?;
}
let constructor = WrappedConstructor { ty };
if !writer.wrapped.constructors.contains(&constructor) {
writer
.write_wrapped_constructor_function(module, constructor)?;
writer.wrapped.constructors.insert(constructor);
}
}
crate::TypeInner::Array { base, .. } => {
write_wrapped_constructor(writer, base, module, func_ctx)?;
}
_ => {}
};
Ok(())
}
}
crate::Expression::Compose { ty, components: _ } => {
let constructor = match module.types[ty].inner {
crate::TypeInner::Struct { .. } | crate::TypeInner::Array { .. } => {


@ -44,6 +44,9 @@ impl<W: fmt::Write> super::Writer<'_, W> {
chain: &[SubAccess],
func_ctx: &FunctionCtx,
) -> BackendResult {
if chain.is_empty() {
write!(self.out, "0")?;
}
for (i, access) in chain.iter().enumerate() {
if i != 0 {
write!(self.out, "+")?;
@ -156,12 +159,16 @@ impl<W: fmt::Write> super::Writer<'_, W> {
write!(self.out, "}}")?;
}
crate::TypeInner::Struct { ref members, .. } => {
write!(self.out, "{{")?;
let constructor = super::help::WrappedConstructor {
ty: result_ty.handle().unwrap(),
};
self.write_wrapped_constructor_function_name(module, constructor)?;
write!(self.out, "(")?;
let iter = members
.iter()
.map(|m| (TypeResolution::Handle(m.ty), m.offset));
self.write_storage_load_sequence(module, var_handle, iter, func_ctx)?;
write!(self.out, "}}")?;
write!(self.out, ")")?;
}
_ => unreachable!(),
}

87
third_party/rust/naga/src/back/hlsl/writer.rs vendored

@ -6,7 +6,7 @@ use super::{
use crate::{
back,
proc::{self, NameKey},
valid, Handle, Module, ShaderStage, TypeInner,
valid, Handle, Module, ScalarKind, ShaderStage, TypeInner,
};
use std::{fmt, mem};
@ -1167,6 +1167,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
// Otherwise, we could accidentally write a variable name instead of the full expression.
// Also, we use sanitized names! This defends the backend from generating variables named after reserved keywords.
Some(self.namer.call(name))
} else if info.ref_count == 0 {
Some(self.namer.call(""))
} else {
let min_ref_count = func_ctx.expressions[handle].bake_ref_count();
if min_ref_count <= info.ref_count {
@ -1795,6 +1797,38 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
self.write_expr(module, left, func_ctx)?;
write!(self.out, ")")?;
}
// TODO: handle undefined behavior of BinaryOperator::Modulo
//
// sint:
// if right == 0 return 0
// if left == min(type_of(left)) && right == -1 return 0
// if sign(left) != sign(right) return result as defined by WGSL
//
// uint:
// if right == 0 return 0
//
// float:
// if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
// While HLSL supports float operands with the % operator it is only
// defined in cases where both sides are either positive or negative.
Expression::Binary {
op: crate::BinaryOperator::Modulo,
left,
right,
} if func_ctx.info[left]
.ty
.inner_with(&module.types)
.scalar_kind()
== Some(crate::ScalarKind::Float) =>
{
write!(self.out, "fmod(")?;
self.write_expr(module, left, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, right, func_ctx)?;
write!(self.out, ")")?;
}
Expression::Binary { op, left, right } => {
write!(self.out, "(")?;
self.write_expr(module, left, func_ctx)?;
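
As a sanity check of the `fmod` rewrite above: `fmod` computes the truncated remainder `x - y * trunc(x / y)`, whose sign follows `x`. Rust's float `%` has the same semantics, so the identity can be spot-checked directly (a standalone sketch, not naga code):

```rust
// Truncated float remainder: the semantics HLSL's fmod() provides.
fn fmod(x: f32, y: f32) -> f32 {
    x - y * (x / y).trunc()
}

fn main() {
    for (x, y) in [(5.5_f32, 2.0_f32), (-5.5, 2.0), (5.5, -2.0)] {
        // Rust's `%` on floats is also the truncated remainder.
        assert_eq!(fmod(x, y), x % y);
    }
    println!("{}", fmod(-5.5, 2.0)); // -1.5: sign follows the dividend
}
```
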
@ -2107,9 +2141,32 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
convert,
} => {
let inner = func_ctx.info[expr].ty.inner_with(&module.types);
let (size_str, src_width) = match *inner {
TypeInner::Vector { size, width, .. } => (back::vector_size_str(size), width),
TypeInner::Scalar { width, .. } => ("", width),
let get_width = |src_width| kind.to_hlsl_str(convert.unwrap_or(src_width));
match *inner {
TypeInner::Vector { size, width, .. } => {
write!(
self.out,
"{}{}(",
get_width(width)?,
back::vector_size_str(size)
)?;
}
TypeInner::Scalar { width, .. } => {
write!(self.out, "{}(", get_width(width)?,)?;
}
TypeInner::Matrix {
columns,
rows,
width,
} => {
write!(
self.out,
"{}{}x{}(",
get_width(width)?,
back::vector_size_str(columns),
back::vector_size_str(rows)
)?;
}
_ => {
return Err(Error::Unimplemented(format!(
"write_expr expression::as {:?}",
@ -2117,8 +2174,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
)));
}
};
let kind_str = kind.to_hlsl_str(convert.unwrap_or(src_width))?;
write!(self.out, "{}{}(", kind_str, size_str,)?;
self.write_expr(module, expr, func_ctx)?;
write!(self.out, ")")?;
}
@ -2135,6 +2190,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Asincosh { is_sin: bool },
Atanh,
Regular(&'static str),
MissingIntOverload(&'static str),
}
let fun = match fun {
@ -2196,8 +2252,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Mf::Transpose => Function::Regular("transpose"),
Mf::Determinant => Function::Regular("determinant"),
// bits
Mf::CountOneBits => Function::Regular("countbits"),
Mf::ReverseBits => Function::Regular("reversebits"),
Mf::CountOneBits => Function::MissingIntOverload("countbits"),
Mf::ReverseBits => Function::MissingIntOverload("reversebits"),
Mf::FindLsb => Function::Regular("firstbitlow"),
Mf::FindMsb => Function::Regular("firstbithigh"),
_ => return Err(Error::Unimplemented(format!("write_expr_math {:?}", fun))),
@ -2240,6 +2296,21 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
}
write!(self.out, ")")?
}
Function::MissingIntOverload(fun_name) => {
let scalar_kind = &func_ctx.info[arg]
.ty
.inner_with(&module.types)
.scalar_kind();
if let Some(ScalarKind::Sint) = *scalar_kind {
write!(self.out, "asint({}(asuint(", fun_name)?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
} else {
write!(self.out, "{}(", fun_name)?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")")?;
}
}
}
}
Expression::Swizzle {

2
third_party/rust/naga/src/back/mod.rs vendored

@ -131,7 +131,7 @@ impl crate::Expression {
const fn bake_ref_count(&self) -> usize {
match *self {
// accesses are never cached, only loads are
crate::Expression::Access { .. } | crate::Expression::AccessIndex { .. } => !0,
crate::Expression::Access { .. } | crate::Expression::AccessIndex { .. } => usize::MAX,
// sampling may use the control flow, and image ops look better by themselves
crate::Expression::ImageSample { .. } | crate::Expression::ImageLoad { .. } => 1,
// derivatives use the control flow

167
third_party/rust/naga/src/back/msl/writer.rs vendored

@ -462,7 +462,9 @@ impl crate::AddressSpace {
// rely on the actual use of a global by functions. This means we
// may end up with "const" even if the binding is read-write,
// and that should be OK.
Self::Storage { .. } | Self::Private | Self::WorkGroup => true,
Self::Storage { .. } => true,
// These should always be read-write.
Self::Private | Self::WorkGroup => false,
// These translate to `constant` address space, no need for qualifiers.
Self::Uniform | Self::PushConstant => false,
// Not applicable.
@ -1482,6 +1484,20 @@ impl<W: Write> Writer<W> {
.resolve_type(left)
.scalar_kind()
.ok_or(Error::UnsupportedBinaryOp(op))?;
// TODO: handle undefined behavior of BinaryOperator::Modulo
//
// sint:
// if right == 0 return 0
// if left == min(type_of(left)) && right == -1 return 0
// if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
//
// uint:
// if right == 0 return 0
//
// float:
// if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
if op == crate::BinaryOperator::Modulo && kind == crate::ScalarKind::Float {
write!(self.out, "{}::fmod(", NAMESPACE)?;
self.put_expression(left, context, true)?;
@ -1591,21 +1607,6 @@ impl<W: Write> Writer<W> {
crate::TypeInner::Scalar { .. } => true,
_ => false,
};
let argument_size_suffix = match *context.resolve_type(arg) {
crate::TypeInner::Vector {
size: crate::VectorSize::Bi,
..
} => "2",
crate::TypeInner::Vector {
size: crate::VectorSize::Tri,
..
} => "3",
crate::TypeInner::Vector {
size: crate::VectorSize::Quad,
..
} => "4",
_ => "",
};
let fun_name = match fun {
// comparison
@ -1705,21 +1706,13 @@ impl<W: Write> Writer<W> {
self.put_expression(arg1.unwrap(), context, false)?;
write!(self.out, ")")?;
} else if fun == Mf::FindLsb {
write!(
self.out,
"(((1 + int{}({}::ctz(",
argument_size_suffix, NAMESPACE
)?;
write!(self.out, "((({}::ctz(", NAMESPACE)?;
self.put_expression(arg, context, true)?;
write!(self.out, "))) % 33) - 1)")?;
write!(self.out, ") + 1) % 33) - 1)")?;
} else if fun == Mf::FindMsb {
write!(
self.out,
"(((1 + int{}({}::clz(",
argument_size_suffix, NAMESPACE
)?;
write!(self.out, "((({}::clz(", NAMESPACE)?;
self.put_expression(arg, context, true)?;
write!(self.out, "))) % 33) - 1)")?;
write!(self.out, ") + 1) % 33) - 1)")?
} else if fun == Mf::Unpack2x16float {
write!(self.out, "float2(as_type<half2>(")?;
self.put_expression(arg, context, false)?;
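
A quick check of the simplified `findLsb` expansion above, under the assumption that `ctz(0)` returns 32 (Rust's `u32::trailing_zeros` behaves that way, which makes it a convenient model): `((ctz(x) + 1) % 33) - 1` maps zero to -1 and everything else to its lowest set bit, matching GLSL's `findLSB`:

```rust
// naga's expansion modeled with trailing_zeros, which returns 32 for 0u32,
// matching the assumed ctz behavior.
fn find_lsb(x: u32) -> i32 {
    ((x.trailing_zeros() as i32 + 1) % 33) - 1
}

fn main() {
    assert_eq!(find_lsb(0), -1); // no set bit
    assert_eq!(find_lsb(1), 0);
    assert_eq!(find_lsb(0b1000), 3);
    assert_eq!(find_lsb(u32::MAX), 0);
}
```
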
@ -1748,33 +1741,45 @@ impl<W: Write> Writer<W> {
expr,
kind,
convert,
} => {
let (src_kind, src_width) = match *context.resolve_type(expr) {
crate::TypeInner::Scalar { kind, width }
| crate::TypeInner::Vector { kind, width, .. } => (kind, width),
_ => return Err(Error::Validation),
};
let is_bool_cast =
kind == crate::ScalarKind::Bool || src_kind == crate::ScalarKind::Bool;
let op = match convert {
Some(w) if w == src_width || is_bool_cast => "static_cast",
Some(8) if kind == crate::ScalarKind::Float => {
return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64))
}
Some(_) => return Err(Error::Validation),
None => "as_type",
};
write!(self.out, "{}<", op)?;
match *context.resolve_type(expr) {
crate::TypeInner::Vector { size, .. } => {
put_numeric_type(&mut self.out, kind, &[size])?
}
_ => put_numeric_type(&mut self.out, kind, &[])?,
};
write!(self.out, ">(")?;
self.put_expression(expr, context, true)?;
write!(self.out, ")")?;
}
} => match *context.resolve_type(expr) {
crate::TypeInner::Scalar {
kind: src_kind,
width: src_width,
}
| crate::TypeInner::Vector {
kind: src_kind,
width: src_width,
..
} => {
let is_bool_cast =
kind == crate::ScalarKind::Bool || src_kind == crate::ScalarKind::Bool;
let op = match convert {
Some(w) if w == src_width || is_bool_cast => "static_cast",
Some(8) if kind == crate::ScalarKind::Float => {
return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64))
}
Some(_) => return Err(Error::Validation),
None => "as_type",
};
write!(self.out, "{}<", op)?;
match *context.resolve_type(expr) {
crate::TypeInner::Vector { size, .. } => {
put_numeric_type(&mut self.out, kind, &[size])?
}
_ => put_numeric_type(&mut self.out, kind, &[])?,
};
write!(self.out, ">(")?;
self.put_expression(expr, context, true)?;
write!(self.out, ")")?;
}
crate::TypeInner::Matrix { columns, rows, .. } => {
put_numeric_type(&mut self.out, kind, &[rows, columns])?;
write!(self.out, "(")?;
self.put_expression(expr, context, true)?;
write!(self.out, ")")?;
}
_ => return Err(Error::Validation),
},
// has to be a named expression
crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } => {
unreachable!()
@ -2445,6 +2450,8 @@ impl<W: Write> Writer<W> {
// Don't assume the names in `named_expressions` are unique,
// or even valid. Use the `Namer`.
Some(self.namer.call(name))
} else if info.ref_count == 0 {
Some(self.namer.call(""))
} else {
// If this expression is an index that we're going to first compare
// against a limit, and then actually use as an index, then we may
@ -3276,15 +3283,20 @@ impl<W: Write> Writer<W> {
};
let local_name = &self.names[&NameKey::FunctionLocal(fun_handle, local_handle)];
write!(self.out, "{}{} {}", back::INDENT, ty_name, local_name)?;
if let Some(value) = local.init {
let coco = ConstantContext {
handle: value,
arena: &module.constants,
names: &self.names,
first_time: false,
};
write!(self.out, " = {}", coco)?;
}
match local.init {
Some(value) => {
let coco = ConstantContext {
handle: value,
arena: &module.constants,
names: &self.names,
first_time: false,
};
write!(self.out, " = {}", coco)?;
}
None => {
write!(self.out, " = {{}}")?;
}
};
writeln!(self.out, ";")?;
}
@ -3808,15 +3820,20 @@ impl<W: Write> Writer<W> {
first_time: false,
};
write!(self.out, "{}{} {}", back::INDENT, ty_name, name)?;
if let Some(value) = local.init {
let coco = ConstantContext {
handle: value,
arena: &module.constants,
names: &self.names,
first_time: false,
};
write!(self.out, " = {}", coco)?;
}
match local.init {
Some(value) => {
let coco = ConstantContext {
handle: value,
arena: &module.constants,
names: &self.names,
first_time: false,
};
write!(self.out, " = {}", coco)?;
}
None => {
write!(self.out, " = {{}}")?;
}
};
writeln!(self.out, ";")?;
}
@ -3901,7 +3918,7 @@ fn test_stack_size() {
{
// check expression stack
let mut addresses = !0usize..0usize;
let mut addresses = usize::MAX..0usize;
for pointer in writer.put_expression_stack_pointers {
addresses.start = addresses.start.min(pointer as usize);
addresses.end = addresses.end.max(pointer as usize);
@ -3916,7 +3933,7 @@ fn test_stack_size() {
{
// check block stack
let mut addresses = !0usize..0usize;
let mut addresses = usize::MAX..0usize;
for pointer in writer.put_block_stack_pointers {
addresses.start = addresses.start.min(pointer as usize);
addresses.end = addresses.end.max(pointer as usize);

226
third_party/rust/naga/src/back/spv/block.rs vendored

@ -529,8 +529,15 @@ impl<'w> BlockContext<'w> {
_ => unimplemented!(),
},
crate::BinaryOperator::Modulo => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SMod,
// TODO: handle undefined behavior
// if right == 0 return 0
// if left == min(type_of(left)) && right == -1 return 0
Some(crate::ScalarKind::Sint) => spirv::Op::SRem,
// TODO: handle undefined behavior
// if right == 0 return 0
Some(crate::ScalarKind::Uint) => spirv::Op::UMod,
// TODO: handle undefined behavior
// if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
Some(crate::ScalarKind::Float) => spirv::Op::FRem,
_ => unimplemented!(),
},
@ -798,10 +805,18 @@ impl<'w> BlockContext<'w> {
arg0_id,
)),
Mf::Determinant => MathOp::Ext(spirv::GLOp::Determinant),
Mf::ReverseBits | Mf::CountOneBits => {
log::error!("unimplemented math function {:?}", fun);
return Err(Error::FeatureNotImplemented("math function"));
}
Mf::ReverseBits => MathOp::Custom(Instruction::unary(
spirv::Op::BitReverse,
result_type_id,
id,
arg0_id,
)),
Mf::CountOneBits => MathOp::Custom(Instruction::unary(
spirv::Op::BitCount,
result_type_id,
id,
arg0_id,
)),
Mf::ExtractBits => {
let op = match arg_scalar_kind {
Some(crate::ScalarKind::Uint) => spirv::Op::BitFieldUExtract,
@ -923,10 +938,13 @@ impl<'w> BlockContext<'w> {
use crate::ScalarKind as Sk;
let expr_id = self.cached[expr];
let (src_kind, src_size, src_width) =
let (src_kind, src_size, src_width, is_matrix) =
match *self.fun_info[expr].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Scalar { kind, width } => (kind, None, width),
crate::TypeInner::Vector { kind, width, size } => (kind, Some(size), width),
crate::TypeInner::Scalar { kind, width } => (kind, None, width, false),
crate::TypeInner::Vector { kind, width, size } => {
(kind, Some(size), width, false)
}
crate::TypeInner::Matrix { width, .. } => (kind, None, width, true),
ref other => {
log::error!("As source {:?}", other);
return Err(Error::Validation("Unexpected Expression::As source"));
@ -939,102 +957,112 @@ impl<'w> BlockContext<'w> {
Ternary(spirv::Op, Word, Word),
}
let cast = match (src_kind, kind, convert) {
(_, _, None) | (Sk::Bool, Sk::Bool, Some(_)) => Cast::Unary(spirv::Op::Bitcast),
// casting to a bool - generate `OpXxxNotEqual`
(_, Sk::Bool, Some(_)) => {
let (op, value) = match src_kind {
Sk::Sint => (spirv::Op::INotEqual, crate::ScalarValue::Sint(0)),
Sk::Uint => (spirv::Op::INotEqual, crate::ScalarValue::Uint(0)),
Sk::Float => {
(spirv::Op::FUnordNotEqual, crate::ScalarValue::Float(0.0))
}
Sk::Bool => unreachable!(),
};
let zero_scalar_id = self.writer.get_constant_scalar(value, src_width);
let zero_id = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind: src_kind,
width: src_width,
pointer_space: None,
}));
let components = [zero_scalar_id; 4];
let cast = if is_matrix {
// we only support identity casts for matrices
Cast::Unary(spirv::Op::CopyObject)
} else {
match (src_kind, kind, convert) {
(Sk::Bool, Sk::Bool, _) => Cast::Unary(spirv::Op::CopyObject),
(_, _, None) => Cast::Unary(spirv::Op::Bitcast),
// casting to a bool - generate `OpXxxNotEqual`
(_, Sk::Bool, Some(_)) => {
let (op, value) = match src_kind {
Sk::Sint => (spirv::Op::INotEqual, crate::ScalarValue::Sint(0)),
Sk::Uint => (spirv::Op::INotEqual, crate::ScalarValue::Uint(0)),
Sk::Float => {
(spirv::Op::FUnordNotEqual, crate::ScalarValue::Float(0.0))
}
Sk::Bool => unreachable!(),
};
let zero_scalar_id = self.writer.get_constant_scalar(value, src_width);
let zero_id = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind: src_kind,
width: src_width,
pointer_space: None,
}));
let components = [zero_scalar_id; 4];
let zero_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
zero_id,
&components[..size as usize],
));
zero_id
}
None => zero_scalar_id,
};
let zero_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
zero_id,
&components[..size as usize],
));
zero_id
}
None => zero_scalar_id,
};
Cast::Binary(op, zero_id)
}
// casting from a bool - generate `OpSelect`
(Sk::Bool, _, Some(dst_width)) => {
let (val0, val1) = match kind {
Sk::Sint => (crate::ScalarValue::Sint(0), crate::ScalarValue::Sint(1)),
Sk::Uint => (crate::ScalarValue::Uint(0), crate::ScalarValue::Uint(1)),
Sk::Float => (
crate::ScalarValue::Float(0.0),
crate::ScalarValue::Float(1.0),
),
Sk::Bool => unreachable!(),
};
let scalar0_id = self.writer.get_constant_scalar(val0, dst_width);
let scalar1_id = self.writer.get_constant_scalar(val1, dst_width);
let (accept_id, reject_id) = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind,
width: dst_width,
pointer_space: None,
}));
let components0 = [scalar0_id; 4];
let components1 = [scalar1_id; 4];
Cast::Binary(op, zero_id)
}
// casting from a bool - generate `OpSelect`
(Sk::Bool, _, Some(dst_width)) => {
let (val0, val1) = match kind {
Sk::Sint => {
(crate::ScalarValue::Sint(0), crate::ScalarValue::Sint(1))
}
Sk::Uint => {
(crate::ScalarValue::Uint(0), crate::ScalarValue::Uint(1))
}
Sk::Float => (
crate::ScalarValue::Float(0.0),
crate::ScalarValue::Float(1.0),
),
Sk::Bool => unreachable!(),
};
let scalar0_id = self.writer.get_constant_scalar(val0, dst_width);
let scalar1_id = self.writer.get_constant_scalar(val1, dst_width);
let (accept_id, reject_id) = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind,
width: dst_width,
pointer_space: None,
}));
let components0 = [scalar0_id; 4];
let components1 = [scalar1_id; 4];
let vec0_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec0_id,
&components0[..size as usize],
));
let vec1_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec1_id,
&components1[..size as usize],
));
(vec1_id, vec0_id)
}
None => (scalar1_id, scalar0_id),
};
let vec0_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec0_id,
&components0[..size as usize],
));
let vec1_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec1_id,
&components1[..size as usize],
));
(vec1_id, vec0_id)
}
None => (scalar1_id, scalar0_id),
};
Cast::Ternary(spirv::Op::Select, accept_id, reject_id)
Cast::Ternary(spirv::Op::Select, accept_id, reject_id)
}
(Sk::Float, Sk::Uint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToU),
(Sk::Float, Sk::Sint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToS),
(Sk::Float, Sk::Float, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::FConvert)
}
(Sk::Sint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertSToF),
(Sk::Sint, Sk::Sint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::SConvert)
}
(Sk::Uint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertUToF),
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::UConvert)
}
// We assume it's either an identity cast, or int-uint.
_ => Cast::Unary(spirv::Op::Bitcast),
}
(Sk::Float, Sk::Uint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToU),
(Sk::Float, Sk::Sint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToS),
(Sk::Float, Sk::Float, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::FConvert)
}
(Sk::Sint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertSToF),
(Sk::Sint, Sk::Sint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::SConvert)
}
(Sk::Uint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertUToF),
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::UConvert)
}
// We assume it's either an identity cast, or int-uint.
_ => Cast::Unary(spirv::Op::Bitcast),
};
let id = self.gen_id();

12
third_party/rust/naga/src/back/spv/writer.rs vendored

@ -309,7 +309,10 @@ impl Writer {
pointer_type_id,
id,
spirv::StorageClass::Function,
init_word,
init_word.or_else(|| {
let type_id = self.get_type_id(LookupType::Handle(variable.ty));
Some(self.write_constant_null(type_id))
}),
);
function
.variables
@ -1325,6 +1328,13 @@ impl Writer {
}
};
let init_word = match global_variable.space {
crate::AddressSpace::Private => {
init_word.or_else(|| Some(self.write_constant_null(inner_type_id)))
}
_ => init_word,
};
Instruction::variable(pointer_type_id, id, class, init_word)
.to_words(&mut self.logical_layout.declarations);
Ok(id)


@ -659,6 +659,11 @@ impl<W: Write> Writer<W> {
// Otherwise, we could accidentally write a variable name instead of the full expression.
// Also, we use sanitized names! This defends the backend from generating variables named after reserved keywords.
Some(self.namer.call(name))
} else if info.ref_count == 0 {
write!(self.out, "{}_ = ", level)?;
self.write_expr(module, handle, func_ctx)?;
writeln!(self.out, ";")?;
continue;
} else {
let expr = &func_ctx.expressions[handle];
let min_ref_count = expr.bake_ref_count();

8
third_party/rust/naga/src/front/glsl/ast.rs vendored

@ -133,9 +133,17 @@ pub enum HirExprKind {
},
Variable(VariableReference),
Call(FunctionCall),
/// Represents the ternary operator in glsl (`?:`)
Conditional {
/// The expression that will decide which branch to take, must evaluate to a boolean
condition: Handle<HirExpr>,
/// The expression that will be evaluated if [`condition`] returns `true`
///
/// [`condition`]: Self::Conditional::condition
accept: Handle<HirExpr>,
/// The expression that will be evaluated if [`condition`] returns `false`
///
/// [`condition`]: Self::Conditional::condition
reject: Handle<HirExpr>,
},
Assign {


@ -727,6 +727,17 @@ fn inject_standard_builtins(
_ => {}
}
// we need to cast the return type of findLsb / findMsb
let mc = if kind == Sk::Uint {
match mc {
MacroCall::MathFunction(MathFunction::FindLsb) => MacroCall::FindLsbUint,
MacroCall::MathFunction(MathFunction::FindMsb) => MacroCall::FindMsbUint,
mc => mc,
}
} else {
mc
};
declaration.overloads.push(module.add_builtin(args, mc))
}
}
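
The cast is needed because GLSL's `findLSB`/`findMSB` return a signed integer even for `uint` arguments, since -1 flags "no set bit". A standalone model of the `findMSB` contract (illustration only, not naga code):

```rust
// GLSL: `int findMSB(uint value)` — signed result even for a uint argument,
// because -1 means "no bit set".
fn find_msb(x: u32) -> i32 {
    if x == 0 {
        -1
    } else {
        (31 - x.leading_zeros()) as i32
    }
}

fn main() {
    assert_eq!(find_msb(0), -1);
    assert_eq!(find_msb(1), 0);
    assert_eq!(find_msb(u32::MAX), 31);
}
```
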
@ -1580,6 +1591,8 @@ pub enum MacroCall {
},
ImageStore,
MathFunction(MathFunction),
FindLsbUint,
FindMsbUint,
BitfieldExtract,
BitfieldInsert,
Relational(RelationalFunction),
@ -1824,7 +1837,7 @@ impl MacroCall {
MacroCall::ImageStore => {
let comps =
parser.coordinate_components(ctx, args[0], args[1], None, meta, body)?;
ctx.emit_flush(body);
ctx.emit_restart(body);
body.push(
crate::Statement::ImageStore {
image: args[0],
@ -1834,7 +1847,6 @@ impl MacroCall {
},
meta,
);
ctx.emit_start();
return Ok(None);
}
MacroCall::MathFunction(fun) => ctx.add_expression(
@ -1848,6 +1860,33 @@ impl MacroCall {
Span::default(),
body,
),
mc @ (MacroCall::FindLsbUint | MacroCall::FindMsbUint) => {
let fun = match mc {
MacroCall::FindLsbUint => MathFunction::FindLsb,
MacroCall::FindMsbUint => MathFunction::FindMsb,
_ => unreachable!(),
};
let res = ctx.add_expression(
Expression::Math {
fun,
arg: args[0],
arg1: None,
arg2: None,
arg3: None,
},
Span::default(),
body,
);
ctx.add_expression(
Expression::As {
expr: res,
kind: Sk::Sint,
convert: Some(4),
},
Span::default(),
body,
)
}
MacroCall::BitfieldInsert => {
let conv_arg_2 = ctx.add_expression(
Expression::As {
@ -1930,14 +1969,45 @@ impl MacroCall {
MacroCall::Mod(size) => {
ctx.implicit_splat(parser, &mut args[1], meta, size)?;
ctx.add_expression(
// x - y * floor(x / y)
let div = ctx.add_expression(
Expression::Binary {
op: BinaryOperator::Modulo,
op: BinaryOperator::Divide,
left: args[0],
right: args[1],
},
Span::default(),
body,
);
let floor = ctx.add_expression(
Expression::Math {
fun: MathFunction::Floor,
arg: div,
arg1: None,
arg2: None,
arg3: None,
},
Span::default(),
body,
);
let mult = ctx.add_expression(
Expression::Binary {
op: BinaryOperator::Multiply,
left: floor,
right: args[1],
},
Span::default(),
body,
);
ctx.add_expression(
Expression::Binary {
op: BinaryOperator::Subtract,
left: args[0],
right: mult,
},
Span::default(),
body,
)
}
MacroCall::Splatted(fun, size, i) => {
@ -1998,8 +2068,7 @@ impl MacroCall {
body,
),
MacroCall::Barrier => {
ctx.emit_flush(body);
ctx.emit_start();
ctx.emit_restart(body);
body.push(crate::Statement::Barrier(crate::Barrier::all()), meta);
return Ok(None);
}


@ -506,7 +506,7 @@ impl<'a> ConstantSolver<'a> {
BinaryOperator::Subtract => a - b,
BinaryOperator::Multiply => a * b,
BinaryOperator::Divide => a / b,
BinaryOperator::Modulo => a % b,
BinaryOperator::Modulo => a - b * (a / b).floor(),
_ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
})
}
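
This matches GLSL's definition of `mod()` as the floored modulo `x - y * floor(x / y)`, which differs from the truncated remainder computed by Rust's `%` whenever the operands have different signs. A standalone check:

```rust
// GLSL mod(): floored modulo, result takes the sign of the divisor.
fn glsl_mod(a: f64, b: f64) -> f64 {
    a - b * (a / b).floor()
}

fn main() {
    assert_eq!(glsl_mod(5.5, 2.0), 1.5); // same signs: agrees with `%`
    assert_eq!(5.5 % 2.0, 1.5);
    assert_eq!(glsl_mod(-5.5, 2.0), 0.5); // mixed signs: they diverge
    assert_eq!(-5.5 % 2.0, -1.5);
}
```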


@ -101,7 +101,7 @@ impl Context {
}: GlobalLookup,
body: &mut Block,
) {
self.emit_flush(body);
self.emit_end(body);
let (expr, load, constant) = match kind {
GlobalLookupKind::Variable(v) => {
let span = parser.module.global_variables.get_span(v);
@ -170,14 +170,39 @@ impl Context {
self.lookup_global_var_exps.insert(name.into(), var);
}
/// Starts the expression emitter
///
/// # Panics
///
/// - If called twice in a row without calling [`emit_end`][Self::emit_end].
#[inline]
pub fn emit_start(&mut self) {
self.emitter.start(&self.expressions)
}
pub fn emit_flush(&mut self, body: &mut Block) {
/// Emits all the expressions captured by the emitter to the passed `body`
///
/// # Panics
///
/// - If called before calling [`emit_start`].
/// - If called twice in a row without calling [`emit_start`].
///
/// [`emit_start`]: Self::emit_start
pub fn emit_end(&mut self, body: &mut Block) {
body.extend(self.emitter.finish(&self.expressions))
}
/// Emits all the expressions captured by the emitter to the passed `body`
/// and starts the emitter again
///
/// # Panics
///
/// - If called before calling [`emit_start`][Self::emit_start].
pub fn emit_restart(&mut self, body: &mut Block) {
self.emit_end(body);
self.emit_start()
}
pub fn add_expression(
&mut self,
expr: Expression,
@ -186,7 +211,7 @@ impl Context {
) -> Handle<Expression> {
let needs_pre_emit = expr.needs_pre_emit();
if needs_pre_emit {
self.emit_flush(body);
self.emit_end(body);
}
let handle = self.expressions.append(expr, meta);
if needs_pre_emit {
@ -292,8 +317,7 @@ impl Context {
);
let local_expr = self.add_expression(Expression::LocalVariable(handle), meta, body);
self.emit_flush(body);
self.emit_start();
self.emit_restart(body);
body.push(
Statement::Store {
@ -462,8 +486,7 @@ impl Context {
body,
);
self.emit_flush(body);
self.emit_start();
self.emit_restart(body);
body.push(
Statement::Store {
@ -474,8 +497,7 @@ impl Context {
);
}
} else {
self.emit_flush(body);
self.emit_start();
self.emit_restart(body);
body.push(Statement::Store { pointer, value }, meta);
}
@ -1069,35 +1091,149 @@ impl Context {
)?;
return Ok((maybe_expr, meta));
}
// `HirExprKind::Conditional` represents the ternary operator in glsl (`?:`)
//
// The ternary operator is defined to only evaluate one of the two possible
// expressions, which means that its behavior is that of an `if` statement,
// and it's merely syntactic sugar for it.
HirExprKind::Conditional {
condition,
accept,
reject,
} if ExprPos::Lhs != pos => {
// Given an expression `a ? b : c`, we need to produce a Naga
// statement roughly like:
//
// var temp;
// if a {
// temp = convert(b);
// } else {
// temp = convert(c);
// }
//
// where `convert` stands for type conversions to bring `b` and `c` to
// the same type, and then use `temp` to represent the value of the whole
// conditional expression in subsequent code.
// Lower the condition first to the current body
let condition = self
.lower_expect_inner(stmt, parser, condition, ExprPos::Rhs, body)?
.0;
// Emit all expressions since we will be adding statements to
// other bodies next
self.emit_restart(body);
// Create the bodies for the two cases
let mut accept_body = Block::new();
let mut reject_body = Block::new();
// Lower the `true` branch
let (mut accept, accept_meta) =
self.lower_expect_inner(stmt, parser, accept, pos, body)?;
self.lower_expect_inner(stmt, parser, accept, pos, &mut accept_body)?;
// Flush the body of the `true` branch, to start emitting on the
// `false` branch
self.emit_restart(&mut accept_body);
// Lower the `false` branch
let (mut reject, reject_meta) =
self.lower_expect_inner(stmt, parser, reject, pos, body)?;
self.lower_expect_inner(stmt, parser, reject, pos, &mut reject_body)?;
self.binary_implicit_conversion(
parser,
&mut accept,
accept_meta,
&mut reject,
reject_meta,
)?;
// Flush the body of the `false` branch
self.emit_restart(&mut reject_body);
self.add_expression(
Expression::Select {
condition,
accept,
reject,
// We need to do some custom implicit conversions since the two target expressions
// are in different bodies
if let (
Some((accept_power, accept_width, accept_kind)),
Some((reject_power, reject_width, reject_kind)),
) = (
// Get the components of both branches and calculate the type power
self.expr_scalar_components(parser, accept, accept_meta)?
.and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
self.expr_scalar_components(parser, reject, reject_meta)?
.and_then(|(kind, width)| Some((type_power(kind, width)?, width, kind))),
) {
match accept_power.cmp(&reject_power) {
std::cmp::Ordering::Less => {
self.conversion(&mut accept, accept_meta, reject_kind, reject_width)?;
// The expression belongs to the `true` branch so we need to flush to
// the respective body
self.emit_end(&mut accept_body);
}
// Technically there's nothing to flush but later we will need to
// add some expressions that must not be emitted so instead
// of flushing, starting and flushing again, just make sure
// everything is flushed.
std::cmp::Ordering::Equal => self.emit_end(body),
std::cmp::Ordering::Greater => {
self.conversion(&mut reject, reject_meta, accept_kind, accept_width)?;
// The expression belongs to the `false` branch so we need to flush to
// the respective body
self.emit_end(&mut reject_body);
}
}
}
// We need to get the type of the resulting expression to create the local,
// this must be done after implicit conversions to ensure both branches have
// the same type.
let ty = parser.resolve_type_handle(self, accept, accept_meta)?;
// Add the local that will hold the result of our conditional
let local = self.locals.append(
LocalVariable {
name: None,
ty,
init: None,
},
meta,
);
// Note: `Expression::LocalVariable` must not be emitted so it's important
// that at this point the emitter is flushed but not started.
let local_expr = self
.expressions
.append(Expression::LocalVariable(local), meta);
// Add to each body the store to the result variable
accept_body.push(
Statement::Store {
pointer: local_expr,
value: accept,
},
accept_meta,
);
reject_body.push(
Statement::Store {
pointer: local_expr,
value: reject,
},
reject_meta,
);
// Finally add the `If` to the main body with the `condition` we lowered
// earlier and the branches we prepared.
body.push(
Statement::If {
condition,
accept: accept_body,
reject: reject_body,
},
meta,
);
// Restart the emitter
self.emit_start();
// Note: `Expression::Load` must be emitted before it's used so make
// sure the emitter is active here.
self.expressions.append(
Expression::Load {
pointer: local_expr,
},
meta,
body,
)
}
HirExprKind::Assign { tgt, value } if ExprPos::Lhs != pos => {
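
A schematic model of the emitter discipline these renames codify (toy types, not naga's): expressions appended between a start and an end are flushed into the body as a single `Emit` range, and `emit_restart` is the end-then-start dance required whenever a real statement such as `Store` or `If` has to land between two expression runs:

```rust
// A toy emitter mirroring the start/end/restart protocol documented above.
#[derive(Default)]
struct Emitter {
    start: Option<usize>, // index of the first unflushed expression
}

#[derive(Default)]
struct Context {
    expressions: Vec<String>,
    emitter: Emitter,
}

impl Context {
    fn emit_start(&mut self) {
        assert!(self.emitter.start.is_none(), "already started");
        self.emitter.start = Some(self.expressions.len());
    }
    fn emit_end(&mut self, body: &mut Vec<String>) {
        let start = self.emitter.start.take().expect("not started");
        if start < self.expressions.len() {
            body.push(format!("Emit({}..{})", start, self.expressions.len()));
        }
    }
    fn emit_restart(&mut self, body: &mut Vec<String>) {
        self.emit_end(body);
        self.emit_start();
    }
}

fn main() {
    let (mut ctx, mut body) = (Context::default(), Vec::new());
    ctx.emit_start();
    ctx.expressions.push("pointer".into());
    ctx.expressions.push("value".into());
    ctx.emit_restart(&mut body); // flush expressions, keep emitting after
    body.push("Store".into());
    ctx.emit_end(&mut body);
    println!("{body:?}"); // ["Emit(0..2)", "Store"]
}
```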


@ -972,7 +972,7 @@ impl Parser {
match kind {
FunctionKind::Call(function) => {
ctx.emit_flush(body);
ctx.emit_end(body);
let result = if !is_void {
Some(ctx.add_expression(Expression::CallResult(function), meta, body))
@ -995,8 +995,7 @@ impl Parser {
for (original, pointer) in proxy_writes {
let value = ctx.add_expression(Expression::Load { pointer }, meta, body);
ctx.emit_flush(body);
ctx.emit_start();
ctx.emit_restart(body);
body.push(
Statement::Store {

10
third_party/rust/naga/src/front/glsl/mod.rs vendored

@ -190,7 +190,7 @@ impl Parser {
self.layouter.clear();
// This is necessary because if the last parsing errored out, the module
// wouldn't have been swapped
// wouldn't have been taken
self.module = Module::default();
}
@ -213,13 +213,9 @@ impl Parser {
}
if self.errors.is_empty() {
let mut module = Module::default();
std::mem::swap(&mut self.module, &mut module);
Ok(module)
Ok(std::mem::take(&mut self.module))
} else {
let mut errors = Vec::new();
std::mem::swap(&mut self.errors, &mut errors);
Err(errors)
Err(std::mem::take(&mut self.errors))
}
}


@ -38,7 +38,7 @@ impl<'source> ParsingContext<'source> {
/// Helper method for backtracking from a consumed token
///
/// This method should always be used instead of assigning to `backtracked_token` since
/// it validates that backtracking hasn't ocurred more than one time in a row
/// it validates that backtracking hasn't occurred more than one time in a row
///
/// # Panics
/// - If the parser already backtracked without bumping in between
@ -172,25 +172,23 @@ impl<'source> ParsingContext<'source> {
self.parse_external_declaration(parser, &mut ctx, &mut body)?;
}
match parser.lookup_function.get("main").and_then(|declaration| {
declaration
.overloads
.iter()
.find_map(|decl| match decl.kind {
FunctionKind::Call(handle) if decl.defined && decl.parameters.is_empty() => {
Some(handle)
// Add an `EntryPoint` to `parser.module` for `main`, if a
// suitable overload exists. Error out if we can't find one.
if let Some(declaration) = parser.lookup_function.get("main") {
for decl in declaration.overloads.iter() {
if let FunctionKind::Call(handle) = decl.kind {
if decl.defined && decl.parameters.is_empty() {
parser.add_entry_point(handle, body, ctx.expressions);
return Ok(());
}
_ => None,
})
}) {
Some(handle) => parser.add_entry_point(handle, body, ctx.expressions),
None => parser.errors.push(Error {
kind: ErrorKind::SemanticError("Missing entry point".into()),
meta: Span::default(),
}),
}
}
}
Ok(())
Err(Error {
kind: ErrorKind::SemanticError("Missing entry point".into()),
meta: Span::default(),
})
}
fn parse_uint_constant(&mut self, parser: &mut Parser) -> Result<(u32, Span)> {
@ -438,8 +436,13 @@ impl<'ctx, 'qualifiers> DeclarationContext<'ctx, 'qualifiers> {
}
}
/// Emits all the expressions captured by the emitter and starts the emitter again
///
/// Alias to [`emit_restart`] with the declaration body
///
/// [`emit_restart`]: Context::emit_restart
#[inline]
fn flush_expressions(&mut self) {
self.ctx.emit_flush(self.body);
self.ctx.emit_start()
self.ctx.emit_restart(self.body);
}
}


@ -103,8 +103,7 @@ impl<'source> ParsingContext<'source> {
}
};
ctx.emit_flush(body);
ctx.emit_start();
ctx.emit_restart(body);
body.push(Statement::Return { value }, meta);
terminator.get_or_insert(body.len());
@ -132,8 +131,7 @@ impl<'source> ParsingContext<'source> {
};
self.expect(parser, TokenValue::RightParen)?;
ctx.emit_flush(body);
ctx.emit_start();
ctx.emit_restart(body);
let mut accept = Block::new();
if let Some(more_meta) =
@ -176,8 +174,7 @@ impl<'source> ParsingContext<'source> {
self.expect(parser, TokenValue::RightParen)?;
ctx.emit_flush(body);
ctx.emit_start();
ctx.emit_restart(body);
let mut cases = Vec::new();
@ -301,8 +298,7 @@ impl<'source> ParsingContext<'source> {
&mut loop_body,
);
ctx.emit_flush(&mut loop_body);
ctx.emit_start();
ctx.emit_restart(&mut loop_body);
loop_body.push(
Statement::If {
@ -359,8 +355,7 @@ impl<'source> ParsingContext<'source> {
&mut loop_body,
);
ctx.emit_flush(&mut loop_body);
ctx.emit_start();
ctx.emit_restart(&mut loop_body);
loop_body.push(
Statement::If {
@ -427,8 +422,7 @@ impl<'source> ParsingContext<'source> {
let pointer = parser.add_local_var(ctx, &mut block, decl)?;
ctx.emit_flush(&mut block);
ctx.emit_start();
ctx.emit_restart(&mut block);
block.push(Statement::Store { pointer, value }, meta);
@ -448,8 +442,7 @@ impl<'source> ParsingContext<'source> {
&mut block,
);
ctx.emit_flush(&mut block);
ctx.emit_start();
ctx.emit_restart(&mut block);
block.push(
Statement::If {

74
third_party/rust/naga/src/front/glsl/types.rs vendored

@ -225,10 +225,23 @@ pub const fn type_power(kind: ScalarKind, width: Bytes) -> Option<u32> {
}
impl Parser {
/// Resolves the types of the expressions until `expr` (inclusive)
///
/// This needs to be done before the [`typifier`] can be queried for
/// the types of the expressions in the range between the last grow and `expr`.
///
/// # Note
///
/// The `resolve_type*` methods (like [`resolve_type`]) automatically
/// grow the [`typifier`] so calling this method is not necessary when using
/// them.
///
/// [`typifier`]: Context::typifier
/// [`resolve_type`]: Self::resolve_type
pub(crate) fn typifier_grow(
&self,
ctx: &mut Context,
handle: Handle<Expression>,
expr: Handle<Expression>,
meta: Span,
) -> Result<()> {
let resolve_ctx = ResolveContext {
@ -241,28 +254,73 @@ impl Parser {
};
ctx.typifier
.grow(handle, &ctx.expressions, &resolve_ctx)
.grow(expr, &ctx.expressions, &resolve_ctx)
.map_err(|error| Error {
kind: ErrorKind::SemanticError(format!("Can't resolve type: {:?}", error).into()),
meta,
})
}
/// Gets the type for the result of the `expr` expression
///
/// Automatically grows the [`typifier`] to `expr` so calling
/// [`typifier_grow`] is not necessary
///
/// [`typifier`]: Context::typifier
/// [`typifier_grow`]: Self::typifier_grow
pub(crate) fn resolve_type<'b>(
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
expr: Handle<Expression>,
meta: Span,
) -> Result<&'b TypeInner> {
self.typifier_grow(ctx, handle, meta)?;
Ok(ctx.typifier.get(handle, &self.module.types))
self.typifier_grow(ctx, expr, meta)?;
Ok(ctx.typifier.get(expr, &self.module.types))
}
/// Invalidates the cached type resolution for `handle` forcing a recomputation
/// Gets the type handle for the result of the `expr` expression
///
/// Automatically grows the [`typifier`] to `expr` so calling
/// [`typifier_grow`] is not necessary
///
/// # Note
///
/// Consider using [`resolve_type`] whenever possible
/// since it doesn't require adding each type to the [`types`] arena
/// and it doesn't need to mutably borrow the [`Parser`][Self]
///
/// [`types`]: crate::Module::types
/// [`typifier`]: Context::typifier
/// [`typifier_grow`]: Self::typifier_grow
/// [`resolve_type`]: Self::resolve_type
pub(crate) fn resolve_type_handle(
&mut self,
ctx: &mut Context,
expr: Handle<Expression>,
meta: Span,
) -> Result<Handle<Type>> {
self.typifier_grow(ctx, expr, meta)?;
let resolution = &ctx.typifier[expr];
Ok(match *resolution {
// If the resolution is already a handle return early
crate::proc::TypeResolution::Handle(ty) => ty,
// If it's a value we need to clone it
crate::proc::TypeResolution::Value(_) => match resolution.clone() {
// This is unreachable
crate::proc::TypeResolution::Handle(ty) => ty,
// Add the value to the type arena and return the handle
crate::proc::TypeResolution::Value(inner) => {
self.module.types.insert(Type { name: None, inner }, meta)
}
},
})
}
/// Invalidates the cached type resolution for `expr` forcing a recomputation
pub(crate) fn invalidate_expression<'b>(
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
expr: Handle<Expression>,
meta: Span,
) -> Result<()> {
let resolve_ctx = ResolveContext {
@ -275,7 +333,7 @@ impl Parser {
};
ctx.typifier
.invalidate(handle, &ctx.expressions, &resolve_ctx)
.invalidate(expr, &ctx.expressions, &resolve_ctx)
.map_err(|error| Error {
kind: ErrorKind::SemanticError(format!("Can't resolve type: {:?}", error).into()),
meta,


@ -12,7 +12,6 @@ pub(super) const fn map_binary_operator(word: spirv::Op) -> Result<crate::Binary
Op::ISub | Op::FSub => Ok(BinaryOperator::Subtract),
Op::IMul | Op::FMul => Ok(BinaryOperator::Multiply),
Op::UDiv | Op::SDiv | Op::FDiv => Ok(BinaryOperator::Divide),
Op::UMod | Op::SMod | Op::FMod => Ok(BinaryOperator::Modulo),
Op::SRem => Ok(BinaryOperator::Modulo),
// Relational and Logical Instructions
Op::IEqual | Op::FOrdEqual | Op::FUnordEqual | Op::LogicalEqual => {

third_party/rust/naga/src/front/spv/mod.rs (vendored)

@ -1966,7 +1966,6 @@ impl<I: Iterator<Item = u32>> Parser<I> {
| Op::BitwiseXor
| Op::BitwiseAnd
| Op::SDiv
| Op::SMod
| Op::SRem => {
inst.expect(5)?;
let operator = map_binary_operator(inst.op)?;
@ -2009,10 +2008,188 @@ impl<I: Iterator<Item = u32>> Parser<I> {
inst.expect(5)?;
parse_expr_op!(crate::BinaryOperator::Divide, BINARY)?;
}
Op::UMod | Op::FMod | Op::FRem => {
Op::UMod | Op::FRem => {
inst.expect(5)?;
parse_expr_op!(crate::BinaryOperator::Modulo, BINARY)?;
}
Op::SMod => {
inst.expect(5)?;
// x - y * int(floor(float(x) / float(y)))
let start = self.data_offset;
let result_type_id = self.next()?;
let result_id = self.next()?;
let p1_id = self.next()?;
let p2_id = self.next()?;
let span = self.span_from_with_op(start);
let p1_lexp = self.lookup_expression.lookup(p1_id)?;
let left = self.get_expr_handle(
p1_id,
p1_lexp,
ctx,
&mut emitter,
&mut block,
body_idx,
);
let p2_lexp = self.lookup_expression.lookup(p2_id)?;
let right = self.get_expr_handle(
p2_id,
p2_lexp,
ctx,
&mut emitter,
&mut block,
body_idx,
);
let left_cast = ctx.expressions.append(
crate::Expression::As {
expr: left,
kind: crate::ScalarKind::Float,
convert: None,
},
span,
);
let right_cast = ctx.expressions.append(
crate::Expression::As {
expr: right,
kind: crate::ScalarKind::Float,
convert: None,
},
span,
);
let div = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Divide,
left: left_cast,
right: right_cast,
},
span,
);
let floor = ctx.expressions.append(
crate::Expression::Math {
fun: crate::MathFunction::Floor,
arg: div,
arg1: None,
arg2: None,
arg3: None,
},
span,
);
let result_ty = self.lookup_type.lookup(result_type_id)?;
let kind = ctx.type_arena[result_ty.handle]
.inner
.scalar_kind()
.unwrap();
let cast = ctx.expressions.append(
crate::Expression::As {
expr: floor,
kind,
convert: None,
},
span,
);
let mult = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Multiply,
left: cast,
right,
},
span,
);
let sub = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Subtract,
left,
right: mult,
},
span,
);
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: sub,
type_id: result_type_id,
block_id,
},
);
}
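
SPIR-V's `OpSMod` takes the sign of the divisor, while naga's `Modulo` operator (the target of `OpSRem` above) takes the sign of the dividend, so the front end now emits the floored expansion instead of a direct mapping. A host-side Rust sketch of the identity, for illustration only:

// Illustration of `x - y * int(floor(float(x) / float(y)))`.
fn op_smod(x: i32, y: i32) -> i32 {
    x - y * (x as f32 / y as f32).floor() as i32
}

fn main() {
    assert_eq!(op_smod(-7, 3), 2); // OpSMod: result takes the divisor's sign
    assert_eq!(-7 % 3, -1);        // OpSRem: result takes the dividend's sign
}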
Op::FMod => {
inst.expect(5)?;
// x - y * floor(x / y)
let start = self.data_offset;
let span = self.span_from_with_op(start);
let result_type_id = self.next()?;
let result_id = self.next()?;
let p1_id = self.next()?;
let p2_id = self.next()?;
let p1_lexp = self.lookup_expression.lookup(p1_id)?;
let left = self.get_expr_handle(
p1_id,
p1_lexp,
ctx,
&mut emitter,
&mut block,
body_idx,
);
let p2_lexp = self.lookup_expression.lookup(p2_id)?;
let right = self.get_expr_handle(
p2_id,
p2_lexp,
ctx,
&mut emitter,
&mut block,
body_idx,
);
let div = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Divide,
left,
right,
},
span,
);
let floor = ctx.expressions.append(
crate::Expression::Math {
fun: crate::MathFunction::Floor,
arg: div,
arg1: None,
arg2: None,
arg3: None,
},
span,
);
let mult = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Multiply,
left: floor,
right,
},
span,
);
let sub = ctx.expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Subtract,
left,
right: mult,
},
span,
);
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: sub,
type_id: result_type_id,
block_id,
},
);
}
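
`OpFMod` gets the analogous floored expansion, without the intermediate casts. Again a purely illustrative sketch:

// Illustration of `x - y * floor(x / y)`.
fn op_fmod(x: f32, y: f32) -> f32 {
    x - y * (x / y).floor()
}

fn main() {
    assert_eq!(op_fmod(-7.0, 3.0), 2.0); // sign follows the divisor
    assert_eq!(-7.0_f32 % 3.0, -1.0);    // Rust's `%` truncates, like OpFRem
}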
Op::VectorTimesScalar
| Op::VectorTimesMatrix
| Op::MatrixTimesScalar
@ -2453,7 +2630,7 @@ impl<I: Iterator<Item = u32>> Parser<I> {
let mut max_component = 0;
for _ in 5..inst.wc as usize {
let mut index = self.next()?;
if index == !0 {
if index == u32::MAX {
// treat Undefined as X
index = 0;
}

third_party/rust/naga/src/front/spv/null.rs (vendored)

@ -158,7 +158,7 @@ pub fn generate_default_built_in(
width: 4,
},
Some(crate::BuiltIn::SampleMask) => crate::ConstantInner::Scalar {
value: crate::ScalarValue::Uint(!0),
value: crate::ScalarValue::Uint(u64::MAX),
width: 4,
},
//Note: `crate::BuiltIn::ClipDistance` is intentionally left for the default path


@ -184,12 +184,15 @@ fn parse_constructor_type<'a>(
Ok(Some(ConstructorType::Vector { size, kind, width }))
}
(Token::Paren('<'), ConstructorType::PartialMatrix { columns, rows }) => {
let (_, width) = lexer.next_scalar_generic()?;
Ok(Some(ConstructorType::Matrix {
columns,
rows,
width,
}))
let (kind, width, span) = lexer.next_scalar_generic_with_span()?;
match kind {
ScalarKind::Float => Ok(Some(ConstructorType::Matrix {
columns,
rows,
width,
})),
_ => Err(Error::BadMatrixScalarKind(span, kind, width)),
}
}
(Token::Paren('<'), ConstructorType::PartialArray) => {
lexer.expect_generic_paren('<')?;
@ -363,6 +366,25 @@ pub(super) fn parse_construction<'a>(
convert: Some(dst_width),
},
// Vector conversion (vector -> vector) - partial
(
Components::One {
component,
ty:
&TypeInner::Vector {
size: src_size,
kind: src_kind,
..
},
..
},
ConstructorType::PartialVector { size: dst_size },
) if dst_size == src_size => Expression::As {
expr: component,
kind: src_kind,
convert: None,
},
// Matrix conversion (matrix -> matrix)
(
Components::One {
@ -386,6 +408,28 @@ pub(super) fn parse_construction<'a>(
convert: Some(dst_width),
},
// Matrix conversion (matrix -> matrix) - partial
(
Components::One {
component,
ty:
&TypeInner::Matrix {
columns: src_columns,
rows: src_rows,
..
},
..
},
ConstructorType::PartialMatrix {
columns: dst_columns,
rows: dst_rows,
},
) if dst_columns == src_columns && dst_rows == src_rows => Expression::As {
expr: component,
kind: ScalarKind::Float,
convert: None,
},
// Vector constructor (splat) - infer type
(
Components::One {


@ -544,6 +544,9 @@ impl<'a> Lexer<'a> {
pub(super) fn next_ident_with_span(&mut self) -> Result<(&'a str, Span), Error<'a>> {
match self.next() {
(Token::Word(word), span) if word == "_" => {
Err(Error::InvalidIdentifierUnderscore(span))
}
(Token::Word(word), span) if word.starts_with("__") => {
Err(Error::ReservedIdentifierPrefix(span))
}

third_party/rust/naga/src/front/wgsl/mod.rs (vendored)

@ -130,6 +130,7 @@ pub enum Error<'a> {
BadFloat(Span, BadFloatError),
BadU32Constant(Span),
BadScalarWidth(Span, Bytes),
BadMatrixScalarKind(Span, crate::ScalarKind, u8),
BadAccessor(Span),
BadTexture(Span),
BadTypeCast {
@ -147,6 +148,7 @@ pub enum Error<'a> {
InvalidForInitializer(Span),
InvalidGatherComponent(Span, i32),
InvalidConstructorComponentType(Span, i32),
InvalidIdentifierUnderscore(Span),
ReservedIdentifierPrefix(Span),
UnknownAddressSpace(Span),
UnknownAttribute(Span),
@ -295,12 +297,20 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "expected unsigned integer".into())],
notes: vec![],
},
Error::BadScalarWidth(ref bad_span, width) => ParseError {
message: format!("invalid width of `{}` bits for literal", width as u32 * 8,),
labels: vec![(bad_span.clone(), "invalid width".into())],
notes: vec!["the only valid width is 32 for now".to_string()],
},
Error::BadMatrixScalarKind(
ref span,
kind,
width,
) => ParseError {
message: format!("matrix scalar type must be floating-point, but found `{}`", kind.to_wgsl(width)),
labels: vec![(span.clone(), "must be floating-point (e.g. `f32`)".into())],
notes: vec![],
},
Error::BadAccessor(ref accessor_span) => ParseError {
message: format!(
"invalid field accessor `{}`",
@ -362,6 +372,11 @@ impl<'a> Error<'a> {
labels: vec![(bad_span.clone(), "invalid component type".into())],
notes: vec![],
},
Error::InvalidIdentifierUnderscore(ref bad_span) => ParseError {
message: "Identifier can't be '_'".to_string(),
labels: vec![(bad_span.clone(), "invalid identifier".into())],
notes: vec!["Use phony assignment instead ('_ =' notice the absence of 'let' or 'var')".to_string()],
},
Error::ReservedIdentifierPrefix(ref bad_span) => ParseError {
message: format!("Identifier starts with a reserved prefix: '{}'", &source[bad_span.clone()]),
labels: vec![(bad_span.clone(), "invalid identifier".into())],
@ -2895,6 +2910,23 @@ impl Parser {
Ok((members, span))
}
fn parse_matrix_scalar_type<'a>(
&mut self,
lexer: &mut Lexer<'a>,
columns: crate::VectorSize,
rows: crate::VectorSize,
) -> Result<crate::TypeInner, Error<'a>> {
let (kind, width, span) = lexer.next_scalar_generic_with_span()?;
match kind {
crate::ScalarKind::Float => Ok(crate::TypeInner::Matrix {
columns,
rows,
width,
}),
_ => Err(Error::BadMatrixScalarKind(span, kind, width)),
}
}
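
Both this helper and the constructor path in construction.rs earlier in the diff now insist on a floating-point element type. A hypothetical probe through naga's WGSL entry point, showing the behavior this change introduces:

fn main() {
    // Rejected with the new BadMatrixScalarKind error.
    assert!(naga::front::wgsl::parse_str("var<private> m: mat2x2<i32>;").is_err());
    // Accepted: matrices must have a floating-point scalar type.
    assert!(naga::front::wgsl::parse_str("var<private> m: mat2x2<f32>;").is_ok());
}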
fn parse_type_decl_impl<'a>(
&mut self,
lexer: &mut Lexer<'a>,
@ -2906,6 +2938,7 @@ impl Parser {
if let Some((kind, width)) = conv::get_scalar_type(word) {
return Ok(Some(crate::TypeInner::Scalar { kind, width }));
}
Ok(Some(match word {
"vec2" => {
let (kind, width) = lexer.next_scalar_generic()?;
@ -2932,77 +2965,44 @@ impl Parser {
}
}
"mat2x2" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Bi,
rows: crate::VectorSize::Bi,
width,
}
self.parse_matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Bi)?
}
"mat2x3" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Bi,
rows: crate::VectorSize::Tri,
width,
}
}
"mat2x4" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Bi,
rows: crate::VectorSize::Quad,
width,
}
self.parse_matrix_scalar_type(lexer, crate::VectorSize::Bi, crate::VectorSize::Tri)?
}
"mat2x4" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Bi,
crate::VectorSize::Quad,
)?,
"mat3x2" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Tri,
rows: crate::VectorSize::Bi,
width,
}
}
"mat3x3" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Tri,
rows: crate::VectorSize::Tri,
width,
}
}
"mat3x4" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Tri,
rows: crate::VectorSize::Quad,
width,
}
}
"mat4x2" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Quad,
rows: crate::VectorSize::Bi,
width,
}
}
"mat4x3" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Quad,
rows: crate::VectorSize::Tri,
width,
}
}
"mat4x4" => {
let (_, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Matrix {
columns: crate::VectorSize::Quad,
rows: crate::VectorSize::Quad,
width,
}
self.parse_matrix_scalar_type(lexer, crate::VectorSize::Tri, crate::VectorSize::Bi)?
}
"mat3x3" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Tri,
crate::VectorSize::Tri,
)?,
"mat3x4" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Tri,
crate::VectorSize::Quad,
)?,
"mat4x2" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Quad,
crate::VectorSize::Bi,
)?,
"mat4x3" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Quad,
crate::VectorSize::Tri,
)?,
"mat4x4" => self.parse_matrix_scalar_type(
lexer,
crate::VectorSize::Quad,
crate::VectorSize::Quad,
)?,
"atomic" => {
let (kind, width) = lexer.next_scalar_generic()?;
crate::TypeInner::Atomic { kind, width }
@ -3347,25 +3347,26 @@ impl Parser {
let _ = context.resolve_type(reference.handle)?;
let ty = context.typifier.get(reference.handle, context.types);
let constant_inner = match ty.canonical_form(context.types) {
Some(crate::TypeInner::ValuePointer {
let (kind, width) = match *ty {
crate::TypeInner::ValuePointer {
size: None,
kind,
width,
space: _,
}) => crate::ConstantInner::Scalar {
width,
value: match kind {
crate::ScalarKind::Sint => crate::ScalarValue::Sint(1),
crate::ScalarKind::Uint => crate::ScalarValue::Uint(1),
_ => {
return Err(Error::BadIncrDecrReferenceType(lhs_span));
}
},
..
} => (kind, width),
crate::TypeInner::Pointer { base, .. } => match context.types[base].inner {
crate::TypeInner::Scalar { kind, width } => (kind, width),
_ => return Err(Error::BadIncrDecrReferenceType(lhs_span)),
},
_ => return Err(Error::BadIncrDecrReferenceType(lhs_span)),
};
let constant_inner = crate::ConstantInner::Scalar {
width,
value: match kind {
crate::ScalarKind::Sint => crate::ScalarValue::Sint(1),
crate::ScalarKind::Uint => crate::ScalarValue::Uint(1),
_ => return Err(Error::BadIncrDecrReferenceType(lhs_span)),
},
_ => {
return Err(Error::BadIncrDecrReferenceType(lhs_span));
}
};
let constant = context.constants.append(
crate::Constant {
@ -3489,6 +3490,18 @@ impl Parser {
(Token::Word(word), _) => {
let mut emitter = super::Emitter::default();
let statement = match word {
"_" => {
let _ = lexer.next();
emitter.start(context.expressions);
lexer.expect(Token::Operation('='))?;
self.parse_general_expression(
lexer,
context.as_expression(block, &mut emitter),
)?;
lexer.expect(Token::Separator(';'))?;
block.extend(emitter.finish(context.expressions));
None
}
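
Together with the lexer change earlier in the diff, `_` is now usable only as a phony-assignment target. A hypothetical probe of both paths:

fn main() {
    // Accepted: phony assignment evaluates the right-hand side and discards it.
    assert!(naga::front::wgsl::parse_str("fn f() { _ = 1 + 2; }").is_ok());
    // Rejected: `_` is not a valid identifier for `let` or `var`.
    assert!(naga::front::wgsl::parse_str("fn f() { let _ = 1 + 2; }").is_err());
}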
"let" => {
let _ = lexer.next();
emitter.start(context.expressions);

third_party/rust/naga/src/keywords/wgsl.rs (vendored)

@ -134,7 +134,7 @@ pub const RESERVED: &[&str] = &[
"await",
"become",
"bf16",
"buffer",
"binding_array",
"cast",
"catch",
"cbuffer",
@ -217,10 +217,8 @@ pub const RESERVED: &[&str] = &[
"impl",
"implements",
"import",
"in",
"inline",
"inout",
"input",
"instanceof",
"interface",
"invariant",
@ -276,8 +274,6 @@ pub const RESERVED: &[&str] = &[
"nullptr",
"of",
"operator",
"out",
"output",
"package",
"packoffset",
"partition",
@ -348,7 +344,6 @@ pub const RESERVED: &[&str] = &[
"technique10",
"technique11",
"template",
"texture",
"texture1D",
"texture1DArray",
"texture2D",

third_party/rust/naga/src/lib.rs (vendored)

@ -698,7 +698,48 @@ pub enum TypeInner {
},
/// Can be used to sample values from images.
Sampler { comparison: bool },
/// Array of bindings
/// Array of bindings.
///
/// A `BindingArray` represents an array where each element draws its value
/// from a separate bound resource. The array's element type `base` may be
/// [`Image`], [`Sampler`], or any type that would be permitted for a global
/// in the [`Uniform`] or [`Storage`] address spaces. Only global variables
/// may be binding arrays; on the host side, their values are provided by
/// [`TextureViewArray`], [`SamplerArray`], or [`BufferArray`]
/// bindings.
///
/// Since each element comes from a distinct resource, a binding array of
/// images could have images of varying sizes (but not varying dimensions;
/// they must all have the same `Image` type). Or, a binding array of
/// buffers could have elements that are dynamically sized arrays, each with
/// a different length.
///
/// Binding arrays are not [`DATA`]. This means that all binding array
/// globals must be placed in the [`Handle`] address space. Referring to
/// such a global produces a `BindingArray` value directly; there are never
/// pointers to binding arrays. The only operation permitted on
/// `BindingArray` values is indexing, which yields the element by value,
/// not a pointer to the element. (This means that buffer array contents
/// cannot be stored to; [naga#1864] covers lifting this restriction.)
///
/// Unlike textures and samplers, binding arrays are not [`ARGUMENT`], so
/// they cannot be passed as arguments to functions.
///
/// Naga's WGSL front end supports binding arrays with the type syntax
/// `binding_array<T, N>`.
///
/// [`Image`]: TypeInner::Image
/// [`Sampler`]: TypeInner::Sampler
/// [`Uniform`]: AddressSpace::Uniform
/// [`Storage`]: AddressSpace::Storage
/// [`TextureViewArray`]: https://docs.rs/wgpu/latest/wgpu/enum.BindingResource.html#variant.TextureViewArray
/// [`SamplerArray`]: https://docs.rs/wgpu/latest/wgpu/enum.BindingResource.html#variant.SamplerArray
/// [`BufferArray`]: https://docs.rs/wgpu/latest/wgpu/enum.BindingResource.html#variant.BufferArray
/// [`DATA`]: crate::valid::TypeFlags::DATA
/// [`Handle`]: AddressSpace::Handle
/// [`ARGUMENT`]: crate::valid::TypeFlags::ARGUMENT
/// [naga#1864]: https://github.com/gfx-rs/naga/issues/1864
BindingArray { base: Handle<Type>, size: ArraySize },
}
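
A minimal sketch of declaring such a type in the IR, assuming a `module: naga::Module` is in scope and eliding span bookkeeping with `Default`:

// The element type must itself live in the module's type arena.
let sampler = module.types.insert(
    naga::Type {
        name: None,
        inner: naga::TypeInner::Sampler { comparison: false },
    },
    naga::Span::default(),
);
// A runtime-sized array of separately bound samplers.
let sampler_array = naga::TypeInner::BindingArray {
    base: sampler,
    size: naga::ArraySize::Dynamic,
};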
@ -1108,14 +1149,15 @@ pub enum Expression {
base: Handle<Expression>,
index: Handle<Expression>,
},
/// Array access with a known index.
/// Access the same types as [`Access`], plus [`Struct`] with a known index.
///
/// [`Access`]: Expression::Access
/// [`Struct`]: TypeInner::Struct
AccessIndex {
base: Handle<Expression>,
index: u32,
},
/// Constant value.
///
/// Every `Constant` expression
Constant(Handle<Constant>),
/// Splat scalar into a vector.
Splat {

third_party/rust/naga/src/proc/mod.rs (vendored)

@ -67,6 +67,15 @@ impl super::ScalarValue {
}
}
impl super::ScalarKind {
pub const fn is_numeric(self) -> bool {
match self {
crate::ScalarKind::Sint | crate::ScalarKind::Uint | crate::ScalarKind::Float => true,
crate::ScalarKind::Bool => false,
}
}
}
pub const POINTER_SPAN: u32 = 4;
impl super::TypeInner {

third_party/rust/naga/src/proc/typifier.rs (vendored)

@ -821,13 +821,13 @@ impl<'a> ResolveContext<'a> {
Mf::CountOneBits |
Mf::ReverseBits |
Mf::ExtractBits |
Mf::InsertBits => res_arg.clone(),
Mf::InsertBits |
Mf::FindLsb |
Mf::FindMsb => match *res_arg.inner_with(types) {
Ti::Scalar { kind: _, width } =>
TypeResolution::Value(Ti::Scalar { kind: crate::ScalarKind::Sint, width }),
Ti::Vector { size, kind: _, width } =>
TypeResolution::Value(Ti::Vector { size, kind: crate::ScalarKind::Sint, width }),
Ti::Scalar { kind: kind @ (crate::ScalarKind::Sint | crate::ScalarKind::Uint), width } =>
TypeResolution::Value(Ti::Scalar { kind, width }),
Ti::Vector { size, kind: kind @ (crate::ScalarKind::Sint | crate::ScalarKind::Uint), width } =>
TypeResolution::Value(Ti::Vector { size, kind, width }),
ref other => return Err(ResolveError::IncompatibleOperands(
format!("{:?}({:?})", fun, other)
)),
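
In other words, `findLsb` and `findMsb` now resolve to the same signedness as their argument (a `u32` argument yields a `u32` result) instead of always being forced to a signed type, and non-integer arguments fall through to the `IncompatibleOperands` arm above.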
@ -864,6 +864,15 @@ impl<'a> ResolveContext<'a> {
size,
width: convert.unwrap_or(width),
}),
Ti::Matrix {
columns,
rows,
width,
} => TypeResolution::Value(Ti::Matrix {
columns,
rows,
width: convert.unwrap_or(width),
}),
ref other => {
return Err(ResolveError::IncompatibleOperands(format!(
"{:?} as {:?}",


@ -241,7 +241,7 @@ impl super::Validator {
size: crate::ArraySize::Constant(handle),
..
} => module.constants[handle].to_array_length().unwrap(),
Ti::Array { .. } | Ti::BindingArray { .. } => !0, // can't statically know, but need run-time checks
Ti::Array { .. } | Ti::BindingArray { .. } => u32::MAX, // can't statically know, but need run-time checks
Ti::Pointer { base, .. } if top_level => {
resolve_index_limit(module, top, &module.types[base].inner, false)?
}

third_party/rust/naga/src/valid/interface.rs (vendored)

@ -39,6 +39,8 @@ pub enum GlobalVariableError {
pub enum VaryingError {
#[error("The type {0:?} does not match the varying")]
InvalidType(Handle<crate::Type>),
#[error("The type {0:?} cannot be used for user-defined entry point inputs or outputs")]
NotIOShareableType(Handle<crate::Type>),
#[error("Interpolation is not valid")]
InvalidInterpolation,
#[error("Interpolation must be specified on vertex shader outputs and fragment shader inputs")]
@ -98,22 +100,26 @@ fn storage_usage(access: crate::StorageAccess) -> GlobalUse {
}
struct VaryingContext<'a> {
ty: Handle<crate::Type>,
stage: crate::ShaderStage,
output: bool,
types: &'a UniqueArena<crate::Type>,
type_info: &'a Vec<super::r#type::TypeInfo>,
location_mask: &'a mut BitSet,
built_ins: &'a mut crate::FastHashSet<crate::BuiltIn>,
capabilities: Capabilities,
}
impl VaryingContext<'_> {
fn validate_impl(&mut self, binding: &crate::Binding) -> Result<(), VaryingError> {
fn validate_impl(
&mut self,
ty: Handle<crate::Type>,
binding: &crate::Binding,
) -> Result<(), VaryingError> {
use crate::{
BuiltIn as Bi, ScalarKind as Sk, ShaderStage as St, TypeInner as Ti, VectorSize as Vs,
};
let ty_inner = &self.types[self.ty].inner;
let ty_inner = &self.types[ty].inner;
match *binding {
crate::Binding::BuiltIn(built_in) => {
// Ignore the `invariant` field for the sake of duplicate checks,
@ -267,6 +273,13 @@ impl VaryingContext<'_> {
interpolation,
sampling,
} => {
// Only IO-shareable types may be stored in locations.
if !self.type_info[ty.index()]
.flags
.contains(super::TypeFlags::IO_SHAREABLE)
{
return Err(VaryingError::NotIOShareableType(ty));
}
if !self.location_mask.insert(location as usize) {
return Err(VaryingError::BindingCollision { location });
}
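
This is the enforcement point for the new `IO_SHAREABLE` flag introduced in valid/type.rs later in the diff: broadly, only numeric (non-bool) scalars and vectors, and aggregates built from them, may be bound to a user-defined `@location`; built-in bindings take the branch above and are unaffected.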
@ -294,7 +307,7 @@ impl VaryingContext<'_> {
return Err(VaryingError::InvalidInterpolation);
}
}
None => return Err(VaryingError::InvalidType(self.ty)),
None => return Err(VaryingError::InvalidType(ty)),
}
}
}
@ -302,19 +315,22 @@ impl VaryingContext<'_> {
Ok(())
}
fn validate(&mut self, binding: Option<&crate::Binding>) -> Result<(), WithSpan<VaryingError>> {
let span_context = self.types.get_span_context(self.ty);
fn validate(
&mut self,
ty: Handle<crate::Type>,
binding: Option<&crate::Binding>,
) -> Result<(), WithSpan<VaryingError>> {
let span_context = self.types.get_span_context(ty);
match binding {
Some(binding) => self
.validate_impl(binding)
.validate_impl(ty, binding)
.map_err(|e| e.with_span_context(span_context)),
None => {
match self.types[self.ty].inner {
match self.types[ty].inner {
//TODO: check the member types
crate::TypeInner::Struct { ref members, .. } => {
for (index, member) in members.iter().enumerate() {
self.ty = member.ty;
let span_context = self.types.get_span_context(self.ty);
let span_context = self.types.get_span_context(ty);
match member.binding {
None => {
return Err(VaryingError::MemberMissingBinding(index as u32)
@ -322,7 +338,7 @@ impl VaryingContext<'_> {
}
// TODO: shouldn't this be validate?
Some(ref binding) => self
.validate_impl(binding)
.validate_impl(member.ty, binding)
.map_err(|e| e.with_span_context(span_context))?,
}
}
@ -364,7 +380,7 @@ impl super::Validator {
));
}
}
(TypeFlags::DATA | TypeFlags::HOST_SHARED, true)
(TypeFlags::DATA | TypeFlags::HOST_SHAREABLE, true)
}
crate::AddressSpace::Uniform => {
if let Err((ty_handle, disalignment)) = type_info.uniform_layout {
@ -377,7 +393,10 @@ impl super::Validator {
}
}
(
TypeFlags::DATA | TypeFlags::COPY | TypeFlags::SIZED | TypeFlags::HOST_SHARED,
TypeFlags::DATA
| TypeFlags::COPY
| TypeFlags::SIZED
| TypeFlags::HOST_SHAREABLE,
true,
)
}
@ -402,7 +421,10 @@ impl super::Validator {
));
}
(
TypeFlags::DATA | TypeFlags::COPY | TypeFlags::HOST_SHARED | TypeFlags::SIZED,
TypeFlags::DATA
| TypeFlags::COPY
| TypeFlags::HOST_SHAREABLE
| TypeFlags::SIZED,
false,
)
}
@ -470,15 +492,15 @@ impl super::Validator {
// TODO: add span info to function arguments
for (index, fa) in ep.function.arguments.iter().enumerate() {
let mut ctx = VaryingContext {
ty: fa.ty,
stage: ep.stage,
output: false,
types: &module.types,
type_info: &self.types,
location_mask: &mut self.location_mask,
built_ins: &mut argument_built_ins,
capabilities: self.capabilities,
};
ctx.validate(fa.binding.as_ref())
ctx.validate(fa.ty, fa.binding.as_ref())
.map_err_inner(|e| EntryPointError::Argument(index as u32, e).with_span())?;
}
@ -486,15 +508,15 @@ impl super::Validator {
if let Some(ref fr) = ep.function.result {
let mut result_built_ins = crate::FastHashSet::default();
let mut ctx = VaryingContext {
ty: fr.ty,
stage: ep.stage,
output: true,
types: &module.types,
type_info: &self.types,
location_mask: &mut self.location_mask,
built_ins: &mut result_built_ins,
capabilities: self.capabilities,
};
ctx.validate(fr.binding.as_ref())
ctx.validate(fr.ty, fr.binding.as_ref())
.map_err_inner(|e| EntryPointError::Result(e).with_span())?;
}

third_party/rust/naga/src/valid/mod.rs (vendored)

@ -93,7 +93,7 @@ bitflags::bitflags! {
/// Support for non-uniform indexing of uniform buffers and storage texture arrays.
const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 0x10;
/// Support for non-uniform indexing of samplers.
const SAMPLER_NON_UNIFORM_INDEXING = 0x11;
const SAMPLER_NON_UNIFORM_INDEXING = 0x20;
}
}
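
This is a genuine bit-collision fix rather than a renumbering for style; bitflags constants must each be a distinct single bit, which the old value was not:

fn main() {
    // 0x11 is 0b1_0001, which aliased the 0x01 and 0x10 capability bits.
    assert_eq!(0x11, 0x01 | 0x10);
    // 0x20 is the next free single bit after 0x10.
    assert!(0x20_u32.is_power_of_two());
}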

third_party/rust/naga/src/valid/type.rs (vendored)

@ -40,14 +40,15 @@ bitflags::bitflags! {
/// The data can be copied around.
const COPY = 0x4;
/// Can be used for interfacing between pipeline stages.
/// Can be used for user-defined IO between pipeline stages.
///
/// This includes non-bool scalars and vectors, matrices, and structs
/// and arrays containing only interface types.
const INTERFACE = 0x8;
/// This covers anything that can be in [`Location`] binding:
/// non-bool scalars and vectors, matrices, and structs and
/// arrays containing only interface types.
const IO_SHAREABLE = 0x8;
/// Can be used for host-shareable structures.
const HOST_SHARED = 0x10;
const HOST_SHAREABLE = 0x10;
/// This type can be passed as a function argument.
const ARGUMENT = 0x40;
@ -155,6 +156,22 @@ fn check_member_layout(
};
}
/// Determine whether a pointer in `space` can be passed as an argument.
///
/// If a pointer in `space` is permitted to be passed as an argument to a
/// user-defined function, return `TypeFlags::ARGUMENT`. Otherwise, return
/// `TypeFlags::empty()`.
///
/// Pointers passed as arguments to user-defined functions must be in the
/// `Function`, `Private`, or `Workgroup` storage space.
const fn ptr_space_argument_flag(space: crate::AddressSpace) -> TypeFlags {
use crate::AddressSpace as As;
match space {
As::Function | As::Private | As::WorkGroup => TypeFlags::ARGUMENT,
As::Uniform | As::Storage { .. } | As::Handle | As::PushConstant => TypeFlags::empty(),
}
}
#[derive(Clone, Debug)]
pub(super) struct TypeInfo {
pub flags: TypeFlags,
@ -210,13 +227,17 @@ impl super::Validator {
if !self.check_width(kind, width) {
return Err(TypeError::InvalidWidth(kind, width));
}
let shareable = if kind.is_numeric() {
TypeFlags::IO_SHAREABLE | TypeFlags::HOST_SHAREABLE
} else {
TypeFlags::empty()
};
TypeInfo::new(
TypeFlags::DATA
| TypeFlags::SIZED
| TypeFlags::COPY
| TypeFlags::INTERFACE
| TypeFlags::HOST_SHARED
| TypeFlags::ARGUMENT,
| TypeFlags::ARGUMENT
| shareable,
width as u32,
)
}
@ -224,14 +245,19 @@ impl super::Validator {
if !self.check_width(kind, width) {
return Err(TypeError::InvalidWidth(kind, width));
}
let shareable = if kind.is_numeric() {
TypeFlags::IO_SHAREABLE | TypeFlags::HOST_SHAREABLE
} else {
TypeFlags::empty()
};
let count = if size >= crate::VectorSize::Tri { 4 } else { 2 };
TypeInfo::new(
TypeFlags::DATA
| TypeFlags::SIZED
| TypeFlags::COPY
| TypeFlags::INTERFACE
| TypeFlags::HOST_SHARED
| TypeFlags::ARGUMENT,
| TypeFlags::HOST_SHAREABLE
| TypeFlags::ARGUMENT
| shareable,
count * (width as u32),
)
}
@ -248,8 +274,7 @@ impl super::Validator {
TypeFlags::DATA
| TypeFlags::SIZED
| TypeFlags::COPY
| TypeFlags::INTERFACE
| TypeFlags::HOST_SHARED
| TypeFlags::HOST_SHAREABLE
| TypeFlags::ARGUMENT,
count * (width as u32),
)
@ -263,7 +288,7 @@ impl super::Validator {
return Err(TypeError::InvalidAtomicWidth(kind, width));
}
TypeInfo::new(
TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::HOST_SHARED,
TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::HOST_SHAREABLE,
width as u32,
)
}
@ -299,20 +324,11 @@ impl super::Validator {
}
}
// Pointers passed as arguments to user-defined functions must
// be in the `Function`, `Private`, or `Workgroup` storage
// space. We only mark pointers in those spaces as `ARGUMENT`.
//
// `Validator::validate_function` actually checks the storage
// space of pointer arguments explicitly before checking the
// `ARGUMENT` flag, to give better error messages. But it seems
// best to set `ARGUMENT` accurately anyway.
let argument_flag = match space {
As::Function | As::Private | As::WorkGroup => TypeFlags::ARGUMENT,
As::Uniform | As::Storage { .. } | As::Handle | As::PushConstant => {
TypeFlags::empty()
}
};
let argument_flag = ptr_space_argument_flag(space);
// Pointers cannot be stored in variables, structure members, or
// array elements, so we do not mark them as `DATA`.
@ -322,12 +338,28 @@ impl super::Validator {
size: _,
kind,
width,
space: _,
space,
} => {
// ValuePointer should be treated the same way as the equivalent
// Pointer / Scalar / Vector combination, so each step in those
// variants' match arms should have a counterpart here.
//
// However, some cases are trivial: All our implicit base types
// are DATA and SIZED, so we can never return
// `InvalidPointerBase` or `InvalidPointerToUnsized`.
if !self.check_width(kind, width) {
return Err(TypeError::InvalidWidth(kind, width));
}
TypeInfo::new(TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::COPY, 0)
// `Validator::validate_function` actually checks the storage
// space of pointer arguments explicitly before checking the
// `ARGUMENT` flag, to give better error messages. But it seems
// best to set `ARGUMENT` accurately anyway.
let argument_flag = ptr_space_argument_flag(space);
// Pointers cannot be stored in variables, structure members, or
// array elements, so we do not mark them as `DATA`.
TypeInfo::new(argument_flag | TypeFlags::SIZED | TypeFlags::COPY, 0)
}
Ti::Array { base, size, stride } => {
if base >= handle {
@ -445,7 +477,7 @@ impl super::Validator {
}
};
let base_mask = TypeFlags::COPY | TypeFlags::HOST_SHARED | TypeFlags::INTERFACE;
let base_mask = TypeFlags::COPY | TypeFlags::HOST_SHAREABLE;
TypeInfo {
flags: TypeFlags::DATA | (base_info.flags & base_mask) | sized_flag,
uniform_layout,
@ -461,8 +493,8 @@ impl super::Validator {
TypeFlags::DATA
| TypeFlags::SIZED
| TypeFlags::COPY
| TypeFlags::HOST_SHARED
| TypeFlags::INTERFACE
| TypeFlags::HOST_SHAREABLE
| TypeFlags::IO_SHAREABLE
| TypeFlags::ARGUMENT,
1,
);
@ -480,7 +512,7 @@ impl super::Validator {
if !base_info.flags.contains(TypeFlags::DATA) {
return Err(TypeError::InvalidData(member.ty));
}
if !base_info.flags.contains(TypeFlags::HOST_SHARED) {
if !base_info.flags.contains(TypeFlags::HOST_SHAREABLE) {
if ti.uniform_layout.is_ok() {
ti.uniform_layout = Err((member.ty, Disalignment::NonHostShareable));
}
@ -495,7 +527,7 @@ impl super::Validator {
// to not bother with offsets/alignments if they are never
// used for host sharing.
if member.offset == 0 {
ti.flags.set(TypeFlags::HOST_SHARED, false);
ti.flags.set(TypeFlags::HOST_SHAREABLE, false);
} else {
return Err(TypeError::MemberOverlap {
index: i as u32,


@ -1 +1 @@
{"files":{"Cargo.toml":"f4dcb8db1e3276bfa485eac69d7906030719cd93787a44acc4cfd3598969ba8e","build.rs":"fedccfe06a4d75ba68233f0756de4161213c5d25851963f5b0521d8b7292b395","src/binding_model.rs":"218bfaa87b291be693c27ea496111c4a66edcbc585ce2661c060d5996822334b","src/command/bind.rs":"309f3f1b1719d25115d385368cff0a2c85e94da825b2930141db78235901c673","src/command/bundle.rs":"f015e176b7d7f67cd17dfef9ca67f46fd2c2edcc33286fc250929b9f00c9b18c","src/command/clear.rs":"03f624521571c46080051baa95b60230aca5698e2ddc6c83100a4c1162de4e1a","src/command/compute.rs":"05f405bba84c411f300eb635a7b77bbb557bc94fb91253b5578c49f62e1530d1","src/command/draw.rs":"c04bdbcdabfdf5e3b4524bf155089b20185e283847ecfd628acc983101b2cbba","src/command/memory_init.rs":"f7c73879b9fa728039a45d78f1f865e4eca863c860b3775a4542c973dfdb68c5","src/command/mod.rs":"6bee42ee62915d9ef6afe7c17fc3da04b22574b63ed9e38067ff2d92e51fe3bd","src/command/query.rs":"18659df22356493697ce1c5b56fe2ca0e2f3f1ef29bdf00d65f064eeecba5130","src/command/render.rs":"155aaf90898dade6ee54754824a151f3b3ddc1387767f35ca8cc991c61ac0156","src/command/transfer.rs":"6277878f4d99516bd7db5612aaf4fac280a46d48c44575a7c5f501d6fbae5c61","src/conv.rs":"15d87cd2a91cb9514273e86441940b86380861ec146796e5f214e95e4124d975","src/device/life.rs":"72ae33cc066ca1537c89ba1c7b12dab89957ea0449a4348e007a73aad471f4a5","src/device/mod.rs":"79dec24662a61af89ceeddef19c5fd101c6d97ae505f2a7f95d5699777b876e6","src/device/queue.rs":"5d070ff1f94f0796ece9359548806e41ed7c8f466828328f04fc7616644796e5","src/device/trace.rs":"de575a8213c8ae9df711e4b6afe5736d71ac65bf141375fe044d3b6c4375e039","src/error.rs":"34a4adbb6ec669d8de22b932363506eeef1b1115c422bcc8daa3b26f62378518","src/hub.rs":"e12f3ac5cbb126d37ca81c736ead67c65484b61f6f9433fc2b6ac2f904112490","src/id.rs":"420ef1442ce4663b4f8c011117bdc20682aaea4ad7bfa08c526ea3022a7b6d27","src/init_tracker/buffer.rs":"ccdddaace101f921463bf6c62ed5aca01a6520717a850b5d4442c3551e9f1875","src/init_tracker/mod.rs":"273c6ee67a927784a617899c6fe5560e47108248ab67cabdc2eebcba53133364","src/init_tracker/texture.rs":"75cf8753d850792ebc11da8ed7936836a23e12cbcdd52213a5e85ea3ff1412c1","src/instance.rs":"82d91ad4d7a98ee6fd87c05e93576e8351ccc6fa0f43fb880e1fddc5d54ca319","src/lib.rs":"f7101b4e0476e90d1ab7be5575d26855e074afb868a2f6f8dbcff04040e56689","src/pipeline.rs":"ffabdc74656717276241b1ca2ed043fabf18795662a523828193aea99d7a9ef5","src/present.rs":"50759b10537bf8bd52b1bb4cc48671fa860b325bd67d7f4d5057f8dc88794fdd","src/resource.rs":"2eb571779b9e1f3adaa67861a8d982ab58ade510023e6910936a0a1a952fd9e8","src/track/buffer.rs":"58828fa4bb6d9ca0f4c2549fd271b1ada738598675cc13714db99ef676c9332a","src/track/mod.rs":"753e8041bc4d10da311b95544e768f5f6dab235763305708632d8ad5a3f7984c","src/track/range.rs":"d24340a235103fd18e740236ebfe59b434023bc07abaeebc11c4d7b44057e267","src/track/texture.rs":"f64028e06c215d7dc9d1ac293acbcc40e88b9dbf7eb0e010214f0327d4569794","src/validation.rs":"254e9fbdd24949c3786ad300e1ca90a724ea022f1698f84f059b6fb298a2094c"},"package":null}
{"files":{"Cargo.toml":"73d22ddbc04b486026d12675ef898363c6eea04ae23a9251acdd1b000c73b126","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"fedccfe06a4d75ba68233f0756de4161213c5d25851963f5b0521d8b7292b395","src/binding_model.rs":"79f024cdb136f44066d67ec7dc56bde7fdf3bf9e89874dc4db40e504099b2822","src/command/bind.rs":"309f3f1b1719d25115d385368cff0a2c85e94da825b2930141db78235901c673","src/command/bundle.rs":"6f940e6de1e84b858790e2801ba82c83f2bc0c6afbff962576b3dd64ac315de3","src/command/clear.rs":"568aaf9d0843bada18b68980b52dd8021830c28fff36551459fad5f6baea72e1","src/command/compute.rs":"b58ae86ffbd8280af27f063d514b17c5dafee3f3ddfd5637ca050135681eb764","src/command/draw.rs":"1b9b6531b7536bc0f864ab9fdeff376993de04e33554e84c7b2db7dc65e31327","src/command/memory_init.rs":"03c3267b311f389af859615ceea8a648b402a323062cc8f0fe2690a0fb390b97","src/command/mod.rs":"c0f00529bce224972d844d2fdc9f659ffa065512086315b7bcd767501961ee1a","src/command/query.rs":"34d22d33e4713ff7ca0b345b14cdbb6177236e782b5dfb38d907215c4deb6907","src/command/render.rs":"b21201c5b9574e98c066f988bcf91b1cde0d1847fc1db683291cb059a10f3dd8","src/command/transfer.rs":"7e5e13f04fef63e036291b2838c0f0097717ec497f98f420b71296b2cc691907","src/conv.rs":"87097903e86048c9110f526f7df1749186f84cb663d75d9d40a0c467befc89ea","src/device/life.rs":"857a71da94f5f6f043f304ada7dc9ab95c6a26ed0ff63f3d64a77942e28bcafe","src/device/mod.rs":"8b886c68cd2aaec9aabdbaea0f2f256fe546ae0242fe7c9b0b8a55686f215071","src/device/queue.rs":"5fe332a0d27dafff720b19e436d991a35affd2a8031f78c2a81439a49105edd6","src/device/trace.rs":"de575a8213c8ae9df711e4b6afe5736d71ac65bf141375fe044d3b6c4375e039","src/error.rs":"34a4adbb6ec669d8de22b932363506eeef1b1115c422bcc8daa3b26f62378518","src/hub.rs":"4cc404cc79578d7a6757f74ab1fbeeb357a13a4de5f0fe87affaea8895395c8d","src/id.rs":"3ec97d09f900f34f9ad38a555ddcadb77bd9977d3d39bfad030b9b34649cf502","src/init_tracker/buffer.rs":"ccdddaace101f921463bf6c62ed5aca01a6520717a850b5d4442c3551e9f1875","src/init_tracker/mod.rs":"273c6ee67a927784a617899c6fe5560e47108248ab67cabdc2eebcba53133364","src/init_tracker/texture.rs":"d02babc4f194f91853b5e9a71bd5b20d9434842cf242f29ed9d39661bfa44980","src/instance.rs":"4a19ac634a4dd22938586e3bc554ab69f079abb2d836ef932f06cee1655d9336","src/lib.rs":"f44250478f095aa7d61fb4773692037f465d1e8df9c5626000723d4e1961166e","src/pipeline.rs":"ffabdc74656717276241b1ca2ed043fabf18795662a523828193aea99d7a9ef5","src/present.rs":"5b760e252242be41d70f09cc46b95f2bfcb8258c3482755a7bec3b5a7e4bbcb6","src/resource.rs":"50021911ff214165a32129eabc2275945c2fd22bb736fad2977634ea8ef8362d","src/track/buffer.rs":"1a7400ec55f3c16bc074c46d11b9515762b558a333d36eb236d2e7d99701bbe5","src/track/mod.rs":"3a4b07c8f1ff168609ca521b441e1e2acc00c62d7e9e4dc39cb8ab83d9813d58","src/track/range.rs":"5bbfed6e103b3234d9de8e42057022da6d628c2cc1db6bb51b88f87f2d8adf8b","src/track/stateless.rs":"593ec39e01e18048100ab0e1869f430851b83b96bd0497b8e524efda38782a46","src/track/texture.rs":"de154923e4825fa120360aae61aec27370b44196464edea6468bf933976ea20c","src/validation.rs":"27c76c48eaf3ca6be111855d7b1ab8ef94c2f73ed5d5e4f758d82799099f014b"},"package":null}

third_party/rust/wgpu-core/Cargo.toml (vendored)

@ -26,6 +26,7 @@ vulkan-portability = ["hal/vulkan"]
[dependencies]
arrayvec = "0.7"
bitflags = "1.0"
bit-vec = "0.6"
codespan-reporting = "0.11"
copyless = "0.1"
fxhash = "0.2"
@ -41,7 +42,7 @@ thiserror = "1"
[dependencies.naga]
git = "https://github.com/gfx-rs/naga"
rev = "1aa91549"
rev = "571302e"
#version = "0.8"
features = ["span", "validate", "wgsl-in"]

third_party/rust/wgpu-core/LICENSE.APACHE (vendored, new file)

@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

third_party/rust/wgpu-core/LICENSE.MIT (vendored, new file)

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 The gfx-rs developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -1,10 +1,10 @@
use crate::{
device::{DeviceError, MissingDownlevelFlags, MissingFeatures, SHADER_STAGE_COUNT},
error::{ErrorFormatter, PrettyError},
hub::Resource,
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureViewId, Valid},
hub::{HalApi, Resource},
id::{BindGroupLayoutId, BufferId, DeviceId, SamplerId, TextureId, TextureViewId, Valid},
init_tracker::{BufferInitTrackerAction, TextureInitTrackerAction},
track::{TrackerSet, UsageConflict, DUMMY_SELECTOR},
track::{BindGroupStates, UsageConflict},
validation::{MissingBufferUsageError, MissingTextureUsageError},
FastHashMap, Label, LifeGuard, MultiRefCount, Stored,
};
@ -16,10 +16,7 @@ use serde::Deserialize;
#[cfg(feature = "trace")]
use serde::Serialize;
use std::{
borrow::{Borrow, Cow},
ops::Range,
};
use std::{borrow::Cow, ops::Range};
use thiserror::Error;
@ -63,6 +60,8 @@ pub enum CreateBindGroupError {
InvalidBuffer(BufferId),
#[error("texture view {0:?} is invalid")]
InvalidTextureView(TextureViewId),
#[error("texture {0:?} is invalid")]
InvalidTexture(TextureId),
#[error("sampler {0:?} is invalid")]
InvalidSampler(SamplerId),
#[error(
@ -709,13 +708,12 @@ pub(crate) fn buffer_binding_type_alignment(
}
}
#[derive(Debug)]
pub struct BindGroup<A: hal::Api> {
pub struct BindGroup<A: HalApi> {
pub(crate) raw: A::BindGroup,
pub(crate) device_id: Stored<DeviceId>,
pub(crate) layout_id: Valid<BindGroupLayoutId>,
pub(crate) life_guard: LifeGuard,
pub(crate) used: TrackerSet,
pub(crate) used: BindGroupStates<A>,
pub(crate) used_buffer_ranges: Vec<BufferInitTrackerAction>,
pub(crate) used_texture_ranges: Vec<TextureInitTrackerAction>,
pub(crate) dynamic_binding_info: Vec<BindGroupDynamicBindingData>,
@ -724,7 +722,7 @@ pub struct BindGroup<A: hal::Api> {
pub(crate) late_buffer_binding_sizes: Vec<wgt::BufferSize>,
}
impl<A: hal::Api> BindGroup<A> {
impl<A: HalApi> BindGroup<A> {
pub(crate) fn validate_dynamic_bindings(
&self,
offsets: &[wgt::DynamicOffset],
@ -766,13 +764,7 @@ impl<A: hal::Api> BindGroup<A> {
}
}
impl<A: hal::Api> Borrow<()> for BindGroup<A> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
impl<A: hal::Api> Resource for BindGroup<A> {
impl<A: HalApi> Resource for BindGroup<A> {
const TYPE: &'static str = "BindGroup";
fn life_guard(&self) -> &LifeGuard {


@ -1,40 +1,76 @@
/*! Render Bundles
## Software implementation
A render bundle is a prerecorded sequence of commands that can be replayed on a
command encoder with a single call. A single bundle can be replayed any number of
times, on different encoders. Constructing a render bundle lets `wgpu` validate
and analyze its commands up front, so that replaying a bundle can be more
efficient than simply re-recording its commands each time.
The path from nothing to using a render bundle consists of 3 phases.
One important property of render bundles is that the draw calls in a render
bundle depend solely on the pipeline and state established within the render
bundle itself. A draw call in a bundle will never use a vertex buffer, say, that
was set in the `RenderPass` before executing the bundle. We call this property
'isolation', in that a render bundle is somewhat isolated from the passes that
use it.
### Initial command encoding
Render passes are also isolated from the effects of bundles. After executing a
render bundle, a render pass's pipeline, bind groups, and vertex and index
buffers are unset, so the bundle cannot affect later draw calls in the pass.
User creates a `RenderBundleEncoder` and populates it by issuing commands
from `bundle_ffi` module, just like with `RenderPass`, except that the
set of available commands is reduced. Everything is written into a `RawPass`.
Not all commands are available in bundles; for example, a render bundle may not
contain a [`RenderCommand::SetViewport`] command.
### Bundle baking
Most of `wgpu`'s backend graphics APIs have something like bundles. For example,
Vulkan calls them "secondary command buffers", and Metal calls them "indirect
command buffers". However, `wgpu`'s implementation of render bundles does not
take advantage of those underlying platform features. At the hal level, `wgpu`
render bundles just replay the commands.
Once the commands are encoded, user calls `render_bundle_encoder_finish`.
This is perhaps the most complex part of the logic. It consumes the
commands stored in `RawPass`, while validating everything, tracking the state,
and re-recording the commands into a separate `Vec<RenderCommand>`. It
doesn't actually execute any commands.
## Render Bundle Lifecycle
What's more important is that the produced vector of commands is "normalized",
which means it can be executed verbatim without any state tracking. More
formally, a "normalized" command stream guarantees that any state required by
a draw call is set explicitly by one of the commands between the draw call
and the last change of the pipeline.
To create a render bundle:
1) Create a [`RenderBundleEncoder`] by calling
[`Global::device_create_render_bundle_encoder`][Gdcrbe].
2) Record commands in the `RenderBundleEncoder` using functions from the
[`bundle_ffi`] module.
3) Call [`Global::render_bundle_encoder_finish`][Grbef], which analyzes and cleans up
the command stream and returns a `RenderBundleId`.
4) Then, any number of times, call [`wgpu_render_pass_execute_bundles`][wrpeb] to
execute the bundle as part of some render pass.
## Implementation
The most complex part of render bundles is the "finish" step, mostly implemented
in [`RenderBundleEncoder::finish`]. This consumes the commands stored in the
encoder's [`BasePass`], while validating everything, tracking the state,
dropping redundant or unnecessary commands, and presenting the results as a new
[`RenderBundle`]. It doesn't actually execute any commands.
This step also enforces the 'isolation' property mentioned above: every draw
call is checked to ensure that the resources it uses were established since
the last time the pipeline was set. This means the bundle can be executed
verbatim without any state tracking.
### Execution
When the bundle is used in an actual render pass, `RenderBundle::execute` is
called. It goes through the commands and issues them into the native command
buffer. Thanks to the "normalized" property, it doesn't track any bind group
invalidations or index format changes.
buffer. Thanks to isolation, it doesn't track any bind group invalidations or
index format changes.
[Gdcrbe]: crate::hub::Global::device_create_render_bundle_encoder
[Grbef]: crate::hub::Global::render_bundle_encoder_finish
[wrpeb]: crate::command::render_ffi::wgpu_render_pass_execute_bundles
!*/
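
A sketch of that lifecycle through the public `wgpu` API; the descriptor fields below match roughly this revision, and `device`, `pipeline`, and `render_pass` are assumed to exist elsewhere:

let mut bundle_encoder =
    device.create_render_bundle_encoder(&wgpu::RenderBundleEncoderDescriptor {
        label: Some("example bundle"),
        color_formats: &[wgpu::TextureFormat::Bgra8UnormSrgb],
        depth_stencil: None,
        sample_count: 1,
        multiview: None,
    });
bundle_encoder.set_pipeline(&pipeline);
bundle_encoder.draw(0..3, 0..1); // record once...
let bundle = bundle_encoder.finish(&wgpu::RenderBundleDescriptor {
    label: Some("example bundle"),
});

// ...then replay any number of times, inside any compatible render pass:
render_pass.execute_bundles(std::iter::once(&bundle));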
#![allow(clippy::reversed_empty_ranges)]
use crate::{
binding_model::buffer_binding_type_alignment,
binding_model::{self, buffer_binding_type_alignment},
command::{
BasePass, BindGroupStateChange, DrawError, MapPassErr, PassErrorScope, RenderCommand,
RenderCommandError, StateChange,
@ -48,8 +84,9 @@ use crate::{
hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Resource, Storage, Token},
id,
init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction},
pipeline::PipelineFlags,
track::{TrackerSet, UsageConflict},
pipeline::{self, PipelineFlags},
resource,
track::RenderBundleScope,
validation::check_buffer_usage,
Label, LabelHelpers, LifeGuard, Stored,
};
@ -117,7 +154,7 @@ impl RenderBundleEncoder {
},
sample_count: {
let sc = desc.sample_count;
if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) {
return Err(CreateRenderBundleError::InvalidSampleCount(sc));
}
sc
@ -167,39 +204,56 @@ impl RenderBundleEncoder {
self.parent_id
}
pub(crate) fn finish<A: hal::Api, G: GlobalIdentityHandlerFactory>(
/// Convert this encoder's commands into a [`RenderBundle`].
///
/// We want executing a [`RenderBundle`] to be quick, so we take
/// this opportunity to clean up the [`RenderBundleEncoder`]'s
/// command stream and gather metadata about it that will help
/// keep [`ExecuteBundle`] simple and fast. We remove redundant
/// commands (along with their side data), note resource usage,
/// and accumulate buffer and texture initialization actions.
///
/// [`ExecuteBundle`]: RenderCommand::ExecuteBundle
pub(crate) fn finish<A: HalApi, G: GlobalIdentityHandlerFactory>(
self,
desc: &RenderBundleDescriptor,
device: &Device<A>,
hub: &Hub<A, G>,
token: &mut Token<Device<A>>,
) -> Result<RenderBundle, RenderBundleError> {
) -> Result<RenderBundle<A>, RenderBundleError> {
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let (buffer_guard, _) = hub.buffers.read(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, _) = hub.textures.read(&mut token);
let mut state = State {
trackers: TrackerSet::new(self.parent_id.backend()),
trackers: RenderBundleScope::new(
&*buffer_guard,
&*texture_guard,
&*bind_group_guard,
&*pipeline_guard,
&*query_set_guard,
),
index: IndexState::new(),
vertex: (0..hal::MAX_VERTEX_BUFFERS)
.map(|_| VertexState::new())
.collect(),
bind: (0..hal::MAX_BIND_GROUPS)
.map(|_| BindState::new())
.collect(),
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
push_constant_ranges: PushConstantState::new(),
raw_dynamic_offsets: Vec::new(),
flat_dynamic_offsets: Vec::new(),
used_bind_groups: 0,
pipeline: None,
};
let mut commands = Vec::new();
let mut base = self.base.as_ref();
let mut pipeline_layout_id = None::<id::Valid<id::PipelineLayoutId>>;
let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new();
let base = self.base.as_ref();
let mut next_dynamic_offset = 0;
for &command in base.commands {
match command {
RenderCommand::SetBindGroup {
@ -209,6 +263,15 @@ impl RenderBundleEncoder {
} => {
let scope = PassErrorScope::SetBindGroup(bind_group_id);
let bind_group: &binding_model::BindGroup<A> = state
.trackers
.bind_groups
.add_single(&*bind_group_guard, bind_group_id)
.ok_or(RenderCommandError::InvalidBindGroup(bind_group_id))
.map_pass_err(scope)?;
self.check_valid_to_use(bind_group.device_id.value)
.map_pass_err(scope)?;
let max_bind_groups = device.limits.max_bind_groups;
if (index as u32) >= max_bind_groups {
return Err(RenderCommandError::BindGroupIndexOutOfRange {
@ -218,15 +281,13 @@ impl RenderBundleEncoder {
.map_pass_err(scope);
}
let offsets = &base.dynamic_offsets[..num_dynamic_offsets as usize];
base.dynamic_offsets = &base.dynamic_offsets[num_dynamic_offsets as usize..];
// Identify the next `num_dynamic_offsets` entries from `base.dynamic_offsets`.
let num_dynamic_offsets = num_dynamic_offsets as usize;
let offsets_range =
next_dynamic_offset..next_dynamic_offset + num_dynamic_offsets;
next_dynamic_offset = offsets_range.end;
let offsets = &base.dynamic_offsets[offsets_range.clone()];
let bind_group = state
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.map_err(|_| RenderCommandError::InvalidBindGroup(bind_group_id))
.map_pass_err(scope)?;
if bind_group.dynamic_binding_info.len() != offsets.len() {
return Err(RenderCommandError::InvalidDynamicOffsetCount {
actual: offsets.len(),
@ -254,11 +315,13 @@ impl RenderBundleEncoder {
buffer_memory_init_actions.extend_from_slice(&bind_group.used_buffer_ranges);
texture_memory_init_actions.extend_from_slice(&bind_group.used_texture_ranges);
state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets);
state
.trackers
.merge_extend_stateful(&bind_group.used)
.map_pass_err(scope)?;
state.set_bind_group(index, bind_group_id, bind_group.layout_id, offsets_range);
unsafe {
state
.trackers
.merge_bind_group(&*texture_guard, &bind_group.used)
.map_pass_err(scope)?
};
//Note: stateless trackers are not merged: the lifetime reference
// is held to the bind group itself.
}
@ -267,11 +330,13 @@ impl RenderBundleEncoder {
state.pipeline = Some(pipeline_id);
let pipeline = state
let pipeline: &pipeline::RenderPipeline<A> = state
.trackers
.render_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))
.render_pipelines
.add_single(&*pipeline_guard, pipeline_id)
.ok_or(RenderCommandError::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
self.check_valid_to_use(pipeline.device_id.value)
.map_pass_err(scope)?;
self.context
@ -307,11 +372,13 @@ impl RenderBundleEncoder {
size,
} => {
let scope = PassErrorScope::SetIndexBuffer(buffer_id);
let buffer = state
let buffer: &resource::Buffer<A> = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDEX)
.unwrap();
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device_id.value)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX)
.map_pass_err(scope)?;
@ -334,11 +401,13 @@ impl RenderBundleEncoder {
size,
} => {
let scope = PassErrorScope::SetVertexBuffer(buffer_id);
let buffer = state
let buffer: &resource::Buffer<A> = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::VERTEX)
.unwrap();
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device_id.value)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX)
.map_pass_err(scope)?;
@ -404,7 +473,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope);
}
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
commands.extend(state.flush_binds(base.dynamic_offsets));
commands.push(command);
}
RenderCommand::DrawIndexed {
@ -441,7 +510,7 @@ impl RenderBundleEncoder {
}
commands.extend(state.index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
commands.extend(state.flush_binds(base.dynamic_offsets));
commands.push(command);
}
RenderCommand::MultiDrawIndirect {
@ -459,11 +528,13 @@ impl RenderBundleEncoder {
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?;
let buffer = state
let buffer: &resource::Buffer<A> = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDIRECT)
.unwrap();
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device_id.value)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -474,7 +545,7 @@ impl RenderBundleEncoder {
));
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
commands.extend(state.flush_binds(base.dynamic_offsets));
commands.push(command);
}
RenderCommand::MultiDrawIndirect {
@ -492,11 +563,12 @@ impl RenderBundleEncoder {
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?;
let buffer = state
let buffer: &resource::Buffer<A> = state
.trackers
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDIRECT)
.map_err(|err| RenderCommandError::Buffer(buffer_id, err))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device_id.value)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -509,7 +581,7 @@ impl RenderBundleEncoder {
commands.extend(state.index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds());
commands.extend(state.flush_binds(base.dynamic_offsets));
commands.push(command);
}
RenderCommand::MultiDrawIndirect { .. }
@ -549,6 +621,17 @@ impl RenderBundleEncoder {
})
}
fn check_valid_to_use(
&self,
device_id: id::Valid<id::DeviceId>,
) -> Result<(), RenderBundleErrorInner> {
if device_id.0 != self.parent_id {
return Err(RenderBundleErrorInner::NotValidToUse);
}
Ok(())
}
pub fn set_index_buffer(
&mut self,
buffer_id: id::BufferId,
@ -599,24 +682,23 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
//Note: here, `RenderBundle` is just wrapping a raw stream of render commands.
// The plan is to back it by an actual Vulkan secondary buffer, D3D12 Bundle,
// or Metal indirect command buffer.
#[derive(Debug)]
pub struct RenderBundle {
pub struct RenderBundle<A: HalApi> {
// Normalized command stream. It can be executed verbatim,
// without re-binding anything on the pipeline change.
base: BasePass<RenderCommand>,
pub(super) is_ds_read_only: bool,
pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) used: TrackerSet,
pub(crate) used: RenderBundleScope<A>,
pub(super) buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
pub(super) texture_memory_init_actions: Vec<TextureInitTrackerAction>,
pub(super) context: RenderPassContext,
pub(crate) life_guard: LifeGuard,
}
unsafe impl Send for RenderBundle {}
unsafe impl Sync for RenderBundle {}
unsafe impl<A: HalApi> Send for RenderBundle<A> {}
unsafe impl<A: HalApi> Sync for RenderBundle<A> {}
impl RenderBundle {
impl<A: HalApi> RenderBundle<A> {
/// Actually encode the contents into a native command buffer.
///
/// This is partially duplicating the logic of `command_encoder_run_render_pass`.
@ -626,7 +708,7 @@ impl RenderBundle {
/// Generally, the function isn't expected to fail: all the
/// validation has already been done by this point. The only
/// failure condition is if some of the used buffers have been destroyed.
pub(super) unsafe fn execute<A: HalApi>(
pub(super) unsafe fn execute(
&self,
raw: &mut A::CommandEncoder,
pipeline_layout_guard: &Storage<
@ -815,7 +897,7 @@ impl RenderBundle {
}
}
impl Resource for RenderBundle {
impl<A: HalApi> Resource for RenderBundle<A> {
const TYPE: &'static str = "RenderBundle";
fn life_guard(&self) -> &LifeGuard {
@ -823,6 +905,15 @@ impl Resource for RenderBundle {
}
}
/// A render bundle's current index buffer state.
///
/// [`RenderBundleEncoder::finish`] uses this to drop redundant
/// `SetIndexBuffer` commands from the final [`RenderBundle`]. It
/// records index buffer state changes here, and then calls this
/// type's [`flush`] method before any indexed draw command to produce
/// a `SetIndexBuffer` command if one is necessary.
///
/// [`flush`]: IndexState::flush
#[derive(Debug)]
struct IndexState {
buffer: Option<id::BufferId>,
@ -833,6 +924,7 @@ struct IndexState {
}
impl IndexState {
/// Return a fresh state: no index buffer has been set yet.
fn new() -> Self {
Self {
buffer: None,
@ -843,6 +935,9 @@ impl IndexState {
}
}
/// Return the number of entries in the current index buffer.
///
/// Panics if no index buffer has been set.
fn limit(&self) -> u32 {
assert!(self.buffer.is_some());
let bytes_per_index = match self.format {
@ -852,6 +947,8 @@ impl IndexState {
((self.range.end - self.range.start) / bytes_per_index) as u32
}
/// Prepare for an indexed draw, producing a `SetIndexBuffer`
/// command if necessary.
fn flush(&mut self) -> Option<RenderCommand> {
if self.is_dirty {
self.is_dirty = false;
@ -866,6 +963,7 @@ impl IndexState {
}
}
/// Set the current index buffer's format.
fn set_format(&mut self, format: wgt::IndexFormat) {
if self.format != format {
self.format = format;
@ -873,6 +971,7 @@ impl IndexState {
}
}
/// Set the current index buffer.
fn set_buffer(&mut self, id: id::BufferId, range: Range<wgt::BufferAddress>) {
self.buffer = Some(id);
self.range = range;
@ -880,6 +979,15 @@ impl IndexState {
}
}
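// A minimal sketch of the dirty-flag pattern shared by `IndexState` above
// and `VertexState` below. The `Slot` type here is illustrative, not part of
// wgpu-core: setters record the new value and mark the slot dirty, and
// `flush` emits at most one rebind per draw, only when something changed.
struct Slot {
    value: Option<u64>,
    is_dirty: bool,
}

impl Slot {
    fn set(&mut self, value: u64) {
        self.value = Some(value);
        self.is_dirty = true;
    }

    /// Return the value to re-bind, if a rebind is needed at all.
    fn flush(&mut self) -> Option<u64> {
        if self.is_dirty {
            self.is_dirty = false;
            self.value
        } else {
            None
        }
    }
}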
/// The state of a single vertex buffer slot during render bundle encoding.
///
/// [`RenderBundleEncoder::finish`] uses this to drop redundant
/// `SetVertexBuffer` commands from the final [`RenderBundle`]. It
/// records one vertex buffer slot's state changes here, and then
/// calls this type's [`flush`] method just before any draw command to
/// produce a `SetVertexBuffer` command if one is necessary.
///
/// [`flush`]: VertexState::flush
#[derive(Debug)]
struct VertexState {
buffer: Option<id::BufferId>,
@ -890,6 +998,8 @@ struct VertexState {
}
impl VertexState {
/// Construct a fresh `VertexState`: no buffer has been set for
/// this slot.
fn new() -> Self {
Self {
buffer: None,
@ -900,12 +1010,16 @@ impl VertexState {
}
}
/// Set this slot's vertex buffer.
fn set_buffer(&mut self, buffer_id: id::BufferId, range: Range<wgt::BufferAddress>) {
self.buffer = Some(buffer_id);
self.range = range;
self.is_dirty = true;
}
/// Generate a `SetVertexBuffer` command for this slot, if necessary.
///
/// `slot` is the index of the vertex buffer slot that `self` tracks.
fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
if self.is_dirty {
self.is_dirty = false;
@ -921,41 +1035,24 @@ impl VertexState {
}
}
/// A bind group that has been set at a particular index during render bundle encoding.
#[derive(Debug)]
struct BindState {
bind_group: Option<(id::BindGroupId, id::BindGroupLayoutId)>,
/// The id of the bind group set at this index.
bind_group_id: id::BindGroupId,
/// The layout of the bind group set at this index.
layout_id: id::Valid<id::BindGroupLayoutId>,
/// The range of dynamic offsets for this bind group, in the original
/// command stream's `BasePass::dynamic_offsets` array.
dynamic_offsets: Range<usize>,
/// True if this index's contents have been changed since the last time we
/// generated a `SetBindGroup` command.
is_dirty: bool,
}
impl BindState {
fn new() -> Self {
Self {
bind_group: None,
dynamic_offsets: 0..0,
is_dirty: false,
}
}
fn set_group(
&mut self,
bind_group_id: id::BindGroupId,
layout_id: id::BindGroupLayoutId,
dyn_offset: usize,
dyn_count: usize,
) -> bool {
match self.bind_group {
Some((bg_id, _)) if bg_id == bind_group_id && dyn_count == 0 => false,
_ => {
self.bind_group = Some((bind_group_id, layout_id));
self.dynamic_offsets = dyn_offset..dyn_offset + dyn_count;
self.is_dirty = true;
true
}
}
}
}
#[derive(Debug)]
struct PushConstantState {
ranges: ArrayVec<wgt::PushConstantRange, { SHADER_STAGE_COUNT }>,
@ -992,20 +1089,42 @@ struct VertexLimitState {
instance_limit_slot: u32,
}
#[derive(Debug)]
struct State {
trackers: TrackerSet,
/// State for analyzing and cleaning up bundle command streams.
///
/// To minimize state updates, [`RenderBundleEncoder::finish`]
/// actually just applies commands like [`SetBindGroup`] and
/// [`SetIndexBuffer`] to the simulated state stored here, and then
/// calls the `flush_foo` methods before draw calls to produce the
/// update commands we actually need.
struct State<A: HalApi> {
/// Resources used by this bundle. This will become [`RenderBundle::used`].
trackers: RenderBundleScope<A>,
/// The current index buffer. We flush this state before indexed
/// draw commands.
index: IndexState,
/// The state of each vertex buffer slot.
vertex: ArrayVec<VertexState, { hal::MAX_VERTEX_BUFFERS }>,
bind: ArrayVec<BindState, { hal::MAX_BIND_GROUPS }>,
/// The bind group set at each index, if any.
bind: ArrayVec<Option<BindState>, { hal::MAX_BIND_GROUPS }>,
push_constant_ranges: PushConstantState,
raw_dynamic_offsets: Vec<wgt::DynamicOffset>,
/// Dynamic offset values used by the cleaned-up command sequence.
///
/// This becomes the final [`RenderBundle`]'s [`BasePass`]'s
/// [`dynamic_offsets`] list.
///
/// [`dynamic_offsets`]: BasePass::dynamic_offsets
flat_dynamic_offsets: Vec<wgt::DynamicOffset>,
used_bind_groups: usize,
pipeline: Option<id::RenderPipelineId>,
}
impl State {
impl<A: HalApi> State<A> {
fn vertex_limits(&self) -> VertexLimitState {
let mut vert_state = VertexLimitState {
vertex_limit: u32::MAX,
@ -1036,11 +1155,10 @@ impl State {
vert_state
}
fn invalidate_group_from(&mut self, slot: usize) {
for bind in self.bind[slot..].iter_mut() {
if bind.bind_group.is_some() {
bind.is_dirty = true;
}
/// Mark all non-empty bind group table entries from `index` onwards as dirty.
fn invalidate_group_from(&mut self, index: usize) {
for contents in self.bind[index..].iter_mut().flatten() {
contents.is_dirty = true;
}
}
@ -1049,17 +1167,30 @@ impl State {
slot: u8,
bind_group_id: id::BindGroupId,
layout_id: id::Valid<id::BindGroupLayoutId>,
offsets: &[wgt::DynamicOffset],
dynamic_offsets: Range<usize>,
) {
if self.bind[slot as usize].set_group(
bind_group_id,
layout_id.0,
self.raw_dynamic_offsets.len(),
offsets.len(),
) {
self.invalidate_group_from(slot as usize + 1);
// If this call wouldn't actually change this index's state, we can
// return early. (If there are dynamic offsets, the range will always
// be different.)
if dynamic_offsets.is_empty() {
if let Some(ref contents) = self.bind[slot as usize] {
if contents.bind_group_id == bind_group_id {
return;
}
}
}
self.raw_dynamic_offsets.extend(offsets);
// Record the index's new state.
self.bind[slot as usize] = Some(BindState {
bind_group_id,
layout_id,
dynamic_offsets,
is_dirty: true,
});
// Once we've changed the bind group at a particular index, all
// subsequent indices need to be rewritten.
self.invalidate_group_from(slot as usize + 1);
}
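// A simplified sketch of the invalidation cascade above (illustrative types,
// not wgpu-core's; dynamic offsets are omitted): when the group bound at
// `slot` actually changes, every later slot is conservatively marked dirty so
// that the next flush re-emits its `SetBindGroup` command.
fn set_group_sketch(groups: &mut [Option<(u32, bool)>], slot: usize, id: u32) {
    if let Some((current, _)) = groups[slot] {
        if current == id {
            return; // unchanged: nothing to invalidate
        }
    }
    groups[slot] = Some((id, true)); // (bind group id, is_dirty)
    for later in groups[slot + 1..].iter_mut().flatten() {
        later.1 = true; // force a re-emit at the next flush
    }
}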
fn set_pipeline(
@ -1090,8 +1221,8 @@ impl State {
self.bind
.iter()
.zip(layout_ids)
.position(|(bs, layout_id)| match bs.bind_group {
Some((_, bgl_id)) => bgl_id != layout_id.0,
.position(|(entry, &layout_id)| match *entry {
Some(ref contents) => contents.layout_id != layout_id,
None => false,
})
};
@ -1129,29 +1260,37 @@ impl State {
.flat_map(|(i, vs)| vs.flush(i as u32))
}
fn flush_binds(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
for bs in self.bind[..self.used_bind_groups].iter() {
if bs.is_dirty {
/// Generate `SetBindGroup` commands for any bind groups that need to be updated.
fn flush_binds(
&mut self,
dynamic_offsets: &[wgt::DynamicOffset],
) -> impl Iterator<Item = RenderCommand> + '_ {
// Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`.
for contents in self.bind[..self.used_bind_groups].iter().flatten() {
if contents.is_dirty {
self.flat_dynamic_offsets
.extend_from_slice(&self.raw_dynamic_offsets[bs.dynamic_offsets.clone()]);
.extend_from_slice(&dynamic_offsets[contents.dynamic_offsets.clone()]);
}
}
self.bind
// Then, generate `SetBindGroup` commands to update the dirty bind
// groups. After this, all bind groups are clean.
self.bind[..self.used_bind_groups]
.iter_mut()
.take(self.used_bind_groups)
.enumerate()
.flat_map(|(i, bs)| {
if bs.is_dirty {
bs.is_dirty = false;
Some(RenderCommand::SetBindGroup {
index: i as u8,
bind_group_id: bs.bind_group.unwrap().0,
num_dynamic_offsets: (bs.dynamic_offsets.end - bs.dynamic_offsets.start)
as u8,
})
} else {
None
.flat_map(|(i, entry)| {
if let Some(ref mut contents) = *entry {
if contents.is_dirty {
contents.is_dirty = false;
let offsets = &contents.dynamic_offsets;
return Some(RenderCommand::SetBindGroup {
index: i as u8,
bind_group_id: contents.bind_group_id,
num_dynamic_offsets: (offsets.end - offsets.start) as u8,
});
}
}
None
})
}
}
@ -1159,13 +1298,13 @@ impl State {
/// Error encountered when finishing recording a render bundle.
#[derive(Clone, Debug, Error)]
pub(super) enum RenderBundleErrorInner {
#[error("resource is not valid to use with this render bundle because the resource and the bundle come from different devices")]
NotValidToUse,
#[error(transparent)]
Device(#[from] DeviceError),
#[error(transparent)]
RenderCommand(RenderCommandError),
#[error(transparent)]
ResourceUsageConflict(#[from] UsageConflict),
#[error(transparent)]
Draw(#[from] DrawError),
#[error(transparent)]
MissingDownlevelFlags(#[from] MissingDownlevelFlags),


@ -4,13 +4,12 @@ use std::{num::NonZeroU32, ops::Range};
use crate::device::trace::Command as TraceCommand;
use crate::{
command::CommandBuffer,
device::Device,
get_lowest_common_denom,
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Resource, Token},
hub::{self, Global, GlobalIdentityHandlerFactory, HalApi, Token},
id::{BufferId, CommandEncoderId, DeviceId, TextureId, Valid},
init_tracker::{MemoryInitKind, TextureInitRange},
resource::{Texture, TextureClearMode},
track::{ResourceTracker, TextureSelector, TextureState},
track::{TextureSelector, TextureTracker},
};
use hal::{auxil::align_to, CommandEncoder as _};
@ -90,8 +89,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, dst, (), hal::BufferUses::COPY_DST)
.map_err(ClearError::InvalidBuffer)?;
.set_single(&*buffer_guard, dst, hal::BufferUses::COPY_DST)
.ok_or(ClearError::InvalidBuffer(dst))?;
let dst_raw = dst_buffer
.raw
.as_ref()
@ -139,7 +138,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
let cmd_buf_raw = cmd_buf.encoder.open();
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier);
cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
cmd_buf_raw.clear_buffer(dst_raw, offset..end);
}
Ok(())
@ -191,13 +190,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// Check if subresource level range is valid
let subresource_level_end = match subresource_range.mip_level_count {
Some(count) => subresource_range.base_mip_level + count.get(),
None => dst_texture.full_range.levels.end,
None => dst_texture.full_range.mips.end,
};
if dst_texture.full_range.levels.start > subresource_range.base_mip_level
|| dst_texture.full_range.levels.end < subresource_level_end
if dst_texture.full_range.mips.start > subresource_range.base_mip_level
|| dst_texture.full_range.mips.end < subresource_level_end
{
return Err(ClearError::InvalidTextureLevelRange {
texture_level_range: dst_texture.full_range.levels.clone(),
texture_level_range: dst_texture.full_range.mips.clone(),
subresource_base_mip_level: subresource_range.base_mip_level,
subresource_mip_level_count: subresource_range.mip_level_count,
});
@ -217,48 +216,34 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
let device = &device_guard[cmd_buf.device_id.value];
clear_texture(
&*texture_guard,
Valid(dst),
dst_texture,
TextureInitRange {
mip_range: subresource_range.base_mip_level..subresource_level_end,
layer_range: subresource_range.base_array_layer..subresource_layer_end,
},
cmd_buf.encoder.open(),
&mut cmd_buf.trackers.textures,
&device_guard[cmd_buf.device_id.value],
&device.alignments,
&device.zero_buffer,
)
}
}
pub(crate) fn clear_texture<A: hal::Api>(
pub(crate) fn clear_texture<A: HalApi>(
storage: &hub::Storage<Texture<A>, TextureId>,
dst_texture_id: Valid<TextureId>,
dst_texture: &Texture<A>,
range: TextureInitRange,
encoder: &mut A::CommandEncoder,
texture_tracker: &mut ResourceTracker<TextureState>,
device: &Device<A>,
) -> Result<(), ClearError> {
clear_texture_no_device(
dst_texture_id,
dst_texture,
range,
encoder,
texture_tracker,
&device.alignments,
&device.zero_buffer,
)
}
pub(crate) fn clear_texture_no_device<A: hal::Api>(
dst_texture_id: Valid<TextureId>,
dst_texture: &Texture<A>,
range: TextureInitRange,
encoder: &mut A::CommandEncoder,
texture_tracker: &mut ResourceTracker<TextureState>,
texture_tracker: &mut TextureTracker<A>,
alignments: &hal::Alignments,
zero_buffer: &A::Buffer,
) -> Result<(), ClearError> {
let dst_texture = &storage[dst_texture_id];
let dst_raw = dst_texture
.inner
.as_raw()
@ -277,7 +262,7 @@ pub(crate) fn clear_texture_no_device<A: hal::Api>(
};
let selector = TextureSelector {
levels: range.mip_range.clone(),
mips: range.mip_range.clone(),
layers: range.layer_range.clone(),
};
@ -287,14 +272,13 @@ pub(crate) fn clear_texture_no_device<A: hal::Api>(
// On the other hand, when coming via command_encoder_clear_texture, the life_guard is still there, since a texture object is needed to call it.
//
// In theory we could distinguish these two scenarios in the internal clear_texture api, removing this check and calling the cheaper change_replace_tracked whenever possible.
let dst_barrier = if let Some(ref_count) = dst_texture.life_guard().ref_count.as_ref() {
texture_tracker.change_replace(dst_texture_id, ref_count, selector, clear_usage)
} else {
texture_tracker.change_replace_tracked(dst_texture_id, selector, clear_usage)
}
.map(|pending| pending.into_hal(dst_texture));
let dst_barrier = texture_tracker
.set_single(storage, dst_texture_id.0, selector, clear_usage)
.unwrap()
.1
.map(|pending| pending.into_hal(dst_texture));
unsafe {
encoder.transition_textures(dst_barrier);
encoder.transition_textures(dst_barrier.into_iter());
}
// Record actual clearing


@ -14,8 +14,9 @@ use crate::{
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id,
init_tracker::MemoryInitKind,
resource::{Buffer, Texture},
track::{StatefulTrackerSubset, TrackerSet, UsageConflict, UseExtendError},
pipeline,
resource::{self, Buffer, Texture},
track::{Tracker, UsageConflict, UsageScope},
validation::{check_buffer_usage, MissingBufferUsageError},
Label,
};
@ -228,15 +229,14 @@ where
}
}
#[derive(Debug)]
struct State {
struct State<A: HalApi> {
binder: Binder,
pipeline: Option<id::ComputePipelineId>,
trackers: StatefulTrackerSubset,
scope: UsageScope<A>,
debug_scope_depth: u32,
}
impl State {
impl<A: HalApi> State<A> {
fn is_ready(&self) -> Result<(), DispatchError> {
let bind_mask = self.binder.invalid_mask();
if bind_mask != 0 {
@ -253,32 +253,36 @@ impl State {
Ok(())
}
fn flush_states<A: HalApi>(
fn flush_states(
&mut self,
raw_encoder: &mut A::CommandEncoder,
base_trackers: &mut TrackerSet,
base_trackers: &mut Tracker<A>,
bind_group_guard: &Storage<BindGroup<A>, id::BindGroupId>,
buffer_guard: &Storage<Buffer<A>, id::BufferId>,
texture_guard: &Storage<Texture<A>, id::TextureId>,
) -> Result<(), UsageConflict> {
for id in self.binder.list_active() {
self.trackers.merge_extend(&bind_group_guard[id].used)?;
//Note: stateless trackers are not merged: the lifetime reference
unsafe {
self.scope
.merge_bind_group(texture_guard, &bind_group_guard[id].used)?
};
// Note: stateless trackers are not merged: the lifetime reference
// is held to the bind group itself.
}
for id in self.binder.list_active() {
unsafe {
base_trackers.set_and_remove_from_usage_scope_sparse(
texture_guard,
&mut self.scope,
&bind_group_guard[id].used,
)
}
}
log::trace!("Encoding dispatch barriers");
CommandBuffer::insert_barriers(
raw_encoder,
base_trackers,
&self.trackers.buffers,
&self.trackers.textures,
buffer_guard,
texture_guard,
);
self.trackers.clear();
CommandBuffer::drain_barriers(raw_encoder, base_trackers, buffer_guard, texture_guard);
Ok(())
}
}
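// A minimal sketch of what merging usage into a scope means (illustrative
// types; the real trackers handle sub-resources and a richer set of states):
// two uses of one resource conflict whenever either side writes, because a
// single usage scope cannot contain the barrier that such a hazard needs.
#[derive(Clone, Copy, PartialEq, Debug)]
enum UsageSketch {
    Read,
    Write,
}

fn merge_usage_sketch(
    scope: &mut std::collections::HashMap<usize, UsageSketch>,
    id: usize,
    new: UsageSketch,
) -> Result<(), String> {
    match scope.insert(id, new) {
        Some(old) if old == UsageSketch::Write || new == UsageSketch::Write => {
            Err(format!("usage conflict on resource {}: {:?} vs {:?}", id, old, new))
        }
        _ => Ok(()),
    }
}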
@ -338,7 +342,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut state = State {
binder: Binder::new(),
pipeline: None,
trackers: StatefulTrackerSubset::new(A::VARIANT),
scope: UsageScope::new(&*buffer_guard, &*texture_guard),
debug_scope_depth: 0,
};
let mut temp_offsets = Vec::new();
@ -346,6 +350,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut string_offset = 0;
let mut active_query = None;
cmd_buf.trackers.set_size(
Some(&*buffer_guard),
Some(&*texture_guard),
None,
None,
Some(&*bind_group_guard),
Some(&*pipeline_guard),
None,
None,
Some(&*query_set_guard),
);
let hal_desc = hal::ComputePassDescriptor { label: base.label };
unsafe {
raw.begin_compute_pass(&hal_desc);
@ -379,11 +395,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
);
dynamic_offset_count += num_dynamic_offsets as usize;
let bind_group = cmd_buf
let bind_group: &BindGroup<A> = cmd_buf
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.map_err(|_| ComputePassErrorInner::InvalidBindGroup(bind_group_id))
.add_single(&*bind_group_guard, bind_group_id)
.ok_or(ComputePassErrorInner::InvalidBindGroup(bind_group_id))
.map_pass_err(scope)?;
bind_group
.validate_dynamic_bindings(&temp_offsets, &cmd_buf.limits)
@ -434,11 +450,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
state.pipeline = Some(pipeline_id);
let pipeline = cmd_buf
let pipeline: &pipeline::ComputePipeline<A> = cmd_buf
.trackers
.compute_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
.compute_pipelines
.add_single(&*pipeline_guard, pipeline_id)
.ok_or(ComputePassErrorInner::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
unsafe {
@ -587,11 +603,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?;
let indirect_buffer = state
.trackers
let indirect_buffer: &Buffer<A> = state
.scope
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDIRECT)
.map_err(|_| ComputePassErrorInner::InvalidIndirectBuffer(buffer_id))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -670,16 +685,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => {
let scope = PassErrorScope::WriteTimestamp;
let query_set = cmd_buf
let query_set: &resource::QuerySet<A> = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
ComputePassErrorInner::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.add_single(&*query_set_guard, query_set_id)
.ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set
@ -692,16 +702,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = cmd_buf
let query_set: &resource::QuerySet<A> = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
ComputePassErrorInner::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.add_single(&*query_set_guard, query_set_id)
.ok_or(ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set


@ -5,7 +5,7 @@ use crate::{
binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
error::ErrorFormatter,
id,
track::UseExtendError,
track::UsageConflict,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
use wgt::{BufferAddress, BufferSize, Color};
@ -13,8 +13,6 @@ use wgt::{BufferAddress, BufferSize, Color};
use std::num::NonZeroU32;
use thiserror::Error;
pub type BufferError = UseExtendError<hal::BufferUses>;
/// Error validating a draw call.
#[derive(Clone, Debug, Error, PartialEq)]
pub enum DrawError {
@ -79,8 +77,8 @@ pub enum RenderCommandError {
IncompatiblePipelineTargets(#[from] crate::device::RenderPassCompatibilityError),
#[error("pipeline writes to depth/stencil, while the pass has read-only depth/stencil")]
IncompatiblePipelineRods,
#[error("buffer {0:?} is in error {1:?}")]
Buffer(id::BufferId, BufferError),
#[error(transparent)]
UsageConflict(#[from] UsageConflict),
#[error("buffer {0:?} is destroyed")]
DestroyedBuffer(id::BufferId),
#[error(transparent)]
@ -106,7 +104,11 @@ impl crate::error::PrettyError for RenderCommandError {
Self::InvalidPipeline(id) => {
fmt.render_pipeline_label(&id);
}
Self::Buffer(id, ..) | Self::DestroyedBuffer(id) => {
Self::UsageConflict(UsageConflict::TextureInvalid { id }) => {
fmt.texture_label(&id);
}
Self::UsageConflict(UsageConflict::BufferInvalid { id })
| Self::DestroyedBuffer(id) => {
fmt.buffer_label(&id);
}
_ => {}


@ -4,11 +4,11 @@ use hal::CommandEncoder;
use crate::{
device::Device,
hub::Storage,
hub::{HalApi, Storage},
id::{self, TextureId},
init_tracker::*,
resource::{Buffer, Texture},
track::{ResourceTracker, TextureState, TrackerSet},
track::{TextureTracker, Tracker},
FastHashMap,
};
@ -121,36 +121,37 @@ impl CommandBufferTextureMemoryActions {
// Utility function that takes discarded surfaces from (several calls to) register_init_action and initializes them on the spot.
// Takes care of barriers as well!
pub(crate) fn fixup_discarded_surfaces<
A: hal::Api,
A: HalApi,
InitIter: Iterator<Item = TextureSurfaceDiscard>,
>(
inits: InitIter,
encoder: &mut A::CommandEncoder,
texture_guard: &Storage<Texture<A>, TextureId>,
texture_tracker: &mut ResourceTracker<TextureState>,
texture_tracker: &mut TextureTracker<A>,
device: &Device<A>,
) {
for init in inits {
clear_texture(
texture_guard,
id::Valid(init.texture),
texture_guard.get(init.texture).unwrap(),
TextureInitRange {
mip_range: init.mip_level..(init.mip_level + 1),
layer_range: init.layer..(init.layer + 1),
},
encoder,
texture_tracker,
device,
&device.alignments,
&device.zero_buffer,
)
.unwrap();
}
}
impl<A: hal::Api> BakedCommands<A> {
impl<A: HalApi> BakedCommands<A> {
// Inserts all buffer initializations needed to execute the commands, and updates the resource init states accordingly.
pub(crate) fn initialize_buffer_memory(
&mut self,
device_tracker: &mut TrackerSet,
device_tracker: &mut Tracker<A>,
buffer_guard: &mut Storage<Buffer<A>, id::BufferId>,
) -> Result<(), DestroyedBufferError> {
// Gather init ranges for each buffer so we can collapse them.
@ -202,11 +203,11 @@ impl<A: hal::Api> BakedCommands<A> {
// Don't do use_replace since the buffer may no longer have a ref_count.
// However, we *know* it is currently in use, so the tracker must already know about it.
let transition = device_tracker.buffers.change_replace_tracked(
id::Valid(buffer_id),
(),
hal::BufferUses::COPY_DST,
);
let transition = device_tracker
.buffers
.set_single(buffer_guard, buffer_id, hal::BufferUses::COPY_DST)
.unwrap()
.1;
let buffer = buffer_guard
.get_mut(buffer_id)
@ -214,8 +215,11 @@ impl<A: hal::Api> BakedCommands<A> {
let raw_buf = buffer.raw.as_ref().ok_or(DestroyedBufferError(buffer_id))?;
unsafe {
self.encoder
.transition_buffers(transition.map(|pending| pending.into_hal(buffer)));
self.encoder.transition_buffers(
transition
.map(|pending| pending.into_hal(buffer))
.into_iter(),
);
}
for range in ranges.iter() {
@ -234,7 +238,7 @@ impl<A: hal::Api> BakedCommands<A> {
// any textures that are left discarded by this command buffer will be marked as uninitialized
pub(crate) fn initialize_texture_memory(
&mut self,
device_tracker: &mut TrackerSet,
device_tracker: &mut Tracker<A>,
texture_guard: &mut Storage<Texture<A>, TextureId>,
device: &Device<A>,
) -> Result<(), DestroyedTextureError> {
@ -274,12 +278,13 @@ impl<A: hal::Api> BakedCommands<A> {
// TODO: Could we attempt some range collapsing here?
for range in ranges.drain(..) {
clear_texture(
texture_guard,
id::Valid(texture_use.id),
&*texture,
range,
&mut self.encoder,
&mut device_tracker.textures,
device,
&device.alignments,
&device.zero_buffer,
)
.unwrap();
}

91
third_party/rust/wgpu-core/src/command/mod.rs vendored

@ -10,7 +10,7 @@ mod transfer;
use std::slice;
pub(crate) use self::clear::clear_texture_no_device;
pub(crate) use self::clear::clear_texture;
pub use self::{
bundle::*, clear::ClearError, compute::*, draw::*, query::*, render::*, transfer::*,
};
@ -19,11 +19,11 @@ use self::memory_init::CommandBufferTextureMemoryActions;
use crate::error::{ErrorFormatter, PrettyError};
use crate::init_tracker::BufferInitTrackerAction;
use crate::track::{Tracker, UsageScope};
use crate::{
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id,
resource::{Buffer, Texture},
track::{BufferState, ResourceTracker, TextureState, TrackerSet},
Label, Stored,
};
@ -81,10 +81,10 @@ impl<A: hal::Api> CommandEncoder<A> {
}
}
pub struct BakedCommands<A: hal::Api> {
pub struct BakedCommands<A: HalApi> {
pub(crate) encoder: A::CommandEncoder,
pub(crate) list: Vec<A::CommandBuffer>,
pub(crate) trackers: TrackerSet,
pub(crate) trackers: Tracker<A>,
buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
texture_memory_actions: CommandBufferTextureMemoryActions,
}
@ -92,11 +92,11 @@ pub struct BakedCommands<A: hal::Api> {
pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
pub struct CommandBuffer<A: hal::Api> {
pub struct CommandBuffer<A: HalApi> {
encoder: CommandEncoder<A>,
status: CommandEncoderStatus,
pub(crate) device_id: Stored<id::DeviceId>,
pub(crate) trackers: TrackerSet,
pub(crate) trackers: Tracker<A>,
buffer_memory_init_actions: Vec<BufferInitTrackerAction>,
texture_memory_actions: CommandBufferTextureMemoryActions,
limits: wgt::Limits,
@ -124,7 +124,7 @@ impl<A: HalApi> CommandBuffer<A> {
},
status: CommandEncoderStatus::Recording,
device_id,
trackers: TrackerSet::new(A::VARIANT),
trackers: Tracker::new(),
buffer_memory_init_actions: Default::default(),
texture_memory_actions: Default::default(),
limits,
@ -138,23 +138,52 @@ impl<A: HalApi> CommandBuffer<A> {
}
}
pub(crate) fn insert_barriers(
pub(crate) fn insert_barriers_from_tracker(
raw: &mut A::CommandEncoder,
base: &mut TrackerSet,
head_buffers: &ResourceTracker<BufferState>,
head_textures: &ResourceTracker<TextureState>,
base: &mut Tracker<A>,
head: &Tracker<A>,
buffer_guard: &Storage<Buffer<A>, id::BufferId>,
texture_guard: &Storage<Texture<A>, id::TextureId>,
) {
profiling::scope!("insert_barriers");
debug_assert_eq!(A::VARIANT, base.backend());
let buffer_barriers = base.buffers.merge_replace(head_buffers).map(|pending| {
let buf = &buffer_guard[pending.id];
base.buffers.set_from_tracker(&head.buffers);
base.textures
.set_from_tracker(&*texture_guard, &head.textures);
Self::drain_barriers(raw, base, buffer_guard, texture_guard);
}
pub(crate) fn insert_barriers_from_scope(
raw: &mut A::CommandEncoder,
base: &mut Tracker<A>,
head: &UsageScope<A>,
buffer_guard: &Storage<Buffer<A>, id::BufferId>,
texture_guard: &Storage<Texture<A>, id::TextureId>,
) {
profiling::scope!("insert_barriers");
base.buffers.set_from_usage_scope(&head.buffers);
base.textures
.set_from_usage_scope(&*texture_guard, &head.textures);
Self::drain_barriers(raw, base, buffer_guard, texture_guard);
}
pub(crate) fn drain_barriers(
raw: &mut A::CommandEncoder,
base: &mut Tracker<A>,
buffer_guard: &Storage<Buffer<A>, id::BufferId>,
texture_guard: &Storage<Texture<A>, id::TextureId>,
) {
profiling::scope!("drain_barriers");
let buffer_barriers = base.buffers.drain().map(|pending| {
let buf = unsafe { &buffer_guard.get_unchecked(pending.id) };
pending.into_hal(buf)
});
let texture_barriers = base.textures.merge_replace(head_textures).map(|pending| {
let tex = &texture_guard[pending.id];
let texture_barriers = base.textures.drain().map(|pending| {
let tex = unsafe { texture_guard.get_unchecked(pending.id) };
pending.into_hal(tex)
});
@ -165,7 +194,7 @@ impl<A: HalApi> CommandBuffer<A> {
}
}
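// A sketch of the drain-then-encode pattern above (illustrative types, not
// wgpu-core's): transitions accumulated by the trackers are drained once,
// converted into barrier descriptions, and handed to the encoder as a single
// lazy iterator.
struct PendingTransitionSketch {
    id: usize,
    from: u32, // old usage bits
    to: u32,   // new usage bits
}

fn drain_to_barriers(
    pending: &mut Vec<PendingTransitionSketch>,
) -> impl Iterator<Item = (usize, u32, u32)> + '_ {
    pending.drain(..).map(|p| (p.id, p.from, p.to))
}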
impl<A: hal::Api> CommandBuffer<A> {
impl<A: HalApi> CommandBuffer<A> {
fn get_encoder_mut(
storage: &mut Storage<Self, id::CommandEncoderId>,
id: id::CommandEncoderId,
@ -198,7 +227,7 @@ impl<A: hal::Api> CommandBuffer<A> {
}
}
impl<A: hal::Api> crate::hub::Resource for CommandBuffer<A> {
impl<A: HalApi> crate::hub::Resource for CommandBuffer<A> {
const TYPE: &'static str = "CommandBuffer";
fn life_guard(&self) -> &crate::LifeGuard {
@ -219,6 +248,17 @@ pub struct BasePassRef<'a, C> {
pub push_constant_data: &'a [u32],
}
/// A stream of commands for a render pass or compute pass.
///
/// This also contains side tables referred to by certain commands,
/// like dynamic offsets for [`SetBindGroup`] or string data for
/// [`InsertDebugMarker`].
///
/// Render passes use `BasePass<RenderCommand>`, whereas compute
/// passes use `BasePass<ComputeCommand>`.
///
/// [`SetBindGroup`]: RenderCommand::SetBindGroup
/// [`InsertDebugMarker`]: RenderCommand::InsertDebugMarker
#[doc(hidden)]
#[derive(Debug)]
#[cfg_attr(
@ -231,9 +271,22 @@ pub struct BasePassRef<'a, C> {
)]
pub struct BasePass<C> {
pub label: Option<String>,
/// The stream of commands.
pub commands: Vec<C>,
/// Dynamic offsets consumed by [`SetBindGroup`] commands in `commands`.
///
/// Each successive `SetBindGroup` consumes the next
/// [`num_dynamic_offsets`] values from this list.
pub dynamic_offsets: Vec<wgt::DynamicOffset>,
/// Strings used by debug instructions.
///
/// Each successive [`PushDebugGroup`] or [`InsertDebugMarker`]
/// instruction consumes the next `len` bytes from this vector.
pub string_data: Vec<u8>,
pub push_constant_data: Vec<u32>,
}
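// A sketch of how a consumer walks the `dynamic_offsets` side table
// (illustrative; the real commands carry more fields): each successive
// `SetBindGroup` takes the next `num_dynamic_offsets` values, so a single
// cursor over the flat list suffices.
fn walk_dynamic_offsets(num_dynamic_offsets_per_cmd: &[usize], dynamic_offsets: &[u32]) {
    let mut next = 0;
    for &count in num_dynamic_offsets_per_cmd {
        let offsets = &dynamic_offsets[next..next + count];
        next += count;
        // ... bind the group for this command using `offsets` ...
        let _ = offsets;
    }
}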
@ -297,7 +350,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
cmd_buf.status = CommandEncoderStatus::Finished;
//Note: if we want to stop tracking the swapchain texture view,
// this is the place to do it.
log::trace!("Command buffer {:?} {:#?}", encoder_id, cmd_buf.trackers);
log::trace!("Command buffer {:?}", encoder_id);
None
}
CommandEncoderStatus::Finished => Some(CommandEncoderError::NotRecording),


@ -8,7 +8,6 @@ use crate::{
id::{self, Id, TypedId},
init_tracker::MemoryInitKind,
resource::QuerySet,
track::UseExtendError,
Epoch, FastHashMap, Index,
};
use std::{iter, marker::PhantomData};
@ -300,11 +299,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => QueryError::InvalidQuerySet(query_set_id),
_ => unreachable!(),
})?;
.add_single(&*query_set_guard, query_set_id)
.ok_or(QueryError::InvalidQuerySet(query_set_id))?;
query_set.validate_and_write_timestamp(raw_encoder, query_set_id, query_index, None)?;
@ -348,17 +344,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let query_set = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => QueryError::InvalidQuerySet(query_set_id),
_ => unreachable!(),
})?;
.add_single(&*query_set_guard, query_set_id)
.ok_or(QueryError::InvalidQuerySet(query_set_id))?;
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), hal::BufferUses::COPY_DST)
.map_err(QueryError::InvalidBuffer)?;
.set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST)
.ok_or(QueryError::InvalidBuffer(destination))?;
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
if !dst_buffer.usage.contains(wgt::BufferUsages::COPY_DST) {
@ -407,7 +400,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
));
unsafe {
raw_encoder.transition_buffers(dst_barrier);
raw_encoder.transition_buffers(dst_barrier.into_iter());
raw_encoder.copy_query_results(
&query_set.raw,
start_query..end_query,


@ -1,6 +1,7 @@
use crate::{
binding_model::BindError,
command::{
self,
bind::Binder,
end_pipeline_statistics_query,
memory_init::{fixup_discarded_surfaces, SurfacesInDiscardState},
@ -17,8 +18,8 @@ use crate::{
id,
init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction},
pipeline::PipelineFlags,
resource::{Texture, TextureView},
track::{StatefulTrackerSubset, TextureSelector, UsageConflict},
resource::{self, Buffer, Texture, TextureView},
track::{TextureSelector, UsageConflict, UsageScope},
validation::{
check_buffer_usage, check_texture_usage, MissingBufferUsageError, MissingTextureUsageError,
},
@ -38,7 +39,6 @@ use serde::Deserialize;
#[cfg(any(feature = "serial-pass", feature = "trace"))]
use serde::Serialize;
use crate::track::UseExtendError;
use std::{borrow::Cow, fmt, iter, marker::PhantomData, mem, num::NonZeroU32, ops::Range, str};
use super::{memory_init::TextureSurfaceDiscard, CommandBufferTextureMemoryActions};
@ -561,9 +561,9 @@ impl<A: hal::Api> TextureView<A> {
const MAX_TOTAL_ATTACHMENTS: usize = hal::MAX_COLOR_TARGETS + hal::MAX_COLOR_TARGETS + 1;
type AttachmentDataVec<T> = ArrayVec<T, MAX_TOTAL_ATTACHMENTS>;
struct RenderPassInfo<'a, A: hal::Api> {
struct RenderPassInfo<'a, A: HalApi> {
context: RenderPassContext,
trackers: StatefulTrackerSubset,
usage_scope: UsageScope<A>,
render_attachments: AttachmentDataVec<RenderAttachment<'a>>, // All render attachments, including depth/stencil
is_ds_read_only: bool,
extent: wgt::Extent3d,
@ -605,7 +605,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
// but recording the discard right away is alright, since the texture can't be used during the pass anyway
texture_memory_actions.discard(TextureSurfaceDiscard {
texture: view.parent_id.value.0,
mip_level: view.selector.levels.start,
mip_level: view.selector.mips.start,
layer: view.selector.layers.start,
});
}
@ -618,6 +618,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
depth_stencil_attachment: Option<&RenderPassDepthStencilAttachment>,
cmd_buf: &mut CommandBuffer<A>,
view_guard: &'a Storage<TextureView<A>, id::TextureViewId>,
buffer_guard: &'a Storage<Buffer<A>, id::BufferId>,
texture_guard: &'a Storage<Texture<A>, id::TextureId>,
) -> Result<Self, RenderPassErrorInner> {
profiling::scope!("start", "RenderPassInfo");
@ -699,8 +700,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let view = cmd_buf
.trackers
.views
.use_extend(&*view_guard, at.view, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.view))?;
.add_single(&*view_guard, at.view)
.ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?;
check_multiview(view)?;
add_view(view, "depth")?;
@ -779,7 +780,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
// Both are discarded using the regular path.
discarded_surfaces.push(TextureSurfaceDiscard {
texture: view.parent_id.value.0,
mip_level: view.selector.levels.start,
mip_level: view.selector.mips.start,
layer: view.selector.layers.start,
});
}
@ -808,8 +809,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let color_view = cmd_buf
.trackers
.views
.use_extend(&*view_guard, at.view, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(at.view))?;
.add_single(&*view_guard, at.view)
.ok_or(RenderPassErrorInner::InvalidAttachment(at.view))?;
check_multiview(color_view)?;
add_view(color_view, "color")?;
@ -838,8 +839,8 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
let resolve_view = cmd_buf
.trackers
.views
.use_extend(&*view_guard, resolve_target, (), ())
.map_err(|_| RenderPassErrorInner::InvalidAttachment(resolve_target))?;
.add_single(&*view_guard, resolve_target)
.ok_or(RenderPassErrorInner::InvalidAttachment(resolve_target))?;
check_multiview(resolve_view)?;
if color_view.extent != resolve_view.extent {
@ -934,7 +935,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
Ok(Self {
context,
trackers: StatefulTrackerSubset::new(A::VARIANT),
usage_scope: UsageScope::new(buffer_guard, texture_guard),
render_attachments,
is_ds_read_only,
extent,
@ -949,7 +950,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
mut self,
raw: &mut A::CommandEncoder,
texture_guard: &Storage<Texture<A>, id::TextureId>,
) -> Result<(StatefulTrackerSubset, SurfacesInDiscardState), RenderPassErrorInner> {
) -> Result<(UsageScope<A>, SurfacesInDiscardState), RenderPassErrorInner> {
profiling::scope!("finish", "RenderPassInfo");
unsafe {
raw.end_render_pass();
@ -963,15 +964,18 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
check_texture_usage(texture.desc.usage, TextureUsages::RENDER_ATTACHMENT)?;
// the tracker set of the pass is always in "extend" mode
self.trackers
.textures
.change_extend(
ra.texture_id.value,
&ra.texture_id.ref_count,
ra.selector.clone(),
ra.usage,
)
.map_err(UsageConflict::from)?;
unsafe {
self.usage_scope
.textures
.merge_single(
&*texture_guard,
ra.texture_id.value,
Some(ra.selector.clone()),
&ra.texture_id.ref_count,
ra.usage,
)
.map_err(UsageConflict::from)?
};
}
// If either only stencil or depth was discarded, we put in a special clear pass to keep the init status of the aspects in sync.
@ -1012,7 +1016,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
}
}
Ok((self.trackers, self.pending_discard_init_fixups))
Ok((self.usage_scope, self.pending_discard_init_fixups))
}
}
@ -1047,7 +1051,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
let (trackers, query_reset_state, pending_discard_init_fixups) = {
let (scope, query_reset_state, pending_discard_init_fixups) = {
let (mut cmb_guard, mut token) = hub.command_buffers.write(&mut token);
// Spell out the type, to placate rust-analyzer.
@ -1075,7 +1079,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (bundle_guard, mut token) = hub.render_bundles.read(&mut token);
let (pipeline_layout_guard, mut token) = hub.pipeline_layouts.read(&mut token);
let (bind_group_guard, mut token) = hub.bind_groups.read(&mut token);
let (pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let (render_pipeline_guard, mut token) = hub.render_pipelines.read(&mut token);
let (query_set_guard, mut token) = hub.query_sets.read(&mut token);
let (buffer_guard, mut token) = hub.buffers.read(&mut token);
let (texture_guard, mut token) = hub.textures.read(&mut token);
@ -1093,10 +1097,23 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
depth_stencil_attachment,
cmd_buf,
&*view_guard,
&*buffer_guard,
&*texture_guard,
)
.map_pass_err(init_scope)?;
cmd_buf.trackers.set_size(
Some(&*buffer_guard),
Some(&*texture_guard),
Some(&*view_guard),
None,
Some(&*bind_group_guard),
None,
Some(&*render_pipeline_guard),
Some(&*bundle_guard),
Some(&*query_set_guard),
);
let raw = &mut cmd_buf.encoder.raw;
let mut state = State {
@ -1142,17 +1159,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let bind_group = cmd_buf
.trackers
.bind_groups
.use_extend(&*bind_group_guard, bind_group_id, (), ())
.map_err(|_| RenderCommandError::InvalidBindGroup(bind_group_id))
.add_single(&*bind_group_guard, bind_group_id)
.ok_or(RenderCommandError::InvalidBindGroup(bind_group_id))
.map_pass_err(scope)?;
bind_group
.validate_dynamic_bindings(&temp_offsets, &cmd_buf.limits)
.map_pass_err(scope)?;
// merge the resource tracker in
info.trackers
.merge_extend(&bind_group.used)
.map_pass_err(scope)?;
unsafe {
info.usage_scope
.merge_bind_group(&*texture_guard, &bind_group.used)
.map_pass_err(scope)?;
}
//Note: stateless trackers are not merged: the lifetime reference
// is held to the bind group itself.
@ -1203,9 +1222,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let pipeline = cmd_buf
.trackers
.render_pipes
.use_extend(&*pipeline_guard, pipeline_id, (), ())
.map_err(|_| RenderCommandError::InvalidPipeline(pipeline_id))
.render_pipelines
.add_single(&*render_pipeline_guard, pipeline_id)
.ok_or(RenderCommandError::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?;
info.context
@ -1312,11 +1331,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
size,
} => {
let scope = PassErrorScope::SetIndexBuffer(buffer_id);
let buffer = info
.trackers
let buffer: &Buffer<A> = info
.usage_scope
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDEX)
.map_err(|e| RenderCommandError::Buffer(buffer_id, e))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDEX)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, BufferUsages::INDEX)
.map_pass_err(scope)?;
@ -1359,11 +1377,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
size,
} => {
let scope = PassErrorScope::SetVertexBuffer(buffer_id);
let buffer = info
.trackers
let buffer: &Buffer<A> = info
.usage_scope
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::VERTEX)
.map_err(|e| RenderCommandError::Buffer(buffer_id, e))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::VERTEX)
.map_pass_err(scope)?;
check_buffer_usage(buffer.usage, BufferUsages::VERTEX)
.map_pass_err(scope)?;
@ -1617,11 +1634,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?;
let indirect_buffer = info
.trackers
let indirect_buffer: &Buffer<A> = info
.usage_scope
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDIRECT)
.map_err(|e| RenderCommandError::Buffer(buffer_id, e))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -1688,11 +1704,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.require_downlevel_flags(wgt::DownlevelFlags::INDIRECT_EXECUTION)
.map_pass_err(scope)?;
let indirect_buffer = info
.trackers
let indirect_buffer: &Buffer<A> = info
.usage_scope
.buffers
.use_extend(&*buffer_guard, buffer_id, (), hal::BufferUses::INDIRECT)
.map_err(|e| RenderCommandError::Buffer(buffer_id, e))
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -1702,16 +1717,14 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.ok_or(RenderCommandError::DestroyedBuffer(buffer_id))
.map_pass_err(scope)?;
let count_buffer = info
.trackers
let count_buffer: &Buffer<A> = info
.usage_scope
.buffers
.use_extend(
.merge_single(
&*buffer_guard,
count_buffer_id,
(),
hal::BufferUses::INDIRECT,
)
.map_err(|e| RenderCommandError::Buffer(count_buffer_id, e))
.map_pass_err(scope)?;
check_buffer_usage(count_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
@ -1814,16 +1827,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => {
let scope = PassErrorScope::WriteTimestamp;
let query_set = cmd_buf
let query_set: &resource::QuerySet<A> = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
RenderCommandError::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.add_single(&*query_set_guard, query_set_id)
.ok_or(RenderCommandError::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set
@ -1841,16 +1849,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
} => {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = cmd_buf
let query_set: &resource::QuerySet<A> = cmd_buf
.trackers
.query_sets
.use_extend(&*query_set_guard, query_set_id, (), ())
.map_err(|e| match e {
UseExtendError::InvalidResource => {
RenderCommandError::InvalidQuerySet(query_set_id)
}
_ => unreachable!(),
})
.add_single(&*query_set_guard, query_set_id)
.ok_or(RenderCommandError::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set
@ -1871,11 +1874,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
RenderCommand::ExecuteBundle(bundle_id) => {
let scope = PassErrorScope::ExecuteBundle;
let bundle = cmd_buf
let bundle: &command::RenderBundle<A> = cmd_buf
.trackers
.bundles
.use_extend(&*bundle_guard, bundle_id, (), ())
.map_err(|_| RenderCommandError::InvalidRenderBundle(bundle_id))
.add_single(&*bundle_guard, bundle_id)
.ok_or(RenderCommandError::InvalidRenderBundle(bundle_id))
.map_pass_err(scope)?;
info.context
@ -1913,7 +1916,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
raw,
&*pipeline_layout_guard,
&*bind_group_guard,
&*pipeline_guard,
&*render_pipeline_guard,
&*buffer_guard,
)
}
@ -1927,23 +1930,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
})
.map_pass_err(scope)?;
info.trackers
.merge_extend(&bundle.used)
.map_pass_err(scope)?;
// Start tracking the bind groups specifically, as they are the only
// compound resources, to make it easier to update submission indices
// later at submission time.
cmd_buf
.trackers
.bind_groups
.merge_extend(&bundle.used.bind_groups)
.unwrap();
unsafe {
info.usage_scope
.merge_render_bundle(&*texture_guard, &bundle.used)
.map_pass_err(scope)?;
cmd_buf
.trackers
.add_from_render_bundle(&bundle.used)
.map_pass_err(scope)?;
};
state.reset_bundle();
}
}
}
log::trace!("Merging {:?} with the render pass", encoder_id);
log::trace!("Merging renderpass into cmd_buf {:?}", encoder_id);
let (trackers, pending_discard_init_fixups) =
info.finish(raw, &*texture_guard).map_pass_err(init_scope)?;
@ -1977,11 +1978,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(RenderCommandError::InvalidQuerySet)
.map_pass_err(PassErrorScope::QueryReset)?;
super::CommandBuffer::insert_barriers(
super::CommandBuffer::insert_barriers_from_scope(
transit,
&mut cmd_buf.trackers,
&trackers.buffers,
&trackers.textures,
&scope,
&*buffer_guard,
&*texture_guard,
);


@ -1,12 +1,12 @@
#[cfg(feature = "trace")]
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
command::{clear_texture, CommandBuffer, CommandEncoderError},
conv,
device::{Device, MissingDownlevelFlags},
error::{ErrorFormatter, PrettyError},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Storage, Token},
id::{BufferId, CommandEncoderId, Id, TextureId, Valid},
id::{BufferId, CommandEncoderId, TextureId, Valid},
init_tracker::{
has_copy_partial_init_tracker_coverage, MemoryInitKind, TextureInitRange,
TextureInitTrackerAction,
@ -15,14 +15,13 @@ use crate::{
track::TextureSelector,
};
use arrayvec::ArrayVec;
use hal::CommandEncoder as _;
use thiserror::Error;
use wgt::{BufferAddress, BufferUsages, Extent3d, TextureUsages};
use std::iter;
use super::clear::clear_texture;
pub type ImageCopyBuffer = wgt::ImageCopyBuffer<BufferId>;
pub type ImageCopyTexture = wgt::ImageCopyTexture<TextureId>;
@ -191,7 +190,7 @@ pub(crate) fn extract_texture_selector<A: hal::Api>(
aspect: copy_aspect,
};
let selector = TextureSelector {
levels: copy_texture.mip_level..copy_texture.mip_level + 1,
mips: copy_texture.mip_level..copy_texture.mip_level + 1,
layers,
};
@ -312,8 +311,10 @@ pub(crate) fn validate_texture_copy_range(
match desc.format {
wgt::TextureFormat::Depth32Float
| wgt::TextureFormat::Depth32FloatStencil8
| wgt::TextureFormat::Depth24Plus
| wgt::TextureFormat::Depth24PlusStencil8 => {
| wgt::TextureFormat::Depth24PlusStencil8
| wgt::TextureFormat::Depth24UnormStencil8 => {
if *copy_size != extent {
return Err(TransferError::InvalidDepthTextureExtent);
}
@ -380,14 +381,13 @@ pub(crate) fn validate_texture_copy_range(
Ok((copy_extent, array_layer_count))
}
fn handle_texture_init<A: hal::Api>(
fn handle_texture_init<A: HalApi>(
init_kind: MemoryInitKind,
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
copy_texture: &ImageCopyTexture,
copy_size: &Extent3d,
texture_guard: &Storage<Texture<A>, Id<Texture<hal::api::Empty>>>,
texture: &Texture<A>,
texture_guard: &Storage<Texture<A>, TextureId>,
) {
let init_action = TextureInitTrackerAction {
id: copy_texture.texture,
@ -409,15 +409,16 @@ fn handle_texture_init<A: hal::Api>(
let cmd_buf_raw = cmd_buf.encoder.open();
for init in immediate_inits {
clear_texture(
texture_guard,
Valid(init.texture),
texture,
TextureInitRange {
mip_range: init.mip_level..(init.mip_level + 1),
layer_range: init.layer..(init.layer + 1),
},
cmd_buf_raw,
&mut cmd_buf.trackers.textures,
device,
&device.alignments,
&device.zero_buffer,
)
.unwrap();
}
@ -425,14 +426,14 @@ fn handle_texture_init<A: hal::Api>(
}
// Ensures the source texture of a transfer is in the right initialization state and records the state for after the transfer operation.
fn handle_src_texture_init<A: hal::Api>(
fn handle_src_texture_init<A: HalApi>(
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
source: &ImageCopyTexture,
copy_size: &Extent3d,
texture_guard: &Storage<Texture<A>, TextureId>,
) -> Result<(), TransferError> {
let texture = texture_guard
let _ = texture_guard
.get(source.texture)
.map_err(|_| TransferError::InvalidTexture(source.texture))?;
@ -443,13 +444,12 @@ fn handle_src_texture_init<A: hal::Api>(
source,
copy_size,
texture_guard,
texture,
);
Ok(())
}
// Ensures the destination texture of a transfer is in the right initialization state and records the state for after the transfer operation.
fn handle_dst_texture_init<A: hal::Api>(
fn handle_dst_texture_init<A: HalApi>(
cmd_buf: &mut CommandBuffer<A>,
device: &Device<A>,
destination: &ImageCopyTexture,
@ -479,7 +479,6 @@ fn handle_dst_texture_init<A: hal::Api>(
destination,
copy_size,
texture_guard,
texture,
);
Ok(())
}
@ -520,8 +519,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, source, (), hal::BufferUses::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
.set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC)
.ok_or(TransferError::InvalidBuffer(source))?;
let src_raw = src_buffer
.raw
.as_ref()
@ -530,15 +529,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
// expecting only a single barrier
let src_barrier = src_pending
.map(|pending| pending.into_hal(src_buffer))
.next();
let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer));
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, destination, (), hal::BufferUses::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
.set_single(&*buffer_guard, destination, hal::BufferUses::COPY_DST)
.ok_or(TransferError::InvalidBuffer(destination))?;
let dst_raw = dst_buffer
.raw
.as_ref()
@ -546,9 +543,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !dst_buffer.usage.contains(BufferUsages::COPY_DST) {
return Err(TransferError::MissingCopyDstUsageFlag(Some(destination), None).into());
}
let dst_barrier = dst_pending
.map(|pending| pending.into_hal(dst_buffer))
.next();
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(TransferError::UnalignedCopySize(size).into());
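Throughout this file, the old fallible `use_replace(guard, id, selector, usage)` calls become `set_single(guard, id, usage)`: `None` now signals an invalid id, and the pending transition comes back as an `Option` rather than an iterator, which is why the `.next()` calls disappear. A minimal sketch of the new call shape, reusing the names from the hunk above:

// Sketch: set_single returns None for an invalid id, and hands back at
// most one pending transition, so no iterator draining is needed.
let (src_buffer, src_pending) = cmd_buf
    .trackers
    .buffers
    .set_single(&*buffer_guard, source, hal::BufferUses::COPY_SRC)
    .ok_or(TransferError::InvalidBuffer(source))?;
let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer));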
@ -658,8 +653,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (src_buffer, src_pending) = cmd_buf
.trackers
.buffers
.use_replace(&*buffer_guard, source.buffer, (), hal::BufferUses::COPY_SRC)
.map_err(TransferError::InvalidBuffer)?;
.set_single(&*buffer_guard, source.buffer, hal::BufferUses::COPY_SRC)
.ok_or(TransferError::InvalidBuffer(source.buffer))?;
let src_raw = src_buffer
.raw
.as_ref()
@ -667,18 +662,18 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !src_buffer.usage.contains(BufferUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_buffer));
let src_barrier = src_pending.map(|pending| pending.into_hal(src_buffer));
let (dst_texture, dst_pending) = cmd_buf
.trackers
.textures
.use_replace(
.set_single(
&*texture_guard,
destination.texture,
dst_range,
hal::TextureUses::COPY_DST,
)
.unwrap();
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst_texture
.inner
.as_raw()
@ -688,7 +683,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TransferError::MissingCopyDstUsageFlag(None, Some(destination.texture)).into(),
);
}
let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_texture));
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_texture));
let format_desc = dst_texture.desc.format.describe();
let (hal_copy_size, array_layer_count) = validate_texture_copy_range(
@ -735,8 +730,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let cmd_buf_raw = cmd_buf.encoder.open();
unsafe {
cmd_buf_raw.transition_textures(dst_barriers);
cmd_buf_raw.transition_buffers(src_barriers);
cmd_buf_raw.transition_textures(dst_barrier.into_iter());
cmd_buf_raw.transition_buffers(src_barrier.into_iter());
cmd_buf_raw.copy_buffer_to_texture(src_raw, dst_raw, regions);
}
Ok(())
@ -785,13 +780,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (src_texture, src_pending) = cmd_buf
.trackers
.textures
.use_replace(
.set_single(
&*texture_guard,
source.texture,
src_range,
hal::TextureUses::COPY_SRC,
)
.unwrap();
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_raw = src_texture
.inner
.as_raw()
@ -799,18 +794,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
if !src_texture.desc.usage.contains(TextureUsages::COPY_SRC) {
return Err(TransferError::MissingCopySrcUsageFlag.into());
}
let src_barriers = src_pending.map(|pending| pending.into_hal(src_texture));
let src_barrier = src_pending.map(|pending| pending.into_hal(src_texture));
let (dst_buffer, dst_pending) = cmd_buf
.trackers
.buffers
.use_replace(
.set_single(
&*buffer_guard,
destination.buffer,
(),
hal::BufferUses::COPY_DST,
)
.map_err(TransferError::InvalidBuffer)?;
.ok_or(TransferError::InvalidBuffer(destination.buffer))?;
let dst_raw = dst_buffer
.raw
.as_ref()
@ -820,7 +814,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TransferError::MissingCopyDstUsageFlag(Some(destination.buffer), None).into(),
);
}
let dst_barriers = dst_pending.map(|pending| pending.into_hal(dst_buffer));
let dst_barrier = dst_pending.map(|pending| pending.into_hal(dst_buffer));
let format_desc = src_texture.desc.format.describe();
let (hal_copy_size, array_layer_count) =
@ -875,8 +869,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
let cmd_buf_raw = cmd_buf.encoder.open();
unsafe {
cmd_buf_raw.transition_buffers(dst_barriers);
cmd_buf_raw.transition_textures(src_barriers);
cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
cmd_buf_raw.transition_textures(src_barrier.into_iter());
cmd_buf_raw.copy_texture_to_buffer(
src_raw,
hal::TextureUses::COPY_SRC,
@ -936,13 +930,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (src_texture, src_pending) = cmd_buf
.trackers
.textures
.use_replace(
.set_single(
&*texture_guard,
source.texture,
src_range,
hal::TextureUses::COPY_SRC,
)
.unwrap();
.ok_or(TransferError::InvalidTexture(source.texture))?;
let src_raw = src_texture
.inner
.as_raw()
@ -953,20 +947,21 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//TODO: try to avoid this collection. It's needed because both
// `src_pending` and `dst_pending` try to hold `trackers.textures` mutably.
let mut barriers = src_pending
let mut barriers: ArrayVec<_, 2> = src_pending
.map(|pending| pending.into_hal(src_texture))
.collect::<Vec<_>>();
.into_iter()
.collect();
let (dst_texture, dst_pending) = cmd_buf
.trackers
.textures
.use_replace(
.set_single(
&*texture_guard,
destination.texture,
dst_range,
hal::TextureUses::COPY_DST,
)
.unwrap();
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let dst_raw = dst_texture
.inner
.as_raw()

third_party/rust/wgpu-core/src/conv.rs (vendored)

@ -1,6 +1,10 @@
use crate::resource;
pub fn is_power_of_two(val: u32) -> bool {
pub fn is_power_of_two_u16(val: u16) -> bool {
val != 0 && (val & (val - 1)) == 0
}
pub fn is_power_of_two_u32(val: u32) -> bool {
val != 0 && (val & (val - 1)) == 0
}
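Both helpers use the classic bit trick: `val & (val - 1)` clears the lowest set bit, so the expression is zero exactly when `val` has a single bit set. A quick self-contained check:

#[test]
fn power_of_two_trick() {
    // val & (val - 1) clears the lowest set bit; zero means one bit was set.
    fn is_power_of_two_u32(val: u32) -> bool {
        val != 0 && (val & (val - 1)) == 0
    }
    assert!(is_power_of_two_u32(1));
    assert!(is_power_of_two_u32(64));
    assert!(!is_power_of_two_u32(0));
    assert!(!is_power_of_two_u32(48));
}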
@ -15,7 +19,11 @@ pub fn is_valid_copy_src_texture_format(format: wgt::TextureFormat) -> bool {
pub fn is_valid_copy_dst_texture_format(format: wgt::TextureFormat) -> bool {
use wgt::TextureFormat as Tf;
match format {
Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => false,
Tf::Depth32Float
| Tf::Depth32FloatStencil8
| Tf::Depth24Plus
| Tf::Depth24PlusStencil8
| Tf::Depth24UnormStencil8 => false,
_ => true,
}
}
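The effect is that depth formats are never valid copy destinations, while everything else falls through to `true`. A small illustrative check, assuming `wgt` in scope:

// Depth formats are rejected as copy destinations; color formats pass.
assert!(!is_valid_copy_dst_texture_format(wgt::TextureFormat::Depth24UnormStencil8));
assert!(is_valid_copy_dst_texture_format(wgt::TextureFormat::Rgba8Unorm));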
@ -51,7 +59,7 @@ pub fn map_buffer_usage(usage: wgt::BufferUsages) -> hal::BufferUses {
usage.contains(wgt::BufferUsages::UNIFORM),
);
u.set(
hal::BufferUses::STORAGE_READ | hal::BufferUses::STORAGE_WRITE,
hal::BufferUses::STORAGE_READ | hal::BufferUses::STORAGE_READ_WRITE,
usage.contains(wgt::BufferUsages::STORAGE),
);
u.set(
@ -79,7 +87,7 @@ pub fn map_texture_usage(
usage.contains(wgt::TextureUsages::TEXTURE_BINDING),
);
u.set(
hal::TextureUses::STORAGE_READ | hal::TextureUses::STORAGE_WRITE,
hal::TextureUses::STORAGE_READ | hal::TextureUses::STORAGE_READ_WRITE,
usage.contains(wgt::TextureUsages::STORAGE_BINDING),
);
let is_color = aspect.contains(hal::FormatAspects::COLOR);
@ -146,7 +154,7 @@ pub fn check_texture_dimension_size(
return Err(Tde::LimitExceeded { dim, given, limit });
}
}
if sample_size == 0 || sample_size > sample_limit || !is_power_of_two(sample_size) {
if sample_size == 0 || sample_size > sample_limit || !is_power_of_two_u32(sample_size) {
return Err(Tde::InvalidSampleCount(sample_size));
}

third_party/rust/wgpu-core/src/device/life.rs (vendored)

@ -7,7 +7,7 @@ use crate::{
},
hub::{GlobalIdentityHandlerFactory, HalApi, Hub, Token},
id, resource,
track::TrackerSet,
track::{BindGroupStates, RenderBundleScope, Tracker},
RefCount, Stored, SubmissionIndex,
};
use smallvec::SmallVec;
@ -68,16 +68,20 @@ impl SuspectedResources {
self.query_sets.extend_from_slice(&other.query_sets);
}
pub(super) fn add_trackers(&mut self, trackers: &TrackerSet) {
pub(super) fn add_render_bundle_scope<A: HalApi>(&mut self, trackers: &RenderBundleScope<A>) {
self.buffers.extend(trackers.buffers.used());
self.textures.extend(trackers.textures.used());
self.bind_groups.extend(trackers.bind_groups.used());
self.render_pipelines
.extend(trackers.render_pipelines.used());
self.query_sets.extend(trackers.query_sets.used());
}
pub(super) fn add_bind_group_states<A: HalApi>(&mut self, trackers: &BindGroupStates<A>) {
self.buffers.extend(trackers.buffers.used());
self.textures.extend(trackers.textures.used());
self.texture_views.extend(trackers.views.used());
self.samplers.extend(trackers.samplers.used());
self.bind_groups.extend(trackers.bind_groups.used());
self.compute_pipelines.extend(trackers.compute_pipes.used());
self.render_pipelines.extend(trackers.render_pipes.used());
self.render_bundles.extend(trackers.bundles.used());
self.query_sets.extend(trackers.query_sets.used());
}
}
@ -273,7 +277,8 @@ pub(super) struct LifetimeTracker<A: hal::Api> {
/// Textures can be used in the upcoming submission by `write_texture`.
pub future_suspected_textures: Vec<Stored<id::TextureId>>,
/// Resources that are suspected for destruction.
/// Resources whose user handle has died (i.e. drop/destroy has been called)
/// and will likely be ready for destruction soon.
pub suspected_resources: SuspectedResources,
/// Resources used by queue submissions still in flight. One entry per
@ -435,15 +440,18 @@ impl<A: hal::Api> LifetimeTracker<A> {
}
}
pub fn add_work_done_closure(&mut self, closure: SubmittedWorkDoneClosure) -> bool {
pub fn add_work_done_closure(
&mut self,
closure: SubmittedWorkDoneClosure,
) -> Option<SubmittedWorkDoneClosure> {
match self.active.last_mut() {
Some(active) => {
active.work_done_closures.push(closure);
true
None
}
// Note: we can't immediately invoke the closure, since it assumes
// nothing is currently locked in the hubs.
None => false,
None => Some(closure),
}
}
}
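The `Option` return replaces the old `bool`: when no submission is in flight, the closure is handed back so the caller can fire it after all hub locks are released. The caller-side pattern, as used by `queue_on_submitted_work_done` further down:

// Sketch: run the returned closure only once nothing is locked.
if let Some(closure) = life_tracker.add_work_done_closure(closure) {
    closure.call(); // no submissions in flight; fire immediately
}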
@ -491,7 +499,7 @@ impl<A: HalApi> LifetimeTracker<A> {
pub(super) fn triage_suspected<G: GlobalIdentityHandlerFactory>(
&mut self,
hub: &Hub<A, G>,
trackers: &Mutex<TrackerSet>,
trackers: &Mutex<Tracker<A>>,
#[cfg(feature = "trace")] trace: Option<&Mutex<trace::Trace>>,
token: &mut Token<super::Device<A>>,
) {
@ -510,7 +518,7 @@ impl<A: HalApi> LifetimeTracker<A> {
}
if let Some(res) = hub.render_bundles.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
self.suspected_resources.add_render_bundle_scope(&res.used);
}
}
}
@ -529,7 +537,7 @@ impl<A: HalApi> LifetimeTracker<A> {
}
if let Some(res) = hub.bind_groups.unregister_locked(id.0, &mut *guard) {
self.suspected_resources.add_trackers(&res.used);
self.suspected_resources.add_bind_group_states(&res.used);
self.suspected_resources
.bind_group_layouts
@ -670,7 +678,7 @@ impl<A: HalApi> LifetimeTracker<A> {
let mut trackers = trackers.lock();
for id in self.suspected_resources.compute_pipelines.drain(..) {
if trackers.compute_pipes.remove_abandoned(id) {
if trackers.compute_pipelines.remove_abandoned(id) {
log::debug!("Compute pipeline {:?} will be destroyed", id);
#[cfg(feature = "trace")]
if let Some(t) = trace {
@ -695,7 +703,7 @@ impl<A: HalApi> LifetimeTracker<A> {
let mut trackers = trackers.lock();
for id in self.suspected_resources.render_pipelines.drain(..) {
if trackers.render_pipes.remove_abandoned(id) {
if trackers.render_pipelines.remove_abandoned(id) {
log::debug!("Render pipeline {:?} will be destroyed", id);
#[cfg(feature = "trace")]
if let Some(t) = trace {
@ -829,7 +837,7 @@ impl<A: HalApi> LifetimeTracker<A> {
&mut self,
hub: &Hub<A, G>,
raw: &A::Device,
trackers: &Mutex<TrackerSet>,
trackers: &Mutex<Tracker<A>>,
token: &mut Token<super::Device<A>>,
) -> Vec<super::BufferMapPendingClosure> {
if self.ready_to_map.is_empty() {

third_party/rust/wgpu-core/src/device/mod.rs (vendored)

@ -8,7 +8,7 @@ use crate::{
TextureInitTracker, TextureInitTrackerAction,
},
instance, pipeline, present, resource,
track::{BufferState, TextureSelector, TextureState, TrackerSet, UsageConflict},
track::{BindGroupStates, TextureSelector, Tracker},
validation::{self, check_buffer_usage, check_texture_usage},
FastHashMap, Label, LabelHelpers as _, LifeGuard, MultiRefCount, RefCount, Stored,
SubmissionIndex, DOWNLEVEL_ERROR_MESSAGE,
@ -22,7 +22,7 @@ use smallvec::SmallVec;
use thiserror::Error;
use wgt::{BufferAddress, TextureFormat, TextureViewDimension};
use std::{borrow::Cow, iter, marker::PhantomData, mem, num::NonZeroU32, ops::Range, ptr};
use std::{borrow::Cow, iter, mem, num::NonZeroU32, ops::Range, ptr};
mod life;
pub mod queue;
@ -141,14 +141,14 @@ impl UserClosures {
self.submissions.extend(other.submissions);
}
unsafe fn fire(self) {
//Note: this logic is specifically moved out of `handle_mapping()` in order to
fn fire(self) {
// Note: this logic is specifically moved out of `handle_mapping()` in order to
// have nothing locked by the time we execute users callback code.
for (operation, status) in self.mappings {
(operation.callback)(status, operation.user_data);
operation.callback.call(status);
}
for closure in self.submissions {
(closure.callback)(closure.user_data);
closure.call();
}
}
}
@ -256,7 +256,7 @@ impl<A: hal::Api> CommandAllocator<A> {
/// 1. `life_tracker` is locked after `hub.devices`, enforced by the type system
/// 1. `self.trackers` is locked last (unenforced)
/// 1. `self.trace` is locked last (unenforced)
pub struct Device<A: hal::Api> {
pub struct Device<A: HalApi> {
pub(crate) raw: A::Device,
pub(crate) adapter_id: Stored<id::AdapterId>,
pub(crate) queue: A::Queue,
@ -281,7 +281,7 @@ pub struct Device<A: hal::Api> {
/// All live resources allocated with this [`Device`].
///
/// Has to be locked temporarily only (locked last)
pub(crate) trackers: Mutex<TrackerSet>,
pub(crate) trackers: Mutex<Tracker<A>>,
// Life tracker should be locked right after the device and before anything else.
life_tracker: Mutex<life::LifetimeTracker<A>>,
/// Temporary storage for resource management functions. Cleared at the end
@ -306,7 +306,7 @@ pub enum CreateDeviceError {
FailedToCreateZeroBuffer(#[from] DeviceError),
}
impl<A: hal::Api> Device<A> {
impl<A: HalApi> Device<A> {
pub(crate) fn require_features(&self, feature: wgt::Features) -> Result<(), MissingFeatures> {
if self.features.contains(feature) {
Ok(())
@ -328,7 +328,6 @@ impl<A: hal::Api> Device<A> {
}
impl<A: HalApi> Device<A> {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
open: hal::OpenDevice<A>,
adapter_id: Stored<id::AdapterId>,
@ -394,7 +393,7 @@ impl<A: HalApi> Device<A> {
command_allocator: Mutex::new(com_alloc),
active_submission_index: 0,
fence,
trackers: Mutex::new(TrackerSet::new(A::VARIANT)),
trackers: Mutex::new(Tracker::new()),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: life::SuspectedResources::default(),
#[cfg(feature = "trace")]
@ -495,7 +494,7 @@ impl<A: HalApi> Device<A> {
fn untrack<'this, 'token: 'this, G: GlobalIdentityHandlerFactory>(
&'this mut self,
hub: &Hub<A, G>,
trackers: &TrackerSet,
trackers: &Tracker<A>,
token: &mut Token<'token, Self>,
) {
self.temp_suspected.clear();
@ -536,12 +535,12 @@ impl<A: HalApi> Device<A> {
self.temp_suspected.samplers.push(id);
}
}
for id in trackers.compute_pipes.used() {
for id in trackers.compute_pipelines.used() {
if compute_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.compute_pipelines.push(id);
}
}
for id in trackers.render_pipes.used() {
for id in trackers.render_pipelines.used() {
if render_pipe_guard[id].life_guard.ref_count.is_none() {
self.temp_suspected.render_pipelines.push(id);
}
@ -655,7 +654,7 @@ impl<A: HalApi> Device<A> {
desc.array_layer_count(),
),
full_range: TextureSelector {
levels: 0..desc.mip_level_count,
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
@ -876,7 +875,7 @@ impl<A: HalApi> Device<A> {
_ => texture.desc.array_layer_count(),
},
};
let level_end = texture.full_range.levels.end;
let level_end = texture.full_range.mips.end;
let layer_end = texture.full_range.layers.end;
if required_level_count > level_end {
return Err(resource::CreateTextureViewError::TooManyMipLevels {
@ -927,7 +926,7 @@ impl<A: HalApi> Device<A> {
.array_layer_count
.map_or(layer_end, |_| required_layer_count);
let selector = TextureSelector {
levels: desc.range.base_mip_level..end_level,
mips: desc.range.base_mip_level..end_level,
layers: desc.range.base_array_layer..end_layer,
};
@ -972,11 +971,11 @@ impl<A: HalApi> Device<A> {
wgt::TextureViewDimension::D3 => {
hal::TextureUses::RESOURCE
| hal::TextureUses::STORAGE_READ
| hal::TextureUses::STORAGE_WRITE
| hal::TextureUses::STORAGE_READ_WRITE
}
_ => hal::TextureUses::all(),
};
let mask_mip_level = if selector.levels.end - selector.levels.start != 1 {
let mask_mip_level = if selector.mips.end - selector.mips.start != 1 {
hal::TextureUses::RESOURCE
} else {
hal::TextureUses::all()
@ -1018,16 +1017,6 @@ impl<A: HalApi> Device<A> {
format_features: texture.format_features,
extent,
samples: texture.desc.sample_count,
// once a storage - forever a storage
sampled_internal_use: if texture
.desc
.usage
.contains(wgt::TextureUsages::STORAGE_BINDING)
{
hal::TextureUses::RESOURCE | hal::TextureUses::STORAGE_READ
} else {
hal::TextureUses::RESOURCE
},
selector,
life_guard: LifeGuard::new(desc.label.borrow_or_default()),
})
@ -1058,7 +1047,8 @@ impl<A: HalApi> Device<A> {
let anisotropy_clamp = if let Some(clamp) = desc.anisotropy_clamp {
let clamp = clamp.get();
let valid_clamp = clamp <= hal::MAX_ANISOTROPY && conv::is_power_of_two(clamp as u32);
let valid_clamp =
clamp <= hal::MAX_ANISOTROPY && conv::is_power_of_two_u32(clamp as u32);
if !valid_clamp {
return Err(resource::CreateSamplerError::InvalidClamp(clamp));
}
@ -1486,7 +1476,6 @@ impl<A: HalApi> Device<A> {
})
}
#[allow(clippy::too_many_arguments)]
fn create_buffer_binding<'a>(
bb: &binding_model::BufferBinding,
binding: u32,
@ -1494,7 +1483,7 @@ impl<A: HalApi> Device<A> {
used_buffer_ranges: &mut Vec<BufferInitTrackerAction>,
dynamic_binding_info: &mut Vec<binding_model::BindGroupDynamicBindingData>,
late_buffer_binding_sizes: &mut FastHashMap<u32, wgt::BufferSize>,
used: &mut TrackerSet,
used: &mut BindGroupStates<A>,
storage: &'a Storage<resource::Buffer<A>, id::BufferId>,
limits: &wgt::Limits,
) -> Result<hal::BufferBinding<'a, A>, binding_model::CreateBindGroupError> {
@ -1525,7 +1514,7 @@ impl<A: HalApi> Device<A> {
if read_only {
hal::BufferUses::STORAGE_READ
} else {
hal::BufferUses::STORAGE_READ | hal::BufferUses::STORAGE_WRITE
hal::BufferUses::STORAGE_READ_WRITE
},
limits.max_storage_buffer_binding_size,
),
@ -1543,8 +1532,8 @@ impl<A: HalApi> Device<A> {
let buffer = used
.buffers
.use_extend(storage, bb.buffer_id, (), internal_use)
.map_err(|_| Error::InvalidBuffer(bb.buffer_id))?;
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
check_buffer_usage(buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
@ -1613,26 +1602,26 @@ impl<A: HalApi> Device<A> {
fn create_texture_binding(
view: &resource::TextureView<A>,
texture_guard: &parking_lot::lock_api::RwLockReadGuard<
parking_lot::RawRwLock,
Storage<resource::Texture<A>, id::Id<resource::Texture<hal::api::Empty>>>,
>,
texture_guard: &Storage<resource::Texture<A>, id::TextureId>,
internal_use: hal::TextureUses,
pub_usage: wgt::TextureUsages,
used: &mut TrackerSet,
used: &mut BindGroupStates<A>,
used_texture_ranges: &mut Vec<TextureInitTrackerAction>,
) -> Result<(), binding_model::CreateBindGroupError> {
// Careful here: the texture may no longer have its own ref count,
// if it was deleted by the user.
let texture = &texture_guard[view.parent_id.value];
used.textures
.change_extend(
view.parent_id.value,
&view.parent_id.ref_count,
view.selector.clone(),
let texture = used
.textures
.add_single(
texture_guard,
view.parent_id.value.0,
view.parent_id.ref_count.clone(),
Some(view.selector.clone()),
internal_use,
)
.map_err(UsageConflict::from)?;
.ok_or(binding_model::CreateBindGroupError::InvalidTexture(
view.parent_id.value.0,
))?;
check_texture_usage(texture.desc.usage, pub_usage)?;
used_texture_ranges.push(TextureInitTrackerAction {
@ -1674,7 +1663,7 @@ impl<A: HalApi> Device<A> {
// it needs to be in BGL iteration order, not BG entry order.
let mut late_buffer_binding_sizes = FastHashMap::default();
// fill out the descriptors
let mut used = TrackerSet::new(A::VARIANT);
let mut used = BindGroupStates::new();
let (buffer_guard, mut token) = hub.buffers.read(token);
let (texture_guard, mut token) = hub.textures.read(&mut token); //skip token
@ -1738,8 +1727,8 @@ impl<A: HalApi> Device<A> {
wgt::BindingType::Sampler(ty) => {
let sampler = used
.samplers
.use_extend(&*sampler_guard, id, (), ())
.map_err(|_| Error::InvalidSampler(id))?;
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
// Allowed sampler values for filtering and comparison
let (allowed_filtering, allowed_comparison) = match ty {
@ -1787,8 +1776,8 @@ impl<A: HalApi> Device<A> {
for &id in bindings_array.iter() {
let sampler = used
.samplers
.use_extend(&*sampler_guard, id, (), ())
.map_err(|_| Error::InvalidSampler(id))?;
.add_single(&*sampler_guard, id)
.ok_or(Error::InvalidSampler(id))?;
hal_samplers.push(&sampler.raw);
}
@ -1797,8 +1786,8 @@ impl<A: HalApi> Device<A> {
Br::TextureView(id) => {
let view = used
.views
.use_extend(&*texture_view_guard, id, (), ())
.map_err(|_| Error::InvalidTextureView(id))?;
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) = Self::texture_use_parameters(
binding,
decl,
@ -1828,8 +1817,8 @@ impl<A: HalApi> Device<A> {
for &id in bindings_array.iter() {
let view = used
.views
.use_extend(&*texture_view_guard, id, (), ())
.map_err(|_| Error::InvalidTextureView(id))?;
.add_single(&*texture_view_guard, id)
.ok_or(Error::InvalidTextureView(id))?;
let (pub_usage, internal_use) =
Self::texture_use_parameters(binding, decl, view,
"SampledTextureArray, ReadonlyStorageTextureArray or WriteonlyStorageTextureArray")?;
@ -1858,6 +1847,8 @@ impl<A: HalApi> Device<A> {
});
}
used.optimize();
hal_entries.sort_by_key(|entry| entry.binding);
for (a, b) in hal_entries.iter().zip(hal_entries.iter().skip(1)) {
if a.binding == b.binding {
@ -1995,7 +1986,7 @@ impl<A: HalApi> Device<A> {
}
Ok((
wgt::TextureUsages::TEXTURE_BINDING,
view.sampled_internal_use,
hal::TextureUses::RESOURCE,
))
}
wgt::BindingType::StorageTexture {
@ -2018,7 +2009,7 @@ impl<A: HalApi> Device<A> {
});
}
let mip_level_count = view.selector.levels.end - view.selector.levels.start;
let mip_level_count = view.selector.mips.end - view.selector.mips.start;
if mip_level_count != 1 {
return Err(Error::InvalidStorageTextureMipLevelCount {
binding,
@ -2027,7 +2018,7 @@ impl<A: HalApi> Device<A> {
}
let internal_use = match access {
wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_WRITE,
wgt::StorageTextureAccess::WriteOnly => hal::TextureUses::STORAGE_READ_WRITE,
wgt::StorageTextureAccess::ReadOnly => {
if !view
.format_features
@ -2047,7 +2038,7 @@ impl<A: HalApi> Device<A> {
return Err(Error::StorageReadNotSupported(view.desc.format));
}
hal::TextureUses::STORAGE_WRITE | hal::TextureUses::STORAGE_READ
hal::TextureUses::STORAGE_READ_WRITE
}
};
Ok((wgt::TextureUsages::STORAGE_BINDING, internal_use))
@ -2551,7 +2542,7 @@ impl<A: HalApi> Device<A> {
let samples = {
let sc = desc.multisample.count;
if sc == 0 || sc > 32 || !conv::is_power_of_two(sc) {
if sc == 0 || sc > 32 || !conv::is_power_of_two_u32(sc) {
return Err(pipeline::CreateRenderPipelineError::InvalidSampleCount(sc));
}
sc
@ -2879,7 +2870,7 @@ impl<A: HalApi> Device<A> {
}
}
impl<A: hal::Api> Device<A> {
impl<A: HalApi> Device<A> {
pub(crate) fn destroy_buffer(&self, buffer: resource::Buffer<A>) {
if let Some(raw) = buffer.raw {
unsafe {
@ -2925,7 +2916,7 @@ impl<A: hal::Api> Device<A> {
}
}
impl<A: hal::Api> crate::hub::Resource for Device<A> {
impl<A: HalApi> crate::hub::Resource for Device<A> {
const TYPE: &'static str = "Device";
fn life_guard(&self) -> &LifeGuard {
@ -2981,7 +2972,7 @@ pub struct ImplicitPipelineIds<'a, G: GlobalIdentityHandlerFactory> {
}
impl<G: GlobalIdentityHandlerFactory> ImplicitPipelineIds<'_, G> {
fn prepare<A: hal::Api>(self, hub: &Hub<A, G>) -> ImplicitPipelineContext {
fn prepare<A: HalApi>(self, hub: &Hub<A, G>) -> ImplicitPipelineContext {
ImplicitPipelineContext {
root_id: hub.pipeline_layouts.prepare(self.root_id).into_id(),
group_ids: self
@ -3184,8 +3175,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.trackers
.lock()
.buffers
.init(id, ref_count, BufferState::with_usage(buffer_use))
.unwrap();
.insert_single(id, ref_count, buffer_use);
return (id.0, None);
};
@ -3193,6 +3184,43 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
(id, Some(error))
}
/// Assign `id_in` an error with the given `label`.
///
/// Ensure that future attempts to use `id_in` as a buffer ID will propagate
/// the error, following the WebGPU ["contagious invalidity"] style.
///
/// Firefox uses this function to comply strictly with the WebGPU spec,
/// which requires [`GPUBufferDescriptor`] validation errors to be generated on
/// the Device timeline, leaving the newly created [`GPUBuffer`] invalid.
///
/// Ideally, we would simply let [`device_create_buffer`] take care of all
/// of this, but some errors must be detected before we can even construct a
/// [`wgpu_types::BufferDescriptor`] to give it. For example, the WebGPU API
/// allows a `GPUBufferDescriptor`'s [`usage`] property to be any WebIDL
/// `unsigned long` value, but we can't construct a
/// [`wgpu_types::BufferUsages`] value from values with unassigned bits
/// set. This means we must validate `usage` before we can call
/// `device_create_buffer`.
///
/// When that validation fails, we must arrange for the buffer id to be
/// considered invalid. This method provides the means to do so.
///
/// ["contagious invalidity"]: https://www.w3.org/TR/webgpu/#invalidity
/// [`GPUBufferDescriptor`]: https://www.w3.org/TR/webgpu/#dictdef-gpubufferdescriptor
/// [`GPUBuffer`]: https://www.w3.org/TR/webgpu/#gpubuffer
/// [`wgpu_types::BufferDescriptor`]: wgt::BufferDescriptor
/// [`device_create_buffer`]: Global::device_create_buffer
/// [`usage`]: https://www.w3.org/TR/webgpu/#dom-gputexturedescriptor-usage
/// [`wgpu_types::BufferUsages`]: wgt::BufferUsages
pub fn create_buffer_error<A: HalApi>(&self, id_in: Input<G, id::BufferId>, label: Label) {
let hub = A::hub(self);
let mut token = Token::root();
let fid = hub.buffers.prepare(id_in);
let (_, mut token) = hub.devices.read(&mut token);
fid.assign_error(label.borrow_or_default(), &mut token);
}
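A sketch of the caller-side flow this enables; the names here (`raw_usage`, `id_in`, `global`) are illustrative, not Firefox's actual bindings:

// Hypothetical caller: `raw_usage` is the untrusted WebIDL `unsigned long`.
match wgt::BufferUsages::from_bits(raw_usage) {
    Some(usage) => {
        // Representable: build a wgt::BufferDescriptor and call
        // device_create_buffer as usual.
    }
    None => {
        // Unassigned bits set: mark the id invalid on the device timeline.
        global.create_buffer_error::<A>(id_in, Some(std::borrow::Cow::Borrowed("invalid usage")));
    }
}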
#[cfg(feature = "replay")]
pub fn device_wait_for_buffer<A: HalApi>(
&self,
@ -3446,19 +3474,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Ok(texture) => texture,
Err(error) => break error,
};
let num_levels = texture.full_range.levels.end;
let num_layers = texture.full_range.layers.end;
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device
.trackers
.lock()
.textures
.init(id, ref_count, TextureState::new(num_levels, num_layers))
.unwrap();
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
@ -3524,19 +3550,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
texture.initialization_status = TextureInitTracker::new(desc.mip_level_count, 0);
let num_levels = texture.full_range.levels.end;
let num_layers = texture.full_range.layers.end;
let ref_count = texture.life_guard.add_ref();
let id = fid.assign(texture, &mut token);
log::info!("Created texture {:?} with {:?}", id, desc);
device
.trackers
.lock()
.textures
.init(id, ref_count, TextureState::new(num_levels, num_layers))
.unwrap();
device.trackers.lock().textures.insert_single(
id.0,
ref_count,
hal::TextureUses::UNINITIALIZED,
);
return (id.0, None);
};
@ -3694,12 +3718,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = view.life_guard.add_ref();
let id = fid.assign(view, &mut token);
device
.trackers
.lock()
.views
.init(id, ref_count, PhantomData)
.unwrap();
device.trackers.lock().views.insert_single(id, ref_count);
return (id.0, None);
};
@ -3792,12 +3811,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = sampler.life_guard.add_ref();
let id = fid.assign(sampler, &mut token);
device
.trackers
.lock()
.samplers
.init(id, ref_count, PhantomData)
.unwrap();
device.trackers.lock().samplers.insert_single(id, ref_count);
return (id.0, None);
};
@ -4055,18 +4070,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let ref_count = bind_group.life_guard.add_ref();
let id = fid.assign(bind_group, &mut token);
log::debug!(
"Bind group {:?} {:#?}",
id,
hub.bind_groups.read(&mut token).0[id].used
);
log::debug!("Bind group {:?}", id,);
device
.trackers
.lock()
.bind_groups
.init(id, ref_count, PhantomData)
.unwrap();
.insert_single(id, ref_count);
return (id.0, None);
};
@ -4369,16 +4379,11 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(e) => break e,
};
log::debug!("Render bundle {:#?}", render_bundle.used);
log::debug!("Render bundle");
let ref_count = render_bundle.life_guard.add_ref();
let id = fid.assign(render_bundle, &mut token);
device
.trackers
.lock()
.bundles
.init(id, ref_count, PhantomData)
.unwrap();
device.trackers.lock().bundles.insert_single(id, ref_count);
return (id.0, None);
};
@ -4457,8 +4462,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.trackers
.lock()
.query_sets
.init(id, ref_count, PhantomData)
.unwrap();
.insert_single(id, ref_count);
return (id.0, None);
};
@ -4552,9 +4556,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device
.trackers
.lock()
.render_pipes
.init(id, ref_count, PhantomData)
.unwrap();
.render_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
@ -4693,9 +4697,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device
.trackers
.lock()
.compute_pipes
.init(id, ref_count, PhantomData)
.unwrap();
.compute_pipelines
.insert_single(id, ref_count);
return (id.0, None);
};
@ -4965,9 +4968,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|_| DeviceError::Invalid)?
.maintain(hub, force_wait, &mut token)?
};
unsafe {
closures.fire();
}
closures.fire();
Ok(queue_empty)
}
@ -5045,9 +5048,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
self.poll_devices::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
}
unsafe {
closures.fire();
}
closures.fire();
Ok(all_queue_empty)
}
@ -5154,7 +5155,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
return Err(resource::BufferAccessError::AlreadyMapped);
}
resource::BufferMapState::Waiting(_) => {
op.call_error();
op.callback.call_error();
return Ok(());
}
resource::BufferMapState::Idle => {
@ -5167,16 +5168,20 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
log::debug!("Buffer {:?} map state -> Waiting", buffer_id);
(buffer.device_id.value, buffer.life_guard.add_ref())
let device = &device_guard[buffer.device_id.value];
let ret = (buffer.device_id.value, buffer.life_guard.add_ref());
let mut trackers = device.trackers.lock();
trackers
.buffers
.set_single(&*buffer_guard, buffer_id, internal_use);
trackers.buffers.drain();
ret
};
let device = &device_guard[device_id];
device.trackers.lock().buffers.change_replace(
id::Valid(buffer_id),
&ref_count,
(),
internal_use,
);
device
.lock_life(&mut token)
@ -5367,9 +5372,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//Note: outside inner function so no locks are held when calling the callback
let closure = self.buffer_unmap_inner::<A>(buffer_id)?;
if let Some((operation, status)) = closure {
unsafe {
(operation.callback)(status, operation.user_data);
}
operation.callback.call(status);
}
Ok(())
}

third_party/rust/wgpu-core/src/device/queue.rs (vendored)

@ -28,16 +28,56 @@ use thiserror::Error;
/// without a concrete moment of when it can be cleared.
const WRITE_COMMAND_BUFFERS_PER_POOL: usize = 64;
pub type OnSubmittedWorkDoneCallback = unsafe extern "C" fn(user_data: *mut u8);
#[repr(C)]
#[derive(Clone, Copy, Debug)]
pub struct SubmittedWorkDoneClosure {
pub callback: OnSubmittedWorkDoneCallback,
pub user_data: *mut u8,
pub struct SubmittedWorkDoneClosureC {
callback: unsafe extern "C" fn(user_data: *mut u8),
user_data: *mut u8,
}
unsafe impl Send for SubmittedWorkDoneClosure {}
unsafe impl Sync for SubmittedWorkDoneClosure {}
unsafe impl Send for SubmittedWorkDoneClosureC {}
pub struct SubmittedWorkDoneClosure {
// We wrap this so that constructing the C variant can be unsafe,
// allowing our `call` function to be safe.
inner: SubmittedWorkDoneClosureInner,
}
enum SubmittedWorkDoneClosureInner {
Rust {
callback: Box<dyn FnOnce() + Send + 'static>,
},
C {
inner: SubmittedWorkDoneClosureC,
},
}
impl SubmittedWorkDoneClosure {
pub fn from_rust(callback: Box<dyn FnOnce() + Send + 'static>) -> Self {
Self {
inner: SubmittedWorkDoneClosureInner::Rust { callback },
}
}
/// # Safety
///
/// - The callback pointer must be valid to call with the provided user_data pointer.
/// - Both pointers must point to 'static data as the callback may happen at an unspecified time.
pub unsafe fn from_c(inner: SubmittedWorkDoneClosureC) -> Self {
Self {
inner: SubmittedWorkDoneClosureInner::C { inner },
}
}
pub(crate) fn call(self) {
match self.inner {
SubmittedWorkDoneClosureInner::Rust { callback } => callback(),
// SAFETY: the contract of the call to from_c says that this unsafe is sound.
SubmittedWorkDoneClosureInner::C { inner } => unsafe {
(inner.callback)(inner.user_data)
},
}
}
}
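A minimal usage sketch for the safe constructor; C callers go through `from_c` and its safety contract instead, since `SubmittedWorkDoneClosureC`'s fields are private outside the crate:

// Sketch: wrap a Rust callback; `call` consumes the closure exactly once.
let done = SubmittedWorkDoneClosure::from_rust(Box::new(|| {
    log::debug!("submitted work done");
}));
done.call();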
struct StagingData<A: hal::Api> {
buffer: A::Buffer,
@ -192,7 +232,7 @@ impl<A: hal::Api> PendingWrites<A> {
}
}
impl<A: hal::Api> super::Device<A> {
impl<A: HalApi> super::Device<A> {
fn prepare_stage(&mut self, size: wgt::BufferAddress) -> Result<StagingData<A>, DeviceError> {
profiling::scope!("prepare_stage");
let stage_desc = hal::BufferDescriptor {
@ -286,8 +326,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let mut trackers = device.trackers.lock();
let (dst, transition) = trackers
.buffers
.use_replace(&*buffer_guard, buffer_id, (), hal::BufferUses::COPY_DST)
.map_err(TransferError::InvalidBuffer)?;
.set_single(&*buffer_guard, buffer_id, hal::BufferUses::COPY_DST)
.ok_or(TransferError::InvalidBuffer(buffer_id))?;
let dst_raw = dst
.raw
.as_ref()
@ -451,9 +491,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.drain(init_layer_range)
.collect::<Vec<std::ops::Range<u32>>>()
{
crate::command::clear_texture_no_device(
crate::command::clear_texture(
&*texture_guard,
id::Valid(destination.texture),
&*dst,
TextureInitRange {
mip_range: destination.mip_level..(destination.mip_level + 1),
layer_range,
@ -473,13 +513,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (dst, transition) = trackers
.textures
.use_replace(
.set_single(
&*texture_guard,
destination.texture,
selector,
hal::TextureUses::COPY_DST,
)
.unwrap();
.ok_or(TransferError::InvalidTexture(destination.texture))?;
let (hal_copy_size, array_layer_count) =
validate_texture_copy_range(destination, &dst.desc, CopySide::Destination, size)?;
@ -561,7 +601,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.ok_or(TransferError::InvalidTexture(destination.texture))?;
unsafe {
encoder.transition_textures(transition.map(|pending| pending.into_hal(dst)));
encoder
.transition_textures(transition.map(|pending| pending.into_hal(dst)).into_iter());
encoder.transition_buffers(iter::once(barrier));
encoder.copy_buffer_to_texture(&stage.buffer, dst_raw, regions);
}
@ -594,7 +635,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
device.active_submission_index += 1;
let submit_index = device.active_submission_index;
let mut active_executions = Vec::new();
let mut used_surface_textures = track::ResourceTracker::new(A::VARIANT);
let mut used_surface_textures = track::TextureUsageScope::new();
{
let (mut command_buffer_guard, mut token) = hub.command_buffers.write(&mut token);
@ -616,12 +657,15 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
//Note: locking the trackers has to be done after the storages
let mut trackers = device.trackers.lock();
used_surface_textures.set_size(texture_guard.len());
//TODO: if multiple command buffers are submitted, we can re-use the last
// native command buffer of the previous chain instead of always creating
// a temporary one, since the chains are not finished.
// finish all the command buffers first
for &cmb_id in command_buffer_ids {
#[allow(unused_mut)]
let mut cmdbuf = match hub
.command_buffers
.unregister_locked(cmb_id, &mut *command_buffer_guard)
@ -642,7 +686,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
// optimize the tracked states
cmdbuf.trackers.optimize();
// cmdbuf.trackers.optimize();
// update submission IDs
for id in cmdbuf.trackers.buffers.used() {
@ -669,33 +713,35 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
for id in cmdbuf.trackers.textures.used() {
let texture = &mut texture_guard[id];
match texture.inner {
let should_extend = match texture.inner {
TextureInner::Native { raw: None } => {
return Err(QueueSubmitError::DestroyedTexture(id.0));
}
TextureInner::Native { raw: Some(_) } => {}
TextureInner::Native { raw: Some(_) } => false,
TextureInner::Surface {
ref mut has_work, ..
} => {
use track::ResourceState as _;
*has_work = true;
let ref_count = cmdbuf.trackers.textures.get_ref_count(id);
//TODO: better error handling here?
// register it in the temporary tracker.
let mut ts = track::TextureState::default();
let _ = ts.change(
id,
texture.full_range.clone(),
hal::TextureUses::empty(), //present
None,
);
let _ = used_surface_textures.init(id, ref_count.clone(), ts);
true
}
}
};
if !texture.life_guard.use_at(submit_index) {
device.temp_suspected.textures.push(id);
}
if should_extend {
unsafe {
let ref_count = cmdbuf.trackers.textures.get_ref_count(id);
used_surface_textures
.merge_single(
&*texture_guard,
id,
None,
ref_count,
hal::TextureUses::PRESENT,
)
.unwrap();
};
}
}
for id in cmdbuf.trackers.views.used() {
if !texture_view_guard[id].life_guard.use_at(submit_index) {
@ -717,13 +763,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
sampler_guard[sub_id].life_guard.use_at(submit_index);
}
}
assert!(cmdbuf.trackers.samplers.is_empty());
for id in cmdbuf.trackers.compute_pipes.used() {
// assert!(cmdbuf.trackers.samplers.is_empty());
for id in cmdbuf.trackers.compute_pipelines.used() {
if !compute_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.compute_pipelines.push(id);
}
}
for id in cmdbuf.trackers.render_pipes.used() {
for id in cmdbuf.trackers.render_pipelines.used() {
if !render_pipe_guard[id].life_guard.use_at(submit_index) {
device.temp_suspected.render_pipelines.push(id);
}
@ -741,12 +787,12 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// We need to update the submission indices for the contained
// state-less (!) resources as well, excluding the bind groups.
// They don't get deleted too early if the bundle goes out of scope.
for sub_id in bundle.used.compute_pipes.used() {
compute_pipe_guard[sub_id].life_guard.use_at(submit_index);
}
for sub_id in bundle.used.render_pipes.used() {
for sub_id in bundle.used.render_pipelines.used() {
render_pipe_guard[sub_id].life_guard.use_at(submit_index);
}
for sub_id in bundle.used.query_sets.used() {
query_set_guard[sub_id].life_guard.use_at(submit_index);
}
}
let mut baked = cmdbuf.into_baked();
@ -766,11 +812,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.map_err(|err| QueueSubmitError::DestroyedTexture(err.0))?;
//Note: stateless trackers are not merged:
// device already knows these resources exist.
CommandBuffer::insert_barriers(
CommandBuffer::insert_barriers_from_tracker(
&mut baked.encoder,
&mut *trackers,
&baked.trackers.buffers,
&baked.trackers.textures,
&baked.trackers,
&*buffer_guard,
&*texture_guard,
);
@ -788,19 +833,19 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
.begin_encoding(Some("(wgpu internal) Present"))
.map_err(DeviceError::from)?
};
let texture_barriers = trackers
trackers
.textures
.merge_replace(&used_surface_textures)
.map(|pending| {
let tex = &texture_guard[pending.id];
pending.into_hal(tex)
});
.set_from_usage_scope(&*texture_guard, &used_surface_textures);
let texture_barriers = trackers.textures.drain().map(|pending| {
let tex = unsafe { texture_guard.get_unchecked(pending.id) };
pending.into_hal(tex)
});
let present = unsafe {
baked.encoder.transition_textures(texture_barriers);
baked.encoder.end_encoding().unwrap()
};
baked.list.push(present);
used_surface_textures.clear();
used_surface_textures = track::TextureUsageScope::new();
}
// done
@ -810,7 +855,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
});
}
log::trace!("Device after submission {}: {:#?}", submit_index, trackers);
log::trace!("Device after submission {}", submit_index);
}
let super::Device {
@ -830,6 +875,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let (_, mut token) = hub.buffers.read(&mut token); // skip token
let (mut texture_guard, _) = hub.textures.write(&mut token);
used_surface_textures.set_size(texture_guard.len());
for &id in pending_writes.dst_textures.iter() {
let texture = texture_guard.get_mut(id).unwrap();
match texture.inner {
@ -840,39 +887,39 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
TextureInner::Surface {
ref mut has_work, ..
} => {
use track::ResourceState as _;
*has_work = true;
let ref_count = texture.life_guard.add_ref();
//TODO: better error handling here?
// register it in the temporary tracker.
let mut ts = track::TextureState::default();
let _ = ts.change(
id::Valid(id),
texture.full_range.clone(),
hal::TextureUses::empty(), //present
None,
);
let _ = used_surface_textures.init(id::Valid(id), ref_count, ts);
unsafe {
used_surface_textures
.merge_single(
&*texture_guard,
id::Valid(id),
None,
&ref_count,
hal::TextureUses::PRESENT,
)
.unwrap()
};
}
}
}
if !used_surface_textures.is_empty() {
let mut trackers = device.trackers.lock();
let texture_barriers = trackers
trackers
.textures
.merge_replace(&used_surface_textures)
.map(|pending| {
let tex = &texture_guard[pending.id];
pending.into_hal(tex)
});
.set_from_usage_scope(&*texture_guard, &used_surface_textures);
let texture_barriers = trackers.textures.drain().map(|pending| {
let tex = unsafe { texture_guard.get_unchecked(pending.id) };
pending.into_hal(tex)
});
unsafe {
pending_writes
.command_encoder
.transition_textures(texture_barriers);
};
used_surface_textures.clear();
}
}
@ -925,9 +972,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
};
// the closures should execute with nothing locked!
unsafe {
callbacks.fire();
}
callbacks.fire();
Ok(())
}
@ -950,7 +996,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
closure: SubmittedWorkDoneClosure,
) -> Result<(), InvalidQueue> {
//TODO: flush pending writes
let added = {
let closure_opt = {
let hub = A::hub(self);
let mut token = Token::root();
let (device_guard, mut token) = hub.devices.read(&mut token);
@ -959,10 +1005,8 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
Err(_) => return Err(InvalidQueue),
}
};
if !added {
unsafe {
(closure.callback)(closure.user_data);
}
if let Some(closure) = closure_opt {
closure.call();
}
Ok(())
}

third_party/rust/wgpu-core/src/hub.rs (vendored)

@ -194,6 +194,14 @@ impl<T, I: id::TypedId> Storage<T, I> {
result
}
pub(crate) unsafe fn get_unchecked(&self, id: u32) -> &T {
match self.map[id as usize] {
Element::Occupied(ref v, _) => v,
Element::Vacant => panic!("{}[{}] does not exist", self.kind, id),
Element::Error(_, _) => panic!(""),
}
}
pub(crate) fn label_for_invalid_id(&self, id: I) -> &str {
let (index, _, _) = id.unzip();
match self.map.get(index as usize) {
@ -266,6 +274,10 @@ impl<T, I: id::TypedId> Storage<T, I> {
})
}
pub(crate) fn len(&self) -> usize {
self.map.len()
}
fn generate_report(&self) -> StorageReport {
let mut report = StorageReport {
element_size: mem::size_of::<T>(),
@ -298,56 +310,56 @@ pub enum Root {}
impl Access<Instance> for Root {}
impl Access<Surface> for Root {}
impl Access<Surface> for Instance {}
impl<A: hal::Api> Access<Adapter<A>> for Root {}
impl<A: hal::Api> Access<Adapter<A>> for Surface {}
impl<A: hal::Api> Access<Device<A>> for Root {}
impl<A: hal::Api> Access<Device<A>> for Surface {}
impl<A: hal::Api> Access<Device<A>> for Adapter<A> {}
impl<A: hal::Api> Access<PipelineLayout<A>> for Root {}
impl<A: hal::Api> Access<PipelineLayout<A>> for Device<A> {}
impl<A: hal::Api> Access<PipelineLayout<A>> for RenderBundle {}
impl<A: hal::Api> Access<BindGroupLayout<A>> for Root {}
impl<A: hal::Api> Access<BindGroupLayout<A>> for Device<A> {}
impl<A: hal::Api> Access<BindGroupLayout<A>> for PipelineLayout<A> {}
impl<A: hal::Api> Access<BindGroup<A>> for Root {}
impl<A: hal::Api> Access<BindGroup<A>> for Device<A> {}
impl<A: hal::Api> Access<BindGroup<A>> for BindGroupLayout<A> {}
impl<A: hal::Api> Access<BindGroup<A>> for PipelineLayout<A> {}
impl<A: hal::Api> Access<BindGroup<A>> for CommandBuffer<A> {}
impl<A: hal::Api> Access<CommandBuffer<A>> for Root {}
impl<A: hal::Api> Access<CommandBuffer<A>> for Device<A> {}
impl<A: hal::Api> Access<RenderBundle> for Device<A> {}
impl<A: hal::Api> Access<RenderBundle> for CommandBuffer<A> {}
impl<A: hal::Api> Access<ComputePipeline<A>> for Device<A> {}
impl<A: hal::Api> Access<ComputePipeline<A>> for BindGroup<A> {}
impl<A: hal::Api> Access<RenderPipeline<A>> for Device<A> {}
impl<A: hal::Api> Access<RenderPipeline<A>> for BindGroup<A> {}
impl<A: hal::Api> Access<RenderPipeline<A>> for ComputePipeline<A> {}
impl<A: hal::Api> Access<QuerySet<A>> for Root {}
impl<A: hal::Api> Access<QuerySet<A>> for Device<A> {}
impl<A: hal::Api> Access<QuerySet<A>> for CommandBuffer<A> {}
impl<A: hal::Api> Access<QuerySet<A>> for RenderPipeline<A> {}
impl<A: hal::Api> Access<QuerySet<A>> for ComputePipeline<A> {}
impl<A: hal::Api> Access<QuerySet<A>> for Sampler<A> {}
impl<A: hal::Api> Access<ShaderModule<A>> for Device<A> {}
impl<A: hal::Api> Access<ShaderModule<A>> for BindGroupLayout<A> {}
impl<A: hal::Api> Access<Buffer<A>> for Root {}
impl<A: hal::Api> Access<Buffer<A>> for Device<A> {}
impl<A: hal::Api> Access<Buffer<A>> for BindGroupLayout<A> {}
impl<A: hal::Api> Access<Buffer<A>> for BindGroup<A> {}
impl<A: hal::Api> Access<Buffer<A>> for CommandBuffer<A> {}
impl<A: hal::Api> Access<Buffer<A>> for ComputePipeline<A> {}
impl<A: hal::Api> Access<Buffer<A>> for RenderPipeline<A> {}
impl<A: hal::Api> Access<Buffer<A>> for QuerySet<A> {}
impl<A: hal::Api> Access<Texture<A>> for Root {}
impl<A: hal::Api> Access<Texture<A>> for Device<A> {}
impl<A: hal::Api> Access<Texture<A>> for Buffer<A> {}
impl<A: hal::Api> Access<TextureView<A>> for Root {}
impl<A: hal::Api> Access<TextureView<A>> for Device<A> {}
impl<A: hal::Api> Access<TextureView<A>> for Texture<A> {}
impl<A: hal::Api> Access<Sampler<A>> for Root {}
impl<A: hal::Api> Access<Sampler<A>> for Device<A> {}
impl<A: hal::Api> Access<Sampler<A>> for TextureView<A> {}
impl<A: HalApi> Access<Adapter<A>> for Root {}
impl<A: HalApi> Access<Adapter<A>> for Surface {}
impl<A: HalApi> Access<Device<A>> for Root {}
impl<A: HalApi> Access<Device<A>> for Surface {}
impl<A: HalApi> Access<Device<A>> for Adapter<A> {}
impl<A: HalApi> Access<PipelineLayout<A>> for Root {}
impl<A: HalApi> Access<PipelineLayout<A>> for Device<A> {}
impl<A: HalApi> Access<PipelineLayout<A>> for RenderBundle<A> {}
impl<A: HalApi> Access<BindGroupLayout<A>> for Root {}
impl<A: HalApi> Access<BindGroupLayout<A>> for Device<A> {}
impl<A: HalApi> Access<BindGroupLayout<A>> for PipelineLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for Root {}
impl<A: HalApi> Access<BindGroup<A>> for Device<A> {}
impl<A: HalApi> Access<BindGroup<A>> for BindGroupLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for PipelineLayout<A> {}
impl<A: HalApi> Access<BindGroup<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<CommandBuffer<A>> for Root {}
impl<A: HalApi> Access<CommandBuffer<A>> for Device<A> {}
impl<A: HalApi> Access<RenderBundle<A>> for Device<A> {}
impl<A: HalApi> Access<RenderBundle<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<ComputePipeline<A>> for Device<A> {}
impl<A: HalApi> Access<ComputePipeline<A>> for BindGroup<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for Device<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for BindGroup<A> {}
impl<A: HalApi> Access<RenderPipeline<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<QuerySet<A>> for Root {}
impl<A: HalApi> Access<QuerySet<A>> for Device<A> {}
impl<A: HalApi> Access<QuerySet<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<QuerySet<A>> for RenderPipeline<A> {}
impl<A: HalApi> Access<QuerySet<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<QuerySet<A>> for Sampler<A> {}
impl<A: HalApi> Access<ShaderModule<A>> for Device<A> {}
impl<A: HalApi> Access<ShaderModule<A>> for BindGroupLayout<A> {}
impl<A: HalApi> Access<Buffer<A>> for Root {}
impl<A: HalApi> Access<Buffer<A>> for Device<A> {}
impl<A: HalApi> Access<Buffer<A>> for BindGroupLayout<A> {}
impl<A: HalApi> Access<Buffer<A>> for BindGroup<A> {}
impl<A: HalApi> Access<Buffer<A>> for CommandBuffer<A> {}
impl<A: HalApi> Access<Buffer<A>> for ComputePipeline<A> {}
impl<A: HalApi> Access<Buffer<A>> for RenderPipeline<A> {}
impl<A: HalApi> Access<Buffer<A>> for QuerySet<A> {}
impl<A: HalApi> Access<Texture<A>> for Root {}
impl<A: HalApi> Access<Texture<A>> for Device<A> {}
impl<A: HalApi> Access<Texture<A>> for Buffer<A> {}
impl<A: HalApi> Access<TextureView<A>> for Root {}
impl<A: HalApi> Access<TextureView<A>> for Device<A> {}
impl<A: HalApi> Access<TextureView<A>> for Texture<A> {}
impl<A: HalApi> Access<Sampler<A>> for Root {}
impl<A: HalApi> Access<Sampler<A>> for Device<A> {}
impl<A: HalApi> Access<Sampler<A>> for TextureView<A> {}
#[cfg(debug_assertions)]
thread_local! {
@ -614,7 +626,7 @@ impl HubReport {
}
}
pub struct Hub<A: hal::Api, F: GlobalIdentityHandlerFactory> {
pub struct Hub<A: HalApi, F: GlobalIdentityHandlerFactory> {
pub adapters: Registry<Adapter<A>, id::AdapterId, F>,
pub devices: Registry<Device<A>, id::DeviceId, F>,
pub pipeline_layouts: Registry<PipelineLayout<A>, id::PipelineLayoutId, F>,
@ -622,7 +634,7 @@ pub struct Hub<A: hal::Api, F: GlobalIdentityHandlerFactory> {
pub bind_group_layouts: Registry<BindGroupLayout<A>, id::BindGroupLayoutId, F>,
pub bind_groups: Registry<BindGroup<A>, id::BindGroupId, F>,
pub command_buffers: Registry<CommandBuffer<A>, id::CommandBufferId, F>,
pub render_bundles: Registry<RenderBundle, id::RenderBundleId, F>,
pub render_bundles: Registry<RenderBundle<A>, id::RenderBundleId, F>,
pub render_pipelines: Registry<RenderPipeline<A>, id::RenderPipelineId, F>,
pub compute_pipelines: Registry<ComputePipeline<A>, id::ComputePipelineId, F>,
pub query_sets: Registry<QuerySet<A>, id::QuerySetId, F>,
@ -907,6 +919,17 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
}
/// # Safety
///
/// - The raw handle obtained from the hal Instance must not be manually destroyed
pub unsafe fn instance_as_hal<A: HalApi, F: FnOnce(Option<&A::Instance>) -> R, R>(
&self,
hal_instance_callback: F,
) -> R {
let hal_instance = A::instance_as_hal(&self.instance);
hal_instance_callback(hal_instance)
}
pub fn clear_backend<A: HalApi>(&self, _dummy: ()) {
let mut surface_guard = self.surfaces.data.write();
let hub = A::hub(self);
@ -991,11 +1014,31 @@ impl<G: GlobalIdentityHandlerFactory> Drop for Global<G> {
pub trait HalApi: hal::Api {
const VARIANT: Backend;
fn create_instance_from_hal(name: &str, hal_instance: Self::Instance) -> Instance;
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance>;
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G>;
fn get_surface(surface: &Surface) -> &HalSurface<Self>;
fn get_surface_mut(surface: &mut Surface) -> &mut HalSurface<Self>;
}
impl HalApi for hal::api::Empty {
const VARIANT: Backend = Backend::Empty;
fn create_instance_from_hal(_: &str, _: Self::Instance) -> Instance {
unimplemented!("called empty api")
}
fn instance_as_hal(_: &Instance) -> Option<&Self::Instance> {
unimplemented!("called empty api")
}
fn hub<G: GlobalIdentityHandlerFactory>(_: &Global<G>) -> &Hub<Self, G> {
unimplemented!("called empty api")
}
fn get_surface(_: &Surface) -> &HalSurface<Self> {
unimplemented!("called empty api")
}
fn get_surface_mut(_: &mut Surface) -> &mut HalSurface<Self> {
unimplemented!("called empty api")
}
}
#[cfg(vulkan)]
impl HalApi for hal::api::Vulkan {
const VARIANT: Backend = Backend::Vulkan;
@ -1006,6 +1049,9 @@ impl HalApi for hal::api::Vulkan {
..Default::default()
}
}
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
instance.vulkan.as_ref()
}
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.vulkan
}
@ -1027,6 +1073,9 @@ impl HalApi for hal::api::Metal {
..Default::default()
}
}
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
instance.metal.as_ref()
}
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.metal
}
@ -1048,6 +1097,9 @@ impl HalApi for hal::api::Dx12 {
..Default::default()
}
}
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
instance.dx12.as_ref()
}
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.dx12
}
@ -1069,6 +1121,9 @@ impl HalApi for hal::api::Dx11 {
..Default::default()
}
}
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
instance.dx11.as_ref()
}
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.dx11
}
@ -1091,6 +1146,9 @@ impl HalApi for hal::api::Gles {
..Default::default()
}
}
fn instance_as_hal(instance: &Instance) -> Option<&Self::Instance> {
instance.gl.as_ref()
}
fn hub<G: GlobalIdentityHandlerFactory>(global: &Global<G>) -> &Hub<Self, G> {
&global.hubs.gl
}
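A hedged sketch of driving the new `instance_as_hal` plumbing end to end, assuming a `Global` named `global` built with the Vulkan backend enabled:

// Sketch: borrow the backend-specific hal instance, if that backend is live.
let vulkan_is_live = unsafe {
    global.instance_as_hal::<hal::api::Vulkan, _, _>(|inst| inst.is_some())
};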

third_party/rust/wgpu-core/src/id.rs (vendored)

@ -64,9 +64,9 @@ impl<T> From<SerialId> for Id<T> {
}
impl<T> Id<T> {
#[cfg(test)]
pub(crate) fn dummy() -> Valid<Self> {
Valid(Id(NonZeroId::new(1).unwrap(), PhantomData))
#[allow(dead_code)]
pub(crate) fn dummy(index: u32) -> Valid<Self> {
Valid(Id::zip(index, 1, Backend::Empty))
}
pub fn backend(self) -> Backend {
@ -135,7 +135,7 @@ pub(crate) struct Valid<I>(pub I);
/// Most `wgpu-core` clients should not use this trait. Unusual clients that
/// need to construct `Id` values directly, or access their components, like the
/// WGPU recording player, may use this trait to do so.
pub trait TypedId {
pub trait TypedId: Copy {
fn zip(index: Index, epoch: Epoch, backend: Backend) -> Self;
fn unzip(self) -> (Index, Epoch, Backend);
}
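For orientation, the `TypedId` contract that the reworked `dummy(index)` helper leans on, sketched with `Backend::Empty` as the placeholder backend:

// Sketch: an id packs (index, epoch, backend); zip and unzip round-trip.
let id = id::BufferId::zip(7, 1, Backend::Empty);
assert_eq!(id.unzip(), (7, 1, Backend::Empty));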
@ -184,7 +184,7 @@ pub type CommandBufferId = Id<crate::command::CommandBuffer<Dummy>>;
pub type RenderPassEncoderId = *mut crate::command::RenderPass;
pub type ComputePassEncoderId = *mut crate::command::ComputePass;
pub type RenderBundleEncoderId = *mut crate::command::RenderBundleEncoder;
pub type RenderBundleId = Id<crate::command::RenderBundle>;
pub type RenderBundleId = Id<crate::command::RenderBundle<Dummy>>;
pub type QuerySetId = Id<crate::resource::QuerySet<Dummy>>;
#[test]

third_party/rust/wgpu-core/src/init_tracker/texture.rs vendored
View file

@ -26,7 +26,7 @@ pub(crate) fn has_copy_partial_init_tracker_coverage(
impl From<TextureSelector> for TextureInitRange {
fn from(selector: TextureSelector) -> Self {
TextureInitRange {
mip_range: selector.levels,
mip_range: selector.mips,
layer_range: selector.layers,
}
}

3
third_party/rust/wgpu-core/src/instance.rs vendored
View file

@ -494,6 +494,9 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
}
#[cfg(dx12)]
/// # Safety
///
/// The visual must be valid and capable of being used to create a swapchain.
pub unsafe fn instance_create_surface_from_visual(
&self,
visual: *mut std::ffi::c_void,

4
third_party/rust/wgpu-core/src/lib.rs vendored
View file

@ -4,6 +4,8 @@
*/
#![allow(
// It is much clearer to assert negative conditions with eq! false
clippy::bool_assert_comparison,
// We use loops for getting early-out of scope without closures.
clippy::never_loop,
// We don't use syntax sugar where it's not necessary.
@ -16,6 +18,8 @@
clippy::new_without_default,
// Needless updates are more scalable and easier to play with features.
clippy::needless_update,
// Some core functions need many arguments so that code can be reused in many situations.
clippy::too_many_arguments,
// For some reason `rustc` can warn about these in const generics even
// though they are required.
unused_braces,

19
third_party/rust/wgpu-core/src/present.rs vendored
View file

@ -174,7 +174,7 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
initialization_status: TextureInitTracker::new(1, 1),
full_range: track::TextureSelector {
layers: 0..1,
levels: 0..1,
mips: 0..1,
},
life_guard: LifeGuard::new("<Surface>"),
clear_mode: resource::TextureClearMode::RenderPass {
@ -187,20 +187,13 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
let id = fid.assign(texture, &mut token);
{
use track::ResourceState as _;
// register it in the device tracker as uninitialized
let mut trackers = device.trackers.lock();
let mut ts = track::TextureState::default();
let _ = ts.change(
id,
track::TextureSelector {
layers: 0..1,
levels: 0..1,
},
trackers.textures.insert_single(
id.0,
ref_count.clone(),
hal::TextureUses::UNINITIALIZED,
None,
);
let _ = trackers.textures.init(id, ref_count.clone(), ts);
}
if present.acquired_texture.is_some() {
@ -273,6 +266,10 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
// The texture ID got added to the device tracker by `submit()`,
// and now we are moving it away.
log::debug!(
"Removing swapchain texture {:?} from the device tracker",
texture_id.value
);
device.trackers.lock().textures.remove(texture_id.value);
let (texture, _) = hub.textures.unregister(texture_id.value.0, &mut token);

124
third_party/rust/wgpu-core/src/resource.rs vendored
View file

@ -1,9 +1,9 @@
use crate::{
device::{DeviceError, HostMap, MissingFeatures},
hub::{Global, GlobalIdentityHandlerFactory, HalApi, Resource, Token},
id::{DeviceId, SurfaceId, TextureId, Valid},
id::{AdapterId, DeviceId, SurfaceId, TextureId, Valid},
init_tracker::{BufferInitTracker, TextureInitTracker},
track::{TextureSelector, DUMMY_SELECTOR},
track::TextureSelector,
validation::MissingBufferUsageError,
Label, LifeGuard, RefCount, Stored,
};
@ -23,7 +23,6 @@ pub enum BufferMapAsyncStatus {
ContextLost,
}
#[derive(Debug)]
pub(crate) enum BufferMapState<A: hal::Api> {
/// Mapped at creation.
Init {
@ -46,27 +45,65 @@ pub(crate) enum BufferMapState<A: hal::Api> {
unsafe impl<A: hal::Api> Send for BufferMapState<A> {}
unsafe impl<A: hal::Api> Sync for BufferMapState<A> {}
pub type BufferMapCallback = unsafe extern "C" fn(status: BufferMapAsyncStatus, userdata: *mut u8);
#[repr(C)]
#[derive(Debug)]
pub struct BufferMapCallbackC {
callback: unsafe extern "C" fn(status: BufferMapAsyncStatus, user_data: *mut u8),
user_data: *mut u8,
}
unsafe impl Send for BufferMapCallbackC {}
pub struct BufferMapCallback {
// We wrap this so creating the enum in the C variant can be unsafe,
// allowing our call function to be safe.
inner: BufferMapCallbackInner,
}
enum BufferMapCallbackInner {
Rust {
callback: Box<dyn FnOnce(BufferMapAsyncStatus) + Send + 'static>,
},
C {
inner: BufferMapCallbackC,
},
}
impl BufferMapCallback {
pub fn from_rust(callback: Box<dyn FnOnce(BufferMapAsyncStatus) + Send + 'static>) -> Self {
Self {
inner: BufferMapCallbackInner::Rust { callback },
}
}
/// # Safety
///
/// - The callback pointer must be valid to call with the provided user_data pointer.
/// - Both pointers must point to 'static data as the callback may happen at an unspecified time.
pub unsafe fn from_c(inner: BufferMapCallbackC) -> Self {
Self {
inner: BufferMapCallbackInner::C { inner },
}
}
pub(crate) fn call(self, status: BufferMapAsyncStatus) {
match self.inner {
BufferMapCallbackInner::Rust { callback } => callback(status),
// SAFETY: the contract of the call to from_c says that this unsafe is sound.
BufferMapCallbackInner::C { inner } => unsafe {
(inner.callback)(status, inner.user_data)
},
}
}
pub(crate) fn call_error(self) {
log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
self.call(BufferMapAsyncStatus::Error);
}
}
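A minimal sketch of both constructors (the trampoline and null user_data are placeholders, and the struct literal assumes module-internal visibility):
// Rust callers hand over a boxed closure; no unsafety at the call site.
let rust_cb = BufferMapCallback::from_rust(Box::new(|_status| {
    // resolve a promise / wake a waiter here
}));
// C callers provide a trampoline plus opaque user data; `from_c` is unsafe
// because both pointers must stay valid until the callback fires.
unsafe extern "C" fn trampoline(_status: BufferMapAsyncStatus, _user_data: *mut u8) {}
let c_cb = unsafe {
    BufferMapCallback::from_c(BufferMapCallbackC {
        callback: trampoline,
        user_data: std::ptr::null_mut(),
    })
};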
pub struct BufferMapOperation {
pub host: HostMap,
pub callback: BufferMapCallback,
pub user_data: *mut u8,
}
//TODO: clarify if/why this is needed here
unsafe impl Send for BufferMapOperation {}
unsafe impl Sync for BufferMapOperation {}
impl BufferMapOperation {
pub(crate) fn call_error(self) {
log::error!("wgpu_buffer_map_async failed: buffer mapping is pending");
unsafe {
(self.callback)(BufferMapAsyncStatus::Error, self.user_data);
}
}
}
#[derive(Clone, Debug, Error)]
@ -105,7 +142,6 @@ pub enum BufferAccessError {
},
}
#[derive(Debug)]
pub(crate) struct BufferPendingMapping {
pub range: Range<wgt::BufferAddress>,
pub op: BufferMapOperation,
@ -115,7 +151,6 @@ pub(crate) struct BufferPendingMapping {
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
#[derive(Debug)]
pub struct Buffer<A: hal::Api> {
pub(crate) raw: Option<A::Buffer>,
pub(crate) device_id: Stored<DeviceId>,
@ -149,12 +184,6 @@ impl<A: hal::Api> Resource for Buffer<A> {
}
}
impl<A: hal::Api> Borrow<()> for Buffer<A> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
pub type TextureDescriptor<'a> = wgt::TextureDescriptor<Label<'a>>;
#[derive(Debug)]
@ -250,6 +279,26 @@ impl<G: GlobalIdentityHandlerFactory> Global<G> {
hal_texture_callback(hal_texture);
}
/// # Safety
///
/// - The raw adapter handle must not be manually destroyed
pub unsafe fn adapter_as_hal<A: HalApi, F: FnOnce(Option<&A::Adapter>) -> R, R>(
&self,
id: AdapterId,
hal_adapter_callback: F,
) -> R {
profiling::scope!("as_hal", "Adapter");
let hub = A::hub(self);
let mut token = Token::root();
let (guard, _) = hub.adapters.read(&mut token);
let adapter = guard.get(id).ok();
let hal_adapter = adapter.map(|adapter| &adapter.raw.adapter);
hal_adapter_callback(hal_adapter)
}
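A sketch of how an embedder might use this accessor (backend choice and closure body are illustrative only; the borrow must not escape the closure):
let present = unsafe {
    global.adapter_as_hal::<hal::api::Vulkan, _, _>(adapter_id, |hal_adapter| {
        // inspect raw backend handles here; they are only valid inside the closure
        hal_adapter.is_some()
    })
};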
/// # Safety
///
/// - The raw device handle must not be manually destroyed
@ -371,8 +420,6 @@ pub struct TextureView<A: hal::Api> {
pub(crate) format_features: wgt::TextureFormatFeatures,
pub(crate) extent: wgt::Extent3d,
pub(crate) samples: u32,
/// Internal use of this texture view when used as `BindingType::Texture`.
pub(crate) sampled_internal_use: hal::TextureUses,
pub(crate) selector: TextureSelector,
pub(crate) life_guard: LifeGuard,
}
@ -428,12 +475,6 @@ impl<A: hal::Api> Resource for TextureView<A> {
}
}
impl<A: hal::Api> Borrow<()> for TextureView<A> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
/// Describes a [`Sampler`]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "trace", derive(serde::Serialize))]
@ -510,11 +551,6 @@ impl<A: hal::Api> Resource for Sampler<A> {
}
}
impl<A: hal::Api> Borrow<()> for Sampler<A> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
#[derive(Clone, Debug, Error)]
pub enum CreateQuerySetError {
#[error(transparent)]
@ -545,12 +581,6 @@ impl<A: hal::Api> Resource for QuerySet<A> {
}
}
impl<A: hal::Api> Borrow<()> for QuerySet<A> {
fn borrow(&self) -> &() {
&DUMMY_SELECTOR
}
}
#[derive(Clone, Debug, Error)]
pub enum DestroyError {
#[error("resource is invalid")]

966
third_party/rust/wgpu-core/src/track/buffer.rs vendored
View file

@ -1,236 +1,778 @@
use super::{PendingTransition, ResourceState, Unit};
use crate::id::{BufferId, Valid};
/*! Buffer Trackers
*
* Buffers are represented by a single state for the whole resource,
* a 16 bit bitflag of buffer usages. Because there is only ever
* one subresource, they have no selector.
!*/
use std::{borrow::Cow, marker::PhantomData, vec::Drain};
use super::PendingTransition;
use crate::{
hub,
id::{BufferId, TypedId, Valid},
resource::Buffer,
track::{
invalid_resource_state, iterate_bitvec_indices, skip_barrier, ResourceMetadata,
ResourceMetadataProvider, ResourceUses, UsageConflict,
},
LifeGuard, RefCount,
};
use hal::BufferUses;
pub(crate) type BufferState = Unit<BufferUses>;
impl ResourceUses for BufferUses {
const EXCLUSIVE: Self = Self::EXCLUSIVE;
impl PendingTransition<BufferState> {
fn collapse(self) -> Result<BufferUses, Self> {
if self.usage.start.is_empty()
|| self.usage.start == self.usage.end
|| !BufferUses::EXCLUSIVE.intersects(self.usage.start | self.usage.end)
{
Ok(self.usage.start | self.usage.end)
} else {
Err(self)
}
}
}
impl Default for BufferState {
fn default() -> Self {
Self {
first: None,
last: BufferUses::empty(),
}
}
}
impl BufferState {
pub fn with_usage(usage: BufferUses) -> Self {
Unit::new(usage)
}
}
impl ResourceState for BufferState {
type Id = BufferId;
type Selector = ();
type Usage = BufferUses;
fn query(&self, _selector: Self::Selector) -> Option<Self::Usage> {
Some(self.last)
fn bits(self) -> u16 {
Self::bits(&self)
}
fn change(
&mut self,
id: Valid<Self::Id>,
_selector: Self::Selector,
usage: Self::Usage,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {
let old = self.last;
if old != usage || !BufferUses::ORDERED.contains(usage) {
let pending = PendingTransition {
id,
selector: (),
usage: old..usage,
};
*self = match output {
None => {
assert_eq!(
self.first, None,
"extending a state that is already a transition"
);
Unit::new(pending.collapse()?)
}
Some(transitions) => {
transitions.push(pending);
Unit {
first: self.first.or(Some(old)),
last: usage,
}
}
};
fn all_ordered(self) -> bool {
Self::ORDERED.contains(self)
}
fn any_exclusive(self) -> bool {
self.intersects(Self::EXCLUSIVE)
}
}
/// Stores all the buffers that a bind group stores.
pub(crate) struct BufferBindGroupState<A: hub::HalApi> {
buffers: Vec<(Valid<BufferId>, RefCount, BufferUses)>,
_phantom: PhantomData<A>,
}
impl<A: hub::HalApi> BufferBindGroupState<A> {
pub fn new() -> Self {
Self {
buffers: Vec::new(),
_phantom: PhantomData,
}
}
/// Optimize the buffer bind group state by sorting it by ID.
///
/// When this list of states is merged into a tracker, the memory
/// accesses will be in ascending order.
pub(crate) fn optimize(&mut self) {
self.buffers
.sort_unstable_by_key(|&(id, _, _)| id.0.unzip().0);
}
/// Returns a list of all buffers tracked. May contain duplicates.
pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
self.buffers.iter().map(|&(id, _, _)| id)
}
/// Adds the given resource with the given state.
pub fn add_single<'a>(
&mut self,
storage: &'a hub::Storage<Buffer<A>, BufferId>,
id: BufferId,
state: BufferUses,
) -> Option<&'a Buffer<A>> {
let buffer = storage.get(id).ok()?;
self.buffers
.push((Valid(id), buffer.life_guard.add_ref(), state));
Some(buffer)
}
}
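A sketch of the intended call pattern during bind group creation (`buffer_guard` and `id` are assumed names for the storage read guard and the client-supplied id):
let mut bufs = BufferBindGroupState::<hal::api::Empty>::new();
let _buffer = bufs
    .add_single(&*buffer_guard, id, BufferUses::UNIFORM)
    .expect("invalid buffer id");
bufs.optimize(); // sort once so later merges walk memory in ascending order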
/// Stores all buffer state within a single usage scope.
#[derive(Debug)]
pub(crate) struct BufferUsageScope<A: hub::HalApi> {
state: Vec<BufferUses>,
metadata: ResourceMetadata<A>,
}
impl<A: hub::HalApi> BufferUsageScope<A> {
pub fn new() -> Self {
Self {
state: Vec::new(),
metadata: ResourceMetadata::new(),
}
}
fn debug_assert_in_bounds(&self, index: usize) {
debug_assert!(index < self.state.len());
self.metadata.debug_assert_in_bounds(index);
}
/// Sets the size of all the vectors inside the tracker.
///
/// Must be called with the highest possible Buffer ID before
/// all unsafe functions are called.
pub fn set_size(&mut self, size: usize) {
self.state.resize(size, BufferUses::empty());
self.metadata.set_size(size);
}
/// Extend the vectors to let the given index be valid.
fn allow_index(&mut self, index: usize) {
if index >= self.state.len() {
self.set_size(index + 1);
}
}
/// Returns a list of all buffers tracked.
pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
self.metadata.used()
}
/// Merge the list of buffer states in the given bind group into this usage scope.
///
/// If any of the resulting states is invalid, stops the merge and returns a usage
/// conflict with the details of the invalid state.
///
/// Because bind groups do not check if the union of all their states is valid,
/// this method is allowed to return Err on the first bind group bound.
///
/// # Safety
///
/// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
/// method is called.
pub unsafe fn merge_bind_group(
&mut self,
bind_group: &BufferBindGroupState<A>,
) -> Result<(), UsageConflict> {
for &(id, ref ref_count, state) in &bind_group.buffers {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
insert_or_merge(
None,
None,
&mut self.state,
&mut self.metadata,
index32,
index,
BufferStateProvider::Direct { state },
ResourceMetadataProvider::Direct {
epoch,
ref_count: Cow::Borrowed(ref_count),
},
)?;
}
Ok(())
}
fn merge(
&mut self,
id: Valid<Self::Id>,
other: &Self,
output: Option<&mut Vec<PendingTransition<Self>>>,
) -> Result<(), PendingTransition<Self>> {
let old = self.last;
let new = other.port();
if old == new && BufferUses::ORDERED.contains(new) {
if output.is_some() && self.first.is_none() {
*self = Unit {
first: Some(old),
last: other.last,
};
}
} else {
let pending = PendingTransition {
id,
selector: (),
usage: old..new,
};
*self = match output {
None => {
assert_eq!(
self.first, None,
"extending a state that is already a transition"
);
Unit::new(pending.collapse()?)
}
Some(transitions) => {
transitions.push(pending);
Unit {
first: self.first.or(Some(old)),
last: other.last,
}
}
/// Merge the list of buffer states in the given usage scope into this UsageScope.
///
/// If any of the resulting states is invalid, stops the merge and returns a usage
/// conflict with the details of the invalid state.
///
/// If the given tracker uses IDs higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn merge_usage_scope(&mut self, scope: &Self) -> Result<(), UsageConflict> {
let incoming_size = scope.state.len();
if incoming_size > self.state.len() {
self.set_size(incoming_size);
}
for index in iterate_bitvec_indices(&scope.metadata.owned) {
self.debug_assert_in_bounds(index);
scope.debug_assert_in_bounds(index);
unsafe {
insert_or_merge(
None,
None,
&mut self.state,
&mut self.metadata,
index as u32,
index,
BufferStateProvider::Indirect {
state: &scope.state,
},
ResourceMetadataProvider::Indirect {
metadata: &scope.metadata,
},
)?;
};
}
Ok(())
}
fn optimize(&mut self) {}
}
/// Merge a single state into the UsageScope.
///
/// If the resulting state is invalid, returns a usage
/// conflict with the details of the invalid state.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn merge_single<'a>(
&mut self,
storage: &'a hub::Storage<Buffer<A>, BufferId>,
id: BufferId,
new_state: BufferUses,
) -> Result<&'a Buffer<A>, UsageConflict> {
let buffer = storage
.get(id)
.map_err(|_| UsageConflict::BufferInvalid { id })?;
#[cfg(test)]
mod test {
use super::*;
use crate::id::Id;
let (index32, epoch, _) = id.unzip();
let index = index32 as usize;
#[test]
fn change_extend() {
let mut bs = Unit {
first: None,
last: BufferUses::INDEX,
};
let id = Id::dummy();
assert_eq!(
bs.change(id, (), BufferUses::STORAGE_WRITE, None),
Err(PendingTransition {
id,
selector: (),
usage: BufferUses::INDEX..BufferUses::STORAGE_WRITE,
}),
);
bs.change(id, (), BufferUses::VERTEX, None).unwrap();
bs.change(id, (), BufferUses::INDEX, None).unwrap();
assert_eq!(bs, Unit::new(BufferUses::VERTEX | BufferUses::INDEX));
}
self.allow_index(index);
#[test]
fn change_replace() {
let mut bs = Unit {
first: None,
last: BufferUses::STORAGE_WRITE,
};
let id = Id::dummy();
let mut list = Vec::new();
bs.change(id, (), BufferUses::VERTEX, Some(&mut list))
.unwrap();
assert_eq!(
&list,
&[PendingTransition {
id,
selector: (),
usage: BufferUses::STORAGE_WRITE..BufferUses::VERTEX,
}],
);
assert_eq!(
bs,
Unit {
first: Some(BufferUses::STORAGE_WRITE),
last: BufferUses::VERTEX,
}
);
self.debug_assert_in_bounds(index);
list.clear();
bs.change(id, (), BufferUses::STORAGE_WRITE, Some(&mut list))
.unwrap();
assert_eq!(
&list,
&[PendingTransition {
id,
selector: (),
usage: BufferUses::VERTEX..BufferUses::STORAGE_WRITE,
}],
);
assert_eq!(
bs,
Unit {
first: Some(BufferUses::STORAGE_WRITE),
last: BufferUses::STORAGE_WRITE,
}
);
}
unsafe {
insert_or_merge(
Some(&buffer.life_guard),
None,
&mut self.state,
&mut self.metadata,
index32,
index,
BufferStateProvider::Direct { state: new_state },
ResourceMetadataProvider::Resource { epoch },
)?;
}
#[test]
fn merge_replace() {
let mut bs = Unit {
first: None,
last: BufferUses::empty(),
};
let other_smooth = Unit {
first: Some(BufferUses::empty()),
last: BufferUses::COPY_DST,
};
let id = Id::dummy();
let mut list = Vec::new();
bs.merge(id, &other_smooth, Some(&mut list)).unwrap();
assert!(list.is_empty());
assert_eq!(
bs,
Unit {
first: Some(BufferUses::empty()),
last: BufferUses::COPY_DST,
}
);
let other_rough = Unit {
first: Some(BufferUses::empty()),
last: BufferUses::UNIFORM,
};
bs.merge(id, &other_rough, Some(&mut list)).unwrap();
assert_eq!(
&list,
&[PendingTransition {
id,
selector: (),
usage: BufferUses::COPY_DST..BufferUses::empty(),
}],
);
assert_eq!(
bs,
Unit {
first: Some(BufferUses::empty()),
last: BufferUses::UNIFORM,
}
);
Ok(buffer)
}
}
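A sketch of recording-time use, inside a function returning Result<_, UsageConflict> (the `usage_scope.buffers` path is an assumed field name):
let buffer = usage_scope
    .buffers
    .merge_single(&*buffer_guard, buffer_id, BufferUses::STORAGE_READ)?;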
/// Stores all buffer state within a command buffer or device.
pub(crate) struct BufferTracker<A: hub::HalApi> {
start: Vec<BufferUses>,
end: Vec<BufferUses>,
metadata: ResourceMetadata<A>,
temp: Vec<PendingTransition<BufferUses>>,
}
impl<A: hub::HalApi> BufferTracker<A> {
pub fn new() -> Self {
Self {
start: Vec::new(),
end: Vec::new(),
metadata: ResourceMetadata::new(),
temp: Vec::new(),
}
}
fn debug_assert_in_bounds(&self, index: usize) {
debug_assert!(index < self.start.len());
debug_assert!(index < self.end.len());
self.metadata.debug_assert_in_bounds(index);
}
/// Sets the size of all the vectors inside the tracker.
///
/// Must be called with the highest possible Buffer ID before
/// all unsafe functions are called.
pub fn set_size(&mut self, size: usize) {
self.start.resize(size, BufferUses::empty());
self.end.resize(size, BufferUses::empty());
self.metadata.set_size(size);
}
/// Extend the vectors to let the given index be valid.
fn allow_index(&mut self, index: usize) {
if index >= self.start.len() {
self.set_size(index + 1);
}
}
/// Returns a list of all buffers tracked.
pub fn used(&self) -> impl Iterator<Item = Valid<BufferId>> + '_ {
self.metadata.used()
}
/// Drains all currently pending transitions.
pub fn drain(&mut self) -> Drain<'_, PendingTransition<BufferUses>> {
self.temp.drain(..)
}
/// Inserts a single buffer and its state into the resource tracker.
///
/// If the resource already exists in the tracker, this will panic.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_single(&mut self, id: Valid<BufferId>, ref_count: RefCount, state: BufferUses) {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
self.allow_index(index);
self.debug_assert_in_bounds(index);
unsafe {
let currently_owned = self.metadata.owned.get(index).unwrap_unchecked();
if currently_owned {
panic!("Tried to insert buffer already tracked");
}
insert(
None,
Some(&mut self.start),
&mut self.end,
&mut self.metadata,
index,
BufferStateProvider::Direct { state },
None,
ResourceMetadataProvider::Direct {
epoch,
ref_count: Cow::Owned(ref_count),
},
)
}
}
/// Sets the state of a single buffer.
///
/// If a transition is needed to get the buffer into the given state, that transition
/// is returned. No more than one transition is needed.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_single<'a>(
&mut self,
storage: &'a hub::Storage<Buffer<A>, BufferId>,
id: BufferId,
state: BufferUses,
) -> Option<(&'a Buffer<A>, Option<PendingTransition<BufferUses>>)> {
let value = storage.get(id).ok()?;
let (index32, epoch, _) = id.unzip();
let index = index32 as usize;
self.allow_index(index);
self.debug_assert_in_bounds(index);
unsafe {
insert_or_barrier_update(
Some(&value.life_guard),
Some(&mut self.start),
&mut self.end,
&mut self.metadata,
index32,
index,
BufferStateProvider::Direct { state },
None,
ResourceMetadataProvider::Resource { epoch },
&mut self.temp,
)
};
debug_assert!(self.temp.len() <= 1);
Some((value, self.temp.pop()))
}
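A sketch of the transfer-command pattern this enables (tracker paths are assumed names; the returned transition becomes a hal barrier):
if let Some((_dst, Some(transition))) = cmd_buf.trackers.buffers.set_single(
    &*buffer_guard,
    dst_buffer_id,
    BufferUses::COPY_DST,
) {
    // encode `transition` as a buffer barrier before the copy
}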
/// Sets the given state for all buffers in the given tracker.
///
/// If a transition is needed to get the buffers into the needed state,
/// those transitions are stored within the tracker. A subsequent
/// call to [`Self::drain`] is needed to get those transitions.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_from_tracker(&mut self, tracker: &Self) {
let incoming_size = tracker.start.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
for index in iterate_bitvec_indices(&tracker.metadata.owned) {
self.debug_assert_in_bounds(index);
tracker.debug_assert_in_bounds(index);
unsafe {
insert_or_barrier_update(
None,
Some(&mut self.start),
&mut self.end,
&mut self.metadata,
index as u32,
index,
BufferStateProvider::Indirect {
state: &tracker.start,
},
Some(BufferStateProvider::Indirect {
state: &tracker.end,
}),
ResourceMetadataProvider::Indirect {
metadata: &tracker.metadata,
},
&mut self.temp,
)
}
}
}
/// Sets the given state for all buffers in the given UsageScope.
///
/// If a transition is needed to get the buffers into the needed state,
/// those transitions are stored within the tracker. A subsequent
/// call to [`Self::drain`] is needed to get those transitions.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_from_usage_scope(&mut self, scope: &BufferUsageScope<A>) {
let incoming_size = scope.state.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
for index in iterate_bitvec_indices(&scope.metadata.owned) {
self.debug_assert_in_bounds(index);
scope.debug_assert_in_bounds(index);
unsafe {
insert_or_barrier_update(
None,
Some(&mut self.start),
&mut self.end,
&mut self.metadata,
index as u32,
index,
BufferStateProvider::Indirect {
state: &scope.state,
},
None,
ResourceMetadataProvider::Indirect {
metadata: &scope.metadata,
},
&mut self.temp,
)
}
}
}
/// Iterates through all buffers in the given bind group and adopts
/// the state given for those buffers in the UsageScope. It also
/// removes all touched buffers from the usage scope.
///
/// If a transition is needed to get the buffers into the needed state,
/// those transitions are stored within the tracker. A subsequent
/// call to [`Self::drain`] is needed to get those transitions.
///
/// This is a really funky method used by Compute Passes to generate
/// barriers after a call to dispatch without needing to iterate
/// over all elements in the usage scope. We use the
/// bind group as the source of which IDs to look at. The bind groups
/// must have first been added to the usage scope.
///
/// # Safety
///
/// [`Self::set_size`] must be called with the maximum possible Buffer ID before this
/// method is called.
pub unsafe fn set_and_remove_from_usage_scope_sparse(
&mut self,
scope: &mut BufferUsageScope<A>,
bind_group_state: &BufferBindGroupState<A>,
) {
let incoming_size = scope.state.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
for &(id, ref ref_count, _) in bind_group_state.buffers.iter() {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
scope.debug_assert_in_bounds(index);
if !scope.metadata.owned.get(index).unwrap_unchecked() {
continue;
}
insert_or_barrier_update(
None,
Some(&mut self.start),
&mut self.end,
&mut self.metadata,
index as u32,
index,
BufferStateProvider::Indirect {
state: &scope.state,
},
None,
ResourceMetadataProvider::Direct {
epoch,
ref_count: Cow::Borrowed(ref_count),
},
&mut self.temp,
);
scope.metadata.reset(index);
}
}
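The dispatch-time pattern described above, sketched with assumed field names on the command buffer tracker and bind group:
unsafe {
    cmd_buf.trackers.buffers.set_and_remove_from_usage_scope_sparse(
        &mut state.scope.buffers,
        &bind_group.used.buffers,
    );
}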
/// Removes the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
/// Returns true if the resource was removed.
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
pub fn remove_abandoned(&mut self, id: Valid<BufferId>) -> bool {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
if index > self.metadata.owned.len() {
return false;
}
self.debug_assert_in_bounds(index);
unsafe {
if self.metadata.owned.get(index).unwrap_unchecked() {
let existing_epoch = self.metadata.epochs.get_unchecked_mut(index);
let existing_ref_count = self.metadata.ref_counts.get_unchecked_mut(index);
if *existing_epoch == epoch
&& existing_ref_count.as_mut().unwrap_unchecked().load() == 1
{
self.metadata.reset(index);
return true;
}
}
}
false
}
}
/// Source of Buffer State.
#[derive(Debug, Clone)]
enum BufferStateProvider<'a> {
/// Get a state that was provided directly.
Direct { state: BufferUses },
/// Get a state from an array of states.
Indirect { state: &'a [BufferUses] },
}
impl BufferStateProvider<'_> {
/// Gets the state from the provider, given a resource ID index.
///
/// # Safety
///
/// Index must be in bounds for the indirect source iff this is in the indirect state.
#[inline(always)]
unsafe fn get_state(&self, index: usize) -> BufferUses {
match *self {
BufferStateProvider::Direct { state } => state,
BufferStateProvider::Indirect { state } => {
debug_assert!(index < state.len());
*state.get_unchecked(index)
}
}
}
}
/// Does an insertion operation if the index isn't tracked
/// in the current metadata, otherwise merges the given state
/// with the current state. If the merging would cause
/// a conflict, returns that usage conflict.
///
/// # Safety
///
/// Indexes must be valid indexes into all arrays passed in
/// to this function, either directly or via metadata or provider structs.
#[inline(always)]
unsafe fn insert_or_merge<A: hub::HalApi>(
life_guard: Option<&LifeGuard>,
start_states: Option<&mut [BufferUses]>,
current_states: &mut [BufferUses],
resource_metadata: &mut ResourceMetadata<A>,
index32: u32,
index: usize,
state_provider: BufferStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, A>,
) -> Result<(), UsageConflict> {
let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked();
if !currently_owned {
insert(
life_guard,
start_states,
current_states,
resource_metadata,
index,
state_provider,
None,
metadata_provider,
);
return Ok(());
}
merge(
current_states,
index32,
index,
state_provider,
metadata_provider,
)
}
/// If the resource isn't tracked
/// - Inserts the given resource.
/// - Uses the `start_state_provider` to populate `start_states`
/// - Uses either `end_state_provider` or `start_state_provider`
/// to populate `current_states`.
/// If the resource is tracked
/// - Inserts barriers from the state in `current_states`
/// to the state provided by `start_state_provider`.
/// - Updates the `current_states` with either the state from
/// `end_state_provider` or `start_state_provider`.
///
/// Any barriers are added to the barrier vector.
///
/// # Safety
///
/// Indexes must be valid indexes into all arrays passed in
/// to this function, either directly or via metadata or provider structs.
#[inline(always)]
unsafe fn insert_or_barrier_update<A: hub::HalApi>(
life_guard: Option<&LifeGuard>,
start_states: Option<&mut [BufferUses]>,
current_states: &mut [BufferUses],
resource_metadata: &mut ResourceMetadata<A>,
index32: u32,
index: usize,
start_state_provider: BufferStateProvider<'_>,
end_state_provider: Option<BufferStateProvider<'_>>,
metadata_provider: ResourceMetadataProvider<'_, A>,
barriers: &mut Vec<PendingTransition<BufferUses>>,
) {
let currently_owned = resource_metadata.owned.get(index).unwrap_unchecked();
if !currently_owned {
insert(
life_guard,
start_states,
current_states,
resource_metadata,
index,
start_state_provider,
end_state_provider,
metadata_provider,
);
return;
}
let update_state_provider = end_state_provider.unwrap_or_else(|| start_state_provider.clone());
barrier(
current_states,
index32,
index,
start_state_provider,
barriers,
);
update(current_states, index, update_state_provider);
}
#[inline(always)]
unsafe fn insert<A: hub::HalApi>(
life_guard: Option<&LifeGuard>,
start_states: Option<&mut [BufferUses]>,
current_states: &mut [BufferUses],
resource_metadata: &mut ResourceMetadata<A>,
index: usize,
start_state_provider: BufferStateProvider<'_>,
end_state_provider: Option<BufferStateProvider<'_>>,
metadata_provider: ResourceMetadataProvider<'_, A>,
) {
let new_start_state = start_state_provider.get_state(index);
let new_end_state = end_state_provider.map_or(new_start_state, |p| p.get_state(index));
// This should only ever happen with a wgpu bug, but let's just double
// check that resource states don't have any conflicts.
debug_assert_eq!(invalid_resource_state(new_start_state), false);
debug_assert_eq!(invalid_resource_state(new_end_state), false);
log::trace!("\tbuf {index}: insert {new_start_state:?}..{new_end_state:?}");
if let Some(&mut ref mut start_state) = start_states {
*start_state.get_unchecked_mut(index) = new_start_state;
}
*current_states.get_unchecked_mut(index) = new_end_state;
let (epoch, ref_count) = metadata_provider.get_own(life_guard, index);
resource_metadata.owned.set(index, true);
*resource_metadata.epochs.get_unchecked_mut(index) = epoch;
*resource_metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count);
}
#[inline(always)]
unsafe fn merge<A: hub::HalApi>(
current_states: &mut [BufferUses],
index32: u32,
index: usize,
state_provider: BufferStateProvider<'_>,
metadata_provider: ResourceMetadataProvider<'_, A>,
) -> Result<(), UsageConflict> {
let current_state = current_states.get_unchecked_mut(index);
let new_state = state_provider.get_state(index);
let merged_state = *current_state | new_state;
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_buffer(
BufferId::zip(index32, metadata_provider.get_epoch(index), A::VARIANT),
*current_state,
new_state,
));
}
log::trace!("\tbuf {index32}: merge {current_state:?} + {new_state:?}");
*current_state = merged_state;
Ok(())
}
#[inline(always)]
unsafe fn barrier(
current_states: &mut [BufferUses],
index32: u32,
index: usize,
state_provider: BufferStateProvider<'_>,
barriers: &mut Vec<PendingTransition<BufferUses>>,
) {
let current_state = *current_states.get_unchecked(index);
let new_state = state_provider.get_state(index);
if skip_barrier(current_state, new_state) {
return;
}
barriers.push(PendingTransition {
id: index32,
selector: (),
usage: current_state..new_state,
});
log::trace!("\tbuf {index32}: transition {current_state:?} -> {new_state:?}");
}
#[inline(always)]
unsafe fn update(
current_states: &mut [BufferUses],
index: usize,
state_provider: BufferStateProvider<'_>,
) {
let current_state = current_states.get_unchecked_mut(index);
let new_state = state_provider.get_state(index);
*current_state = new_state;
}

1237
third_party/rust/wgpu-core/src/track/mod.rs vendored

File diff not shown because it is too large.

268
third_party/rust/wgpu-core/src/track/range.rs vendored
View file

@ -2,25 +2,19 @@
//TODO: consider getting rid of it.
use smallvec::SmallVec;
use std::{cmp::Ordering, fmt::Debug, iter, ops::Range, slice::Iter};
use std::{fmt::Debug, iter, ops::Range};
/// Structure that keeps track of an I -> T mapping,
/// optimized for the case where keys with the same value
/// are often grouped together linearly.
#[derive(Clone, Debug, PartialEq)]
pub struct RangedStates<I, T> {
pub(crate) struct RangedStates<I, T> {
/// List of ranges, each associated with a single value.
/// Ranges of keys have to be non-intersecting and ordered.
ranges: SmallVec<[(Range<I>, T); 1]>,
}
impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
pub fn empty() -> Self {
Self {
ranges: SmallVec::new(),
}
}
impl<I: Copy + Ord, T: Copy + PartialEq> RangedStates<I, T> {
pub fn from_range(range: Range<I>, value: T) -> Self {
Self {
ranges: iter::once((range, value)).collect(),
@ -35,20 +29,12 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
}
}
/// Clear all the ranges.
pub fn clear(&mut self) {
self.ranges.clear();
pub fn iter(&self) -> impl Iterator<Item = &(Range<I>, T)> + Clone {
self.ranges.iter()
}
/// Append a range.
///
/// Assumes that the object is being constructed from a set of
/// ranges, and they are given in the ascending order of their keys.
pub fn append(&mut self, index: Range<I>, value: T) {
if let Some(last) = self.ranges.last() {
debug_assert!(last.0.end <= index.start);
}
self.ranges.push((index, value));
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut (Range<I>, T)> {
self.ranges.iter_mut()
}
/// Check that all the ranges are non-intersecting and ordered.
@ -64,7 +50,6 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
}
/// Merge the neighboring ranges together, where possible.
#[allow(clippy::suspicious_operation_groupings)]
pub fn coalesce(&mut self) {
let mut num_removed = 0;
let mut iter = self.ranges.iter_mut();
@ -86,25 +71,18 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
}
}
/// Check if all intersecting ranges have the same value, which is returned.
///
/// Returns `None` if no intersections are detected.
/// Returns `Some(Err)` if the intersected values are inconsistent.
pub fn query<U: PartialEq>(
&self,
index: &Range<I>,
fun: impl Fn(&T) -> U,
) -> Option<Result<U, ()>> {
let mut result = None;
for &(ref range, ref value) in self.ranges.iter() {
if range.end > index.start && range.start < index.end {
let old = result.replace(fun(value));
if old.is_some() && old != result {
return Some(Err(()));
}
}
}
result.map(Ok)
pub fn iter_filter<'a>(
&'a self,
range: &'a Range<I>,
) -> impl Iterator<Item = (Range<I>, &T)> + 'a {
self.ranges
.iter()
.filter(move |&&(ref inner, ..)| inner.end > range.start && inner.start < range.end)
.map(move |&(ref inner, ref v)| {
let new_range = inner.start.max(range.start)..inner.end.min(range.end);
(new_range, v)
})
}
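A sketch of what `iter_filter` yields, with each intersecting range clamped to the query (values arbitrary):
let rs = RangedStates::from_slice(&[(1..4, 9u8), (5..7, 1)]);
let hits: Vec<_> = rs.iter_filter(&(2..6)).map(|(r, &v)| (r, v)).collect();
assert_eq!(hits, &[(2..4, 9), (5..6, 1)]);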
/// Split the storage ranges in such a way that there is a linear subset of
@ -176,112 +154,12 @@ impl<I: Copy + PartialOrd, T: Copy + PartialEq> RangedStates<I, T> {
clone.check_sanity();
result
}
/// Produce an iterator that merges two instances together.
///
/// Each range in the returned iterator is a subset of a range in either
/// `self` or `other`, and the value returned as a `Range` from `self` to `other`.
pub fn merge<'a>(&'a self, other: &'a Self, base: I) -> Merge<'a, I, T> {
Merge {
base,
sa: self.ranges.iter().peekable(),
sb: other.ranges.iter().peekable(),
}
}
}
/// A custom iterator that goes through two `RangedStates` and process a merge.
#[derive(Debug)]
pub struct Merge<'a, I, T> {
base: I,
sa: iter::Peekable<Iter<'a, (Range<I>, T)>>,
sb: iter::Peekable<Iter<'a, (Range<I>, T)>>,
}
impl<'a, I: Copy + Debug + Ord, T: Copy + Debug> Iterator for Merge<'a, I, T> {
type Item = (Range<I>, Range<Option<T>>);
fn next(&mut self) -> Option<Self::Item> {
match (self.sa.peek(), self.sb.peek()) {
// we have both streams
(Some(&&(ref ra, va)), Some(&&(ref rb, vb))) => {
let (range, usage) = if ra.start < self.base {
// in the middle of the left stream
let (end, end_value) = if self.base == rb.start {
// right stream is starting
debug_assert!(self.base < ra.end);
(rb.end, Some(vb))
} else {
// right hasn't started yet
debug_assert!(self.base < rb.start);
(rb.start, None)
};
(self.base..ra.end.min(end), Some(va)..end_value)
} else if rb.start < self.base {
// in the middle of the right stream
let (end, start_value) = if self.base == ra.start {
// left stream is starting
debug_assert!(self.base < rb.end);
(ra.end, Some(va))
} else {
// left hasn't started yet
debug_assert!(self.base < ra.start);
(ra.start, None)
};
(self.base..rb.end.min(end), start_value..Some(vb))
} else {
// no active streams
match ra.start.cmp(&rb.start) {
// both are starting
Ordering::Equal => (ra.start..ra.end.min(rb.end), Some(va)..Some(vb)),
// only left is starting
Ordering::Less => (ra.start..rb.start.min(ra.end), Some(va)..None),
// only right is starting
Ordering::Greater => (rb.start..ra.start.min(rb.end), None..Some(vb)),
}
};
self.base = range.end;
if ra.end == range.end {
let _ = self.sa.next();
}
if rb.end == range.end {
let _ = self.sb.next();
}
Some((range, usage))
}
// only right stream
(None, Some(&&(ref rb, vb))) => {
let range = self.base.max(rb.start)..rb.end;
self.base = rb.end;
let _ = self.sb.next();
Some((range, None..Some(vb)))
}
// only left stream
(Some(&&(ref ra, va)), None) => {
let range = self.base.max(ra.start)..ra.end;
self.base = ra.end;
let _ = self.sa.next();
Some((range, Some(va)..None))
}
// done
(None, None) => None,
}
}
}
#[cfg(test)]
mod test {
//TODO: randomized/fuzzy testing
use super::RangedStates;
use std::{fmt::Debug, ops::Range};
fn easy_merge<T: PartialEq + Copy + Debug>(
ra: &[(Range<usize>, T)],
rb: &[(Range<usize>, T)],
) -> Vec<(Range<usize>, Range<Option<T>>)> {
RangedStates::from_slice(ra)
.merge(&RangedStates::from_slice(rb), 0)
.collect()
}
#[test]
fn sane_good() {
@ -311,14 +189,6 @@ mod test {
assert_eq!(rs.ranges.as_slice(), &[(1..5, 9), (5..7, 1), (8..9, 1),]);
}
#[test]
fn query() {
let rs = RangedStates::from_slice(&[(1..4, 1u8), (5..7, 2)]);
assert_eq!(rs.query(&(0..1), |v| *v), None);
assert_eq!(rs.query(&(1..3), |v| *v), Some(Ok(1)));
assert_eq!(rs.query(&(1..6), |v| *v), Some(Err(())));
}
#[test]
fn isolate() {
let rs = RangedStates::from_slice(&[(1..4, 9u8), (4..5, 9), (5..7, 1), (8..9, 1)]);
@ -333,104 +203,4 @@ mod test {
&[(6..7, 1), (7..8, 0), (8..9, 1),]
);
}
#[test]
fn merge_same() {
assert_eq!(
&easy_merge(&[(1..4, 0u8),], &[(1..4, 2u8),],),
&[(1..4, Some(0)..Some(2)),]
);
}
#[test]
fn merge_empty() {
assert_eq!(
&easy_merge(&[(1..2, 0u8),], &[],),
&[(1..2, Some(0)..None),]
);
assert_eq!(
&easy_merge(&[], &[(3..4, 1u8),],),
&[(3..4, None..Some(1)),]
);
}
#[test]
fn merge_separate() {
assert_eq!(
&easy_merge(&[(1..2, 0u8), (5..6, 1u8),], &[(2..4, 2u8),],),
&[
(1..2, Some(0)..None),
(2..4, None..Some(2)),
(5..6, Some(1)..None),
]
);
}
#[test]
fn merge_subset() {
assert_eq!(
&easy_merge(&[(1..6, 0u8),], &[(2..4, 2u8),],),
&[
(1..2, Some(0)..None),
(2..4, Some(0)..Some(2)),
(4..6, Some(0)..None),
]
);
assert_eq!(
&easy_merge(&[(2..4, 0u8),], &[(1..4, 2u8),],),
&[(1..2, None..Some(2)), (2..4, Some(0)..Some(2)),]
);
}
#[test]
fn merge_all() {
assert_eq!(
&easy_merge(&[(1..4, 0u8), (5..8, 1u8),], &[(2..6, 2u8), (7..9, 3u8),],),
&[
(1..2, Some(0)..None),
(2..4, Some(0)..Some(2)),
(4..5, None..Some(2)),
(5..6, Some(1)..Some(2)),
(6..7, Some(1)..None),
(7..8, Some(1)..Some(3)),
(8..9, None..Some(3)),
]
);
}
#[test]
fn merge_complex() {
assert_eq!(
&easy_merge(
&[
(0..8, 0u8),
(8..9, 1),
(9..16, 2),
(16..17, 3),
(17..118, 4),
(118..119, 5),
(119..124, 6),
(124..125, 7),
(125..512, 8),
],
&[(15..16, 10u8), (51..52, 11), (126..127, 12),],
),
&[
(0..8, Some(0)..None),
(8..9, Some(1)..None),
(9..15, Some(2)..None),
(15..16, Some(2)..Some(10)),
(16..17, Some(3)..None),
(17..51, Some(4)..None),
(51..52, Some(4)..Some(11)),
(52..118, Some(4)..None),
(118..119, Some(5)..None),
(119..124, Some(6)..None),
(124..125, Some(7)..None),
(125..126, Some(8)..None),
(126..127, Some(8)..Some(12)),
(127..512, Some(8)..None),
]
);
}
}

209
third_party/rust/wgpu-core/src/track/stateless.rs vendored Normal file
View file

@ -0,0 +1,209 @@
/*! Stateless Trackers
*
 * Stateless trackers don't have any state, so they make no
* distinction between a usage scope and a full tracker.
!*/
use std::marker::PhantomData;
use crate::{
hub,
id::{TypedId, Valid},
track::{iterate_bitvec_indices, ResourceMetadata},
RefCount,
};
/// Stores all the resources that a bind group stores.
pub(crate) struct StatelessBindGroupSate<T, Id: TypedId> {
resources: Vec<(Valid<Id>, RefCount)>,
_phantom: PhantomData<T>,
}
impl<T: hub::Resource, Id: TypedId> StatelessBindGroupSate<T, Id> {
pub fn new() -> Self {
Self {
resources: Vec::new(),
_phantom: PhantomData,
}
}
/// Optimize the bind group state by sorting it by ID.
///
/// When this list of states is merged into a tracker, the memory
/// accesses will be in ascending order.
pub(crate) fn optimize(&mut self) {
self.resources
.sort_unstable_by_key(|&(id, _)| id.0.unzip().0);
}
/// Returns a list of all resources tracked. May contain duplicates.
pub fn used(&self) -> impl Iterator<Item = Valid<Id>> + '_ {
self.resources.iter().map(|&(id, _)| id)
}
/// Adds the given resource.
pub fn add_single<'a>(&mut self, storage: &'a hub::Storage<T, Id>, id: Id) -> Option<&'a T> {
let resource = storage.get(id).ok()?;
self.resources
.push((Valid(id), resource.life_guard().add_ref()));
Some(resource)
}
}
/// Stores all resource state within a command buffer or device.
pub(crate) struct StatelessTracker<A: hub::HalApi, T, Id: TypedId> {
metadata: ResourceMetadata<A>,
_phantom: PhantomData<(T, Id)>,
}
impl<A: hub::HalApi, T: hub::Resource, Id: TypedId> StatelessTracker<A, T, Id> {
pub fn new() -> Self {
Self {
metadata: ResourceMetadata::new(),
_phantom: PhantomData,
}
}
fn debug_assert_in_bounds(&self, index: usize) {
self.metadata.debug_assert_in_bounds(index);
}
/// Sets the size of all the vectors inside the tracker.
///
/// Must be called with the highest possible Resource ID of this type
/// before all unsafe functions are called.
pub fn set_size(&mut self, size: usize) {
self.metadata.set_size(size);
}
/// Extend the vectors to let the given index be valid.
fn allow_index(&mut self, index: usize) {
if index >= self.metadata.owned.len() {
self.set_size(index + 1);
}
}
/// Returns a list of all resources tracked.
pub fn used(&self) -> impl Iterator<Item = Valid<Id>> + '_ {
self.metadata.used()
}
/// Inserts a single resource into the resource tracker.
///
/// If the resource already exists in the tracker, it will be overwritten.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn insert_single(&mut self, id: Valid<Id>, ref_count: RefCount) {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
self.allow_index(index);
self.debug_assert_in_bounds(index);
unsafe {
*self.metadata.epochs.get_unchecked_mut(index) = epoch;
*self.metadata.ref_counts.get_unchecked_mut(index) = Some(ref_count);
self.metadata.owned.set(index, true);
}
}
/// Adds the given resource to the tracker.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn add_single<'a>(&mut self, storage: &'a hub::Storage<T, Id>, id: Id) -> Option<&'a T> {
let item = storage.get(id).ok()?;
let (index32, epoch, _) = id.unzip();
let index = index32 as usize;
self.allow_index(index);
self.debug_assert_in_bounds(index);
unsafe {
*self.metadata.epochs.get_unchecked_mut(index) = epoch;
*self.metadata.ref_counts.get_unchecked_mut(index) = Some(item.life_guard().add_ref());
self.metadata.owned.set(index, true);
}
Some(item)
}
/// Adds the given resources from the given tracker.
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn add_from_tracker(&mut self, other: &Self) {
let incoming_size = other.metadata.owned.len();
if incoming_size > self.metadata.owned.len() {
self.set_size(incoming_size);
}
for index in iterate_bitvec_indices(&other.metadata.owned) {
self.debug_assert_in_bounds(index);
other.debug_assert_in_bounds(index);
unsafe {
let previously_owned = self.metadata.owned.get(index).unwrap_unchecked();
if !previously_owned {
self.metadata.owned.set(index, true);
let other_ref_count = other
.metadata
.ref_counts
.get_unchecked(index)
.clone()
.unwrap_unchecked();
*self.metadata.ref_counts.get_unchecked_mut(index) = Some(other_ref_count);
let epoch = *other.metadata.epochs.get_unchecked(index);
*self.metadata.epochs.get_unchecked_mut(index) = epoch;
}
}
}
}
/// Removes the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
/// Returns true if the resource was removed.
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
pub fn remove_abandoned(&mut self, id: Valid<Id>) -> bool {
let (index32, epoch, _) = id.0.unzip();
let index = index32 as usize;
if index > self.metadata.owned.len() {
return false;
}
self.debug_assert_in_bounds(index);
unsafe {
if self.metadata.owned.get(index).unwrap_unchecked() {
let existing_epoch = self.metadata.epochs.get_unchecked_mut(index);
let existing_ref_count = self.metadata.ref_counts.get_unchecked_mut(index);
if *existing_epoch == epoch
&& existing_ref_count.as_mut().unwrap_unchecked().load() == 1
{
self.metadata.reset(index);
return true;
}
}
}
false
}
}
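A sketch of the common call pattern for these stateless trackers, tracking a sampler used by a command buffer (storage guard and field names assumed):
let sampler = cmd_buf
    .trackers
    .samplers
    .add_single(&*sampler_guard, sampler_id)
    .expect("invalid sampler id");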

1826
third_party/rust/wgpu-core/src/track/texture.rs vendored

File diff not shown because it is too large.

23
third_party/rust/wgpu-core/src/validation.rs vendored
View file

@ -263,6 +263,8 @@ pub enum StageError {
#[source]
error: InputError,
},
#[error("location[{location}] is provided by the previous stage output but is not consumed as input by this stage.")]
InputNotConsumed { location: wgt::ShaderLocation },
}
fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option<naga::StorageFormat> {
@ -702,7 +704,11 @@ impl NumericType {
(NumericDimension::Vector(Vs::Quad), Sk::Sint)
}
Tf::Rg11b10Float => (NumericDimension::Vector(Vs::Tri), Sk::Float),
Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => {
Tf::Depth32Float
| Tf::Depth32FloatStencil8
| Tf::Depth24Plus
| Tf::Depth24PlusStencil8
| Tf::Depth24UnormStencil8 => {
panic!("Unexpected depth format")
}
Tf::Rgb9e5Ufloat => (NumericDimension::Vector(Vs::Tri), Sk::Float),
@ -1155,6 +1161,21 @@ impl Interface {
}
}
// Check all vertex outputs and make sure the fragment shader consumes them.
if shader_stage == naga::ShaderStage::Fragment {
for &index in inputs.keys() {
// This is a linear scan, but the count should be low enough that it's fine.
let found = entry_point.inputs.iter().any(|v| match *v {
Varying::Local { location, .. } => location == index,
Varying::BuiltIn(_) => false,
});
if !found {
return Err(StageError::InputNotConsumed { location: index });
}
}
}
if shader_stage == naga::ShaderStage::Vertex {
for output in entry_point.outputs.iter() {
//TODO: count builtins towards the limit?

third_party/rust/wgpu-hal/.cargo-checksum.json vendored
View file

@ -1 +1 @@
{"files":{"Cargo.toml":"e747e47c2303651b01be2badaa5c1df53a973d3dd7aa3e5de50247599392b450","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"c745317191eab3159465f79f4a693f671e2f75877dc3b7215a6a90d70e88ec7f","examples/halmark/shader.wgsl":"59e3628abe34c66708bf0106658e791ef24357df3cae72194d34ff07b40e8007","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"d6db84b269b934409ef85fa31914b3f4bc7e7012e40269aad3eff5454eae2a09","src/auxil/dxgi/conv.rs":"cbdb037f2be37b3886f1449b4872a8f248390e185c3d32bf61d64aef76e2008e","src/auxil/dxgi/exception.rs":"f8d69d3d475e03c4d96d22778e5a6f322afd98fcfafb1414cd4a76239fa97a37","src/auxil/dxgi/factory.rs":"82451fcfcc1f73a570ae9e708c94efa9c125d269dfb7396de97da5b32f8a4090","src/auxil/dxgi/mod.rs":"63db737b48378d4843e2f7904f104790688029ff614bc80387cd9efe444f1e52","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/mod.rs":"f899555124ad6d44f5326ef935f4911702539fd933ec2ab07c6009badb3ea22c","src/auxil/renderdoc.rs":"3a4da908ebd6230177ca55c541c8278639e83d78badb4595a941aea30dd7f80f","src/dx11/adapter.rs":"bf123464ef748d021f2e0c40d27b3f6bdd50222c6f91cce6d25686a912eef093","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"76ac52095c639482adc2058509cd3acafd49cebc0694fcd64f8d9f53abc823de","src/dx11/instance.rs":"3bbf2730956472cb8023bd8fbd2d53e49f93c5e4ce3d14664112a293a165d191","src/dx11/library.rs":"0da08a780eefa7ff50f2e0998117202f26e5dd3d3a433c58b585801cff9863d2","src/dx11/mod.rs":"e4f7c6100e1bec479b41f3e3af96e01d53e6597c1c3a8fcde6f14cc9eb8537f8","src/dx12/adapter.rs":"1bc8807a28a961df304b44314500ff2239bc53cc72c9308fab28504a6f00c54b","src/dx12/command.rs":"f9969744663c1f01fca3892b93fc516363fa8725739d270a5f633db78f7902da","src/dx12/conv.rs":"c27336e5b576a7e05a576548fa0e6588ff77be4208f08aac0bea7e649ed1301b","src/dx12/descriptor.rs":"7145d3dc6be13fae4cf6bb8bf34a1ea1749ad87e5f429b84f3cbbea7bf63c148","src/dx12/device.rs":"27f47868a77c77a74e63de7f2d152ac22da34559286f13add17568b83d9f7e1e","src/dx12/instance.rs":"ccc36443cb1df8ab8ed8366cf8599ec3d75fb5fefa5f9bb0f0f0b5e6fc1c5102","src/dx12/mod.rs":"e88f7396dca4aba859a6e28d3f9de64a57a0df85acd53cecd6ada3d96386062c","src/dx12/view.rs":"b7a5cb8933f30517a97b4bd767e10b4c09f686dbf493d53b9b265d2d0b16f1a6","src/empty.rs":"6bf65e405f63eff49b25d079af644b352b95a9c7edcf3a57be2e96a50307b66b","src/gles/adapter.rs":"1e246de20cdbbb5d1727955c618486ac967c2e9f326886feb609f8b321114d74","src/gles/command.rs":"3bef8a822b59c7e986c2c6dd084a2c537a96de77e2b48522f86210093daad3d0","src/gles/conv.rs":"c5d6ba4afd2b3245b1a36a84e4c392c640e6170a1e81df3741bf5323c57bdc51","src/gles/device.rs":"66c30c4010f410bf3b8a03ee9d8e14753832fa2b6e17b518481281f06e3d7cd9","src/gles/egl.rs":"38dc851eede42b6be2ff02f15fef3a4a116cd1b5803974fada89ac86d42a5df4","src/gles/mod.rs":"8686d9bcfb8a7be6f3b8ff7d96e7538e71c3d37b2b09762b06aa304518165dfd","src/gles/queue.rs":"c57f634c983dca6bd657d2f6150f699d9648e6b5726ead9bb4310dd975171cdd","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"083500c0b36d079a82754895d
06b993ea8ed4393690b226c85f07cbec373a730","src/lib.rs":"bebe5582d5e9563aebb0d44e6f267dc1eeb556557761bb1afc5f4191634a8588","src/metal/adapter.rs":"83ef6668160178fcce7820d3036c25aa35d02932577e45577832f19b93c8a12d","src/metal/command.rs":"b06983d7e11cdde526b7c9f5f0b86f1ea8faef02b7666367cb231211a8301570","src/metal/conv.rs":"517c491a87ba57f275f4a2f130ef1fc2c17528176ebc4813772f9bcd86ffd8e5","src/metal/device.rs":"dd823c8e12ba3ed69ef7cdcb543e8d995d0056d1f838516b0901068c83d8ffe2","src/metal/mod.rs":"c4f3959732f5f506fa881aa5812205a6452d6a946d661d7f81d1c7785359a10c","src/metal/surface.rs":"82836cadc751d94fb016bd590cdfec5649cbfae2f44d14599ed074dfb0a004dc","src/vulkan/adapter.rs":"6dc8f42fce7ea939134860944495fe687f2bffd47a97dc8dfd14ff34c11cd5b5","src/vulkan/command.rs":"60d1867acd0e46c34dabecea708cd776a1f435721b6673a506b5bb8aee87ff80","src/vulkan/conv.rs":"0d7b706a854ff3fdcea0e66f0d8563d9c2aa1f82b2e84692c16f4bfcc3872ed5","src/vulkan/device.rs":"472e915c73e69be8559e460061d720a32f7e8b5d3601982bc2bc588cc2b80a5d","src/vulkan/instance.rs":"49bb57b65c886c14f258e8a7fc183138765310ac7efd2fe3b26115e7942aa1fe","src/vulkan/mod.rs":"95347a82e2a276c1953e542c838f2ae07acc18d4187025fb22c264e61684860d"},"package":null}
{"files":{"Cargo.toml":"61752b031b63d9ce967085f9a43eb3dbbad3b472bd264612a88faf3e7d6fcd57","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"78377f5876fafd77963eff7e3c2ba3a7e3ad5cf9201b09ed5612e49c2288eb18","examples/halmark/main.rs":"fefa4f8d16f1a40156e0c0ce7aee06569b222a7a6284b69a000adeebb34a915d","examples/halmark/shader.wgsl":"59e3628abe34c66708bf0106658e791ef24357df3cae72194d34ff07b40e8007","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"d6db84b269b934409ef85fa31914b3f4bc7e7012e40269aad3eff5454eae2a09","src/auxil/dxgi/conv.rs":"310a23866e652257e3dca55c85c78420118e6dea4e69ff907db4a52bda9ac1c5","src/auxil/dxgi/exception.rs":"f8d69d3d475e03c4d96d22778e5a6f322afd98fcfafb1414cd4a76239fa97a37","src/auxil/dxgi/factory.rs":"82451fcfcc1f73a570ae9e708c94efa9c125d269dfb7396de97da5b32f8a4090","src/auxil/dxgi/mod.rs":"63db737b48378d4843e2f7904f104790688029ff614bc80387cd9efe444f1e52","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/mod.rs":"f899555124ad6d44f5326ef935f4911702539fd933ec2ab07c6009badb3ea22c","src/auxil/renderdoc.rs":"3a4da908ebd6230177ca55c541c8278639e83d78badb4595a941aea30dd7f80f","src/dx11/adapter.rs":"bf123464ef748d021f2e0c40d27b3f6bdd50222c6f91cce6d25686a912eef093","src/dx11/command.rs":"cdad8dcdb800acba56c931f1726ddada652af18db0f066465af643f82a034492","src/dx11/device.rs":"76ac52095c639482adc2058509cd3acafd49cebc0694fcd64f8d9f53abc823de","src/dx11/instance.rs":"3bbf2730956472cb8023bd8fbd2d53e49f93c5e4ce3d14664112a293a165d191","src/dx11/library.rs":"0da08a780eefa7ff50f2e0998117202f26e5dd3d3a433c58b585801cff9863d2","src/dx11/mod.rs":"e4f7c6100e1bec479b41f3e3af96e01d53e6597c1c3a8fcde6f14cc9eb8537f8","src/dx12/adapter.rs":"3d830a70684c568a0b3f226beecc8e0dd311c3efd2b1be2caa629f688e98511e","src/dx12/command.rs":"e48636f686f4ff9efc1758f4e54522aeda284d27439c87c6a669a55352294d58","src/dx12/conv.rs":"e1bc82d9f0c019bb67aa7ee8d59e4677c047e56fee4ce3154ebc50e5388850cd","src/dx12/descriptor.rs":"7145d3dc6be13fae4cf6bb8bf34a1ea1749ad87e5f429b84f3cbbea7bf63c148","src/dx12/device.rs":"1dd830070de6e0a755164f96408d50e5c8a1bbfee539a1183a57c8f93c79e669","src/dx12/instance.rs":"ccc36443cb1df8ab8ed8366cf8599ec3d75fb5fefa5f9bb0f0f0b5e6fc1c5102","src/dx12/mod.rs":"e88f7396dca4aba859a6e28d3f9de64a57a0df85acd53cecd6ada3d96386062c","src/dx12/view.rs":"b7a5cb8933f30517a97b4bd767e10b4c09f686dbf493d53b9b265d2d0b16f1a6","src/empty.rs":"6bf65e405f63eff49b25d079af644b352b95a9c7edcf3a57be2e96a50307b66b","src/gles/adapter.rs":"47403c6cf736659b6c035873346e0aa1760b8b4b5763e64b9783e1358e599ba0","src/gles/command.rs":"31c85f3841131dc34553f7a66339396650ceb19763fa6c194c10fb4a5a3fc07e","src/gles/conv.rs":"1462ce906a4fe83139cc8375e385f8ce5a15d70588b81083ae8d5d9104f4457e","src/gles/device.rs":"66c30c4010f410bf3b8a03ee9d8e14753832fa2b6e17b518481281f06e3d7cd9","src/gles/egl.rs":"16516ef1ad62a976996a1b2123fd89ce6835a8468a2915841efd558516bb8b4f","src/gles/mod.rs":"75612e8ddd91735ba7b1bb7ecb58210b7b8469bde9671e437206c010600d16a2","src/gles/queue.rs":"b6dd8404ff53f1f9a8c9de87d4b78bd42468c146560d26fb585801d813919dab","src/gles/shaders/clear.frag":"aac702eed9ece5482db5ba6783a678b119a5e7802b1ecf93f4975dee8acab0b3","src/gles/shaders/clear.vert":"8f636168e1da2cac48091c466a543c3b09fb4a0dd8c60c1c9bf34cc890766740","src/gles/shaders/present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e
9230768e9f39d47f3ff5","src/gles/shaders/present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"083500c0b36d079a82754895d06b993ea8ed4393690b226c85f07cbec373a730","src/lib.rs":"dbd24a5fa263412c16cf821e6ff51ebda07608776accbd8b0bfb792435740619","src/metal/adapter.rs":"78f4a9eff186ab919e7c8900c08cd0b3325e15bf1656d5dcdae30a96a9d76f87","src/metal/command.rs":"b06983d7e11cdde526b7c9f5f0b86f1ea8faef02b7666367cb231211a8301570","src/metal/conv.rs":"2349ec6331a7a471c06615be249dc22b808742aca222e6d8861662d848b0c094","src/metal/device.rs":"dd823c8e12ba3ed69ef7cdcb543e8d995d0056d1f838516b0901068c83d8ffe2","src/metal/mod.rs":"c4f3959732f5f506fa881aa5812205a6452d6a946d661d7f81d1c7785359a10c","src/metal/surface.rs":"82836cadc751d94fb016bd590cdfec5649cbfae2f44d14599ed074dfb0a004dc","src/vulkan/adapter.rs":"90c4f57483589a09d9840c3f93efb8da66bc9eb5be975899877aa0192f86e4bd","src/vulkan/command.rs":"60d1867acd0e46c34dabecea708cd776a1f435721b6673a506b5bb8aee87ff80","src/vulkan/conv.rs":"b480f9d1cde0df92d6f9a07e8a42b86aaeb251f9b0692038286f4994caf45fec","src/vulkan/device.rs":"9b264c74f581345be889f1ed61ad6f7ab22e12e04183eb954dbfed1681c32d0c","src/vulkan/instance.rs":"c078d529f6955a662a3adc7739ffdb8a01b83dbef8dd1e2c3810d232b82cbb18","src/vulkan/mod.rs":"1ba41f2ea7650dc0757c1444ef62c95a8aa0f6671d98c73b4ea80eb4ea60f289"},"package":null}

third_party/rust/wgpu-hal/Cargo.toml (vendored)

@ -51,7 +51,7 @@ foreign-types = { version = "0.3", optional = true }
ash = { version = "0.37", optional = true }
gpu-alloc = { version = "0.5", optional = true }
gpu-descriptor = { version = "0.2", optional = true }
inplace_it = { version ="0.3.3", optional = true }
inplace_it = { version = "0.3.3", optional = true }
# backend: Gles
glow = { version = "0.11.1", optional = true }
@ -92,20 +92,20 @@ js-sys = { version = "0.3" }
[dependencies.naga]
git = "https://github.com/gfx-rs/naga"
rev = "1aa91549"
rev = "571302e"
#version = "0.8"
# DEV dependencies
[dev-dependencies.naga]
git = "https://github.com/gfx-rs/naga"
rev = "1aa91549"
rev = "571302e"
#version = "0.8"
features = ["wgsl-in"]
[dev-dependencies]
env_logger = "0.9"
winit = "0.26" # for "halmark" example
winit = "0.26" # for "halmark" example
[target.'cfg(not(target_arch = "wasm32"))'.dev-dependencies]
glutin = "0.28" # for "gles" example

third_party/rust/wgpu-hal/LICENSE.APACHE (vendored, new file)

@ -0,0 +1,176 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

third_party/rust/wgpu-hal/LICENSE.MIT (vendored, new file)

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2021 The gfx-rs developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@ -685,7 +685,7 @@ impl<A: hal::Api> Example<A> {
let target_barrier1 = hal::TextureBarrier {
texture: surface_tex.borrow(),
range: wgt::ImageSubresourceRange::default(),
usage: hal::TextureUses::COLOR_TARGET..hal::TextureUses::empty(),
usage: hal::TextureUses::COLOR_TARGET..hal::TextureUses::PRESENT,
};
unsafe {
ctx.encoder.end_render_pass();


@ -47,8 +47,9 @@ pub fn map_texture_format(format: wgt::TextureFormat) -> dxgiformat::DXGI_FORMAT
Tf::Rgba32Sint => DXGI_FORMAT_R32G32B32A32_SINT,
Tf::Rgba32Float => DXGI_FORMAT_R32G32B32A32_FLOAT,
Tf::Depth32Float => DXGI_FORMAT_D32_FLOAT,
Tf::Depth32FloatStencil8 => DXGI_FORMAT_D32_FLOAT_S8X24_UINT,
Tf::Depth24Plus => DXGI_FORMAT_D24_UNORM_S8_UINT,
Tf::Depth24PlusStencil8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
Tf::Depth24PlusStencil8 | Tf::Depth24UnormStencil8 => DXGI_FORMAT_D24_UNORM_S8_UINT,
Tf::Rgb9e5Ufloat => DXGI_FORMAT_R9G9B9E5_SHAREDEXP,
Tf::Bc1RgbaUnorm => DXGI_FORMAT_BC1_UNORM,
Tf::Bc1RgbaUnormSrgb => DXGI_FORMAT_BC1_UNORM_SRGB,
@ -96,9 +97,12 @@ pub fn map_texture_format_nosrgb(format: wgt::TextureFormat) -> dxgiformat::DXGI
pub fn map_texture_format_nodepth(format: wgt::TextureFormat) -> dxgiformat::DXGI_FORMAT {
match format {
wgt::TextureFormat::Depth32Float => dxgiformat::DXGI_FORMAT_R32_FLOAT,
wgt::TextureFormat::Depth24Plus | wgt::TextureFormat::Depth24PlusStencil8 => {
dxgiformat::DXGI_FORMAT_R24_UNORM_X8_TYPELESS
wgt::TextureFormat::Depth32FloatStencil8 => {
dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS
}
wgt::TextureFormat::Depth24Plus
| wgt::TextureFormat::Depth24PlusStencil8
| wgt::TextureFormat::Depth24UnormStencil8 => dxgiformat::DXGI_FORMAT_R24_UNORM_X8_TYPELESS,
_ => {
assert_eq!(
crate::FormatAspects::from(format),
@ -112,9 +116,10 @@ pub fn map_texture_format_nodepth(format: wgt::TextureFormat) -> dxgiformat::DXG
pub fn map_texture_format_depth_typeless(format: wgt::TextureFormat) -> dxgiformat::DXGI_FORMAT {
match format {
wgt::TextureFormat::Depth32Float => dxgiformat::DXGI_FORMAT_R32_TYPELESS,
wgt::TextureFormat::Depth24Plus | wgt::TextureFormat::Depth24PlusStencil8 => {
dxgiformat::DXGI_FORMAT_R24G8_TYPELESS
}
wgt::TextureFormat::Depth32FloatStencil8 => dxgiformat::DXGI_FORMAT_R32G8X24_TYPELESS,
wgt::TextureFormat::Depth24Plus
| wgt::TextureFormat::Depth24PlusStencil8
| wgt::TextureFormat::Depth24UnormStencil8 => dxgiformat::DXGI_FORMAT_R24G8_TYPELESS,
_ => unreachable!(),
}
}
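To see how the three helpers fit together, here is how the newly handled Depth24UnormStencil8 format resolves under each of them, read directly off the hunks above (an illustration, not part of the patch):

    use wgt::TextureFormat as Tf;
    map_texture_format(Tf::Depth24UnormStencil8);                 // DXGI_FORMAT_D24_UNORM_S8_UINT
    map_texture_format_nodepth(Tf::Depth24UnormStencil8);        // DXGI_FORMAT_R24_UNORM_X8_TYPELESS
    map_texture_format_depth_typeless(Tf::Depth24UnormStencil8); // DXGI_FORMAT_R24G8_TYPELESS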


@ -185,6 +185,8 @@ impl super::Adapter {
let mut features = wgt::Features::empty()
| wgt::Features::DEPTH_CLIP_CONTROL
| wgt::Features::DEPTH24UNORM_STENCIL8
| wgt::Features::DEPTH32FLOAT_STENCIL8
| wgt::Features::INDIRECT_FIRST_INSTANCE
| wgt::Features::MAPPABLE_PRIMARY_BUFFERS
| wgt::Features::MULTI_DRAW_INDIRECT


@ -287,7 +287,7 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
StateAfter: s1,
};
self.temp.barriers.push(raw);
} else if barrier.usage.start == crate::BufferUses::STORAGE_WRITE {
} else if barrier.usage.start == crate::BufferUses::STORAGE_READ_WRITE {
let mut raw = d3d12::D3D12_RESOURCE_BARRIER {
Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV,
Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE,
@ -382,7 +382,7 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
}
}
}
} else if barrier.usage.start == crate::TextureUses::STORAGE_WRITE {
} else if barrier.usage.start == crate::TextureUses::STORAGE_READ_WRITE {
let mut raw = d3d12::D3D12_RESOURCE_BARRIER {
Type: d3d12::D3D12_RESOURCE_BARRIER_TYPE_UAV,
Flags: d3d12::D3D12_RESOURCE_BARRIER_FLAG_NONE,

third_party/rust/wgpu-hal/src/dx12/conv.rs (vendored)

@ -3,7 +3,7 @@ use winapi::um::{d3d12, d3dcommon};
pub fn map_buffer_usage_to_resource_flags(usage: crate::BufferUses) -> d3d12::D3D12_RESOURCE_FLAGS {
let mut flags = 0;
if usage.contains(crate::BufferUses::STORAGE_WRITE) {
if usage.contains(crate::BufferUses::STORAGE_READ_WRITE) {
flags |= d3d12::D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
}
flags
@ -33,7 +33,7 @@ pub fn map_texture_usage_to_resource_flags(
flags |= d3d12::D3D12_RESOURCE_FLAG_DENY_SHADER_RESOURCE;
}
}
if usage.contains(crate::TextureUses::STORAGE_WRITE) {
if usage.contains(crate::TextureUses::STORAGE_READ_WRITE) {
flags |= d3d12::D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS;
}
@ -130,7 +130,7 @@ pub fn map_buffer_usage_to_state(usage: crate::BufferUses) -> d3d12::D3D12_RESOU
if usage.intersects(Bu::VERTEX | Bu::UNIFORM) {
state |= d3d12::D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER;
}
if usage.intersects(Bu::STORAGE_WRITE) {
if usage.intersects(Bu::STORAGE_READ_WRITE) {
state |= d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
} else if usage.intersects(Bu::STORAGE_READ) {
state |= d3d12::D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE
@ -170,7 +170,7 @@ pub fn map_texture_usage_to_state(usage: crate::TextureUses) -> d3d12::D3D12_RES
if usage.intersects(Tu::DEPTH_STENCIL_WRITE) {
state |= d3d12::D3D12_RESOURCE_STATE_DEPTH_WRITE;
}
if usage.intersects(Tu::STORAGE_READ | Tu::STORAGE_WRITE) {
if usage.intersects(Tu::STORAGE_READ | Tu::STORAGE_READ_WRITE) {
state |= d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS;
}
state
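As a worked example of the renamed flag (hedged: this assumes no other usage bits are set and that the function starts from the default D3D12_RESOURCE_STATE_COMMON):

    // Read-write storage now resolves to the UAV state, while read-only
    // storage falls through to the shader-resource states below it.
    let state = map_buffer_usage_to_state(crate::BufferUses::STORAGE_READ_WRITE);
    assert_eq!(state, d3d12::D3D12_RESOURCE_STATE_UNORDERED_ACCESS);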


@ -428,7 +428,7 @@ impl crate::Device<super::Api> for super::Device {
|| !desc.usage.intersects(
crate::TextureUses::RESOURCE
| crate::TextureUses::STORAGE_READ
| crate::TextureUses::STORAGE_WRITE,
| crate::TextureUses::STORAGE_READ_WRITE,
) {
auxil::dxgi::conv::map_texture_format(desc.format)
} else {
@ -516,10 +516,9 @@ impl crate::Device<super::Api> for super::Device {
} else {
None
},
handle_uav: if desc
.usage
.intersects(crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_WRITE)
{
handle_uav: if desc.usage.intersects(
crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE,
) {
let raw_desc = view_desc.to_uav();
let handle = self.srv_uav_pool.lock().alloc_handle();
self.raw.CreateUnorderedAccessView(


@ -659,7 +659,11 @@ impl crate::Adapter<super::Api> for super::Adapter {
Tf::Rgba32Uint => renderable | storage,
Tf::Rgba32Sint => renderable | storage,
Tf::Rgba32Float => unfilterable | storage,
Tf::Depth32Float | Tf::Depth24Plus | Tf::Depth24PlusStencil8 => depth,
Tf::Depth32Float
| Tf::Depth32FloatStencil8
| Tf::Depth24Plus
| Tf::Depth24PlusStencil8
| Tf::Depth24UnormStencil8 => depth,
Tf::Rgb9e5Ufloat
| Tf::Bc1RgbaUnorm
| Tf::Bc1RgbaUnormSrgb

third_party/rust/wgpu-hal/src/gles/command.rs (vendored)

@ -230,7 +230,11 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
}
for bar in barriers {
// GLES only synchronizes storage -> anything explicitly
if !bar.usage.start.contains(crate::BufferUses::STORAGE_WRITE) {
if !bar
.usage
.start
.contains(crate::BufferUses::STORAGE_READ_WRITE)
{
continue;
}
self.cmd_buffer
@ -253,7 +257,11 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
let mut combined_usage = crate::TextureUses::empty();
for bar in barriers {
// GLES only synchronizes storage -> anything explicitly
if !bar.usage.start.contains(crate::TextureUses::STORAGE_WRITE) {
if !bar
.usage
.start
.contains(crate::TextureUses::STORAGE_READ_WRITE)
{
continue;
}
// unlike buffers, there is no need for a concrete texture
@ -520,12 +528,19 @@ impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
}
}
if let Some(ref dsat) = desc.depth_stencil_attachment {
if !dsat.depth_ops.contains(crate::AttachmentOps::LOAD) {
let clear_depth = !dsat.depth_ops.contains(crate::AttachmentOps::LOAD);
let clear_stencil = !dsat.stencil_ops.contains(crate::AttachmentOps::LOAD);
if clear_depth && clear_stencil {
self.cmd_buffer.commands.push(C::ClearDepthAndStencil(
dsat.clear_value.0,
dsat.clear_value.1,
));
} else if clear_depth {
self.cmd_buffer
.commands
.push(C::ClearDepth(dsat.clear_value.0));
}
if !dsat.stencil_ops.contains(crate::AttachmentOps::LOAD) {
} else if clear_stencil {
self.cmd_buffer
.commands
.push(C::ClearStencil(dsat.clear_value.1));
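Summarizing the rewritten branch above, each render pass now emits at most one clear command for the depth-stencil attachment (a restatement of the code, not new behavior):

    clear_depth && clear_stencil  =>  C::ClearDepthAndStencil(depth, stencil)
    clear_depth only              =>  C::ClearDepth(depth)
    clear_stencil only            =>  C::ClearStencil(stencil)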

third_party/rust/wgpu-hal/src/gles/conv.rs (vendored)

@ -57,12 +57,15 @@ impl super::AdapterShared {
Tf::Rgba32Sint => (glow::RGBA32I, glow::RGBA_INTEGER, glow::INT),
Tf::Rgba32Float => (glow::RGBA32F, glow::RGBA, glow::FLOAT),
Tf::Depth32Float => (glow::DEPTH_COMPONENT32F, glow::DEPTH_COMPONENT, glow::FLOAT),
Tf::Depth32FloatStencil8 => {
(glow::DEPTH32F_STENCIL8, glow::DEPTH_COMPONENT, glow::FLOAT)
}
Tf::Depth24Plus => (
glow::DEPTH_COMPONENT24,
glow::DEPTH_COMPONENT,
glow::UNSIGNED_NORMALIZED,
),
Tf::Depth24PlusStencil8 => (
Tf::Depth24PlusStencil8 | Tf::Depth24UnormStencil8 => (
glow::DEPTH24_STENCIL8,
glow::DEPTH_COMPONENT,
glow::UNSIGNED_INT,

third_party/rust/wgpu-hal/src/gles/egl.rs (vendored)

@ -580,6 +580,24 @@ pub struct Instance {
inner: Mutex<Inner>,
}
impl Instance {
pub fn raw_display(&self) -> egl::Display {
self.inner
.try_lock()
.expect("Could not lock instance. This is most-likely a deadlock.")
.egl
.display
}
/// Returns the version of the EGL display.
pub fn egl_version(&self) -> (i32, i32) {
self.inner
.try_lock()
.expect("Could not lock instance. This is most-likely a deadlock.")
.version
}
}
unsafe impl Send for Instance {}
unsafe impl Sync for Instance {}
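A hypothetical embedder using the two new accessors (the method names and return types are exactly those added above; the `instance` binding is assumed):

    let display: egl::Display = instance.raw_display();
    let (major, minor) = instance.egl_version();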

third_party/rust/wgpu-hal/src/gles/mod.rs (vendored)

@ -667,6 +667,11 @@ enum Command {
ClearColorI(u32, [i32; 4]),
ClearDepth(f32),
ClearStencil(u32),
// Clearing both the depth and stencil buffer individually appears to
// result in the stencil buffer failing to clear, at least in WebGL.
// It is also more efficient to emit a single command instead of two for
// this.
ClearDepthAndStencil(f32, u32),
BufferBarrier(glow::Buffer, crate::BufferUses),
TextureBarrier(crate::TextureUses),
SetViewport {

third_party/rust/wgpu-hal/src/gles/queue.rs (vendored)

@ -758,6 +758,9 @@ impl super::Queue {
C::ClearStencil(value) => {
gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]);
}
C::ClearDepthAndStencil(depth, stencil_value) => {
gl.clear_buffer_depth_stencil(glow::DEPTH_STENCIL, 0, depth, stencil_value as i32);
}
C::BufferBarrier(raw, usage) => {
let mut flags = 0;
if usage.contains(crate::BufferUses::VERTEX) {
@ -787,9 +790,9 @@ impl super::Queue {
if usage.intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE) {
flags |= glow::BUFFER_UPDATE_BARRIER_BIT;
}
if usage
.intersects(crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_WRITE)
{
if usage.intersects(
crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_READ_WRITE,
) {
flags |= glow::SHADER_STORAGE_BARRIER_BIT;
}
gl.memory_barrier(flags);
@ -800,7 +803,7 @@ impl super::Queue {
flags |= glow::TEXTURE_FETCH_BARRIER_BIT;
}
if usage.intersects(
crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_WRITE,
crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE,
) {
flags |= glow::SHADER_IMAGE_ACCESS_BARRIER_BIT;
}

third_party/rust/wgpu-hal/src/lib.rs (vendored)

@ -602,7 +602,9 @@ impl From<wgt::TextureFormat> for FormatAspects {
fn from(format: wgt::TextureFormat) -> Self {
match format {
wgt::TextureFormat::Depth32Float | wgt::TextureFormat::Depth24Plus => Self::DEPTH,
wgt::TextureFormat::Depth24PlusStencil8 => Self::DEPTH | Self::STENCIL,
wgt::TextureFormat::Depth32FloatStencil8
| wgt::TextureFormat::Depth24PlusStencil8
| wgt::TextureFormat::Depth24UnormStencil8 => Self::DEPTH | Self::STENCIL,
_ => Self::COLOR,
}
}
@ -626,53 +628,77 @@ bitflags!(
bitflags::bitflags! {
/// Similar to `wgt::BufferUsages` but for internal use.
pub struct BufferUses: u32 {
pub struct BufferUses: u16 {
/// The argument to a read-only mapping.
const MAP_READ = 1 << 0;
/// The argument to a write-only mapping.
const MAP_WRITE = 1 << 1;
/// The source of a hardware copy.
const COPY_SRC = 1 << 2;
/// The destination of a hardware copy.
const COPY_DST = 1 << 3;
/// The index buffer used for drawing.
const INDEX = 1 << 4;
/// A vertex buffer used for drawing.
const VERTEX = 1 << 5;
/// A uniform buffer bound in a bind group.
const UNIFORM = 1 << 6;
/// A read-only storage buffer used in a bind group.
const STORAGE_READ = 1 << 7;
const STORAGE_WRITE = 1 << 8;
/// A read-write or write-only buffer used in a bind group.
const STORAGE_READ_WRITE = 1 << 8;
/// The indirect or count buffer in a indirect draw or dispatch.
const INDIRECT = 1 << 9;
/// The combination of usages that can be used together (read-only).
/// The combination of states that a buffer may be in _at the same time_.
const INCLUSIVE = Self::MAP_READ.bits | Self::COPY_SRC.bits |
Self::INDEX.bits | Self::VERTEX.bits | Self::UNIFORM.bits |
Self::STORAGE_READ.bits | Self::INDIRECT.bits;
/// The combination of exclusive usages (write-only and read-write).
/// These usages may still show up with others, but can't automatically be combined.
const EXCLUSIVE = Self::MAP_WRITE.bits | Self::COPY_DST.bits | Self::STORAGE_WRITE.bits;
/// The combination of states that a buffer must exclusively be in.
const EXCLUSIVE = Self::MAP_WRITE.bits | Self::COPY_DST.bits | Self::STORAGE_READ_WRITE.bits;
/// The combination of all usages that are guaranteed to be ordered by the hardware.
/// If a usage is not ordered, then even if it doesn't change between draw calls, there
/// still need to be pipeline barriers inserted for synchronization.
/// If a usage is ordered, then if the buffer state doesn't change between draw calls, there
/// are no barriers needed for synchronization.
const ORDERED = Self::INCLUSIVE.bits | Self::MAP_WRITE.bits;
}
}
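The reworked ORDERED comment implies a simple barrier rule. A minimal sketch of how a state tracker could consume these flags (an illustration written against the bitflags above, not wgpu-core's actual tracker code):

    fn buffer_barrier_needed(from: BufferUses, to: BufferUses) -> bool {
        // A state change always needs a barrier; an unchanged state only
        // needs one when that state is not hardware-ordered.
        from != to || !BufferUses::ORDERED.contains(from)
    }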
bitflags::bitflags! {
/// Similar to `wgt::TextureUsages` but for internal use.
pub struct TextureUses: u32 {
const COPY_SRC = 1 << 0;
const COPY_DST = 1 << 1;
const RESOURCE = 1 << 2;
const COLOR_TARGET = 1 << 3;
const DEPTH_STENCIL_READ = 1 << 4;
const DEPTH_STENCIL_WRITE = 1 << 5;
const STORAGE_READ = 1 << 6;
const STORAGE_WRITE = 1 << 7;
/// The combination of usages that can be used together (read-only).
pub struct TextureUses: u16 {
/// The texture is in an unknown state.
const UNINITIALIZED = 1 << 0;
/// Ready to present image to the surface.
const PRESENT = 1 << 1;
/// The source of a hardware copy.
const COPY_SRC = 1 << 2;
/// The destination of a hardware copy.
const COPY_DST = 1 << 3;
/// Read-only sampled or fetched resource.
const RESOURCE = 1 << 4;
/// The color target of a renderpass.
const COLOR_TARGET = 1 << 5;
/// Read-only depth stencil usage.
const DEPTH_STENCIL_READ = 1 << 6;
/// Read-write depth stencil usage.
const DEPTH_STENCIL_WRITE = 1 << 7;
/// Read-only storage buffer usage. Corresponds to a UAV in d3d, so is exclusive, despite being read only.
const STORAGE_READ = 1 << 8;
/// Read-write or write-only storage buffer usage.
const STORAGE_READ_WRITE = 1 << 9;
/// The combination of states that a texture may be in _at the same time_.
const INCLUSIVE = Self::COPY_SRC.bits | Self::RESOURCE.bits | Self::DEPTH_STENCIL_READ.bits;
/// The combination of exclusive usages (write-only and read-write).
/// These usages may still show up with others, but can't automatically be combined.
const EXCLUSIVE = Self::COPY_DST.bits | Self::COLOR_TARGET.bits | Self::DEPTH_STENCIL_WRITE.bits | Self::STORAGE_READ.bits | Self::STORAGE_WRITE.bits;
/// The combination of states that a texture must exclusively be in.
const EXCLUSIVE = Self::COPY_DST.bits | Self::COLOR_TARGET.bits | Self::DEPTH_STENCIL_WRITE.bits | Self::STORAGE_READ.bits | Self::STORAGE_READ_WRITE.bits | Self::PRESENT.bits;
/// The combination of all usages that are guaranteed to be ordered by the hardware.
/// If a usage is not ordered, then even if it doesn't change between draw calls, there
/// still need to be pipeline barriers inserted for synchronization.
/// If a usage is ordered, then if the texture state doesn't change between draw calls, there
/// are no barriers needed for synchronization.
const ORDERED = Self::INCLUSIVE.bits | Self::COLOR_TARGET.bits | Self::DEPTH_STENCIL_WRITE.bits | Self::STORAGE_READ.bits;
//TODO: remove this
const UNINITIALIZED = 0xFFFF;
/// Flag used by the wgpu-core texture tracker to say a texture is in different states for every sub-resource.
const COMPLEX = 1 << 10;
/// Flag used by the wgpu-core texture tracker to say that the tracker does not know the state of the sub-resource.
/// This is different from UNINITIALIZED as that says the tracker does know, but the texture has not been initialized.
const UNKNOWN = 1 << 11;
}
}


@ -180,21 +180,31 @@ impl crate::Adapter<super::Api> for super::Adapter {
};
flags
}
Tf::Depth32Float => {
let mut flats =
Tf::Depth32Float | Tf::Depth32FloatStencil8 => {
let mut flags =
Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::MULTISAMPLE | msaa_resolve_apple3x_if;
if pc.format_depth32float_filter {
flats |= Tfc::SAMPLED_LINEAR
flags |= Tfc::SAMPLED_LINEAR
}
flats
flags
}
Tf::Depth24Plus => Tfc::empty(),
Tf::Depth24PlusStencil8 => {
if pc.msaa_desktop {
Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::SAMPLED_LINEAR | Tfc::MULTISAMPLE
Tf::Depth24Plus | Tf::Depth24PlusStencil8 => {
let mut flags = Tfc::DEPTH_STENCIL_ATTACHMENT | Tfc::MULTISAMPLE;
if pc.format_depth24_stencil8 {
flags |= Tfc::SAMPLED_LINEAR | Tfc::MULTISAMPLE_RESOLVE
} else {
Tfc::empty()
flags |= msaa_resolve_apple3x_if;
if pc.format_depth32float_filter {
flags |= Tfc::SAMPLED_LINEAR
}
}
flags
}
Tf::Depth24UnormStencil8 => {
Tfc::DEPTH_STENCIL_ATTACHMENT
| Tfc::SAMPLED_LINEAR
| Tfc::MULTISAMPLE
| Tfc::MULTISAMPLE_RESOLVE
}
Tf::Rgb9e5Ufloat => {
if pc.msaa_apple3 {
@ -749,7 +759,8 @@ impl super::PrivateCapabilities {
| F::POLYGON_MODE_LINE
| F::CLEAR_TEXTURE
| F::TEXTURE_FORMAT_16BIT_NORM
| F::SHADER_FLOAT16;
| F::SHADER_FLOAT16
| F::DEPTH32FLOAT_STENCIL8;
features.set(F::TEXTURE_COMPRESSION_ASTC_LDR, self.format_astc);
features.set(F::TEXTURE_COMPRESSION_ASTC_HDR, self.format_astc_hdr);
@ -757,6 +768,7 @@ impl super::PrivateCapabilities {
features.set(F::TEXTURE_COMPRESSION_ETC2, self.format_eac_etc);
features.set(F::DEPTH_CLIP_CONTROL, self.supports_depth_clip_control);
features.set(F::DEPTH24UNORM_STENCIL8, self.format_depth24_stencil8);
features.set(
F::TEXTURE_BINDING_ARRAY
@ -889,6 +901,7 @@ impl super::PrivateCapabilities {
Tf::Rgba32Sint => RGBA32Sint,
Tf::Rgba32Float => RGBA32Float,
Tf::Depth32Float => Depth32Float,
Tf::Depth32FloatStencil8 => Depth32Float_Stencil8,
Tf::Depth24Plus => {
if self.format_depth24_stencil8 {
Depth24Unorm_Stencil8
@ -903,6 +916,7 @@ impl super::PrivateCapabilities {
Depth32Float_Stencil8
}
}
Tf::Depth24UnormStencil8 => Depth24Unorm_Stencil8,
Tf::Rgb9e5Ufloat => RGB9E5Float,
Tf::Bc1RgbaUnorm => BC1_RGBA,
Tf::Bc1RgbaUnormSrgb => BC1_RGBA_sRGB,

third_party/rust/wgpu-hal/src/metal/conv.rs (vendored)

@ -9,11 +9,13 @@ pub fn map_texture_usage(usage: crate::TextureUses) -> mtl::MTLTextureUsage {
);
mtl_usage.set(
mtl::MTLTextureUsage::ShaderRead,
usage.intersects(Tu::RESOURCE | Tu::DEPTH_STENCIL_READ | Tu::STORAGE_READ),
usage.intersects(
Tu::RESOURCE | Tu::DEPTH_STENCIL_READ | Tu::STORAGE_READ | Tu::STORAGE_READ_WRITE,
),
);
mtl_usage.set(
mtl::MTLTextureUsage::ShaderWrite,
usage.intersects(Tu::STORAGE_WRITE),
usage.intersects(Tu::STORAGE_READ_WRITE),
);
mtl_usage


@ -561,6 +561,24 @@ impl PhysicalDeviceFeatures {
);
}
features.set(
F::DEPTH32FLOAT_STENCIL8,
caps.supports_format(
vk::Format::D32_SFLOAT_S8_UINT,
vk::ImageTiling::OPTIMAL,
vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT,
),
);
features.set(
F::DEPTH24UNORM_STENCIL8,
caps.supports_format(
vk::Format::D24_UNORM_S8_UINT,
vk::ImageTiling::OPTIMAL,
vk::FormatFeatureFlags::DEPTH_STENCIL_ATTACHMENT,
),
);
(features, dl_flags)
}
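Downstream code can gate creation of the new depth-stencil formats on the features reported here; a hedged sketch (`features` being the wgt::Features value assembled above):

    if features.contains(wgt::Features::DEPTH32FLOAT_STENCIL8) {
        // Depth32FloatStencil8 textures may be created on this adapter.
    }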
@ -1102,6 +1120,10 @@ impl super::Instance {
}
impl super::Adapter {
pub fn raw_physical_device(&self) -> ash::vk::PhysicalDevice {
self.raw
}
pub fn required_device_extensions(&self, features: wgt::Features) -> Vec<&'static CStr> {
let (supported_extensions, unsupported_extensions) = self
.phd_capabilities

third_party/rust/wgpu-hal/src/vulkan/conv.rs (vendored)

@ -49,6 +49,7 @@ impl super::PrivateCapabilities {
Tf::Rgba32Sint => F::R32G32B32A32_SINT,
Tf::Rgba32Float => F::R32G32B32A32_SFLOAT,
Tf::Depth32Float => F::D32_SFLOAT,
Tf::Depth32FloatStencil8 => F::D32_SFLOAT_S8_UINT,
Tf::Depth24Plus => {
if self.texture_d24 {
F::X8_D24_UNORM_PACK32
@ -63,6 +64,7 @@ impl super::PrivateCapabilities {
F::D32_SFLOAT_S8_UINT
}
}
Tf::Depth24UnormStencil8 => F::D24_UNORM_S8_UINT,
Tf::Rgb9e5Ufloat => F::E5B9G9R9_UFLOAT_PACK32,
Tf::Bc1RgbaUnorm => F::BC1_RGBA_UNORM_BLOCK,
Tf::Bc1RgbaUnormSrgb => F::BC1_RGBA_SRGB_BLOCK,
@ -199,7 +201,7 @@ pub fn derive_image_layout(
vk::ImageLayout::DEPTH_STENCIL_ATTACHMENT_OPTIMAL
}
_ => {
if usage.is_empty() {
if usage == crate::TextureUses::PRESENT {
vk::ImageLayout::PRESENT_SRC_KHR
} else if is_color {
vk::ImageLayout::GENERAL
@ -229,7 +231,7 @@ pub fn map_texture_usage(usage: crate::TextureUses) -> vk::ImageUsageFlags {
) {
flags |= vk::ImageUsageFlags::DEPTH_STENCIL_ATTACHMENT;
}
if usage.intersects(crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_WRITE) {
if usage.intersects(crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE) {
flags |= vk::ImageUsageFlags::STORAGE;
}
flags
@ -275,12 +277,12 @@ pub fn map_texture_usage_to_barrier(
stages |= shader_stages;
access |= vk::AccessFlags::SHADER_READ;
}
if usage.contains(crate::TextureUses::STORAGE_WRITE) {
if usage.contains(crate::TextureUses::STORAGE_READ_WRITE) {
stages |= shader_stages;
access |= vk::AccessFlags::SHADER_WRITE;
access |= vk::AccessFlags::SHADER_READ | vk::AccessFlags::SHADER_WRITE;
}
if usage == crate::TextureUses::UNINITIALIZED || usage.is_empty() {
if usage == crate::TextureUses::UNINITIALIZED || usage == crate::TextureUses::PRESENT {
(
vk::PipelineStageFlags::TOP_OF_PIPE,
vk::AccessFlags::empty(),
@ -308,7 +310,7 @@ pub fn map_vk_image_usage(usage: vk::ImageUsageFlags) -> crate::TextureUses {
bits |= crate::TextureUses::DEPTH_STENCIL_READ | crate::TextureUses::DEPTH_STENCIL_WRITE;
}
if usage.contains(vk::ImageUsageFlags::STORAGE) {
bits |= crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_WRITE;
bits |= crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE;
}
bits
}
@ -456,7 +458,7 @@ pub fn map_buffer_usage(usage: crate::BufferUses) -> vk::BufferUsageFlags {
if usage.contains(crate::BufferUses::UNIFORM) {
flags |= vk::BufferUsageFlags::UNIFORM_BUFFER;
}
if usage.intersects(crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_WRITE) {
if usage.intersects(crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_READ_WRITE) {
flags |= vk::BufferUsageFlags::STORAGE_BUFFER;
}
if usage.contains(crate::BufferUses::INDEX) {
@ -504,9 +506,9 @@ pub fn map_buffer_usage_to_barrier(
stages |= shader_stages;
access |= vk::AccessFlags::SHADER_READ;
}
if usage.intersects(crate::BufferUses::STORAGE_WRITE) {
if usage.intersects(crate::BufferUses::STORAGE_READ_WRITE) {
stages |= shader_stages;
access |= vk::AccessFlags::SHADER_WRITE;
access |= vk::AccessFlags::SHADER_READ | vk::AccessFlags::SHADER_WRITE;
}
if usage.contains(crate::BufferUses::INDEX) {
stages |= vk::PipelineStageFlags::VERTEX_INPUT;
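One observable consequence of the new PRESENT state, read off the branch above (assuming the tuple return shown there): a texture whose usage is exactly PRESENT now maps to an empty barrier.

    let (stages, access) = map_texture_usage_to_barrier(crate::TextureUses::PRESENT);
    // stages == vk::PipelineStageFlags::TOP_OF_PIPE, access == vk::AccessFlags::empty()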


@ -1413,7 +1413,7 @@ impl crate::Device<super::Api> for super::Device {
unsafe fn destroy_shader_module(&self, module: super::ShaderModule) {
match module {
super::ShaderModule::Raw(raw) => {
let _ = self.shared.raw.destroy_shader_module(raw, None);
self.shared.raw.destroy_shader_module(raw, None);
}
super::ShaderModule::Intermediate { .. } => {}
}


@ -133,6 +133,22 @@ impl super::Swapchain {
}
impl super::Instance {
pub fn entry(&self) -> &ash::Entry {
&self.shared.entry
}
pub fn raw_instance(&self) -> &ash::Instance {
&self.shared.raw
}
pub fn driver_api_version(&self) -> u32 {
self.shared.driver_api_version
}
pub fn extensions(&self) -> &[&'static CStr] {
&self.extensions[..]
}
pub fn required_extensions(
entry: &ash::Entry,
flags: crate::InstanceFlags,
@ -266,6 +282,7 @@ impl super::Instance {
get_physical_device_properties,
entry,
has_nv_optimus,
driver_api_version,
}),
extensions,
})
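A hypothetical embedder reading the new driver_api_version alongside the other accessors added above (names per the hunks; the `instance` binding is assumed):

    let entry: &ash::Entry = instance.entry();
    let raw: &ash::Instance = instance.raw_instance();
    let version: u32 = instance.driver_api_version();
    let extensions: &[&'static CStr] = instance.extensions();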

third_party/rust/wgpu-hal/src/vulkan/mod.rs (vendored)

@ -87,6 +87,7 @@ struct InstanceShared {
get_physical_device_properties: Option<khr::GetPhysicalDeviceProperties2>,
entry: ash::Entry,
has_nv_optimus: bool,
driver_api_version: u32,
}
pub struct Instance {


@ -1 +1 @@
{"files":{"Cargo.toml":"742abe387ad0a6b054a3e525040ff1e64f16072ae7cd305db0616beaa844e389","src/lib.rs":"f1262748d81d80812fadec75241a6128842db02dced79c5e6db54586f9b35002"},"package":null}
{"files":{"Cargo.toml":"742abe387ad0a6b054a3e525040ff1e64f16072ae7cd305db0616beaa844e389","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/lib.rs":"bfac3d39cff5da6d59889dc72206ddae4d553a8aaae20c890a204d3bdc7cdf6a"},"package":null}

third_party/rust/wgpu-types/LICENSE.APACHE (vendored, new file)

@ -0,0 +1,176 @@
[Apache License, Version 2.0 — text identical to the copy added above as third_party/rust/wgpu-hal/LICENSE.APACHE.]

Some files were not shown because too many files changed in this diff.