Bug 1878375 - Synchronize vendored Rust libraries with mozilla-central. r=aleca

mozilla-central: 97bce31758a53936d3a729ae713d0aead3a53137
comm-central: f81c4a927046253f16616cf0c54ad3501f0bfa51

Differential Revision: https://phabricator.services.mozilla.com/D213856

--HG--
extra : moz-landing-system : lando
Thunderbird Updatebot 2024-06-15 00:23:02 +00:00
Parent 48d320624f
Commit 0b3aa3ffaf
46 changed files: 1011 additions and 414 deletions

@@ -21,9 +21,9 @@ git = "https://github.com/franziskuskiefer/cose-rust"
rev = "43c22248d136c8b38fe42ea709d08da6355cf04b"
replace-with = "vendored-sources"
[source."git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587"]
[source."git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4"]
git = "https://github.com/gfx-rs/wgpu"
rev = "c7458638d14921c7562e4197ddeefa17be413587"
rev = "6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
replace-with = "vendored-sources"
[source."git+https://github.com/glandium/mio?rev=9a2ef335c366044ffe73b1c4acabe50a1daefe05"]

rust/Cargo.lock (generated, 10 lines changed)

@@ -1055,7 +1055,7 @@ dependencies = [
[[package]]
name = "d3d12"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
source = "git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4#6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
dependencies = [
"bitflags 2.5.0",
"libloading",
@@ -3375,7 +3375,7 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
source = "git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4#6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
dependencies = [
"arrayvec",
"bit-set",
@@ -5671,7 +5671,7 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
source = "git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4#6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
dependencies = [
"arrayvec",
"bit-vec",
@@ -5696,7 +5696,7 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
source = "git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4#6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
dependencies = [
"android_system_properties",
"arrayvec",
@@ -5735,7 +5735,7 @@ dependencies = [
[[package]]
name = "wgpu-types"
version = "0.20.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=c7458638d14921c7562e4197ddeefa17be413587#c7458638d14921c7562e4197ddeefa17be413587"
source = "git+https://github.com/gfx-rs/wgpu?rev=6c370522a72e89c8784ed64c9eb574f1f54d5bd4#6c370522a72e89c8784ed64c9eb574f1f54d5bd4"
dependencies = [
"bitflags 2.5.0",
"js-sys",

@@ -1 +1 @@
{"mc_workspace_toml": "50c105726dd9f7aa5c90114fb6f907234844ce7750dec4e18c3b245b33ae6ba8a44936e692a2ac11271a5cfc1ae8f1f1235482ddb4e72693a9513479d611bc4d", "mc_gkrust_toml": "ec12ac730b7f709bd839c1fc9c65f36ae6f82b481902e72769b3d32cfc0a66a6cbf3246a9ab933eca3b3ca06f4f27fe9e88f4706142732460399344681da9e9e", "mc_cargo_lock": "ffcb84f8c1669cd9fcaf9dc65a259351a7c11611a21dd7eae5869a14f7b9dcd65743cab6a3b9728273864cef0259822ef29bc4b79e6841f3418f97bcae67f522"}
{"mc_workspace_toml": "50c105726dd9f7aa5c90114fb6f907234844ce7750dec4e18c3b245b33ae6ba8a44936e692a2ac11271a5cfc1ae8f1f1235482ddb4e72693a9513479d611bc4d", "mc_gkrust_toml": "ec12ac730b7f709bd839c1fc9c65f36ae6f82b481902e72769b3d32cfc0a66a6cbf3246a9ab933eca3b3ca06f4f27fe9e88f4706142732460399344681da9e9e", "mc_cargo_lock": "45535237803d3acb53d1b0a6b5fd0552e60e4da075c493d04df7be742fe9ba1c070a25af8a9fa444c44cec4f6af888749932c6e535a4c69afea15c71afc42b60"}

third_party/rust/naga/.cargo-checksum.json (vendored, 2 lines changed)

File diff suppressed because one or more lines are too long.

third_party/rust/naga/Cargo.toml (vendored, 2 lines changed)

@@ -74,7 +74,7 @@ version = "0.2.1"
optional = true
[dependencies.serde]
version = "1.0.202"
version = "1.0.203"
features = ["derive"]
optional = true

third_party/rust/naga/src/back/dot/mod.rs (vendored, 2 lines changed)

@@ -244,7 +244,9 @@ impl StatementGraph {
value,
result,
} => {
if let Some(result) = result {
self.emits.push((id, result));
}
self.dependencies.push((id, pointer, "pointer"));
self.dependencies.push((id, value, "value"));
if let crate::AtomicFunction::Exchange { compare: Some(cmp) } = *fun {

third_party/rust/naga/src/back/glsl/mod.rs (vendored, 2 lines changed)

@@ -2368,11 +2368,13 @@ impl<'a, W: Write> Writer<'a, W> {
result,
} => {
write!(self.out, "{level}")?;
if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_ty = ctx.resolve_type(result, &self.module.types);
self.write_value_type(res_ty)?;
write!(self.out, " {res_name} = ")?;
self.named_expressions.insert(result, res_name);
}
let fun_str = fun.to_glsl();
write!(self.out, "atomic{fun_str}(")?;

third_party/rust/naga/src/back/hlsl/writer.rs (vendored, 26 lines changed)

@@ -1919,13 +1919,22 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
result,
} => {
write!(self.out, "{level}")?;
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
let res_name = match result {
None => None,
Some(result) => {
let name = format!("{}{}", back::BAKE_PREFIX, result.index());
match func_ctx.info[result].ty {
proc::TypeResolution::Handle(handle) => self.write_type(module, handle)?,
proc::TypeResolution::Handle(handle) => {
self.write_type(module, handle)?
}
proc::TypeResolution::Value(ref value) => {
self.write_value_type(module, value)?
}
};
write!(self.out, " {name}; ")?;
Some((result, name))
}
};
// Validation ensures that `pointer` has a `Pointer` type.
let pointer_space = func_ctx
@@ -1934,7 +1943,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
.unwrap();
let fun_str = fun.to_hlsl_suffix();
write!(self.out, " {res_name}; ")?;
match pointer_space {
crate::AddressSpace::WorkGroup => {
write!(self.out, "Interlocked{fun_str}(")?;
@@ -1970,8 +1978,16 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
_ => {}
}
self.write_expr(module, value, func_ctx)?;
writeln!(self.out, ", {res_name});")?;
self.named_expressions.insert(result, res_name);
// The `original_value` out parameter is optional for all the
// `Interlocked` functions we generate other than
// `InterlockedExchange`.
if let Some((result, name)) = res_name {
write!(self.out, ", {name}")?;
self.named_expressions.insert(result, name);
}
writeln!(self.out, ");")?;
}
Statement::WorkGroupUniformLoad { pointer, result } => {
self.write_barrier(crate::Barrier::WORK_GROUP, level)?;
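The HLSL change works because the `original_value` out parameter of the `Interlocked*` intrinsics is optional for everything except `InterlockedExchange`. A minimal sketch of the emission pattern, with plain string arguments standing in for the writer's expression plumbing (the real code also declares the result variable beforehand):

```rust
use std::fmt::Write;

// Append the optional out parameter only when the IR statement has a result.
fn emit_interlocked(
    out: &mut String,
    fun_suffix: &str,       // e.g. "Add", "Exchange"
    dest: &str,             // already-written destination expression
    value: &str,            // already-written value expression
    res_name: Option<&str>, // stand-in for the `(result, name)` pair above
) -> std::fmt::Result {
    write!(out, "Interlocked{fun_suffix}({dest}, {value}")?;
    if let Some(name) = res_name {
        // Only request the original value when the result is actually used.
        write!(out, ", {name}")?;
    }
    writeln!(out, ");")
}

fn main() -> std::fmt::Result {
    let mut s = String::new();
    emit_interlocked(&mut s, "Add", "counter", "1", None)?; // InterlockedAdd(counter, 1);
    emit_interlocked(&mut s, "Add", "counter", "1", Some("_e5"))?; // InterlockedAdd(counter, 1, _e5);
    print!("{s}");
    Ok(())
}
```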

third_party/rust/naga/src/back/mod.rs (vendored, 3 lines changed)

@@ -271,10 +271,11 @@ bitflags::bitflags! {
///
/// Note that these exactly correspond to the SPIR-V "Ray Flags" mask, and
/// the SPIR-V backend passes them directly through to the
/// `OpRayQueryInitializeKHR` instruction. (We have to choose something, so
/// [`OpRayQueryInitializeKHR`][op] instruction. (We have to choose something, so
/// we might as well make one back end's life easier.)
///
/// [`RayDesc`]: crate::Module::generate_ray_desc_type
/// [op]: https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpRayQueryInitializeKHR
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct RayFlag: u32 {
const OPAQUE = 0x01;

third_party/rust/naga/src/back/msl/mod.rs (vendored, 23 lines changed)

@@ -1,6 +1,9 @@
/*!
Backend for [MSL][msl] (Metal Shading Language).
This backend does not support the [`SHADER_INT64_ATOMIC_ALL_OPS`][all-atom]
capability.
## Binding model
Metal's bindings are flat per resource. Since there isn't an obvious mapping
@@ -24,6 +27,8 @@ For the result type, if it's a structure, we re-compose it with a temporary valu
holding the result.
[msl]: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
[all-atom]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS
*/
use crate::{arena::Handle, proc::index, valid::ModuleInfo};
@@ -661,21 +666,3 @@ fn test_error_size() {
use std::mem::size_of;
assert_eq!(size_of::<Error>(), 32);
}
impl crate::AtomicFunction {
fn to_msl(self) -> Result<&'static str, Error> {
Ok(match self {
Self::Add => "fetch_add",
Self::Subtract => "fetch_sub",
Self::And => "fetch_and",
Self::InclusiveOr => "fetch_or",
Self::ExclusiveOr => "fetch_xor",
Self::Min => "fetch_min",
Self::Max => "fetch_max",
Self::Exchange { compare: None } => "exchange",
Self::Exchange { compare: Some(_) } => Err(Error::FeatureNotImplemented(
"atomic CompareExchange".to_string(),
))?,
})
}
}

third_party/rust/naga/src/back/msl/writer.rs (vendored, 41 lines changed)

@@ -3058,11 +3058,22 @@ impl<W: Write> Writer<W> {
value,
result,
} => {
// This backend supports `SHADER_INT64_ATOMIC_MIN_MAX` but not
// `SHADER_INT64_ATOMIC_ALL_OPS`, so we can assume that if `result` is
// `Some`, we are not operating on a 64-bit value, and that if we are
// operating on a 64-bit value, `result` is `None`.
write!(self.out, "{level}")?;
let fun_str = if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
self.start_baking_expression(result, &context.expression, &res_name)?;
self.named_expressions.insert(result, res_name);
let fun_str = fun.to_msl()?;
fun.to_msl()?
} else if context.expression.resolve_type(value).scalar_width() == Some(8) {
fun.to_msl_64_bit()?
} else {
fun.to_msl()?
};
self.put_atomic_operation(pointer, fun_str, value, &context.expression)?;
// done
writeln!(self.out, ";")?;
@@ -5914,3 +5925,31 @@ fn test_stack_size() {
}
}
}
impl crate::AtomicFunction {
fn to_msl(self) -> Result<&'static str, Error> {
Ok(match self {
Self::Add => "fetch_add",
Self::Subtract => "fetch_sub",
Self::And => "fetch_and",
Self::InclusiveOr => "fetch_or",
Self::ExclusiveOr => "fetch_xor",
Self::Min => "fetch_min",
Self::Max => "fetch_max",
Self::Exchange { compare: None } => "exchange",
Self::Exchange { compare: Some(_) } => Err(Error::FeatureNotImplemented(
"atomic CompareExchange".to_string(),
))?,
})
}
fn to_msl_64_bit(self) -> Result<&'static str, Error> {
Ok(match self {
Self::Min => "min",
Self::Max => "max",
_ => Err(Error::FeatureNotImplemented(
"64-bit atomic operation other than min/max".to_string(),
))?,
})
}
}
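A condensed sketch of the dispatch the MSL hunks implement: a present result keeps the ordinary `fetch_*` spelling, while a missing result on a 64-bit operand selects the min/max-only 64-bit path. The enum and error type here are simplified stand-ins for naga's.

```rust
#[derive(Clone, Copy, Debug)]
enum AtomicFunction { Add, Min, Max, Exchange }

fn to_msl(fun: AtomicFunction) -> Result<&'static str, String> {
    Ok(match fun {
        AtomicFunction::Add => "fetch_add",
        AtomicFunction::Min => "fetch_min",
        AtomicFunction::Max => "fetch_max",
        AtomicFunction::Exchange => "exchange",
    })
}

fn to_msl_64_bit(fun: AtomicFunction) -> Result<&'static str, String> {
    match fun {
        AtomicFunction::Min => Ok("min"),
        AtomicFunction::Max => Ok("max"),
        other => Err(format!("64-bit atomic {other:?} needs SHADER_INT64_ATOMIC_ALL_OPS")),
    }
}

// Mirrors the `if let Some(result) ... else if ... scalar_width() == Some(8)` chain.
fn pick_suffix(
    fun: AtomicFunction,
    has_result: bool,
    value_width_bytes: Option<u8>,
) -> Result<&'static str, String> {
    if has_result {
        to_msl(fun) // a result implies a fully supported (non-64-bit-only) form
    } else if value_width_bytes == Some(8) {
        to_msl_64_bit(fun) // 64-bit, no result: the Metal 3.1 min/max-only path
    } else {
        to_msl(fun)
    }
}

fn main() {
    assert_eq!(pick_suffix(AtomicFunction::Max, false, Some(8)).unwrap(), "max");
    assert_eq!(pick_suffix(AtomicFunction::Max, true, Some(4)).unwrap(), "fetch_max");
    assert!(pick_suffix(AtomicFunction::Add, false, Some(8)).is_err());
}
```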

@@ -617,7 +617,9 @@ fn adjust_stmt(new_pos: &[Handle<Expression>], stmt: &mut Statement) {
} => {
adjust(pointer);
adjust(value);
if let Some(ref mut result) = *result {
adjust(result);
}
match *fun {
crate::AtomicFunction::Exchange {
compare: Some(ref mut compare),

third_party/rust/naga/src/back/spv/block.rs (vendored, 8 lines changed)

@@ -2423,9 +2423,15 @@ impl<'w> BlockContext<'w> {
result,
} => {
let id = self.gen_id();
let result_type_id = self.get_expression_type_id(&self.fun_info[result].ty);
// Compare-and-exchange operations produce a struct result,
// so use `result`'s type if it is available. For no-result
// operations, fall back to `value`'s type.
let result_type_id =
self.get_expression_type_id(&self.fun_info[result.unwrap_or(value)].ty);
if let Some(result) = result {
self.cached[result] = id;
}
let pointer_id =
match self.write_expression_pointer(pointer, &mut block, None)? {

third_party/rust/naga/src/back/spv/mod.rs (vendored, 21 lines changed)

@@ -682,16 +682,29 @@ bitflags::bitflags! {
pub struct WriterFlags: u32 {
/// Include debug labels for everything.
const DEBUG = 0x1;
/// Flip Y coordinate of `BuiltIn::Position` output.
/// Flip Y coordinate of [`BuiltIn::Position`] output.
///
/// [`BuiltIn::Position`]: crate::BuiltIn::Position
const ADJUST_COORDINATE_SPACE = 0x2;
/// Emit `OpName` for input/output locations.
/// Emit [`OpName`][op] for input/output locations.
///
/// Contrary to spec, some drivers treat it as semantic, not allowing
/// any conflicts.
///
/// [op]: https://registry.khronos.org/SPIR-V/specs/unified1/SPIRV.html#OpName
const LABEL_VARYINGS = 0x4;
/// Emit `PointSize` output builtin to vertex shaders, which is
/// Emit [`PointSize`] output builtin to vertex shaders, which is
/// required for drawing with `PointList` topology.
///
/// [`PointSize`]: crate::BuiltIn::PointSize
const FORCE_POINT_SIZE = 0x8;
/// Clamp `BuiltIn::FragDepth` output between 0 and 1.
/// Clamp [`BuiltIn::FragDepth`] output between 0 and 1.
///
/// [`BuiltIn::FragDepth`]: crate::BuiltIn::FragDepth
const CLAMP_FRAG_DEPTH = 0x10;
}
}

third_party/rust/naga/src/back/spv/writer.rs (vendored, 22 lines changed)

@@ -878,6 +878,9 @@ impl Writer {
crate::TypeInner::RayQuery => {
self.require_any("Ray Query", &[spirv::Capability::RayQueryKHR])?;
}
crate::TypeInner::Atomic(crate::Scalar { width: 8, kind: _ }) => {
self.require_any("64 bit integer atomics", &[spirv::Capability::Int64Atomics])?;
}
_ => {}
}
Ok(())
@@ -1760,9 +1763,28 @@ impl Writer {
if let crate::AddressSpace::Storage { .. } = global_variable.space {
match ir_module.types[global_variable.ty].inner {
crate::TypeInner::BindingArray { base, .. } => {
let ty = &ir_module.types[base];
let mut should_decorate = true;
// Check if the type has a runtime array.
// A normal runtime array gets validated out,
// so only structs can be with runtime arrays
if let crate::TypeInner::Struct { ref members, .. } = ty.inner {
// only the last member in a struct can be dynamically sized
if let Some(last_member) = members.last() {
if let &crate::TypeInner::Array {
size: crate::ArraySize::Dynamic,
..
} = &ir_module.types[last_member.ty].inner
{
should_decorate = false;
}
}
}
if should_decorate {
let decorated_id = self.get_type_id(LookupType::Handle(base));
self.decorate(decorated_id, Decoration::Block, &[]);
}
}
_ => (),
};
}
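The SPIR-V hunk above skips the `Block` decoration when a binding array's base struct ends in a runtime-sized array, the one position a dynamically sized array may occupy. A sketch of that check with stand-in IR types:

```rust
// Simplified stand-in for naga's TypeInner; only the shape of the check matters.
enum TypeInner {
    Struct { members: Vec<TypeInner> },
    Array { dynamic: bool },
    Scalar,
}

fn should_decorate_block(base: &TypeInner) -> bool {
    if let TypeInner::Struct { members } = base {
        // Only the last member of a struct can be dynamically sized.
        if let Some(TypeInner::Array { dynamic: true }) = members.last() {
            return false; // ends in a runtime array: skip the decoration
        }
    }
    true
}

fn main() {
    let fixed = TypeInner::Struct { members: vec![TypeInner::Scalar] };
    let runtime = TypeInner::Struct {
        members: vec![TypeInner::Scalar, TypeInner::Array { dynamic: true }],
    };
    assert!(should_decorate_block(&fixed));
    assert!(!should_decorate_block(&runtime));
}
```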

@@ -754,9 +754,11 @@ impl<W: Write> Writer<W> {
result,
} => {
write!(self.out, "{level}")?;
if let Some(result) = result {
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
}
let fun_str = fun.to_wgsl();
write!(self.out, "atomic{fun_str}(")?;

@@ -75,8 +75,10 @@ impl FunctionTracer<'_> {
self.expressions_used.insert(pointer);
self.trace_atomic_function(fun);
self.expressions_used.insert(value);
if let Some(result) = result {
self.expressions_used.insert(result);
}
}
St::WorkGroupUniformLoad { pointer, result } => {
self.expressions_used.insert(pointer);
self.expressions_used.insert(result);
@@ -255,8 +257,10 @@ impl FunctionMap {
adjust(pointer);
self.adjust_atomic_function(fun);
adjust(value);
if let Some(ref mut result) = *result {
adjust(result);
}
}
St::WorkGroupUniformLoad {
ref mut pointer,
ref mut result,

third_party/rust/naga/src/front/glsl/ast.rs (vendored, 4 lines changed)

@@ -73,9 +73,9 @@ bitflags::bitflags! {
const STANDARD = 1 << 0;
/// Request overloads that use the double type
const DOUBLE = 1 << 1;
/// Request overloads that use samplerCubeArray(Shadow)
/// Request overloads that use `samplerCubeArray(Shadow)`
const CUBE_TEXTURES_ARRAY = 1 << 2;
/// Request overloads that use sampler2DMSArray
/// Request overloads that use `sampler2DMSArray`
const D2_MULTI_TEXTURES_ARRAY = 1 << 3;
}
}

@@ -2218,7 +2218,7 @@ pub fn sampled_to_depth(
}
bitflags::bitflags! {
/// Influences the operation `texture_args_generator`
/// Influences the operation [`texture_args_generator`]
struct TextureArgsOptions: u32 {
/// Generates multisampled variants of images
const MULTI = 1 << 0;

third_party/rust/naga/src/front/spv/mod.rs (vendored, 3 lines changed)

@@ -63,6 +63,7 @@ pub const SUPPORTED_CAPABILITIES: &[spirv::Capability] = &[
spirv::Capability::Int8,
spirv::Capability::Int16,
spirv::Capability::Int64,
spirv::Capability::Int64Atomics,
spirv::Capability::Float16,
spirv::Capability::Float64,
spirv::Capability::Geometry,
@ -4028,7 +4029,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
pointer: p_lexp_handle,
fun: crate::AtomicFunction::Add,
value: one_lexp_handle,
result: r_lexp_handle,
result: Some(r_lexp_handle),
};
block.push(stmt, span);
}

third_party/rust/naga/src/front/type_gen.rs (vendored, 4 lines changed)

@@ -291,10 +291,10 @@ impl crate::Module {
name: Some("exchanged".to_string()),
ty: bool_ty,
binding: None,
offset: 4,
offset: scalar.width as u32,
},
],
span: 8,
span: scalar.width as u32 * 2,
},
}
}
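The layout arithmetic above, spelled out: `old_value` sits at offset 0 with the atomic's width, `exchanged` follows at offset `width`, and the generated compare-exchange result struct spans `2 * width`, so 64-bit atomics now get a correctly sized struct. A small check of the numbers:

```rust
// (offset of old_value, offset of exchanged, struct span) for a given scalar width.
fn exchange_result_layout(width: u32) -> (u32, u32, u32) {
    let old_value_offset = 0;
    let exchanged_offset = width; // was hard-coded to 4 before this change
    let span = width * 2;         // was hard-coded to 8
    (old_value_offset, exchanged_offset, span)
}

fn main() {
    assert_eq!(exchange_result_layout(4), (0, 4, 8));  // 32-bit atomics: old behavior
    assert_eq!(exchange_result_layout(8), (0, 8, 16)); // 64-bit atomics: now correct
}
```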

@@ -1491,6 +1491,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
function,
arguments,
&mut ctx.as_expression(block, &mut emitter),
true,
)?;
block.extend(emitter.finish(&ctx.function.expressions));
return Ok(());
@@ -1747,7 +1748,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
ref arguments,
} => {
let handle = self
.call(span, function, arguments, ctx)?
.call(span, function, arguments, ctx, false)?
.ok_or(Error::FunctionReturnsVoid(function.span))?;
return Ok(Typed::Plain(handle));
}
@@ -1941,6 +1942,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
function: &ast::Ident<'source>,
arguments: &[Handle<ast::Expression<'source>>],
ctx: &mut ExpressionContext<'source, '_, '_>,
is_statement: bool,
) -> Result<Option<Handle<crate::Expression>>, Error<'source>> {
match ctx.globals.get(function.name) {
Some(&LoweredGlobalDecl::Type(ty)) => {
@@ -2086,7 +2088,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
self.subgroup_gather_helper(span, mode, arguments, ctx)?,
));
} else if let Some(fun) = crate::AtomicFunction::map(function.name) {
return Ok(Some(self.atomic_helper(span, fun, arguments, ctx)?));
return self.atomic_helper(span, fun, arguments, is_statement, ctx);
} else {
match function.name {
"select" => {
@@ -2168,7 +2170,7 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
compare: Some(compare),
},
value,
result,
result: Some(result),
},
span,
);
@@ -2459,25 +2461,38 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
span: Span,
fun: crate::AtomicFunction,
args: &[Handle<ast::Expression<'source>>],
is_statement: bool,
ctx: &mut ExpressionContext<'source, '_, '_>,
) -> Result<Handle<crate::Expression>, Error<'source>> {
) -> Result<Option<Handle<crate::Expression>>, Error<'source>> {
let mut args = ctx.prepare_args(args, 2, span);
let pointer = self.atomic_pointer(args.next()?, ctx)?;
let value = args.next()?;
let value = self.expression(value, ctx)?;
let ty = ctx.register_type(value)?;
let value = self.expression(args.next()?, ctx)?;
let value_inner = resolve_inner!(ctx, value);
args.finish()?;
let result = ctx.interrupt_emitter(
// If we don't use the return value of a 64-bit `min` or `max`
// operation, generate a no-result form of the `Atomic` statement, so
// that we can pass validation with only `SHADER_INT64_ATOMIC_MIN_MAX`
// whenever possible.
let is_64_bit_min_max =
matches!(fun, crate::AtomicFunction::Min | crate::AtomicFunction::Max)
&& matches!(
*value_inner,
crate::TypeInner::Scalar(crate::Scalar { width: 8, .. })
);
let result = if is_64_bit_min_max && is_statement {
None
} else {
let ty = ctx.register_type(value)?;
Some(ctx.interrupt_emitter(
crate::Expression::AtomicResult {
ty,
comparison: false,
},
span,
)?;
)?)
};
let rctx = ctx.runtime_expression_ctx(span)?;
rctx.block.push(
crate::Statement::Atomic {

third_party/rust/naga/src/lib.rs (vendored, 135 lines changed)

@@ -986,7 +986,7 @@ pub struct GlobalVariable {
pub ty: Handle<Type>,
/// Initial value for this variable.
///
/// Expression handle lives in global_expressions
/// This refers to an [`Expression`] in [`Module::global_expressions`].
pub init: Option<Handle<Expression>>,
}
@@ -1002,9 +1002,9 @@ pub struct LocalVariable {
pub ty: Handle<Type>,
/// Initial value for this variable.
///
/// This handle refers to this `LocalVariable`'s function's
/// [`expressions`] arena, but it is required to be an evaluated
/// override expression.
/// This handle refers to an expression in this `LocalVariable`'s function's
/// [`expressions`] arena, but it is required to be an evaluated override
/// expression.
///
/// [`expressions`]: Function::expressions
pub init: Option<Handle<Expression>>,
@@ -1092,6 +1092,9 @@ pub enum BinaryOperator {
///
/// Note: these do not include load/store, which use the existing
/// [`Expression::Load`] and [`Statement::Store`].
///
/// All `Handle<Expression>` values here refer to an expression in
/// [`Function::expressions`].
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
@@ -1233,6 +1236,9 @@ pub enum MathFunction {
}
/// Sampling modifier to control the level of detail.
///
/// All `Handle<Expression>` values here refer to an expression in
/// [`Function::expressions`].
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
@@ -1249,6 +1255,9 @@ pub enum SampleLevel {
}
/// Type of an image query.
///
/// All `Handle<Expression>` values here refer to an expression in
/// [`Function::expressions`].
#[derive(Clone, Copy, Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
@@ -1283,6 +1292,12 @@ pub enum SwizzleComponent {
W = 3,
}
/// The specific behavior of a [`SubgroupGather`] statement.
///
/// All `Handle<Expression>` values here refer to an expression in
/// [`Function::expressions`].
///
/// [`SubgroupGather`]: Statement::SubgroupGather
#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
@@ -1335,9 +1350,9 @@ bitflags::bitflags! {
#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct Barrier: u32 {
/// Barrier affects all `AddressSpace::Storage` accesses.
/// Barrier affects all [`AddressSpace::Storage`] accesses.
const STORAGE = 1 << 0;
/// Barrier affects all `AddressSpace::WorkGroup` accesses.
/// Barrier affects all [`AddressSpace::WorkGroup`] accesses.
const WORK_GROUP = 1 << 1;
/// Barrier synchronizes execution across all invocations within a subgroup that exectue this instruction.
const SUB_GROUP = 1 << 2;
@@ -1347,6 +1362,15 @@ bitflags::bitflags! {
/// An expression that can be evaluated to obtain a value.
///
/// This is a Single Static Assignment (SSA) scheme similar to SPIR-V.
///
/// When an `Expression` variant holds `Handle<Expression>` fields, they refer
/// to another expression in the same arena, unless explicitly noted otherwise.
/// One `Arena<Expression>` may only refer to a different arena indirectly, via
/// [`Constant`] or [`Override`] expressions, which hold handles for their
/// respective types.
///
/// [`Constant`]: Expression::Constant
/// [`Override`]: Expression::Override
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
@@ -1483,7 +1507,7 @@ pub enum Expression {
gather: Option<SwizzleComponent>,
coordinate: Handle<Expression>,
array_index: Option<Handle<Expression>>,
/// Expression handle lives in global_expressions
/// This refers to an expression in [`Module::global_expressions`].
offset: Option<Handle<Expression>>,
level: SampleLevel,
depth_ref: Option<Handle<Expression>>,
@@ -1612,8 +1636,29 @@ pub enum Expression {
},
/// Result of calling another function.
CallResult(Handle<Function>),
/// Result of an atomic operation.
///
/// This expression must be referred to by the [`result`] field of exactly one
/// [`Atomic`][stmt] statement somewhere in the same function. Let `T` be the
/// scalar type contained by the [`Atomic`][type] value that the statement
/// operates on.
///
/// If `comparison` is `false`, then `ty` must be the scalar type `T`.
///
/// If `comparison` is `true`, then `ty` must be a [`Struct`] with two members:
///
/// - A member named `old_value`, whose type is `T`, and
///
/// - A member named `exchanged`, of type [`BOOL`].
///
/// [`result`]: Statement::Atomic::result
/// [stmt]: Statement::Atomic
/// [type]: TypeInner::Atomic
/// [`Struct`]: TypeInner::Struct
/// [`BOOL`]: Scalar::BOOL
AtomicResult { ty: Handle<Type>, comparison: bool },
/// Result of a [`WorkGroupUniformLoad`] statement.
///
/// [`WorkGroupUniformLoad`]: Statement::WorkGroupUniformLoad
@@ -1725,6 +1770,9 @@ pub enum RayQueryFunction {
//TODO: consider removing `Clone`. It's not valid to clone `Statement::Emit` anyway.
/// Instructions which make up an executable block.
///
/// `Handle<Expression>` and `Range<Expression>` values in `Statement` variants
/// refer to expressions in [`Function::expressions`], unless otherwise noted.
// Clone is used only for error reporting and is not intended for end users
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
@@ -1890,15 +1938,66 @@ pub enum Statement {
/// Atomic function.
Atomic {
/// Pointer to an atomic value.
///
/// This must be a [`Pointer`] to an [`Atomic`] value. The atomic's
/// scalar type may be [`I32`] or [`U32`].
///
/// If [`SHADER_INT64_ATOMIC_MIN_MAX`] or [`SHADER_INT64_ATOMIC_ALL_OPS`] are
/// enabled, this may also be [`I64`] or [`U64`].
///
/// [`Pointer`]: TypeInner::Pointer
/// [`Atomic`]: TypeInner::Atomic
/// [`I32`]: Scalar::I32
/// [`U32`]: Scalar::U32
/// [`SHADER_INT64_ATOMIC_MIN_MAX`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_MIN_MAX
/// [`SHADER_INT64_ATOMIC_ALL_OPS`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS
/// [`I64`]: Scalar::I64
/// [`U64`]: Scalar::U64
pointer: Handle<Expression>,
/// Function to run on the atomic.
/// Function to run on the atomic value.
///
/// If [`pointer`] refers to a 64-bit atomic value, then:
///
/// - The [`SHADER_INT64_ATOMIC_ALL_OPS`] capability allows any [`AtomicFunction`]
/// value here.
///
/// - The [`SHADER_INT64_ATOMIC_MIN_MAX`] capability allows
/// [`AtomicFunction::Min`] and [`AtomicFunction::Max`] here.
///
/// - If neither of those capabilities are present, then 64-bit scalar
/// atomics are not allowed.
///
/// [`pointer`]: Statement::Atomic::pointer
/// [`SHADER_INT64_ATOMIC_MIN_MAX`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_MIN_MAX
/// [`SHADER_INT64_ATOMIC_ALL_OPS`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS
fun: AtomicFunction,
/// Value to use in the function.
///
/// This must be a scalar of the same type as [`pointer`]'s atomic's scalar type.
///
/// [`pointer`]: Statement::Atomic::pointer
value: Handle<Expression>,
/// [`AtomicResult`] expression representing this function's result.
///
/// If [`fun`] is [`Exchange { compare: None }`], this must be `Some`,
/// as otherwise that operation would be equivalent to a simple [`Store`]
/// to the atomic.
///
/// Otherwise, this may be `None` if the return value of the operation is not needed.
///
/// If `pointer` refers to a 64-bit atomic value, [`SHADER_INT64_ATOMIC_MIN_MAX`]
/// is enabled, and [`SHADER_INT64_ATOMIC_ALL_OPS`] is not, this must be `None`.
///
/// [`AtomicResult`]: crate::Expression::AtomicResult
result: Handle<Expression>,
/// [`fun`]: Statement::Atomic::fun
/// [`Store`]: Statement::Store
/// [`Exchange { compare: None }`]: AtomicFunction::Exchange
/// [`SHADER_INT64_ATOMIC_MIN_MAX`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_MIN_MAX
/// [`SHADER_INT64_ATOMIC_ALL_OPS`]: crate::valid::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS
result: Option<Handle<Expression>>,
},
/// Load uniformly from a uniform pointer in the workgroup address space.
///
@@ -2013,8 +2112,18 @@ pub struct Function {
pub local_variables: Arena<LocalVariable>,
/// Expressions used inside this function.
///
/// An `Expression` must occur before all other `Expression`s that use its
/// value.
/// If an [`Expression`] is in this arena, then its subexpressions are in this
/// arena too. In other words, every `Handle<Expression>` in this arena
/// refers to an [`Expression`] in this arena too. The only way this arena
/// can refer to [`Module::global_expressions`] is indirectly, via
/// [`Constant`] and [`Override`] expressions, which hold handles for their
/// respective types.
///
/// An [`Expression`] must occur before all other [`Expression`]s that use
/// its value.
///
/// [`Constant`]: Expression::Constant
/// [`Override`]: Expression::Override
pub expressions: Arena<Expression>,
/// Map of expressions that have associated variable names
pub named_expressions: NamedExpressions,
@@ -2155,6 +2264,10 @@ pub struct Module {
pub global_variables: Arena<GlobalVariable>,
/// [Constant expressions] and [override expressions] used by this module.
///
/// If an expression is in this arena, then its subexpressions are in this
/// arena too. In other words, every `Handle<Expression>` in this arena
/// refers to an [`Expression`] in this arena too.
///
/// Each `Expression` must occur in the arena before any
/// `Expression` that uses its value.
///
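An analogy in plain Rust for the new `result: Option<Handle<Expression>>` field documented above: std's `fetch_max` both updates and returns the old value, and discarding that return value is precisely the `result: None` shape that Metal 3.1's 64-bit min/max atomics can express.

```rust
use std::sync::atomic::{AtomicU64, Ordering};

fn main() {
    let a = AtomicU64::new(5);

    // Statement form: result discarded (naga: `result: None`).
    let _ = a.fetch_max(9, Ordering::Relaxed);

    // Expression form: result consumed (naga: `result: Some(handle)`).
    let old = a.fetch_max(3, Ordering::Relaxed);
    assert_eq!(old, 9);
    assert_eq!(a.load(Ordering::Relaxed), 9);
}
```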

third_party/rust/naga/src/valid/analyzer.rs (vendored, 4 lines changed)

@@ -70,8 +70,10 @@ bitflags::bitflags! {
/// subsequent statements within the current function (only!)
/// to be executed in a non-uniform control flow.
const MAY_RETURN = 0x1;
/// Control flow may be killed. Anything after `Statement::Kill` is
/// Control flow may be killed. Anything after [`Statement::Kill`] is
/// considered inside non-uniform context.
///
/// [`Statement::Kill`]: crate::Statement::Kill
const MAY_KILL = 0x2;
}
}

third_party/rust/naga/src/valid/expression.rs (vendored, 38 lines changed)

@@ -1,7 +1,4 @@
use super::{
compose::validate_compose, validate_atomic_compare_exchange_struct, FunctionInfo, ModuleInfo,
ShaderStages, TypeFlags,
};
use super::{compose::validate_compose, FunctionInfo, ModuleInfo, ShaderStages, TypeFlags};
use crate::arena::UniqueArena;
use crate::{
@@ -12,8 +9,6 @@ use crate::{
#[derive(Clone, Debug, thiserror::Error)]
#[cfg_attr(test, derive(PartialEq))]
pub enum ExpressionError {
#[error("Doesn't exist")]
DoesntExist,
#[error("Used by a statement before it was introduced into the scope by any of the dominating blocks")]
NotInScope,
#[error("Base type {0:?} is not compatible with this expression")]
@@ -116,8 +111,6 @@ pub enum ExpressionError {
WrongArgumentCount(crate::MathFunction),
#[error("Argument [{1}] to {0:?} as expression {2:?} has an invalid type.")]
InvalidArgumentType(crate::MathFunction, u32, Handle<crate::Expression>),
#[error("Atomic result type can't be {0:?}")]
InvalidAtomicResultType(Handle<crate::Type>),
#[error(
"workgroupUniformLoad result type can't be {0:?}. It can only be a constructible type."
)]
@@ -1584,30 +1577,11 @@ impl super::Validator {
ShaderStages::all()
}
E::CallResult(function) => mod_info.functions[function.index()].available_stages,
E::AtomicResult { ty, comparison } => {
let scalar_predicate = |ty: &crate::TypeInner| match ty {
&crate::TypeInner::Scalar(
scalar @ Sc {
kind: crate::ScalarKind::Uint | crate::ScalarKind::Sint,
..
},
) => self.check_width(scalar).is_ok(),
_ => false,
};
let good = match &module.types[ty].inner {
ty if !comparison => scalar_predicate(ty),
&crate::TypeInner::Struct { ref members, .. } if comparison => {
validate_atomic_compare_exchange_struct(
&module.types,
members,
scalar_predicate,
)
}
_ => false,
};
if !good {
return Err(ExpressionError::InvalidAtomicResultType(ty));
}
E::AtomicResult { .. } => {
// These expressions are validated when we check the `Atomic` statement
// that refers to them, because we have all the information we need at
// that point. The checks driven by `Validator::needs_visit` ensure
// that this expression is indeed visited by one `Atomic` statement.
ShaderStages::all()
}
E::WorkGroupUniformLoadResult { ty } => {

third_party/rust/naga/src/valid/function.rs (vendored, 266 lines changed)

@@ -22,6 +22,8 @@ pub enum CallError {
},
#[error("Result expression {0:?} has already been introduced earlier")]
ResultAlreadyInScope(Handle<crate::Expression>),
#[error("Result expression {0:?} is populated by multiple `Call` statements")]
ResultAlreadyPopulated(Handle<crate::Expression>),
#[error("Result value is invalid")]
ResultValue(#[source] ExpressionError),
#[error("Requires {required} arguments, but {seen} are provided")]
@@ -41,10 +43,24 @@ pub enum CallError {
pub enum AtomicError {
#[error("Pointer {0:?} to atomic is invalid.")]
InvalidPointer(Handle<crate::Expression>),
#[error("Address space {0:?} does not support 64bit atomics.")]
InvalidAddressSpace(crate::AddressSpace),
#[error("Operand {0:?} has invalid type.")]
InvalidOperand(Handle<crate::Expression>),
#[error("Result expression {0:?} is not an `AtomicResult` expression")]
InvalidResultExpression(Handle<crate::Expression>),
#[error("Result expression {0:?} is marked as an `exchange`")]
ResultExpressionExchange(Handle<crate::Expression>),
#[error("Result expression {0:?} is not marked as an `exchange`")]
ResultExpressionNotExchange(Handle<crate::Expression>),
#[error("Result type for {0:?} doesn't match the statement")]
ResultTypeMismatch(Handle<crate::Expression>),
#[error("Exchange operations must return a value")]
MissingReturnValue,
#[error("Capability {0:?} is required")]
MissingCapability(super::Capabilities),
#[error("Result expression {0:?} is populated by multiple `Atomic` statements")]
ResultAlreadyPopulated(Handle<crate::Expression>),
}
#[derive(Clone, Debug, thiserror::Error)]
@@ -174,6 +190,8 @@ pub enum FunctionError {
InvalidSubgroup(#[from] SubgroupError),
#[error("Emit statement should not cover \"result\" expressions like {0:?}")]
EmitResult(Handle<crate::Expression>),
#[error("Expression not visited by the appropriate statement")]
UnvisitedExpression(Handle<crate::Expression>),
}
bitflags::bitflags! {
@@ -241,9 +259,7 @@ impl<'a> BlockContext<'a> {
handle: Handle<crate::Expression>,
valid_expressions: &BitSet,
) -> Result<&crate::TypeInner, WithSpan<ExpressionError>> {
if handle.index() >= self.expressions.len() {
Err(ExpressionError::DoesntExist.with_span())
} else if !valid_expressions.contains(handle.index()) {
if !valid_expressions.contains(handle.index()) {
Err(ExpressionError::NotInScope.with_span_handle(handle, self.expressions))
} else {
Ok(self.info[handle].ty.inner_with(self.types))
@@ -259,18 +275,8 @@ impl<'a> BlockContext<'a> {
.map_err_inner(|source| FunctionError::Expression { handle, source }.with_span())
}
fn resolve_pointer_type(
&self,
handle: Handle<crate::Expression>,
) -> Result<&crate::TypeInner, FunctionError> {
if handle.index() >= self.expressions.len() {
Err(FunctionError::Expression {
handle,
source: ExpressionError::DoesntExist,
})
} else {
Ok(self.info[handle].ty.inner_with(self.types))
}
fn resolve_pointer_type(&self, handle: Handle<crate::Expression>) -> &crate::TypeInner {
self.info[handle].ty.inner_with(self.types)
}
}
@@ -317,7 +323,13 @@ impl super::Validator {
}
match context.expressions[expr] {
crate::Expression::CallResult(callee)
if fun.result.is_some() && callee == function => {}
if fun.result.is_some() && callee == function =>
{
if !self.needs_visit.remove(expr.index()) {
return Err(CallError::ResultAlreadyPopulated(expr)
.with_span_handle(expr, context.expressions));
}
}
_ => {
return Err(CallError::ExpressionMismatch(result)
.with_span_handle(expr, context.expressions))
@@ -350,72 +362,189 @@ impl super::Validator {
pointer: Handle<crate::Expression>,
fun: &crate::AtomicFunction,
value: Handle<crate::Expression>,
result: Handle<crate::Expression>,
result: Option<Handle<crate::Expression>>,
span: crate::Span,
context: &BlockContext,
) -> Result<(), WithSpan<FunctionError>> {
// The `pointer` operand must be a pointer to an atomic value.
let pointer_inner = context.resolve_type(pointer, &self.valid_expression_set)?;
let ptr_scalar = match *pointer_inner {
crate::TypeInner::Pointer { base, .. } => match context.types[base].inner {
crate::TypeInner::Atomic(scalar) => scalar,
ref other => {
log::error!("Atomic pointer to type {:?}", other);
let crate::TypeInner::Pointer {
base: pointer_base,
space: pointer_space,
} = *pointer_inner
else {
log::error!("Atomic operation on type {:?}", *pointer_inner);
return Err(AtomicError::InvalidPointer(pointer)
.with_span_handle(pointer, context.expressions)
.into_other());
}
},
ref other => {
log::error!("Atomic on type {:?}", other);
};
let crate::TypeInner::Atomic(pointer_scalar) = context.types[pointer_base].inner else {
log::error!(
"Atomic pointer to type {:?}",
context.types[pointer_base].inner
);
return Err(AtomicError::InvalidPointer(pointer)
.with_span_handle(pointer, context.expressions)
.into_other());
}
};
// The `value` operand must be a scalar of the same type as the atomic.
let value_inner = context.resolve_type(value, &self.valid_expression_set)?;
match *value_inner {
crate::TypeInner::Scalar(scalar) if scalar == ptr_scalar => {}
ref other => {
log::error!("Atomic operand type {:?}", other);
let crate::TypeInner::Scalar(value_scalar) = *value_inner else {
log::error!("Atomic operand type {:?}", *value_inner);
return Err(AtomicError::InvalidOperand(value)
.with_span_handle(value, context.expressions)
.into_other());
};
if pointer_scalar != value_scalar {
log::error!("Atomic operand type {:?}", *value_inner);
return Err(AtomicError::InvalidOperand(value)
.with_span_handle(value, context.expressions)
.into_other());
}
}
if let crate::AtomicFunction::Exchange { compare: Some(cmp) } = *fun {
if context.resolve_type(cmp, &self.valid_expression_set)? != value_inner {
log::error!("Atomic exchange comparison has a different type from the value");
return Err(AtomicError::InvalidOperand(cmp)
.with_span_handle(cmp, context.expressions)
// Check for the special restrictions on 64-bit atomic operations.
//
// We don't need to consider other widths here: this function has already checked
// that `pointer`'s type is an `Atomic`, and `validate_type` has already checked
// that that `Atomic` type has a permitted scalar width.
if pointer_scalar.width == 8 {
// `Capabilities::SHADER_INT64_ATOMIC_ALL_OPS` enables all sorts of 64-bit
// atomic operations.
if self
.capabilities
.contains(super::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS)
{
// okay
} else {
// `Capabilities::SHADER_INT64_ATOMIC_MIN_MAX` allows `Min` and
// `Max` on operations in `Storage`, without a return value.
if matches!(
*fun,
crate::AtomicFunction::Min | crate::AtomicFunction::Max
) && matches!(pointer_space, crate::AddressSpace::Storage { .. })
&& result.is_none()
{
if !self
.capabilities
.contains(super::Capabilities::SHADER_INT64_ATOMIC_MIN_MAX)
{
log::error!("Int64 min-max atomic operations are not supported");
return Err(AtomicError::MissingCapability(
super::Capabilities::SHADER_INT64_ATOMIC_MIN_MAX,
)
.with_span_handle(value, context.expressions)
.into_other());
}
} else {
// Otherwise, we require the full 64-bit atomic capability.
log::error!("Int64 atomic operations are not supported");
return Err(AtomicError::MissingCapability(
super::Capabilities::SHADER_INT64_ATOMIC_ALL_OPS,
)
.with_span_handle(value, context.expressions)
.into_other());
}
}
}
self.emit_expression(result, context)?;
match context.expressions[result] {
crate::Expression::AtomicResult { ty, comparison }
if {
let scalar_predicate =
|ty: &crate::TypeInner| *ty == crate::TypeInner::Scalar(ptr_scalar);
match &context.types[ty].inner {
ty if !comparison => scalar_predicate(ty),
&crate::TypeInner::Struct { ref members, .. } if comparison => {
validate_atomic_compare_exchange_struct(
context.types,
members,
scalar_predicate,
)
// The result expression must be appropriate to the operation.
match result {
Some(result) => {
// The `result` handle must refer to an `AtomicResult` expression.
let crate::Expression::AtomicResult {
ty: result_ty,
comparison,
} = context.expressions[result]
else {
return Err(AtomicError::InvalidResultExpression(result)
.with_span_handle(result, context.expressions)
.into_other());
};
// Note that this expression has been visited by the proper kind
// of statement.
if !self.needs_visit.remove(result.index()) {
return Err(AtomicError::ResultAlreadyPopulated(result)
.with_span_handle(result, context.expressions)
.into_other());
}
_ => false,
// The constraints on the result type depend on the atomic function.
if let crate::AtomicFunction::Exchange {
compare: Some(compare),
} = *fun
{
// The comparison value must be a scalar of the same type as the
// atomic we're operating on.
let compare_inner =
context.resolve_type(compare, &self.valid_expression_set)?;
if !compare_inner.equivalent(value_inner, context.types) {
log::error!(
"Atomic exchange comparison has a different type from the value"
);
return Err(AtomicError::InvalidOperand(compare)
.with_span_handle(compare, context.expressions)
.into_other());
}
} => {}
_ => {
// The result expression must be an `__atomic_compare_exchange_result`
// struct whose `old_value` member is of the same type as the atomic
// we're operating on.
let crate::TypeInner::Struct { ref members, .. } =
context.types[result_ty].inner
else {
return Err(AtomicError::ResultTypeMismatch(result)
.with_span_handle(result, context.expressions)
.into_other())
.into_other());
};
if !validate_atomic_compare_exchange_struct(
context.types,
members,
|ty: &crate::TypeInner| *ty == crate::TypeInner::Scalar(pointer_scalar),
) {
return Err(AtomicError::ResultTypeMismatch(result)
.with_span_handle(result, context.expressions)
.into_other());
}
// The result expression must be for a comparison operation.
if !comparison {
return Err(AtomicError::ResultExpressionNotExchange(result)
.with_span_handle(result, context.expressions)
.into_other());
}
} else {
// The result expression must be a scalar of the same type as the
// atomic we're operating on.
let result_inner = &context.types[result_ty].inner;
if !result_inner.equivalent(value_inner, context.types) {
return Err(AtomicError::ResultTypeMismatch(result)
.with_span_handle(result, context.expressions)
.into_other());
}
// The result expression must not be for a comparison.
if comparison {
return Err(AtomicError::ResultExpressionExchange(result)
.with_span_handle(result, context.expressions)
.into_other());
}
}
self.emit_expression(result, context)?;
}
None => {
// Exchange operations must always produce a value.
if let crate::AtomicFunction::Exchange { compare: None } = *fun {
log::error!("Atomic exchange's value is unused");
return Err(AtomicError::MissingReturnValue
.with_span_static(span, "atomic exchange operation")
.into_other());
}
}
}
Ok(())
}
fn validate_subgroup_operation(
@@ -819,9 +948,6 @@ impl super::Validator {
S::Store { pointer, value } => {
let mut current = pointer;
loop {
let _ = context
.resolve_pointer_type(current)
.map_err(|e| e.with_span())?;
match context.expressions[current] {
crate::Expression::Access { base, .. }
| crate::Expression::AccessIndex { base, .. } => current = base,
@@ -844,9 +970,7 @@ impl super::Validator {
_ => {}
}
let pointer_ty = context
.resolve_pointer_type(pointer)
.map_err(|e| e.with_span())?;
let pointer_ty = context.resolve_pointer_type(pointer);
let good = match *pointer_ty {
Ti::Pointer { base, space: _ } => match context.types[base].inner {
@@ -1015,7 +1139,7 @@ impl super::Validator {
value,
result,
} => {
self.validate_atomic(pointer, fun, value, result, context)?;
self.validate_atomic(pointer, fun, value, result, span, context)?;
}
S::WorkGroupUniformLoad { pointer, result } => {
stages &= super::ShaderStages::COMPUTE;
@@ -1307,11 +1431,20 @@ impl super::Validator {
self.valid_expression_set.clear();
self.valid_expression_list.clear();
self.needs_visit.clear();
for (handle, expr) in fun.expressions.iter() {
if expr.needs_pre_emit() {
self.valid_expression_set.insert(handle.index());
}
if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
// Mark expressions that need to be visited by a particular kind of
// statement.
if let crate::Expression::CallResult(_) | crate::Expression::AtomicResult { .. } =
*expr
{
self.needs_visit.insert(handle.index());
}
match self.validate_expression(
handle,
expr,
@@ -1338,6 +1471,15 @@ impl super::Validator {
)?
.stages;
info.available_stages &= stages;
if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
if let Some(unvisited) = self.needs_visit.iter().next() {
let index = std::num::NonZeroU32::new(unvisited as u32 + 1).unwrap();
let handle = Handle::new(index);
return Err(FunctionError::UnvisitedExpression(handle)
.with_span_handle(handle, &fun.expressions));
}
}
}
Ok(info)
}
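A condensed sketch of the 64-bit branch of `validate_atomic` above: `SHADER_INT64_ATOMIC_ALL_OPS` admits every shape, while `SHADER_INT64_ATOMIC_MIN_MAX` only admits result-less `Min`/`Max` on storage-class atomics. Capabilities are reduced to booleans here; the real validator reports spanned errors.

```rust
#[derive(Clone, Copy)]
enum Fun {
    Min,
    Max,
    Add,
    Exchange,
}

#[derive(Clone, Copy, PartialEq)]
enum Space {
    Storage,
    WorkGroup,
}

fn check_64_bit_atomic(
    fun: Fun,
    space: Space,
    has_result: bool,
    all_ops: bool,
    min_max: bool,
) -> Result<(), &'static str> {
    if all_ops {
        return Ok(()); // SHADER_INT64_ATOMIC_ALL_OPS admits everything
    }
    let is_min_max_shape =
        matches!(fun, Fun::Min | Fun::Max) && space == Space::Storage && !has_result;
    if is_min_max_shape {
        if min_max {
            Ok(())
        } else {
            Err("SHADER_INT64_ATOMIC_MIN_MAX is required")
        }
    } else {
        Err("SHADER_INT64_ATOMIC_ALL_OPS is required")
    }
}

fn main() {
    assert!(check_64_bit_atomic(Fun::Max, Space::Storage, false, false, true).is_ok());
    assert!(check_64_bit_atomic(Fun::Max, Space::Storage, true, false, true).is_err());
    assert!(check_64_bit_atomic(Fun::Add, Space::Storage, false, false, true).is_err());
    assert!(check_64_bit_atomic(Fun::Exchange, Space::WorkGroup, true, true, false).is_ok());
}
```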

third_party/rust/naga/src/valid/handles.rs (vendored, 2 lines changed)

@@ -530,7 +530,9 @@ impl super::Validator {
crate::AtomicFunction::Exchange { compare } => validate_expr_opt(compare)?,
};
validate_expr(value)?;
if let Some(result) = result {
validate_expr(result)?;
}
Ok(())
}
crate::Statement::WorkGroupUniformLoad { pointer, result } => {

third_party/rust/naga/src/valid/mod.rs (vendored, 56 lines changed)

@@ -78,11 +78,15 @@ bitflags::bitflags! {
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Capabilities: u32 {
/// Support for [`AddressSpace:PushConstant`].
/// Support for [`AddressSpace::PushConstant`][1].
///
/// [1]: crate::AddressSpace::PushConstant
const PUSH_CONSTANT = 0x1;
/// Float values with width = 8.
const FLOAT64 = 0x2;
/// Support for [`Builtin:PrimitiveIndex`].
/// Support for [`BuiltIn::PrimitiveIndex`][1].
///
/// [1]: crate::BuiltIn::PrimitiveIndex
const PRIMITIVE_INDEX = 0x4;
/// Support for non-uniform indexing of sampled textures and storage buffer arrays.
const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 0x8;
@@ -90,17 +94,26 @@ bitflags::bitflags! {
const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 0x10;
/// Support for non-uniform indexing of samplers.
const SAMPLER_NON_UNIFORM_INDEXING = 0x20;
/// Support for [`Builtin::ClipDistance`].
/// Support for [`BuiltIn::ClipDistance`].
///
/// [`BuiltIn::ClipDistance`]: crate::BuiltIn::ClipDistance
const CLIP_DISTANCE = 0x40;
/// Support for [`Builtin::CullDistance`].
/// Support for [`BuiltIn::CullDistance`].
///
/// [`BuiltIn::CullDistance`]: crate::BuiltIn::CullDistance
const CULL_DISTANCE = 0x80;
/// Support for 16-bit normalized storage texture formats.
const STORAGE_TEXTURE_16BIT_NORM_FORMATS = 0x100;
/// Support for [`BuiltIn::ViewIndex`].
///
/// [`BuiltIn::ViewIndex`]: crate::BuiltIn::ViewIndex
const MULTIVIEW = 0x200;
/// Support for `early_depth_test`.
const EARLY_DEPTH_TEST = 0x400;
/// Support for [`Builtin::SampleIndex`] and [`Sampling::Sample`].
/// Support for [`BuiltIn::SampleIndex`] and [`Sampling::Sample`].
///
/// [`BuiltIn::SampleIndex`]: crate::BuiltIn::SampleIndex
/// [`Sampling::Sample`]: crate::Sampling::Sample
const MULTISAMPLED_SHADING = 0x800;
/// Support for ray queries and acceleration structures.
const RAY_QUERY = 0x1000;
@@ -114,6 +127,18 @@ bitflags::bitflags! {
const SUBGROUP = 0x10000;
/// Support for subgroup barriers.
const SUBGROUP_BARRIER = 0x20000;
/// Support for [`AtomicFunction::Min`] and [`AtomicFunction::Max`] on
/// 64-bit integers in the [`Storage`] address space, when the return
/// value is not used.
///
/// This is the only 64-bit atomic functionality available on Metal 3.1.
///
/// [`AtomicFunction::Min`]: crate::AtomicFunction::Min
/// [`AtomicFunction::Max`]: crate::AtomicFunction::Max
/// [`Storage`]: crate::AddressSpace::Storage
const SHADER_INT64_ATOMIC_MIN_MAX = 0x40000;
/// Support for all atomic operations on 64-bit integers.
const SHADER_INT64_ATOMIC_ALL_OPS = 0x80000;
}
}
@@ -233,6 +258,26 @@ pub struct Validator {
valid_expression_set: BitSet,
override_ids: FastHashSet<u16>,
allow_overrides: bool,
/// A checklist of expressions that must be visited by a specific kind of
/// statement.
///
/// For example:
///
/// - [`CallResult`] expressions must be visited by a [`Call`] statement.
/// - [`AtomicResult`] expressions must be visited by an [`Atomic`] statement.
///
/// Be sure not to remove any [`Expression`] handle from this set unless
/// you've explicitly checked that it is the right kind of expression for
/// the visiting [`Statement`].
///
/// [`CallResult`]: crate::Expression::CallResult
/// [`Call`]: crate::Statement::Call
/// [`AtomicResult`]: crate::Expression::AtomicResult
/// [`Atomic`]: crate::Statement::Atomic
/// [`Expression`]: crate::Expression
/// [`Statement`]: crate::Statement
needs_visit: BitSet,
}
#[derive(Clone, Debug, thiserror::Error)]
@@ -385,6 +430,7 @@ impl Validator {
valid_expression_set: BitSet::new(),
override_ids: FastHashSet::default(),
allow_overrides: true,
needs_visit: BitSet::new(),
}
}
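A sketch of the `needs_visit` protocol described above, with a `HashSet` standing in for the validator's `BitSet`: result expressions are enrolled up front, each claiming statement must remove its entry exactly once, and anything left over at the end is reported as unvisited.

```rust
use std::collections::HashSet;

struct Checklist {
    needs_visit: HashSet<usize>, // expression indices awaiting their statement
}

impl Checklist {
    // Called while scanning expressions: CallResult / AtomicResult enroll here.
    fn enroll(&mut self, expr: usize) {
        self.needs_visit.insert(expr);
    }

    // Called by the Call/Atomic statement that populates the expression;
    // a second claim means two statements target the same result.
    fn claim(&mut self, expr: usize) -> Result<(), String> {
        if self.needs_visit.remove(&expr) {
            Ok(())
        } else {
            Err(format!("result expression {expr} already populated"))
        }
    }

    // Called after validating the whole function body.
    fn finish(&self) -> Result<(), String> {
        match self.needs_visit.iter().next() {
            Some(&expr) => Err(format!("expression {expr} not visited by any statement")),
            None => Ok(()),
        }
    }
}

fn main() {
    let mut c = Checklist { needs_visit: HashSet::new() };
    c.enroll(3);
    assert!(c.claim(3).is_ok());
    assert!(c.claim(3).is_err()); // double-population is rejected
    assert!(c.finish().is_ok());
}
```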

third_party/rust/naga/src/valid/type.rs (vendored, 42 lines changed)

@@ -16,23 +16,27 @@ bitflags::bitflags! {
/// This flag is required on types of local variables, function
/// arguments, array elements, and struct members.
///
/// This includes all types except `Image`, `Sampler`,
/// and some `Pointer` types.
/// This includes all types except [`Image`], [`Sampler`],
/// and some [`Pointer`] types.
///
/// [`Image`]: crate::TypeInner::Image
/// [`Sampler`]: crate::TypeInner::Sampler
/// [`Pointer`]: crate::TypeInner::Pointer
const DATA = 0x1;
/// The data type has a size known by pipeline creation time.
///
/// Unsized types are quite restricted. The only unsized types permitted
/// by Naga, other than the non-[`DATA`] types like [`Image`] and
/// [`Sampler`], are dynamically-sized [`Array`s], and [`Struct`s] whose
/// [`Sampler`], are dynamically-sized [`Array`]s, and [`Struct`]s whose
/// last members are such arrays. See the documentation for those types
/// for details.
///
/// [`DATA`]: TypeFlags::DATA
/// [`Image`]: crate::Type::Image
/// [`Sampler`]: crate::Type::Sampler
/// [`Array`]: crate::Type::Array
/// [`Struct`]: crate::Type::struct
/// [`Image`]: crate::TypeInner::Image
/// [`Sampler`]: crate::TypeInner::Sampler
/// [`Array`]: crate::TypeInner::Array
/// [`Struct`]: crate::TypeInner::Struct
const SIZED = 0x2;
/// The data can be copied around.
@@ -43,6 +47,8 @@ bitflags::bitflags! {
/// This covers anything that can be in [`Location`] binding:
/// non-bool scalars and vectors, matrices, and structs and
/// arrays containing only interface types.
///
/// [`Location`]: crate::Binding::Location
const IO_SHAREABLE = 0x8;
/// Can be used for host-shareable structures.
@@ -354,16 +360,28 @@ impl super::Validator {
)
}
Ti::Atomic(crate::Scalar { kind, width }) => {
let good = match kind {
match kind {
crate::ScalarKind::Bool
| crate::ScalarKind::Float
| crate::ScalarKind::AbstractInt
| crate::ScalarKind::AbstractFloat => false,
crate::ScalarKind::Sint | crate::ScalarKind::Uint => width == 4,
};
if !good {
| crate::ScalarKind::AbstractFloat => {
return Err(TypeError::InvalidAtomicWidth(kind, width))
}
crate::ScalarKind::Sint | crate::ScalarKind::Uint => {
if width == 8 {
if !self.capabilities.intersects(
Capabilities::SHADER_INT64_ATOMIC_ALL_OPS
| Capabilities::SHADER_INT64_ATOMIC_MIN_MAX,
) {
return Err(TypeError::MissingCapability(
Capabilities::SHADER_INT64_ATOMIC_ALL_OPS,
));
}
} else if width != 4 {
return Err(TypeError::InvalidAtomicWidth(kind, width));
}
}
};
TypeInfo::new(
TypeFlags::DATA | TypeFlags::SIZED | TypeFlags::HOST_SHAREABLE,
Alignment::from_width(width),
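The width rule the hunk above enforces, restated as a tiny function with the capability flags reduced to booleans: 4-byte integer atomics are always valid, and 8-byte ones require at least one of the two 64-bit atomic capabilities.

```rust
fn validate_atomic_width(
    width: u8,
    int64_all_ops: bool,
    int64_min_max: bool,
) -> Result<(), String> {
    match width {
        4 => Ok(()),                                    // always supported
        8 if int64_all_ops || int64_min_max => Ok(()),  // gated on a capability
        8 => Err("missing a SHADER_INT64_ATOMIC_* capability".into()),
        w => Err(format!("invalid atomic width {w}")),
    }
}

fn main() {
    assert!(validate_atomic_width(4, false, false).is_ok());
    assert!(validate_atomic_width(8, false, true).is_ok());
    assert!(validate_atomic_width(8, false, false).is_err());
    assert!(validate_atomic_width(2, true, true).is_err());
}
```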

File diff suppressed because one or more lines are too long.

third_party/rust/wgpu-core/Cargo.toml (vendored, 2 lines changed)

@@ -47,7 +47,7 @@ document-features = "0.2.8"
indexmap = "2"
log = "0.4"
once_cell = "1"
parking_lot = ">=0.11,<0.13"
parking_lot = ">=0.11, <0.13"
rustc-hash = "1.1"
smallvec = "1"
thiserror = "1"

@@ -5,15 +5,15 @@ use crate::{
compute_command::{ArcComputeCommand, ComputeCommand},
end_pipeline_statistics_query,
memory_init::{fixup_discarded_surfaces, SurfacesInDiscardState},
BasePass, BindGroupStateChange, CommandBuffer, CommandEncoderError, CommandEncoderStatus,
MapPassErr, PassErrorScope, QueryUseError, StateChange,
validate_and_begin_pipeline_statistics_query, BasePass, BindGroupStateChange,
CommandBuffer, CommandEncoderError, CommandEncoderStatus, MapPassErr, PassErrorScope,
QueryUseError, StateChange,
},
device::{DeviceError, MissingDownlevelFlags, MissingFeatures},
error::{ErrorFormatter, PrettyError},
global::Global,
hal_api::HalApi,
hal_label,
id::{self},
hal_label, id,
init_tracker::MemoryInitKind,
resource::{self, Resource},
snatch::SnatchGuard,
@@ -48,23 +48,39 @@ pub struct ComputePass<A: HalApi> {
/// If it is none, this pass is invalid and any operation on it will return an error.
parent: Option<Arc<CommandBuffer<A>>>,
timestamp_writes: Option<ComputePassTimestampWrites>,
timestamp_writes: Option<ArcComputePassTimestampWrites<A>>,
// Resource binding dedupe state.
current_bind_groups: BindGroupStateChange,
current_pipeline: StateChange<id::ComputePipelineId>,
/// The device that this pass is associated with.
///
/// Used for quick validation during recording.
device_id: id::DeviceId,
}
impl<A: HalApi> ComputePass<A> {
/// If the parent command buffer is invalid, the returned pass will be invalid.
fn new(parent: Option<Arc<CommandBuffer<A>>>, desc: &ComputePassDescriptor) -> Self {
fn new(parent: Option<Arc<CommandBuffer<A>>>, desc: ArcComputePassDescriptor<A>) -> Self {
let ArcComputePassDescriptor {
label,
timestamp_writes,
} = desc;
let device_id = parent
.as_ref()
.map_or(id::DeviceId::dummy(0), |p| p.device.as_info().id());
Self {
base: Some(BasePass::new(&desc.label)),
base: Some(BasePass::new(label)),
parent,
timestamp_writes: desc.timestamp_writes.cloned(),
timestamp_writes,
current_bind_groups: BindGroupStateChange::new(),
current_pipeline: StateChange::new(),
device_id,
}
}
@@ -107,6 +123,16 @@ pub struct ComputePassTimestampWrites {
pub end_of_pass_write_index: Option<u32>,
}
/// Describes the writing of timestamp values in a compute pass with the query set resolved.
struct ArcComputePassTimestampWrites<A: HalApi> {
/// The query set to write the timestamps to.
pub query_set: Arc<resource::QuerySet<A>>,
/// The index of the query set at which a start timestamp of this pass is written, if any.
pub beginning_of_pass_write_index: Option<u32>,
/// The index of the query set at which an end timestamp of this pass is written, if any.
pub end_of_pass_write_index: Option<u32>,
}
#[derive(Clone, Debug, Default)]
pub struct ComputePassDescriptor<'a> {
pub label: Label<'a>,
@@ -114,6 +140,12 @@ pub struct ComputePassDescriptor<'a> {
pub timestamp_writes: Option<&'a ComputePassTimestampWrites>,
}
struct ArcComputePassDescriptor<'a, A: HalApi> {
pub label: &'a Label<'a>,
/// Defines where and when timestamp values will be written for this pass.
pub timestamp_writes: Option<ArcComputePassTimestampWrites<A>>,
}
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
pub enum DispatchError {
@@ -310,13 +342,44 @@ impl Global {
pub fn command_encoder_create_compute_pass<A: HalApi>(
&self,
encoder_id: id::CommandEncoderId,
desc: &ComputePassDescriptor,
desc: &ComputePassDescriptor<'_>,
) -> (ComputePass<A>, Option<CommandEncoderError>) {
let hub = A::hub(self);
let mut arc_desc = ArcComputePassDescriptor {
label: &desc.label,
timestamp_writes: None, // Handle only once we resolved the encoder.
};
match CommandBuffer::lock_encoder(hub, encoder_id) {
Ok(cmd_buf) => (ComputePass::new(Some(cmd_buf), desc), None),
Err(err) => (ComputePass::new(None, desc), Some(err)),
Ok(cmd_buf) => {
arc_desc.timestamp_writes = if let Some(tw) = desc.timestamp_writes {
let Ok(query_set) = hub.query_sets.read().get_owned(tw.query_set) else {
return (
ComputePass::new(None, arc_desc),
Some(CommandEncoderError::InvalidTimestampWritesQuerySetId),
);
};
if query_set.device.as_info().id() != cmd_buf.device.as_info().id() {
return (
ComputePass::new(None, arc_desc),
Some(CommandEncoderError::WrongDeviceForTimestampWritesQuerySet),
);
}
Some(ArcComputePassTimestampWrites {
query_set,
beginning_of_pass_write_index: tw.beginning_of_pass_write_index,
end_of_pass_write_index: tw.end_of_pass_write_index,
})
} else {
None
};
(ComputePass::new(Some(cmd_buf), arc_desc), None)
}
Err(err) => (ComputePass::new(None, arc_desc), Some(err)),
}
}
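A sketch of the resolution step in the hunk above, with stand-in types: the raw query-set id from the pass descriptor is looked up once at pass-creation time and checked against the encoder's device before the `Arc` is stored in the Arc-based descriptor.

```rust
use std::sync::Arc;

#[derive(Clone, Copy, PartialEq)]
struct DeviceId(u32);

struct QuerySet {
    device: DeviceId,
}

// `registry` stands in for `hub.query_sets`; errors are plain strings here.
fn resolve_timestamp_writes(
    registry: &[Arc<QuerySet>],
    query_set_id: usize,
    encoder_device: DeviceId,
) -> Result<Arc<QuerySet>, &'static str> {
    let query_set = registry
        .get(query_set_id)
        .cloned()
        .ok_or("invalid timestamp writes query set id")?;
    if query_set.device != encoder_device {
        return Err("query set belongs to a different device");
    }
    Ok(query_set) // stored as ArcComputePassTimestampWrites::query_set
}

fn main() {
    let registry = vec![Arc::new(QuerySet { device: DeviceId(0) })];
    assert!(resolve_timestamp_writes(&registry, 0, DeviceId(0)).is_ok());
    assert!(resolve_timestamp_writes(&registry, 0, DeviceId(1)).is_err());
    assert!(resolve_timestamp_writes(&registry, 9, DeviceId(0)).is_err());
}
```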
@@ -349,7 +412,7 @@ impl Global {
.take()
.ok_or(ComputePassErrorInner::PassEnded)
.map_pass_err(scope)?;
self.compute_pass_end_impl(parent, base, pass.timestamp_writes.as_ref())
self.compute_pass_end_impl(parent, base, pass.timestamp_writes.take())
}
#[doc(hidden)]
@@ -360,11 +423,26 @@ impl Global {
timestamp_writes: Option<&ComputePassTimestampWrites>,
) -> Result<(), ComputePassError> {
let hub = A::hub(self);
let scope = PassErrorScope::PassEncoder(encoder_id);
let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id)
.map_pass_err(PassErrorScope::PassEncoder(encoder_id))?;
let cmd_buf = CommandBuffer::get_encoder(hub, encoder_id).map_pass_err(scope)?;
let commands = ComputeCommand::resolve_compute_command_ids(A::hub(self), &base.commands)?;
let timestamp_writes = if let Some(tw) = timestamp_writes {
Some(ArcComputePassTimestampWrites {
query_set: hub
.query_sets
.read()
.get_owned(tw.query_set)
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(tw.query_set))
.map_pass_err(scope)?,
beginning_of_pass_write_index: tw.beginning_of_pass_write_index,
end_of_pass_write_index: tw.end_of_pass_write_index,
})
} else {
None
};
self.compute_pass_end_impl::<A>(
&cmd_buf,
BasePass {
@ -382,13 +460,11 @@ impl Global {
&self,
cmd_buf: &CommandBuffer<A>,
base: BasePass<ArcComputeCommand<A>>,
timestamp_writes: Option<&ComputePassTimestampWrites>,
mut timestamp_writes: Option<ArcComputePassTimestampWrites<A>>,
) -> Result<(), ComputePassError> {
profiling::scope!("CommandEncoder::run_compute_pass");
let pass_scope = PassErrorScope::Pass(Some(cmd_buf.as_info().id()));
let hub = A::hub(self);
let device = &cmd_buf.device;
if !device.is_valid() {
return Err(ComputePassErrorInner::InvalidDevice(
@ -410,7 +486,13 @@ impl Global {
string_data: base.string_data.to_vec(),
push_constant_data: base.push_constant_data.to_vec(),
},
timestamp_writes: timestamp_writes.cloned(),
timestamp_writes: timestamp_writes
.as_ref()
.map(|tw| ComputePassTimestampWrites {
query_set: tw.query_set.as_info().id(),
beginning_of_pass_write_index: tw.beginning_of_pass_write_index,
end_of_pass_write_index: tw.end_of_pass_write_index,
}),
});
}
@ -428,8 +510,6 @@ impl Global {
*status = CommandEncoderStatus::Error;
let raw = encoder.open().map_pass_err(pass_scope)?;
let query_set_guard = hub.query_sets.read();
let mut state = State {
binder: Binder::new(),
pipeline: None,
@ -441,12 +521,19 @@ impl Global {
let mut string_offset = 0;
let mut active_query = None;
let timestamp_writes = if let Some(tw) = timestamp_writes {
let query_set: &resource::QuerySet<A> = tracker
.query_sets
.add_single(&*query_set_guard, tw.query_set)
.ok_or(ComputePassErrorInner::InvalidQuerySet(tw.query_set))
.map_pass_err(pass_scope)?;
let snatch_guard = device.snatchable_lock.read();
let indices = &device.tracker_indices;
tracker.buffers.set_size(indices.buffers.size());
tracker.textures.set_size(indices.textures.size());
tracker.bind_groups.set_size(indices.bind_groups.size());
tracker
.compute_pipelines
.set_size(indices.compute_pipelines.size());
tracker.query_sets.set_size(indices.query_sets.size());
let timestamp_writes = if let Some(tw) = timestamp_writes.take() {
let query_set = tracker.query_sets.insert_single(tw.query_set);
// Unlike in render passes we can't delay resetting the query sets since
// there is no auxiliary pass.
@ -476,17 +563,6 @@ impl Global {
None
};
let snatch_guard = device.snatchable_lock.read();
let indices = &device.tracker_indices;
tracker.buffers.set_size(indices.buffers.size());
tracker.textures.set_size(indices.textures.size());
tracker.bind_groups.set_size(indices.bind_groups.size());
tracker
.compute_pipelines
.set_size(indices.compute_pipelines.size());
tracker.query_sets.set_size(indices.query_sets.size());
let discard_hal_labels = self
.instance
.flags
@ -812,7 +888,6 @@ impl Global {
query_set,
query_index,
} => {
let query_set_id = query_set.as_info().id();
let scope = PassErrorScope::WriteTimestamp;
device
@ -822,22 +897,20 @@ impl Global {
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_write_timestamp(raw, query_set_id, query_index, None)
.validate_and_write_timestamp(raw, query_index, None)
.map_pass_err(scope)?;
}
ArcComputeCommand::BeginPipelineStatisticsQuery {
query_set,
query_index,
} => {
let query_set_id = query_set.as_info().id();
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let query_set = tracker.query_sets.insert_single(query_set);
query_set
.validate_and_begin_pipeline_statistics_query(
validate_and_begin_pipeline_statistics_query(
query_set.clone(),
raw,
query_set_id,
query_index,
None,
&mut active_query,
@ -846,9 +919,7 @@ impl Global {
}
ArcComputeCommand::EndPipelineStatisticsQuery => {
let scope = PassErrorScope::EndPipelineStatisticsQuery;
end_pipeline_statistics_query(raw, &*query_set_guard, &mut active_query)
.map_pass_err(scope)?;
end_pipeline_statistics_query(raw, &mut active_query).map_pass_err(scope)?;
}
}
}
@ -919,10 +990,13 @@ impl Global {
let bind_group = hub
.bind_groups
.read()
.get(bind_group_id)
.get_owned(bind_group_id)
.map_err(|_| ComputePassErrorInner::InvalidBindGroup(index))
.map_pass_err(scope)?
.clone();
.map_pass_err(scope)?;
if bind_group.device.as_info().id() != pass.device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::SetBindGroup {
index,
@ -941,8 +1015,9 @@ impl Global {
let redundant = pass.current_pipeline.set_and_check_redundant(pipeline_id);
let scope = PassErrorScope::SetPipelineCompute(pipeline_id);
let base = pass.base_mut(scope)?;
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
if redundant {
// Do redundant early-out **after** checking whether the pass is ended or not.
return Ok(());
@ -952,10 +1027,13 @@ impl Global {
let pipeline = hub
.compute_pipelines
.read()
.get(pipeline_id)
.get_owned(pipeline_id)
.map_err(|_| ComputePassErrorInner::InvalidPipeline(pipeline_id))
.map_pass_err(scope)?
.clone();
.map_pass_err(scope)?;
if pipeline.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::SetPipeline(pipeline));
@ -1030,15 +1108,19 @@ impl Global {
indirect: true,
pipeline: pass.current_pipeline.last_state,
};
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let buffer = hub
.buffers
.read()
.get(buffer_id)
.get_owned(buffer_id)
.map_err(|_| ComputePassErrorInner::InvalidBuffer(buffer_id))
.map_pass_err(scope)?
.clone();
.map_pass_err(scope)?;
if buffer.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands
.push(ArcComputeCommand::<A>::DispatchIndirect { buffer, offset });
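
Each of these recording methods repeats the same ownership check after resolving a resource. A standalone sketch of that check, with DeviceId and Resource as stand-ins for wgpu-core's tracked types:

use std::sync::Arc;

#[derive(Clone, Copy, PartialEq, Eq)]
struct DeviceId(u64);

struct Resource {
    device: DeviceId,
}

fn check_same_device(
    resource: &Arc<Resource>,
    pass_device: DeviceId,
) -> Result<(), &'static str> {
    // Mirrors the DeviceError::WrongDevice early-outs above: a resource
    // recorded into a pass must come from the pass's own device.
    if resource.device != pass_device {
        return Err("resource was created by a different device");
    }
    Ok(())
}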
@ -1103,16 +1185,20 @@ impl Global {
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::WriteTimestamp;
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.get_owned(query_set_id)
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
.map_pass_err(scope)?;
if query_set.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands.push(ArcComputeCommand::WriteTimestamp {
query_set,
@ -1129,16 +1215,20 @@ impl Global {
query_index: u32,
) -> Result<(), ComputePassError> {
let scope = PassErrorScope::BeginPipelineStatisticsQuery;
let device_id = pass.device_id;
let base = pass.base_mut(scope)?;
let hub = A::hub(self);
let query_set = hub
.query_sets
.read()
.get(query_set_id)
.get_owned(query_set_id)
.map_err(|_| ComputePassErrorInner::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?
.clone();
.map_pass_err(scope)?;
if query_set.device.as_info().id() != device_id {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
base.commands
.push(ArcComputeCommand::BeginPipelineStatisticsQuery {


@ -633,6 +633,11 @@ pub enum CommandEncoderError {
Device(#[from] DeviceError),
#[error("Command encoder is locked by a previously created render/compute pass. Before recording any new commands, the pass must be ended.")]
Locked,
#[error("QuerySet provided for pass timestamp writes is invalid.")]
InvalidTimestampWritesQuerySetId,
#[error("QuerySet provided for pass timestamp writes that was created by a different device.")]
WrongDeviceForTimestampWritesQuerySet,
}
impl Global {
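
Pass creation now reports these conditions eagerly while still handing back a pass object, so later recording calls can fail deterministically when the pass ends. A standalone sketch of that shape, using a stand-in enum rather than the real CommandEncoderError:

use thiserror::Error;

#[derive(Debug, Error)]
enum EncoderError {
    #[error("QuerySet provided for pass timestamp writes is invalid.")]
    InvalidTimestampWritesQuerySetId,
    #[error("QuerySet provided for pass timestamp writes was created by a different device.")]
    WrongDeviceForTimestampWritesQuerySet,
}

struct Pass {
    usable: bool,
}

fn create_pass(query_set_found: bool, same_device: bool) -> (Pass, Option<EncoderError>) {
    // Creation never fails outright: an unusable pass is returned along
    // with the error, and ending that pass surfaces the failure.
    if !query_set_found {
        return (
            Pass { usable: false },
            Some(EncoderError::InvalidTimestampWritesQuerySetId),
        );
    }
    if !same_device {
        return (
            Pass { usable: false },
            Some(EncoderError::WrongDeviceForTimestampWritesQuerySet),
        );
    }
    (Pass { usable: true }, None)
}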


@ -13,7 +13,7 @@ use crate::{
storage::Storage,
Epoch, FastHashMap, Index,
};
use std::{iter, marker::PhantomData};
use std::{iter, marker::PhantomData, sync::Arc};
use thiserror::Error;
use wgt::BufferAddress;
@ -185,15 +185,14 @@ pub enum ResolveError {
impl<A: HalApi> QuerySet<A> {
fn validate_query(
&self,
query_set_id: id::QuerySetId,
query_type: SimplifiedQueryType,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
) -> Result<&A::QuerySet, QueryUseError> {
) -> Result<(), QueryUseError> {
// We need to defer our resets because we are in a renderpass,
// add the usage to the reset map.
if let Some(reset) = reset_state {
let used = reset.use_query_set(query_set_id, self, query_index);
let used = reset.use_query_set(self.info.id(), self, query_index);
if used {
return Err(QueryUseError::UsedTwiceInsideRenderpass { query_index });
}
@ -214,133 +213,110 @@ impl<A: HalApi> QuerySet<A> {
});
}
Ok(self.raw())
Ok(())
}
pub(super) fn validate_and_write_timestamp(
&self,
raw_encoder: &mut A::CommandEncoder,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let query_set = self.validate_query(
query_set_id,
SimplifiedQueryType::Timestamp,
query_index,
reset_state,
)?;
self.validate_query(SimplifiedQueryType::Timestamp, query_index, reset_state)?;
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1));
}
raw_encoder.write_timestamp(query_set, query_index);
}
Ok(())
}
pub(super) fn validate_and_begin_occlusion_query(
&self,
raw_encoder: &mut A::CommandEncoder,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let query_set = self.validate_query(
query_set_id,
SimplifiedQueryType::Occlusion,
query_index,
reset_state,
)?;
if let Some((_old_id, old_idx)) = active_query.replace((query_set_id, query_index)) {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
});
}
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
raw_encoder
.reset_queries(self.raw.as_ref().unwrap(), query_index..(query_index + 1));
}
raw_encoder.begin_query(query_set, query_index);
}
Ok(())
}
pub(super) fn validate_and_begin_pipeline_statistics_query(
&self,
raw_encoder: &mut A::CommandEncoder,
query_set_id: id::QuerySetId,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
let query_set = self.validate_query(
query_set_id,
SimplifiedQueryType::PipelineStatistics,
query_index,
reset_state,
)?;
if let Some((_old_id, old_idx)) = active_query.replace((query_set_id, query_index)) {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
});
}
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
raw_encoder.reset_queries(self.raw(), query_index..(query_index + 1));
}
raw_encoder.begin_query(query_set, query_index);
raw_encoder.write_timestamp(self.raw(), query_index);
}
Ok(())
}
}
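
The needs_reset pattern above keys off whether a deferred-reset tracker was passed in: render passes defer resets to an auxiliary pass, while compute passes and direct encoder writes must reset immediately. A standalone sketch of that control flow, with invented Encoder and ResetMap types standing in for A::CommandEncoder and QueryResetMap:

use std::ops::Range;

struct Encoder;

impl Encoder {
    fn reset_queries(&mut self, range: Range<u32>) {
        // Backend call that zeroes the queries in `range` immediately.
        let _ = range;
    }
}

#[derive(Default)]
struct ResetMap(Vec<u32>);

impl ResetMap {
    fn defer(&mut self, index: u32) {
        // Record the use; the render pass resets everything in one
        // auxiliary pass before replay.
        self.0.push(index);
    }
}

fn write_timestamp(encoder: &mut Encoder, index: u32, reset: Option<&mut ResetMap>) {
    match reset {
        // Render pass: resets must be deferred, so only record the index.
        Some(map) => map.defer(index),
        // Compute pass or direct encoder write: no auxiliary pass exists,
        // so the query has to be reset right here.
        None => encoder.reset_queries(index..index + 1),
    }
    // ...then the timestamp itself would be written.
}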
pub(super) fn validate_and_begin_occlusion_query<A: HalApi>(
query_set: Arc<QuerySet<A>>,
raw_encoder: &mut A::CommandEncoder,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
active_query: &mut Option<(Arc<QuerySet<A>>, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
query_set.validate_query(SimplifiedQueryType::Occlusion, query_index, reset_state)?;
if let Some((_old, old_idx)) = active_query.take() {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
});
}
let (query_set, _) = &active_query.insert((query_set, query_index));
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
raw_encoder.reset_queries(query_set.raw(), query_index..(query_index + 1));
}
raw_encoder.begin_query(query_set.raw(), query_index);
}
Ok(())
}
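
These free functions now thread the active query through an Option<(Arc<QuerySet>, u32)> instead of a raw id pair. A simplified sketch of the single-active-query invariant they enforce, again with a stand-in QuerySet type:

use std::sync::Arc;

struct QuerySet;

fn begin_query(
    active: &mut Option<(Arc<QuerySet>, u32)>,
    query_set: Arc<QuerySet>,
    index: u32,
) -> Result<(), String> {
    // Only one occlusion/statistics query may be open at a time.
    if let Some((_old_set, old_idx)) = active.take() {
        return Err(format!(
            "query {index} begun while query {old_idx} is still active"
        ));
    }
    *active = Some((query_set, index));
    Ok(())
}

fn end_query(active: &mut Option<(Arc<QuerySet>, u32)>) -> Result<u32, String> {
    // Ending consumes the slot; the Arc kept the set alive in between,
    // which is why the old storage lookup and unwrap are gone.
    active
        .take()
        .map(|(_set, index)| index)
        .ok_or_else(|| "no query is active".to_string())
}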
pub(super) fn end_occlusion_query<A: HalApi>(
raw_encoder: &mut A::CommandEncoder,
storage: &Storage<QuerySet<A>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
active_query: &mut Option<(Arc<QuerySet<A>>, u32)>,
) -> Result<(), QueryUseError> {
if let Some((query_set_id, query_index)) = active_query.take() {
// We can unwrap here as the validity was validated when the active query was set
let query_set = storage.get(query_set_id).unwrap();
if let Some((query_set, query_index)) = active_query.take() {
unsafe { raw_encoder.end_query(query_set.raw.as_ref().unwrap(), query_index) };
Ok(())
} else {
Err(QueryUseError::AlreadyStopped)
}
}
pub(super) fn validate_and_begin_pipeline_statistics_query<A: HalApi>(
query_set: Arc<QuerySet<A>>,
raw_encoder: &mut A::CommandEncoder,
query_index: u32,
reset_state: Option<&mut QueryResetMap<A>>,
active_query: &mut Option<(Arc<QuerySet<A>>, u32)>,
) -> Result<(), QueryUseError> {
let needs_reset = reset_state.is_none();
query_set.validate_query(
SimplifiedQueryType::PipelineStatistics,
query_index,
reset_state,
)?;
if let Some((_old, old_idx)) = active_query.take() {
return Err(QueryUseError::AlreadyStarted {
active_query_index: old_idx,
new_query_index: query_index,
});
}
let (query_set, _) = &active_query.insert((query_set, query_index));
unsafe {
// If we don't have a reset state tracker which can defer resets, we must reset now.
if needs_reset {
raw_encoder.reset_queries(query_set.raw(), query_index..(query_index + 1));
}
raw_encoder.begin_query(query_set.raw(), query_index);
}
Ok(())
}
pub(super) fn end_pipeline_statistics_query<A: HalApi>(
raw_encoder: &mut A::CommandEncoder,
storage: &Storage<QuerySet<A>>,
active_query: &mut Option<(id::QuerySetId, u32)>,
active_query: &mut Option<(Arc<QuerySet<A>>, u32)>,
) -> Result<(), QueryUseError> {
if let Some((query_set_id, query_index)) = active_query.take() {
// We can unwrap here as the validity was validated when the active query was set
let query_set = storage.get(query_set_id).unwrap();
if let Some((query_set, query_index)) = active_query.take() {
unsafe { raw_encoder.end_query(query_set.raw(), query_index) };
Ok(())
} else {
Err(QueryUseError::AlreadyStopped)
@ -384,7 +360,7 @@ impl Global {
.add_single(&*query_set_guard, query_set_id)
.ok_or(QueryError::InvalidQuerySet(query_set_id))?;
query_set.validate_and_write_timestamp(raw_encoder, query_set_id, query_index, None)?;
query_set.validate_and_write_timestamp(raw_encoder, query_index, None)?;
Ok(())
}


@ -1,3 +1,6 @@
use crate::command::{
validate_and_begin_occlusion_query, validate_and_begin_pipeline_statistics_query,
};
use crate::resource::Resource;
use crate::snatch::SnatchGuard;
use crate::{
@ -2258,7 +2261,6 @@ impl Global {
query_set
.validate_and_write_timestamp(
raw,
query_set_id,
query_index,
Some(&mut cmd_buf_data.pending_query_resets),
)
@ -2278,10 +2280,9 @@ impl Global {
.ok_or(RenderCommandError::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set
.validate_and_begin_occlusion_query(
validate_and_begin_occlusion_query(
query_set.clone(),
raw,
query_set_id,
query_index,
Some(&mut cmd_buf_data.pending_query_resets),
&mut active_query,
@ -2292,8 +2293,7 @@ impl Global {
api_log!("RenderPass::end_occlusion_query");
let scope = PassErrorScope::EndOcclusionQuery;
end_occlusion_query(raw, &*query_set_guard, &mut active_query)
.map_pass_err(scope)?;
end_occlusion_query(raw, &mut active_query).map_pass_err(scope)?;
}
RenderCommand::BeginPipelineStatisticsQuery {
query_set_id,
@ -2308,10 +2308,9 @@ impl Global {
.ok_or(RenderCommandError::InvalidQuerySet(query_set_id))
.map_pass_err(scope)?;
query_set
.validate_and_begin_pipeline_statistics_query(
validate_and_begin_pipeline_statistics_query(
query_set.clone(),
raw,
query_set_id,
query_index,
Some(&mut cmd_buf_data.pending_query_resets),
&mut active_query,
@ -2322,7 +2321,7 @@ impl Global {
api_log!("RenderPass::end_pipeline_statistics_query");
let scope = PassErrorScope::EndPipelineStatisticsQuery;
end_pipeline_statistics_query(raw, &*query_set_guard, &mut active_query)
end_pipeline_statistics_query(raw, &mut active_query)
.map_pass_err(scope)?;
}
RenderCommand::ExecuteBundle(bundle_id) => {

third_party/rust/wgpu-core/src/device/mod.rs (vendored)

@ -491,6 +491,16 @@ pub fn create_validator(
Caps::SHADER_INT64,
features.contains(wgt::Features::SHADER_INT64),
);
caps.set(
Caps::SHADER_INT64_ATOMIC_MIN_MAX,
features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX | wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS,
),
);
caps.set(
Caps::SHADER_INT64_ATOMIC_ALL_OPS,
features.contains(wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS),
);
caps.set(
Caps::MULTISAMPLED_SHADING,
downlevel.contains(wgt::DownlevelFlags::MULTISAMPLED_SHADING),

File diff suppressed because one or more lines are too long

third_party/rust/wgpu-hal/Cargo.toml (vendored)

@ -56,7 +56,7 @@ arrayvec = "0.7"
bitflags = "2"
log = "0.4"
once_cell = "1.19.0"
parking_lot = ">=0.11,<0.13"
parking_lot = ">=0.11, <0.13"
raw-window-handle = "0.6"
rustc-hash = "1.1"
thiserror = "1"
@ -233,7 +233,7 @@ features = ["libloading"]
optional = true
[target."cfg(windows)".dependencies.glutin_wgl_sys]
version = "0.5"
version = "0.6"
optional = true
[target."cfg(windows)".dependencies.gpu-allocator]

third_party/rust/wgpu-hal/src/dx12/adapter.rs (vendored)

@ -354,6 +354,25 @@ impl super::Adapter {
&& features1.WaveOps != 0,
);
let atomic_int64_on_typed_resource_supported = {
let mut features9: crate::dx12::types::D3D12_FEATURE_DATA_D3D12_OPTIONS9 =
unsafe { mem::zeroed() };
let hr = unsafe {
device.CheckFeatureSupport(
37, // D3D12_FEATURE_D3D12_OPTIONS9
&mut features9 as *mut _ as *mut _,
mem::size_of::<crate::dx12::types::D3D12_FEATURE_DATA_D3D12_OPTIONS9>() as _,
)
};
hr == 0
&& features9.AtomicInt64OnGroupSharedSupported != 0
&& features9.AtomicInt64OnTypedResourceSupported != 0
};
features.set(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS | wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
atomic_int64_on_typed_resource_supported,
);
// float32-filterable should always be available on d3d12
features.set(wgt::Features::FLOAT32_FILTERABLE, true);
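
Since one D3D12_FEATURE_DATA_D3D12_OPTIONS9 probe answers for both flag groups, the adapter sets the two bits together. A standalone sketch of that gating, assuming only the bitflags crate (the bit values mirror the wgpu-types flags defined later in this patch):

bitflags::bitflags! {
    #[derive(Clone, Copy)]
    struct Features: u64 {
        const SHADER_INT64_ATOMIC_MIN_MAX = 1 << 60;
        const SHADER_INT64_ATOMIC_ALL_OPS = 1 << 61;
    }
}

fn gate_int64_atomics(features: &mut Features, probe_succeeded: bool) {
    // DX12 typed-resource int64 atomics cover both the min/max subset and
    // the full op set, so both bits are toggled from the same probe.
    features.set(
        Features::SHADER_INT64_ATOMIC_ALL_OPS | Features::SHADER_INT64_ATOMIC_MIN_MAX,
        probe_succeeded,
    );
}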

third_party/rust/wgpu-hal/src/dx12/types.rs (vendored)

@ -42,6 +42,24 @@ winapi::STRUCT! {
}
}
winapi::ENUM! {
enum D3D12_WAVE_MMA_TIER {
D3D12_WAVE_MMA_TIER_NOT_SUPPORTED = 0,
D3D12_WAVE_MMA_TIER_1_0 = 10,
}
}
winapi::STRUCT! {
struct D3D12_FEATURE_DATA_D3D12_OPTIONS9 {
MeshShaderPipelineStatsSupported: winapi::shared::minwindef::BOOL,
MeshShaderSupportsFullRangeRenderTargetArrayIndex: winapi::shared::minwindef::BOOL,
AtomicInt64OnTypedResourceSupported: winapi::shared::minwindef::BOOL,
AtomicInt64OnGroupSharedSupported: winapi::shared::minwindef::BOOL,
DerivativesInMeshAndAmplificationShadersSupported: winapi::shared::minwindef::BOOL,
WaveMMATier: D3D12_WAVE_MMA_TIER,
}
}
winapi::ENUM! {
enum D3D_SHADER_MODEL {
D3D_SHADER_MODEL_NONE = 0,


@ -821,6 +821,11 @@ impl super::PrivateCapabilities {
int64: family_check
&& (device.supports_family(MTLGPUFamily::Apple3)
|| device.supports_family(MTLGPUFamily::Metal3)),
// https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf#page=6
int64_atomics: family_check
&& ((device.supports_family(MTLGPUFamily::Apple8)
&& device.supports_family(MTLGPUFamily::Mac2))
|| device.supports_family(MTLGPUFamily::Apple9)),
}
}
@ -896,6 +901,10 @@ impl super::PrivateCapabilities {
F::SHADER_INT64,
self.int64 && self.msl_version >= MTLLanguageVersion::V2_3,
);
features.set(
F::SHADER_INT64_ATOMIC_MIN_MAX,
self.int64_atomics && self.msl_version >= MTLLanguageVersion::V2_4,
);
features.set(
F::ADDRESS_MODE_CLAMP_TO_BORDER,

third_party/rust/wgpu-hal/src/metal/mod.rs (vendored)

@ -272,6 +272,7 @@ struct PrivateCapabilities {
timestamp_query_support: TimestampQuerySupport,
supports_simd_scoped_operations: bool,
int64: bool,
int64_atomics: bool,
}
#[derive(Clone, Debug)]


@ -106,6 +106,9 @@ pub struct PhysicalDeviceFeatures {
zero_initialize_workgroup_memory:
Option<vk::PhysicalDeviceZeroInitializeWorkgroupMemoryFeatures<'static>>,
/// Features provided by `VK_KHR_shader_atomic_int64`, promoted to Vulkan 1.2.
shader_atomic_int64: Option<vk::PhysicalDeviceShaderAtomicInt64Features<'static>>,
/// Features provided by `VK_EXT_subgroup_size_control`, promoted to Vulkan 1.3.
subgroup_size_control: Option<vk::PhysicalDeviceSubgroupSizeControlFeatures<'static>>,
}
@ -151,6 +154,9 @@ impl PhysicalDeviceFeatures {
if let Some(ref mut feature) = self.ray_query {
info = info.push_next(feature);
}
if let Some(ref mut feature) = self.shader_atomic_int64 {
info = info.push_next(feature);
}
if let Some(ref mut feature) = self.subgroup_size_control {
info = info.push_next(feature);
}
@ -419,6 +425,19 @@ impl PhysicalDeviceFeatures {
} else {
None
},
shader_atomic_int64: if device_api_version >= vk::API_VERSION_1_2
|| enabled_extensions.contains(&khr::shader_atomic_int64::NAME)
{
Some(
vk::PhysicalDeviceShaderAtomicInt64Features::default()
.shader_buffer_int64_atomics(requested_features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS
| wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
)),
)
} else {
None
},
subgroup_size_control: if device_api_version >= vk::API_VERSION_1_3
|| enabled_extensions.contains(&ext::subgroup_size_control::NAME)
{
@ -559,6 +578,14 @@ impl PhysicalDeviceFeatures {
features.set(F::SHADER_INT64, self.core.shader_int64 != 0);
features.set(F::SHADER_I16, self.core.shader_int16 != 0);
if let Some(ref shader_atomic_int64) = self.shader_atomic_int64 {
features.set(
F::SHADER_INT64_ATOMIC_ALL_OPS | F::SHADER_INT64_ATOMIC_MIN_MAX,
shader_atomic_int64.shader_buffer_int64_atomics != 0
&& shader_atomic_int64.shader_shared_int64_atomics != 0,
);
}
//if caps.supports_extension(khr::sampler_mirror_clamp_to_edge::NAME) {
//if caps.supports_extension(ext::sampler_filter_minmax::NAME) {
features.set(
@ -964,6 +991,13 @@ impl PhysicalDeviceProperties {
extensions.push(ext::texture_compression_astc_hdr::NAME);
}
// Require `VK_KHR_shader_atomic_int64` if the associated feature was requested
if requested_features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS | wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
) {
extensions.push(khr::shader_atomic_int64::NAME);
}
extensions
}
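
A standalone sketch of this extension-selection step, using &'static str names in place of ash's &CStr constants:

fn required_extensions(wants_int64_atomics: bool) -> Vec<&'static str> {
    let mut extensions = Vec::new();
    if wants_int64_atomics {
        // Promoted to core in Vulkan 1.2; the extension is only needed on
        // devices that predate it.
        extensions.push("VK_KHR_shader_atomic_int64");
    }
    extensions
}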
@ -1681,6 +1715,13 @@ impl super::Adapter {
capabilities.push(spv::Capability::Int64);
}
if features.intersects(
wgt::Features::SHADER_INT64_ATOMIC_ALL_OPS
| wgt::Features::SHADER_INT64_ATOMIC_MIN_MAX,
) {
capabilities.push(spv::Capability::Int64Atomics);
}
let mut flags = spv::WriterFlags::empty();
flags.set(
spv::WriterFlags::DEBUG,


@ -2246,7 +2246,10 @@ impl crate::Device for super::Device {
let vk_buffer_info = vk::BufferCreateInfo::default()
.size(desc.size)
.usage(vk::BufferUsageFlags::ACCELERATION_STRUCTURE_STORAGE_KHR)
.usage(
vk::BufferUsageFlags::ACCELERATION_STRUCTURE_STORAGE_KHR
| vk::BufferUsageFlags::SHADER_DEVICE_ADDRESS,
)
.sharing_mode(vk::SharingMode::EXCLUSIVE);
unsafe {


@ -1 +1 @@
{"files":{"Cargo.toml":"d8f88446d6c1740116442320eca91e06ce9a2f4713179195c1be44e8ab1fc42d","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"19d250e0354a4243d5d58673fbece59a052e6a2a217dc27eb7c8c4ed067d25c0","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}
{"files":{"Cargo.toml":"d8f88446d6c1740116442320eca91e06ce9a2f4713179195c1be44e8ab1fc42d","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"5269a18acfd02da3fedcb8d96c681dc5dae651c5f32758aa2676c3bc79e5d2d9","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null}

third_party/rust/wgpu-types/src/lib.rs (vendored)

@ -923,6 +923,23 @@ bitflags::bitflags! {
/// - DX12
/// - Metal
const PIPELINE_CACHE = 1 << 59;
/// Allows shaders to use i64 and u64 atomic min and max.
///
/// Supported platforms:
/// - Vulkan (with VK_KHR_shader_atomic_int64)
/// - DX12 (with SM 6.6+)
/// - Metal (with MSL 2.4+)
///
/// This is a native only feature.
const SHADER_INT64_ATOMIC_MIN_MAX = 1 << 60;
/// Allows shaders to use all i64 and u64 atomic operations.
///
/// Supported platforms:
/// - Vulkan (with VK_KHR_shader_atomic_int64)
/// - DX12 (with SM 6.6+)
///
/// This is a native only feature.
const SHADER_INT64_ATOMIC_ALL_OPS = 1 << 61;
}
}
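
A hedged usage sketch against the public wgpu surface (names assume the wgpu 0.20 API; request_device and DeviceDescriptor are the standard entry points): both new flags are native-only features, so they must be requested explicitly at device creation.

async fn request_int64_atomics_device(
    adapter: &wgpu::Adapter,
) -> Result<(wgpu::Device, wgpu::Queue), wgpu::RequestDeviceError> {
    let supported = adapter.features();
    let mut required_features = wgpu::Features::empty();
    // Prefer the full op set when the adapter offers it; otherwise fall
    // back to the min/max subset.
    if supported.contains(wgpu::Features::SHADER_INT64_ATOMIC_ALL_OPS) {
        required_features |= wgpu::Features::SHADER_INT64_ATOMIC_ALL_OPS;
    } else if supported.contains(wgpu::Features::SHADER_INT64_ATOMIC_MIN_MAX) {
        required_features |= wgpu::Features::SHADER_INT64_ATOMIC_MIN_MAX;
    }
    adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: Some("int64-atomics device"),
                required_features,
                required_limits: wgpu::Limits::default(),
            },
            None,
        )
        .await
}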