Backed out changeset 9e97159bb402 (bug 1622846) for causing bp-hybrid bustages on WebGPUChild.cpp CLOSED TREE

This commit is contained in:
Csoregi Natalia 2021-11-29 21:30:42 +02:00
Родитель 8f8186eee3
Коммит 9c8715f550
347 изменённых файлов: 45021 добавлений и 26871 удалений

Просмотреть файл

@ -84,12 +84,12 @@ rev = "302c995f91f44cf26e77dc4758ad56c3ff0153ad"
[source."https://github.com/gfx-rs/wgpu"]
git = "https://github.com/gfx-rs/wgpu"
replace-with = "vendored-sources"
rev = "5f6c067"
rev = "37288a6"
[source."https://github.com/gfx-rs/naga"]
git = "https://github.com/gfx-rs/naga"
replace-with = "vendored-sources"
rev = "29571cc"
rev = "e226cf3"
[source."https://github.com/bytecodealliance/wasmtime"]
git = "https://github.com/bytecodealliance/wasmtime"

57
Cargo.lock сгенерированный
Просмотреть файл

@ -1433,6 +1433,12 @@ dependencies = [
"winapi",
]
[[package]]
name = "fixedbitset"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e"
[[package]]
name = "flate2"
version = "1.0.20"
@ -2368,7 +2374,6 @@ checksum = "824845a0bf897a9042383849b02c1bc219c2383772efcd5c6f9766fa4b81aef3"
dependencies = [
"autocfg",
"hashbrown",
"serde",
]
[[package]]
@ -3225,17 +3230,17 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "naga"
version = "0.7.1"
source = "git+https://github.com/gfx-rs/naga?rev=29571cc#29571cc4cfbb28558948b1b31ad764f55b69f37b"
version = "0.6.0"
source = "git+https://github.com/gfx-rs/naga?rev=e226cf3#e226cf3f1d67d0bf91587e392879f0aee109ec42"
dependencies = [
"bit-set",
"bitflags",
"codespan-reporting",
"fxhash",
"hexf-parse",
"indexmap",
"log",
"num-traits",
"rustc-hash",
"petgraph",
"serde",
"spirv",
"thiserror",
@ -3655,6 +3660,16 @@ version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e"
[[package]]
name = "petgraph"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f"
dependencies = [
"fixedbitset",
"indexmap",
]
[[package]]
name = "phf"
version = "0.8.0"
@ -4191,17 +4206,6 @@ dependencies = [
"serde",
]
[[package]]
name = "ron"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b861ecaade43ac97886a512b360d01d66be9f41f3c61088b42cedf92e03d678"
dependencies = [
"base64 0.13.0",
"bitflags",
"serde",
]
[[package]]
name = "rsclientcerts"
version = "0.1.0"
@ -5583,7 +5587,7 @@ dependencies = [
"objc",
"plane-split",
"rayon",
"ron 0.6.5",
"ron",
"serde",
"smallvec",
"svg_fmt",
@ -5666,20 +5670,19 @@ dependencies = [
[[package]]
name = "wgpu-core"
version = "0.11.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=5f6c067#5f6c06781534dc5ecd54e1a1cf89ece98b46f49f"
version = "0.10.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=37288a6#37288a657f022ce508f3b1c92eb6cdbe20f4cca9"
dependencies = [
"arrayvec 0.7.1",
"bitflags",
"cfg_aliases",
"codespan-reporting",
"copyless",
"fxhash",
"log",
"naga",
"parking_lot",
"profiling",
"ron 0.7.0",
"ron",
"serde",
"smallvec",
"thiserror",
@ -5689,8 +5692,8 @@ dependencies = [
[[package]]
name = "wgpu-hal"
version = "0.11.2"
source = "git+https://github.com/gfx-rs/wgpu?rev=5f6c067#5f6c06781534dc5ecd54e1a1cf89ece98b46f49f"
version = "0.10.1"
source = "git+https://github.com/gfx-rs/wgpu?rev=37288a6#37288a657f022ce508f3b1c92eb6cdbe20f4cca9"
dependencies = [
"arrayvec 0.7.1",
"ash",
@ -5705,7 +5708,6 @@ dependencies = [
"gpu-alloc",
"gpu-descriptor",
"inplace_it",
"js-sys",
"khronos-egl",
"libloading 0.7.0",
"log",
@ -5713,21 +5715,18 @@ dependencies = [
"naga",
"objc",
"parking_lot",
"profiling",
"range-alloc",
"raw-window-handle",
"renderdoc-sys",
"thiserror",
"wasm-bindgen",
"web-sys",
"wgpu-types",
"winapi",
]
[[package]]
name = "wgpu-types"
version = "0.11.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=5f6c067#5f6c06781534dc5ecd54e1a1cf89ece98b46f49f"
version = "0.10.0"
source = "git+https://github.com/gfx-rs/wgpu?rev=37288a6#37288a657f022ce508f3b1c92eb6cdbe20f4cca9"
dependencies = [
"bitflags",
"bitflags_serde_shim",

Просмотреть файл

@ -268,10 +268,6 @@ already_AddRefed<Texture> Device::InitSwapChain(
return CreateTexture(desc);
}
bool Device::CheckNewWarning(const nsACString& aMessage) {
return mKnownWarnings.EnsureInserted(aMessage);
}
void Device::Destroy() {
// TODO
}

Просмотреть файл

@ -7,7 +7,6 @@
#define GPU_DEVICE_H_
#include "ObjectModel.h"
#include "nsTHashSet.h"
#include "mozilla/MozPromise.h"
#include "mozilla/RefPtr.h"
#include "mozilla/webgpu/WebGPUTypes.h"
@ -94,7 +93,6 @@ class Device final : public DOMEventTargetHelper {
const dom::GPUCanvasConfiguration& aDesc,
wr::ExternalImageId aExternalImageId, gfx::SurfaceFormat aFormat,
gfx::IntSize* aDefaultSize);
bool CheckNewWarning(const nsACString& aMessage);
private:
~Device();
@ -104,7 +102,6 @@ class Device final : public DOMEventTargetHelper {
bool mValid = true;
nsString mLabel;
RefPtr<Queue> mQueue;
nsTHashSet<nsCString> mKnownWarnings;
public:
void GetLabel(nsAString& aValue) const;

Просмотреть файл

@ -13,8 +13,6 @@
#include "mozilla/webgpu/ffi/wgpu.h"
#include "Sampler.h"
const int MAX_KNOWN_WARNINGS = 10;
namespace mozilla {
namespace webgpu {
@ -832,18 +830,15 @@ ipc::IPCResult WebGPUChild::RecvDeviceUncapturedError(
} else {
auto* target = targetIter->second;
MOZ_ASSERT(target);
// We don't want to spam the errors to the console indefinitely
if (target->CheckNewWarning(aMessage)) {
JsWarning(target->GetOwnerGlobal(), aMessage);
JsWarning(target->GetOwnerGlobal(), aMessage);
dom::GPUUncapturedErrorEventInit init;
init.mError.SetAsGPUValidationError() =
new ValidationError(target, aMessage);
RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
dom::GPUUncapturedErrorEvent::Constructor(
target, u"uncapturederror"_ns, init);
target->DispatchEvent(*event);
}
dom::GPUUncapturedErrorEventInit init;
init.mError.SetAsGPUValidationError() =
new ValidationError(target, aMessage);
RefPtr<mozilla::dom::GPUUncapturedErrorEvent> event =
dom::GPUUncapturedErrorEvent::Constructor(target, u"uncapturederror"_ns,
init);
target->DispatchEvent(*event);
}
return IPC_OK();
}

Просмотреть файл

@ -362,8 +362,8 @@ typedef [EnforceRange] unsigned long GPUTextureUsageFlags;
interface GPUTextureUsage {
const GPUTextureUsageFlags COPY_SRC = 0x01;
const GPUTextureUsageFlags COPY_DST = 0x02;
const GPUTextureUsageFlags TEXTURE_BINDING = 0x04;
const GPUTextureUsageFlags STORAGE_BINDING = 0x08;
const GPUTextureUsageFlags SAMPLED = 0x04;
const GPUTextureUsageFlags STORAGE = 0x08;
const GPUTextureUsageFlags RENDER_ATTACHMENT = 0x10;
};

Просмотреть файл

@ -17,20 +17,18 @@ default = []
[dependencies.wgc]
package = "wgpu-core"
git = "https://github.com/gfx-rs/wgpu"
rev = "5f6c067"
#Note: "replay" shouldn't ideally be needed,
# but it allows us to serialize everything across IPC.
rev = "37288a6"
features = ["replay", "trace", "serial-pass"]
[dependencies.wgt]
package = "wgpu-types"
git = "https://github.com/gfx-rs/wgpu"
rev = "5f6c067"
rev = "37288a6"
[dependencies.wgh]
package = "wgpu-hal"
git = "https://github.com/gfx-rs/wgpu"
rev = "5f6c067"
rev = "37288a6"
[dependencies]
bincode = "1"

Просмотреть файл

@ -134,7 +134,7 @@ pub struct PrimitiveState<'a> {
front_face: wgt::FrontFace,
cull_mode: Option<&'a wgt::Face>,
polygon_mode: wgt::PolygonMode,
unclipped_depth: bool,
clamp_depth: bool,
}
impl PrimitiveState<'_> {
@ -145,7 +145,7 @@ impl PrimitiveState<'_> {
front_face: self.front_face.clone(),
cull_mode: self.cull_mode.cloned(),
polygon_mode: self.polygon_mode,
unclipped_depth: self.unclipped_depth,
clamp_depth: self.clamp_depth,
conservative: false,
}
}
@ -628,7 +628,6 @@ pub extern "C" fn wgpu_device_create_render_bundle_encoder(
stencil_read_only: false,
}),
sample_count: desc.sample_count,
multiview: None,
};
match wgc::command::RenderBundleEncoder::new(&descriptor, device_id, None) {
Ok(encoder) => Box::into_raw(Box::new(encoder)),
@ -778,15 +777,10 @@ pub unsafe extern "C" fn wgpu_client_create_bind_group_layout(
has_dynamic_offset: entry.has_dynamic_offset,
min_binding_size: entry.min_binding_size,
},
RawBindingType::Sampler => wgt::BindingType::Sampler(
if entry.sampler_compare {
wgt::SamplerBindingType::Comparison
} else if entry.sampler_filter {
wgt::SamplerBindingType::Filtering
} else {
wgt::SamplerBindingType::NonFiltering
}
),
RawBindingType::Sampler => wgt::BindingType::Sampler {
comparison: entry.sampler_compare,
filtering: entry.sampler_filter,
},
RawBindingType::SampledTexture => wgt::BindingType::Texture {
//TODO: the spec has a bug here
view_dimension: *entry
@ -921,7 +915,6 @@ pub unsafe extern "C" fn wgpu_client_create_shader_module(
std::str::from_utf8_unchecked(std::slice::from_raw_parts(desc.code, desc.code_length));
let desc = wgc::pipeline::ShaderModuleDescriptor {
label: cow_label(&desc.label),
shader_bound_checks: wgt::ShaderBoundChecks::new(),
};
let action = DeviceAction::CreateShaderModule(id, desc, Cow::Borrowed(code));
@ -986,7 +979,6 @@ pub unsafe extern "C" fn wgpu_client_create_render_pipeline(
primitive: desc.primitive.to_wgpu(),
depth_stencil: desc.depth_stencil.cloned(),
multisample: desc.multisample.clone(),
multiview: None,
};
let implicit = match desc.layout {

Просмотреть файл

@ -448,8 +448,8 @@ impl GlobalExt for Global {
error_buf.init(err);
}
}
CommandEncoderAction::FillBuffer { dst, offset, size } => {
if let Err(err) = self.command_encoder_fill_buffer::<A>(self_id, dst, offset, size)
CommandEncoderAction::ClearBuffer { dst, offset, size } => {
if let Err(err) = self.command_encoder_clear_buffer::<A>(self_id, dst, offset, size)
{
error_buf.init(err);
}

1
third_party/rust/fixedbitset/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"bcacef1787d5859f9f7b01a07cba08ca89f296ccca569fcb01b1ee67a8d1d90b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ce592787ff2321feab698a4c612237f4378cc658ebb1d472913e5802cc47afb4","README.rst":"ea76fe055c46ccb817d87fc68b2dcff2540e8e0a7823a7a819fe8d9a8fdff27a","benches/benches.rs":"745803c7962409ba8a63635336ca5f6b971ef1dc8f46e2cdee2a8a0c6b86e9a9","src/lib.rs":"9b6462707f38ee2c21fed5d2701102ae6c06437429858dd138a42dd163885e0d","src/range.rs":"6c9fd2462e353221dcf63393a78783428995a9460de3e4c799bd00a273dda9d8"},"package":"398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e"}

35
third_party/rust/fixedbitset/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,35 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "fixedbitset"
version = "0.4.0"
authors = ["bluss"]
description = "FixedBitSet is a simple bitset collection"
documentation = "https://docs.rs/fixedbitset/"
keywords = ["container", "data-structure", "bitvec", "bitset", "no_std"]
categories = ["data-structures"]
license = "MIT/Apache-2.0"
repository = "https://github.com/bluss/fixedbitset"
[package.metadata.release]
no-dev-version = true
tag-name = "{{version}}"
[dependencies.serde]
version = "1.0"
features = ["derive"]
optional = true
[dev-dependencies.serde_json]
version = "1.0"
[features]
default = ["std"]
std = []

Просмотреть файл

Просмотреть файл

@ -1,4 +1,4 @@
Copyright (c) 2017 RON developers
Copyright (c) 2015-2017
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

118
third_party/rust/fixedbitset/README.rst поставляемый Normal file
Просмотреть файл

@ -0,0 +1,118 @@
fixedbitset
===========
A simple bitset container for Rust
Please read the `API documentation here`__
__ https://docs.rs/fixedbitset/
|build_status|_ |crates|_
.. |build_status| image:: https://github.com/petgraph/fixedbitset/workflows/Continuous%20integration/badge.svg?branch=master
.. _build_status: https://github.com/petgraph/fixedbitset/actions
.. |crates| image:: http://meritbadge.herokuapp.com/fixedbitset
.. _crates: https://crates.io/crates/fixedbitset
Recent Changes
--------------
- 0.4.0
+ `#61`_: Require Rust 1.39.
+ `#60`_: Add `const` `FixedBitSet::new` consructor by @jakobhellermann.
+ `#59`_: Add optional `serde` support by @keshavsn.
- 0.3.2
+ `#18`_: Optimize `ones` using `trailing_zeroes` by @vks
- 0.3.1
+ Add bit assign operators for references by @flaghacker
+ Improve assertion error messages by @lovasoa
+ Add documentation examples for ``with_capacity_and_blocks``
- 0.3.0
+ Add ``with_capacity_and_blocks`` by @luizirber
+ Add ``difference_with`` by @sunshowers
+ Implement ``Binary`` and ``Display`` traits by @Dolphindalt
+ Add ``toggle_range`` by @wirelyre
- 0.2.0
+ Add assign operators for the bit operations by @jrraymond
+ Add ``symmetric_difference``, ``union_with``, ``intersection_with`` by @jrraymond
+ Add ``is_subset``, ``is_superset``, ``is_disjoint`` by @nwn
+ Add ``.toggle(i)`` method by @ShiroUsagi-san
+ Add default feature "std" which can be disabled to make the crate not
link the std library. By @jonimake and @bluss
+ Require Rust 1.31.
- 0.1.9
+ Add intersection, union, difference iterators by @jrraymond
+ Add intersection: ``&`` and union: ``|`` operator implementations by @jrraymond
+ Add Extend and FromIterator implementations (from sequences of bit indices)
by @jrraymond
- 0.1.8
+ Add missing ``#[inline]`` on the ones iterator
+ Fix docs for ``insert_range, set_range``
- 0.1.7
+ Add fast methods ``.insert_range``, ``.set_range`` by @kennytm
- 0.1.6
+ Add iterator ``.ones()`` by @mneumann
+ Fix bug with ``.count_ones()`` where it would erronously have an
out-of-bounds panic for even block endpoints
- 0.1.5
+ Add method ``.count_ones(range)``.
- 0.1.4
+ Remove an assertion in ``.copy_bit(from, to)`` so that it is in line
with the documentation. The ``from`` bit does not need to be in bounds.
+ Improve ``.grow()`` to use ``Vec::resize`` internally.
- 0.1.3
+ Add method ``.put()`` to enable a bit and return previous value
- 0.1.2
+ Add method ``.copy_bit()`` (by fuine)
+ impl Default
- 0.1.1
+ Update documentation URL
- 0.1.0
+ Add method ``.grow()``
License
-------
Dual-licensed to be compatible with the Rust project.
Licensed under the Apache License, Version 2.0
http://www.apache.org/licenses/LICENSE-2.0 or the MIT license
http://opensource.org/licenses/MIT, at your
option. This file may not be copied, modified, or distributed
except according to those terms.
.. _#18: https://github.com/petgraph/fixedbitset/pull/18
.. _#59: https://github.com/petgraph/fixedbitset/pull/59
.. _#60: https://github.com/petgraph/fixedbitset/pull/60
.. _#61: https://github.com/petgraph/fixedbitset/pull/61

133
third_party/rust/fixedbitset/benches/benches.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,133 @@
#![feature(test)]
extern crate test;
extern crate fixedbitset;
use test::Bencher;
use fixedbitset::{FixedBitSet};
use std::mem::size_of;
#[inline]
fn iter_ones_using_contains<F: FnMut(usize)>(fb: &FixedBitSet, f: &mut F) {
for bit in 0 .. fb.len() {
if fb.contains(bit) {
f(bit);
}
}
}
#[inline]
fn iter_ones_using_slice_directly<F: FnMut(usize)>(fb: &FixedBitSet, f: &mut F) {
for (block_idx, &block) in fb.as_slice().iter().enumerate() {
let mut bit_pos = block_idx * size_of::<u32>() * 8;
let mut block: u32 = block;
while block != 0 {
if (block & 1) == 1 {
f(bit_pos);
}
block = block >> 1;
bit_pos += 1;
}
}
}
#[bench]
fn bench_iter_ones_using_contains_all_zeros(b: &mut Bencher) {
const N: usize = 1_000_000;
let fb = FixedBitSet::with_capacity(N);
b.iter(|| {
let mut count = 0;
iter_ones_using_contains(&fb, &mut |_bit| count += 1);
count
});
}
#[bench]
fn bench_iter_ones_using_contains_all_ones(b: &mut Bencher) {
const N: usize = 1_000_000;
let mut fb = FixedBitSet::with_capacity(N);
fb.insert_range(..);
b.iter(|| {
let mut count = 0;
iter_ones_using_contains(&fb, &mut |_bit| count += 1);
count
});
}
#[bench]
fn bench_iter_ones_using_slice_directly_all_zero(b: &mut Bencher) {
const N: usize = 1_000_000;
let fb = FixedBitSet::with_capacity(N);
b.iter(|| {
let mut count = 0;
iter_ones_using_slice_directly(&fb, &mut |_bit| count += 1);
count
});
}
#[bench]
fn bench_iter_ones_using_slice_directly_all_ones(b: &mut Bencher) {
const N: usize = 1_000_000;
let mut fb = FixedBitSet::with_capacity(N);
fb.insert_range(..);
b.iter(|| {
let mut count = 0;
iter_ones_using_slice_directly(&fb, &mut |_bit| count += 1);
count
});
}
#[bench]
fn bench_iter_ones_all_zeros(b: &mut Bencher) {
const N: usize = 1_000_000;
let fb = FixedBitSet::with_capacity(N);
b.iter(|| {
let mut count = 0;
for _ in fb.ones() {
count += 1;
}
count
});
}
#[bench]
fn bench_iter_ones_all_ones(b: &mut Bencher) {
const N: usize = 1_000_000;
let mut fb = FixedBitSet::with_capacity(N);
fb.insert_range(..);
b.iter(|| {
let mut count = 0;
for _ in fb.ones() {
count += 1;
}
count
});
}
#[bench]
fn bench_insert_range(b: &mut Bencher) {
const N: usize = 1_000_000;
let mut fb = FixedBitSet::with_capacity(N);
b.iter(|| {
fb.insert_range(..)
});
}
#[bench]
fn bench_insert_range_using_loop(b: &mut Bencher) {
const N: usize = 1_000_000;
let mut fb = FixedBitSet::with_capacity(N);
b.iter(|| {
for i in 0..N {
fb.insert(i);
}
});
}

1603
third_party/rust/fixedbitset/src/lib.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

39
third_party/rust/fixedbitset/src/range.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,39 @@
use std::ops::{
RangeFull,
RangeFrom,
RangeTo,
Range,
};
// Taken from https://github.com/bluss/odds/blob/master/src/range.rs.
/// **IndexRange** is implemented by Rust's built-in range types, produced
/// by range syntax like `..`, `a..`, `..b` or `c..d`.
pub trait IndexRange<T=usize> {
#[inline]
/// Start index (inclusive)
fn start(&self) -> Option<T> { None }
#[inline]
/// End index (exclusive)
fn end(&self) -> Option<T> { None }
}
impl<T> IndexRange<T> for RangeFull {}
impl<T: Copy> IndexRange<T> for RangeFrom<T> {
#[inline]
fn start(&self) -> Option<T> { Some(self.start) }
}
impl<T: Copy> IndexRange<T> for RangeTo<T> {
#[inline]
fn end(&self) -> Option<T> { Some(self.end) }
}
impl<T: Copy> IndexRange<T> for Range<T> {
#[inline]
fn start(&self) -> Option<T> { Some(self.start) }
#[inline]
fn end(&self) -> Option<T> { Some(self.end) }
}

2
third_party/rust/naga/.cargo-checksum.json поставляемый

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

Просмотреть файл

@ -26,7 +26,6 @@ jobs:
with:
name: code-coverage-report
path: cobertura.xml
parse-dota2:
name: Parse Dota2 shaders
runs-on: ubuntu-latest
@ -45,7 +44,6 @@ jobs:
run: cargo build --release --bin naga
- name: Convert shaders
run: for file in data/*.spv ; do echo "Translating" ${file} && target/release/naga --validate 27 ${file} ${file}.metal; done
parse-vulkan-tutorial-shaders:
name: Parse Sascha Willems Vulkan tutorial shaders
runs-on: ubuntu-latest
@ -78,7 +76,6 @@ jobs:
echo "Result: $(expr $FILE_COUNT - $SUCCESS_RESULT_COUNT) / $FILE_COUNT" > counter
done
cat counter
dneto0_spirv-samples:
name: Parse dneto0 spirv-samples
runs-on: ubuntu-latest
@ -152,25 +149,3 @@ jobs:
echo "Result: $(expr $FILE_COUNT - $SUCCESS_RESULT_COUNT) / $FILE_COUNT" > counter
done
cat counter
check-snapshots-extra:
name: Check snapshots (validated or not)
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
- uses: actions-rs/cargo@v1
name: Test minimal (without span)
with:
command: test
args: --features validate -p naga
- uses: actions-rs/cargo@v1
name: Test all (without validation)
with:
command: test
args: --features wgsl-in,wgsl-out,glsl-in,glsl-out,spv-in,spv-out,msl-out,hlsl-out,dot-out --workspace
- name: Check snapshots (without validation)
run: git diff --exit-code -- tests/out

Просмотреть файл

@ -71,17 +71,3 @@ jobs:
with:
command: doc
args: -p naga --all-features --document-private-items
fmt:
name: Format
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: stable
override: true
components: rustfmt
- name: run rustfmt
run: |
cargo fmt -- --check

Просмотреть файл

@ -6,7 +6,6 @@ on:
- 'tests/out/glsl/*.glsl'
- 'tests/out/dot/*.dot'
- 'tests/out/wgsl/*.wgsl'
- 'src/front/wgsl/*'
jobs:
validate-linux:

7
third_party/rust/naga/.monocodus поставляемый Normal file
Просмотреть файл

@ -0,0 +1,7 @@
version: 1.1.0
rust:
formatter:
name: rustfmt
repo_checkers:
- name: rust-clippy

75
third_party/rust/naga/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,80 +1,5 @@
# Change Log
## v0.7.1 (2021-10-12)
- implement casts from and to booleans in the backends
## v0.7 (2021-10-07)
- development release for wgpu-0.11
- API:
- bit extraction and packing functions
- hyperbolic trigonometry functionss
- validation is gated by a cargo feature
- `view_index` builtin
- separate bounds checking policies for locals/buffers/textures
- IR:
- types and constants are guaranteed to be unique
- WGSL-in:
- new hex literal parser
- updated list of reserved words
- rewritten logic for resolving references and pointers
- `switch` can use unsigned selectors
- GLSL-in:
- better support for texture sampling
- better logic for auto-splatting scalars
- GLSL-out:
- fixed storage buffer layout
- fix module operator
- HLSL-out:
- fixed texture queries
- SPV-in:
- control flow handling is rewritten from scratch
- SPV-out:
- fully covered out-of-bounds checking
- option to emit point size
- option to clamp output depth
### v0.6.3 (2021-09-08)
- Reduced heap allocations when generating WGSL, HLSL, and GLSL
- WGSL-in:
- support module-scope `let` type inference
- SPV-in:
- fix depth sampling with projection
- HLSL-out:
- fix local struct construction
- GLSL-out:
- fix `select()` order
- SPV-out:
- allow working around Adreno issue with `OpName`
### v0.6.2 (2021-09-01)
- SPV-out fixes:
- requested capabilities for 1D and cube images, storage formats
- handling `break` and `continue` in a `switch` statement
- avoid generating duplicate `OpTypeImage` types
- HLSL-out fixes:
- fix output struct member names
- MSL-out fixes:
- fix packing of fields in interface structs
- GLSL-out fixes:
- fix non-fallthrough `switch` cases
- GLSL-in fixes:
- avoid infinite loop on invalid statements
### v0.6.1 (2021-08-24)
- HLSL-out fixes:
- array arguments
- pointers to array arguments
- switch statement
- rewritten interface matching
- SPV-in fixes:
- array storage texture stores
- tracking sampling across function parameters
- updated petgraph dependencies
- MSL-out:
- gradient sampling
- GLSL-out:
- modulo operator on floats
## v0.6 (2021-08-18)
- development release for wgpu-0.10
- API:

23
third_party/rust/naga/Cargo.toml поставляемый
Просмотреть файл

@ -1,11 +1,11 @@
[package]
name = "naga"
version = "0.7.1"
version = "0.6.0"
authors = ["Naga Developers"]
edition = "2018"
description = "Shader translation infrastructure"
homepage = "https://github.com/gfx-rs/naga"
repository = "https://github.com/gfx-rs/naga/tree/v0.7"
repository = "https://github.com/gfx-rs/naga/tree/v0.5"
keywords = ["shader", "SPIR-V", "GLSL", "MSL"]
license = "MIT OR Apache-2.0"
exclude = ["bin/**/*", "tests/**/*", "Cargo.lock", "target/**/*"]
@ -19,14 +19,14 @@ all-features = true
bitflags = "1"
bit-set = "0.5"
codespan-reporting = { version = "0.11.0", optional = true }
rustc-hash = "1.1.0"
indexmap = "1.6" # 1.7 has MSRV 1.49
fxhash = "0.2"
log = "0.4"
num-traits = "0.2"
spirv = { version = "0.2", optional = true }
thiserror = "1.0.21"
serde = { version = "1.0", features = ["derive"], optional = true }
petgraph = { version ="0.6", optional = true }
rose_tree = { version ="0.3", optional = true }
pp-rs = { version = "0.2.1", optional = true }
hexf-parse = { version = "0.2.1", optional = true }
@ -35,25 +35,24 @@ default = []
dot-out = []
glsl-in = ["pp-rs"]
glsl-validate = []
glsl-out = []
glsl-out = ["petgraph"]
msl-out = []
serialize = ["serde", "indexmap/serde-1"]
deserialize = ["serde", "indexmap/serde-1"]
spv-in = ["petgraph", "spirv"]
serialize = ["serde"]
deserialize = ["serde"]
spv-in = ["petgraph", "spirv", "rose_tree"]
spv-out = ["spirv"]
wgsl-in = ["codespan-reporting", "hexf-parse"]
wgsl-out = []
hlsl-out = []
span = ["codespan-reporting"]
validate = []
[dev-dependencies]
diff = "0.1"
ron = "0.7"
ron = "0.6"
serde = { version = "1.0", features = ["derive"] }
spirv = { version = "0.2", features = ["deserialize"] }
rspirv = "0.11"
env_logger = "0.9"
rspirv = "0.10"
env_logger = "0.8"
[workspace]
members = [".", "cli"]

263
third_party/rust/naga/src/arena.rs поставляемый
Просмотреть файл

@ -6,11 +6,8 @@ use std::{cmp::Ordering, fmt, hash, marker::PhantomData, num::NonZeroU32, ops};
type Index = NonZeroU32;
use crate::Span;
use indexmap::set::IndexSet;
/// A strongly typed reference to an arena item.
///
/// A `Handle` value can be used as an index into an [`Arena`] or [`UniqueArena`].
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(
@ -31,35 +28,28 @@ impl<T> Clone for Handle<T> {
}
}
}
impl<T> Copy for Handle<T> {}
impl<T> PartialEq for Handle<T> {
fn eq(&self, other: &Self) -> bool {
self.index == other.index
}
}
impl<T> Eq for Handle<T> {}
impl<T> PartialOrd for Handle<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
self.index.partial_cmp(&other.index)
}
}
impl<T> Ord for Handle<T> {
fn cmp(&self, other: &Self) -> Ordering {
self.index.cmp(&other.index)
}
}
impl<T> fmt::Debug for Handle<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}]", self.index)
}
}
impl<T> hash::Hash for Handle<T> {
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
self.index.hash(hasher)
@ -85,22 +75,6 @@ impl<T> Handle<T> {
let index = self.index.get() - 1;
index as usize
}
/// Convert a `usize` index into a `Handle<T>`.
fn from_usize(index: usize) -> Self {
use std::convert::TryFrom;
let handle_index = u32::try_from(index + 1)
.ok()
.and_then(Index::new)
.expect("Failed to insert into UniqueArena. Handle overflows");
Handle::new(handle_index)
}
/// Convert a `usize` index into a `Handle<T>`, without range checks.
unsafe fn from_usize_unchecked(index: usize) -> Self {
Handle::new(Index::new_unchecked((index + 1) as u32))
}
}
/// A strongly typed range of handles.
@ -124,13 +98,11 @@ impl<T> Clone for Range<T> {
}
}
}
impl<T> fmt::Debug for Range<T> {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
write!(formatter, "[{}..{}]", self.inner.start + 1, self.inner.end)
}
}
impl<T> Iterator for Range<T> {
type Item = Handle<T>;
fn next(&mut self) -> Option<Self::Item> {
@ -168,7 +140,6 @@ impl<T> Default for Arena<T> {
Self::new()
}
}
impl<T: fmt::Debug> fmt::Debug for Arena<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
@ -203,30 +174,34 @@ impl<T> Arena<T> {
/// Returns an iterator over the items stored in this arena, returning both
/// the item's handle and a reference to it.
pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
self.data
.iter()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
self.data.iter().enumerate().map(|(i, v)| {
let position = i + 1;
let index = unsafe { Index::new_unchecked(position as u32) };
(Handle::new(index), v)
})
}
/// Returns a iterator over the items stored in this arena,
/// returning both the item's handle and a mutable reference to it.
pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = (Handle<T>, &mut T)> {
self.data
.iter_mut()
.enumerate()
.map(|(i, v)| unsafe { (Handle::from_usize_unchecked(i), v) })
self.data.iter_mut().enumerate().map(|(i, v)| {
let position = i + 1;
let index = unsafe { Index::new_unchecked(position as u32) };
(Handle::new(index), v)
})
}
/// Adds a new value to the arena, returning a typed handle.
pub fn append(&mut self, value: T, span: Span) -> Handle<T> {
#[cfg(not(feature = "span"))]
let _ = span;
let index = self.data.len();
let position = self.data.len() + 1;
let index =
Index::new(position as u32).expect("Failed to append to Arena. Handle overflows");
self.data.push(value);
#[cfg(feature = "span")]
self.span_info.push(span);
Handle::from_usize(index)
Handle::new(index)
}
/// Fetch a handle to an existing type.
@ -234,7 +209,7 @@ impl<T> Arena<T> {
self.data
.iter()
.position(fun)
.map(|index| unsafe { Handle::from_usize_unchecked(index) })
.map(|index| Handle::new(unsafe { Index::new_unchecked((index + 1) as u32) }))
}
/// Adds a value with a custom check for uniqueness:
@ -248,7 +223,8 @@ impl<T> Arena<T> {
fun: F,
) -> Handle<T> {
if let Some(index) = self.data.iter().position(|d| fun(d, &value)) {
unsafe { Handle::from_usize_unchecked(index) }
let index = unsafe { Index::new_unchecked((index + 1) as u32) };
Handle::new(index)
} else {
self.append(value, span)
}
@ -284,18 +260,15 @@ impl<T> Arena<T> {
self.data.clear()
}
pub fn get_span(&self, handle: Handle<T>) -> Span {
pub fn get_span(&self, handle: Handle<T>) -> &Span {
#[cfg(feature = "span")]
{
*self
.span_info
.get(handle.index())
.unwrap_or(&Span::default())
return self.span_info.get(handle.index()).unwrap_or(&Span::Unknown);
}
#[cfg(not(feature = "span"))]
{
let _ = handle;
Span::default()
&Span::Unknown
}
}
}
@ -311,9 +284,7 @@ where
{
let data = Vec::deserialize(deserializer)?;
#[cfg(feature = "span")]
let span_info = std::iter::repeat(Span::default())
.take(data.len())
.collect();
let span_info = std::iter::repeat(Span::Unknown).take(data.len()).collect();
Ok(Self {
data,
@ -383,195 +354,3 @@ mod tests {
assert!(arena[t1] != arena[t2]);
}
}
/// An arena whose elements are guaranteed to be unique.
///
/// A `UniqueArena` holds a set of unique values of type `T`, each with an
/// associated [`Span`]. Inserting a value returns a `Handle<T>`, which can be
/// used to index the `UniqueArena` and obtain shared access to the `T` element.
/// Access via a `Handle` is an array lookup - no hash lookup is necessary.
///
/// The element type must implement `Eq` and `Hash`. Insertions of equivalent
/// elements, according to `Eq`, all return the same `Handle`.
///
/// Once inserted, elements may not be mutated.
///
/// `UniqueArena` is similar to [`Arena`]: If `Arena` is vector-like,
/// `UniqueArena` is `HashSet`-like.
pub struct UniqueArena<T> {
set: IndexSet<T>,
/// Spans for the elements, indexed by handle.
///
/// The length of this vector is always equal to `set.len()`. `IndexSet`
/// promises that its elements "are indexed in a compact range, without
/// holes in the range 0..set.len()", so we can always use the indices
/// returned by insertion as indices into this vector.
#[cfg(feature = "span")]
span_info: Vec<Span>,
}
impl<T> UniqueArena<T> {
    /// Create a new arena with no initial capacity allocated.
    pub fn new() -> Self {
        UniqueArena {
            set: IndexSet::new(),
            // `span_info` must always stay the same length as `set`;
            // both start out empty.
            #[cfg(feature = "span")]
            span_info: Vec::new(),
        }
    }
    /// Return the current number of items stored in this arena.
    pub fn len(&self) -> usize {
        self.set.len()
    }
    /// Return `true` if the arena contains no elements.
    pub fn is_empty(&self) -> bool {
        self.set.is_empty()
    }
    /// Clears the arena, keeping all allocations.
    pub fn clear(&mut self) {
        self.set.clear();
        // Keep the invariant `span_info.len() == set.len()`.
        #[cfg(feature = "span")]
        self.span_info.clear();
    }
    /// Return the span associated with `handle`.
    ///
    /// If a value has been inserted multiple times, the span returned is the
    /// one provided with the first insertion.
    ///
    /// If the `span` feature is not enabled, always return `Span::default`.
    /// This can be detected with [`Span::is_defined`].
    pub fn get_span(&self, handle: Handle<T>) -> Span {
        #[cfg(feature = "span")]
        {
            // An out-of-range handle falls back to the default span rather
            // than panicking.
            *self
                .span_info
                .get(handle.index())
                .unwrap_or(&Span::default())
        }
        #[cfg(not(feature = "span"))]
        {
            // Silence the unused-parameter warning in the span-less build.
            let _ = handle;
            Span::default()
        }
    }
}
impl<T: Eq + hash::Hash> UniqueArena<T> {
    /// Returns an iterator over the items stored in this arena, returning both
    /// the item's handle and a reference to it.
    pub fn iter(&self) -> impl DoubleEndedIterator<Item = (Handle<T>, &T)> {
        self.set.iter().enumerate().map(|(i, v)| {
            // Handle indices appear to be 1-based (stored as `i + 1`),
            // presumably so `Index` can be a non-zero integer type —
            // TODO confirm against `Handle`'s definition.
            let position = i + 1;
            // SAFETY(review): `position` is `i + 1`, so it is never zero.
            let index = unsafe { Index::new_unchecked(position as u32) };
            (Handle::new(index), v)
        })
    }
    /// Insert a new value into the arena.
    ///
    /// Return a [`Handle<T>`], which can be used to index this arena to get a
    /// shared reference to the element.
    ///
    /// If this arena already contains an element that is `Eq` to `value`,
    /// return a `Handle` to the existing element, and drop `value`.
    ///
    /// When the `span` feature is enabled, if `value` is inserted into the
    /// arena, associate `span` with it. An element's span can be retrieved with
    /// the [`get_span`] method.
    ///
    /// [`Handle<T>`]: Handle
    /// [`get_span`]: UniqueArena::get_span
    pub fn insert(&mut self, value: T, span: Span) -> Handle<T> {
        let (index, added) = self.set.insert_full(value);
        #[cfg(feature = "span")]
        {
            // Only a genuinely new element gets a span recorded; re-inserting
            // an equal value keeps the span from the first insertion.
            if added {
                debug_assert!(index == self.span_info.len());
                self.span_info.push(span);
            }
            // Invariant: one span per element.
            debug_assert!(self.set.len() == self.span_info.len());
        }
        // Silence unused-variable warnings in the span-less build.
        #[cfg(not(feature = "span"))]
        let _ = (span, added);
        Handle::from_usize(index)
    }
    /// Return this arena's handle for `value`, if present.
    ///
    /// If this arena already contains an element equal to `value`,
    /// return its handle. Otherwise, return `None`.
    pub fn get(&self, value: &T) -> Option<Handle<T>> {
        self.set
            .get_index_of(value)
            // SAFETY(review): `index` was produced by the set itself, so it
            // is a valid element index for this arena.
            .map(|index| unsafe { Handle::from_usize_unchecked(index) })
    }
    /// Return this arena's value at `handle`, if that is a valid handle.
    pub fn get_handle(&self, handle: Handle<T>) -> Option<&T> {
        self.set.get_index(handle.index())
    }
}
impl<T> Default for UniqueArena<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: fmt::Debug + Eq + hash::Hash> fmt::Debug for UniqueArena<T> {
    /// Render the arena as a map from handles to elements.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut map = f.debug_map();
        map.entries(self.iter());
        map.finish()
    }
}
impl<T> ops::Index<Handle<T>> for UniqueArena<T> {
    type Output = T;
    /// Look up `handle`'s element by plain array indexing — no hashing.
    /// Panics if `handle` is out of range for this arena.
    fn index(&self, handle: Handle<T>) -> &T {
        let position = handle.index();
        &self.set[position]
    }
}
#[cfg(feature = "serialize")]
impl<T> serde::Serialize for UniqueArena<T>
where
    T: Eq + hash::Hash + serde::Serialize,
{
    /// Serialize only the element set; spans are not part of the
    /// serialized form (deserialization re-creates them as defaults).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serde::Serialize::serialize(&self.set, serializer)
    }
}
#[cfg(feature = "deserialize")]
impl<'de, T> serde::Deserialize<'de> for UniqueArena<T>
where
    T: Eq + hash::Hash,
    T: serde::Deserialize<'de>,
{
    /// Deserialize the element set. Spans are not persisted, so every
    /// element is given `Span::default()`.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        let set = IndexSet::deserialize(deserializer)?;
        // Re-establish the invariant `span_info.len() == set.len()` with
        // placeholder spans.
        #[cfg(feature = "span")]
        let span_info = std::iter::repeat(Span::default()).take(set.len()).collect();
        Ok(Self {
            set,
            #[cfg(feature = "span")]
            span_info,
        })
    }
}

13
third_party/rust/naga/src/back/dot/mod.rs поставляемый
Просмотреть файл

@ -63,16 +63,15 @@ impl StatementGraph {
S::Switch {
selector,
ref cases,
ref default,
} => {
self.dependencies.push((id, selector, "selector"));
for case in cases {
let case_id = self.add(&case.body);
let label = match case.value {
crate::SwitchValue::Integer(_) => "case",
crate::SwitchValue::Default => "default",
};
self.flow.push((id, case_id, label));
self.flow.push((id, case_id, "case"));
}
let default_id = self.add(default);
self.flow.push((id, default_id, "default"));
"Switch"
}
S::Loop {
@ -323,7 +322,6 @@ fn write_fun(
arg,
arg1,
arg2,
arg3,
} => {
edges.insert("arg", arg);
if let Some(expr) = arg1 {
@ -332,9 +330,6 @@ fn write_fun(
if let Some(expr) = arg2 {
edges.insert("arg2", expr);
}
if let Some(expr) = arg3 {
edges.insert("arg3", expr);
}
(format!("{:?}", fun).into(), 7)
}
E::As {

Просмотреть файл

@ -22,6 +22,8 @@ bitflags::bitflags! {
/// Adds support for image load and early depth tests
const IMAGE_LOAD_STORE = 1 << 8;
const CONSERVATIVE_DEPTH = 1 << 9;
/// Isn't supported in ES
const TEXTURE_1D = 1 << 10;
/// Interpolation and auxiliary qualifiers. Perspective, Flat, and
/// Centroid are available in all GLSL versions we support.
const NOPERSPECTIVE_QUALIFIER = 1 << 11;
@ -32,7 +34,6 @@ bitflags::bitflags! {
const SAMPLE_VARIABLES = 1 << 15;
/// Arrays with a dynamic length
const DYNAMIC_ARRAY_SIZE = 1 << 16;
const MULTI_VIEW = 1 << 17;
}
}
@ -90,6 +91,9 @@ impl FeaturesManager {
check_feature!(IMAGE_LOAD_STORE, 130, 310);
check_feature!(CONSERVATIVE_DEPTH, 130, 300);
check_feature!(CONSERVATIVE_DEPTH, 130, 300);
// 1D textures are supported by all core versions and aren't supported by an es versions
// so use 0 that way the check will always be false and can be optimized away
check_feature!(TEXTURE_1D, 0);
check_feature!(NOPERSPECTIVE_QUALIFIER, 130);
check_feature!(SAMPLE_QUALIFIER, 400, 320);
// gl_ClipDistance is supported by core versions > 1.3 and aren't supported by an es versions without extensions
@ -97,7 +101,6 @@ impl FeaturesManager {
check_feature!(CULL_DISTANCE, 450, 300);
check_feature!(SAMPLE_VARIABLES, 400, 300);
check_feature!(DYNAMIC_ARRAY_SIZE, 430, 310);
check_feature!(MULTI_VIEW, 140, 310);
// Return an error if there are missing features
if missing.is_empty() {
@ -191,16 +194,6 @@ impl FeaturesManager {
writeln!(out, "#extension GL_OES_sample_variables : require")?;
}
if self.0.contains(Features::SAMPLE_VARIABLES) && version.is_es() {
// https://www.khronos.org/registry/OpenGL/extensions/OES/OES_sample_variables.txt
writeln!(out, "#extension GL_OES_sample_variables : require")?;
}
if self.0.contains(Features::MULTI_VIEW) {
// https://github.com/KhronosGroup/GLSL/blob/master/extensions/ext/GL_EXT_multiview.txt
writeln!(out, "#extension GL_EXT_multiview : require")?;
}
Ok(())
}
}
@ -293,6 +286,8 @@ impl<'a, W> Writer<'a, W> {
} => {
if arrayed && dim == ImageDimension::Cube {
self.features.request(Features::CUBE_TEXTURES_ARRAY)
} else if dim == ImageDimension::D1 {
self.features.request(Features::TEXTURE_1D)
}
match class {
@ -377,9 +372,6 @@ impl<'a, W> Writer<'a, W> {
crate::BuiltIn::SampleIndex => {
self.features.request(Features::SAMPLE_VARIABLES)
}
crate::BuiltIn::ViewIndex => {
self.features.request(Features::MULTI_VIEW)
}
_ => {}
},
Binding::Location {

360
third_party/rust/naga/src/back/glsl/mod.rs поставляемый
Просмотреть файл

@ -129,10 +129,6 @@ impl Version {
fn supports_early_depth_test(&self) -> bool {
*self >= Version::Desktop(130) || *self >= Version::Embedded(310)
}
fn supports_std430_layout(&self) -> bool {
*self >= Version::Desktop(430) || *self >= Version::Embedded(310)
}
}
impl PartialOrd for Version {
@ -311,16 +307,6 @@ pub enum Error {
Custom(String),
}
/// Binary operation with a different logic on the GLSL side
enum BinaryOperation {
/// Vector comparison should use the function like `greaterThan()`, etc.
VectorCompare,
/// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`
Modulo,
/// Any plain operation. No additional logic required
Other,
}
/// Main structure of the glsl backend responsible for all code generation
pub struct Writer<'a, W> {
// Inputs
@ -524,7 +510,7 @@ impl<'a, W: Write> Writer<'a, W> {
// We treat images separately because they might require
// writing the storage format
TypeInner::Image {
mut dim,
dim,
arrayed,
class,
} => {
@ -536,11 +522,6 @@ impl<'a, W: Write> Writer<'a, W> {
} => Some((format, access)),
_ => None,
};
if dim == crate::ImageDimension::D1 && es {
dim = crate::ImageDimension::D2
}
// Gether the location if needed
let layout_binding = if self.options.version.supports_explicit_locations() {
let br = global.binding.as_ref().unwrap();
@ -617,10 +598,17 @@ impl<'a, W: Write> Writer<'a, W> {
continue;
}
// We also `clone` to satisfy the borrow checker
let name = self.names[&NameKey::Function(handle)].clone();
let fun_info = &self.info[handle];
// Write the function
self.write_function(back::FunctionType::Function(handle), function, fun_info)?;
self.write_function(
back::FunctionType::Function(handle),
function,
fun_info,
&name,
)?;
writeln!(self.out)?;
}
@ -629,6 +617,7 @@ impl<'a, W: Write> Writer<'a, W> {
back::FunctionType::EntryPoint(self.entry_point_idx),
&self.entry_point.function,
ep_info,
"main",
)?;
// Add newline at the end of file
@ -726,9 +715,7 @@ impl<'a, W: Write> Writer<'a, W> {
TypeInner::Pointer { .. }
| TypeInner::Struct { .. }
| TypeInner::Image { .. }
| TypeInner::Sampler { .. } => {
return Err(Error::Custom(format!("Unable to write type {:?}", inner)))
}
| TypeInner::Sampler { .. } => unreachable!(),
}
Ok(())
@ -827,28 +814,8 @@ impl<'a, W: Write> Writer<'a, W> {
if self.options.version.supports_explicit_locations() {
if let Some(ref br) = global.binding {
match self.options.binding_map.get(br) {
Some(binding) => {
let layout = match global.class {
crate::StorageClass::Storage { .. } => {
if self.options.version.supports_std430_layout() {
"std430, "
} else {
"std140, "
}
}
crate::StorageClass::Uniform => "std140, ",
_ => "",
};
write!(self.out, "layout({}binding = {}) ", layout, binding)?
}
None => {
log::debug!("unassigned binding for {:?}", global.name);
if let crate::StorageClass::Storage { .. } = global.class {
if self.options.version.supports_std430_layout() {
write!(self.out, "layout(std430) ")?
}
}
}
Some(binding) => write!(self.out, "layout(binding = {}) ", binding)?,
None => log::debug!("unassigned binding for {:?}", global.name),
}
}
}
@ -874,8 +841,8 @@ impl<'a, W: Write> Writer<'a, W> {
// Finally write the global name and end the global with a `;` and a newline
// Leading space is important
write!(self.out, " ")?;
self.write_global_name(handle, global)?;
let global_name = self.get_global_name(handle, global);
write!(self.out, " {}", global_name)?;
if let TypeInner::Array { size, .. } = self.module.types[global.ty].inner {
self.write_array_size(size)?;
}
@ -912,24 +879,6 @@ impl<'a, W: Write> Writer<'a, W> {
}
}
/// Helper method used to write a name for a global without additional heap allocation
fn write_global_name(
&mut self,
handle: Handle<crate::GlobalVariable>,
global: &crate::GlobalVariable,
) -> BackendResult {
match global.binding {
Some(ref br) => write!(self.out, "_group_{}_binding_{}", br.group, br.binding)?,
None => write!(
self.out,
"{}",
&self.names[&NameKey::GlobalVariable(handle)]
)?,
}
Ok(())
}
/// Writes the varying declaration.
fn write_varying(
&mut self,
@ -1023,6 +972,7 @@ impl<'a, W: Write> Writer<'a, W> {
ty: back::FunctionType,
func: &crate::Function,
info: &valid::FunctionInfo,
name: &str,
) -> BackendResult {
// Create a function context for the function being written
let ctx = back::FunctionCtx {
@ -1056,11 +1006,7 @@ impl<'a, W: Write> Writer<'a, W> {
}
// Write the function name and open parentheses for the argument list
let function_name = match ctx.ty {
back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
back::FunctionType::EntryPoint(_) => "main",
};
write!(self.out, " {}(", function_name)?;
write!(self.out, " {}(", name)?;
// Write the comma separated argument list
//
@ -1386,22 +1332,15 @@ impl<'a, W: Write> Writer<'a, W> {
// This is where we can generate intermediate constants for some expression types.
Statement::Emit(ref range) => {
for handle in range.clone() {
let info = &ctx.info[handle];
let ptr_class = info.ty.inner_with(&self.module.types).pointer_class();
let expr_name = if ptr_class.is_some() {
// GLSL can't save a pointer-valued expression in a variable,
// but we shouldn't ever need to: they should never be named expressions,
// and none of the expression types flagged by bake_ref_count can be pointer-valued.
None
} else if let Some(name) = ctx.named_expressions.get(&handle) {
let expr_name = if let Some(name) = ctx.named_expressions.get(&handle) {
// Front end provides names for all variables at the start of writing.
// But we write them to step by step. We need to recache them
// Otherwise, we could accidentally write variable name instead of full expression.
// Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
Some(self.namer.call(name))
Some(self.namer.call_unique(name))
} else {
let min_ref_count = ctx.expressions[handle].bake_ref_count();
if min_ref_count <= info.ref_count {
if min_ref_count <= ctx.info[handle].ref_count {
Some(format!("{}{}", super::BAKE_PREFIX, handle.index()))
} else {
None
@ -1481,29 +1420,18 @@ impl<'a, W: Write> Writer<'a, W> {
Statement::Switch {
selector,
ref cases,
ref default,
} => {
// Start the switch
write!(self.out, "{}", level)?;
write!(self.out, "switch(")?;
self.write_expr(selector, ctx)?;
writeln!(self.out, ") {{")?;
let type_postfix = match *ctx.info[selector].ty.inner_with(&self.module.types) {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
..
} => "u",
_ => "",
};
// Write all cases
let l2 = level.next();
for case in cases {
match case.value {
crate::SwitchValue::Integer(value) => {
writeln!(self.out, "{}case {}{}:", l2, value, type_postfix)?
}
crate::SwitchValue::Default => writeln!(self.out, "{}default:", l2)?,
}
writeln!(self.out, "{}case {}:", l2, case.value)?;
for sta in case.body.iter() {
self.write_stmt(sta, ctx, l2.next())?;
@ -1514,11 +1442,27 @@ impl<'a, W: Write> Writer<'a, W> {
// broken out of at the end of its body.
if case.fall_through {
writeln!(self.out, "{}/* fallthrough */", l2.next())?;
} else if case.body.last().map_or(true, |s| !s.is_terminator()) {
} else if !matches!(
case.body.last(),
Some(&Statement::Break)
| Some(&Statement::Continue)
| Some(&Statement::Return { .. })
| Some(&Statement::Kill)
) {
writeln!(self.out, "{}break;", l2.next())?;
}
}
// Only write the default block if the block isn't empty
// Writing default without a block is valid but it's more readable this way
if !default.is_empty() {
writeln!(self.out, "{}default:", level.next())?;
for sta in default {
self.write_stmt(sta, ctx, l2.next())?;
}
}
writeln!(self.out, "{}}}", level)?
}
// Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
@ -1622,6 +1566,9 @@ impl<'a, W: Write> Writer<'a, W> {
stage: ep.stage,
output: true,
};
let field_name = self.names
[&NameKey::StructMember(result.ty, index as u32)]
.clone();
write!(self.out, "{} = ", varying_name)?;
if let Some(struct_name) = temp_struct_name {
@ -1630,13 +1577,7 @@ impl<'a, W: Write> Writer<'a, W> {
self.write_expr(value, ctx)?;
}
// Write field name
writeln!(
self.out,
".{};",
&self.names
[&NameKey::StructMember(result.ty, index as u32)]
)?;
writeln!(self.out, ".{};", field_name)?;
write!(self.out, "{}", level)?;
}
}
@ -1888,7 +1829,7 @@ impl<'a, W: Write> Writer<'a, W> {
// `get_global_name` does the work for us
Expression::GlobalVariable(handle) => {
let global = &self.module.global_variables[handle];
self.write_global_name(handle, global)?
write!(self.out, "{}", self.get_global_name(handle, global))?
}
// A local is written as it's name
Expression::LocalVariable(handle) => {
@ -1919,24 +1860,6 @@ impl<'a, W: Write> Writer<'a, W> {
_ => unreachable!(),
};
if dim == crate::ImageDimension::Cube
&& array_index.is_some()
&& depth_ref.is_some()
{
match level {
crate::SampleLevel::Zero
| crate::SampleLevel::Exact(_)
| crate::SampleLevel::Gradient { .. }
| crate::SampleLevel::Bias(_) => {
return Err(Error::Custom(String::from(
"gsamplerCubeArrayShadow isn't supported in textureGrad, \
textureLod or texture with bias",
)))
}
crate::SampleLevel::Auto => {}
}
}
// textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL.
// To emulate this, we will have to use textureGrad with a constant gradient of 0.
let workaround_lod_array_shadow_as_grad = (array_index.is_some()
@ -1973,50 +1896,31 @@ impl<'a, W: Write> Writer<'a, W> {
// We need to get the coordinates vector size to later build a vector that's `size + 1`
// if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
let mut coord_dim = match *ctx.info[coordinate].ty.inner_with(&self.module.types) {
TypeInner::Vector { size, .. } => size as u8,
TypeInner::Scalar { .. } => 1,
let size = match *ctx.info[coordinate].ty.inner_with(&self.module.types) {
TypeInner::Vector { size, .. } => size,
_ => unreachable!(),
};
let mut coord_dim = size as u8;
if array_index.is_some() {
coord_dim += 1;
}
let cube_array_shadow = coord_dim == 4;
if depth_ref.is_some() && !cube_array_shadow {
if depth_ref.is_some() {
coord_dim += 1;
}
let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
let is_vec = tex_1d_hack || coord_dim != 1;
// Compose a new texture coordinates vector
if is_vec {
write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
}
write!(self.out, "vec{}(", coord_dim)?;
self.write_expr(coordinate, ctx)?;
if tex_1d_hack {
write!(self.out, ", 0.0")?;
}
if let Some(expr) = array_index {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
if !cube_array_shadow {
if let Some(expr) = depth_ref {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
}
if is_vec {
write!(self.out, ")")?;
}
if cube_array_shadow {
if let Some(expr) = depth_ref {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
if let Some(expr) = depth_ref {
write!(self.out, ", ")?;
self.write_expr(expr, ctx)?;
}
write!(self.out, ")")?;
match level {
// Auto needs no more arguments
@ -2053,13 +1957,7 @@ impl<'a, W: Write> Writer<'a, W> {
if let Some(constant) = offset {
write!(self.out, ", ")?;
if tex_1d_hack {
write!(self.out, "ivec2(")?;
}
self.write_constant(constant)?;
if tex_1d_hack {
write!(self.out, ", 0)")?;
}
}
// End the function
@ -2137,11 +2035,11 @@ impl<'a, W: Write> Writer<'a, W> {
ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
write!(self.out, "textureSize(")?;
self.write_expr(image, ctx)?;
write!(self.out, ", ")?;
write!(self.out, ",")?;
if let Some(expr) = level {
self.write_expr(expr, ctx)?;
} else {
write!(self.out, "0")?;
write!(self.out, "0",)?;
}
}
ImageClass::Storage { .. } => {
@ -2149,10 +2047,7 @@ impl<'a, W: Write> Writer<'a, W> {
self.write_expr(image, ctx)?;
}
}
write!(self.out, ")")?;
if components != 1 || self.options.version.is_es() {
write!(self.out, ".{}", &"xyz"[..components])?;
}
write!(self.out, ").{}", &"xyz"[..components])?;
}
crate::ImageQuery::NumLevels => {
write!(self.out, "textureQueryLevels(",)?;
@ -2166,9 +2061,7 @@ impl<'a, W: Write> Writer<'a, W> {
};
write!(self.out, "{}(", fun_name)?;
self.write_expr(image, ctx)?;
if components != 1 || self.options.version.is_es() {
write!(self.out, ", 0).{}", back::COMPONENTS[components])?;
}
write!(self.out, ",0).{}", back::COMPONENTS[components])?;
}
crate::ImageQuery::NumSamples => {
// assumes ARB_shader_texture_image_samples
@ -2228,81 +2121,36 @@ impl<'a, W: Write> Writer<'a, W> {
let right_inner = ctx.info[right].ty.inner_with(&self.module.types);
let function = match (left_inner, right_inner) {
(
&Ti::Vector {
kind: left_kind, ..
},
&Ti::Vector {
kind: right_kind, ..
},
) => match op {
Bo::Less
| Bo::LessEqual
| Bo::Greater
| Bo::GreaterEqual
| Bo::Equal
| Bo::NotEqual => BinaryOperation::VectorCompare,
Bo::Modulo => match (left_kind, right_kind) {
(Sk::Float, _) | (_, Sk::Float) => match op {
Bo::Modulo => BinaryOperation::Modulo,
_ => BinaryOperation::Other,
},
_ => BinaryOperation::Other,
},
_ => BinaryOperation::Other,
(&Ti::Vector { .. }, &Ti::Vector { .. }) => match op {
Bo::Less => Some("lessThan"),
Bo::LessEqual => Some("lessThanEqual"),
Bo::Greater => Some("greaterThan"),
Bo::GreaterEqual => Some("greaterThanEqual"),
Bo::Equal => Some("equal"),
Bo::NotEqual => Some("notEqual"),
_ => None,
},
_ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
(Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
Bo::Modulo => BinaryOperation::Modulo,
_ => BinaryOperation::Other,
Bo::Modulo => Some("mod"),
_ => None,
},
_ => BinaryOperation::Other,
_ => None,
},
};
match function {
BinaryOperation::VectorCompare => {
let op_str = match op {
Bo::Less => "lessThan(",
Bo::LessEqual => "lessThanEqual(",
Bo::Greater => "greaterThan(",
Bo::GreaterEqual => "greaterThanEqual(",
Bo::Equal => "equal(",
Bo::NotEqual => "notEqual(",
_ => unreachable!(),
};
write!(self.out, "{}", op_str)?;
self.write_expr(left, ctx)?;
write!(self.out, ", ")?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
}
BinaryOperation::Modulo => {
write!(self.out, "(")?;
write!(self.out, "{}(", function.unwrap_or(""))?;
self.write_expr(left, ctx)?;
// write `e1 - e2 * trunc(e1 / e2)`
self.write_expr(left, ctx)?;
write!(self.out, " - ")?;
self.write_expr(right, ctx)?;
write!(self.out, " * ")?;
write!(self.out, "trunc(")?;
self.write_expr(left, ctx)?;
write!(self.out, " / ")?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
write!(self.out, ")")?;
}
BinaryOperation::Other => {
write!(self.out, "(")?;
self.write_expr(left, ctx)?;
write!(self.out, " {} ", super::binary_operation_str(op))?;
self.write_expr(right, ctx)?;
write!(self.out, ")")?;
}
if function.is_some() {
write!(self.out, ",")?
} else {
write!(self.out, " {} ", super::binary_operation_str(op))?;
}
self.write_expr(right, ctx)?;
write!(self.out, ")")?
}
// `Select` is written as `condition ? accept : reject`
// We wrap everything in parentheses to avoid precedence issues
@ -2380,7 +2228,6 @@ impl<'a, W: Write> Writer<'a, W> {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
@ -2445,56 +2292,17 @@ impl<'a, W: Write> Writer<'a, W> {
// bits
Mf::CountOneBits => "bitCount",
Mf::ReverseBits => "bitfieldReverse",
Mf::ExtractBits => "bitfieldExtract",
Mf::InsertBits => "bitfieldInsert",
// data packing
Mf::Pack4x8snorm => "packSnorm4x8",
Mf::Pack4x8unorm => "packUnorm4x8",
Mf::Pack2x16snorm => "packSnorm2x16",
Mf::Pack2x16unorm => "packUnorm2x16",
Mf::Pack2x16float => "packHalf2x16",
// data unpacking
Mf::Unpack4x8snorm => "unpackSnorm4x8",
Mf::Unpack4x8unorm => "unpackUnorm4x8",
Mf::Unpack2x16snorm => "unpackSnorm2x16",
Mf::Unpack2x16unorm => "unpackUnorm2x16",
Mf::Unpack2x16float => "unpackHalf2x16",
};
let extract_bits = fun == Mf::ExtractBits;
let insert_bits = fun == Mf::InsertBits;
write!(self.out, "{}(", fun_name)?;
self.write_expr(arg, ctx)?;
if let Some(arg) = arg1 {
write!(self.out, ", ")?;
if extract_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
self.write_expr(arg, ctx)?;
}
if let Some(arg) = arg2 {
write!(self.out, ", ")?;
if extract_bits || insert_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
}
if let Some(arg) = arg3 {
write!(self.out, ", ")?;
if insert_bits {
write!(self.out, "int(")?;
self.write_expr(arg, ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr(arg, ctx)?;
}
self.write_expr(arg, ctx)?;
}
write!(self.out, ")")?
}
@ -2597,14 +2405,7 @@ impl<'a, W: Write> Writer<'a, W> {
write!(self.out, ")")?;
}
None => {
let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
if tex_1d_hack {
write!(self.out, "ivec2(")?;
}
self.write_expr(coordinate, ctx)?;
if tex_1d_hack {
write!(self.out, ", 0.0)")?;
}
}
}
Ok(())
@ -2824,7 +2625,6 @@ fn glsl_built_in(built_in: crate::BuiltIn, output: bool) -> &'static str {
"gl_FragCoord"
}
}
Bi::ViewIndex => "gl_ViewIndex",
// vertex
Bi::BaseInstance => "uint(gl_BaseInstance)",
Bi::BaseVertex => "uint(gl_BaseVertex)",

3
third_party/rust/naga/src/back/hlsl/conv.rs поставляемый
Просмотреть файл

@ -103,9 +103,6 @@ impl crate::BuiltIn {
Self::BaseInstance | Self::BaseVertex | Self::WorkGroupSize => {
return Err(Error::Unimplemented(format!("builtin {:?}", self)))
}
Self::ViewIndex => {
return Err(Error::Custom(format!("Unsupported builtin {:?}", self)))
}
})
}
}

2
third_party/rust/naga/src/back/hlsl/help.rs поставляемый
Просмотреть файл

@ -244,7 +244,7 @@ impl<'a, W: Write> super::Writer<'a, W> {
ImageDimension as IDim,
};
const ARGUMENT_VARIABLE_NAME: &str = "tex";
const ARGUMENT_VARIABLE_NAME: &str = "texture";
const RETURN_VARIABLE_NAME: &str = "ret";
const MIP_LEVEL_PARAM: &str = "mip_level";

Просмотреть файл

@ -129,6 +129,7 @@ impl<W: fmt::Write> super::Writer<'_, W> {
crate::VectorSize::Tri => 4,
columns => columns as u32,
};
let row_stride = width as u32 * padded_columns;
let iter = (0..rows as u32).map(|i| {
let ty_inner = crate::TypeInner::Vector {
@ -266,15 +267,8 @@ impl<W: fmt::Write> super::Writer<'_, W> {
)?;
self.write_store_value(module, &value, func_ctx)?;
writeln!(self.out, ";")?;
// Note: Matrices containing vec3s, due to padding, act like they contain vec4s.
let padded_columns = match columns {
crate::VectorSize::Tri => 4,
columns => columns as u32,
};
let row_stride = width as u32 * padded_columns;
// then iterate the stores
let row_stride = width as u32 * columns as u32;
for i in 0..rows as u32 {
self.temp_access_chain
.push(SubAccess::Offset(i * row_stride));
@ -367,22 +361,39 @@ impl<W: fmt::Write> super::Writer<'_, W> {
mut cur_expr: Handle<crate::Expression>,
func_ctx: &FunctionCtx,
) -> Result<Handle<crate::GlobalVariable>, Error> {
enum AccessIndex {
Expression(Handle<crate::Expression>),
Constant(u32),
}
enum Parent<'a> {
Array { stride: u32 },
Struct(&'a [crate::StructMember]),
}
self.temp_access_chain.clear();
loop {
let (next_expr, access_index) = match func_ctx.expressions[cur_expr] {
// determine the size of the pointee
let stride = match *func_ctx.info[cur_expr].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, class: _ } => {
module.types[base].inner.span(&module.constants)
}
crate::TypeInner::ValuePointer { size, width, .. } => {
size.map_or(1, |s| s as u32) * width as u32
}
_ => 0,
};
let (next_expr, sub) = match func_ctx.expressions[cur_expr] {
crate::Expression::GlobalVariable(handle) => return Ok(handle),
crate::Expression::Access { base, index } => (base, AccessIndex::Expression(index)),
crate::Expression::Access { base, index } => (
base,
SubAccess::Index {
value: index,
stride,
},
),
crate::Expression::AccessIndex { base, index } => {
(base, AccessIndex::Constant(index))
let sub = match *func_ctx.info[base].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, .. } => match module.types[base].inner {
crate::TypeInner::Struct { ref members, .. } => {
SubAccess::Offset(members[index as usize].offset)
}
_ => SubAccess::Offset(index * stride),
},
_ => SubAccess::Offset(index * stride),
};
(base, sub)
}
ref other => {
return Err(Error::Unimplemented(format!(
@ -391,38 +402,6 @@ impl<W: fmt::Write> super::Writer<'_, W> {
)))
}
};
let parent = match *func_ctx.info[next_expr].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, .. } => match module.types[base].inner {
crate::TypeInner::Struct { ref members, .. } => Parent::Struct(members),
crate::TypeInner::Array { stride, .. } => Parent::Array { stride },
crate::TypeInner::Vector { width, .. } => Parent::Array {
stride: width as u32,
},
crate::TypeInner::Matrix { rows, width, .. } => Parent::Array {
stride: width as u32 * if rows > crate::VectorSize::Bi { 4 } else { 2 },
},
_ => unreachable!(),
},
crate::TypeInner::ValuePointer { width, .. } => Parent::Array {
stride: width as u32,
},
_ => unreachable!(),
};
let sub = match (parent, access_index) {
(Parent::Array { stride }, AccessIndex::Expression(value)) => {
SubAccess::Index { value, stride }
}
(Parent::Array { stride }, AccessIndex::Constant(index)) => {
SubAccess::Offset(stride * index)
}
(Parent::Struct(members), AccessIndex::Constant(index)) => {
SubAccess::Offset(members[index as usize].offset)
}
(Parent::Struct(_), AccessIndex::Expression(_)) => unreachable!(),
};
self.temp_access_chain.push(sub);
cur_expr = next_expr;
}

99
third_party/rust/naga/src/back/hlsl/writer.rs поставляемый
Просмотреть файл

@ -368,7 +368,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
}
Ok(EntryPointBinding {
arg_name: self.namer.call(struct_name.to_lowercase().as_str()),
arg_name: self.namer.call_unique(struct_name.to_lowercase().as_str()),
ty_name: struct_name,
members,
})
@ -391,10 +391,14 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
match module.types[arg.ty].inner {
TypeInner::Struct { ref members, .. } => {
for member in members.iter() {
let name = self.namer.call_or(&member.name, "member");
let member_name = if let Some(ref name) = member.name {
name
} else {
"member"
};
let index = fake_members.len() as u32;
fake_members.push(EpStructMember {
name,
name: self.namer.call(member_name),
ty: member.ty,
binding: member.binding.clone(),
index,
@ -402,7 +406,11 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
}
}
_ => {
let member_name = self.namer.call_or(&arg.name, "member");
let member_name = if let Some(ref name) = arg.name {
self.namer.call_unique(name)
} else {
self.namer.call("member")
};
let index = fake_members.len() as u32;
fake_members.push(EpStructMember {
name: member_name,
@ -440,7 +448,11 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
};
for member in members.iter() {
let member_name = self.namer.call_or(&member.name, "member");
let member_name = if let Some(ref name) = member.name {
self.namer.call_unique(name)
} else {
self.namer.call("member")
};
let index = fake_members.len() as u32;
fake_members.push(EpStructMember {
name: member_name,
@ -1045,22 +1057,15 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
match *stmt {
Statement::Emit(ref range) => {
for handle in range.clone() {
let info = &func_ctx.info[handle];
let ptr_class = info.ty.inner_with(&module.types).pointer_class();
let expr_name = if ptr_class.is_some() {
// HLSL can't save a pointer-valued expression in a variable,
// but we shouldn't ever need to: they should never be named expressions,
// and none of the expression types flagged by bake_ref_count can be pointer-valued.
None
} else if let Some(name) = func_ctx.named_expressions.get(&handle) {
let expr_name = if let Some(name) = func_ctx.named_expressions.get(&handle) {
// Front end provides names for all variables at the start of writing.
// But we write them to step by step. We need to recache them
// Otherwise, we could accidentally write variable name instead of full expression.
// Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
Some(self.namer.call(name))
Some(self.namer.call_unique(name))
} else {
let min_ref_count = func_ctx.expressions[handle].bake_ref_count();
if min_ref_count <= info.ref_count {
if min_ref_count <= func_ctx.info[handle].ref_count {
Some(format!("_expr{}", handle.index()))
} else {
None
@ -1147,7 +1152,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
};
let final_name = match ep_output {
Some(ep_output) => {
let final_name = self.namer.call(&variable_name);
let final_name = self.namer.call_unique(&variable_name);
write!(
self.out,
"{}const {} {} = {{ ",
@ -1358,35 +1363,20 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Statement::Switch {
selector,
ref cases,
ref default,
} => {
// Start the switch
write!(self.out, "{}", level)?;
write!(self.out, "switch(")?;
self.write_expr(module, selector, func_ctx)?;
writeln!(self.out, ") {{")?;
let type_postfix = match *func_ctx.info[selector].ty.inner_with(&module.types) {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
..
} => "u",
_ => "",
};
// Write all cases
let indent_level_1 = level.next();
let indent_level_2 = indent_level_1.next();
for case in cases {
match case.value {
crate::SwitchValue::Integer(value) => writeln!(
self.out,
"{}case {}{}: {{",
indent_level_1, value, type_postfix
)?,
crate::SwitchValue::Default => {
writeln!(self.out, "{}default: {{", indent_level_1)?
}
}
writeln!(self.out, "{}case {}: {{", indent_level_1, case.value)?;
if case.fall_through {
// Generate each fallthrough case statement in a new block. This is done to
@ -1405,13 +1395,25 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
if case.fall_through {
writeln!(self.out, "{}}}", indent_level_2)?;
} else if case.body.last().map_or(true, |s| !s.is_terminator()) {
} else {
writeln!(self.out, "{}break;", indent_level_2)?;
}
writeln!(self.out, "{}}}", indent_level_1)?;
}
// Only write the default block if the block isn't empty
// Writing default without a block is valid but it's more readable this way
if !default.is_empty() {
writeln!(self.out, "{}default: {{", indent_level_1)?;
for sta in default {
self.write_stmt(module, sta, func_ctx, indent_level_2)?;
}
writeln!(self.out, "{}}}", indent_level_1)?;
}
writeln!(self.out, "{}}}", level)?
}
}
@ -1778,15 +1780,20 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, "{}", op_str)?;
self.write_expr(module, expr, func_ctx)?;
}
Expression::As {
expr,
kind,
convert,
} => {
Expression::As { expr, kind, .. } => {
let inner = func_ctx.info[expr].ty.inner_with(&module.types);
let (size_str, src_width) = match *inner {
TypeInner::Vector { size, width, .. } => (back::vector_size_str(size), width),
TypeInner::Scalar { width, .. } => ("", width),
match *inner {
TypeInner::Vector { size, width, .. } => {
write!(
self.out,
"{}{}",
kind.to_hlsl_str(width)?,
back::vector_size_str(size),
)?;
}
TypeInner::Scalar { width, .. } => {
write!(self.out, "{}", kind.to_hlsl_str(width)?)?
}
_ => {
return Err(Error::Unimplemented(format!(
"write_expr expression::as {:?}",
@ -1794,8 +1801,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
)));
}
};
let kind_str = kind.to_hlsl_str(convert.unwrap_or(src_width))?;
write!(self.out, "{}{}(", kind_str, size_str,)?;
write!(self.out, "(")?;
self.write_expr(module, expr, func_ctx)?;
write!(self.out, ")")?;
}
@ -1804,7 +1810,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
@ -1907,10 +1912,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, ", ")?;
self.write_expr(module, arg, func_ctx)?;
}
if let Some(arg) = arg3 {
write!(self.out, ", ")?;
self.write_expr(module, arg, func_ctx)?;
}
write!(self.out, ")")?
}
}

126
third_party/rust/naga/src/back/mod.rs поставляемый
Просмотреть файл

@ -123,6 +123,117 @@ impl<'a> FunctionCtx<'_> {
}
}
/// How should code generated by Naga do bounds checks?
///
/// When a vector, matrix, or array index is out of bounds—either negative, or
/// greater than or equal to the number of elements in the type—WGSL requires
/// that some other index of the implementation's choice that is in bounds is
/// used instead. (There are no types with zero elements.)
///
/// Similarly, when out-of-bounds coordinates, array indices, or sample indices
/// are presented to the WGSL `textureLoad` and `textureStore` operations, the
/// operation is redirected to do something safe.
///
/// Different users of Naga will prefer different defaults:
///
/// - When used as part of a WebGPU implementation, the WGSL specification
/// requires the `Restrict` behavior for array, vector, and matrix accesses,
/// and either the `Restrict` or `ReadZeroSkipWrite` behaviors for texture
/// accesses.
///
/// - When used by the `wgpu` crate for native development, `wgpu` selects
/// `ReadZeroSkipWrite` as its default.
///
/// - Naga's own default is `Unchanged`, so that shader translations
/// are as faithful to the original as possible.
///
/// Sometimes the underlying hardware and drivers can perform bounds checks
/// themselves, in a way that performs better than the checks Naga would inject.
/// If you're using native checks like this, then having Naga inject its own
/// checks as well would be redundant, and the `Unchecked` policy is
/// appropriate.
#[derive(Clone, Copy, Debug)]
pub enum BoundsCheckPolicy {
/// Replace out-of-bounds indexes with some arbitrary in-bounds index.
///
/// (This does not necessarily mean clamping. For example, interpreting the
/// index as unsigned and taking the minimum with the largest valid index
/// would also be a valid implementation. That would map negative indices to
/// the last element, not the first.)
Restrict,
/// Out-of-bounds reads return zero, and writes have no effect.
ReadZeroSkipWrite,
/// Naga adds no checks to indexing operations. Generate the fastest code
/// possible. This is the default for Naga, as a translator, but consumers
/// should consider defaulting to a safer behavior.
Unchecked,
}
#[derive(Clone, Copy, Debug, Default)]
/// Policies for injecting bounds checks during code generation.
///
/// For SPIR-V generation, see [`spv::Options::bounds_check_policies`].
pub struct BoundsCheckPolicies {
/// How should the generated code handle array, vector, or matrix indices
/// that are out of range?
pub index: BoundsCheckPolicy,
/// How should the generated code handle array, vector, or matrix indices
/// that are out of range, when those values live in a [`GlobalVariable`] in
/// the [`Storage`] or [`Uniform`] storage classes?
///
/// Some graphics hardware provides "robust buffer access", a feature that
/// ensures that using a pointer cannot access memory outside the 'buffer'
/// that it was derived from. In Naga terms, this means that the hardware
/// ensures that pointers computed by applying [`Access`] and
/// [`AccessIndex`] expressions to a [`GlobalVariable`] whose [`class`] is
/// [`Storage`] or [`Uniform`] will never read or write memory outside that
/// global variable.
///
/// When hardware offers such a feature, it is probably undesirable to have
/// Naga inject bounds checking code for such accesses, since the hardware
/// can probably provide the same protection more efficiently. However,
/// bounds checks are still needed on accesses to indexable values that do
/// not live in buffers, like local variables.
///
/// So, this option provides a separate policy that applies only to accesses
/// to storage and uniform globals. When depending on hardware bounds
/// checking, this policy can be `Unchecked` to avoid unnecessary overhead.
///
/// When special hardware support is not available, this should probably be
/// the same as `index_bounds_check_policy`.
///
/// [`GlobalVariable`]: crate::GlobalVariable
/// [`class`]: crate::GlobalVariable::class
/// [`Restrict`]: crate::back::BoundsCheckPolicy::Restrict
/// [`ReadZeroSkipWrite`]: crate::back::BoundsCheckPolicy::ReadZeroSkipWrite
/// [`Access`]: crate::Expression::Access
/// [`AccessIndex`]: crate::Expression::AccessIndex
/// [`Storage`]: crate::StorageClass::Storage
/// [`Uniform`]: crate::StorageClass::Uniform
pub buffer: BoundsCheckPolicy,
/// How should the generated code handle image texel references that are out
/// of range?
///
/// This controls the behavior of [`ImageLoad`] expressions and
/// [`ImageStore`] statements when a coordinate, texture array index, level
/// of detail, or multisampled sample number is out of range.
///
/// [`ImageLoad`]: crate::Expression::ImageLoad
/// [`ImageStore`]: crate::Statement::ImageStore
pub image: BoundsCheckPolicy,
}
/// The default `BoundsCheckPolicy` is `Unchecked`.
impl Default for BoundsCheckPolicy {
fn default() -> Self {
BoundsCheckPolicy::Unchecked
}
}
impl crate::Expression {
/// Returns the ref count, upon reaching which this expression
/// should be considered for baking.
@ -200,18 +311,3 @@ impl crate::TypeInner {
}
}
}
impl crate::Statement {
/// Returns true if the statement directly terminates the current block
///
/// Used to decided wether case blocks require a explicit `break`
pub fn is_terminator(&self) -> bool {
match *self {
crate::Statement::Break
| crate::Statement::Continue
| crate::Statement::Return { .. }
| crate::Statement::Kill => true,
_ => false,
}
}
}

17
third_party/rust/naga/src/back/msl/mod.rs поставляемый
Просмотреть файл

@ -49,11 +49,14 @@ pub enum BindSamplerTarget {
#[derive(Clone, Debug, Default, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(default))]
pub struct BindTarget {
#[cfg_attr(feature = "deserialize", serde(default))]
pub buffer: Option<Slot>,
#[cfg_attr(feature = "deserialize", serde(default))]
pub texture: Option<Slot>,
#[cfg_attr(feature = "deserialize", serde(default))]
pub sampler: Option<BindSamplerTarget>,
#[cfg_attr(feature = "deserialize", serde(default))]
pub mutable: bool,
}
@ -63,25 +66,29 @@ pub type BindingMap = std::collections::BTreeMap<crate::ResourceBinding, BindTar
#[derive(Clone, Debug, Default, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(default))]
pub struct PerStageResources {
#[cfg_attr(feature = "deserialize", serde(default))]
pub resources: BindingMap,
#[cfg_attr(feature = "deserialize", serde(default))]
pub push_constant_buffer: Option<Slot>,
/// The slot of a buffer that contains an array of `u32`,
/// one for the size of each bound buffer that contains a runtime array,
/// in order of [`crate::GlobalVariable`] declarations.
#[cfg_attr(feature = "deserialize", serde(default))]
pub sizes_buffer: Option<Slot>,
}
#[derive(Clone, Debug, Default, Hash, Eq, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(any(feature = "serialize", feature = "deserialize"), serde(default))]
pub struct PerStageMap {
#[cfg_attr(feature = "deserialize", serde(default))]
pub vs: PerStageResources,
#[cfg_attr(feature = "deserialize", serde(default))]
pub fs: PerStageResources,
#[cfg_attr(feature = "deserialize", serde(default))]
pub cs: PerStageResources,
}
@ -351,9 +358,7 @@ impl ResolvedBinding {
Bi::WorkGroupId => "threadgroup_position_in_grid",
Bi::WorkGroupSize => "dispatch_threads_per_threadgroup",
Bi::NumWorkGroups => "threadgroups_per_grid",
Bi::CullDistance | Bi::ViewIndex => {
return Err(Error::UnsupportedBuiltIn(built_in))
}
Bi::CullDistance => return Err(Error::UnsupportedBuiltIn(built_in)),
};
write!(out, "{}", name)?;
}

135
third_party/rust/naga/src/back/msl/writer.rs поставляемый
Просмотреть файл

@ -14,9 +14,6 @@ use std::{
type BackendResult = Result<(), Error>;
const NAMESPACE: &str = "metal";
// The name of the array member of the Metal struct types we generate to
// represent Naga `Array` types. See the comments in `Writer::write_type_defs`
// for details.
const WRAPPED_ARRAY_FIELD: &str = "inner";
// This is a hack: we need to pass a pointer to an atomic,
// but generally the backend isn't putting "&" in front of every pointer.
@ -25,7 +22,7 @@ const ATOMIC_REFERENCE: &str = "&";
struct TypeContext<'a> {
handle: Handle<crate::Type>,
arena: &'a crate::UniqueArena<crate::Type>,
arena: &'a crate::Arena<crate::Type>,
names: &'a FastHashMap<NameKey, String>,
access: crate::StorageAccess,
first_time: bool,
@ -165,13 +162,7 @@ impl<'a> Display for TypeContext<'a> {
} else if self.access.contains(crate::StorageAccess::LOAD) {
"read"
} else {
log::warn!(
"Storage access for {:?} (name '{}'): {:?}",
self.handle,
ty.name.as_deref().unwrap_or_default(),
self.access
);
unreachable!("module is not valid");
unreachable!("module is not valid")
};
("texture", "", format.into(), access)
}
@ -307,6 +298,7 @@ pub struct Writer<W> {
names: FastHashMap<NameKey, String>,
named_expressions: crate::NamedExpressions,
namer: proc::Namer,
runtime_sized_buffers: FastHashMap<Handle<crate::GlobalVariable>, usize>,
#[cfg(test)]
put_expression_stack_pointers: FastHashSet<*const ()>,
#[cfg(test)]
@ -362,7 +354,7 @@ fn should_pack_struct_member(
}
}
fn needs_array_length(ty: Handle<crate::Type>, arena: &crate::UniqueArena<crate::Type>) -> bool {
fn needs_array_length(ty: Handle<crate::Type>, arena: &crate::Arena<crate::Type>) -> bool {
if let crate::TypeInner::Struct { ref members, .. } = arena[ty].inner {
if let Some(member) = members.last() {
if let crate::TypeInner::Array {
@ -472,6 +464,7 @@ impl<W: Write> Writer<W> {
names: FastHashMap::default(),
named_expressions: crate::NamedExpressions::default(),
namer: proc::Namer::default(),
runtime_sized_buffers: FastHashMap::default(),
#[cfg(test)]
put_expression_stack_pointers: Default::default(),
#[cfg(test)]
@ -705,10 +698,11 @@ impl<W: Write> Writer<W> {
_ => return Err(Error::Validation),
};
let buffer_idx = self.runtime_sized_buffers[&handle];
write!(
self.out,
"(1 + (_buffer_sizes.size{idx} - {offset} - {span}) / {stride})",
idx = handle.index(),
idx = buffer_idx,
offset = offset,
span = span,
stride = stride,
@ -1113,7 +1107,6 @@ impl<W: Write> Writer<W> {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
@ -1182,20 +1175,6 @@ impl<W: Write> Writer<W> {
// bits
Mf::CountOneBits => "popcount",
Mf::ReverseBits => "reverse_bits",
Mf::ExtractBits => "extract_bits",
Mf::InsertBits => "insert_bits",
// data packing
Mf::Pack4x8snorm => "pack_float_to_unorm4x8",
Mf::Pack4x8unorm => "pack_float_to_snorm4x8",
Mf::Pack2x16snorm => "pack_float_to_unorm2x16",
Mf::Pack2x16unorm => "pack_float_to_snorm2x16",
Mf::Pack2x16float => "",
// data unpacking
Mf::Unpack4x8snorm => "unpack_snorm4x8_to_float",
Mf::Unpack4x8unorm => "unpack_unorm4x8_to_float",
Mf::Unpack2x16snorm => "unpack_snorm2x16_to_float",
Mf::Unpack2x16unorm => "unpack_unorm2x16_to_float",
Mf::Unpack2x16float => "",
};
if fun == Mf::Distance && scalar_argument {
@ -1204,20 +1183,9 @@ impl<W: Write> Writer<W> {
write!(self.out, " - ")?;
self.put_expression(arg1.unwrap(), context, false)?;
write!(self.out, ")")?;
} else if fun == Mf::Unpack2x16float {
write!(self.out, "float2(as_type<half2>(")?;
self.put_expression(arg, context, false)?;
write!(self.out, "))")?;
} else if fun == Mf::Pack2x16float {
write!(self.out, "as_type<uint>(half2(")?;
self.put_expression(arg, context, false)?;
write!(self.out, "))")?;
} else {
write!(self.out, "{}::{}", NAMESPACE, fun_name)?;
self.put_call_parameters(
iter::once(arg).chain(arg1).chain(arg2).chain(arg3),
context,
)?;
self.put_call_parameters(iter::once(arg).chain(arg1).chain(arg2), context)?;
}
}
crate::Expression::As {
@ -1226,37 +1194,22 @@ impl<W: Write> Writer<W> {
convert,
} => {
let scalar = scalar_kind_string(kind);
let (src_kind, src_width) = match *context.resolve_type(expr) {
crate::TypeInner::Scalar { kind, width }
| crate::TypeInner::Vector { kind, width, .. } => (kind, width),
let (size, width) = match *context.resolve_type(expr) {
crate::TypeInner::Scalar { width, .. } => ("", width),
crate::TypeInner::Vector { size, width, .. } => {
(back::vector_size_str(size), width)
}
_ => return Err(Error::Validation),
};
let is_bool_cast =
kind == crate::ScalarKind::Bool || src_kind == crate::ScalarKind::Bool;
let op = match convert {
Some(w) if w == src_width || is_bool_cast => "static_cast",
Some(w) if w == width => "static_cast",
Some(8) if kind == crate::ScalarKind::Float => {
return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64))
}
Some(_) => return Err(Error::Validation),
None => "as_type",
};
write!(self.out, "{}<", op)?;
match *context.resolve_type(expr) {
crate::TypeInner::Vector { size, .. } => {
write!(
self.out,
"{}::{}{}",
NAMESPACE,
scalar,
back::vector_size_str(size)
)?;
}
_ => {
write!(self.out, "{}", scalar)?;
}
}
write!(self.out, ">(")?;
write!(self.out, "{}<{}{}>(", op, scalar, size)?;
self.put_expression(expr, context, true)?;
write!(self.out, ")")?;
}
@ -1389,7 +1342,7 @@ impl<W: Write> Writer<W> {
)?;
}
TypeResolution::Value(ref other) => {
log::warn!("Type {:?} isn't a known local", other); //TEMP!
log::error!("Type {:?} isn't a known local", other);
return Err(Error::FeatureNotImplemented("weird local type".to_string()));
}
}
@ -1417,25 +1370,18 @@ impl<W: Write> Writer<W> {
match *statement {
crate::Statement::Emit(ref range) => {
for handle in range.clone() {
let info = &context.expression.info[handle];
let ptr_class = info
.ty
.inner_with(&context.expression.module.types)
.pointer_class();
let expr_name = if ptr_class.is_some() {
None // don't bake pointer expressions (just yet)
} else if let Some(name) =
let expr_name = if let Some(name) =
context.expression.function.named_expressions.get(&handle)
{
// Front end provides names for all variables at the start of writing.
// But we write them to step by step. We need to recache them
// Otherwise, we could accidentally write variable name instead of full expression.
// Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
Some(self.namer.call(name))
Some(self.namer.call_unique(name))
} else {
let min_ref_count =
context.expression.function.expressions[handle].bake_ref_count();
if min_ref_count <= info.ref_count {
if min_ref_count <= context.expression.info[handle].ref_count {
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
} else {
None
@ -1476,35 +1422,23 @@ impl<W: Write> Writer<W> {
crate::Statement::Switch {
selector,
ref cases,
ref default,
} => {
write!(self.out, "{}switch(", level)?;
self.put_expression(selector, &context.expression, true)?;
let type_postfix = match *context.expression.resolve_type(selector) {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
..
} => "u",
_ => "",
};
writeln!(self.out, ") {{")?;
let lcase = level.next();
for case in cases.iter() {
match case.value {
crate::SwitchValue::Integer(value) => {
writeln!(self.out, "{}case {}{}: {{", lcase, value, type_postfix)?;
}
crate::SwitchValue::Default => {
writeln!(self.out, "{}default: {{", lcase)?;
}
}
writeln!(self.out, "{}case {}: {{", lcase, case.value)?;
self.put_block(lcase.next(), &case.body, context)?;
if !case.fall_through
&& case.body.last().map_or(true, |s| !s.is_terminator())
{
if !case.fall_through {
writeln!(self.out, "{}break;", lcase.next())?;
}
writeln!(self.out, "{}}}", lcase)?;
}
writeln!(self.out, "{}default: {{", lcase)?;
self.put_block(lcase.next(), default, context)?;
writeln!(self.out, "{}}}", lcase)?;
writeln!(self.out, "{}}}", level)?;
}
crate::Statement::Loop {
@ -1763,6 +1697,7 @@ impl<W: Write> Writer<W> {
self.names.clear();
self.namer
.reset(module, super::keywords::RESERVED, &[], &mut self.names);
self.runtime_sized_buffers.clear();
self.struct_member_pads.clear();
writeln!(
@ -1779,6 +1714,7 @@ impl<W: Write> Writer<W> {
for (handle, var) in module.global_variables.iter() {
if needs_array_length(var.ty, &module.types) {
let idx = handle.index();
self.runtime_sized_buffers.insert(handle, idx);
indices.push(idx);
}
}
@ -1808,19 +1744,6 @@ impl<W: Write> Writer<W> {
}
let name = &self.names[&NameKey::Type(handle)];
match ty.inner {
// Naga IR can pass around arrays by value, but Metal, following
// C++, performs an array-to-pointer conversion (C++ [conv.array])
// on expressions of array type, so assigning the array by value
// isn't possible. However, Metal *does* assign structs by
// value. So in our Metal output, we wrap all array types in
// synthetic struct types:
//
// struct type1 {
// float inner[10]
// };
//
// Then we carefully include `.inner` (`WRAPPED_ARRAY_FIELD`) in
// any expression that actually wants access to the array.
crate::TypeInner::Array {
base,
size,
@ -2695,8 +2618,8 @@ fn test_stack_size() {
}
let stack_size = addresses.end - addresses.start;
// check the size (in debug only)
// last observed macOS value: 20528 (CI)
if !(15000..=25000).contains(&stack_size) {
// last observed macOS value: 18304
if !(15000..=20000).contains(&stack_size) {
panic!("`put_expression` stack size {} has changed!", stack_size);
}
}

360
third_party/rust/naga/src/back/spv/block.rs поставляемый
Просмотреть файл

@ -20,87 +20,6 @@ fn get_dimension(type_inner: &crate::TypeInner) -> Dimension {
}
impl Writer {
// Flip Y coordinate to adjust for coordinate space difference
// between SPIR-V and our IR.
// The `position_id` argument is a pointer to a `vecN<f32>`,
// whose `y` component we will negate.
fn write_epilogue_position_y_flip(
&mut self,
position_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_ptr_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: Some(spirv::StorageClass::Output),
}));
let index_y_id = self.get_index_constant(1);
let access_id = self.id_gen.next();
body.push(Instruction::access_chain(
float_ptr_type_id,
access_id,
position_id,
&[index_y_id],
));
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
}));
let load_id = self.id_gen.next();
body.push(Instruction::load(float_type_id, load_id, access_id, None));
let neg_id = self.id_gen.next();
body.push(Instruction::unary(
spirv::Op::FNegate,
float_type_id,
neg_id,
load_id,
));
body.push(Instruction::store(access_id, neg_id, None));
Ok(())
}
// Clamp fragment depth between 0 and 1.
fn write_epilogue_frag_depth_clamp(
&mut self,
frag_depth_id: Word,
body: &mut Vec<Instruction>,
) -> Result<(), Error> {
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
}));
let value0_id = self.get_constant_scalar(crate::ScalarValue::Float(0.0), 4);
let value1_id = self.get_constant_scalar(crate::ScalarValue::Float(1.0), 4);
let original_id = self.id_gen.next();
body.push(Instruction::load(
float_type_id,
original_id,
frag_depth_id,
None,
));
let clamp_id = self.id_gen.next();
body.push(Instruction::ext_inst(
self.gl450_ext_inst_id,
spirv::GLOp::FClamp,
float_type_id,
clamp_id,
&[original_id, value0_id, value1_id],
));
body.push(Instruction::store(frag_depth_id, clamp_id, None));
Ok(())
}
fn write_entry_point_return(
&mut self,
value_id: Word,
@ -125,18 +44,43 @@ impl Writer {
body.push(Instruction::store(res_member.id, member_value_id, None));
match res_member.built_in {
Some(crate::BuiltIn::Position)
if self.flags.contains(WriterFlags::ADJUST_COORDINATE_SPACE) =>
{
self.write_epilogue_position_y_flip(res_member.id, body)?;
}
Some(crate::BuiltIn::FragDepth)
if self.flags.contains(WriterFlags::CLAMP_FRAG_DEPTH) =>
{
self.write_epilogue_frag_depth_clamp(res_member.id, body)?;
}
_ => {}
// Flip Y coordinate to adjust for coordinate space difference
// between SPIR-V and our IR.
if self.flags.contains(WriterFlags::ADJUST_COORDINATE_SPACE)
&& res_member.built_in == Some(crate::BuiltIn::Position)
{
let access_id = self.id_gen.next();
let float_ptr_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: Some(spirv::StorageClass::Output),
}));
let index_y_id = self.get_index_constant(1);
body.push(Instruction::access_chain(
float_ptr_type_id,
access_id,
res_member.id,
&[index_y_id],
));
let load_id = self.id_gen.next();
let float_type_id = self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
}));
body.push(Instruction::load(float_type_id, load_id, access_id, None));
let neg_id = self.id_gen.next();
body.push(Instruction::unary(
spirv::Op::FNegate,
float_type_id,
neg_id,
load_id,
));
body.push(Instruction::store(access_id, neg_id, None));
}
}
Ok(())
@ -232,12 +176,14 @@ impl<'w> BlockContext<'w> {
crate::Expression::Constant(handle) => self.writer.constant_ids[handle.index()],
crate::Expression::Splat { size, value } => {
let value_id = self.cached[value];
let components = [value_id; 4];
self.temp_list.clear();
self.temp_list.resize(size as usize, value_id);
let id = self.gen_id();
block.body.push(Instruction::composite_construct(
result_type_id,
id,
&components[..size as usize],
&self.temp_list,
));
id
}
@ -346,10 +292,6 @@ impl<'w> BlockContext<'w> {
(Dimension::Matrix, Dimension::Scalar { .. }) => {
spirv::Op::MatrixTimesScalar
}
(Dimension::Scalar, Dimension::Matrix { .. }) => {
preserve_order = false;
spirv::Op::MatrixTimesScalar
}
(Dimension::Matrix, Dimension::Vector) => spirv::Op::MatrixTimesVector,
(Dimension::Matrix, Dimension::Matrix) => spirv::Op::MatrixTimesMatrix,
(Dimension::Vector, Dimension::Vector)
@ -360,6 +302,7 @@ impl<'w> BlockContext<'w> {
}
(Dimension::Vector, Dimension::Vector)
| (Dimension::Scalar, Dimension::Scalar) => spirv::Op::IMul,
other => unimplemented!("Mul {:?}", other),
},
crate::BinaryOperator::Divide => match left_ty_inner.scalar_kind() {
Some(crate::ScalarKind::Sint) => spirv::Op::SDiv,
@ -440,7 +383,6 @@ impl<'w> BlockContext<'w> {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
enum MathOp {
@ -459,10 +401,6 @@ impl<'w> BlockContext<'w> {
Some(handle) => self.cached[handle],
None => 0,
};
let arg3_id = match arg3 {
Some(handle) => self.cached[handle],
None => 0,
};
let id = self.gen_id();
let math_op = match fun {
@ -612,40 +550,6 @@ impl<'w> BlockContext<'w> {
log::error!("unimplemented math function {:?}", fun);
return Err(Error::FeatureNotImplemented("math function"));
}
Mf::ExtractBits => {
let op = match arg_scalar_kind {
Some(crate::ScalarKind::Uint) => spirv::Op::BitFieldUExtract,
Some(crate::ScalarKind::Sint) => spirv::Op::BitFieldSExtract,
other => unimplemented!("Unexpected sign({:?})", other),
};
MathOp::Custom(Instruction::ternary(
op,
result_type_id,
id,
arg0_id,
arg1_id,
arg2_id,
))
}
Mf::InsertBits => MathOp::Custom(Instruction::quaternary(
spirv::Op::BitFieldInsert,
result_type_id,
id,
arg0_id,
arg1_id,
arg2_id,
arg3_id,
)),
Mf::Pack4x8unorm => MathOp::Ext(spirv::GLOp::PackUnorm4x8),
Mf::Pack4x8snorm => MathOp::Ext(spirv::GLOp::PackSnorm4x8),
Mf::Pack2x16float => MathOp::Ext(spirv::GLOp::PackHalf2x16),
Mf::Pack2x16unorm => MathOp::Ext(spirv::GLOp::PackSnorm2x16),
Mf::Pack2x16snorm => MathOp::Ext(spirv::GLOp::PackSnorm2x16),
Mf::Unpack4x8unorm => MathOp::Ext(spirv::GLOp::UnpackUnorm4x8),
Mf::Unpack4x8snorm => MathOp::Ext(spirv::GLOp::UnpackSnorm4x8),
Mf::Unpack2x16float => MathOp::Ext(spirv::GLOp::UnpackHalf2x16),
Mf::Unpack2x16unorm => MathOp::Ext(spirv::GLOp::UnpackSnorm2x16),
Mf::Unpack2x16snorm => MathOp::Ext(spirv::GLOp::UnpackSnorm2x16),
};
block.body.push(match math_op {
@ -654,7 +558,7 @@ impl<'w> BlockContext<'w> {
op,
result_type_id,
id,
&[arg0_id, arg1_id, arg2_id, arg3_id][..fun.argument_count()],
&[arg0_id, arg1_id, arg2_id][..fun.argument_count()],
),
MathOp::Custom(inst) => inst,
});
@ -727,26 +631,25 @@ impl<'w> BlockContext<'w> {
use crate::ScalarKind as Sk;
let expr_id = self.cached[expr];
let (src_kind, src_size, src_width) =
let (src_kind, src_width) =
match *self.fun_info[expr].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Scalar { kind, width } => (kind, None, width),
crate::TypeInner::Vector { kind, width, size } => (kind, Some(size), width),
crate::TypeInner::Scalar { kind, width }
| crate::TypeInner::Vector {
kind,
width,
size: _,
} => (kind, width),
crate::TypeInner::Matrix { width, .. } => (crate::ScalarKind::Float, width),
ref other => {
log::error!("As source {:?}", other);
return Err(Error::Validation("Unexpected Expression::As source"));
}
};
enum Cast {
Unary(spirv::Op),
Binary(spirv::Op, Word),
Ternary(spirv::Op, Word, Word),
}
let id = self.gen_id();
let cast = match (src_kind, kind, convert) {
(_, _, None) | (Sk::Bool, Sk::Bool, Some(_)) => Cast::Unary(spirv::Op::Bitcast),
// casting to a bool - generate `OpXxxNotEqual`
(_, Sk::Bool, Some(_)) => {
let instruction = match (src_kind, kind, convert) {
(_, Sk::Bool, Some(_)) if src_kind != Sk::Bool => {
let (op, value) = match src_kind {
Sk::Sint => (spirv::Op::INotEqual, crate::ScalarValue::Sint(0)),
Sk::Uint => (spirv::Op::INotEqual, crate::ScalarValue::Uint(0)),
@ -755,102 +658,34 @@ impl<'w> BlockContext<'w> {
}
Sk::Bool => unreachable!(),
};
let zero_scalar_id = self.writer.get_constant_scalar(value, src_width);
let zero_id = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind: src_kind,
width: src_width,
pointer_class: None,
}));
let components = [zero_scalar_id; 4];
let zero_id = self.writer.get_constant_scalar(value, 4);
let zero_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
zero_id,
&components[..size as usize],
));
zero_id
Instruction::binary(op, result_type_id, id, expr_id, zero_id)
}
_ => {
let op = match (src_kind, kind, convert) {
(_, _, None) => spirv::Op::Bitcast,
(Sk::Float, Sk::Uint, Some(_)) => spirv::Op::ConvertFToU,
(Sk::Float, Sk::Sint, Some(_)) => spirv::Op::ConvertFToS,
(Sk::Float, Sk::Float, Some(dst_width)) if src_width != dst_width => {
spirv::Op::FConvert
}
None => zero_scalar_id,
};
Cast::Binary(op, zero_id)
}
// casting from a bool - generate `OpSelect`
(Sk::Bool, _, Some(dst_width)) => {
let (val0, val1) = match kind {
Sk::Sint => (crate::ScalarValue::Sint(0), crate::ScalarValue::Sint(1)),
Sk::Uint => (crate::ScalarValue::Uint(0), crate::ScalarValue::Uint(1)),
Sk::Float => (
crate::ScalarValue::Float(0.0),
crate::ScalarValue::Float(1.0),
),
Sk::Bool => unreachable!(),
};
let scalar0_id = self.writer.get_constant_scalar(val0, dst_width);
let scalar1_id = self.writer.get_constant_scalar(val1, dst_width);
let (accept_id, reject_id) = match src_size {
Some(size) => {
let vector_type_id =
self.get_type_id(LookupType::Local(LocalType::Value {
vector_size: Some(size),
kind,
width: dst_width,
pointer_class: None,
}));
let components0 = [scalar0_id; 4];
let components1 = [scalar1_id; 4];
let vec0_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec0_id,
&components0[..size as usize],
));
let vec1_id = self.gen_id();
block.body.push(Instruction::composite_construct(
vector_type_id,
vec1_id,
&components1[..size as usize],
));
(vec1_id, vec0_id)
(Sk::Sint, Sk::Float, Some(_)) => spirv::Op::ConvertSToF,
(Sk::Sint, Sk::Sint, Some(dst_width)) if src_width != dst_width => {
spirv::Op::SConvert
}
None => (scalar1_id, scalar0_id),
(Sk::Uint, Sk::Float, Some(_)) => spirv::Op::ConvertUToF,
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_width != dst_width => {
spirv::Op::UConvert
}
// We assume it's either an identity cast, or int-uint.
_ => spirv::Op::Bitcast,
};
Cast::Ternary(spirv::Op::Select, accept_id, reject_id)
Instruction::unary(op, result_type_id, id, expr_id)
}
(Sk::Float, Sk::Uint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToU),
(Sk::Float, Sk::Sint, Some(_)) => Cast::Unary(spirv::Op::ConvertFToS),
(Sk::Float, Sk::Float, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::FConvert)
}
(Sk::Sint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertSToF),
(Sk::Sint, Sk::Sint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::SConvert)
}
(Sk::Uint, Sk::Float, Some(_)) => Cast::Unary(spirv::Op::ConvertUToF),
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_width != dst_width => {
Cast::Unary(spirv::Op::UConvert)
}
// We assume it's either an identity cast, or int-uint.
_ => Cast::Unary(spirv::Op::Bitcast),
};
let id = self.gen_id();
let instruction = match cast {
Cast::Unary(op) => Instruction::unary(op, result_type_id, id, expr_id),
Cast::Binary(op, operand) => {
Instruction::binary(op, result_type_id, id, expr_id, operand)
}
Cast::Ternary(op, op1, op2) => {
Instruction::ternary(op, result_type_id, id, expr_id, op1, op2)
}
};
block.body.push(instruction);
id
}
@ -1154,6 +989,7 @@ impl<'w> BlockContext<'w> {
crate::Statement::Switch {
selector,
ref cases,
ref default,
} => {
let selector_id = self.cached[selector];
@ -1164,30 +1000,13 @@ impl<'w> BlockContext<'w> {
));
let default_id = self.gen_id();
let mut reached_default = false;
let mut raw_cases = Vec::with_capacity(cases.len());
let mut case_ids = Vec::with_capacity(cases.len());
for case in cases.iter() {
match case.value {
crate::SwitchValue::Integer(value) => {
let label_id = self.gen_id();
// No cases should be added after the default case is encountered
// since the default case catches all
if !reached_default {
raw_cases.push(super::instructions::Case {
value: value as Word,
label_id,
});
}
case_ids.push(label_id);
}
crate::SwitchValue::Default => {
case_ids.push(default_id);
reached_default = true;
}
}
}
let raw_cases = cases
.iter()
.map(|c| super::instructions::Case {
value: c.value as Word,
label_id: self.gen_id(),
})
.collect::<Vec<_>>();
self.function.consume(
block,
@ -1199,25 +1018,24 @@ impl<'w> BlockContext<'w> {
..loop_context
};
for (i, (case, label_id)) in cases.iter().zip(case_ids.iter()).enumerate() {
for (i, (case, raw_case)) in cases.iter().zip(raw_cases.iter()).enumerate() {
let case_finish_id = if case.fall_through {
case_ids[i + 1]
match raw_cases.get(i + 1) {
Some(rc) => rc.label_id,
None => default_id,
}
} else {
merge_id
};
self.write_block(
*label_id,
raw_case.label_id,
&case.body,
Some(case_finish_id),
inner_context,
)?;
}
// If no default was encountered write a empty block to satisfy the presence of
// a block the default label
if !reached_default {
self.write_block(default_id, &[], Some(merge_id), inner_context)?;
}
self.write_block(default_id, default, Some(merge_id), inner_context)?;
block = Block::new(merge_id);
}

Просмотреть файл

@ -1,4 +1,4 @@
use crate::{Handle, UniqueArena};
use crate::{Arena, Handle};
use spirv::Word;
pub(super) fn bytes_to_words(bytes: &[u8]) -> Vec<Word> {
@ -35,7 +35,7 @@ pub(super) fn map_storage_class(class: crate::StorageClass) -> spirv::StorageCla
pub(super) fn contains_builtin(
binding: Option<&crate::Binding>,
ty: Handle<crate::Type>,
arena: &UniqueArena<crate::Type>,
arena: &Arena<crate::Type>,
built_in: crate::BuiltIn,
) -> bool {
if let Some(&crate::Binding::BuiltIn(bi)) = binding {

12
third_party/rust/naga/src/back/spv/image.rs поставляемый
Просмотреть файл

@ -739,7 +739,7 @@ impl<'w> BlockContext<'w> {
// Perform the access, according to the bounds check policy.
let access_id = match self.writer.bounds_check_policies.image {
crate::proc::BoundsCheckPolicy::Restrict => {
crate::back::BoundsCheckPolicy::Restrict => {
let (coords, level_id, sample_id) = self.write_restricted_coordinates(
image_id,
coordinates,
@ -749,7 +749,7 @@ impl<'w> BlockContext<'w> {
)?;
access.generate(&mut self.writer.id_gen, coords, level_id, sample_id, block)
}
crate::proc::BoundsCheckPolicy::ReadZeroSkipWrite => self
crate::back::BoundsCheckPolicy::ReadZeroSkipWrite => self
.write_conditional_image_access(
image_id,
coordinates,
@ -758,7 +758,7 @@ impl<'w> BlockContext<'w> {
block,
&access,
)?,
crate::proc::BoundsCheckPolicy::Unchecked => access.generate(
crate::back::BoundsCheckPolicy::Unchecked => access.generate(
&mut self.writer.id_gen,
coordinates.value_id,
level_id,
@ -1131,12 +1131,12 @@ impl<'w> BlockContext<'w> {
let write = Store { image_id, value_id };
match self.writer.bounds_check_policies.image {
crate::proc::BoundsCheckPolicy::Restrict => {
crate::back::BoundsCheckPolicy::Restrict => {
let (coords, _, _) =
self.write_restricted_coordinates(image_id, coordinates, None, None, block)?;
write.generate(&mut self.writer.id_gen, coords, None, None, block);
}
crate::proc::BoundsCheckPolicy::ReadZeroSkipWrite => {
crate::back::BoundsCheckPolicy::ReadZeroSkipWrite => {
self.write_conditional_image_access(
image_id,
coordinates,
@ -1146,7 +1146,7 @@ impl<'w> BlockContext<'w> {
&write,
)?;
}
crate::proc::BoundsCheckPolicy::Unchecked => {
crate::back::BoundsCheckPolicy::Unchecked => {
write.generate(
&mut self.writer.id_gen,
coordinates.value_id,

27
third_party/rust/naga/src/back/spv/index.rs поставляемый
Просмотреть файл

@ -1,7 +1,7 @@
//! Bounds-checking for SPIR-V output.
use super::{selection::Selection, Block, BlockContext, Error, IdGenerator, Instruction, Word};
use crate::{arena::Handle, proc::BoundsCheckPolicy};
use crate::{arena::Handle, back::BoundsCheckPolicy};
/// The results of emitting code for a left-hand-side expression.
///
@ -109,6 +109,10 @@ impl<'w> BlockContext<'w> {
let length_id = self.write_runtime_array_length(sequence, block)?;
Ok(MaybeKnown::Computed(length_id))
}
crate::proc::IndexableLength::Specializable(constant) => {
let length_id = self.writer.constant_ids[constant.index()];
Ok(MaybeKnown::Computed(length_id))
}
}
}
@ -342,11 +346,22 @@ impl<'w> BlockContext<'w> {
index: Handle<crate::Expression>,
block: &mut Block,
) -> Result<BoundsCheckResult, Error> {
let policy = self.writer.bounds_check_policies.choose_policy(
base,
&self.ir_module.types,
self.fun_info,
);
// Should this access be covered by `index_bounds_check_policy` or
// `buffer_bounds_check_policy`?
let is_buffer = match *self.fun_info[base].ty.inner_with(&self.ir_module.types) {
crate::TypeInner::Pointer { class, .. }
| crate::TypeInner::ValuePointer { class, .. } => match class {
crate::StorageClass::Storage { access: _ } | crate::StorageClass::Uniform => true,
_ => false,
},
_ => false,
};
let policy = if is_buffer {
self.writer.bounds_check_policies.buffer
} else {
self.writer.bounds_check_policies.index
};
Ok(match policy {
BoundsCheckPolicy::Restrict => self.write_restricted_index(base, index, block)?,

Просмотреть файл

@ -703,42 +703,6 @@ impl super::Instruction {
instruction
}
pub(super) fn ternary(
op: Op,
result_type_id: Word,
id: Word,
operand_1: Word,
operand_2: Word,
operand_3: Word,
) -> Self {
let mut instruction = Self::new(op);
instruction.set_type(result_type_id);
instruction.set_result(id);
instruction.add_operand(operand_1);
instruction.add_operand(operand_2);
instruction.add_operand(operand_3);
instruction
}
pub(super) fn quaternary(
op: Op,
result_type_id: Word,
id: Word,
operand_1: Word,
operand_2: Word,
operand_3: Word,
operand_4: Word,
) -> Self {
let mut instruction = Self::new(op);
instruction.set_type(result_type_id);
instruction.set_result(id);
instruction.add_operand(operand_1);
instruction.add_operand(operand_2);
instruction.add_operand(operand_3);
instruction.add_operand(operand_4);
instruction
}
pub(super) fn relational(op: Op, result_type_id: Word, id: Word, expr_id: Word) -> Self {
let mut instruction = Self::new(op);
instruction.set_type(result_type_id);

93
third_party/rust/naga/src/back/spv/mod.rs поставляемый
Просмотреть файл

@ -13,8 +13,7 @@ mod writer;
pub use spirv::Capability;
use crate::arena::Handle;
use crate::proc::{BoundsCheckPolicies, TypeResolution};
use crate::{arena::Handle, back::BoundsCheckPolicies, proc::TypeResolution};
use spirv::Word;
use std::ops;
@ -219,47 +218,19 @@ impl LocalImageType {
/// A SPIR-V type constructed during code generation.
///
/// This is the variant of [`LookupType`] used to represent types that might not
/// be available in the arena. Variants are present here for one of two reasons:
/// In the process of writing SPIR-V, we need to synthesize various types for
/// intermediate results and such. However, it's inconvenient to use
/// `crate::Type` or `crate::TypeInner` for these, as the IR module is immutable
/// so we can't ever create a `Handle<Type>` to refer to them. So for local use
/// in the SPIR-V writer, we have this home-grown type enum that covers only the
/// cases we need (for example, it doesn't cover structs).
///
/// - They represent types synthesized during code generation, as explained
/// in the documentation for [`LookupType`].
///
/// - They represent types for which SPIR-V forbids duplicate `OpType...`
/// instructions, requiring deduplication.
///
/// This is not a complete copy of [`TypeInner`]: for example, SPIR-V generation
/// never synthesizes new struct types, so `LocalType` has nothing for that.
///
/// Each `LocalType` variant should be handled identically to its analogous
/// `TypeInner` variant. You can use the [`make_local`] function to help with
/// this, by converting everything possible to a `LocalType` before inspecting
/// it.
///
/// ## `Localtype` equality and SPIR-V `OpType` uniqueness
///
/// The definition of `Eq` on `LocalType` is carefully chosen to help us follow
/// certain SPIR-V rules. SPIR-V §2.8 requires some classes of `OpType...`
/// instructions to be unique; for example, you can't have two `OpTypeInt 32 1`
/// instructions in the same module. All 32-bit signed integers must use the
/// same type id.
///
/// All SPIR-V types that must be unique can be represented as a `LocalType`,
/// and two `LocalType`s are always `Eq` if SPIR-V would require them to use the
/// same `OpType...` instruction. This lets us avoid duplicates by recording the
/// ids of the type instructions we've already generated in a hash table,
/// [`Writer::lookup_type`], keyed by `LocalType`.
///
/// As another example, [`LocalImageType`], stored in the `LocalType::Image`
/// variant, is designed to help us deduplicate `OpTypeImage` instructions. See
/// its documentation for details.
///
/// `LocalType` also includes variants like `Pointer` that do not need to be
/// unique - but it is harmless to avoid the duplication.
///
/// As it always must, the `Hash` implementation respects the `Eq` relation.
///
/// [`TypeInner`]: crate::TypeInner
/// As explained in §2.8 of the SPIR-V spec, some classes of type instructions
/// must be unique; for example, you can't have two `OpTypeInt 32 1`
/// instructions in the same module. `Writer::lookup_type` maps each `LocalType`
/// value for which we've written instructions to its id, so we can avoid
/// writing out duplicates. `LocalType` also includes variants like `Pointer`
/// that do not need to be unique - but it is harmless to avoid the duplication.
#[derive(Debug, PartialEq, Hash, Eq, Copy, Clone)]
enum LocalType {
/// A scalar, vector, or pointer to one of those.
@ -288,28 +259,6 @@ enum LocalType {
Sampler,
}
/// A type encountered during SPIR-V generation.
///
/// In the process of writing SPIR-V, we need to synthesize various types for
/// intermediate results and such: pointer types, vector/matrix component types,
/// or even booleans, which usually appear in SPIR-V code even when they're not
/// used by the module source.
///
/// However, we can't use `crate::Type` or `crate::TypeInner` for these, as the
/// type arena may not contain what we need (it only contains types used
/// directly by other parts of the IR), and the IR module is immutable, so we
/// can't add anything to it.
///
/// So for local use in the SPIR-V writer, we use this type, which holds either
/// a handle into the arena, or a [`LocalType`] containing something synthesized
/// locally.
///
/// This is very similar to the [`proc::TypeResolution`] enum, with `LocalType`
/// playing the role of `TypeInner`. However, `LocalType` also has other
/// properties needed for SPIR-V generation; see the description of
/// [`LocalType`] for details.
///
/// [`proc::TypeResolution`]: crate::proc::TypeResolution
#[derive(Debug, PartialEq, Hash, Eq, Copy, Clone)]
enum LookupType {
Handle(Handle<crate::Type>),
@ -562,15 +511,9 @@ bitflags::bitflags! {
const DEBUG = 0x1;
/// Flip Y coordinate of `BuiltIn::Position` output.
const ADJUST_COORDINATE_SPACE = 0x2;
/// Emit `OpName` for input/output locations.
/// Contrary to spec, some drivers treat it as semantic, not allowing
/// any conflicts.
/// Emit `OpLabel` for input/output locations.
/// Some drivers treat it as semantic, not allowing any conflicts.
const LABEL_VARYINGS = 0x4;
/// Emit `PointSize` output builtin to vertex shaders, which is
/// required for drawing with `PointList` topology.
const FORCE_POINT_SIZE = 0x8;
/// Clamp `BuiltIn::FragDepth` output between 0 and 1.
const CLAMP_FRAG_DEPTH = 0x10;
}
}
@ -595,9 +538,7 @@ pub struct Options {
impl Default for Options {
fn default() -> Self {
let mut flags = WriterFlags::ADJUST_COORDINATE_SPACE
| WriterFlags::LABEL_VARYINGS
| WriterFlags::CLAMP_FRAG_DEPTH;
let mut flags = WriterFlags::ADJUST_COORDINATE_SPACE | WriterFlags::LABEL_VARYINGS;
if cfg!(debug_assertions) {
flags |= WriterFlags::DEBUG;
}
@ -605,7 +546,7 @@ impl Default for Options {
lang_version: (1, 0),
flags,
capabilities: None,
bounds_check_policies: crate::proc::BoundsCheckPolicies::default(),
bounds_check_policies: super::BoundsCheckPolicies::default(),
}
}
}

128
third_party/rust/naga/src/back/spv/writer.rs поставляемый
Просмотреть файл

@ -6,18 +6,13 @@ use super::{
PipelineOptions, ResultMember, Writer, WriterFlags, BITS_PER_BYTE,
};
use crate::{
arena::{Handle, UniqueArena},
arena::{Arena, Handle},
proc::TypeResolution,
valid::{FunctionInfo, ModuleInfo},
};
use spirv::Word;
use std::collections::hash_map::Entry;
struct FunctionInterface<'a> {
varying_ids: &'a mut Vec<Word>,
stage: crate::ShaderStage,
}
impl Function {
fn to_words(&self, sink: &mut impl Extend<Word>) {
self.signature.as_ref().unwrap().to_words(sink);
@ -195,7 +190,7 @@ impl Writer {
pub(super) fn get_pointer_id(
&mut self,
arena: &UniqueArena<crate::Type>,
arena: &Arena<crate::Type>,
handle: Handle<crate::Type>,
class: spirv::StorageClass,
) -> Result<Word, Error> {
@ -228,35 +223,6 @@ impl Writer {
self.get_type_id(local_type.into())
}
pub(super) fn get_float_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: None,
};
self.get_type_id(local_type.into())
}
pub(super) fn get_float_pointer_type_id(&mut self, class: spirv::StorageClass) -> Word {
let lookup_type = LookupType::Local(LocalType::Value {
vector_size: None,
kind: crate::ScalarKind::Float,
width: 4,
pointer_class: Some(class),
});
if let Some(&id) = self.lookup_type.get(&lookup_type) {
id
} else {
let id = self.id_gen.next();
let ty_id = self.get_float_type_id();
let instruction = Instruction::type_pointer(id, class, ty_id);
instruction.to_words(&mut self.logical_layout.declarations);
self.lookup_type.insert(lookup_type, id);
id
}
}
pub(super) fn get_bool_type_id(&mut self) -> Word {
let local_type = LocalType::Value {
vector_size: None,
@ -277,7 +243,7 @@ impl Writer {
ir_function: &crate::Function,
info: &FunctionInfo,
ir_module: &crate::Module,
mut interface: Option<FunctionInterface>,
mut varying_ids: Option<&mut Vec<Word>>,
) -> Result<Word, Error> {
let mut function = Function::default();
@ -325,13 +291,12 @@ impl Writer {
)?,
false => self.get_type_id(LookupType::Handle(argument.ty)),
};
if let Some(ref mut iface) = interface {
if let Some(ref mut list) = varying_ids {
let id = if let Some(ref binding) = argument.binding {
let name = argument.name.as_ref().map(AsRef::as_ref);
let varying_id =
self.write_varying(ir_module, class, name, argument.ty, binding)?;
iface.varying_ids.push(varying_id);
list.push(varying_id);
let id = self.id_gen.next();
prelude
.body
@ -348,7 +313,7 @@ impl Writer {
let binding = member.binding.as_ref().unwrap();
let varying_id =
self.write_varying(ir_module, class, name, member.ty, binding)?;
iface.varying_ids.push(varying_id);
list.push(varying_id);
let id = self.id_gen.next();
prelude
.body
@ -368,11 +333,6 @@ impl Writer {
} else {
let argument_id = self.id_gen.next();
let instruction = Instruction::function_parameter(argument_type_id, argument_id);
if self.flags.contains(WriterFlags::DEBUG) {
if let Some(ref name) = argument.name {
self.debugs.push(Instruction::name(argument_id, name));
}
}
function.parameters.push(FunctionArgument {
instruction,
handle_id: if handle_ty {
@ -394,16 +354,13 @@ impl Writer {
let return_type_id = match ir_function.result {
Some(ref result) => {
if let Some(ref mut iface) = interface {
let mut has_point_size = false;
if let Some(ref mut list) = varying_ids {
let class = spirv::StorageClass::Output;
if let Some(ref binding) = result.binding {
has_point_size |=
*binding == crate::Binding::BuiltIn(crate::BuiltIn::PointSize);
let type_id = self.get_type_id(LookupType::Handle(result.ty));
let varying_id =
self.write_varying(ir_module, class, None, result.ty, binding)?;
iface.varying_ids.push(varying_id);
list.push(varying_id);
ep_context.results.push(ResultMember {
id: varying_id,
type_id,
@ -416,11 +373,9 @@ impl Writer {
let type_id = self.get_type_id(LookupType::Handle(member.ty));
let name = member.name.as_ref().map(AsRef::as_ref);
let binding = member.binding.as_ref().unwrap();
has_point_size |=
*binding == crate::Binding::BuiltIn(crate::BuiltIn::PointSize);
let varying_id =
self.write_varying(ir_module, class, name, member.ty, binding)?;
iface.varying_ids.push(varying_id);
list.push(varying_id);
ep_context.results.push(ResultMember {
id: varying_id,
type_id,
@ -430,29 +385,6 @@ impl Writer {
} else {
unreachable!("Missing result binding on an entry point");
}
if self.flags.contains(WriterFlags::FORCE_POINT_SIZE)
&& iface.stage == crate::ShaderStage::Vertex
&& !has_point_size
{
// add point size artificially
let varying_id = self.id_gen.next();
let pointer_type_id = self.get_float_pointer_type_id(class);
Instruction::variable(pointer_type_id, varying_id, class, None)
.to_words(&mut self.logical_layout.declarations);
self.decorate(
varying_id,
spirv::Decoration::BuiltIn,
&[spirv::BuiltIn::PointSize as u32],
);
iface.varying_ids.push(varying_id);
let default_value_id =
self.get_constant_scalar(crate::ScalarValue::Float(1.0), 4);
prelude
.body
.push(Instruction::store(varying_id, default_value_id, None));
}
self.void_type
} else {
self.get_type_id(LookupType::Handle(result.ty))
@ -481,7 +413,7 @@ impl Writer {
function_type,
));
if interface.is_some() {
if varying_ids.is_some() {
function.entry_point_context = Some(ep_context);
}
@ -569,10 +501,7 @@ impl Writer {
&entry_point.function,
info,
ir_module,
Some(FunctionInterface {
varying_ids: &mut interface_ids,
stage: entry_point.stage,
}),
Some(&mut interface_ids),
)?;
let exec_model = match entry_point.stage {
@ -771,7 +700,7 @@ impl Writer {
fn write_type_declaration_arena(
&mut self,
arena: &UniqueArena<crate::Type>,
arena: &Arena<crate::Type>,
handle: Handle<crate::Type>,
) -> Result<Word, Error> {
let ty = &arena[handle];
@ -1009,7 +938,7 @@ impl Writer {
&solo[..]
}
8 => {
pair = [val as u32, (val >> 32) as u32];
pair = [(val >> 32) as u32, val as u32];
&pair
}
_ => unreachable!(),
@ -1023,7 +952,7 @@ impl Writer {
&solo[..]
}
8 => {
pair = [val as u32, (val >> 32) as u32];
pair = [(val >> 32) as u32, val as u32];
&pair
}
_ => unreachable!(),
@ -1038,7 +967,7 @@ impl Writer {
}
8 => {
let bits = f64::to_bits(val);
pair = [bits as u32, (bits >> 32) as u32];
pair = [(bits >> 32) as u32, bits as u32];
&pair
}
_ => unreachable!(),
@ -1164,10 +1093,6 @@ impl Writer {
BuiltIn::FragCoord
}
}
Bi::ViewIndex => {
self.require_any("`view_index` built-in", &[spirv::Capability::MultiView])?;
BuiltIn::ViewIndex
}
// vertex
Bi::BaseInstance => BuiltIn::BaseInstance,
Bi::BaseVertex => BuiltIn::BaseVertex,
@ -1293,19 +1218,6 @@ impl Writer {
mod_info: &ModuleInfo,
ep_index: Option<usize>,
) -> Result<(), Error> {
fn has_view_index_check(
ir_module: &crate::Module,
binding: Option<&crate::Binding>,
ty: Handle<crate::Type>,
) -> bool {
match ir_module.types[ty].inner {
crate::TypeInner::Struct { ref members, .. } => members.iter().any(|member| {
has_view_index_check(ir_module, member.binding.as_ref(), member.ty)
}),
_ => binding == Some(&crate::Binding::BuiltIn(crate::BuiltIn::ViewIndex)),
}
}
let has_storage_buffers =
ir_module
.global_variables
@ -1314,21 +1226,11 @@ impl Writer {
crate::StorageClass::Storage { .. } => true,
_ => false,
});
let has_view_index = ir_module
.entry_points
.iter()
.flat_map(|entry| entry.function.arguments.iter())
.any(|arg| has_view_index_check(ir_module, arg.binding.as_ref(), arg.ty));
if self.physical_layout.version < 0x10300 && has_storage_buffers {
// enable the storage buffer class on < SPV-1.3
Instruction::extension("SPV_KHR_storage_buffer_storage_class")
.to_words(&mut self.logical_layout.extensions);
}
if has_view_index {
Instruction::extension("SPV_KHR_multiview")
.to_words(&mut self.logical_layout.extensions)
}
Instruction::type_void(self.void_type).to_words(&mut self.logical_layout.declarations);
Instruction::ext_inst_import(self.gl450_ext_inst_id, "GLSL.std.450")
.to_words(&mut self.logical_layout.ext_inst_imports);

Просмотреть файл

@ -1,134 +1,126 @@
// https://gpuweb.github.io/gpuweb/wgsl/#keyword-summary
pub const RESERVED: &[&str] = &[
// type-defining keywords
"array",
"atomic",
"bool",
"float32",
"int32",
"mat2x2",
"mat2x3",
"mat2x4",
"mat3x2",
"mat3x3",
"mat3x4",
"mat4x2",
"mat4x3",
"mat4x4",
"pointer",
"sampler",
"sampler_comparison",
"struct",
"texture_1d",
"texture_2d",
"texture_2d_array",
"texture_3d",
"texture_cube",
"texture_cube_array",
"texture_multisampled_2d",
"texture_storage_1d",
"texture_storage_2d",
"texture_storage_2d_array",
"texture_storage_3d",
"texture_depth_2d",
"texture_depth_2d_array",
"texture_depth_cube",
"texture_depth_cube_array",
"texture_depth_multisampled_2d",
"uint32",
"vec2",
"vec3",
"vec4",
// other keywords
"bitcast",
"block",
"break",
"case",
"continue",
"continuing",
"default",
"discard",
"else",
"else_if",
"enable",
"fallthrough",
"false",
"fn",
"for",
"function",
"if",
"let",
"loop",
"private",
"read",
"read_write",
"return",
"storage",
"switch",
"true",
"type",
"uniform",
"var",
"workgroup",
"write",
// image format keywords
"r8unorm",
"r8snorm",
"r8uint",
"r8sint",
"r16uint",
"r16sint",
"r16float",
"rg8unorm",
"rg8snorm",
"rg8uint",
"rg8sint",
"r32uint",
"r32sint",
"r32float",
"rg16uint",
"rg16sint",
"rg16float",
"rgba8unorm",
"rgba8unorm_srgb",
"rgba8snorm",
"rgba8uint",
"rgba8sint",
"bgra8unorm",
"bgra8unorm_srgb",
"rgb10a2unorm",
"rg11b10float",
"rg32uint",
"rg32sint",
"rg32float",
"rgba16uint",
"rgba16sint",
"rgba16float",
"rgba32uint",
"rgba32sint",
"rgba32float",
// reserved keywords
// Type-defining keywords
"ARRAY",
"BOOL",
"FLOAT32",
"INT32",
"MAT2x2",
"MAT2x3",
"MAT2x4",
"MAT3x2",
"MAT3x3",
"MAT3x4",
"MAT4x2",
"MAT4x3",
"MAT4x4",
"POINTER",
"SAMPLER",
"SAMPLER_COMPARISON",
"STRUCT",
"TEXTURE_1D",
"TEXTURE_2D",
"TEXTURE_2D_ARRAY",
"TEXTURE_3D",
"TEXTURE_CUBE",
"TEXTURE_CUBE_ARRAY",
"TEXTURE_MULTISAMPLED_2D",
"TEXTURE_STORAGE_1D",
"TEXTURE_STORAGE_2D",
"TEXTURE_STORAGE_2D_ARRAY",
"TEXTURE_STORAGE_3D",
"TEXTURE_DEPTH_2D",
"TEXTURE_DEPTH_2D_ARRAY",
"TEXTURE_DEPTH_CUBE",
"TEXTURE_DEPTH_CUBE_ARRAY",
"UINT32",
"VEC2",
"VEC3",
"VEC4",
// Other keywords
"BITCAST",
"BLOCK",
"BREAK",
"CASE",
"CONTINUE",
"CONTINUING",
"DEFAULT",
"DISCARD",
"ELSE",
"ELSE_IF",
"ENABLE",
"FALLTHROUGH",
"FALSE",
"FN",
"FOR",
"FUNCTION",
"IF",
"LET",
"LOOP",
"PRIVATE",
"RETURN",
"STORAGE",
"SWITCH",
"TRUE",
"TYPE",
"UNIFORM",
"VAR",
"WORKGROUP",
// Image format keywords
"R8UNORM",
"R8SNORM",
"R8UINT",
"R8SINT",
"R16UINT",
"R16SINT",
"R16FLOAT",
"RG8UNORM",
"RG8SNORM",
"RG8UINT",
"RG8SINT",
"R32UINT",
"R32SINT",
"R32FLOAT",
"RG16UINT",
"RG16SINT",
"RG16FLOAT",
"RGBA8UNORM",
"RGBA8UNORM-SRGB",
"RGBA8SNORM",
"RGBA8UINT",
"RGBA8SINT",
"BGRA8UNORM",
"BGRA8UNORM-SRGB",
"RGB10A2UNORM",
"RG11B10FLOAT",
"RG32UINT",
"RG32SINT",
"RG32FLOAT",
"RGBA16UINT",
"RGBA16SINT",
"RGBA16FLOAT",
"RGBA32UINT",
"RGBA32SINT",
"RGBA32FLOAT",
// Reserved Keywords
"asm",
"bf16",
"const",
"do",
"enum",
"f16",
"f64",
"handle",
"i8",
"i16",
"i64",
"mat",
"premerge",
"regardless",
"const",
"typedef",
"u8",
"u16",
"u64",
"unless",
"using",
"vec",
"void",
"while",
"regardless",
"premerge",
"handle",
];

2
third_party/rust/naga/src/back/wgsl/mod.rs поставляемый
Просмотреть файл

@ -5,8 +5,6 @@ use thiserror::Error;
pub use writer::Writer;
const BAKE_PREFIX: &str = "e";
#[derive(Error, Debug)]
pub enum Error {
#[error(transparent)]

446
third_party/rust/naga/src/back/wgsl/writer.rs поставляемый
Просмотреть файл

@ -22,35 +22,6 @@ enum Attribute {
WorkGroupSize([u32; 3]),
}
/// The WGSL form that `write_expr_with_indirection` should use to render a Naga
/// expression.
///
/// Sometimes a Naga `Expression` alone doesn't provide enough information to
/// choose the right rendering for it in WGSL. For example, one natural WGSL
/// rendering of a Naga `LocalVariable(x)` expression might be `&x`, since
/// `LocalVariable` produces a pointer to the local variable's storage. But when
/// rendering a `Store` statement, the `pointer` operand must be the left hand
/// side of a WGSL assignment, so the proper rendering is `x`.
///
/// The caller of `write_expr_with_indirection` must provide an `Expected` value
/// to indicate how ambiguous expressions should be rendered.
#[derive(Clone, Copy, Debug)]
enum Indirection {
/// Render pointer-construction expressions as WGSL `ptr`-typed expressions.
///
/// This is the right choice for most cases. Whenever a Naga pointer
/// expression is not the `pointer` operand of a `Load` or `Store`, it
/// must be a WGSL pointer expression.
Ordinary,
/// Render pointer-construction expressions as WGSL reference-typed
/// expressions.
///
/// For example, this is the right choice for the `pointer` operand when
/// rendering a `Store` statement as a WGSL assignment.
Reference,
}
pub struct Writer<W> {
out: W,
names: crate::FastHashMap<NameKey, String>,
@ -525,13 +496,10 @@ impl<W: Write> Writer<W> {
"storage_",
"",
storage_format_str(format),
if access.contains(crate::StorageAccess::LOAD | crate::StorageAccess::STORE)
{
",read_write"
} else if access.contains(crate::StorageAccess::LOAD) {
",read"
} else {
if access.contains(crate::StorageAccess::STORE) {
",write"
} else {
""
},
),
};
@ -583,67 +551,17 @@ impl<W: Write> Writer<W> {
}
TypeInner::Pointer { base, class } => {
let (storage, maybe_access) = storage_class_str(class);
// Everything but `StorageClass::Handle` gives us a `storage` name, but
// Naga IR never produces pointers to handles, so it doesn't matter much
// how we write such a type. Just write it as the base type alone.
if let Some(class) = storage {
write!(self.out, "ptr<{}, ", class)?;
if let Some(access) = maybe_access {
write!(self.out, ", {}", access)?;
}
}
self.write_type(module, base)?;
if storage.is_some() {
if let Some(access) = maybe_access {
write!(self.out, ", {}", access)?;
}
write!(self.out, ">")?;
}
}
TypeInner::ValuePointer {
size: None,
kind,
width: _,
class,
} => {
let (storage, maybe_access) = storage_class_str(class);
if let Some(class) = storage {
write!(self.out, "ptr<{}, {}", class, scalar_kind_str(kind))?;
if let Some(access) = maybe_access {
write!(self.out, ", {}", access)?;
}
write!(self.out, ">")?;
} else {
return Err(Error::Unimplemented(format!(
"ValuePointer to StorageClass::Handle {:?}",
inner
)));
}
}
TypeInner::ValuePointer {
size: Some(size),
kind,
width: _,
class,
} => {
let (storage, maybe_access) = storage_class_str(class);
if let Some(class) = storage {
write!(
self.out,
"ptr<{}, vec{}<{}>",
class,
back::vector_size_str(size),
scalar_kind_str(kind)
)?;
if let Some(access) = maybe_access {
write!(self.out, ", {}", access)?;
}
write!(self.out, ">")?;
} else {
return Err(Error::Unimplemented(format!(
"ValuePointer to StorageClass::Handle {:?}",
inner
)));
}
write!(self.out, ">")?;
}
_ => {
return Err(Error::Unimplemented(format!(
"write_value_type {:?}",
@ -670,13 +588,12 @@ impl<W: Write> Writer<W> {
match *stmt {
Statement::Emit(ref range) => {
for handle in range.clone() {
let info = &func_ctx.info[handle];
let expr_name = if let Some(name) = func_ctx.named_expressions.get(&handle) {
// Front end provides names for all variables at the start of writing.
// But we write them to step by step. We need to recache them
// Otherwise, we could accidentally write variable name instead of full expression.
// Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
Some(self.namer.call(name))
Some(self.namer.call_unique(name))
} else {
let expr = &func_ctx.expressions[handle];
let min_ref_count = expr.bake_ref_count();
@ -687,7 +604,8 @@ impl<W: Write> Writer<W> {
| Expression::ImageSample { .. } => true,
_ => false,
};
if min_ref_count <= info.ref_count || required_baking_expr {
if min_ref_count <= func_ctx.info[handle].ref_count || required_baking_expr
{
// If expression contains unsupported builtin we should skip it
if let Expression::Load { pointer } = func_ctx.expressions[handle] {
if let Expression::AccessIndex { base, index } =
@ -704,7 +622,7 @@ impl<W: Write> Writer<W> {
}
}
Some(format!("{}{}", super::BAKE_PREFIX, handle.index()))
Some(format!("{}{}", back::BAKE_PREFIX, handle.index()))
} else {
None
}
@ -775,26 +693,28 @@ impl<W: Write> Writer<W> {
}
write!(self.out, "{}", level)?;
let is_atomic = match *func_ctx.info[pointer].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, .. } => match module.types[base].inner {
crate::TypeInner::Atomic { .. } => true,
_ => false,
},
_ => false,
let (is_ptr, is_atomic) = match *func_ctx.info[pointer].ty.inner_with(&module.types)
{
crate::TypeInner::Pointer { base, .. } => (
func_ctx.expressions[pointer].should_deref(),
match module.types[base].inner {
crate::TypeInner::Atomic { .. } => true,
_ => false,
},
),
_ => (false, false),
};
if is_atomic {
write!(self.out, "atomicStore(")?;
if !is_ptr {
write!(self.out, "&")?;
}
self.write_expr(module, pointer, func_ctx)?;
write!(self.out, ", ")?;
self.write_expr(module, value, func_ctx)?;
write!(self.out, ")")?;
} else {
self.write_expr_with_indirection(
module,
pointer,
func_ctx,
Indirection::Reference,
)?;
self.write_expr(module, pointer, func_ctx)?;
write!(self.out, " = ")?;
self.write_expr(module, value, func_ctx)?;
}
@ -807,14 +727,14 @@ impl<W: Write> Writer<W> {
} => {
write!(self.out, "{}", level)?;
if let Some(expr) = result {
let name = format!("{}{}", super::BAKE_PREFIX, expr.index());
let name = format!("{}{}", back::BAKE_PREFIX, expr.index());
self.start_named_expr(module, expr, func_ctx, &name)?;
self.named_expressions.insert(expr, name);
}
let func_name = &self.names[&NameKey::Function(function)];
write!(self.out, "{}(", func_name)?;
for (index, &argument) in arguments.iter().enumerate() {
self.write_expr(module, argument, func_ctx)?;
for (index, argument) in arguments.iter().enumerate() {
self.write_expr(module, *argument, func_ctx)?;
// Only write a comma if isn't the last element
if index != arguments.len().saturating_sub(1) {
// The leading space is for readability only
@ -830,12 +750,17 @@ impl<W: Write> Writer<W> {
result,
} => {
write!(self.out, "{}", level)?;
let res_name = format!("{}{}", super::BAKE_PREFIX, result.index());
let res_name = format!("{}{}", back::BAKE_PREFIX, result.index());
self.start_named_expr(module, result, func_ctx, &res_name)?;
self.named_expressions.insert(result, res_name);
let is_ptr = func_ctx.expressions[pointer].should_deref();
let fun_str = fun.to_wgsl();
write!(self.out, "atomic{}(", fun_str)?;
if !is_ptr {
write!(self.out, "&")?;
}
self.write_expr(module, pointer, func_ctx)?;
if let crate::AtomicFunction::Exchange { compare: Some(cmp) } = *fun {
write!(self.out, ", ")?;
@ -877,6 +802,7 @@ impl<W: Write> Writer<W> {
Statement::Switch {
selector,
ref cases,
ref default,
} => {
// Start the switch
write!(self.out, "{}", level)?;
@ -884,24 +810,25 @@ impl<W: Write> Writer<W> {
self.write_expr(module, selector, func_ctx)?;
writeln!(self.out, ") {{")?;
let type_postfix = match *func_ctx.info[selector].ty.inner_with(&module.types) {
crate::TypeInner::Scalar {
kind: crate::ScalarKind::Uint,
..
} => "u",
_ => "",
};
// Write all cases
let mut write_case = true;
let all_fall_through = cases
.iter()
.all(|case| case.fall_through && case.body.is_empty());
let l2 = level.next();
if !cases.is_empty() {
for case in cases {
match case.value {
crate::SwitchValue::Integer(value) => {
writeln!(self.out, "{}case {}{}: {{", l2, value, type_postfix)?;
}
crate::SwitchValue::Default => {
writeln!(self.out, "{}default: {{", l2)?;
}
if write_case {
write!(self.out, "{}case ", l2)?;
}
if !all_fall_through && case.fall_through && case.body.is_empty() {
write_case = false;
write!(self.out, "{}, ", case.value)?;
continue;
} else {
write_case = true;
writeln!(self.out, "{}: {{", case.value)?;
}
for sta in case.body.iter() {
@ -916,6 +843,16 @@ impl<W: Write> Writer<W> {
}
}
if !default.is_empty() {
writeln!(self.out, "{}default: {{", l2)?;
for sta in default {
self.write_stmt(module, sta, func_ctx, l2.next())?;
}
writeln!(self.out, "{}}}", l2)?
}
writeln!(self.out, "{}}}", level)?
}
Statement::Loop {
@ -960,65 +897,6 @@ impl<W: Write> Writer<W> {
Ok(())
}
/// Return the sort of indirection that `expr`'s plain form evaluates to.
///
/// An expression's 'plain form' is the most general rendition of that
/// expression into WGSL, lacking `&` or `*` operators:
///
/// - The plain form of `LocalVariable(x)` is simply `x`, which is a reference
/// to the local variable's storage.
///
/// - The plain form of `GlobalVariable(g)` is simply `g`, which is usually a
/// reference to the global variable's storage. However, globals in the
/// `Handle` storage class are immutable, and `GlobalVariable` expressions for
/// those produce the value directly, not a pointer to it. Such
/// `GlobalVariable` expressions are `Ordinary`.
///
/// - `Access` and `AccessIndex` are `Reference` when their `base` operand is a
/// pointer. If they are applied directly to a composite value, they are
/// `Ordinary`.
///
/// Note that `FunctionArgument` expressions are never `Reference`, even when
/// the argument's type is `Pointer`. `FunctionArgument` always evaluates to the
/// argument's value directly, so any pointer it produces is merely the value
/// passed by the caller.
fn plain_form_indirection(
&self,
expr: Handle<crate::Expression>,
module: &Module,
func_ctx: &back::FunctionCtx<'_>,
) -> Indirection {
use crate::Expression as Ex;
// Named expressions are `let` expressions, which apply the Load Rule,
// so if their type is a Naga pointer, then that must be a WGSL pointer
// as well.
if self.named_expressions.contains_key(&expr) {
return Indirection::Ordinary;
}
match func_ctx.expressions[expr] {
Ex::LocalVariable(_) => Indirection::Reference,
Ex::GlobalVariable(handle) => {
let global = &module.global_variables[handle];
match global.class {
crate::StorageClass::Handle => Indirection::Ordinary,
_ => Indirection::Reference,
}
}
Ex::Access { base, .. } | Ex::AccessIndex { base, .. } => {
let base_ty = func_ctx.info[base].ty.inner_with(&module.types);
match *base_ty {
crate::TypeInner::Pointer { .. } | crate::TypeInner::ValuePointer { .. } => {
Indirection::Reference
}
_ => Indirection::Ordinary,
}
}
_ => Indirection::Ordinary,
}
}
fn start_named_expr(
&mut self,
module: &Module,
@ -1043,70 +921,15 @@ impl<W: Write> Writer<W> {
Ok(())
}
/// Write the ordinary WGSL form of `expr`.
/// Helper method to write expressions
///
/// See `write_expr_with_indirection` for details.
/// # Notes
/// Doesn't add any newlines or leading/trailing spaces
fn write_expr(
&mut self,
module: &Module,
expr: Handle<crate::Expression>,
func_ctx: &back::FunctionCtx<'_>,
) -> BackendResult {
self.write_expr_with_indirection(module, expr, func_ctx, Indirection::Ordinary)
}
/// Write `expr` as a WGSL expression with the requested indirection.
///
/// In terms of the WGSL grammar, the resulting expression is a
/// `singular_expression`. It may be parenthesized. This makes it suitable
/// for use as the operand of a unary or binary operator without worrying
/// about precedence.
///
/// This does not produce newlines or indentation.
///
/// The `requested` argument indicates (roughly) whether Naga
/// `Pointer`-valued expressions represent WGSL references or pointers. See
/// `Indirection` for details.
fn write_expr_with_indirection(
&mut self,
module: &Module,
expr: Handle<crate::Expression>,
func_ctx: &back::FunctionCtx<'_>,
requested: Indirection,
) -> BackendResult {
// If the plain form of the expression is not what we need, emit the
// operator necessary to correct that.
let plain = self.plain_form_indirection(expr, module, func_ctx);
match (requested, plain) {
(Indirection::Ordinary, Indirection::Reference) => {
write!(self.out, "(&")?;
self.write_expr_plain_form(module, expr, func_ctx, plain)?;
write!(self.out, ")")?;
}
(Indirection::Reference, Indirection::Ordinary) => {
write!(self.out, "(*")?;
self.write_expr_plain_form(module, expr, func_ctx, plain)?;
write!(self.out, ")")?;
}
(_, _) => self.write_expr_plain_form(module, expr, func_ctx, plain)?,
}
Ok(())
}
/// Write the 'plain form' of `expr`.
///
/// An expression's 'plain form' is the most general rendition of that
/// expression into WGSL, lacking `&` or `*` operators. The plain forms of
/// `LocalVariable(x)` and `GlobalVariable(g)` are simply `x` and `g`. Such
/// Naga expressions represent both WGSL pointers and references; it's the
/// caller's responsibility to distinguish those cases appropriately.
fn write_expr_plain_form(
&mut self,
module: &Module,
expr: Handle<crate::Expression>,
func_ctx: &back::FunctionCtx<'_>,
indirection: Indirection,
) -> BackendResult {
use crate::Expression;
@ -1117,14 +940,6 @@ impl<W: Write> Writer<W> {
let expression = &func_ctx.expressions[expr];
// Write the plain WGSL form of a Naga expression.
//
// The plain form of `LocalVariable` and `GlobalVariable` expressions is
// simply the variable name; `*` and `&` operators are never emitted.
//
// The plain form of `Access` and `AccessIndex` expressions are WGSL
// `postfix_expression` forms for member/component access and
// subscripting.
match *expression {
Expression::Constant(constant) => self.write_constant(module, constant)?,
Expression::Compose { ty, ref components } => {
@ -1184,17 +999,30 @@ impl<W: Write> Writer<W> {
self.write_expr(module, right, func_ctx)?;
write!(self.out, ")")?;
}
// TODO: copy-paste from glsl-out
Expression::Access { base, index } => {
self.write_expr_with_indirection(module, base, func_ctx, indirection)?;
self.write_expr(module, base, func_ctx)?;
write!(self.out, "[")?;
self.write_expr(module, index, func_ctx)?;
write!(self.out, "]")?
}
// TODO: copy-paste from glsl-out
Expression::AccessIndex { base, index } => {
let base_ty_res = &func_ctx.info[base].ty;
let mut resolved = base_ty_res.inner_with(&module.types);
self.write_expr_with_indirection(module, base, func_ctx, indirection)?;
let deref = match *resolved {
TypeInner::Pointer { .. } => func_ctx.expressions[base].should_deref(),
_ => false,
};
if deref {
write!(self.out, "(*")?;
}
self.write_expr(module, base, func_ctx)?;
if deref {
write!(self.out, ")")?;
}
let base_ty_handle = match *resolved {
TypeInner::Pointer { base, class: _ } => {
@ -1354,17 +1182,12 @@ impl<W: Write> Writer<W> {
)?;
}
TypeInner::Vector { size, .. } => {
let vector_size_str = back::vector_size_str(size);
let scalar_kind_str = scalar_kind_str(kind);
if convert.is_some() {
write!(self.out, "vec{}<{}>", vector_size_str, scalar_kind_str)?;
} else {
write!(
self.out,
"bitcast<vec{}<{}>>",
vector_size_str, scalar_kind_str
)?;
}
write!(
self.out,
"vec{}<{}>",
back::vector_size_str(size),
scalar_kind_str(kind)
)?;
}
TypeInner::Scalar { .. } => {
if convert.is_some() {
@ -1403,25 +1226,30 @@ impl<W: Write> Writer<W> {
write!(self.out, ")")?;
}
Expression::Load { pointer } => {
let is_atomic = match *func_ctx.info[pointer].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, .. } => match module.types[base].inner {
crate::TypeInner::Atomic { .. } => true,
_ => false,
},
_ => false,
};
let (is_pointer, is_atomic) =
match *func_ctx.info[pointer].ty.inner_with(&module.types) {
crate::TypeInner::Pointer { base, .. } => (
func_ctx.expressions[pointer].should_deref(),
match module.types[base].inner {
crate::TypeInner::Atomic { .. } => true,
_ => false,
},
),
_ => (false, false),
};
if is_atomic {
write!(self.out, "atomicLoad(")?;
self.write_expr(module, pointer, func_ctx)?;
if !is_pointer {
// Write an indirection in case the underlying
// expression isn't a pointer but a reference
write!(self.out, "&")?;
}
} else if is_pointer {
write!(self.out, "*")?;
}
self.write_expr(module, pointer, func_ctx)?;
if is_atomic {
write!(self.out, ")")?;
} else {
self.write_expr_with_indirection(
module,
pointer,
func_ctx,
Indirection::Reference,
)?;
}
}
Expression::LocalVariable(handle) => {
@ -1429,6 +1257,9 @@ impl<W: Write> Writer<W> {
}
Expression::ArrayLength(expr) => {
write!(self.out, "arrayLength(")?;
if is_deref_required(expr, module, func_ctx.info) {
write!(self.out, "&")?;
};
self.write_expr(module, expr, func_ctx)?;
write!(self.out, ")")?;
}
@ -1437,7 +1268,6 @@ impl<W: Write> Writer<W> {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
@ -1505,20 +1335,6 @@ impl<W: Write> Writer<W> {
// bits
Mf::CountOneBits => Function::Regular("countOneBits"),
Mf::ReverseBits => Function::Regular("reverseBits"),
Mf::ExtractBits => Function::Regular("extractBits"),
Mf::InsertBits => Function::Regular("insertBits"),
// data packing
Mf::Pack4x8snorm => Function::Regular("pack4x8snorm"),
Mf::Pack4x8unorm => Function::Regular("pack4x8unorm"),
Mf::Pack2x16snorm => Function::Regular("pack2x16snorm"),
Mf::Pack2x16unorm => Function::Regular("pack2x16unorm"),
Mf::Pack2x16float => Function::Regular("pack2x16float"),
// data unpacking
Mf::Unpack4x8snorm => Function::Regular("unpack4x8snorm"),
Mf::Unpack4x8unorm => Function::Regular("unpack4x8unorm"),
Mf::Unpack2x16snorm => Function::Regular("unpack2x16snorm"),
Mf::Unpack2x16unorm => Function::Regular("unpack2x16unorm"),
Mf::Unpack2x16float => Function::Regular("unpack2x16float"),
_ => {
return Err(Error::UnsupportedMathFunction(fun));
}
@ -1555,10 +1371,6 @@ impl<W: Write> Writer<W> {
write!(self.out, ", ")?;
self.write_expr(module, arg, func_ctx)?;
}
if let Some(arg) = arg3 {
write!(self.out, ", ")?;
self.write_expr(module, arg, func_ctx)?;
}
write!(self.out, ")")?
}
}
@ -1652,6 +1464,7 @@ impl<W: Write> Writer<W> {
global: &crate::GlobalVariable,
handle: Handle<crate::GlobalVariable>,
) -> BackendResult {
let name = self.names[&NameKey::GlobalVariable(handle)].clone();
// Write group and dinding attributes if present
if let Some(ref binding) = global.binding {
self.write_attributes(
@ -1674,11 +1487,7 @@ impl<W: Write> Writer<W> {
}
write!(self.out, ">")?;
}
write!(
self.out,
" {}: ",
&self.names[&NameKey::GlobalVariable(handle)]
)?;
write!(self.out, " {}: ", name)?;
// Write global type
self.write_type(module, global.ty)?;
@ -1768,7 +1577,7 @@ impl<W: Write> Writer<W> {
width: _,
ref value,
} => {
let name = &self.names[&NameKey::Constant(handle)];
let name = self.names[&NameKey::Constant(handle)].clone();
// First write only constant name
write!(self.out, "let {}: ", name)?;
// Next write constant type and value
@ -1792,7 +1601,7 @@ impl<W: Write> Writer<W> {
writeln!(self.out, ";")?;
}
crate::ConstantInner::Composite { ty, ref components } => {
let name = &self.names[&NameKey::Constant(handle)];
let name = self.names[&NameKey::Constant(handle)].clone();
// First write only constant name
write!(self.out, "let {}: ", name)?;
// Next write constant type
@ -1823,6 +1632,21 @@ impl<W: Write> Writer<W> {
}
}
impl crate::Expression {
/// Wether an expression should be dereferenced, this is false when the
/// expression returns a reference instead of a pointer
fn should_deref(&self) -> bool {
match *self {
// Variables in the typifier have pointer types but in wgsl they
// have reference types and shouldn't be dereferenced
crate::Expression::LocalVariable(_) | crate::Expression::GlobalVariable(_)
// Access chains might have pointer types but wgsl considers them as references
| crate::Expression::AccessIndex {..} | crate::Expression::Access {..} => false,
_ => true,
}
}
}
fn builtin_str(built_in: crate::BuiltIn) -> Option<&'static str> {
use crate::BuiltIn as Bi;
@ -1841,7 +1665,6 @@ fn builtin_str(built_in: crate::BuiltIn) -> Option<&'static str> {
Bi::SampleIndex => Some("sample_index"),
Bi::SampleMask => Some("sample_mask"),
Bi::PrimitiveIndex => Some("primitive_index"),
Bi::ViewIndex => Some("view_index"),
_ => None,
}
}
@ -1974,6 +1797,23 @@ fn map_binding_to_attribute(
}
}
fn is_deref_required(
expr: Handle<crate::Expression>,
module: &Module,
info: &valid::FunctionInfo,
) -> bool {
let base_ty_res = &info[expr].ty;
let resolved = base_ty_res.inner_with(&module.types);
match *resolved {
TypeInner::Pointer { base, class: _ } => match module.types[base].inner {
TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Array { .. } => true,
_ => false,
},
TypeInner::ValuePointer { .. } => true,
_ => false,
}
}
/// Helper function that check that expression don't access to structure member with unsupported builtin.
fn access_to_unsupported_builtin(
expr: Handle<crate::Expression>,

18
third_party/rust/naga/src/block.rs поставляемый
Просмотреть файл

@ -23,9 +23,7 @@ impl Block {
pub fn from_vec(body: Vec<Statement>) -> Self {
#[cfg(feature = "span")]
let span_info = std::iter::repeat(Span::default())
.take(body.len())
.collect();
let span_info = std::iter::repeat(Span::Unknown).take(body.len()).collect();
Self {
body,
#[cfg(feature = "span")]
@ -60,12 +58,6 @@ impl Block {
self.body.extend(other.body);
}
pub fn append(&mut self, other: &mut Self) {
#[cfg(feature = "span")]
self.span_info.append(&mut other.span_info);
self.body.append(&mut other.body);
}
pub fn cull<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
#[cfg(feature = "span")]
self.span_info.drain(range.clone());
@ -78,14 +70,6 @@ impl Block {
.splice(range.clone(), other.span_info.into_iter());
self.body.splice(range, other.body.into_iter());
}
pub fn span_iter(&self) -> impl Iterator<Item = (&Statement, &Span)> {
#[cfg(feature = "span")]
let span_iter = self.span_info.iter();
#[cfg(not(feature = "span"))]
let span_iter = std::iter::repeat_with(|| &Span::UNDEFINED);
self.body.iter().zip(span_iter)
}
pub fn span_iter_mut(&mut self) -> impl Iterator<Item = (&mut Statement, Option<&mut Span>)> {
#[cfg(feature = "span")]

15
third_party/rust/naga/src/front/glsl/ast.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use std::fmt;
use super::{builtins::MacroCall, context::ExprPos, Span};
use super::{builtins::MacroCall, context::ExprPos, SourceMetadata};
use crate::{
BinaryOperator, Binding, Constant, Expression, Function, GlobalVariable, Handle, Interpolation,
Sampling, StorageAccess, StorageClass, Type, UnaryOperator,
@ -9,7 +9,7 @@ use crate::{
#[derive(Debug, Clone, Copy)]
pub enum GlobalLookupKind {
Variable(Handle<GlobalVariable>),
Constant(Handle<Constant>, Handle<Type>),
Constant(Handle<Constant>),
BlockSelect(Handle<GlobalVariable>, u32),
}
@ -82,14 +82,13 @@ pub struct VariableReference {
pub expr: Handle<Expression>,
pub load: bool,
pub mutable: bool,
pub constant: Option<(Handle<Constant>, Handle<Type>)>,
pub entry_arg: Option<usize>,
}
#[derive(Debug, Clone)]
pub struct HirExpr {
pub kind: HirExprKind,
pub meta: Span,
pub meta: SourceMetadata,
}
#[derive(Debug, Clone)]
@ -123,13 +122,9 @@ pub enum HirExprKind {
tgt: Handle<HirExpr>,
value: Handle<HirExpr>,
},
/// A prefix/postfix operator like `++`
PrePostfix {
/// The operation to be performed
op: BinaryOperator,
/// Whether this is a postfix or a prefix
IncDec {
increment: bool,
postfix: bool,
/// The target expression
expr: Handle<HirExpr>,
},
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,12 +1,12 @@
use crate::{
arena::{Arena, Handle, UniqueArena},
arena::{Arena, Handle},
BinaryOperator, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner,
UnaryOperator,
};
#[derive(Debug)]
pub struct ConstantSolver<'a> {
pub types: &'a mut UniqueArena<Type>,
pub types: &'a Arena<Type>,
pub expressions: &'a Arena<Expression>,
pub constants: &'a mut Arena<Constant>,
}
@ -53,10 +53,8 @@ pub enum ConstantSolvingError {
InvalidBinaryOpArgs,
#[error("Cannot apply math function to type")]
InvalidMathArg,
#[error("Splat is defined only on scalar values")]
SplatScalarOnly,
#[error("Can only swizzle vector constants")]
SwizzleVectorOnly,
#[error("Splat/swizzle type is not registered")]
DestinationTypeNotFound,
#[error("Not implemented: {0}")]
NotImplemented(String),
}
@ -66,7 +64,7 @@ impl<'a> ConstantSolver<'a> {
&mut self,
expr: Handle<Expression>,
) -> Result<Handle<Constant>, ConstantSolvingError> {
let span = self.expressions.get_span(expr);
let span = self.expressions.get_span(expr).clone();
match self.expressions[expr] {
Expression::Constant(constant) => Ok(constant),
Expression::AccessIndex { base, index } => self.access(base, index as usize),
@ -83,24 +81,25 @@ impl<'a> ConstantSolver<'a> {
let ty = match self.constants[value_constant].inner {
ConstantInner::Scalar { ref value, width } => {
let kind = value.scalar_kind();
self.types.insert(
Type {
name: None,
inner: TypeInner::Vector { size, kind, width },
},
span,
)
}
ConstantInner::Composite { .. } => {
return Err(ConstantSolvingError::SplatScalarOnly);
self.types
.fetch_if(|t| t.inner == crate::TypeInner::Vector { size, kind, width })
}
ConstantInner::Composite { .. } => None,
};
let inner = ConstantInner::Composite {
ty,
components: vec![value_constant; size as usize],
};
Ok(self.register_constant(inner, span))
//TODO: register the new type if needed
let ty = ty.ok_or(ConstantSolvingError::DestinationTypeNotFound)?;
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Composite {
ty,
components: vec![value_constant; size as usize],
},
},
span,
))
}
Expression::Swizzle {
size,
@ -109,9 +108,7 @@ impl<'a> ConstantSolver<'a> {
} => {
let src_constant = self.solve(src_vector)?;
let (ty, src_components) = match self.constants[src_constant].inner {
ConstantInner::Scalar { .. } => {
return Err(ConstantSolvingError::SwizzleVectorOnly);
}
ConstantInner::Scalar { .. } => (None, &[][..]),
ConstantInner::Composite {
ty,
components: ref src_components,
@ -121,37 +118,44 @@ impl<'a> ConstantSolver<'a> {
kind,
width,
} => {
let dst_ty = self.types.insert(
Type {
name: None,
inner: crate::TypeInner::Vector { size, kind, width },
},
span,
);
let dst_ty = self.types.fetch_if(|t| {
t.inner == crate::TypeInner::Vector { size, kind, width }
});
(dst_ty, &src_components[..])
}
_ => {
return Err(ConstantSolvingError::SwizzleVectorOnly);
}
_ => (None, &[][..]),
},
};
//TODO: register the new type if needed
let ty = ty.ok_or(ConstantSolvingError::DestinationTypeNotFound)?;
let components = pattern
.iter()
.map(|&sc| src_components[sc as usize])
.collect();
let inner = ConstantInner::Composite { ty, components };
Ok(self.register_constant(inner, span))
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Composite { ty, components },
},
span,
))
}
Expression::Compose { ty, ref components } => {
let components = components
.iter()
.map(|c| self.solve(*c))
.collect::<Result<_, _>>()?;
let inner = ConstantInner::Composite { ty, components };
Ok(self.register_constant(inner, span))
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Composite { ty, components },
},
span,
))
}
Expression::Unary { expr, op } => {
let expr_constant = self.solve(expr)?;
@ -198,8 +202,14 @@ impl<'a> ConstantSolver<'a> {
_ => return Err(ConstantSolvingError::InvalidMathArg),
};
let inner = ConstantInner::Scalar { width, value };
Ok(self.register_constant(inner, span))
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Scalar { width, value },
},
span,
))
}
_ => Err(ConstantSolvingError::NotImplemented(format!("{:?}", fun))),
}
@ -293,6 +303,16 @@ impl<'a> ConstantSolver<'a> {
target_width: crate::Bytes,
span: crate::Span,
) -> Result<Handle<Constant>, ConstantSolvingError> {
fn inner_cast<A: num_traits::FromPrimitive>(value: ScalarValue) -> A {
match value {
ScalarValue::Sint(v) => A::from_i64(v),
ScalarValue::Uint(v) => A::from_u64(v),
ScalarValue::Float(v) => A::from_f64(v),
ScalarValue::Bool(v) => A::from_u64(v as u64),
}
.unwrap()
}
let mut inner = self.constants[constant].inner.clone();
match inner {
@ -302,30 +322,10 @@ impl<'a> ConstantSolver<'a> {
} => {
*width = target_width;
*value = match kind {
ScalarKind::Sint => ScalarValue::Sint(match *value {
ScalarValue::Sint(v) => v,
ScalarValue::Uint(v) => v as i64,
ScalarValue::Float(v) => v as i64,
ScalarValue::Bool(v) => v as i64,
}),
ScalarKind::Uint => ScalarValue::Uint(match *value {
ScalarValue::Sint(v) => v as u64,
ScalarValue::Uint(v) => v,
ScalarValue::Float(v) => v as u64,
ScalarValue::Bool(v) => v as u64,
}),
ScalarKind::Float => ScalarValue::Float(match *value {
ScalarValue::Sint(v) => v as f64,
ScalarValue::Uint(v) => v as f64,
ScalarValue::Float(v) => v,
ScalarValue::Bool(v) => v as u64 as f64,
}),
ScalarKind::Bool => ScalarValue::Bool(match *value {
ScalarValue::Sint(v) => v != 0,
ScalarValue::Uint(v) => v != 0,
ScalarValue::Float(v) => v != 0.0,
ScalarValue::Bool(v) => v,
}),
ScalarKind::Sint => ScalarValue::Sint(inner_cast(*value)),
ScalarKind::Uint => ScalarValue::Uint(inner_cast(*value)),
ScalarKind::Float => ScalarValue::Float(inner_cast(*value)),
ScalarKind::Bool => ScalarValue::Bool(inner_cast::<u64>(*value) != 0),
}
}
ConstantInner::Composite {
@ -338,12 +338,19 @@ impl<'a> ConstantSolver<'a> {
}
for component in components {
*component = self.cast(*component, kind, target_width, span)?;
*component = self.cast(*component, kind, target_width, span.clone())?;
}
}
}
Ok(self.register_constant(inner, span))
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner,
},
span,
))
}
fn unary_op(
@ -378,12 +385,19 @@ impl<'a> ConstantSolver<'a> {
}
for component in components {
*component = self.unary_op(op, *component, span)?
*component = self.unary_op(op, *component, span.clone())?
}
}
}
Ok(self.register_constant(inner, span))
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner,
},
span,
))
}
fn binary_op(
@ -418,36 +432,31 @@ impl<'a> ConstantSolver<'a> {
_ => match (left_value, right_value) {
(ScalarValue::Sint(a), ScalarValue::Sint(b)) => {
ScalarValue::Sint(match op {
BinaryOperator::Add => a.wrapping_add(b),
BinaryOperator::Subtract => a.wrapping_sub(b),
BinaryOperator::Multiply => a.wrapping_mul(b),
BinaryOperator::Divide => a.checked_div(b).unwrap_or(0),
BinaryOperator::Modulo => a.checked_rem(b).unwrap_or(0),
BinaryOperator::Add => a + b,
BinaryOperator::Subtract => a - b,
BinaryOperator::Multiply => a * b,
BinaryOperator::Divide => a / b,
BinaryOperator::Modulo => a % b,
BinaryOperator::And => a & b,
BinaryOperator::ExclusiveOr => a ^ b,
BinaryOperator::InclusiveOr => a | b,
_ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
})
}
(ScalarValue::Sint(a), ScalarValue::Uint(b)) => {
ScalarValue::Sint(match op {
BinaryOperator::ShiftLeft => a.wrapping_shl(b as u32),
BinaryOperator::ShiftRight => a.wrapping_shr(b as u32),
BinaryOperator::ShiftLeft => a << b,
BinaryOperator::ShiftRight => a >> b,
_ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
})
}
(ScalarValue::Uint(a), ScalarValue::Uint(b)) => {
ScalarValue::Uint(match op {
BinaryOperator::Add => a.wrapping_add(b),
BinaryOperator::Subtract => a.wrapping_sub(b),
BinaryOperator::Multiply => a.wrapping_mul(b),
BinaryOperator::Divide => a.checked_div(b).unwrap_or(0),
BinaryOperator::Modulo => a.checked_rem(b).unwrap_or(0),
BinaryOperator::Add => a + b,
BinaryOperator::Subtract => a - b,
BinaryOperator::Multiply => a * b,
BinaryOperator::Divide => a / b,
BinaryOperator::Modulo => a % b,
BinaryOperator::And => a & b,
BinaryOperator::ExclusiveOr => a ^ b,
BinaryOperator::InclusiveOr => a | b,
BinaryOperator::ShiftLeft => a.wrapping_shl(b as u32),
BinaryOperator::ShiftRight => a.wrapping_shr(b as u32),
BinaryOperator::ShiftLeft => a << b,
BinaryOperator::ShiftRight => a >> b,
_ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
})
}
@ -477,32 +486,28 @@ impl<'a> ConstantSolver<'a> {
(&ConstantInner::Composite { ref components, ty }, &ConstantInner::Scalar { .. }) => {
let mut components = components.clone();
for comp in components.iter_mut() {
*comp = self.binary_op(op, *comp, right, span)?;
*comp = self.binary_op(op, *comp, right, span.clone())?;
}
ConstantInner::Composite { ty, components }
}
(&ConstantInner::Scalar { .. }, &ConstantInner::Composite { ref components, ty }) => {
let mut components = components.clone();
for comp in components.iter_mut() {
*comp = self.binary_op(op, left, *comp, span)?;
*comp = self.binary_op(op, left, *comp, span.clone())?;
}
ConstantInner::Composite { ty, components }
}
_ => return Err(ConstantSolvingError::InvalidBinaryOpArgs),
};
Ok(self.register_constant(inner, span))
}
fn register_constant(&mut self, inner: ConstantInner, span: crate::Span) -> Handle<Constant> {
self.constants.fetch_or_append(
Ok(self.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner,
},
span,
)
))
}
}
@ -512,18 +517,18 @@ mod tests {
use crate::{
Arena, Constant, ConstantInner, Expression, ScalarKind, ScalarValue, Type, TypeInner,
UnaryOperator, UniqueArena, VectorSize,
UnaryOperator, VectorSize,
};
use super::ConstantSolver;
#[test]
fn unary_op() {
let mut types = UniqueArena::new();
let mut types = Arena::new();
let mut expressions = Arena::new();
let mut constants = Arena::new();
let vec_ty = types.insert(
let vec_ty = types.append(
Type {
name: None,
inner: TypeInner::Vector {
@ -599,7 +604,7 @@ mod tests {
);
let mut solver = ConstantSolver {
types: &mut types,
types: &types,
expressions: &expressions,
constants: &mut constants,
};
@ -679,7 +684,7 @@ mod tests {
);
let mut solver = ConstantSolver {
types: &mut UniqueArena::new(),
types: &Arena::new(),
expressions: &expressions,
constants: &mut constants,
};
@ -697,11 +702,11 @@ mod tests {
#[test]
fn access() {
let mut types = UniqueArena::new();
let mut types = Arena::new();
let mut expressions = Arena::new();
let mut constants = Arena::new();
let matrix_ty = types.insert(
let matrix_ty = types.append(
Type {
name: None,
inner: TypeInner::Matrix {
@ -713,7 +718,7 @@ mod tests {
Default::default(),
);
let vec_ty = types.insert(
let vec_ty = types.append(
Type {
name: None,
inner: TypeInner::Vector {
@ -810,7 +815,7 @@ mod tests {
);
let mut solver = ConstantSolver {
types: &mut types,
types: &types,
expressions: &expressions,
constants: &mut constants,
};

Просмотреть файл

@ -5,13 +5,13 @@ use super::{
},
error::{Error, ErrorKind},
types::{scalar_components, type_power},
Parser, Result,
Parser, Result, SourceMetadata,
};
use crate::{
front::{Emitter, Typifier},
Arena, BinaryOperator, Block, Constant, Expression, FastHashMap, FunctionArgument, Handle,
LocalVariable, RelationalFunction, ScalarKind, ScalarValue, Span, Statement, StorageClass,
Type, TypeInner, VectorSize,
LocalVariable, RelationalFunction, ScalarKind, ScalarValue, Statement, StorageClass, Type,
TypeInner, VectorSize,
};
use std::{convert::TryFrom, ops::Index};
@ -24,18 +24,15 @@ pub enum ExprPos {
Rhs,
/// The expression is an array being indexed, needed to allow constant
/// arrays to be dinamically indexed
ArrayBase {
/// The index is a constant
constant_index: bool,
},
ArrayBase,
}
impl ExprPos {
/// Returns an lhs position if the current position is lhs otherwise ArrayBase
fn maybe_array_base(&self, constant_index: bool) -> Self {
fn maybe_array_base(&self) -> Self {
match *self {
ExprPos::Lhs => *self,
_ => ExprPos::ArrayBase { constant_index },
_ => ExprPos::ArrayBase,
}
}
}
@ -102,57 +99,51 @@ impl Context {
body: &mut Block,
) {
self.emit_flush(body);
let (expr, load, constant) = match kind {
let (expr, load) = match kind {
GlobalLookupKind::Variable(v) => {
let span = parser.module.global_variables.get_span(v);
let span = parser.module.global_variables.get_span(v).clone();
let res = (
self.expressions.append(Expression::GlobalVariable(v), span),
parser.module.global_variables[v].class != StorageClass::Handle,
None,
);
self.emit_start();
res
}
GlobalLookupKind::BlockSelect(handle, index) => {
let span = parser.module.global_variables.get_span(handle);
let span = parser.module.global_variables.get_span(handle).clone();
let base = self
.expressions
.append(Expression::GlobalVariable(handle), span);
.append(Expression::GlobalVariable(handle), span.clone());
self.emit_start();
let expr = self
.expressions
.append(Expression::AccessIndex { base, index }, span);
(
expr,
{
let ty = parser.module.global_variables[handle].ty;
(expr, {
let ty = parser.module.global_variables[handle].ty;
match parser.module.types[ty].inner {
TypeInner::Struct { ref members, .. } => {
if let TypeInner::Array {
size: crate::ArraySize::Dynamic,
..
} = parser.module.types[members[index as usize].ty].inner
{
false
} else {
true
}
match parser.module.types[ty].inner {
TypeInner::Struct { ref members, .. } => {
if let TypeInner::Array {
size: crate::ArraySize::Dynamic,
..
} = parser.module.types[members[index as usize].ty].inner
{
false
} else {
true
}
_ => true,
}
},
None,
)
_ => true,
}
})
}
GlobalLookupKind::Constant(v, ty) => {
let span = parser.module.constants.get_span(v);
GlobalLookupKind::Constant(v) => {
let span = parser.module.constants.get_span(v).clone();
let res = (
self.expressions.append(Expression::Constant(v), span),
false,
Some((v, ty)),
);
self.emit_start();
res
@ -163,7 +154,6 @@ impl Context {
expr,
load,
mutable,
constant,
entry_arg,
};
@ -181,14 +171,14 @@ impl Context {
pub fn add_expression(
&mut self,
expr: Expression,
meta: Span,
meta: SourceMetadata,
body: &mut Block,
) -> Handle<Expression> {
let needs_pre_emit = expr.needs_pre_emit();
if needs_pre_emit {
self.emit_flush(body);
}
let handle = self.expressions.append(expr, meta);
let handle = self.expressions.append(expr, meta.as_span());
if needs_pre_emit {
self.emit_start();
}
@ -226,7 +216,6 @@ impl Context {
expr,
load: true,
mutable,
constant: None,
entry_arg: None,
},
);
@ -238,7 +227,7 @@ impl Context {
&mut self,
parser: &mut Parser,
body: &mut Block,
name_meta: Option<(String, Span)>,
name_meta: Option<(String, SourceMetadata)>,
ty: Handle<Type>,
qualifier: ParameterQualifier,
) {
@ -256,8 +245,8 @@ impl Context {
};
if qualifier.is_lhs() {
let span = parser.module.types.get_span(arg.ty);
arg.ty = parser.module.types.insert(
let span = parser.module.types.get_span(arg.ty).clone();
arg.ty = parser.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Pointer {
@ -288,7 +277,7 @@ impl Context {
ty,
init: None,
},
meta,
meta.as_span(),
);
let local_expr = self.add_expression(Expression::LocalVariable(handle), meta, body);
@ -300,7 +289,7 @@ impl Context {
pointer: local_expr,
value: expr,
},
meta,
meta.as_span(),
);
if let Some(current) = self.scopes.last_mut() {
@ -310,7 +299,6 @@ impl Context {
expr: local_expr,
load: true,
mutable,
constant: None,
entry_arg: None,
},
);
@ -322,7 +310,6 @@ impl Context {
expr,
load,
mutable,
constant: None,
entry_arg: None,
},
);
@ -360,7 +347,7 @@ impl Context {
expr: Handle<HirExpr>,
pos: ExprPos,
body: &mut Block,
) -> Result<(Option<Handle<Expression>>, Span)> {
) -> Result<(Option<Handle<Expression>>, SourceMetadata)> {
let res = self.lower_inner(&stmt, parser, expr, pos, body);
stmt.hir_exprs.clear();
@ -381,7 +368,7 @@ impl Context {
expr: Handle<HirExpr>,
pos: ExprPos,
body: &mut Block,
) -> Result<(Handle<Expression>, Span)> {
) -> Result<(Handle<Expression>, SourceMetadata)> {
let res = self.lower_expect_inner(&stmt, parser, expr, pos, body);
stmt.hir_exprs.clear();
@ -402,7 +389,7 @@ impl Context {
expr: Handle<HirExpr>,
pos: ExprPos,
body: &mut Block,
) -> Result<(Handle<Expression>, Span)> {
) -> Result<(Handle<Expression>, SourceMetadata)> {
let (maybe_expr, meta) = self.lower_inner(stmt, parser, expr, pos, body)?;
let expr = match maybe_expr {
@ -426,31 +413,20 @@ impl Context {
expr: Handle<HirExpr>,
pos: ExprPos,
body: &mut Block,
) -> Result<(Option<Handle<Expression>>, Span)> {
) -> Result<(Option<Handle<Expression>>, SourceMetadata)> {
let HirExpr { ref kind, meta } = stmt.hir_exprs[expr];
let handle = match *kind {
HirExprKind::Access { base, index } => {
let base = self
.lower_expect_inner(stmt, parser, base, pos.maybe_array_base(), body)?
.0;
let (index, index_meta) =
self.lower_expect_inner(stmt, parser, index, ExprPos::Rhs, body)?;
let maybe_constant_index = match pos {
// Don't try to generate `AccessIndex` if in a LHS position, since it
// wouldn't produce a pointer.
ExprPos::Lhs => None,
_ => parser.solve_constant(self, index, index_meta).ok(),
};
let base = self
.lower_expect_inner(
stmt,
parser,
base,
pos.maybe_array_base(maybe_constant_index.is_some()),
body,
)?
.0;
let pointer = maybe_constant_index
let pointer = parser
.solve_constant(self, index, index_meta)
.ok()
.and_then(|constant| {
Some(self.add_expression(
Expression::AccessIndex {
@ -492,10 +468,10 @@ impl Context {
parser.field_selection(self, ExprPos::Lhs == pos, body, base, field, meta)?
}
HirExprKind::Constant(constant) if pos != ExprPos::Lhs => {
HirExprKind::Constant(constant) if pos == ExprPos::Rhs => {
self.add_expression(Expression::Constant(constant), meta, body)
}
HirExprKind::Binary { left, op, right } if pos != ExprPos::Lhs => {
HirExprKind::Binary { left, op, right } if pos == ExprPos::Rhs => {
let (mut left, left_meta) =
self.lower_expect_inner(stmt, parser, left, pos, body)?;
let (mut right, right_meta) =
@ -528,7 +504,7 @@ impl Context {
let argument = self
.expressions
.append(Expression::Binary { op, left, right }, meta);
.append(Expression::Binary { op, left, right }, meta.as_span());
self.add_expression(
Expression::Relational { fun, argument },
@ -544,9 +520,6 @@ impl Context {
BinaryOperator::Add
| BinaryOperator::Subtract
| BinaryOperator::Divide
| BinaryOperator::And
| BinaryOperator::ExclusiveOr
| BinaryOperator::InclusiveOr
| BinaryOperator::ShiftLeft
| BinaryOperator::ShiftRight => {
let scalar_vector = self.add_expression(
@ -570,12 +543,7 @@ impl Context {
}
},
(&TypeInner::Scalar { .. }, &TypeInner::Vector { size, .. }) => match op {
BinaryOperator::Add
| BinaryOperator::Subtract
| BinaryOperator::Divide
| BinaryOperator::And
| BinaryOperator::ExclusiveOr
| BinaryOperator::InclusiveOr => {
BinaryOperator::Add | BinaryOperator::Subtract | BinaryOperator::Divide => {
let scalar_vector = self.add_expression(
Expression::Splat { size, value: left },
meta,
@ -599,14 +567,14 @@ impl Context {
_ => self.add_expression(Expression::Binary { left, op, right }, meta, body),
}
}
HirExprKind::Unary { op, expr } if pos != ExprPos::Lhs => {
HirExprKind::Unary { op, expr } if pos == ExprPos::Rhs => {
let expr = self.lower_expect_inner(stmt, parser, expr, pos, body)?.0;
self.add_expression(Expression::Unary { op, expr }, meta, body)
}
HirExprKind::Variable(ref var) => match pos {
ExprPos::Lhs => {
if !var.mutable {
HirExprKind::Variable(ref var) => {
if pos != ExprPos::Rhs {
if !var.mutable && ExprPos::Lhs == pos {
parser.errors.push(Error {
kind: ErrorKind::SemanticError(
"Variable cannot be used in LHS position".into(),
@ -616,30 +584,12 @@ impl Context {
}
var.expr
}
ExprPos::ArrayBase {
constant_index: false,
} => {
if let Some((constant, ty)) = var.constant {
let local = self.locals.append(
LocalVariable {
name: None,
ty,
init: Some(constant),
},
Span::default(),
);
self.add_expression(Expression::LocalVariable(local), Span::default(), body)
} else {
var.expr
}
}
_ if var.load => {
} else if var.load {
self.add_expression(Expression::Load { pointer: var.expr }, meta, body)
} else {
var.expr
}
_ => var.expr,
},
}
HirExprKind::Call(ref call) if pos != ExprPos::Lhs => {
let maybe_expr = parser.function_or_constructor_call(
self,
@ -739,120 +689,114 @@ impl Context {
pointer: dst,
value: src,
},
meta,
meta.as_span(),
);
}
} else {
self.emit_flush(body);
self.emit_start();
body.push(Statement::Store { pointer, value }, meta);
body.push(Statement::Store { pointer, value }, meta.as_span());
}
value
}
HirExprKind::PrePostfix { op, postfix, expr } if ExprPos::Lhs != pos => {
HirExprKind::IncDec {
increment,
postfix,
expr,
} => {
let op = match increment {
true => BinaryOperator::Add,
false => BinaryOperator::Subtract,
};
let pointer = self
.lower_expect_inner(stmt, parser, expr, ExprPos::Lhs, body)?
.0;
let left = self.add_expression(Expression::Load { pointer }, meta, body);
let make_constant_inner = |kind, width| {
let value = match kind {
ScalarKind::Sint => crate::ScalarValue::Sint(1),
ScalarKind::Uint => crate::ScalarValue::Uint(1),
ScalarKind::Float => crate::ScalarValue::Float(1.0),
ScalarKind::Bool => return None,
};
Some(crate::ConstantInner::Scalar { width, value })
};
let res = match *parser.resolve_type(self, left, meta)? {
TypeInner::Scalar { kind, width } => {
let ty = TypeInner::Scalar { kind, width };
make_constant_inner(kind, width).map(|i| (ty, i, None, None))
}
TypeInner::Vector { size, kind, width } => {
let ty = TypeInner::Vector { size, kind, width };
make_constant_inner(kind, width).map(|i| (ty, i, Some(size), None))
}
TypeInner::Matrix {
columns,
rows,
width,
} => {
let ty = TypeInner::Matrix {
columns,
rows,
width,
};
make_constant_inner(ScalarKind::Float, width)
.map(|i| (ty, i, Some(rows), Some(columns)))
}
_ => None,
};
let (ty_inner, inner, rows, columns) = match res {
Some(res) => res,
None => {
let uint = match parser.resolve_type(self, left, meta)?.scalar_kind() {
Some(ScalarKind::Sint) => false,
Some(ScalarKind::Uint) => true,
_ => {
parser.errors.push(Error {
kind: ErrorKind::SemanticError(
"Increment/decrement only works on scalar/vector/matrix".into(),
"Increment/decrement operations must operate in integers".into(),
),
meta,
});
return Ok((Some(left), meta));
true
}
};
let constant_1 = parser.module.constants.append(
let one = parser.module.constants.append(
Constant {
name: None,
specialization: None,
inner,
inner: crate::ConstantInner::Scalar {
width: 4,
value: match uint {
true => crate::ScalarValue::Uint(1),
false => crate::ScalarValue::Sint(1),
},
},
},
Default::default(),
);
let mut right = self.add_expression(Expression::Constant(constant_1), meta, body);
// Glsl allows pre/postfixes operations on vectors and matrices, so if the
// target is either of them change the right side of the addition to be splatted
// to the same size as the target, furthermore if the target is a matrix
// use a composed matrix using the splatted value.
if let Some(size) = rows {
right =
self.add_expression(Expression::Splat { size, value: right }, meta, body);
if let Some(cols) = columns {
let ty = parser.module.types.insert(
Type {
name: None,
inner: ty_inner,
},
meta,
);
right = self.add_expression(
Expression::Compose {
ty,
components: std::iter::repeat(right).take(cols as usize).collect(),
},
meta,
body,
);
}
}
let right = self.add_expression(Expression::Constant(one), meta, body);
let value = self.add_expression(Expression::Binary { op, left, right }, meta, body);
self.emit_flush(body);
self.emit_start();
body.push(Statement::Store { pointer, value }, meta);
if postfix {
left
let local = self.locals.append(
LocalVariable {
name: None,
ty: parser.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Scalar {
kind: match uint {
true => ScalarKind::Uint,
false => ScalarKind::Sint,
},
width: 4,
},
},
meta.as_span(),
),
init: None,
},
meta.as_span(),
);
let expr = self.add_expression(Expression::LocalVariable(local), meta, body);
let load = self.add_expression(Expression::Load { pointer: expr }, meta, body);
self.emit_flush(body);
self.emit_start();
body.push(
Statement::Store {
pointer: expr,
value: left,
},
meta.as_span(),
);
self.emit_flush(body);
self.emit_start();
body.push(Statement::Store { pointer, value }, meta.as_span());
load
} else {
value
self.emit_flush(body);
self.emit_start();
body.push(Statement::Store { pointer, value }, meta.as_span());
left
}
}
_ => {
@ -873,7 +817,7 @@ impl Context {
&mut self,
parser: &mut Parser,
expr: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<Option<(ScalarKind, crate::Bytes)>> {
let ty = parser.resolve_type(self, expr, meta)?;
Ok(scalar_components(ty))
@ -883,37 +827,18 @@ impl Context {
&mut self,
parser: &mut Parser,
expr: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<Option<u32>> {
Ok(self
.expr_scalar_components(parser, expr, meta)?
.and_then(|(kind, width)| type_power(kind, width)))
}
pub fn conversion(
&mut self,
expr: &mut Handle<Expression>,
meta: Span,
kind: ScalarKind,
width: crate::Bytes,
) -> Result<()> {
*expr = self.expressions.append(
Expression::As {
expr: *expr,
kind,
convert: Some(width),
},
meta,
);
Ok(())
}
pub fn implicit_conversion(
&mut self,
parser: &mut Parser,
expr: &mut Handle<Expression>,
meta: Span,
meta: SourceMetadata,
kind: ScalarKind,
width: crate::Bytes,
) -> Result<()> {
@ -922,7 +847,14 @@ impl Context {
self.expr_power(parser, *expr, meta)?,
) {
if tgt_power > expr_power {
self.conversion(expr, meta, kind, width)?;
*expr = self.expressions.append(
Expression::As {
expr: *expr,
kind,
convert: Some(width),
},
meta.as_span(),
)
}
}
@ -933,9 +865,9 @@ impl Context {
&mut self,
parser: &mut Parser,
left: &mut Handle<Expression>,
left_meta: Span,
left_meta: SourceMetadata,
right: &mut Handle<Expression>,
right_meta: Span,
right_meta: SourceMetadata,
) -> Result<()> {
let left_components = self.expr_scalar_components(parser, *left, left_meta)?;
let right_components = self.expr_scalar_components(parser, *right, right_meta)?;
@ -950,11 +882,25 @@ impl Context {
) {
match left_power.cmp(&right_power) {
std::cmp::Ordering::Less => {
self.conversion(left, left_meta, right_kind, right_width)?;
*left = self.expressions.append(
Expression::As {
expr: *left,
kind: right_kind,
convert: Some(right_width),
},
left_meta.as_span(),
)
}
std::cmp::Ordering::Equal => {}
std::cmp::Ordering::Greater => {
self.conversion(right, right_meta, left_kind, left_width)?;
*right = self.expressions.append(
Expression::As {
expr: *right,
kind: left_kind,
convert: Some(left_width),
},
right_meta.as_span(),
)
}
}
}
@ -966,7 +912,7 @@ impl Context {
&mut self,
parser: &mut Parser,
expr: &mut Handle<Expression>,
meta: Span,
meta: SourceMetadata,
vector_size: Option<VectorSize>,
) -> Result<()> {
let expr_type = parser.resolve_type(self, *expr, meta)?;
@ -974,7 +920,7 @@ impl Context {
if let (&TypeInner::Scalar { .. }, Some(size)) = (expr_type, vector_size) {
*expr = self
.expressions
.append(Expression::Splat { size, value: *expr }, meta)
.append(Expression::Splat { size, value: *expr }, meta.as_span())
}
Ok(())
@ -984,7 +930,7 @@ impl Context {
&mut self,
size: VectorSize,
vector: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
body: &mut Block,
) -> Handle<Expression> {
self.add_expression(

Просмотреть файл

@ -1,5 +1,7 @@
use super::{constants::ConstantSolvingError, token::TokenValue};
use crate::Span;
use super::{
constants::ConstantSolvingError,
token::{SourceMetadata, TokenValue},
};
use pp_rs::token::PreprocessorError;
use std::borrow::Cow;
use thiserror::Error;
@ -120,5 +122,5 @@ pub struct Error {
/// Holds the information about the error itself.
pub kind: ErrorKind,
/// Holds information about the range of the source code where the error happened.
pub meta: Span,
pub meta: SourceMetadata,
}

Разница между файлами не показана из-за своего большого размера Загрузить разницу

35
third_party/rust/naga/src/front/glsl/lex.rs поставляемый
Просмотреть файл

@ -1,9 +1,9 @@
use super::{
ast::Precision,
token::{Directive, DirectiveKind, Token, TokenValue},
token::{Directive, DirectiveKind, SourceMetadata, Token, TokenValue},
types::parse_type,
};
use crate::{FastHashMap, Span, StorageAccess};
use crate::{FastHashMap, StorageAccess};
use pp_rs::{
pp::Preprocessor,
token::{PreprocessorError, Punct, TokenValue as PPTokenValue},
@ -13,7 +13,7 @@ use pp_rs::{
#[cfg_attr(test, derive(PartialEq))]
pub struct LexerResult {
pub kind: LexerResultKind,
pub meta: Span,
pub meta: SourceMetadata,
}
#[derive(Debug)]
@ -202,10 +202,9 @@ mod tests {
use pp_rs::token::{Integer, Location, Token as PPToken, TokenValue as PPTokenValue};
use super::{
super::token::{Directive, DirectiveKind, Token, TokenValue},
super::token::{Directive, DirectiveKind, SourceMetadata, Token, TokenValue},
Lexer, LexerResult, LexerResultKind,
};
use crate::Span;
#[test]
fn lex_tokens() {
@ -232,7 +231,7 @@ mod tests {
location
}]
}),
meta: Span::new(1, 8)
meta: SourceMetadata { start: 1, end: 8 }
}
);
assert_eq!(
@ -240,9 +239,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::Void,
meta: Span::new(13, 17)
meta: SourceMetadata { start: 13, end: 17 }
}),
meta: Span::new(13, 17)
meta: SourceMetadata { start: 13, end: 17 }
}
);
assert_eq!(
@ -250,9 +249,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::Identifier("main".into()),
meta: Span::new(18, 22)
meta: SourceMetadata { start: 18, end: 22 }
}),
meta: Span::new(18, 22)
meta: SourceMetadata { start: 18, end: 22 }
}
);
assert_eq!(
@ -260,9 +259,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::LeftParen,
meta: Span::new(23, 24)
meta: SourceMetadata { start: 23, end: 24 }
}),
meta: Span::new(23, 24)
meta: SourceMetadata { start: 23, end: 24 }
}
);
assert_eq!(
@ -270,9 +269,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::RightParen,
meta: Span::new(24, 25)
meta: SourceMetadata { start: 24, end: 25 }
}),
meta: Span::new(24, 25)
meta: SourceMetadata { start: 24, end: 25 }
}
);
assert_eq!(
@ -280,9 +279,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::LeftBrace,
meta: Span::new(26, 27)
meta: SourceMetadata { start: 26, end: 27 }
}),
meta: Span::new(26, 27)
meta: SourceMetadata { start: 26, end: 27 }
}
);
assert_eq!(
@ -290,9 +289,9 @@ mod tests {
LexerResult {
kind: LexerResultKind::Token(Token {
value: TokenValue::RightBrace,
meta: Span::new(27, 28)
meta: SourceMetadata { start: 27, end: 28 }
}),
meta: Span::new(27, 28)
meta: SourceMetadata { start: 27, end: 28 }
}
);
assert_eq!(lex.next(), None);

4
third_party/rust/naga/src/front/glsl/mod.rs поставляемый
Просмотреть файл

@ -8,9 +8,9 @@
pub use ast::{Precision, Profile};
pub use error::{Error, ErrorKind, ExpectedToken};
pub use token::TokenValue;
pub use token::{SourceMetadata, TokenValue};
use crate::{FastHashMap, FastHashSet, Handle, Module, ShaderStage, Span, Type};
use crate::{FastHashMap, FastHashSet, Handle, Module, ShaderStage, Type};
use ast::{EntryArg, FunctionDeclaration, GlobalLookup};
use parser::ParsingContext;

Просмотреть файл

@ -11,9 +11,9 @@
use super::{
ast::StructLayout,
error::{Error, ErrorKind},
Span,
SourceMetadata,
};
use crate::{front::align_up, Arena, Constant, Handle, Type, TypeInner, UniqueArena};
use crate::{front::align_up, Arena, Constant, Handle, Type, TypeInner};
/// Struct with information needed for defining a struct member.
///
@ -37,9 +37,9 @@ pub struct TypeAlignSpan {
/// change the stride and as such need to have a different type.
pub fn calculate_offset(
mut ty: Handle<Type>,
meta: Span,
meta: SourceMetadata,
layout: StructLayout,
types: &mut UniqueArena<Type>,
types: &mut Arena<Type>,
constants: &Arena<Constant>,
errors: &mut Vec<Error>,
) -> TypeAlignSpan {
@ -84,8 +84,8 @@ pub fn calculate_offset(
crate::ArraySize::Dynamic => stride,
};
let ty_span = types.get_span(ty);
ty = types.insert(
let ty_span = types.get_span(ty).clone();
ty = types.fetch_or_append(
Type {
name,
inner: TypeInner::Array {
@ -144,8 +144,8 @@ pub fn calculate_offset(
span = align_up(span, align);
let ty_span = types.get_span(ty);
ty = types.insert(
let ty_span = types.get_span(ty).clone();
ty = types.fetch_or_append(
Type {
name,
inner: TypeInner::Struct {

Просмотреть файл

@ -5,11 +5,11 @@ use super::{
error::{Error, ErrorKind},
lex::{Lexer, LexerResultKind},
token::{Directive, DirectiveKind},
token::{Token, TokenValue},
token::{SourceMetadata, Token, TokenValue},
variables::{GlobalOrConstant, VarDeclaration},
Parser, Result,
};
use crate::{arena::Handle, Block, Constant, ConstantInner, Expression, ScalarValue, Span, Type};
use crate::{arena::Handle, Block, Constant, ConstantInner, Expression, ScalarValue, Type};
use core::convert::TryFrom;
use pp_rs::token::{PreprocessorError, Token as PPToken, TokenValue as PPTokenValue};
use std::iter::Peekable;
@ -21,18 +21,18 @@ mod types;
pub struct ParsingContext<'source> {
lexer: Peekable<Lexer<'source>>,
last_meta: Span,
last_meta: SourceMetadata,
}
impl<'source> ParsingContext<'source> {
pub fn new(lexer: Lexer<'source>) -> Self {
ParsingContext {
lexer: lexer.peekable(),
last_meta: Span::default(),
last_meta: SourceMetadata::none(),
}
}
pub fn expect_ident(&mut self, parser: &mut Parser) -> Result<(String, Span)> {
pub fn expect_ident(&mut self, parser: &mut Parser) -> Result<(String, SourceMetadata)> {
let token = self.bump(parser)?;
match token.value {
@ -153,14 +153,14 @@ impl<'source> ParsingContext<'source> {
Some(handle) => parser.add_entry_point(handle, body, ctx.expressions),
None => parser.errors.push(Error {
kind: ErrorKind::SemanticError("Missing entry point".into()),
meta: Span::default(),
meta: SourceMetadata::none(),
}),
}
Ok(())
}
fn parse_uint_constant(&mut self, parser: &mut Parser) -> Result<(u32, Span)> {
fn parse_uint_constant(&mut self, parser: &mut Parser) -> Result<(u32, SourceMetadata)> {
let (value, meta) = self.parse_constant_expression(parser)?;
let int = match parser.module.constants[value].inner {
@ -192,7 +192,7 @@ impl<'source> ParsingContext<'source> {
fn parse_constant_expression(
&mut self,
parser: &mut Parser,
) -> Result<(Handle<Constant>, Span)> {
) -> Result<(Handle<Constant>, SourceMetadata)> {
let mut block = Block::new();
let mut ctx = Context::new(parser, &mut block);
@ -206,7 +206,7 @@ impl<'source> ParsingContext<'source> {
}
impl Parser {
fn handle_directive(&mut self, directive: Directive, meta: Span) {
fn handle_directive(&mut self, directive: Directive, meta: SourceMetadata) {
let mut tokens = directive.tokens.into_iter();
match directive.kind {
@ -367,7 +367,7 @@ impl Parser {
}
pub struct DeclarationContext<'ctx> {
qualifiers: Vec<(TypeQualifier, Span)>,
qualifiers: Vec<(TypeQualifier, SourceMetadata)>,
external: bool,
ctx: &'ctx mut Context,
@ -381,7 +381,7 @@ impl<'ctx> DeclarationContext<'ctx> {
ty: Handle<Type>,
name: String,
init: Option<Handle<Constant>>,
meta: Span,
meta: SourceMetadata,
) -> Result<Handle<Expression>> {
let decl = VarDeclaration {
qualifiers: &self.qualifiers,

Просмотреть файл

@ -10,7 +10,7 @@ use crate::{
token::{Token, TokenValue},
types::scalar_components,
variables::{GlobalOrConstant, VarDeclaration},
Error, ErrorKind, Parser, Span,
Error, ErrorKind, Parser, SourceMetadata,
},
Block, Expression, FunctionResult, Handle, ScalarKind, Statement, StorageClass, StructMember,
Type, TypeInner,
@ -54,7 +54,7 @@ impl<'source> ParsingContext<'source> {
ty: Handle<Type>,
ctx: &mut Context,
body: &mut Block,
) -> Result<(Handle<Expression>, Span)> {
) -> Result<(Handle<Expression>, SourceMetadata)> {
// initializer:
// assignment_expression
// LEFT_BRACE initializer_list RIGHT_BRACE
@ -76,12 +76,12 @@ impl<'source> ParsingContext<'source> {
if let Some(Token { meta: end_meta, .. }) =
self.bump_if(parser, TokenValue::RightBrace)
{
meta.subsume(end_meta);
meta = meta.union(&end_meta);
break;
}
}
TokenValue::RightBrace => {
meta.subsume(token.meta);
meta = meta.union(&token.meta);
break;
}
_ => {
@ -190,7 +190,7 @@ impl<'source> ParsingContext<'source> {
.implicit_conversion(parser, &mut expr, init_meta, kind, width)?;
}
meta.subsume(init_meta);
meta = meta.union(&init_meta);
Ok((expr, init_meta))
})
@ -206,7 +206,8 @@ impl<'source> ParsingContext<'source> {
if let Some((value, _)) = init.filter(|_| maybe_constant.is_none()) {
ctx.flush_expressions();
ctx.body.push(Statement::Store { pointer, value }, meta);
ctx.body
.push(Statement::Store { pointer, value }, meta.as_span());
}
let token = self.bump(parser)?;
@ -235,7 +236,7 @@ impl<'source> ParsingContext<'source> {
ctx: &mut Context,
body: &mut Block,
external: bool,
) -> Result<Option<Span>> {
) -> Result<Option<SourceMetadata>> {
//declaration:
// function_prototype SEMICOLON
//
@ -271,7 +272,7 @@ impl<'source> ParsingContext<'source> {
self.parse_function_args(parser, &mut context, &mut body)?;
let end_meta = self.expect(parser, TokenValue::RightParen)?.meta;
meta.subsume(end_meta);
meta = meta.union(&end_meta);
let token = self.bump(parser)?;
return match token.value {
@ -378,7 +379,7 @@ impl<'source> ParsingContext<'source> {
TokenValue::Semicolon => {
let mut meta_all = token.meta;
for &(ref qualifier, meta) in qualifiers.iter() {
meta_all.subsume(meta);
meta_all = meta_all.union(&meta);
match *qualifier {
TypeQualifier::WorkGroupSize(i, value) => {
parser.meta.workgroup_size[i] = value
@ -470,10 +471,10 @@ impl<'source> ParsingContext<'source> {
parser: &mut Parser,
ctx: &mut Context,
body: &mut Block,
qualifiers: &[(TypeQualifier, Span)],
qualifiers: &[(TypeQualifier, SourceMetadata)],
ty_name: String,
meta: Span,
) -> Result<Span> {
meta: SourceMetadata,
) -> Result<SourceMetadata> {
let mut storage = None;
let mut layout = None;
@ -497,7 +498,7 @@ impl<'source> ParsingContext<'source> {
let span = self.parse_struct_declaration_list(parser, &mut members, layout)?;
self.expect(parser, TokenValue::RightBrace)?;
let mut ty = parser.module.types.insert(
let mut ty = parser.module.types.append(
Type {
name: Some(ty_name),
inner: TypeInner::Struct {
@ -543,14 +544,15 @@ impl<'source> ParsingContext<'source> {
},
)?;
for (i, k, ty) in members.into_iter().enumerate().filter_map(|(i, m)| {
let ty = m.ty;
m.name.map(|s| (i as u32, s, ty))
}) {
for (i, k) in members
.into_iter()
.enumerate()
.filter_map(|(i, m)| m.name.map(|s| (i as u32, s)))
{
let lookup = GlobalLookup {
kind: match global {
GlobalOrConstant::Global(handle) => GlobalLookupKind::BlockSelect(handle, i),
GlobalOrConstant::Constant(handle) => GlobalLookupKind::Constant(handle, ty),
GlobalOrConstant::Constant(handle) => GlobalLookupKind::Constant(handle),
},
entry_arg: None,
mutable: true,
@ -579,7 +581,7 @@ impl<'source> ParsingContext<'source> {
let (ty, mut meta) = self.parse_type_non_void(parser)?;
let (name, end_meta) = self.expect_ident(parser)?;
meta.subsume(end_meta);
meta = meta.union(&end_meta);
let array_specifier = self.parse_array_specifier(parser)?;
let ty = parser.maybe_array(ty, meta, array_specifier);

Просмотреть файл

@ -5,7 +5,7 @@ use crate::{
error::{ErrorKind, ExpectedToken},
parser::ParsingContext,
token::{Token, TokenValue},
Error, Parser, Result, Span,
Error, Parser, Result, SourceMetadata,
},
ArraySize, BinaryOperator, Block, Constant, ConstantInner, Handle, ScalarValue, Type,
TypeInner, UnaryOperator,
@ -39,7 +39,7 @@ impl<'source> ParsingContext<'source> {
let expr = self.parse_expression(parser, ctx, stmt, body)?;
let meta = self.expect(parser, TokenValue::RightParen)?.meta;
token.meta.subsume(meta);
token.meta = token.meta.union(&meta);
return Ok(expr);
}
@ -65,7 +65,7 @@ impl<'source> ParsingContext<'source> {
specialization: None,
inner: ConstantInner::Scalar { width, value },
},
token.meta,
token.meta.as_span(),
);
Ok(stmt.hir_exprs.append(
@ -83,11 +83,11 @@ impl<'source> ParsingContext<'source> {
ctx: &mut Context,
stmt: &mut StmtContext,
body: &mut Block,
meta: &mut Span,
meta: &mut SourceMetadata,
) -> Result<Vec<Handle<HirExpr>>> {
let mut args = Vec::new();
if let Some(token) = self.bump_if(parser, TokenValue::RightParen) {
meta.subsume(token.meta);
*meta = meta.union(&token.meta);
} else {
loop {
args.push(self.parse_assignment(parser, ctx, stmt, body)?);
@ -96,7 +96,7 @@ impl<'source> ParsingContext<'source> {
match token.value {
TokenValue::Comma => {}
TokenValue::RightParen => {
meta.subsume(token.meta);
*meta = meta.union(&token.meta);
break;
}
_ => {
@ -122,89 +122,108 @@ impl<'source> ParsingContext<'source> {
stmt: &mut StmtContext,
body: &mut Block,
) -> Result<Handle<HirExpr>> {
let mut base = if self.peek_type_name(parser) {
let (mut handle, mut meta) = self.parse_type_non_void(parser)?;
let mut base = match self.expect_peek(parser)?.value {
TokenValue::Identifier(_) => {
let (name, mut meta) = self.expect_ident(parser)?;
self.expect(parser, TokenValue::LeftParen)?;
let args = self.parse_function_call_args(parser, ctx, stmt, body, &mut meta)?;
let expr = if self.bump_if(parser, TokenValue::LeftParen).is_some() {
let args = self.parse_function_call_args(parser, ctx, stmt, body, &mut meta)?;
if let TypeInner::Array {
size: ArraySize::Dynamic,
stride,
base,
} = parser.module.types[handle].inner
{
let span = parser.module.types.get_span(handle);
let kind = match parser.lookup_type.get(&name) {
Some(ty) => FunctionCallKind::TypeConstructor(*ty),
None => FunctionCallKind::Function(name),
};
let constant = parser.module.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Scalar {
width: 4,
value: ScalarValue::Uint(args.len() as u64),
},
},
Span::default(),
);
handle = parser.module.types.insert(
Type {
name: None,
inner: TypeInner::Array {
stride,
base,
size: ArraySize::Constant(constant),
},
},
span,
)
}
HirExpr {
kind: HirExprKind::Call(FunctionCall { kind, args }),
meta,
}
} else {
let var = match parser.lookup_variable(ctx, body, &name, meta) {
Some(var) => var,
None => {
return Err(Error {
kind: ErrorKind::UnknownVariable(name),
meta,
})
}
};
stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Call(FunctionCall {
kind: FunctionCallKind::TypeConstructor(handle),
args,
}),
meta,
},
Default::default(),
)
} else if let TokenValue::Identifier(_) = self.expect_peek(parser)?.value {
let (name, mut meta) = self.expect_ident(parser)?;
let expr = if self.bump_if(parser, TokenValue::LeftParen).is_some() {
let args = self.parse_function_call_args(parser, ctx, stmt, body, &mut meta)?;
let kind = match parser.lookup_type.get(&name) {
Some(ty) => FunctionCallKind::TypeConstructor(*ty),
None => FunctionCallKind::Function(name),
};
HirExpr {
kind: HirExprKind::Call(FunctionCall { kind, args }),
meta,
}
} else {
let var = match parser.lookup_variable(ctx, body, &name, meta) {
Some(var) => var,
None => {
return Err(Error {
kind: ErrorKind::UnknownVariable(name),
meta,
})
HirExpr {
kind: HirExprKind::Variable(var),
meta,
}
};
HirExpr {
kind: HirExprKind::Variable(var),
meta,
}
};
stmt.hir_exprs.append(expr, Default::default())
}
TokenValue::TypeName(_) => {
let Token {
value,
meta: name_meta,
} = self.bump(parser)?;
let mut meta = name_meta;
stmt.hir_exprs.append(expr, Default::default())
} else {
self.parse_primary(parser, ctx, stmt, body)?
let mut handle = if let TokenValue::TypeName(ty) = value {
parser.module.types.fetch_or_append(ty, name_meta.as_span())
} else {
unreachable!()
};
let maybe_size = self.parse_array_specifier(parser)?;
self.expect(parser, TokenValue::LeftParen)?;
let args = self.parse_function_call_args(parser, ctx, stmt, body, &mut meta)?;
if let Some((array_size, array_meta)) = maybe_size {
let stride = parser.module.types[handle]
.inner
.span(&parser.module.constants);
let size = match array_size {
ArraySize::Constant(size) => ArraySize::Constant(size),
ArraySize::Dynamic => {
let constant = parser.module.constants.fetch_or_append(
Constant {
name: None,
specialization: None,
inner: ConstantInner::Scalar {
width: 4,
value: ScalarValue::Sint(args.len() as i64),
},
},
meta.as_span(),
);
ArraySize::Constant(constant)
}
};
handle = parser.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Array {
base: handle,
size,
stride,
},
},
name_meta.union(&array_meta).as_span(),
);
}
stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Call(FunctionCall {
kind: FunctionCallKind::TypeConstructor(handle),
args,
}),
meta,
},
Default::default(),
)
}
_ => self.parse_primary(parser, ctx, stmt, body)?,
};
while let TokenValue::LeftBracket
@ -212,18 +231,17 @@ impl<'source> ParsingContext<'source> {
| TokenValue::Increment
| TokenValue::Decrement = self.expect_peek(parser)?.value
{
let Token { value, mut meta } = self.bump(parser)?;
let Token { value, meta } = self.bump(parser)?;
match value {
TokenValue::LeftBracket => {
let index = self.parse_expression(parser, ctx, stmt, body)?;
let end_meta = self.expect(parser, TokenValue::RightBracket)?.meta;
meta.subsume(end_meta);
base = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Access { base, index },
meta,
meta: meta.union(&end_meta),
},
Default::default(),
)
@ -231,23 +249,32 @@ impl<'source> ParsingContext<'source> {
TokenValue::Dot => {
let (field, end_meta) = self.expect_ident(parser)?;
meta.subsume(end_meta);
base = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Select { base, field },
meta: meta.union(&end_meta),
},
Default::default(),
)
}
TokenValue::Increment => {
base = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::IncDec {
increment: true,
postfix: true,
expr: base,
},
meta,
},
Default::default(),
)
}
TokenValue::Increment | TokenValue::Decrement => {
TokenValue::Decrement => {
base = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::PrePostfix {
op: match value {
TokenValue::Increment => crate::BinaryOperator::Add,
_ => crate::BinaryOperator::Subtract,
},
kind: HirExprKind::IncDec {
increment: false,
postfix: true,
expr: base,
},
@ -272,7 +299,7 @@ impl<'source> ParsingContext<'source> {
) -> Result<Handle<HirExpr>> {
Ok(match self.expect_peek(parser)?.value {
TokenValue::Plus | TokenValue::Dash | TokenValue::Bang | TokenValue::Tilde => {
let Token { value, mut meta } = self.bump(parser)?;
let Token { value, meta } = self.bump(parser)?;
let expr = self.parse_unary(parser, ctx, stmt, body)?;
let end_meta = stmt.hir_exprs[expr].meta;
@ -289,9 +316,13 @@ impl<'source> ParsingContext<'source> {
_ => return Ok(expr),
};
meta.subsume(end_meta);
stmt.hir_exprs
.append(HirExpr { kind, meta }, Default::default())
stmt.hir_exprs.append(
HirExpr {
kind,
meta: meta.union(&end_meta),
},
Default::default(),
)
}
TokenValue::Increment | TokenValue::Decrement => {
let Token { value, meta } = self.bump(parser)?;
@ -300,10 +331,11 @@ impl<'source> ParsingContext<'source> {
stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::PrePostfix {
op: match value {
TokenValue::Increment => crate::BinaryOperator::Add,
_ => crate::BinaryOperator::Subtract,
kind: HirExprKind::IncDec {
increment: match value {
TokenValue::Increment => true,
TokenValue::Decrement => false,
_ => unreachable!(),
},
postfix: false,
expr,
@ -329,7 +361,7 @@ impl<'source> ParsingContext<'source> {
let mut left = passtrough
.ok_or(ErrorKind::EndOfFile /* Dummy error */)
.or_else(|_| self.parse_unary(parser, ctx, stmt, body))?;
let mut meta = stmt.hir_exprs[left].meta;
let start_meta = stmt.hir_exprs[left].meta;
while let Some((l_bp, r_bp)) = binding_power(&self.expect_peek(parser)?.value) {
if l_bp < min_bp {
@ -341,7 +373,6 @@ impl<'source> ParsingContext<'source> {
let right = self.parse_binary(parser, ctx, stmt, body, None, r_bp)?;
let end_meta = stmt.hir_exprs[right].meta;
meta.subsume(end_meta);
left = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Binary {
@ -370,7 +401,7 @@ impl<'source> ParsingContext<'source> {
},
right,
},
meta,
meta: start_meta.union(&end_meta),
},
Default::default(),
)
@ -388,7 +419,7 @@ impl<'source> ParsingContext<'source> {
passtrough: Option<Handle<HirExpr>>,
) -> Result<Handle<HirExpr>> {
let mut condition = self.parse_binary(parser, ctx, stmt, body, passtrough, 0)?;
let mut meta = stmt.hir_exprs[condition].meta;
let start_meta = stmt.hir_exprs[condition].meta;
if self.bump_if(parser, TokenValue::Question).is_some() {
let accept = self.parse_expression(parser, ctx, stmt, body)?;
@ -396,7 +427,6 @@ impl<'source> ParsingContext<'source> {
let reject = self.parse_assignment(parser, ctx, stmt, body)?;
let end_meta = stmt.hir_exprs[reject].meta;
meta.subsume(end_meta);
condition = stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Conditional {
@ -404,7 +434,7 @@ impl<'source> ParsingContext<'source> {
accept,
reject,
},
meta,
meta: start_meta.union(&end_meta),
},
Default::default(),
)
@ -421,7 +451,7 @@ impl<'source> ParsingContext<'source> {
body: &mut Block,
) -> Result<Handle<HirExpr>> {
let tgt = self.parse_unary(parser, ctx, stmt, body)?;
let mut meta = stmt.hir_exprs[tgt].meta;
let start_meta = stmt.hir_exprs[tgt].meta;
Ok(match self.expect_peek(parser)?.value {
TokenValue::Assign => {
@ -429,11 +459,10 @@ impl<'source> ParsingContext<'source> {
let value = self.parse_assignment(parser, ctx, stmt, body)?;
let end_meta = stmt.hir_exprs[value].meta;
meta.subsume(end_meta);
stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Assign { tgt, value },
meta,
meta: start_meta.union(&end_meta),
},
Default::default(),
)
@ -452,10 +481,9 @@ impl<'source> ParsingContext<'source> {
let right = self.parse_assignment(parser, ctx, stmt, body)?;
let end_meta = stmt.hir_exprs[right].meta;
meta.subsume(end_meta);
let value = stmt.hir_exprs.append(
HirExpr {
meta,
meta: start_meta.union(&end_meta),
kind: HirExprKind::Binary {
left: tgt,
op: match token.value {
@ -480,7 +508,7 @@ impl<'source> ParsingContext<'source> {
stmt.hir_exprs.append(
HirExpr {
kind: HirExprKind::Assign { tgt, value },
meta,
meta: start_meta.union(&end_meta),
},
Default::default(),
)

Просмотреть файл

@ -1,5 +1,5 @@
use crate::front::glsl::context::ExprPos;
use crate::front::glsl::Span;
use crate::front::glsl::SourceMetadata;
use crate::{
front::glsl::{
ast::ParameterQualifier,
@ -41,7 +41,7 @@ impl<'source> ParsingContext<'source> {
ctx: &mut Context,
body: &mut Block,
terminator: &mut Option<usize>,
) -> Result<Option<Span>> {
) -> Result<Option<SourceMetadata>> {
// TODO: This prevents snippets like the following from working
// ```glsl
// vec4(1.0);
@ -55,25 +55,22 @@ impl<'source> ParsingContext<'source> {
let new_break = || {
let mut block = Block::new();
block.push(Statement::Break, crate::Span::default());
block.push(Statement::Break, crate::Span::Unknown);
block
};
let &Token {
ref value,
mut meta,
} = self.expect_peek(parser)?;
let &Token { ref value, meta } = self.expect_peek(parser)?;
let meta_rest = match *value {
TokenValue::Continue => {
let meta = self.bump(parser)?.meta;
body.push(Statement::Continue, meta);
body.push(Statement::Continue, meta.as_span());
terminator.get_or_insert(body.len());
self.expect(parser, TokenValue::Semicolon)?.meta
}
TokenValue::Break => {
let meta = self.bump(parser)?.meta;
body.push(Statement::Break, meta);
body.push(Statement::Break, meta.as_span());
terminator.get_or_insert(body.len());
self.expect(parser, TokenValue::Semicolon)?.meta
}
@ -95,14 +92,14 @@ impl<'source> ParsingContext<'source> {
ctx.emit_flush(body);
ctx.emit_start();
body.push(Statement::Return { value }, meta);
body.push(Statement::Return { value }, meta.as_span());
terminator.get_or_insert(body.len());
meta
}
TokenValue::Discard => {
let meta = self.bump(parser)?.meta;
body.push(Statement::Kill, meta);
body.push(Statement::Kill, meta.as_span());
terminator.get_or_insert(body.len());
self.expect(parser, TokenValue::Semicolon)?.meta
@ -116,7 +113,7 @@ impl<'source> ParsingContext<'source> {
let expr = self.parse_expression(parser, ctx, &mut stmt, body)?;
let (handle, more_meta) =
ctx.lower_expect(stmt, parser, expr, ExprPos::Rhs, body)?;
meta.subsume(more_meta);
meta = meta.union(&more_meta);
handle
};
self.expect(parser, TokenValue::RightParen)?;
@ -128,7 +125,7 @@ impl<'source> ParsingContext<'source> {
if let Some(more_meta) =
self.parse_statement(parser, ctx, &mut accept, &mut None)?
{
meta.subsume(more_meta)
meta = meta.union(&more_meta)
}
let mut reject = Block::new();
@ -136,7 +133,7 @@ impl<'source> ParsingContext<'source> {
if let Some(more_meta) =
self.parse_statement(parser, ctx, &mut reject, &mut None)?
{
meta.subsume(more_meta);
meta = meta.union(&more_meta);
}
}
@ -146,33 +143,33 @@ impl<'source> ParsingContext<'source> {
accept,
reject,
},
meta,
meta.as_span(),
);
meta
}
TokenValue::Switch => {
let mut meta = self.bump(parser)?.meta;
let start_meta = self.bump(parser)?.meta;
let end_meta;
self.expect(parser, TokenValue::LeftParen)?;
// TODO: Implicit conversions
let selector = {
let mut stmt = ctx.stmt_ctx();
let expr = self.parse_expression(parser, ctx, &mut stmt, body)?;
ctx.lower_expect(stmt, parser, expr, ExprPos::Rhs, body)?.0
};
self.expect(parser, TokenValue::RightParen)?;
ctx.emit_flush(body);
ctx.emit_start();
let mut cases = Vec::new();
let mut default = Block::new();
self.expect(parser, TokenValue::LeftBrace)?;
loop {
let value = match self.expect_peek(parser)?.value {
match self.expect_peek(parser)?.value {
TokenValue::Case => {
self.bump(parser)?;
let value = {
@ -203,11 +200,73 @@ impl<'source> ParsingContext<'source> {
}
}
};
crate::SwitchValue::Integer(value)
self.expect(parser, TokenValue::Colon)?;
let mut body = Block::new();
let mut case_terminator = None;
loop {
match self.expect_peek(parser)?.value {
TokenValue::Case
| TokenValue::Default
| TokenValue::RightBrace => break,
_ => {
self.parse_statement(
parser,
ctx,
&mut body,
&mut case_terminator,
)?;
}
}
}
let mut fall_through = true;
if let Some(mut idx) = case_terminator {
if let Statement::Break = body[idx - 1] {
fall_through = false;
idx -= 1;
}
body.cull(idx..)
}
cases.push(SwitchCase {
value,
body,
fall_through,
})
}
TokenValue::Default => {
self.bump(parser)?;
crate::SwitchValue::Default
let Token { meta, .. } = self.bump(parser)?;
self.expect(parser, TokenValue::Colon)?;
if !default.is_empty() {
parser.errors.push(Error {
kind: ErrorKind::SemanticError(
"Can only have one default case per switch statement"
.into(),
),
meta,
});
}
let mut default_terminator = None;
loop {
match self.expect_peek(parser)?.value {
TokenValue::Case | TokenValue::RightBrace => break,
_ => {
self.parse_statement(
parser,
ctx,
&mut default,
&mut default_terminator,
)?;
}
}
}
}
TokenValue::RightBrace => {
end_meta = self.bump(parser)?.meta;
@ -227,57 +286,31 @@ impl<'source> ParsingContext<'source> {
meta,
});
}
};
self.expect(parser, TokenValue::Colon)?;
let mut body = Block::new();
let mut case_terminator = None;
loop {
match self.expect_peek(parser)?.value {
TokenValue::Case | TokenValue::Default | TokenValue::RightBrace => {
break
}
_ => {
self.parse_statement(parser, ctx, &mut body, &mut case_terminator)?;
}
}
}
let mut fall_through = true;
if let Some(mut idx) = case_terminator {
if let Statement::Break = body[idx - 1] {
fall_through = false;
idx -= 1;
}
body.cull(idx..)
}
cases.push(SwitchCase {
value,
body,
fall_through,
})
}
meta.subsume(end_meta);
let meta = start_meta.union(&end_meta);
body.push(Statement::Switch { selector, cases }, meta);
body.push(
Statement::Switch {
selector,
cases,
default,
},
meta.as_span(),
);
meta
}
TokenValue::While => {
let mut meta = self.bump(parser)?.meta;
let meta = self.bump(parser)?.meta;
let mut loop_body = Block::new();
let mut stmt = ctx.stmt_ctx();
self.expect(parser, TokenValue::LeftParen)?;
let root = self.parse_expression(parser, ctx, &mut stmt, &mut loop_body)?;
meta.subsume(self.expect(parser, TokenValue::RightParen)?.meta);
let meta = meta.union(&self.expect(parser, TokenValue::RightParen)?.meta);
let (expr, expr_meta) =
ctx.lower_expect(stmt, parser, root, ExprPos::Rhs, &mut loop_body)?;
@ -299,15 +332,15 @@ impl<'source> ParsingContext<'source> {
accept: new_break(),
reject: Block::new(),
},
crate::Span::default(),
crate::Span::Unknown,
);
meta.subsume(expr_meta);
let mut meta = meta.union(&expr_meta);
if let Some(body_meta) =
self.parse_statement(parser, ctx, &mut loop_body, &mut None)?
{
meta.subsume(body_meta);
meta = meta.union(&body_meta);
}
body.push(
@ -315,13 +348,13 @@ impl<'source> ParsingContext<'source> {
body: loop_body,
continuing: Block::new(),
},
meta,
meta.as_span(),
);
meta
}
TokenValue::Do => {
let mut meta = self.bump(parser)?.meta;
let start_meta = self.bump(parser)?.meta;
let mut loop_body = Block::new();
@ -334,7 +367,7 @@ impl<'source> ParsingContext<'source> {
let root = self.parse_expression(parser, ctx, &mut stmt, &mut loop_body)?;
let end_meta = self.expect(parser, TokenValue::RightParen)?.meta;
meta.subsume(end_meta);
let meta = start_meta.union(&end_meta);
let (expr, expr_meta) =
ctx.lower_expect(stmt, parser, root, ExprPos::Rhs, &mut loop_body)?;
@ -356,7 +389,7 @@ impl<'source> ParsingContext<'source> {
accept: new_break(),
reject: Block::new(),
},
crate::Span::default(),
crate::Span::Unknown,
);
body.push(
@ -364,13 +397,13 @@ impl<'source> ParsingContext<'source> {
body: loop_body,
continuing: Block::new(),
},
meta,
meta.as_span(),
);
meta
}
TokenValue::For => {
let mut meta = self.bump(parser)?.meta;
let meta = self.bump(parser)?.meta;
ctx.push_scope();
self.expect(parser, TokenValue::LeftParen)?;
@ -392,14 +425,14 @@ impl<'source> ParsingContext<'source> {
let (expr, expr_meta) =
if self.peek_type_name(parser) || self.peek_type_qualifier(parser) {
let qualifiers = self.parse_type_qualifiers(parser)?;
let (ty, mut meta) = self.parse_type_non_void(parser)?;
let (ty, meta) = self.parse_type_non_void(parser)?;
let name = self.expect_ident(parser)?.0;
self.expect(parser, TokenValue::Assign)?;
let (value, end_meta) =
self.parse_initializer(parser, ty, ctx, &mut block)?;
meta.subsume(end_meta);
let meta = meta.union(&end_meta);
let decl = VarDeclaration {
qualifiers: &qualifiers,
@ -414,7 +447,7 @@ impl<'source> ParsingContext<'source> {
ctx.emit_flush(&mut block);
ctx.emit_start();
block.push(Statement::Store { pointer, value }, meta);
block.push(Statement::Store { pointer, value }, meta.as_span());
(value, end_meta)
} else {
@ -441,7 +474,7 @@ impl<'source> ParsingContext<'source> {
accept: new_break(),
reject: Block::new(),
},
crate::Span::default(),
crate::Span::Unknown,
);
self.expect(parser, TokenValue::Semicolon)?;
@ -457,10 +490,10 @@ impl<'source> ParsingContext<'source> {
}
}
meta.subsume(self.expect(parser, TokenValue::RightParen)?.meta);
let mut meta = meta.union(&self.expect(parser, TokenValue::RightParen)?.meta);
if let Some(stmt_meta) = self.parse_statement(parser, ctx, &mut block, &mut None)? {
meta.subsume(stmt_meta);
meta = meta.union(&stmt_meta);
}
body.push(
@ -468,7 +501,7 @@ impl<'source> ParsingContext<'source> {
body: block,
continuing,
},
meta,
meta.as_span(),
);
ctx.remove_current_scope();
@ -485,7 +518,7 @@ impl<'source> ParsingContext<'source> {
ctx.remove_current_scope();
body.push(Statement::Block(block), meta);
body.push(Statement::Block(block), meta.as_span());
meta
}
@ -501,31 +534,30 @@ impl<'source> ParsingContext<'source> {
}
};
meta.subsume(meta_rest);
Ok(Some(meta))
Ok(Some(meta.union(&meta_rest)))
}
pub fn parse_compound_statement(
&mut self,
mut meta: Span,
mut meta: SourceMetadata,
parser: &mut Parser,
ctx: &mut Context,
body: &mut Block,
) -> Result<Span> {
) -> Result<SourceMetadata> {
let mut terminator = None;
loop {
if let Some(Token {
meta: brace_meta, ..
}) = self.bump_if(parser, TokenValue::RightBrace)
{
meta.subsume(brace_meta);
meta = meta.union(&brace_meta);
break;
}
let stmt = self.parse_statement(parser, ctx, body, &mut terminator)?;
if let Some(stmt_meta) = stmt {
meta.subsume(stmt_meta);
meta = meta.union(&stmt_meta);
}
}
@ -542,10 +574,6 @@ impl<'source> ParsingContext<'source> {
context: &mut Context,
body: &mut Block,
) -> Result<()> {
if self.bump_if(parser, TokenValue::Void).is_some() {
return Ok(());
}
loop {
if self.peek_type_name(parser) || self.peek_parameter_qualifier(parser) {
let qualifier = self.parse_parameter_qualifier(parser);

Просмотреть файл

@ -3,10 +3,10 @@ use crate::{
ast::{StorageQualifier, StructLayout, TypeQualifier},
error::ExpectedToken,
parser::ParsingContext,
token::{Token, TokenValue},
token::{SourceMetadata, Token, TokenValue},
Error, ErrorKind, Parser, Result,
},
ArraySize, Handle, Span, StorageClass, Type, TypeInner,
ArraySize, Handle, StorageClass, Type, TypeInner,
};
impl<'source> ParsingContext<'source> {
@ -15,50 +15,45 @@ impl<'source> ParsingContext<'source> {
pub fn parse_array_specifier(
&mut self,
parser: &mut Parser,
) -> Result<Option<(ArraySize, Span)>> {
if let Some(Token { mut meta, .. }) = self.bump_if(parser, TokenValue::LeftBracket) {
) -> Result<Option<(ArraySize, SourceMetadata)>> {
if let Some(Token { meta, .. }) = self.bump_if(parser, TokenValue::LeftBracket) {
if let Some(Token { meta: end_meta, .. }) =
self.bump_if(parser, TokenValue::RightBracket)
{
meta.subsume(end_meta);
return Ok(Some((ArraySize::Dynamic, meta)));
return Ok(Some((ArraySize::Dynamic, meta.union(&end_meta))));
}
let (value, span) = self.parse_uint_constant(parser)?;
let constant = parser.module.constants.fetch_or_append(
crate::Constant {
name: None,
specialization: None,
inner: crate::ConstantInner::Scalar {
width: 4,
value: crate::ScalarValue::Uint(value as u64),
},
},
span,
);
let (constant, _) = self.parse_constant_expression(parser)?;
let end_meta = self.expect(parser, TokenValue::RightBracket)?.meta;
meta.subsume(end_meta);
Ok(Some((ArraySize::Constant(constant), meta)))
Ok(Some((ArraySize::Constant(constant), meta.union(&end_meta))))
} else {
Ok(None)
}
}
pub fn parse_type(&mut self, parser: &mut Parser) -> Result<(Option<Handle<Type>>, Span)> {
pub fn parse_type(
&mut self,
parser: &mut Parser,
) -> Result<(Option<Handle<Type>>, SourceMetadata)> {
let token = self.bump(parser)?;
let handle = match token.value {
TokenValue::Void => None,
TokenValue::TypeName(ty) => Some(parser.module.types.insert(ty, token.meta)),
TokenValue::TypeName(ty) => Some(
parser
.module
.types
.fetch_or_append(ty, token.meta.as_span()),
),
TokenValue::Struct => {
let mut meta = token.meta;
let meta = token.meta;
let ty_name = self.expect_ident(parser)?.0;
self.expect(parser, TokenValue::LeftBrace)?;
let mut members = Vec::new();
let span =
self.parse_struct_declaration_list(parser, &mut members, StructLayout::Std140)?;
let end_meta = self.expect(parser, TokenValue::RightBrace)?.meta;
meta.subsume(end_meta);
let ty = parser.module.types.insert(
let ty = parser.module.types.append(
Type {
name: Some(ty_name.clone()),
inner: TypeInner::Struct {
@ -67,7 +62,7 @@ impl<'source> ParsingContext<'source> {
span,
},
},
meta,
meta.union(&end_meta).as_span(),
);
parser.lookup_type.insert(ty_name, ty);
Some(ty)
@ -99,12 +94,14 @@ impl<'source> ParsingContext<'source> {
let token_meta = token.meta;
let array_specifier = self.parse_array_specifier(parser)?;
let handle = handle.map(|ty| parser.maybe_array(ty, token_meta, array_specifier));
let mut meta = array_specifier.map_or(token_meta, |(_, meta)| meta);
meta.subsume(token_meta);
let meta = array_specifier.map_or(token_meta, |(_, meta)| meta.union(&token_meta));
Ok((handle, meta))
}
pub fn parse_type_non_void(&mut self, parser: &mut Parser) -> Result<(Handle<Type>, Span)> {
pub fn parse_type_non_void(
&mut self,
parser: &mut Parser,
) -> Result<(Handle<Type>, SourceMetadata)> {
let (maybe_ty, meta) = self.parse_type(parser)?;
let ty = maybe_ty.ok_or_else(|| Error {
kind: ErrorKind::SemanticError("Type can't be void".into()),
@ -135,7 +132,7 @@ impl<'source> ParsingContext<'source> {
pub fn parse_type_qualifiers(
&mut self,
parser: &mut Parser,
) -> Result<Vec<(TypeQualifier, Span)>> {
) -> Result<Vec<(TypeQualifier, SourceMetadata)>> {
let mut qualifiers = Vec::new();
while self.peek_type_qualifier(parser) {
@ -180,7 +177,7 @@ impl<'source> ParsingContext<'source> {
pub fn parse_layout_qualifier_id_list(
&mut self,
parser: &mut Parser,
qualifiers: &mut Vec<(TypeQualifier, Span)>,
qualifiers: &mut Vec<(TypeQualifier, SourceMetadata)>,
) -> Result<()> {
self.expect(parser, TokenValue::LeftParen)?;
loop {
@ -200,7 +197,7 @@ impl<'source> ParsingContext<'source> {
pub fn parse_layout_qualifier_id(
&mut self,
parser: &mut Parser,
qualifiers: &mut Vec<(TypeQualifier, Span)>,
qualifiers: &mut Vec<(TypeQualifier, SourceMetadata)>,
) -> Result<()> {
// layout_qualifier_id:
// IDENTIFIER
@ -211,7 +208,7 @@ impl<'source> ParsingContext<'source> {
TokenValue::Identifier(name) => {
if self.bump_if(parser, TokenValue::Assign).is_some() {
let (value, end_meta) = self.parse_uint_constant(parser)?;
token.meta.subsume(end_meta);
token.meta = token.meta.union(&end_meta);
qualifiers.push((
match name.as_str() {

Просмотреть файл

@ -3,7 +3,7 @@ use super::{
error::ExpectedToken,
error::{Error, ErrorKind},
token::TokenValue,
Options, Parser, Span,
Options, Parser, SourceMetadata,
};
use crate::ShaderStage;
use pp_rs::token::PreprocessorError;
@ -23,7 +23,7 @@ fn version() {
.unwrap(),
vec![Error {
kind: ErrorKind::InvalidVersion(99000),
meta: Span::new(9, 14)
meta: SourceMetadata { start: 9, end: 14 }
}],
);
@ -37,7 +37,7 @@ fn version() {
.unwrap(),
vec![Error {
kind: ErrorKind::InvalidVersion(449),
meta: Span::new(9, 12)
meta: SourceMetadata { start: 9, end: 12 }
}]
);
@ -51,7 +51,7 @@ fn version() {
.unwrap(),
vec![Error {
kind: ErrorKind::InvalidProfile("smart".into()),
meta: Span::new(13, 18),
meta: SourceMetadata { start: 13, end: 18 },
}]
);
@ -66,14 +66,14 @@ fn version() {
vec![
Error {
kind: ErrorKind::PreprocessorError(PreprocessorError::UnexpectedHash,),
meta: Span::new(27, 28),
meta: SourceMetadata { start: 27, end: 28 },
},
Error {
kind: ErrorKind::InvalidToken(
TokenValue::Identifier("version".into()),
vec![ExpectedToken::Eof]
),
meta: Span::new(28, 35)
meta: SourceMetadata { start: 28, end: 35 }
}
]
);
@ -104,7 +104,7 @@ fn version() {
parser
.parse(
&Options::from(ShaderStage::Vertex),
"#version 450 core\nvoid main(void) {}",
"#version 450 core\nvoid main() {}",
)
.unwrap();
assert_eq!(
@ -449,7 +449,10 @@ fn functions() {
.unwrap(),
vec![Error {
kind: ErrorKind::SemanticError("Function already defined".into()),
meta: Span::new(134, 152),
meta: SourceMetadata {
start: 134,
end: 152
},
}]
);
@ -605,7 +608,10 @@ fn implicit_conversions() {
.unwrap(),
vec![Error {
kind: ErrorKind::SemanticError("Unknown function \'test\'".into()),
meta: Span::new(156, 165),
meta: SourceMetadata {
start: 156,
end: 165
},
}]
);
@ -627,7 +633,10 @@ fn implicit_conversions() {
.unwrap(),
vec![Error {
kind: ErrorKind::SemanticError("Ambiguous best function for \'test\'".into()),
meta: Span::new(158, 165),
meta: SourceMetadata {
start: 158,
end: 165
},
}]
);
}

81
third_party/rust/naga/src/front/glsl/token.rs поставляемый
Просмотреть файл

@ -1,11 +1,82 @@
pub use pp_rs::token::{Float, Integer, Location, PreprocessorError, Token as PPToken};
use pp_rs::token::Location;
pub use pp_rs::token::{Float, Integer, PreprocessorError, Token as PPToken};
use super::ast::Precision;
use crate::{Interpolation, Sampling, Span, Type};
use crate::{Interpolation, Sampling, Type};
use std::ops::Range;
impl From<Location> for Span {
/// Represents a range of the source code
///
/// The `SourceMetadata` is used in error reporting to indicate a range of the
/// original source code where the error happened.
///
/// For easy interaction with error crates like
/// [`codespan`][codespan] the [`From`](From) trait is
/// implemeted for [`Range<usize>`](Range) allowing for conversions from `SourceMetadata`.
///
/// ```rust
/// # use naga::front::glsl::SourceMetadata;
/// # use std::ops::Range;
/// # let meta = SourceMetadata::default();
/// let range: Range<usize> = meta.into();
/// ```
///
/// Or in the case of [`codespan`][codespan]
///
/// ```rust
/// # use naga::front::glsl::SourceMetadata;
/// use codespan_reporting::diagnostic::Label;
/// # let file = ();
/// # let meta = SourceMetadata::default();
/// let label = Label::primary(file, meta);
/// ```
///
/// # Notes
///
/// [`start`](SourceMetadata::start) can be equal to
/// [`end`](SourceMetadata::end) especially when reporting errors which aren't
/// associated with a specific portion of the code.
///
/// [codespan]: https://docs.rs/codespan-reporting
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(test, derive(PartialEq))]
pub struct SourceMetadata {
/// Byte offset into the source where the first char starts
pub start: usize,
/// Byte offset into the source where the first char not belonging to this
/// source metadata starts
pub end: usize,
}
impl SourceMetadata {
pub(crate) fn union(&self, other: &Self) -> Self {
SourceMetadata {
start: self.start.min(other.start),
end: self.end.max(other.end),
}
}
pub fn as_span(&self) -> crate::Span {
crate::Span::ByteRange(self.start..self.end)
}
pub(crate) fn none() -> Self {
SourceMetadata::default()
}
}
impl From<Location> for SourceMetadata {
fn from(loc: Location) -> Self {
Span::new(loc.start, loc.end)
SourceMetadata {
start: loc.start as usize,
end: loc.end as usize,
}
}
}
impl From<SourceMetadata> for Range<usize> {
fn from(meta: SourceMetadata) -> Self {
meta.start..meta.end
}
}
@ -13,7 +84,7 @@ impl From<Location> for Span {
#[cfg_attr(test, derive(PartialEq))]
pub struct Token {
pub value: TokenValue,
pub meta: Span,
pub meta: SourceMetadata,
}
/// A token passed from the lexing used in the parsing

31
third_party/rust/naga/src/front/glsl/types.rs поставляемый
Просмотреть файл

@ -1,4 +1,6 @@
use super::{constants::ConstantSolver, context::Context, Error, ErrorKind, Parser, Result, Span};
use super::{
constants::ConstantSolver, context::Context, Error, ErrorKind, Parser, Result, SourceMetadata,
};
use crate::{
proc::ResolveContext, ArraySize, Bytes, Constant, Expression, Handle, ImageClass,
ImageDimension, ScalarKind, Type, TypeInner, VectorSize,
@ -129,14 +131,14 @@ pub fn parse_type(type_name: &str) -> Option<Type> {
let (dim, arrayed, class) = match size {
"1D" => (ImageDimension::D1, false, sampled(false)),
"1DArray" => (ImageDimension::D1, true, sampled(false)),
"1DArray" => (ImageDimension::D1, false, sampled(false)),
"2D" => (ImageDimension::D2, false, sampled(false)),
"2DArray" => (ImageDimension::D2, true, sampled(false)),
"2DMS" => (ImageDimension::D2, false, sampled(true)),
"2DArray" => (ImageDimension::D2, false, sampled(false)),
"2DMS" => (ImageDimension::D2, true, sampled(true)),
"2DMSArray" => (ImageDimension::D2, true, sampled(true)),
"3D" => (ImageDimension::D3, false, sampled(false)),
"Cube" => (ImageDimension::Cube, false, sampled(false)),
"CubeArray" => (ImageDimension::Cube, true, sampled(false)),
"CubeArray" => (ImageDimension::D2, false, sampled(false)),
_ => return None,
};
@ -182,7 +184,7 @@ impl Parser {
&self,
ctx: &mut Context,
handle: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<()> {
let resolve_ctx = ResolveContext {
constants: &self.module.constants,
@ -205,7 +207,7 @@ impl Parser {
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<&'b TypeInner> {
self.typifier_grow(ctx, handle, meta)?;
Ok(ctx.typifier.get(handle, &self.module.types))
@ -216,7 +218,7 @@ impl Parser {
&'b self,
ctx: &'b mut Context,
handle: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<()> {
let resolve_ctx = ResolveContext {
constants: &self.module.constants,
@ -239,10 +241,10 @@ impl Parser {
&mut self,
ctx: &Context,
root: Handle<Expression>,
meta: Span,
meta: SourceMetadata,
) -> Result<Handle<Constant>> {
let mut solver = ConstantSolver {
types: &mut self.module.types,
types: &self.module.types,
expressions: &ctx.expressions,
constants: &mut self.module.constants,
};
@ -256,13 +258,12 @@ impl Parser {
pub(crate) fn maybe_array(
&mut self,
base: Handle<Type>,
mut meta: Span,
array_specifier: Option<(ArraySize, Span)>,
meta: SourceMetadata,
array_specifier: Option<(ArraySize, SourceMetadata)>,
) -> Handle<Type> {
array_specifier
.map(|(size, size_meta)| {
meta.subsume(size_meta);
self.module.types.insert(
self.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Array {
@ -271,7 +272,7 @@ impl Parser {
stride: self.module.types[base].inner.span(&self.module.constants),
},
},
meta,
meta.union(&size_meta).as_span(),
)
})
.unwrap_or(base)

Просмотреть файл

@ -2,7 +2,7 @@ use super::{
ast::*,
context::Context,
error::{Error, ErrorKind},
Parser, Result, Span,
Parser, Result, SourceMetadata,
};
use crate::{
Binding, Block, BuiltIn, Constant, Expression, GlobalVariable, Handle, Interpolation,
@ -24,11 +24,11 @@ macro_rules! qualifier_arm {
}
pub struct VarDeclaration<'a> {
pub qualifiers: &'a [(TypeQualifier, Span)],
pub qualifiers: &'a [(TypeQualifier, SourceMetadata)],
pub ty: Handle<Type>,
pub name: Option<String>,
pub init: Option<Handle<Constant>>,
pub meta: Span,
pub meta: SourceMetadata,
}
/// Information about a builtin used in [`add_builtin`](Parser::add_builtin)
@ -56,14 +56,14 @@ impl Parser {
body: &mut Block,
name: &str,
data: BuiltInData,
meta: Span,
meta: SourceMetadata,
) -> Option<VariableReference> {
let ty = self.module.types.insert(
let ty = self.module.types.fetch_or_append(
Type {
name: None,
inner: data.inner,
},
meta,
meta.as_span(),
);
let handle = self.module.global_variables.append(
@ -74,7 +74,7 @@ impl Parser {
ty,
init: None,
},
meta,
meta.as_span(),
);
let idx = self.entry_args.len();
@ -101,7 +101,6 @@ impl Parser {
expr,
load: true,
mutable: data.mutable,
constant: None,
entry_arg: Some(idx),
},
);
@ -114,7 +113,7 @@ impl Parser {
ctx: &mut Context,
body: &mut Block,
name: &str,
meta: Span,
meta: SourceMetadata,
) -> Option<VariableReference> {
if let Some(local_var) = ctx.lookup_local_var(name) {
return Some(local_var);
@ -188,7 +187,7 @@ impl Parser {
storage: StorageQualifier::Output,
},
"gl_ClipDistance" | "gl_CullDistance" => {
let base = self.module.types.insert(
let base = self.module.types.fetch_or_append(
Type {
name: None,
inner: TypeInner::Scalar {
@ -196,7 +195,7 @@ impl Parser {
width: 4,
},
},
meta,
meta.as_span(),
);
BuiltInData {
@ -248,7 +247,7 @@ impl Parser {
body: &mut Block,
expression: Handle<Expression>,
name: &str,
meta: Span,
meta: SourceMetadata,
) -> Result<Handle<Expression>> {
let (ty, is_pointer) = match *self.resolve_type(ctx, expression, meta)? {
TypeInner::Pointer { base, .. } => (&self.module.types[base].inner, true),
@ -562,7 +561,7 @@ impl Parser {
ty,
init,
},
meta,
meta.as_span(),
);
let idx = self.entry_args.len();
@ -596,7 +595,7 @@ impl Parser {
})?;
if let Some(name) = name {
let lookup = GlobalLookup {
kind: GlobalLookupKind::Constant(init, ty),
kind: GlobalLookupKind::Constant(init),
entry_arg: None,
mutable: false,
};
@ -633,7 +632,7 @@ impl Parser {
ty,
init,
},
meta,
meta.as_span(),
);
if let Some(name) = name {
@ -711,7 +710,7 @@ impl Parser {
ty,
init,
},
meta,
meta.as_span(),
);
let expr = ctx.add_expression(Expression::LocalVariable(handle), meta, body);

Просмотреть файл

@ -1,59 +0,0 @@
//! Interpolation defaults.
impl crate::Binding {
/// Apply the usual default interpolation for `ty` to `binding`.
///
/// This function is a utility front ends may use to satisfy the Naga IR's
/// requirement, meant to ensure that input languages' policies have been
/// applied appropriately, that all I/O `Binding`s from the vertex shader to the
/// fragment shader must have non-`None` `interpolation` values.
///
/// All the shader languages Naga supports have similar rules:
/// perspective-correct, center-sampled interpolation is the default for any
/// binding that can vary, and everything else either defaults to flat, or
/// requires an explicit flat qualifier/attribute/what-have-you.
///
/// If `binding` is not a [`Location`] binding, or if its [`interpolation`] is
/// already set, then make no changes. Otherwise, set `binding`'s interpolation
/// and sampling to reasonable defaults depending on `ty`, the type of the value
/// being interpolated:
///
/// - If `ty` is a floating-point scalar, vector, or matrix type, then
/// default to [`Perspective`] interpolation and [`Center`] sampling.
///
/// - If `ty` is an integral scalar or vector, then default to [`Flat`]
/// interpolation, which has no associated sampling.
///
/// - For any other types, make no change. Such types are not permitted as
/// user-defined IO values, and will probably be flagged by the verifier
///
/// When structs appear in input or output types, each member ought to have its
/// own [`Binding`], so structs are simply covered by the third case.
///
/// [`Binding`]: crate::Binding
/// [`Location`]: crate::Binding::Location
/// [`interpolation`]: crate::Binding::Location::interpolation
/// [`Perspective`]: crate::Interpolation::Perspective
/// [`Flat`]: crate::Interpolation::Flat
/// [`Center`]: crate::Sampling::Center
pub fn apply_default_interpolation(&mut self, ty: &crate::TypeInner) {
if let crate::Binding::Location {
location: _,
interpolation: ref mut interpolation @ None,
ref mut sampling,
} = *self
{
match ty.scalar_kind() {
Some(crate::ScalarKind::Float) => {
*interpolation = Some(crate::Interpolation::Perspective);
*sampling = Some(crate::Sampling::Center);
}
Some(crate::ScalarKind::Sint) | Some(crate::ScalarKind::Uint) => {
*interpolation = Some(crate::Interpolation::Flat);
*sampling = None;
}
Some(_) | None => {}
}
}
}
}

8
third_party/rust/naga/src/front/mod.rs поставляемый
Просмотреть файл

@ -1,7 +1,5 @@
//! Parsers which load shaders into memory.
mod interpolator;
#[cfg(feature = "glsl-in")]
pub mod glsl;
#[cfg(feature = "spv-in")]
@ -10,7 +8,7 @@ pub mod spv;
pub mod wgsl;
use crate::{
arena::{Arena, Handle, UniqueArena},
arena::{Arena, Handle},
proc::{ResolveContext, ResolveError, TypeResolution},
};
use std::ops;
@ -38,7 +36,7 @@ impl Emitter {
let start_len = self.start_len.take().unwrap();
if start_len != arena.len() {
#[allow(unused_mut)]
let mut span = crate::span::Span::default();
let mut span = crate::span::Span::Unknown;
let range = arena.range_from(start_len);
#[cfg(feature = "span")]
for handle in range.clone() {
@ -81,7 +79,7 @@ impl Typifier {
pub fn get<'a>(
&'a self,
expr_handle: Handle<crate::Expression>,
types: &'a UniqueArena<crate::Type>,
types: &'a Arena<crate::Type>,
) -> &'a crate::TypeInner {
self.resolutions[expr_handle.index()].inner_with(types)
}

Просмотреть файл

@ -121,7 +121,6 @@ pub(super) fn map_builtin(word: spirv::Word) -> Result<crate::BuiltIn, Error> {
use spirv::BuiltIn as Bi;
Ok(match spirv::BuiltIn::from_u32(word) {
Some(Bi::Position) | Some(Bi::FragCoord) => crate::BuiltIn::Position,
Some(Bi::ViewIndex) => crate::BuiltIn::ViewIndex,
// vertex
Some(Bi::BaseInstance) => crate::BuiltIn::BaseInstance,
Some(Bi::BaseVertex) => crate::BuiltIn::BaseVertex,

Просмотреть файл

@ -41,8 +41,6 @@ pub enum Error {
UnsupportedControlFlow(spirv::Word),
#[error("unsupported binary operator %{0}")]
UnsupportedBinaryOperator(spirv::Word),
#[error("Naga supports OpTypeRuntimeArray in the StorageBuffer storage class only")]
UnsupportedRuntimeArrayStorageClass,
#[error("unknown binary operator {0:?}")]
UnknownBinaryOperator(spirv::Op),
#[error("unknown relational function {0:?}")]

1164
third_party/rust/naga/src/front/spv/flow.rs поставляемый Normal file

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,9 +1,6 @@
use crate::{
arena::{Arena, Handle},
front::spv::{BlockContext, BodyIndex},
};
use crate::arena::{Arena, Handle};
use super::{Error, Instruction, LookupExpression, LookupHelper as _};
use super::{flow::*, Error, Instruction, LookupExpression, LookupHelper as _};
use crate::front::Emitter;
pub type BlockId = u32;
@ -13,6 +10,44 @@ pub struct MergeInstruction {
pub merge_block_id: BlockId,
pub continue_block_id: Option<BlockId>,
}
/// Terminator instruction of a SPIR-V's block.
#[derive(Clone, Debug)]
#[allow(dead_code)]
pub enum Terminator {
///
Return {
value: Option<Handle<crate::Expression>>,
},
///
Branch { target_id: BlockId },
///
BranchConditional {
condition: Handle<crate::Expression>,
true_id: BlockId,
false_id: BlockId,
},
///
/// switch(SELECTOR) {
/// case TARGET_LITERAL#: {
/// TARGET_BLOCK#
/// }
/// default: {
/// DEFAULT
/// }
/// }
Switch {
///
selector: Handle<crate::Expression>,
/// Default block of the switch case.
default_id: BlockId,
/// Tuples of (literal, target block)
targets: Vec<(i32, BlockId)>,
},
/// Fragment shader discard
Kill,
///
Unreachable,
}
impl<I: Iterator<Item = u32>> super::Parser<I> {
// Registers a function call. It will generate a dummy handle to call, which
@ -80,17 +115,8 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
crate::Expression::FunctionArgument(i as u32),
self.span_from(start),
);
self.lookup_expression.insert(
id,
LookupExpression {
handle,
type_id,
// Setting this to an invalid id will cause get_expr_handle
// to default to the main body making sure no load/stores
// are added.
block_id: 0,
},
);
self.lookup_expression
.insert(id, LookupExpression { handle, type_id });
//Note: we redo the lookup in order to work around `self` borrowing
if type_id
@ -115,44 +141,33 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
// Read body
self.function_call_graph.add_node(fun_id);
let mut flow_graph = FlowGraph::new();
let mut parameters_sampling =
vec![super::image::SamplingFlags::empty(); fun.arguments.len()];
let mut block_ctx = BlockContext {
phis: Default::default(),
blocks: Default::default(),
body_for_label: Default::default(),
mergers: Default::default(),
bodies: Default::default(),
function_id: fun_id,
expressions: &mut fun.expressions,
local_arena: &mut fun.local_variables,
const_arena: &mut module.constants,
type_arena: &module.types,
global_arena: &module.global_variables,
arguments: &fun.arguments,
parameter_sampling: &mut parameters_sampling,
};
// Insert the main body whose parent is also himself
block_ctx.bodies.push(super::Body::with_parent(0));
// Scan the blocks and add them as nodes
loop {
let fun_inst = self.next_inst()?;
log::debug!("{:?}", fun_inst.op);
match fun_inst.op {
spirv::Op::Line => {
fun_inst.expect(4)?;
let _file_id = self.next()?;
let _row_id = self.next()?;
let _col_id = self.next()?;
}
spirv::Op::Label => {
// Read the label ID
fun_inst.expect(2)?;
let block_id = self.next()?;
self.next_block(block_id, &mut block_ctx)?;
let node = self.next_block(
block_id,
fun_id,
&mut fun.expressions,
&mut fun.local_variables,
&mut module.constants,
&module.types,
&module.global_variables,
&fun.arguments,
&mut parameters_sampling,
)?;
flow_graph.add_node(node);
}
spirv::Op::FunctionEnd => {
fun_inst.expect(1)?;
@ -164,121 +179,22 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
}
}
if let Some(ref prefix) = self.options.block_ctx_dump_prefix {
flow_graph.classify();
flow_graph.remove_phi_instructions(&self.lookup_expression);
if let Some(ref prefix) = self.options.flow_graph_dump_prefix {
let dump = flow_graph.to_graphviz().unwrap_or_default();
let dump_suffix = match self.lookup_entry_point.get(&fun_id) {
Some(ep) => format!("block_ctx.{:?}-{}.txt", ep.stage, ep.name),
None => format!("block_ctx.Fun-{}.txt", module.functions.len()),
Some(ep) => format!("flow.{:?}-{}.dot", ep.stage, ep.name),
None => format!("flow.Fun-{}.dot", module.functions.len()),
};
let dest = prefix.join(dump_suffix);
let dump = format!("{:#?}", block_ctx);
if let Err(e) = std::fs::write(&dest, dump) {
log::error!("Unable to dump the block context into {:?}: {}", dest, e);
log::error!("Unable to dump the flow graph into {:?}: {}", dest, e);
}
}
// Emit `Store` statements to properly initialize all the local variables we
// created for `phi` expressions.
//
// Note that get_expr_handle also contributes slightly odd entries to this table,
// to get the spill.
for phi in block_ctx.phis.iter() {
// Get a pointer to the local variable for the phi's value.
let phi_pointer = block_ctx.expressions.append(
crate::Expression::LocalVariable(phi.local),
crate::Span::default(),
);
// At the end of each of `phi`'s predecessor blocks, store the corresponding
// source value in the phi's local variable.
for &(source, predecessor) in phi.expressions.iter() {
let source_lexp = &self.lookup_expression[&source];
let predecessor_body_idx = block_ctx.body_for_label[&predecessor];
// If the expression is a global/argument it will have a 0 block
// id so we must use a default value instead of panicking
let source_body_idx = block_ctx
.body_for_label
.get(&source_lexp.block_id)
.copied()
.unwrap_or(0);
// If the Naga `Expression` generated for `source` is in scope, then we
// can simply store that in the phi's local variable.
//
// Otherwise, spill the source value to a local variable in the block that
// defines it. (We know this store dominates the predecessor; otherwise,
// the phi wouldn't have been able to refer to that source expression in
// the first place.) Then, the predecessor block can count on finding the
// source's value in that local variable.
let value = if super::is_parent(predecessor_body_idx, source_body_idx, &block_ctx) {
source_lexp.handle
} else {
// The source SPIR-V expression is not defined in the phi's
// predecessor block, nor is it a globally available expression. So it
// must be defined off in some other block that merely dominates the
// predecessor. This means that the corresponding Naga `Expression`
// may not be in scope in the predecessor block.
//
// In the block that defines `source`, spill it to a fresh local
// variable, to ensure we can still use it at the end of the
// predecessor.
let ty = self.lookup_type[&source_lexp.type_id].handle;
let local = block_ctx.local_arena.append(
crate::LocalVariable {
name: None,
ty,
init: None,
},
crate::Span::default(),
);
let pointer = block_ctx.expressions.append(
crate::Expression::LocalVariable(local),
crate::Span::default(),
);
// Get the spilled value of the source expression.
let start = block_ctx.expressions.len();
let expr = block_ctx
.expressions
.append(crate::Expression::Load { pointer }, crate::Span::default());
let range = block_ctx.expressions.range_from(start);
block_ctx
.blocks
.get_mut(&predecessor)
.unwrap()
.push(crate::Statement::Emit(range), crate::Span::default());
// At the end of the block that defines it, spill the source
// expression's value.
block_ctx
.blocks
.get_mut(&source_lexp.block_id)
.unwrap()
.push(
crate::Statement::Store {
pointer,
value: source_lexp.handle,
},
crate::Span::default(),
);
expr
};
// At the end of the phi predecessor block, store the source
// value in the phi's value.
block_ctx.blocks.get_mut(&predecessor).unwrap().push(
crate::Statement::Store {
pointer: phi_pointer,
value,
},
crate::Span::default(),
)
}
}
fun.body = block_ctx.lower();
fun.body = flow_graph.convert_to_naga()?;
// done
let fun_handle = module.functions.append(fun, self.span_from_with_op(start));
@ -306,10 +222,10 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Input(ref arg) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let span = module.global_variables.get_span(lvar.handle).clone();
let arg_expr = function.expressions.append(
crate::Expression::FunctionArgument(function.arguments.len() as u32),
span,
span.clone(),
);
let load_expr = if arg.ty == module.global_variables[lvar.handle].ty {
arg_expr
@ -324,16 +240,17 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
kind: crate::ScalarKind::Sint,
convert: Some(4),
},
span,
span.clone(),
);
function.body.extend(emitter.finish(&function.expressions));
handle
};
function.body.push(
crate::Statement::Store {
pointer: function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span),
pointer: function.expressions.append(
crate::Expression::GlobalVariable(lvar.handle),
span.clone(),
),
value: load_expr,
},
span,
@ -341,8 +258,13 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let mut arg = arg.clone();
if ep.stage == crate::ShaderStage::Fragment {
if let Some(ref mut binding) = arg.binding {
binding.apply_default_interpolation(&module.types[arg.ty].inner);
if let Some(crate::Binding::Location {
interpolation: ref mut interpolation @ None,
..
}) = arg.binding
{
*interpolation = Some(crate::Interpolation::Perspective);
// default
}
}
function.arguments.push(arg);
@ -357,7 +279,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
arguments: Vec::new(),
result: None,
},
crate::Span::default(),
crate::Span::Unknown,
);
// 3. copy the outputs from privates to the result
@ -366,26 +288,19 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
for &v_id in ep.variable_ids.iter() {
let lvar = self.lookup_variable.lookup(v_id)?;
if let super::Variable::Output(ref result) = lvar.inner {
let span = module.global_variables.get_span(lvar.handle);
let span = module.global_variables.get_span(lvar.handle).clone();
let expr_handle = function
.expressions
.append(crate::Expression::GlobalVariable(lvar.handle), span);
.append(crate::Expression::GlobalVariable(lvar.handle), span.clone());
match module.types[result.ty].inner {
crate::TypeInner::Struct {
members: ref sub_members,
..
} => {
for (index, sm) in sub_members.iter().enumerate() {
match sm.binding {
Some(crate::Binding::BuiltIn(builtin)) => {
// Cull unused builtins to preserve performances
if !self.builtin_usage.contains(&builtin) {
continue;
}
}
if sm.binding.is_none() {
// unrecognized binding, skip
None => continue,
_ => {}
continue;
}
members.push(sm.clone());
components.push(function.expressions.append(
@ -393,7 +308,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
base: expr_handle,
index: index as u32,
},
span,
span.clone(),
));
}
}
@ -420,26 +335,26 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let mut emitter = Emitter::default();
emitter.start(&function.expressions);
let global_expr = components[member_index];
let span = function.expressions.get_span(global_expr);
let span = function.expressions.get_span(global_expr).clone();
let access_expr = function.expressions.append(
crate::Expression::AccessIndex {
base: global_expr,
index: 1,
},
span,
span.clone(),
);
let load_expr = function.expressions.append(
crate::Expression::Load {
pointer: access_expr,
},
span,
span.clone(),
);
let neg_expr = function.expressions.append(
crate::Expression::Unary {
op: crate::UnaryOperator::Negate,
expr: load_expr,
},
span,
span.clone(),
);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
@ -460,7 +375,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let load_expr = crate::Expression::Load {
pointer: *component,
};
let span = function.expressions.get_span(*component);
let span = function.expressions.get_span(*component).clone();
*component = function.expressions.append(load_expr, span);
}
@ -468,7 +383,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
[] => {}
[member] => {
function.body.extend(emitter.finish(&function.expressions));
let span = function.expressions.get_span(components[0]);
let span = function.expressions.get_span(components[0]).clone();
function.body.push(
crate::Statement::Return {
value: components.first().cloned(),
@ -484,7 +399,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let span = crate::Span::total_span(
components.iter().map(|h| function.expressions.get_span(*h)),
);
let ty = module.types.insert(
let ty = module.types.append(
crate::Type {
name: None,
inner: crate::TypeInner::Struct {
@ -493,11 +408,11 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
span: 0xFFFF, // shouldn't matter
},
},
span,
span.clone(),
);
let result_expr = function
.expressions
.append(crate::Expression::Compose { ty, components }, span);
.append(crate::Expression::Compose { ty, components }, span.clone());
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
crate::Statement::Return {
@ -518,95 +433,8 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
});
}
module.apply_common_default_interpolation();
Ok(())
}
}
impl<'function> BlockContext<'function> {
/// Consumes the `BlockContext` producing a Ir [`Block`](crate::Block)
fn lower(mut self) -> crate::Block {
fn lower_impl(
blocks: &mut crate::FastHashMap<spirv::Word, crate::Block>,
bodies: &[super::Body],
body_idx: BodyIndex,
) -> crate::Block {
let mut block = crate::Block::new();
for item in bodies[body_idx].data.iter() {
match *item {
super::BodyFragment::BlockId(id) => block.append(blocks.get_mut(&id).unwrap()),
super::BodyFragment::If {
condition,
accept,
reject,
} => {
let accept = lower_impl(blocks, bodies, accept);
let reject = lower_impl(blocks, bodies, reject);
block.push(
crate::Statement::If {
condition,
accept,
reject,
},
crate::Span::default(),
)
}
super::BodyFragment::Loop { body, continuing } => {
let body = lower_impl(blocks, bodies, body);
let continuing = lower_impl(blocks, bodies, continuing);
block.push(
crate::Statement::Loop { body, continuing },
crate::Span::default(),
)
}
super::BodyFragment::Switch {
selector,
ref cases,
default,
} => {
let mut ir_cases: Vec<_> = cases
.iter()
.map(|&(value, body_idx)| {
let body = lower_impl(blocks, bodies, body_idx);
// Handle simple cases that would make a fallthrough statement unreachable code
let fall_through = body.last().map_or(true, |s| !s.is_terminator());
crate::SwitchCase {
value: crate::SwitchValue::Integer(value),
body,
fall_through,
}
})
.collect();
ir_cases.push(crate::SwitchCase {
value: crate::SwitchValue::Default,
body: lower_impl(blocks, bodies, default),
fall_through: false,
});
block.push(
crate::Statement::Switch {
selector,
cases: ir_cases,
},
crate::Span::default(),
)
}
super::BodyFragment::Break => {
block.push(crate::Statement::Break, crate::Span::default())
}
super::BodyFragment::Continue => {
block.push(crate::Statement::Continue, crate::Span::default())
}
}
}
block
}
lower_impl(&mut self.blocks, &self.bodies, 0)
}
}

239
third_party/rust/naga/src/front/spv/image.rs поставляемый
Просмотреть файл

@ -1,4 +1,7 @@
use crate::arena::{Arena, Handle, UniqueArena};
use crate::{
arena::{Arena, Handle},
FunctionArgument,
};
use super::{Error, LookupExpression, LookupHelper as _};
@ -18,14 +21,16 @@ bitflags::bitflags! {
}
}
impl<'function> super::BlockContext<'function> {
impl Arena<crate::Expression> {
fn get_image_expr_ty(
&self,
handle: Handle<crate::Expression>,
global_vars: &Arena<crate::GlobalVariable>,
arguments: &[FunctionArgument],
) -> Result<Handle<crate::Type>, Error> {
match self.expressions[handle] {
crate::Expression::GlobalVariable(handle) => Ok(self.global_arena[handle].ty),
crate::Expression::FunctionArgument(i) => Ok(self.arguments[i as usize].ty),
match self[handle] {
crate::Expression::GlobalVariable(handle) => Ok(global_vars[handle].ty),
crate::Expression::FunctionArgument(i) => Ok(arguments[i as usize].ty),
ref other => Err(Error::InvalidImageExpression(other.clone())),
}
}
@ -57,9 +62,10 @@ fn extract_image_coordinates(
extra_coordinate: ExtraCoordinate,
base: Handle<crate::Expression>,
coordinate_ty: Handle<crate::Type>,
ctx: &mut super::BlockContext,
type_arena: &Arena<crate::Type>,
expressions: &mut Arena<crate::Expression>,
) -> (Handle<crate::Expression>, Option<Handle<crate::Expression>>) {
let (given_size, kind) = match ctx.type_arena[coordinate_ty].inner {
let (given_size, kind) = match type_arena[coordinate_ty].inner {
crate::TypeInner::Scalar { kind, .. } => (None, kind),
crate::TypeInner::Vector { size, kind, .. } => (Some(size), kind),
ref other => unreachable!("Unexpected texture coordinate {:?}", other),
@ -67,14 +73,14 @@ fn extract_image_coordinates(
let required_size = image_dim.required_coordinate_size();
let required_ty = required_size.map(|size| {
ctx.type_arena
.get(&crate::Type {
name: None,
inner: crate::TypeInner::Vector {
size,
kind,
width: 4,
},
type_arena
.fetch_if(|ty| {
ty.inner
== crate::TypeInner::Vector {
size,
kind,
width: 4,
}
})
.expect("Required coordinate type should have been set up by `parse_type_image`!")
});
@ -83,33 +89,35 @@ fn extract_image_coordinates(
index: required_size.map_or(1, |size| size as u32),
};
let base_span = ctx.expressions.get_span(base);
let base_span = expressions.get_span(base).clone();
match extra_coordinate {
ExtraCoordinate::ArrayLayer => {
let extracted = match required_size {
None => ctx
.expressions
.append(crate::Expression::AccessIndex { base, index: 0 }, base_span),
None => expressions.append(
crate::Expression::AccessIndex { base, index: 0 },
base_span.clone(),
),
Some(size) => {
let mut components = Vec::with_capacity(size as usize);
for index in 0..size as u32 {
let comp = ctx
.expressions
.append(crate::Expression::AccessIndex { base, index }, base_span);
let comp = expressions.append(
crate::Expression::AccessIndex { base, index },
base_span.clone(),
);
components.push(comp);
}
ctx.expressions.append(
expressions.append(
crate::Expression::Compose {
ty: required_ty.unwrap(),
components,
},
base_span,
base_span.clone(),
)
}
};
let array_index_f32 = ctx.expressions.append(extra_expr, base_span);
let array_index = ctx.expressions.append(
let array_index_f32 = expressions.append(extra_expr, base_span.clone());
let array_index = expressions.append(
crate::Expression::As {
kind: crate::ScalarKind::Sint,
expr: array_index_f32,
@ -120,13 +128,14 @@ fn extract_image_coordinates(
(extracted, Some(array_index))
}
ExtraCoordinate::Projection => {
let projection = ctx.expressions.append(extra_expr, base_span);
let projection = expressions.append(extra_expr, base_span.clone());
let divided = match required_size {
None => {
let temp = ctx
.expressions
.append(crate::Expression::AccessIndex { base, index: 0 }, base_span);
ctx.expressions.append(
let temp = expressions.append(
crate::Expression::AccessIndex { base, index: 0 },
base_span.clone(),
);
expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Divide,
left: temp,
@ -138,20 +147,21 @@ fn extract_image_coordinates(
Some(size) => {
let mut components = Vec::with_capacity(size as usize);
for index in 0..size as u32 {
let temp = ctx
.expressions
.append(crate::Expression::AccessIndex { base, index }, base_span);
let comp = ctx.expressions.append(
let temp = expressions.append(
crate::Expression::AccessIndex { base, index },
base_span.clone(),
);
let comp = expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Divide,
left: temp,
right: projection,
},
base_span,
base_span.clone(),
);
components.push(comp);
}
ctx.expressions.append(
expressions.append(
crate::Expression::Compose {
ty: required_ty.unwrap(),
components,
@ -173,7 +183,7 @@ fn extract_image_coordinates(
pattern: [Sc::X, Sc::Y, Sc::Z, Sc::W],
},
};
(ctx.expressions.append(cut_expr, base_span), None)
(expressions.append(cut_expr, base_span), None)
}
}
}
@ -181,7 +191,7 @@ fn extract_image_coordinates(
pub(super) fn patch_comparison_type(
flags: SamplingFlags,
var: &mut crate::GlobalVariable,
arena: &mut UniqueArena<crate::Type>,
arena: &mut Arena<crate::Type>,
) -> bool {
if !flags.contains(SamplingFlags::COMPARISON) {
return true;
@ -192,7 +202,7 @@ pub(super) fn patch_comparison_type(
log::debug!("Flipping comparison for {:?}", var);
let original_ty = &arena[var.ty];
let original_ty_span = arena.get_span(var.ty);
let original_ty_span = arena.get_span(var.ty).clone();
let ty_inner = match original_ty.inner {
crate::TypeInner::Image {
class: crate::ImageClass::Sampled { multi, .. },
@ -208,7 +218,7 @@ pub(super) fn patch_comparison_type(
};
let name = original_ty.name.clone();
var.ty = arena.insert(
var.ty = arena.append(
crate::Type {
name,
inner: ty_inner,
@ -236,7 +246,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
Ok(())
}
pub(super) fn parse_image_uncouple(&mut self, block_id: spirv::Word) -> Result<(), Error> {
pub(super) fn parse_image_uncouple(&mut self) -> Result<(), Error> {
let result_type_id = self.next()?;
let result_id = self.next()?;
let sampled_image_id = self.next()?;
@ -245,7 +255,6 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
LookupExpression {
handle: self.lookup_sampled_image.lookup(sampled_image_id)?.image,
type_id: result_type_id,
block_id,
},
);
Ok(())
@ -254,10 +263,10 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
pub(super) fn parse_image_write(
&mut self,
words_left: u16,
ctx: &mut super::BlockContext,
emitter: &mut crate::front::Emitter,
block: &mut crate::Block,
body_idx: usize,
type_arena: &Arena<crate::Type>,
global_arena: &Arena<crate::GlobalVariable>,
arguments: &[FunctionArgument],
expressions: &mut Arena<crate::Expression>,
) -> Result<crate::Statement, Error> {
let image_id = self.next()?;
let coordinate_id = self.next()?;
@ -274,13 +283,11 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
}
let image_lexp = self.lookup_expression.lookup(image_id)?;
let image_ty = ctx.get_image_expr_ty(image_lexp.handle)?;
let image_ty = expressions.get_image_expr_ty(image_lexp.handle, global_arena, arguments)?;
let coord_lexp = self.lookup_expression.lookup(coordinate_id)?;
let coord_handle =
self.get_expr_handle(coordinate_id, coord_lexp, ctx, emitter, block, body_idx);
let coord_type_handle = self.lookup_type.lookup(coord_lexp.type_id)?.handle;
let (coordinate, array_index) = match ctx.type_arena[image_ty].inner {
let (coordinate, array_index) = match type_arena[image_ty].inner {
crate::TypeInner::Image {
dim,
arrayed,
@ -292,32 +299,31 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
} else {
ExtraCoordinate::Garbage
},
coord_handle,
coord_lexp.handle,
coord_type_handle,
ctx,
type_arena,
expressions,
),
_ => return Err(Error::InvalidImage(image_ty)),
};
let value_lexp = self.lookup_expression.lookup(value_id)?;
let value = self.get_expr_handle(value_id, value_lexp, ctx, emitter, block, body_idx);
Ok(crate::Statement::ImageStore {
image: image_lexp.handle,
coordinate,
array_index,
value,
value: value_lexp.handle,
})
}
pub(super) fn parse_image_load(
&mut self,
mut words_left: u16,
ctx: &mut super::BlockContext,
emitter: &mut crate::front::Emitter,
block: &mut crate::Block,
block_id: spirv::Word,
body_idx: usize,
type_arena: &Arena<crate::Type>,
global_arena: &Arena<crate::GlobalVariable>,
arguments: &[FunctionArgument],
expressions: &mut Arena<crate::Expression>,
) -> Result<(), Error> {
let start = self.data_offset;
let result_type_id = self.next()?;
@ -338,9 +344,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
match spirv::ImageOperands::from_bits_truncate(bit) {
spirv::ImageOperands::LOD => {
let lod_expr = self.next()?;
let lod_lexp = self.lookup_expression.lookup(lod_expr)?;
let lod_handle =
self.get_expr_handle(lod_expr, lod_lexp, ctx, emitter, block, body_idx);
let lod_handle = self.lookup_expression.lookup(lod_expr)?.handle;
index = Some(lod_handle);
words_left -= 1;
}
@ -361,16 +365,12 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
image_ops ^= bit;
}
// No need to call get_expr_handle here since only globals/arguments are
// allowed as images and they are always in the root scope
let image_lexp = self.lookup_expression.lookup(image_id)?;
let image_ty = ctx.get_image_expr_ty(image_lexp.handle)?;
let image_ty = expressions.get_image_expr_ty(image_lexp.handle, global_arena, arguments)?;
let coord_lexp = self.lookup_expression.lookup(coordinate_id)?;
let coord_handle =
self.get_expr_handle(coordinate_id, coord_lexp, ctx, emitter, block, body_idx);
let coord_type_handle = self.lookup_type.lookup(coord_lexp.type_id)?.handle;
let (coordinate, array_index) = match ctx.type_arena[image_ty].inner {
let (coordinate, array_index) = match type_arena[image_ty].inner {
crate::TypeInner::Image {
dim,
arrayed,
@ -382,9 +382,10 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
} else {
ExtraCoordinate::Garbage
},
coord_handle,
coord_lexp.handle,
coord_type_handle,
ctx,
type_arena,
expressions,
),
_ => return Err(Error::InvalidImage(image_ty)),
};
@ -398,9 +399,8 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx.expressions.append(expr, self.span_from_with_op(start)),
handle: expressions.append(expr, self.span_from_with_op(start)),
type_id: result_type_id,
block_id,
},
);
Ok(())
@ -411,11 +411,11 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
&mut self,
mut words_left: u16,
options: SamplingOptions,
ctx: &mut super::BlockContext,
emitter: &mut crate::front::Emitter,
block: &mut crate::Block,
block_id: spirv::Word,
body_idx: usize,
type_arena: &Arena<crate::Type>,
global_arena: &Arena<crate::GlobalVariable>,
arguments: &[FunctionArgument],
expressions: &mut Arena<crate::Expression>,
parameters_sampling: &mut [SamplingFlags],
) -> Result<(), Error> {
let start = self.data_offset;
let result_type_id = self.next()?;
@ -442,17 +442,13 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
match spirv::ImageOperands::from_bits_truncate(bit) {
spirv::ImageOperands::BIAS => {
let bias_expr = self.next()?;
let bias_lexp = self.lookup_expression.lookup(bias_expr)?;
let bias_handle =
self.get_expr_handle(bias_expr, bias_lexp, ctx, emitter, block, body_idx);
let bias_handle = self.lookup_expression.lookup(bias_expr)?.handle;
level = crate::SampleLevel::Bias(bias_handle);
words_left -= 1;
}
spirv::ImageOperands::LOD => {
let lod_expr = self.next()?;
let lod_lexp = self.lookup_expression.lookup(lod_expr)?;
let lod_handle =
self.get_expr_handle(lod_expr, lod_lexp, ctx, emitter, block, body_idx);
let lod_handle = self.lookup_expression.lookup(lod_expr)?.handle;
level = if options.compare {
log::debug!("Assuming {:?} is zero", lod_handle);
crate::SampleLevel::Zero
@ -463,25 +459,9 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
}
spirv::ImageOperands::GRAD => {
let grad_x_expr = self.next()?;
let grad_x_lexp = self.lookup_expression.lookup(grad_x_expr)?;
let grad_x_handle = self.get_expr_handle(
grad_x_expr,
grad_x_lexp,
ctx,
emitter,
block,
body_idx,
);
let grad_x_handle = self.lookup_expression.lookup(grad_x_expr)?.handle;
let grad_y_expr = self.next()?;
let grad_y_lexp = self.lookup_expression.lookup(grad_y_expr)?;
let grad_y_handle = self.get_expr_handle(
grad_y_expr,
grad_y_lexp,
ctx,
emitter,
block,
body_idx,
);
let grad_y_handle = self.lookup_expression.lookup(grad_y_expr)?.handle;
level = if options.compare {
log::debug!(
"Assuming gradients {:?} and {:?} are not greater than 1",
@ -516,8 +496,6 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let si_lexp = self.lookup_sampled_image.lookup(sampled_image_id)?;
let coord_lexp = self.lookup_expression.lookup(coordinate_id)?;
let coord_handle =
self.get_expr_handle(coordinate_id, coord_lexp, ctx, emitter, block, body_idx);
let coord_type_handle = self.lookup_type.lookup(coord_lexp.type_id)?.handle;
let sampling_bit = if options.compare {
@ -526,31 +504,31 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
SamplingFlags::REGULAR
};
let image_ty = match ctx.expressions[si_lexp.image] {
let image_ty = match expressions[si_lexp.image] {
crate::Expression::GlobalVariable(handle) => {
if let Some(flags) = self.handle_sampling.get_mut(&handle) {
*flags |= sampling_bit;
}
ctx.global_arena[handle].ty
global_arena[handle].ty
}
crate::Expression::FunctionArgument(i) => {
ctx.parameter_sampling[i as usize] |= sampling_bit;
ctx.arguments[i as usize].ty
parameters_sampling[i as usize] |= sampling_bit;
arguments[i as usize].ty
}
ref other => return Err(Error::InvalidGlobalVar(other.clone())),
};
match ctx.expressions[si_lexp.sampler] {
match expressions[si_lexp.sampler] {
crate::Expression::GlobalVariable(handle) => {
*self.handle_sampling.get_mut(&handle).unwrap() |= sampling_bit
}
crate::Expression::FunctionArgument(i) => {
ctx.parameter_sampling[i as usize] |= sampling_bit;
parameters_sampling[i as usize] |= sampling_bit;
}
ref other => return Err(Error::InvalidGlobalVar(other.clone())),
}
let ((coordinate, array_index), depth_ref) = match ctx.type_arena[image_ty].inner {
let ((coordinate, array_index), depth_ref) = match type_arena[image_ty].inner {
crate::TypeInner::Image {
dim,
arrayed,
@ -565,33 +543,32 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
} else {
ExtraCoordinate::Garbage
},
coord_handle,
coord_lexp.handle,
coord_type_handle,
ctx,
type_arena,
expressions,
),
{
match dref_id {
Some(id) => {
let expr_lexp = self.lookup_expression.lookup(id)?;
let mut expr =
self.get_expr_handle(id, expr_lexp, ctx, emitter, block, body_idx);
let mut expr = self.lookup_expression.lookup(id)?.handle;
if options.project {
let required_size = dim.required_coordinate_size();
let right = ctx.expressions.append(
let right = expressions.append(
crate::Expression::AccessIndex {
base: coord_handle,
base: coord_lexp.handle,
index: required_size.map_or(1, |size| size as u32),
},
crate::Span::default(),
crate::Span::Unknown,
);
expr = ctx.expressions.append(
expr = expressions.append(
crate::Expression::Binary {
op: crate::BinaryOperator::Divide,
left: expr,
right,
},
crate::Span::default(),
crate::Span::Unknown,
)
};
Some(expr)
@ -615,9 +592,8 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx.expressions.append(expr, self.span_from_with_op(start)),
handle: expressions.append(expr, self.span_from_with_op(start)),
type_id: result_type_id,
block_id,
},
);
Ok(())
@ -626,11 +602,7 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
pub(super) fn parse_image_query_size(
&mut self,
at_level: bool,
ctx: &mut super::BlockContext,
emitter: &mut crate::front::Emitter,
block: &mut crate::Block,
block_id: spirv::Word,
body_idx: usize,
expressions: &mut Arena<crate::Expression>,
) -> Result<(), Error> {
let start = self.data_offset;
let result_type_id = self.next()?;
@ -639,13 +611,11 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
let level = if at_level {
let level_id = self.next()?;
let level_lexp = self.lookup_expression.lookup(level_id)?;
Some(self.get_expr_handle(level_id, level_lexp, ctx, emitter, block, body_idx))
Some(level_lexp.handle)
} else {
None
};
// No need to call get_expr_handle here since only globals/arguments are
// allowed as images and they are always in the root scope
//TODO: handle arrays and cubes
let image_lexp = self.lookup_expression.lookup(image_id)?;
@ -656,9 +626,8 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
self.lookup_expression.insert(
result_id,
LookupExpression {
handle: ctx.expressions.append(expr, self.span_from_with_op(start)),
handle: expressions.append(expr, self.span_from_with_op(start)),
type_id: result_type_id,
block_id,
},
);
Ok(())
@ -668,15 +637,12 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
&mut self,
query: crate::ImageQuery,
expressions: &mut Arena<crate::Expression>,
block_id: spirv::Word,
) -> Result<(), Error> {
let start = self.data_offset;
let result_type_id = self.next()?;
let result_id = self.next()?;
let image_id = self.next()?;
// No need to call get_expr_handle here since only globals/arguments are
// allowed as images and they are always in the root scope
let image_lexp = self.lookup_expression.lookup(image_id)?.clone();
let expr = crate::Expression::ImageQuery {
@ -688,7 +654,6 @@ impl<I: Iterator<Item = u32>> super::Parser<I> {
LookupExpression {
handle: expressions.append(expr, self.span_from_with_op(start)),
type_id: result_type_id,
block_id,
},
);
Ok(())

1666
third_party/rust/naga/src/front/spv/mod.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

38
third_party/rust/naga/src/front/spv/null.rs поставляемый
Просмотреть файл

@ -1,5 +1,5 @@
use super::Error;
use crate::arena::{Arena, Handle, UniqueArena};
use crate::arena::{Arena, Handle};
fn make_scalar_inner(kind: crate::ScalarKind, width: crate::Bytes) -> crate::ConstantInner {
crate::ConstantInner::Scalar {
@ -15,7 +15,7 @@ fn make_scalar_inner(kind: crate::ScalarKind, width: crate::Bytes) -> crate::Con
pub fn generate_null_constant(
ty: Handle<crate::Type>,
type_arena: &UniqueArena<crate::Type>,
type_arena: &Arena<crate::Type>,
constant_arena: &mut Arena<crate::Constant>,
span: crate::Span,
) -> Result<crate::ConstantInner, Error> {
@ -30,7 +30,7 @@ pub fn generate_null_constant(
specialization: None,
inner: make_scalar_inner(kind, width),
},
span,
span.clone(),
));
}
crate::ConstantInner::Composite { ty, components }
@ -42,16 +42,17 @@ pub fn generate_null_constant(
} => {
// If we successfully declared a matrix type, we have declared a vector type for it too.
let vector_ty = type_arena
.get(&crate::Type {
name: None,
inner: crate::TypeInner::Vector {
kind: crate::ScalarKind::Float,
size: rows,
width,
},
.fetch_if(|t| {
t.inner
== crate::TypeInner::Vector {
kind: crate::ScalarKind::Float,
size: rows,
width,
}
})
.unwrap();
let vector_inner = generate_null_constant(vector_ty, type_arena, constant_arena, span)?;
let vector_inner =
generate_null_constant(vector_ty, type_arena, constant_arena, span.clone())?;
let vector_handle = constant_arena.fetch_or_append(
crate::Constant {
name: None,
@ -70,14 +71,15 @@ pub fn generate_null_constant(
// copy out the types to avoid borrowing `members`
let member_tys = members.iter().map(|member| member.ty).collect::<Vec<_>>();
for member_ty in member_tys {
let inner = generate_null_constant(member_ty, type_arena, constant_arena, span)?;
let inner =
generate_null_constant(member_ty, type_arena, constant_arena, span.clone())?;
components.push(constant_arena.fetch_or_append(
crate::Constant {
name: None,
specialization: None,
inner,
},
span,
span.clone(),
));
}
crate::ConstantInner::Composite { ty, components }
@ -90,7 +92,7 @@ pub fn generate_null_constant(
let size = constant_arena[handle]
.to_array_length()
.ok_or(Error::InvalidArraySize(handle))?;
let inner = generate_null_constant(base, type_arena, constant_arena, span)?;
let inner = generate_null_constant(base, type_arena, constant_arena, span.clone())?;
let value = constant_arena.fetch_or_append(
crate::Constant {
name: None,
@ -116,7 +118,7 @@ pub fn generate_null_constant(
pub fn generate_default_built_in(
built_in: Option<crate::BuiltIn>,
ty: Handle<crate::Type>,
type_arena: &UniqueArena<crate::Type>,
type_arena: &Arena<crate::Type>,
constant_arena: &mut Arena<crate::Constant>,
span: crate::Span,
) -> Result<Handle<crate::Constant>, Error> {
@ -131,7 +133,7 @@ pub fn generate_default_built_in(
width: 4,
},
},
span,
span.clone(),
);
let one = constant_arena.fetch_or_append(
crate::Constant {
@ -142,7 +144,7 @@ pub fn generate_default_built_in(
width: 4,
},
},
span,
span.clone(),
);
crate::ConstantInner::Composite {
ty,
@ -162,7 +164,7 @@ pub fn generate_default_built_in(
width: 4,
},
//Note: `crate::BuiltIn::ClipDistance` is intentionally left for the default path
_ => generate_null_constant(ty, type_arena, constant_arena, span)?,
_ => generate_null_constant(ty, type_arena, constant_arena, span.clone())?,
};
Ok(constant_arena.fetch_or_append(
crate::Constant {

15
third_party/rust/naga/src/front/wgsl/conv.rs поставляемый
Просмотреть файл

@ -20,7 +20,6 @@ pub fn map_built_in(word: &str, span: Span) -> Result<crate::BuiltIn, Error<'_>>
// vertex
"vertex_index" => crate::BuiltIn::VertexIndex,
"instance_index" => crate::BuiltIn::InstanceIndex,
"view_index" => crate::BuiltIn::ViewIndex,
// fragment
"front_facing" => crate::BuiltIn::FrontFacing,
"frag_depth" => crate::BuiltIn::FragDepth,
@ -199,20 +198,6 @@ pub fn map_standard_fun(word: &str) -> Option<crate::MathFunction> {
// bits
"countOneBits" => Mf::CountOneBits,
"reverseBits" => Mf::ReverseBits,
"extractBits" => Mf::ExtractBits,
"insertBits" => Mf::InsertBits,
// data packing
"pack4x8snorm" => Mf::Pack4x8snorm,
"pack4x8unorm" => Mf::Pack4x8unorm,
"pack2x16snorm" => Mf::Pack2x16snorm,
"pack2x16unorm" => Mf::Pack2x16unorm,
"pack2x16float" => Mf::Pack2x16float,
// data unpacking
"unpack4x8snorm" => Mf::Unpack4x8snorm,
"unpack4x8unorm" => Mf::Unpack4x8unorm,
"unpack2x16snorm" => Mf::Unpack2x16snorm,
"unpack2x16unorm" => Mf::Unpack2x16unorm,
"unpack2x16float" => Mf::Unpack2x16float,
_ => return None,
})
}

27
third_party/rust/naga/src/front/wgsl/lexer.rs поставляемый
Просмотреть файл

@ -336,7 +336,7 @@ fn consume_token(mut input: &str, generic: bool) -> (Token<'_>, &str) {
}
}
'0'..='9' => consume_number(input),
'a'..='z' | 'A'..='Z' => {
'a'..='z' | 'A'..='Z' | '_' => {
let (word, rest) = consume_any(input, |c| c.is_ascii_alphanumeric() || c == '_');
(Token::Word(word), rest)
}
@ -558,24 +558,24 @@ impl<'a> Lexer<'a> {
Ok(pair)
}
pub(super) fn next_storage_access(&mut self) -> Result<crate::StorageAccess, Error<'a>> {
let (ident, span) = self.next_ident_with_span()?;
match ident {
"read" => Ok(crate::StorageAccess::LOAD),
"write" => Ok(crate::StorageAccess::STORE),
"read_write" => Ok(crate::StorageAccess::LOAD | crate::StorageAccess::STORE),
_ => Err(Error::UnknownAccess(span)),
}
}
// TODO relocate storage texture specifics
pub(super) fn next_format_generic(
&mut self,
) -> Result<(crate::StorageFormat, crate::StorageAccess), Error<'a>> {
self.expect(Token::Paren('<'))?;
let (ident, ident_span) = self.next_ident_with_span()?;
let format = conv::map_storage_format(ident, ident_span)?;
self.expect(Token::Separator(','))?;
let access = self.next_storage_access()?;
let access = if self.skip(Token::Separator(',')) {
let (raw, span) = self.next_ident_with_span()?;
match raw {
"read" => crate::StorageAccess::LOAD,
"write" => crate::StorageAccess::STORE,
"read_write" => crate::StorageAccess::all(),
_ => return Err(Error::UnknownAccess(span)),
}
} else {
crate::StorageAccess::LOAD
};
self.expect(Token::Paren('>'))?;
Ok((format, access))
}
@ -655,7 +655,6 @@ fn test_tokens() {
);
sub_test("No¾", &[Token::Word("No"), Token::Unknown('¾')]);
sub_test("No好", &[Token::Word("No"), Token::Unknown('好')]);
sub_test("_No", &[Token::Unknown('_'), Token::Word("No")]);
sub_test("\"\u{2}ПЀ\u{0}\"", &[Token::String("\u{2}ПЀ\u{0}")]); // https://github.com/gfx-rs/naga/issues/90
}

982
third_party/rust/naga/src/front/wgsl/mod.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -69,6 +69,36 @@ pub fn get_f32_literal(word: &str, span: Span) -> Result<f32, Error<'_>> {
parsed_val.map_err(|e| Error::BadFloat(span, e))
}
pub(super) fn parse_sint_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,
) -> Result<i32, Error<'a>> {
let token_span = lexer.next();
if width != 4 {
// Only 32-bit literals supported by the spec and naga for now!
return Err(Error::BadScalarWidth(token_span.1, width));
}
match token_span {
(
Token::Number {
value,
ty: NumberType::Sint,
width: token_width,
},
span,
) if token_width.unwrap_or(4) == width => get_i32_literal(value, span),
other => Err(Error::Unexpected(
other,
ExpectedToken::Number {
ty: Some(NumberType::Sint),
width: Some(width),
},
)),
}
}
pub(super) fn _parse_uint_literal<'a>(
lexer: &mut Lexer<'a>,
width: Bytes,

Просмотреть файл

@ -92,7 +92,7 @@ fn parse_types() {
parse_str("var t: texture_cube_array<i32>;").unwrap();
parse_str("var t: texture_multisampled_2d<u32>;").unwrap();
parse_str("var t: texture_storage_1d<rgba8uint,write>;").unwrap();
parse_str("var t: texture_storage_3d<r32float,read>;").unwrap();
parse_str("var t: texture_storage_3d<r32float>;").unwrap();
}
#[test]
@ -305,7 +305,7 @@ fn parse_texture_load() {
.unwrap();
parse_str(
"
var t: texture_storage_1d_array<r32float,read>;
var t: texture_storage_1d_array<r32float>;
fn foo() {
let r: vec4<f32> = textureLoad(t, 10, 2);
}

149
third_party/rust/naga/src/lib.rs поставляемый
Просмотреть файл

@ -44,15 +44,6 @@ and compound expressions refer to their sub-expressions via `Handle<Expression>`
values. (When examining the serialized form of a `Module`, note that the first
element of an `Arena` has an index of 1, not 0.)
A [`UniqueArena`] is just like an `Arena`, except that it stores only a single
instance of each value. The value type must implement `Eq` and `Hash`. Like an
`Arena`, inserting a value into a `UniqueArena` returns a `Handle` which can be
used to efficiently access the value, without a hash lookup. Inserting a value
multiple times returns the same `Handle`.
If the `span` feature is enabled, both `Arena` and `UniqueArena` can associate a
source code span with each element.
## Function Calls
Naga's representation of function calls is unusual. Most languages treat
@ -96,13 +87,9 @@ Naga's rules for when `Expression`s are evaluated are as follows:
a pointer. Such global variables hold opaque types like shaders or
images, and cannot be assigned to.
- A [`CallResult`] expression that is the `result` of a [`Statement::Call`],
representing the call's return value, is evaluated when the `Call` statement
is executed.
- Similarly, an [`AtomicResult`] expression that is the `result` of an
[`Atomic`] statement, representing the result of the atomic operation, is
evaluated when the `Atomic` statement is executed.
- A [`Call`](Expression::CallResult) expression that is the `result` of a
[`Statement::Call`], representing the call's return value, is evaluated when
the `Call` statement is executed.
- All other expressions are evaluated when the (unique) [`Statement::Emit`]
statement that covers them is executed. The [`Expression::needs_pre_emit`]
@ -155,17 +142,15 @@ An expression's scope is defined as follows:
subsequent expressions in that `Emit`, the subsequent statements in the `Block`
to which that `Emit` belongs (if any) and their sub-statements (if any).
- The `result` expression of a [`Call`] or [`Atomic`] statement has a scope
covering the subsequent statements in the `Block` in which the statement
occurs (if any) and their sub-statements (if any).
- If a [`Call`] statement has a `result` expression, then that expression's
scope covers the subsequent statements in the `Block` to which that `Call`
belongs (if any) and their sub-statements (if any).
For example, this implies that an expression evaluated by some statement in a
nested `Block` is not available in the `Block`'s parents. Such a value would
need to be stored in a local variable to be carried upwards in the statement
tree.
[`AtomicResult`]: Expression::AtomicResult
[`CallResult`]: Expression::CallResult
[`Constant`]: Expression::Constant
[`Derivative`]: Expression::Derivative
[`FunctionArgument`]: Expression::FunctionArgument
@ -175,7 +160,6 @@ tree.
[`Load`]: Expression::Load
[`LocalVariable`]: Expression::LocalVariable
[`Atomic`]: Statement::Atomic
[`Call`]: Statement::Call
[`Emit`]: Statement::Emit
[`Store`]: Statement::Store
@ -193,7 +177,6 @@ tree.
clippy::unneeded_field_pattern,
clippy::match_like_matches_macro,
clippy::manual_strip,
clippy::if_same_then_else,
clippy::unknown_clippy_lints,
)]
#![warn(
@ -213,9 +196,14 @@ pub mod proc;
mod span;
pub mod valid;
pub use crate::arena::{Arena, Handle, Range, UniqueArena};
pub use crate::arena::{Arena, Handle, Range};
pub use crate::span::{Span, SpanContext, WithSpan};
use std::{
collections::{HashMap, HashSet},
hash::BuildHasherDefault,
};
pub use crate::span::Span;
#[cfg(feature = "deserialize")]
use serde::Deserialize;
#[cfg(feature = "serialize")]
@ -225,9 +213,9 @@ use serde::Serialize;
pub const BOOL_WIDTH: Bytes = 1;
/// Hash map that is faster but not resilient to DoS attacks.
pub type FastHashMap<K, T> = rustc_hash::FxHashMap<K, T>;
pub type FastHashMap<K, T> = HashMap<K, T, BuildHasherDefault<fxhash::FxHasher>>;
/// Hash set that is faster but not resilient to DoS attacks.
pub type FastHashSet<K> = rustc_hash::FxHashSet<K>;
pub type FastHashSet<K> = HashSet<K, BuildHasherDefault<fxhash::FxHasher>>;
/// Map of expressions that have associated variable names
pub(crate) type NamedExpressions = FastHashMap<Handle<Expression>, String>;
@ -312,7 +300,6 @@ pub enum StorageClass {
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub enum BuiltIn {
Position,
ViewIndex,
// vertex
BaseInstance,
BaseVertex,
@ -417,7 +404,7 @@ pub enum Sampling {
/// Member of a user-defined structure.
// Clone is used only for error reporting and is not intended for end users
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub struct StructMember {
@ -536,7 +523,7 @@ pub enum ImageClass {
}
/// A data type declared in the module.
#[derive(Debug, Eq, Hash, PartialEq)]
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub struct Type {
@ -547,7 +534,7 @@ pub struct Type {
}
/// Enum with additional information, depending on the kind of type.
#[derive(Debug, Eq, Hash, PartialEq)]
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub enum TypeInner {
@ -569,10 +556,6 @@ pub enum TypeInner {
Atomic { kind: ScalarKind, width: Bytes },
/// Pointer to another type.
///
/// Pointers to scalars and vectors should be treated as equivalent to
/// [`ValuePointer`] types. Use the [`TypeInner::equivalent`] method to
/// compare types in a way that treats pointers correctly.
///
/// ## Pointers to non-`SIZED` types
///
/// The `base` type of a pointer may be a non-[`SIZED`] type like a
@ -590,26 +573,13 @@ pub enum TypeInner {
/// [`DATA`]: valid::TypeFlags::DATA
/// [`Array`]: TypeInner::Array
/// [`Struct`]: TypeInner::Struct
/// [`ValuePointer`]: TypeInner::ValuePointer
/// [`GlobalVariable`]: Expression::GlobalVariable
/// [`AccessIndex`]: Expression::AccessIndex
Pointer {
base: Handle<Type>,
class: StorageClass,
},
/// Pointer to a scalar or vector.
///
/// A `ValuePointer` type is equivalent to a `Pointer` whose `base` is a
/// `Scalar` or `Vector` type. This is for use in [`TypeResolution::Value`]
/// variants; see the documentation for [`TypeResolution`] for details.
///
/// Use the [`TypeInner::equivalent`] method to compare types that could be
/// pointers, to ensure that `Pointer` and `ValuePointer` types are
/// recognized as equivalent.
///
/// [`TypeResolution`]: proc::TypeResolution
/// [`TypeResolution::Value`]: proc::TypeResolution::Value
/// Pointer to a value.
ValuePointer {
size: Option<VectorSize>,
kind: ScalarKind,
@ -720,29 +690,13 @@ pub enum ConstantInner {
}
/// Describes how an input/output variable is to be bound.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
#[derive(Clone, Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub enum Binding {
/// Built-in shader variable.
BuiltIn(BuiltIn),
/// Indexed location.
///
/// Values passed from the [`Vertex`] stage to the [`Fragment`] stage must
/// have their `interpolation` defaulted (i.e. not `None`) by the front end
/// as appropriate for that language.
///
/// For other stages, we permit interpolations even though they're ignored.
/// When a front end is parsing a struct type, it usually doesn't know what
/// stages will be using it for IO, so it's easiest if it can apply the
/// defaults to anything with a `Location` binding, just in case.
///
/// For anything other than floating-point scalars and vectors, the
/// interpolation must be `Flat`.
///
/// [`Vertex`]: crate::ShaderStage::Vertex
/// [`Fragment`]: crate::ShaderStage::Fragment
Location {
location: u32,
interpolation: Option<Interpolation>,
@ -809,7 +763,6 @@ pub enum BinaryOperator {
Subtract,
Multiply,
Divide,
/// Equivalent of the WGSL's `%` operator or SPIR-V's `OpFRem`
Modulo,
Equal,
NotEqual,
@ -931,20 +884,6 @@ pub enum MathFunction {
// bits
CountOneBits,
ReverseBits,
ExtractBits,
InsertBits,
// data packing
Pack4x8snorm,
Pack4x8unorm,
Pack2x16snorm,
Pack2x16unorm,
Pack2x16float,
// data unpacking
Unpack4x8snorm,
Unpack4x8unorm,
Unpack2x16snorm,
Unpack2x16unorm,
Unpack2x16float,
}
/// Sampling modifier to control the level of detail.
@ -1030,12 +969,17 @@ pub enum Expression {
/// Indexing a [`Vector`] or [`Array`] produces a value of its element type.
/// Indexing a [`Matrix`] produces a [`Vector`].
///
/// Indexing a [`Pointer`] to any of the above produces a pointer to the
/// element/component type, in the same [`class`]. In the case of [`Array`],
/// the result is an actual [`Pointer`], but for vectors and matrices, there
/// may not be any type in the arena representing the component's type, so
/// those produce [`ValuePointer`] types equivalent to the appropriate
/// [`Pointer`].
/// Indexing a [`Pointer`] to an [`Array`] produces a [`Pointer`] to its
/// `base` type, taking on the `Pointer`'s storage class.
///
/// Indexing a [`Pointer`] to a [`Vector`] produces a [`ValuePointer`] whose
/// size is `None`, taking on the [`Vector`]'s scalar kind and width and the
/// [`Pointer`]'s storage class.
///
/// Indexing a [`Pointer`] to a [`Matrix`] produces a [`ValuePointer`] for a
/// column of the matrix: its size is the matrix's height, its `kind` is
/// [`Float`], and it inherits the [`Matrix`]'s width and the [`Pointer`]'s
/// storage class.
///
/// ## Dynamic indexing restrictions
///
@ -1060,7 +1004,6 @@ pub enum Expression {
/// [`Matrix`]: TypeInner::Matrix
/// [`Array`]: TypeInner::Array
/// [`Pointer`]: TypeInner::Pointer
/// [`class`]: TypeInner::Pointer::class
/// [`ValuePointer`]: TypeInner::ValuePointer
/// [`Float`]: ScalarKind::Float
Access {
@ -1266,7 +1209,6 @@ pub enum Expression {
arg: Handle<Expression>,
arg1: Option<Handle<Expression>>,
arg2: Option<Handle<Expression>>,
arg3: Option<Handle<Expression>>,
},
/// Cast a simple type to another kind.
As {
@ -1296,16 +1238,6 @@ pub enum Expression {
pub use block::Block;
/// The value of the switch case
// Clone is used only for error reporting and is not intended for end users
#[derive(Clone, Debug)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub enum SwitchValue {
Integer(i32),
Default,
}
/// A case for a switch statement.
// Clone is used only for error reporting and is not intended for end users
#[derive(Clone, Debug)]
@ -1313,8 +1245,8 @@ pub enum SwitchValue {
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub struct SwitchCase {
/// Value, upon which the case is considered true.
pub value: SwitchValue,
/// Body of the case.
pub value: i32,
/// Body of the cae.
pub body: Block,
/// If true, the control flow continues to the next case in the list,
/// or default.
@ -1346,6 +1278,7 @@ pub enum Statement {
Switch {
selector: Handle<Expression>, //int
cases: Vec<SwitchCase>,
default: Block,
},
/// Executes a block repeatedly.
@ -1449,9 +1382,7 @@ pub enum Statement {
fun: AtomicFunction,
/// Value to use in the function.
value: Handle<Expression>,
/// [`AtomicResult`] expression representing this function's result.
///
/// [`AtomicResult`]: crate::Expression::AtomicResult
/// Emitted expression as a result.
result: Handle<Expression>,
},
/// Calls a function.
@ -1505,9 +1436,6 @@ pub struct Function {
/// Local variables defined and used in the function.
pub local_variables: Arena<LocalVariable>,
/// Expressions used inside this function.
///
/// An `Expression` must occur before all other `Expression`s that use its
/// value.
pub expressions: Arena<Expression>,
/// Map of expressions that have associated variable names
pub named_expressions: NamedExpressions,
@ -1560,8 +1488,6 @@ pub struct Function {
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub struct EntryPoint {
/// Name of this entry point, visible externally.
///
/// Entry point names for a given `stage` must be distinct within a module.
pub name: String,
/// Shader stage.
pub stage: ShaderStage,
@ -1589,15 +1515,12 @@ pub struct EntryPoint {
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
pub struct Module {
/// Storage for the types defined in this module.
pub types: UniqueArena<Type>,
pub types: Arena<Type>,
/// Storage for the constants defined in this module.
pub constants: Arena<Constant>,
/// Storage for the global variables defined in this module.
pub global_variables: Arena<GlobalVariable>,
/// Storage for the functions defined in this module.
///
/// Each function must appear in this arena strictly before all its callers.
/// Recursion is not supported.
pub functions: Arena<Function>,
/// Entry points.
pub entry_points: Vec<EntryPoint>,

146
third_party/rust/naga/src/proc/index.rs поставляемый
Просмотреть файл

@ -1,142 +1,6 @@
//! Definitions for index bounds checking.
use super::ProcError;
use crate::valid;
use crate::{Handle, UniqueArena};
/// How should code generated by Naga do bounds checks?
///
/// When a vector, matrix, or array index is out of bounds—either negative, or
/// greater than or equal to the number of elements in the type—WGSL requires
/// that some other index of the implementation's choice that is in bounds is
/// used instead. (There are no types with zero elements.)
///
/// Similarly, when out-of-bounds coordinates, array indices, or sample indices
/// are presented to the WGSL `textureLoad` and `textureStore` operations, the
/// operation is redirected to do something safe.
///
/// Different users of Naga will prefer different defaults:
///
/// - When used as part of a WebGPU implementation, the WGSL specification
/// requires the `Restrict` behavior for array, vector, and matrix accesses,
/// and either the `Restrict` or `ReadZeroSkipWrite` behaviors for texture
/// accesses.
///
/// - When used by the `wgpu` crate for native development, `wgpu` selects
/// `ReadZeroSkipWrite` as its default.
///
/// - Naga's own default is `Unchecked`, so that shader translations
/// are as faithful to the original as possible.
///
/// Sometimes the underlying hardware and drivers can perform bounds checks
/// themselves, in a way that performs better than the checks Naga would inject.
/// If you're using native checks like this, then having Naga inject its own
/// checks as well would be redundant, and the `Unchecked` policy is
/// appropriate.
#[derive(Clone, Copy, Debug)]
pub enum BoundsCheckPolicy {
/// Replace out-of-bounds indexes with some arbitrary in-bounds index.
///
/// (This does not necessarily mean clamping. For example, interpreting the
/// index as unsigned and taking the minimum with the largest valid index
/// would also be a valid implementation. That would map negative indices to
/// the last element, not the first.)
Restrict,
/// Out-of-bounds reads return zero, and writes have no effect.
ReadZeroSkipWrite,
/// Naga adds no checks to indexing operations. Generate the fastest code
/// possible. This is the default for Naga, as a translator, but consumers
/// should consider defaulting to a safer behavior.
Unchecked,
}
#[derive(Clone, Copy, Debug, Default)]
/// Policies for injecting bounds checks during code generation.
pub struct BoundsCheckPolicies {
/// How should the generated code handle array, vector, or matrix indices
/// that are out of range?
pub index: BoundsCheckPolicy,
/// How should the generated code handle array, vector, or matrix indices
/// that are out of range, when those values live in a [`GlobalVariable`] in
/// the [`Storage`] or [`Uniform`] storage classes?
///
/// Some graphics hardware provides "robust buffer access", a feature that
/// ensures that using a pointer cannot access memory outside the 'buffer'
/// that it was derived from. In Naga terms, this means that the hardware
/// ensures that pointers computed by applying [`Access`] and
/// [`AccessIndex`] expressions to a [`GlobalVariable`] whose [`class`] is
/// [`Storage`] or [`Uniform`] will never read or write memory outside that
/// global variable.
///
/// When hardware offers such a feature, it is probably undesirable to have
/// Naga inject bounds checking code for such accesses, since the hardware
/// can probably provide the same protection more efficiently. However,
/// bounds checks are still needed on accesses to indexable values that do
/// not live in buffers, like local variables.
///
/// So, this option provides a separate policy that applies only to accesses
/// to storage and uniform globals. When depending on hardware bounds
/// checking, this policy can be `Unchecked` to avoid unnecessary overhead.
///
/// When special hardware support is not available, this should probably be
/// the same as `index_bounds_check_policy`.
///
/// [`GlobalVariable`]: crate::GlobalVariable
/// [`class`]: crate::GlobalVariable::class
/// [`Restrict`]: crate::back::BoundsCheckPolicy::Restrict
/// [`ReadZeroSkipWrite`]: crate::back::BoundsCheckPolicy::ReadZeroSkipWrite
/// [`Access`]: crate::Expression::Access
/// [`AccessIndex`]: crate::Expression::AccessIndex
/// [`Storage`]: crate::StorageClass::Storage
/// [`Uniform`]: crate::StorageClass::Uniform
pub buffer: BoundsCheckPolicy,
/// How should the generated code handle image texel references that are out
/// of range?
///
/// This controls the behavior of [`ImageLoad`] expressions and
/// [`ImageStore`] statements when a coordinate, texture array index, level
/// of detail, or multisampled sample number is out of range.
///
/// [`ImageLoad`]: crate::Expression::ImageLoad
/// [`ImageStore`]: crate::Statement::ImageStore
pub image: BoundsCheckPolicy,
}
/// The default `BoundsCheckPolicy` is `Unchecked`.
impl Default for BoundsCheckPolicy {
fn default() -> Self {
BoundsCheckPolicy::Unchecked
}
}
impl BoundsCheckPolicies {
/// Determine which policy applies to a load or store of `pointer`.
///
/// See the documentation for [`BoundsCheckPolicy`] for details about
/// when each policy applies.
pub fn choose_policy(
&self,
pointer: Handle<crate::Expression>,
types: &UniqueArena<crate::Type>,
info: &valid::FunctionInfo,
) -> BoundsCheckPolicy {
let is_buffer = match info[pointer].ty.inner_with(types).pointer_class() {
Some(crate::StorageClass::Storage { access: _ })
| Some(crate::StorageClass::Uniform) => true,
_ => false,
};
if is_buffer {
self.buffer
} else {
self.index
}
}
}
impl crate::TypeInner {
/// Return the length of a subscriptable type.
@ -185,6 +49,10 @@ pub enum IndexableLength {
/// Values of this type always have the given number of elements.
Known(u32),
/// The value of the given specializable constant is the number of elements.
/// (Non-specializable constants are reported as `Known`.)
Specializable(crate::Handle<crate::Constant>),
/// The number of elements is determined at runtime.
Dynamic,
}
@ -197,11 +65,7 @@ impl crate::ArraySize {
K {
specialization: Some(_),
..
} => {
// Specializable constants are not supported as array lengths.
// See valid::TypeError::UnsupportedSpecializedArrayLength.
return Err(ProcError::InvalidArraySizeConstant(k));
}
} => IndexableLength::Specializable(k),
ref unspecialized => {
let length = unspecialized
.to_array_length()

149
third_party/rust/naga/src/proc/interpolator.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,149 @@
pub use crate::{Arena, Handle};
impl crate::Module {
/// Apply the usual default interpolation for vertex shader outputs and fragment shader inputs.
///
/// For every [`Binding`] that is a vertex shader output or a fragment shader
/// input, and that has an `interpolation` or `sampling` of `None`, assign a
/// default interpolation and sampling as follows:
///
/// - If the `Binding`'s type contains only 32-bit floating-point values or
/// vectors of such, default its interpolation to `Perspective` and its
/// sampling to `Center`.
///
/// - Otherwise, mark its interpolation as `Flat`.
///
/// When struct appear in input or output types, apply these rules to their
/// leaves, since those are the things that actually get assigned locations.
///
/// This function is a utility front ends may use to satisfy the Naga IR's
/// requirement that all I/O `Binding`s from the vertex shader to the
/// fragment shader must have non-`None` `interpolation` values. This
/// requirement is meant to ensure that input languages' policies have been
/// applied appropriately.
///
/// All the shader languages Naga supports have similar rules:
/// perspective-correct, center-sampled interpolation is the default for any
/// binding that can vary, and everything else either defaults to flat, or
/// requires an explicit flat qualifier/attribute/what-have-you.
///
/// [`Binding`]: crate::Binding
pub fn apply_common_default_interpolation(&mut self) {
use crate::{Binding, ScalarKind, Type, TypeInner};
/// Choose a default interpolation for a function argument or result.
///
/// `binding` refers to the `Binding` whose type is `ty`. If `ty` is a struct, then it's the
/// bindings of the struct's members that we care about, and the binding of the struct
/// itself is meaningless, so `binding` should be `None`.
fn default_binding_or_struct(
binding: &mut Option<Binding>,
ty: Handle<Type>,
types: &mut Arena<Type>,
) {
let inner = &mut types.get_mut(ty).inner;
if let TypeInner::Struct {
members: ref mut m, ..
} = *inner
{
// A struct. It's the individual members we care about, so recurse.
// To choose the right interpolations for `members`, we must consult other
// elements of `types`. But both `members` and the types it refers to are stored
// in `types`, and Rust won't let us mutate one element of the `Arena`'s `Vec`
// while reading others.
//
// So, temporarily swap the member list out its type, assign appropriate
// interpolations to its members, and then swap the list back in.
use std::mem;
let mut members = mem::take(m);
for member in &mut members {
default_binding_or_struct(&mut member.binding, member.ty, types);
}
// Swap the member list back in. It's essential that we call `types.get_mut`
// afresh here, rather than just using `m`: it's only because `m` was dead that
// we were able to pass `types` to the recursive call.
match types.get_mut(ty).inner {
TypeInner::Struct {
members: ref mut m, ..
} => mem::replace(m, members),
_ => unreachable!("ty must be a struct"),
};
return;
}
// For all other types, a binding is required. Missing bindings will
// be caught during validation, but this processor is meant for use
// by front ends before validation, so just return for now.
let binding = match binding.as_mut() {
None => return,
Some(binding) => binding,
};
match *inner {
// Some interpolatable type.
//
// GLSL has 64-bit floats, but it won't interpolate them. WGSL and MSL only have
// 32-bit floats. SPIR-V has 16- and 64-bit float capabilities, but Vulkan is vague
// about what can and cannot be interpolated.
TypeInner::Scalar {
kind: ScalarKind::Float,
width: 4,
}
| TypeInner::Vector {
kind: ScalarKind::Float,
width: 4,
..
} => {
if let Binding::Location {
ref mut interpolation,
ref mut sampling,
..
} = *binding
{
if interpolation.is_none() {
*interpolation = Some(crate::Interpolation::Perspective);
}
if sampling.is_none() && *interpolation != Some(crate::Interpolation::Flat)
{
*sampling = Some(crate::Sampling::Center);
}
}
}
// Some type that can't be interpolated.
_ => {
if let Binding::Location {
ref mut interpolation,
ref mut sampling,
..
} = *binding
{
*interpolation = Some(crate::Interpolation::Flat);
*sampling = None;
}
}
}
}
for ep in &mut self.entry_points {
let function = &mut ep.function;
match ep.stage {
crate::ShaderStage::Fragment => {
for arg in &mut function.arguments {
default_binding_or_struct(&mut arg.binding, arg.ty, &mut self.types);
}
}
crate::ShaderStage::Vertex => {
if let Some(result) = function.result.as_mut() {
default_binding_or_struct(&mut result.binding, result.ty, &mut self.types);
}
}
_ => (),
}
}
}
}

4
third_party/rust/naga/src/proc/layouter.rs поставляемый
Просмотреть файл

@ -1,4 +1,4 @@
use crate::arena::{Arena, Handle, UniqueArena};
use crate::arena::{Arena, Handle};
use std::{num::NonZeroU32, ops};
pub type Alignment = NonZeroU32;
@ -64,7 +64,7 @@ impl Layouter {
pub fn update(
&mut self,
types: &UniqueArena<crate::Type>,
types: &Arena<crate::Type>,
constants: &Arena<crate::Constant>,
) -> Result<(), InvalidBaseType> {
use crate::TypeInner as Ti;

85
third_party/rust/naga/src/proc/mod.rs поставляемый
Просмотреть файл

@ -1,14 +1,13 @@
//! Module processing functionality.
mod index;
mod interpolator;
mod layouter;
mod namer;
mod terminator;
mod typifier;
use std::cmp::PartialEq;
pub use index::{BoundsCheckPolicies, BoundsCheckPolicy, IndexableLength};
pub use index::IndexableLength;
pub use layouter::{Alignment, InvalidBaseType, Layouter, TypeLayout};
pub use namer::{EntryPointIndex, NameKey, Namer};
pub use terminator::ensure_block_returns;
@ -131,72 +130,6 @@ impl super::TypeInner {
Self::Image { .. } | Self::Sampler { .. } => 0,
}
}
/// Return the canoncal form of `self`, or `None` if it's already in
/// canonical form.
///
/// Certain types have multiple representations in `TypeInner`. This
/// function converts all forms of equivalent types to a single
/// representative of their class, so that simply applying `Eq` to the
/// result indicates whether the types are equivalent, as far as Naga IR is
/// concerned.
pub fn canonical_form(
&self,
types: &crate::UniqueArena<crate::Type>,
) -> Option<crate::TypeInner> {
use crate::TypeInner as Ti;
match *self {
Ti::Pointer { base, class } => match types[base].inner {
Ti::Scalar { kind, width } => Some(Ti::ValuePointer {
size: None,
kind,
width,
class,
}),
Ti::Vector { size, kind, width } => Some(Ti::ValuePointer {
size: Some(size),
kind,
width,
class,
}),
_ => None,
},
_ => None,
}
}
/// Compare `self` and `rhs` as types.
///
/// This is mostly the same as `<TypeInner as Eq>::eq`, but it treats
/// `ValuePointer` and `Pointer` types as equivalent.
///
/// When you know that one side of the comparison is never a pointer, it's
/// fine to not bother with canonicalization, and just compare `TypeInner`
/// values with `==`.
pub fn equivalent(
&self,
rhs: &crate::TypeInner,
types: &crate::UniqueArena<crate::Type>,
) -> bool {
let left = self.canonical_form(types);
let right = rhs.canonical_form(types);
left.as_ref().unwrap_or(self) == right.as_ref().unwrap_or(rhs)
}
}
impl super::StorageClass {
pub fn access(self) -> crate::StorageAccess {
use crate::StorageAccess as Sa;
match self {
crate::StorageClass::Function
| crate::StorageClass::Private
| crate::StorageClass::WorkGroup => Sa::LOAD | Sa::STORE,
crate::StorageClass::Uniform => Sa::LOAD,
crate::StorageClass::Storage { access } => access,
crate::StorageClass::Handle => Sa::LOAD,
crate::StorageClass::PushConstant => Sa::LOAD,
}
}
}
impl super::MathFunction {
@ -260,20 +193,6 @@ impl super::MathFunction {
// bits
Self::CountOneBits => 1,
Self::ReverseBits => 1,
Self::ExtractBits => 3,
Self::InsertBits => 4,
// data packing
Self::Pack4x8snorm => 1,
Self::Pack4x8unorm => 1,
Self::Pack2x16snorm => 1,
Self::Pack2x16unorm => 1,
Self::Pack2x16float => 1,
// data unpacking
Self::Unpack4x8snorm => 1,
Self::Unpack4x8unorm => 1,
Self::Unpack2x16snorm => 1,
Self::Unpack2x16unorm => 1,
Self::Unpack2x16float => 1,
}
}
}

155
third_party/rust/naga/src/proc/namer.rs поставляемый
Просмотреть файл

@ -1,8 +1,7 @@
use crate::{arena::Handle, FastHashMap, FastHashSet};
use std::borrow::Cow;
use std::collections::hash_map::Entry;
pub type EntryPointIndex = u16;
const SEPARATOR: char = '_';
#[derive(Debug, Eq, Hash, PartialEq)]
pub enum NameKey {
@ -22,115 +21,93 @@ pub enum NameKey {
/// that may need identifiers in a textual backend.
#[derive(Default)]
pub struct Namer {
/// The last numeric suffix used for each base name. Zero means "no suffix".
unique: FastHashMap<String, u32>,
unique: FastHashMap<(String, u32), u32>,
keywords: FastHashSet<String>,
/// Currently active namespace.
namespace_index: u32,
reserved_prefixes: Vec<String>,
}
impl Namer {
/// Return a form of `string` suitable for use as the base of an identifier.
///
/// - Drop leading digits.
/// - Retain only alphanumeric and `_` characters.
/// - Avoid prefixes in [`Namer::reserved_prefixes`].
///
/// The return value is a valid identifier prefix in all of Naga's output languages,
/// and it never ends with a `SEPARATOR` character.
/// It is used as a key into the unique table.
fn sanitize<'s>(&self, string: &'s str) -> Cow<'s, str> {
let string = string
.trim_start_matches(|c: char| c.is_numeric())
.trim_end_matches(SEPARATOR);
let base = if !string.is_empty()
&& string
.chars()
.all(|c: char| c.is_ascii_alphanumeric() || c == '_')
{
Cow::Borrowed(string)
} else {
let mut filtered = string
.chars()
.filter(|&c| c.is_ascii_alphanumeric() || c == '_')
.collect::<String>();
let stripped_len = filtered.trim_end_matches(SEPARATOR).len();
filtered.truncate(stripped_len);
if filtered.is_empty() {
filtered.push_str("unnamed");
}
Cow::Owned(filtered)
fn sanitize(&self, string: &str) -> String {
let mut base = string
.chars()
.skip_while(|c| c.is_numeric())
.filter(|&c| c.is_ascii_alphanumeric() || c == '_')
.collect::<String>();
// close the name by '_' if the re is a number, so that
// we can have our own number!
match base.chars().next_back() {
Some(c) if !c.is_numeric() => {}
_ => base.push('_'),
};
for prefix in &self.reserved_prefixes {
if base.starts_with(prefix) {
return format!("gen_{}", base).into();
return format!("gen_{}", base);
}
}
base
}
/// Return a new identifier based on `label_raw`.
///
/// The result:
/// - is a valid identifier even if `label_raw` is not
/// - conflicts with no keywords listed in `Namer::keywords`, and
/// - is different from any identifier previously constructed by this
/// `Namer`.
///
/// Guarantee uniqueness by applying a numeric suffix when necessary. If `label_raw`
/// itself ends with digits, separate them from the suffix with an underscore.
pub fn call(&mut self, label_raw: &str) -> String {
use std::fmt::Write as _; // for write!-ing to Strings
let base = self.sanitize(label_raw);
debug_assert!(!base.is_empty() && !base.ends_with(SEPARATOR));
// This would seem to be a natural place to use `HashMap::entry`. However, `entry`
// requires an owned key, and we'd like to avoid heap-allocating strings we're
// just going to throw away. The approach below double-hashes only when we create
// a new entry, in which case the heap allocation of the owned key was more
// expensive anyway.
match self.unique.get_mut(base.as_ref()) {
Some(count) => {
*count += 1;
// Add the suffix. This may fit in base's existing allocation.
let mut suffixed = base.into_owned();
write!(&mut suffixed, "{}{}", SEPARATOR, *count).unwrap();
suffixed
/// Helper function that return unique name without cache update.
/// This function should be used **after** [`Namer`](crate::proc::Namer) initialization by [`reset`](Self::reset()) function.
pub fn call_unique(&mut self, string: &str) -> String {
let base = self.sanitize(string);
match self.unique.entry((base, self.namespace_index)) {
Entry::Occupied(mut e) => {
*e.get_mut() += 1;
format!("{}{}", e.key().0, e.get())
}
None => {
let mut suffixed = base.to_string();
if base.ends_with(char::is_numeric) || self.keywords.contains(base.as_ref()) {
suffixed.push(SEPARATOR);
Entry::Vacant(e) => {
let name = &e.key().0;
if self.keywords.contains(&e.key().0) {
let name = format!("{}1", name);
e.insert(1);
name
} else {
name.to_string()
}
debug_assert!(!self.keywords.contains(&suffixed));
// `self.unique` wants to own its keys. This allocates only if we haven't
// already done so earlier.
self.unique.insert(base.into_owned(), 0);
suffixed
}
}
}
pub fn call_or(&mut self, label: &Option<String>, fallback: &str) -> String {
pub fn call(&mut self, label_raw: &str) -> String {
let base = self.sanitize(label_raw);
match self.unique.entry((base, self.namespace_index)) {
Entry::Occupied(mut e) => {
*e.get_mut() += 1;
format!("{}{}", e.key().0, e.get())
}
Entry::Vacant(e) => {
let name = &e.key().0;
if self.keywords.contains(&e.key().0) {
let name = format!("{}1", name);
e.insert(1);
name
} else {
let name = name.to_string();
e.insert(0);
name
}
}
}
}
fn call_or(&mut self, label: &Option<String>, fallback: &str) -> String {
self.call(match *label {
Some(ref name) => name,
None => fallback,
})
}
/// Enter a local namespace for things like structs.
///
/// Struct member names only need to be unique amongst themselves, not
/// globally. This function temporarily establishes a fresh, empty naming
/// context for the duration of the call to `body`.
fn namespace(&mut self, capacity: usize, body: impl FnOnce(&mut Self)) {
let fresh = FastHashMap::with_capacity_and_hasher(capacity, Default::default());
let outer = std::mem::replace(&mut self.unique, fresh);
body(self);
self.unique = outer;
fn namespace(&mut self, f: impl FnOnce(&mut Self)) {
self.namespace_index += 1;
f(self);
let current_ns = self.namespace_index;
self.unique.retain(|&(_, ns), _| ns != current_ns);
self.namespace_index -= 1;
}
pub fn reset(
@ -156,7 +133,7 @@ impl Namer {
if let crate::TypeInner::Struct { ref members, .. } = ty.inner {
// struct members have their own namespace, because access is always prefixed
self.namespace(members.len(), |namer| {
self.namespace(|namer| {
for (index, member) in members.iter().enumerate() {
let name = namer.call_or(&member.name, "member");
output.insert(NameKey::StructMember(ty_handle, index as u32), name);
@ -251,11 +228,3 @@ impl Namer {
}
}
}
#[test]
fn test() {
let mut namer = Namer::default();
assert_eq!(namer.call("x"), "x");
assert_eq!(namer.call("x"), "x_1");
assert_eq!(namer.call("x1"), "x1_");
}

Просмотреть файл

@ -21,12 +21,14 @@ pub fn ensure_block_returns(block: &mut crate::Block) {
Some(&mut S::Switch {
selector: _,
ref mut cases,
ref mut default,
}) => {
for case in cases.iter_mut() {
if !case.fall_through {
ensure_block_returns(&mut case.body);
}
}
ensure_block_returns(default);
}
Some(&mut S::Emit(_))
| Some(&mut S::Break)

156
third_party/rust/naga/src/proc/typifier.rs поставляемый
Просмотреть файл

@ -1,98 +1,12 @@
use crate::arena::{Arena, Handle, UniqueArena};
use crate::arena::{Arena, Handle};
use thiserror::Error;
/// The result of computing an expression's type.
///
/// This is the (Rust) type returned by [`ResolveContext::resolve`] to represent
/// the (Naga) type it ascribes to some expression.
///
/// You might expect such a function to simply return a `Handle<Type>`. However,
/// we want type resolution to be a read-only process, and that would limit the
/// possible results to types already present in the expression's associated
/// `UniqueArena<Type>`. Naga IR does have certain expressions whose types are
/// not certain to be present.
///
/// So instead, type resolution returns a `TypeResolution` enum: either a
/// [`Handle`], referencing some type in the arena, or a [`Value`], holding a
/// free-floating [`TypeInner`]. This extends the range to cover anything that
/// can be represented with a `TypeInner` referring to the existing arena.
///
/// What sorts of expressions can have types not available in the arena?
///
/// - An [`Access`] or [`AccessIndex`] expression applied to a [`Vector`] or
/// [`Matrix`] must have a [`Scalar`] or [`Vector`] type. But since `Vector`
/// and `Matrix` represent their element and column types implicitly, not
/// via a handle, there may not be a suitable type in the expression's
/// associated arena. Instead, resolving such an expression returns a
/// `TypeResolution::Value(TypeInner::X { ... })`, where `X` is `Scalar` or
/// `Vector`.
///
/// - Similarly, the type of an [`Access`] or [`AccessIndex`] expression
/// applied to a *pointer to* a vector or matrix must produce a *pointer to*
/// a scalar or vector type. These cannot be represented with a
/// [`TypeInner::Pointer`], since the `Pointer`'s `base` must point into the
/// arena, and as before, we cannot assume that a suitable scalar or vector
/// type is there. So we take things one step further and provide
/// [`TypeInner::ValuePointer`], specifically for the case of pointers to
/// scalars or vectors. This type fits in a `TypeInner` and is exactly
/// equivalent to a `Pointer` to a `Vector` or `Scalar`.
///
/// So, for example, the type of an `Access` expression applied to a value of type:
///
/// ```ignore
/// TypeInner::Matrix { columns, rows, width }
/// ```
///
/// might be:
///
/// ```ignore
/// TypeResolution::Value(TypeInner::Vector {
/// size: rows,
/// kind: ScalarKind::Float,
/// width,
/// })
/// ```
///
/// and the type of an access to a pointer of storage class `class` to such a
/// matrix might be:
///
/// ```ignore
/// TypeResolution::Value(TypeInner::ValuePointer {
/// size: Some(rows),
/// kind: ScalarKind::Float,
/// width,
/// class
/// })
/// ```
///
/// [`Handle`]: TypeResolution::Handle
/// [`Value`]: TypeResolution::Value
///
/// [`Access`]: crate::Expression::Access
/// [`AccessIndex`]: crate::Expression::AccessIndex
///
/// [`TypeInner`]: crate::TypeInner
/// [`Matrix`]: crate::TypeInner::Matrix
/// [`Pointer`]: crate::TypeInner::Pointer
/// [`Scalar`]: crate::TypeInner::Scalar
/// [`ValuePointer`]: crate::TypeInner::ValuePointer
/// [`Vector`]: crate::TypeInner::Vector
///
/// [`TypeInner::Pointer`]: crate::TypeInner::Pointer
/// [`TypeInner::ValuePointer`]: crate::TypeInner::ValuePointer
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum TypeResolution {
/// A type stored in the associated arena.
Handle(Handle<crate::Type>),
/// A free-floating [`TypeInner`], representing a type that may not be
/// available in the associated arena. However, the `TypeInner` itself may
/// contain `Handle<Type>` values referring to types from the arena.
///
/// [`TypeInner`]: crate::TypeInner
Value(crate::TypeInner),
}
@ -104,7 +18,7 @@ impl TypeResolution {
}
}
pub fn inner_with<'a>(&'a self, arena: &'a UniqueArena<crate::Type>) -> &'a crate::TypeInner {
pub fn inner_with<'a>(&'a self, arena: &'a Arena<crate::Type>) -> &'a crate::TypeInner {
match *self {
Self::Handle(handle) => &arena[handle].inner,
Self::Value(ref inner) => inner,
@ -197,7 +111,7 @@ pub enum ResolveError {
pub struct ResolveContext<'a> {
pub constants: &'a Arena<crate::Constant>,
pub types: &'a UniqueArena<crate::Type>,
pub types: &'a Arena<crate::Type>,
pub global_vars: &'a Arena<crate::GlobalVariable>,
pub local_vars: &'a Arena<crate::LocalVariable>,
pub functions: &'a Arena<crate::Function>,
@ -205,21 +119,6 @@ pub struct ResolveContext<'a> {
}
impl<'a> ResolveContext<'a> {
/// Determine the type of `expr`.
///
/// The `past` argument must be a closure that can resolve the types of any
/// expressions that `expr` refers to. These can be gathered by caching the
/// results of prior calls to `resolve`, perhaps as done by the
/// [`front::Typifier`] utility type.
///
/// Type resolution is a read-only process: this method takes `self` by
/// shared reference. However, this means that we cannot add anything to
/// `self.types` that we might need to describe `expr`. To work around this,
/// this method returns a [`TypeResolution`], rather than simply returning a
/// `Handle<Type>`; see the documentation for [`TypeResolution`] for
/// details.
///
/// [`front::Typifier`]: crate::front::Typifier
pub fn resolve(
&self,
expr: &crate::Expression,
@ -626,40 +525,15 @@ impl<'a> ResolveContext<'a> {
}
crate::Expression::Select { accept, .. } => past(accept).clone(),
crate::Expression::Derivative { axis: _, expr } => past(expr).clone(),
crate::Expression::Relational { fun, argument } => match fun {
crate::RelationalFunction::All | crate::RelationalFunction::Any => {
TypeResolution::Value(Ti::Scalar {
kind: crate::ScalarKind::Bool,
width: crate::BOOL_WIDTH,
})
}
crate::RelationalFunction::IsNan
| crate::RelationalFunction::IsInf
| crate::RelationalFunction::IsFinite
| crate::RelationalFunction::IsNormal => match *past(argument).inner_with(types) {
Ti::Scalar { .. } => TypeResolution::Value(Ti::Scalar {
kind: crate::ScalarKind::Bool,
width: crate::BOOL_WIDTH,
}),
Ti::Vector { size, .. } => TypeResolution::Value(Ti::Vector {
kind: crate::ScalarKind::Bool,
width: crate::BOOL_WIDTH,
size,
}),
ref other => {
return Err(ResolveError::IncompatibleOperands(format!(
"{:?}({:?})",
fun, other
)))
}
},
},
crate::Expression::Relational { .. } => TypeResolution::Value(Ti::Scalar {
kind: crate::ScalarKind::Bool,
width: crate::BOOL_WIDTH,
}),
crate::Expression::Math {
fun,
arg,
arg1,
arg2: _,
arg3: _,
} => {
use crate::MathFunction as Mf;
let res_arg = past(arg);
@ -782,21 +656,7 @@ impl<'a> ResolveContext<'a> {
},
// bits
Mf::CountOneBits |
Mf::ReverseBits |
Mf::ExtractBits |
Mf::InsertBits => res_arg.clone(),
// data packing
Mf::Pack4x8snorm |
Mf::Pack4x8unorm |
Mf::Pack2x16snorm |
Mf::Pack2x16unorm |
Mf::Pack2x16float => TypeResolution::Value(Ti::Scalar { kind: crate::ScalarKind::Uint, width: 4 }),
// data unpacking
Mf::Unpack4x8snorm |
Mf::Unpack4x8unorm => TypeResolution::Value(Ti::Vector { size: crate::VectorSize::Quad, kind: crate::ScalarKind::Float, width: 4 }),
Mf::Unpack2x16snorm |
Mf::Unpack2x16unorm |
Mf::Unpack2x16float => TypeResolution::Value(Ti::Vector { size: crate::VectorSize::Bi, kind: crate::ScalarKind::Float, width: 4 }),
Mf::ReverseBits => res_arg.clone(),
}
}
crate::Expression::As {

277
third_party/rust/naga/src/span.rs поставляемый
Просмотреть файл

@ -1,269 +1,38 @@
use crate::{Arena, Handle, UniqueArena};
use std::{error::Error, fmt, ops::Range};
use std::ops::Range;
/// A source code span, used for error reporting.
#[derive(Clone, Copy, Debug, PartialEq, Default)]
pub struct Span {
start: u32,
end: u32,
// A source code span, used for error reporting.
#[derive(Clone, Debug, PartialEq)]
pub enum Span {
// Span is unknown - no source information.
Unknown,
// Byte range.
ByteRange(Range<usize>),
}
impl Default for Span {
fn default() -> Self {
Self::Unknown
}
}
impl Span {
pub const UNDEFINED: Self = Self { start: 0, end: 0 };
/// Creates a new `Span` from a range of byte indices
///
/// Note: end is exclusive, it doesn't belong to the `Span`
pub fn new(start: u32, end: u32) -> Self {
Span { start, end }
}
/// Modifies `self` to contain the smallest `Span` possible that
/// contains both `self` and `other`
pub fn subsume(&mut self, other: Self) {
*self = if !self.is_defined() {
// self isn't defined so use other
other
} else if !other.is_defined() {
// other isn't defined so don't try to subsume
*self
} else {
// Both self and other are defined so calculate the span that contains them both
Span {
start: self.start.min(other.start),
end: self.end.max(other.end),
pub fn subsume(&mut self, other: &Self) {
match *self {
Self::Unknown => self.clone_from(other),
Self::ByteRange(ref mut self_range) => {
if let Self::ByteRange(ref other_range) = *other {
self_range.start = self_range.start.min(other_range.start);
self_range.end = self_range.end.max(other_range.end);
}
}
}
}
/// Returns the smallest `Span` possible that contains all the `Span`s
/// defined in the `from` iterator
pub fn total_span<T: Iterator<Item = Self>>(from: T) -> Self {
pub fn total_span<'a, T: Iterator<Item = &'a Self>>(from: T) -> Self {
let mut span: Self = Default::default();
for other in from {
span.subsume(other);
}
span
}
/// Converts `self` to a range if the span is not unknown
pub fn to_range(self) -> Option<Range<usize>> {
if self.is_defined() {
Some(self.start as usize..self.end as usize)
} else {
None
}
}
/// Check wether `self` was defined or is a default/unknown span
pub fn is_defined(&self) -> bool {
*self != Self::default()
}
}
impl From<Range<usize>> for Span {
fn from(range: Range<usize>) -> Self {
Span {
start: range.start as u32,
end: range.end as u32,
}
}
}
/// A source code span together with "context", a user-readable description of what part of the error it refers to.
pub type SpanContext = (Span, String);
/// Wrapper class for [`Error`], augmenting it with a list of [`SpanContext`]s.
#[derive(Debug)]
pub struct WithSpan<E> {
inner: E,
#[cfg(feature = "span")]
spans: Vec<SpanContext>,
}
impl<E> fmt::Display for WithSpan<E>
where
E: fmt::Display,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> std::fmt::Result {
self.inner.fmt(f)
}
}
#[cfg(test)]
impl<E> PartialEq for WithSpan<E>
where
E: PartialEq,
{
fn eq(&self, other: &Self) -> bool {
self.inner.eq(&other.inner)
}
}
impl<E> Error for WithSpan<E>
where
E: Error,
{
fn source(&self) -> Option<&(dyn Error + 'static)> {
self.inner.source()
}
}
impl<E> WithSpan<E> {
/// Create a new [`WithSpan`] from an [`Error`], containing no spans.
pub fn new(inner: E) -> Self {
Self {
inner,
#[cfg(feature = "span")]
spans: Vec::new(),
}
}
/// Reverse of [`Self::new`], discards span information and returns an inner error.
pub fn into_inner(self) -> E {
self.inner
}
/// Iterator over stored [`SpanContext`]s.
pub fn spans(&self) -> impl Iterator<Item = &SpanContext> {
#[cfg(feature = "span")]
return self.spans.iter();
#[cfg(not(feature = "span"))]
return std::iter::empty();
}
/// Add a new span with description.
#[cfg_attr(not(feature = "span"), allow(unused_variables, unused_mut))]
pub fn with_span<S>(mut self, span: Span, description: S) -> Self
where
S: ToString,
{
#[cfg(feature = "span")]
if span.is_defined() {
self.spans.push((span, description.to_string()));
}
self
}
/// Add a [`SpanContext`].
pub fn with_context(self, span_context: SpanContext) -> Self {
let (span, description) = span_context;
self.with_span(span, description)
}
/// Add a [`Handle`] from either [`Arena`] or [`UniqueArena`], borrowing its span information from there
/// and annotating with a type and the handle representation.
pub(crate) fn with_handle<T, A: SpanProvider<T>>(self, handle: Handle<T>, arena: &A) -> Self {
self.with_context(arena.get_span_context(handle))
}
/// Convert inner error using [`From`].
pub fn into_other<E2>(self) -> WithSpan<E2>
where
E2: From<E>,
{
WithSpan {
inner: self.inner.into(),
#[cfg(feature = "span")]
spans: self.spans,
}
}
/// Convert inner error into another type. Joins span information contained in `self`
/// with what is returned from `func`.
pub fn and_then<F, E2>(self, func: F) -> WithSpan<E2>
where
F: FnOnce(E) -> WithSpan<E2>,
{
#[cfg_attr(not(feature = "span"), allow(unused_mut))]
let mut res = func(self.inner);
#[cfg(feature = "span")]
res.spans.extend(self.spans);
res
}
}
/// Convenience trait for [`Error`] to be able to apply spans to anything.
pub(crate) trait AddSpan: Sized {
type Output;
/// See [`WithSpan::new`].
fn with_span(self) -> Self::Output;
/// See [`WithSpan::with_span`].
fn with_span_static(self, span: Span, description: &'static str) -> Self::Output;
/// See [`WithSpan::with_context`].
fn with_span_context(self, span_context: SpanContext) -> Self::Output;
/// See [`WithSpan::with_handle`].
fn with_span_handle<T, A: SpanProvider<T>>(self, handle: Handle<T>, arena: &A) -> Self::Output;
}
/// Trait abstracting over getting a span from an [`Arena`] or a [`UniqueArena`].
pub(crate) trait SpanProvider<T> {
fn get_span(&self, handle: Handle<T>) -> Span;
fn get_span_context(&self, handle: Handle<T>) -> SpanContext {
match self.get_span(handle) {
x if !x.is_defined() => (Default::default(), "".to_string()),
known => (
known,
format!("{} {:?}", std::any::type_name::<T>(), handle),
),
}
}
}
impl<T> SpanProvider<T> for Arena<T> {
fn get_span(&self, handle: Handle<T>) -> Span {
self.get_span(handle)
}
}
impl<T> SpanProvider<T> for UniqueArena<T> {
fn get_span(&self, handle: Handle<T>) -> Span {
self.get_span(handle)
}
}
impl<E> AddSpan for E
where
E: Error,
{
type Output = WithSpan<Self>;
fn with_span(self) -> WithSpan<Self> {
WithSpan::new(self)
}
fn with_span_static(self, span: Span, description: &'static str) -> WithSpan<Self> {
WithSpan::new(self).with_span(span, description)
}
fn with_span_context(self, span_context: SpanContext) -> WithSpan<Self> {
WithSpan::new(self).with_context(span_context)
}
fn with_span_handle<T, A: SpanProvider<T>>(
self,
handle: Handle<T>,
arena: &A,
) -> WithSpan<Self> {
WithSpan::new(self).with_handle(handle, arena)
}
}
/// Convenience trait for [`Result`], adding a [`MapErrWithSpan::map_err_inner`]
/// mapping to [`WithSpan::and_then`].
pub trait MapErrWithSpan<E, E2>: Sized {
type Output: Sized;
fn map_err_inner<F, E3>(self, func: F) -> Self::Output
where
F: FnOnce(E) -> WithSpan<E3>,
E2: From<E3>;
}
impl<T, E, E2> MapErrWithSpan<E, E2> for Result<T, WithSpan<E>> {
type Output = Result<T, WithSpan<E2>>;
fn map_err_inner<F, E3>(self, func: F) -> Result<T, WithSpan<E2>>
where
F: FnOnce(E) -> WithSpan<E3>,
E2: From<E3>,
{
self.map_err(|e| e.and_then(func).into_other::<E2>())
}
}

68
third_party/rust/naga/src/valid/analyzer.rs поставляемый
Просмотреть файл

@ -7,7 +7,6 @@ Figures out the following properties:
!*/
use super::{CallError, ExpressionError, FunctionError, ModuleInfo, ShaderStages, ValidationFlags};
use crate::span::{AddSpan as _, WithSpan};
use crate::{
arena::{Arena, Handle},
proc::{ResolveContext, TypeResolution},
@ -191,7 +190,6 @@ struct Sampling {
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct FunctionInfo {
/// Validation flags.
#[allow(dead_code)]
flags: ValidationFlags,
/// Set of shader stages where calling this function is valid.
pub available_stages: ShaderStages,
@ -308,7 +306,7 @@ impl FunctionInfo {
info: &Self,
arguments: &[Handle<crate::Expression>],
expression_arena: &Arena<crate::Expression>,
) -> Result<FunctionUniformity, WithSpan<FunctionError>> {
) -> Result<FunctionUniformity, FunctionError> {
for key in info.sampling_set.iter() {
self.sampling_set.insert(key.clone());
}
@ -319,10 +317,7 @@ impl FunctionInfo {
let handle = arguments[i as usize];
expression_arena[handle]
.to_global_or_argument()
.map_err(|error| {
FunctionError::Expression { handle, error }
.with_span_handle(handle, expression_arena)
})?
.map_err(|error| FunctionError::Expression { handle, error })?
}
};
@ -332,10 +327,7 @@ impl FunctionInfo {
let handle = arguments[i as usize];
expression_arena[handle]
.to_global_or_argument()
.map_err(|error| {
FunctionError::Expression { handle, error }
.with_span_handle(handle, expression_arena)
})?
.map_err(|error| FunctionError::Expression { handle, error })?
}
};
@ -615,29 +607,27 @@ impl FunctionInfo {
#[allow(clippy::or_fun_call)]
fn process_block(
&mut self,
statements: &crate::Block,
statements: &[crate::Statement],
other_functions: &[FunctionInfo],
mut disruptor: Option<UniformityDisruptor>,
expression_arena: &Arena<crate::Expression>,
) -> Result<FunctionUniformity, WithSpan<FunctionError>> {
) -> Result<FunctionUniformity, FunctionError> {
use crate::Statement as S;
let mut combined_uniformity = FunctionUniformity::new();
for (statement, &span) in statements.span_iter() {
for statement in statements {
let uniformity = match *statement {
S::Emit(ref range) => {
let mut requirements = UniformityRequirements::empty();
for expr in range.clone() {
let req = self.expressions[expr.index()].uniformity.requirements;
#[cfg(feature = "validate")]
if self
.flags
.contains(super::ValidationFlags::CONTROL_FLOW_UNIFORMITY)
&& !req.is_empty()
{
if let Some(cause) = disruptor {
return Err(FunctionError::NonUniformControlFlow(req, expr, cause)
.with_span_handle(expr, expression_arena));
return Err(FunctionError::NonUniformControlFlow(req, expr, cause));
}
}
requirements |= req;
@ -694,6 +684,7 @@ impl FunctionInfo {
S::Switch {
selector,
ref cases,
ref default,
} => {
let selector_nur = self.add_ref(selector);
let branch_disruptor =
@ -714,7 +705,14 @@ impl FunctionInfo {
};
uniformity = uniformity | case_uniformity;
}
uniformity
// using the disruptor inherited from the last fall-through chain
let default_exit = self.process_block(
default,
other_functions,
case_disruptor,
expression_arena,
)?;
uniformity | default_exit
}
S::Loop {
ref body,
@ -776,8 +774,7 @@ impl FunctionInfo {
FunctionError::InvalidCall {
function,
error: CallError::ForwardDeclaredFunction,
}
.with_span_static(span, "forward call"),
},
)?;
//Note: the result is validated by the Validator, not here
self.process_call(info, arguments, expression_arena)?
@ -812,7 +809,7 @@ impl ModuleInfo {
fun: &crate::Function,
module: &crate::Module,
flags: ValidationFlags,
) -> Result<FunctionInfo, WithSpan<FunctionError>> {
) -> Result<FunctionInfo, FunctionError> {
let mut info = FunctionInfo {
flags,
available_stages: ShaderStages::all(),
@ -840,8 +837,7 @@ impl ModuleInfo {
&self.functions,
&resolve_context,
) {
return Err(FunctionError::Expression { handle, error }
.with_span_handle(handle, &fun.expressions));
return Err(FunctionError::Expression { handle, error });
}
}
@ -858,7 +854,6 @@ impl ModuleInfo {
}
#[test]
#[cfg(feature = "validate")]
fn uniform_control_flow() {
use crate::{Expression as E, Statement as S};
@ -874,8 +869,8 @@ fn uniform_control_flow() {
},
Default::default(),
);
let mut type_arena = crate::UniqueArena::new();
let ty = type_arena.insert(
let mut type_arena = Arena::new();
let ty = type_arena.append(
crate::Type {
name: None,
inner: crate::TypeInner::Vector {
@ -981,12 +976,7 @@ fn uniform_control_flow() {
.into(),
};
assert_eq!(
info.process_block(
&vec![stmt_emit1, stmt_if_uniform].into(),
&[],
None,
&expressions
),
info.process_block(&[stmt_emit1, stmt_if_uniform], &[], None, &expressions),
Ok(FunctionUniformity {
result: Uniformity {
non_uniform_result: None,
@ -1012,18 +1002,12 @@ fn uniform_control_flow() {
reject: crate::Block::new(),
};
assert_eq!(
info.process_block(
&vec![stmt_emit2, stmt_if_non_uniform].into(),
&[],
None,
&expressions
),
info.process_block(&[stmt_emit2, stmt_if_non_uniform], &[], None, &expressions),
Err(FunctionError::NonUniformControlFlow(
UniformityRequirements::DERIVATIVE,
derivative_expr,
UniformityDisruptor::Expression(non_uniform_global_expr)
)
.with_span()),
)),
);
assert_eq!(info[derivative_expr].ref_count, 1);
assert_eq!(info[non_uniform_global], GlobalUse::READ);
@ -1034,7 +1018,7 @@ fn uniform_control_flow() {
};
assert_eq!(
info.process_block(
&vec![stmt_emit3, stmt_return_non_uniform].into(),
&[stmt_emit3, stmt_return_non_uniform],
&[],
Some(UniformityDisruptor::Return),
&expressions
@ -1061,7 +1045,7 @@ fn uniform_control_flow() {
let stmt_kill = S::Kill;
assert_eq!(
info.process_block(
&vec![stmt_emit4, stmt_assign, stmt_kill, stmt_return_pointer].into(),
&[stmt_emit4, stmt_assign, stmt_kill, stmt_return_pointer],
&[],
Some(UniformityDisruptor::Discard),
&expressions

22
third_party/rust/naga/src/valid/compose.rs поставляемый
Просмотреть файл

@ -1,11 +1,8 @@
#[cfg(feature = "validate")]
use crate::{
arena::{Arena, UniqueArena},
arena::{Arena, Handle},
proc::TypeResolution,
};
use crate::Handle;
#[derive(Clone, Debug, thiserror::Error)]
#[cfg_attr(test, derive(PartialEq))]
pub enum ComposeError {
@ -19,17 +16,16 @@ pub enum ComposeError {
ComponentType { index: u32 },
}
#[cfg(feature = "validate")]
pub fn validate_compose(
self_ty_handle: Handle<crate::Type>,
constant_arena: &Arena<crate::Constant>,
type_arena: &UniqueArena<crate::Type>,
type_arena: &Arena<crate::Type>,
component_resolutions: impl ExactSizeIterator<Item = TypeResolution>,
) -> Result<(), ComposeError> {
use crate::TypeInner as Ti;
let self_ty = type_arena
.get_handle(self_ty_handle)
.try_get(self_ty_handle)
.ok_or(ComposeError::TypeDoesntExist(self_ty_handle))?;
match self_ty.inner {
// vectors are composed from scalars or other vectors
@ -100,11 +96,7 @@ pub fn validate_compose(
});
}
for (index, comp_res) in component_resolutions.enumerate() {
let base_inner = &type_arena[base].inner;
let comp_res_inner = comp_res.inner_with(type_arena);
// We don't support arrays of pointers, but it seems best not to
// embed that assumption here, so use `TypeInner::equivalent`.
if !base_inner.equivalent(comp_res_inner, type_arena) {
if comp_res.inner_with(type_arena) != &type_arena[base].inner {
log::error!("Array component[{}] type {:?}", index, comp_res);
return Err(ComposeError::ComponentType {
index: index as u32,
@ -121,11 +113,7 @@ pub fn validate_compose(
}
for (index, (member, comp_res)) in members.iter().zip(component_resolutions).enumerate()
{
let member_inner = &type_arena[member.ty].inner;
let comp_res_inner = comp_res.inner_with(type_arena);
// We don't support pointers in structs, but it seems best not to embed
// that assumption here, so use `TypeInner::equivalent`.
if !comp_res_inner.equivalent(member_inner, type_arena) {
if comp_res.inner_with(type_arena) != &type_arena[member.ty].inner {
log::error!("Struct component[{}] type {:?}", index, comp_res);
return Err(ComposeError::ComponentType {
index: index as u32,

184
third_party/rust/naga/src/valid/expression.rs поставляемый
Просмотреть файл

@ -1,10 +1,6 @@
#[cfg(feature = "validate")]
use super::{compose::validate_compose, FunctionInfo, ShaderStages, TypeFlags};
#[cfg(feature = "validate")]
use crate::arena::UniqueArena;
use super::{compose::validate_compose, ComposeError, FunctionInfo, ShaderStages, TypeFlags};
use crate::{
arena::Handle,
arena::{Arena, Handle},
proc::{ProcError, ResolveError},
};
@ -44,7 +40,7 @@ pub enum ExpressionError {
#[error("Swizzle component {0:?} is outside of vector size {1:?}")]
InvalidSwizzleComponent(crate::SwizzleComponent, crate::VectorSize),
#[error(transparent)]
Compose(#[from] super::ComposeError),
Compose(#[from] ComposeError),
#[error(transparent)]
Proc(#[from] ProcError),
#[error("Operation {0:?} can't work with {1:?}")]
@ -115,14 +111,12 @@ pub enum ExpressionError {
InvalidAtomicResultType(crate::ScalarKind, crate::Bytes),
}
#[cfg(feature = "validate")]
struct ExpressionTypeResolver<'a> {
root: Handle<crate::Expression>,
types: &'a UniqueArena<crate::Type>,
types: &'a Arena<crate::Type>,
info: &'a FunctionInfo,
}
#[cfg(feature = "validate")]
impl<'a> ExpressionTypeResolver<'a> {
fn resolve(
&self,
@ -136,7 +130,6 @@ impl<'a> ExpressionTypeResolver<'a> {
}
}
#[cfg(feature = "validate")]
impl super::Validator {
pub(super) fn validate_expression(
&self,
@ -802,14 +795,7 @@ impl super::Validator {
Ti::Scalar {
kind: Sk::Bool,
width: _,
} => {
// When `condition` is a single boolean, `accept` and
// `reject` can be vectors or scalars.
match *accept_inner {
Ti::Scalar { .. } | Ti::Vector { .. } => true,
_ => false,
}
}
} => accept_inner.is_sized(),
Ti::Vector {
size,
kind: Sk::Bool,
@ -870,17 +856,15 @@ impl super::Validator {
arg,
arg1,
arg2,
arg3,
} => {
use crate::MathFunction as Mf;
let arg_ty = resolver.resolve(arg)?;
let arg1_ty = arg1.map(|expr| resolver.resolve(expr)).transpose()?;
let arg2_ty = arg2.map(|expr| resolver.resolve(expr)).transpose()?;
let arg3_ty = arg3.map(|expr| resolver.resolve(expr)).transpose()?;
match fun {
Mf::Abs => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
let good = match *arg_ty {
@ -892,8 +876,8 @@ impl super::Validator {
}
}
Mf::Min | Mf::Max => {
let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), None, None) => ty1,
let arg1_ty = match (arg1_ty, arg2_ty) {
(Some(ty1), None) => ty1,
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
let good = match *arg_ty {
@ -912,8 +896,8 @@ impl super::Validator {
}
}
Mf::Clamp => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), None) => (ty1, ty2),
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty) {
(Some(ty1), Some(ty2)) => (ty1, ty2),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
let good = match *arg_ty {
@ -963,7 +947,7 @@ impl super::Validator {
| Mf::Sign
| Mf::Sqrt
| Mf::InverseSqrt => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
@ -977,8 +961,8 @@ impl super::Validator {
}
}
Mf::Atan2 | Mf::Pow | Mf::Distance | Mf::Step => {
let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), None, None) => ty1,
let arg1_ty = match (arg1_ty, arg2_ty) {
(Some(ty1), None) => ty1,
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
match *arg_ty {
@ -999,8 +983,8 @@ impl super::Validator {
}
}
Mf::Modf | Mf::Frexp | Mf::Ldexp => {
let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), None, None) => ty1,
let arg1_ty = match (arg1_ty, arg2_ty) {
(Some(ty1), None) => ty1,
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
let (size0, width0) = match *arg_ty {
@ -1034,8 +1018,8 @@ impl super::Validator {
}
}
Mf::Dot | Mf::Outer | Mf::Cross | Mf::Reflect => {
let arg1_ty = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), None, None) => ty1,
let arg1_ty = match (arg1_ty, arg2_ty) {
(Some(ty1), None) => ty1,
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
match *arg_ty {
@ -1053,8 +1037,8 @@ impl super::Validator {
}
}
Mf::Refract => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), None) => (ty1, ty2),
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty) {
(Some(ty1), Some(ty2)) => (ty1, ty2),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
@ -1094,7 +1078,7 @@ impl super::Validator {
}
}
Mf::Normalize => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
@ -1105,8 +1089,8 @@ impl super::Validator {
}
}
Mf::FaceForward | Mf::Fma | Mf::SmoothStep => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), None) => (ty1, ty2),
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty) {
(Some(ty1), Some(ty2)) => (ty1, ty2),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
match *arg_ty {
@ -1134,8 +1118,8 @@ impl super::Validator {
}
}
Mf::Mix => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), None) => (ty1, ty2),
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty) {
(Some(ty1), Some(ty2)) => (ty1, ty2),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
let arg_width = match *arg_ty {
@ -1174,7 +1158,7 @@ impl super::Validator {
}
}
Mf::Inverse | Mf::Determinant => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
let good = match *arg_ty {
@ -1186,7 +1170,7 @@ impl super::Validator {
}
}
Mf::Transpose => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
@ -1195,7 +1179,7 @@ impl super::Validator {
}
}
Mf::CountOneBits | Mf::ReverseBits => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
if arg1_ty.is_some() | arg2_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
@ -1206,118 +1190,6 @@ impl super::Validator {
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
Mf::InsertBits => {
let (arg1_ty, arg2_ty, arg3_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), Some(ty3)) => (ty1, ty2, ty3),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
match *arg_ty {
Ti::Scalar { kind: Sk::Sint, .. }
| Ti::Scalar { kind: Sk::Uint, .. }
| Ti::Vector { kind: Sk::Sint, .. }
| Ti::Vector { kind: Sk::Uint, .. } => {}
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
if arg1_ty != arg_ty {
return Err(ExpressionError::InvalidArgumentType(
fun,
1,
arg1.unwrap(),
));
}
match *arg2_ty {
Ti::Scalar { kind: Sk::Uint, .. } => {}
_ => {
return Err(ExpressionError::InvalidArgumentType(
fun,
2,
arg2.unwrap(),
))
}
}
match *arg3_ty {
Ti::Scalar { kind: Sk::Uint, .. } => {}
_ => {
return Err(ExpressionError::InvalidArgumentType(
fun,
2,
arg3.unwrap(),
))
}
}
}
Mf::ExtractBits => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
(Some(ty1), Some(ty2), None) => (ty1, ty2),
_ => return Err(ExpressionError::WrongArgumentCount(fun)),
};
match *arg_ty {
Ti::Scalar { kind: Sk::Sint, .. }
| Ti::Scalar { kind: Sk::Uint, .. }
| Ti::Vector { kind: Sk::Sint, .. }
| Ti::Vector { kind: Sk::Uint, .. } => {}
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
match *arg1_ty {
Ti::Scalar { kind: Sk::Uint, .. } => {}
_ => {
return Err(ExpressionError::InvalidArgumentType(
fun,
2,
arg1.unwrap(),
))
}
}
match *arg2_ty {
Ti::Scalar { kind: Sk::Uint, .. } => {}
_ => {
return Err(ExpressionError::InvalidArgumentType(
fun,
2,
arg2.unwrap(),
))
}
}
}
Mf::Pack2x16unorm | Mf::Pack2x16snorm | Mf::Pack2x16float => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
Ti::Vector {
size: crate::VectorSize::Bi,
kind: Sk::Float,
..
} => {}
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
Mf::Pack4x8snorm | Mf::Pack4x8unorm => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
Ti::Vector {
size: crate::VectorSize::Quad,
kind: Sk::Float,
..
} => {}
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
Mf::Unpack2x16float
| Mf::Unpack2x16snorm
| Mf::Unpack2x16unorm
| Mf::Unpack4x8snorm
| Mf::Unpack4x8unorm => {
if arg1_ty.is_some() | arg2_ty.is_some() | arg3_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
Ti::Scalar { kind: Sk::Uint, .. } => {}
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
}
ShaderStages::all()
}
@ -1352,7 +1224,7 @@ impl super::Validator {
if let Some(&Ti::Array {
size: crate::ArraySize::Dynamic,
..
}) = resolver.types.get_handle(base).map(|ty| &ty.inner)
}) = resolver.types.try_get(base).map(|ty| &ty.inner)
{
ShaderStages::all()
} else {

396
third_party/rust/naga/src/valid/function.rs поставляемый
Просмотреть файл

@ -1,16 +1,8 @@
use crate::arena::Handle;
#[cfg(feature = "validate")]
use crate::arena::{Arena, UniqueArena};
use super::{
analyzer::{UniformityDisruptor, UniformityRequirements},
ExpressionError, FunctionInfo, ModuleInfo,
ExpressionError, FunctionInfo, ModuleInfo, ShaderStages, TypeFlags, ValidationFlags,
};
use crate::span::WithSpan;
#[cfg(feature = "validate")]
use crate::span::{AddSpan as _, MapErrWithSpan as _};
#[cfg(feature = "validate")]
use crate::arena::{Arena, Handle};
use bit_set::BitSet;
#[derive(Clone, Debug, thiserror::Error)]
@ -84,12 +76,6 @@ pub enum FunctionError {
},
#[error("Argument '{name}' at index {index} has a type that can't be passed into functions.")]
InvalidArgumentType { index: usize, name: String },
#[error("Argument '{name}' at index {index} is a pointer of class {class:?}, which can't be passed into functions.")]
InvalidArgumentPointerClass {
index: usize,
name: String,
class: crate::StorageClass,
},
#[error("There are instructions after `return`/`break`/`continue`")]
InstructionsAfterReturn,
#[error("The `break` is used outside of a `loop` or `switch` context")]
@ -104,14 +90,8 @@ pub enum FunctionError {
InvalidIfType(Handle<crate::Expression>),
#[error("The `switch` value {0:?} is not an integer scalar")]
InvalidSwitchType(Handle<crate::Expression>),
#[error("Multiple `switch` cases for {0:?} are present")]
#[error("Multiple `switch` cases for {0} are present")]
ConflictingSwitchCase(i32),
#[error("The `switch` is missing a `default` case")]
MissingDefaultCase,
#[error("Multiple `default` cases are present")]
MultipleDefaultCases,
#[error("The last `switch` case contains a `falltrough`")]
LastCaseFallTrough,
#[error("The pointer {0:?} doesn't relate to a valid destination for a store")]
InvalidStorePointer(Handle<crate::Expression>),
#[error("The value {0:?} can not be stored")]
@ -155,25 +135,17 @@ bitflags::bitflags! {
}
}
#[cfg(feature = "validate")]
struct BlockInfo {
stages: super::ShaderStages,
finished: bool,
}
#[cfg(feature = "validate")]
struct BlockContext<'a> {
abilities: ControlFlowAbility,
info: &'a FunctionInfo,
expressions: &'a Arena<crate::Expression>,
types: &'a UniqueArena<crate::Type>,
types: &'a Arena<crate::Type>,
global_vars: &'a Arena<crate::GlobalVariable>,
functions: &'a Arena<crate::Function>,
prev_infos: &'a [FunctionInfo],
return_type: Option<Handle<crate::Type>>,
}
#[cfg(feature = "validate")]
impl<'a> BlockContext<'a> {
fn new(
fun: &'a crate::Function,
@ -210,11 +182,11 @@ impl<'a> BlockContext<'a> {
&self,
handle: Handle<crate::Expression>,
valid_expressions: &BitSet,
) -> Result<&crate::TypeInner, WithSpan<ExpressionError>> {
) -> Result<&crate::TypeInner, ExpressionError> {
if handle.index() >= self.expressions.len() {
Err(ExpressionError::DoesntExist.with_span())
Err(ExpressionError::DoesntExist)
} else if !valid_expressions.contains(handle.index()) {
Err(ExpressionError::NotInScope.with_span_handle(handle, self.expressions))
Err(ExpressionError::NotInScope)
} else {
Ok(self.info[handle].ty.inner_with(self.types))
}
@ -224,9 +196,9 @@ impl<'a> BlockContext<'a> {
&self,
handle: Handle<crate::Expression>,
valid_expressions: &BitSet,
) -> Result<&crate::TypeInner, WithSpan<FunctionError>> {
) -> Result<&crate::TypeInner, FunctionError> {
self.resolve_type_impl(handle, valid_expressions)
.map_err_inner(|error| FunctionError::Expression { handle, error }.with_span())
.map_err(|error| FunctionError::Expression { handle, error })
}
fn resolve_pointer_type(
@ -245,40 +217,33 @@ impl<'a> BlockContext<'a> {
}
impl super::Validator {
#[cfg(feature = "validate")]
fn validate_call(
&mut self,
function: Handle<crate::Function>,
arguments: &[Handle<crate::Expression>],
result: Option<Handle<crate::Expression>>,
context: &BlockContext,
) -> Result<super::ShaderStages, WithSpan<CallError>> {
) -> Result<ShaderStages, CallError> {
let fun = context
.functions
.try_get(function)
.ok_or(CallError::InvalidFunction)
.map_err(WithSpan::new)?;
.ok_or(CallError::InvalidFunction)?;
if fun.arguments.len() != arguments.len() {
return Err(CallError::ArgumentCount {
required: fun.arguments.len(),
seen: arguments.len(),
}
.with_span());
});
}
for (index, (arg, &expr)) in fun.arguments.iter().zip(arguments).enumerate() {
let ty = context
.resolve_type_impl(expr, &self.valid_expression_set)
.map_err_inner(|error| {
CallError::Argument { index, error }.with_span_handle(expr, context.expressions)
})?;
let arg_inner = &context.types[arg.ty].inner;
if !ty.equivalent(arg_inner, context.types) {
.map_err(|error| CallError::Argument { index, error })?;
if ty != &context.types[arg.ty].inner {
return Err(CallError::ArgumentType {
index,
required: arg.ty,
seen_expression: expr,
}
.with_span_handle(expr, context.expressions));
});
}
}
@ -286,26 +251,21 @@ impl super::Validator {
if self.valid_expression_set.insert(expr.index()) {
self.valid_expression_list.push(expr);
} else {
return Err(CallError::ResultAlreadyInScope(expr)
.with_span_handle(expr, context.expressions));
return Err(CallError::ResultAlreadyInScope(expr));
}
match context.expressions[expr] {
crate::Expression::CallResult(callee)
if fun.result.is_some() && callee == function => {}
_ => {
return Err(CallError::ExpressionMismatch(result)
.with_span_handle(expr, context.expressions))
}
_ => return Err(CallError::ExpressionMismatch(result)),
}
} else if fun.result.is_some() {
return Err(CallError::ExpressionMismatch(result).with_span());
return Err(CallError::ExpressionMismatch(result));
}
let callee_info = &context.prev_infos[function.index()];
Ok(callee_info.available_stages)
}
#[cfg(feature = "validate")]
fn validate_atomic(
&mut self,
pointer: Handle<crate::Expression>,
@ -313,23 +273,19 @@ impl super::Validator {
value: Handle<crate::Expression>,
result: Handle<crate::Expression>,
context: &BlockContext,
) -> Result<(), WithSpan<FunctionError>> {
) -> Result<(), FunctionError> {
let pointer_inner = context.resolve_type(pointer, &self.valid_expression_set)?;
let (ptr_kind, ptr_width) = match *pointer_inner {
crate::TypeInner::Pointer { base, .. } => match context.types[base].inner {
crate::TypeInner::Atomic { kind, width } => (kind, width),
ref other => {
log::error!("Atomic pointer to type {:?}", other);
return Err(AtomicError::InvalidPointer(pointer)
.with_span_handle(pointer, context.expressions)
.into_other());
return Err(AtomicError::InvalidPointer(pointer).into());
}
},
ref other => {
log::error!("Atomic on type {:?}", other);
return Err(AtomicError::InvalidPointer(pointer)
.with_span_handle(pointer, context.expressions)
.into_other());
return Err(AtomicError::InvalidPointer(pointer).into());
}
};
@ -338,27 +294,21 @@ impl super::Validator {
crate::TypeInner::Scalar { width, kind } if kind == ptr_kind && width == ptr_width => {}
ref other => {
log::error!("Atomic operand type {:?}", other);
return Err(AtomicError::InvalidOperand(value)
.with_span_handle(value, context.expressions)
.into_other());
return Err(AtomicError::InvalidOperand(value).into());
}
}
if let crate::AtomicFunction::Exchange { compare: Some(cmp) } = *fun {
if context.resolve_type(cmp, &self.valid_expression_set)? != value_inner {
log::error!("Atomic exchange comparison has a different type from the value");
return Err(AtomicError::InvalidOperand(cmp)
.with_span_handle(cmp, context.expressions)
.into_other());
return Err(AtomicError::InvalidOperand(cmp).into());
}
}
if self.valid_expression_set.insert(result.index()) {
self.valid_expression_list.push(result);
} else {
return Err(AtomicError::ResultAlreadyInScope(result)
.with_span_handle(result, context.expressions)
.into_other());
return Err(AtomicError::ResultAlreadyInScope(result).into());
}
match context.expressions[result] {
//TODO: support atomic result with comparison
@ -367,28 +317,22 @@ impl super::Validator {
width,
comparison: false,
} if kind == ptr_kind && width == ptr_width => {}
_ => {
return Err(AtomicError::ResultTypeMismatch(result)
.with_span_handle(result, context.expressions)
.into_other())
}
_ => return Err(AtomicError::ResultTypeMismatch(result).into()),
}
Ok(())
}
#[cfg(feature = "validate")]
fn validate_block_impl(
&mut self,
statements: &crate::Block,
statements: &[crate::Statement],
context: &BlockContext,
) -> Result<BlockInfo, WithSpan<FunctionError>> {
) -> Result<ShaderStages, FunctionError> {
use crate::{Statement as S, TypeInner as Ti};
let mut finished = false;
let mut stages = super::ShaderStages::all();
for (statement, &span) in statements.span_iter() {
let mut stages = ShaderStages::all();
for statement in statements {
if finished {
return Err(FunctionError::InstructionsAfterReturn
.with_span_static(span, "instructions after return"));
return Err(FunctionError::InstructionsAfterReturn);
}
match *statement {
S::Emit(ref range) => {
@ -396,15 +340,12 @@ impl super::Validator {
if self.valid_expression_set.insert(handle.index()) {
self.valid_expression_list.push(handle);
} else {
return Err(FunctionError::ExpressionAlreadyInScope(handle)
.with_span_handle(handle, context.expressions));
return Err(FunctionError::ExpressionAlreadyInScope(handle));
}
}
}
S::Block(ref block) => {
let info = self.validate_block(block, context)?;
stages &= info.stages;
finished = info.finished;
stages &= self.validate_block(block, context)?;
}
S::If {
condition,
@ -416,76 +357,27 @@ impl super::Validator {
kind: crate::ScalarKind::Bool,
width: _,
} => {}
_ => {
return Err(FunctionError::InvalidIfType(condition)
.with_span_handle(condition, context.expressions))
}
_ => return Err(FunctionError::InvalidIfType(condition)),
}
stages &= self.validate_block(accept, context)?.stages;
stages &= self.validate_block(reject, context)?.stages;
stages &= self.validate_block(accept, context)?;
stages &= self.validate_block(reject, context)?;
}
S::Switch {
selector,
ref cases,
ref default,
} => {
match *context.resolve_type(selector, &self.valid_expression_set)? {
Ti::Scalar {
kind: crate::ScalarKind::Uint,
width: _,
} => {}
Ti::Scalar {
kind: crate::ScalarKind::Sint,
width: _,
} => {}
_ => {
return Err(FunctionError::InvalidSwitchType(selector)
.with_span_handle(selector, context.expressions))
}
_ => return Err(FunctionError::InvalidSwitchType(selector)),
}
self.select_cases.clear();
let mut default = false;
for case in cases {
match case.value {
crate::SwitchValue::Integer(value) => {
if !self.select_cases.insert(value) {
return Err(FunctionError::ConflictingSwitchCase(value)
.with_span_static(
case.body
.span_iter()
.next()
.map_or(Default::default(), |(_, s)| *s),
"conflicting switch arm here",
));
}
}
crate::SwitchValue::Default => {
if default {
return Err(FunctionError::MultipleDefaultCases
.with_span_static(
case.body
.span_iter()
.next()
.map_or(Default::default(), |(_, s)| *s),
"duplicated switch arm here",
));
}
default = true
}
}
}
if !default {
return Err(FunctionError::MissingDefaultCase
.with_span_static(span, "missing default case"));
}
if let Some(case) = cases.last() {
if case.fall_through {
return Err(FunctionError::LastCaseFallTrough.with_span_static(
case.body
.span_iter()
.next()
.map_or(Default::default(), |(_, s)| *s),
"bad switch arm here",
));
if !self.select_cases.insert(case.value) {
return Err(FunctionError::ConflictingSwitchCase(case.value));
}
}
let pass_through_abilities = context.abilities
@ -493,8 +385,9 @@ impl super::Validator {
let sub_context =
context.with_abilities(pass_through_abilities | ControlFlowAbility::BREAK);
for case in cases {
stages &= self.validate_block(&case.body, &sub_context)?.stages;
stages &= self.validate_block(&case.body, &sub_context)?;
}
stages &= self.validate_block(default, &sub_context)?;
}
S::Loop {
ref body,
@ -504,72 +397,49 @@ impl super::Validator {
// because the continuing{} block inherits the scope
let base_expression_count = self.valid_expression_list.len();
let pass_through_abilities = context.abilities & ControlFlowAbility::RETURN;
stages &= self
.validate_block_impl(
body,
&context.with_abilities(
pass_through_abilities
| ControlFlowAbility::BREAK
| ControlFlowAbility::CONTINUE,
),
)?
.stages;
stages &= self
.validate_block_impl(
continuing,
&context.with_abilities(ControlFlowAbility::empty()),
)?
.stages;
stages &= self.validate_block_impl(
body,
&context.with_abilities(
pass_through_abilities
| ControlFlowAbility::BREAK
| ControlFlowAbility::CONTINUE,
),
)?;
stages &= self.validate_block_impl(
continuing,
&context.with_abilities(ControlFlowAbility::empty()),
)?;
for handle in self.valid_expression_list.drain(base_expression_count..) {
self.valid_expression_set.remove(handle.index());
}
}
S::Break => {
if !context.abilities.contains(ControlFlowAbility::BREAK) {
return Err(FunctionError::BreakOutsideOfLoopOrSwitch
.with_span_static(span, "invalid break"));
return Err(FunctionError::BreakOutsideOfLoopOrSwitch);
}
finished = true;
}
S::Continue => {
if !context.abilities.contains(ControlFlowAbility::CONTINUE) {
return Err(FunctionError::ContinueOutsideOfLoop
.with_span_static(span, "invalid continue"));
return Err(FunctionError::ContinueOutsideOfLoop);
}
finished = true;
}
S::Return { value } => {
if !context.abilities.contains(ControlFlowAbility::RETURN) {
return Err(FunctionError::InvalidReturnSpot
.with_span_static(span, "invalid return"));
return Err(FunctionError::InvalidReturnSpot);
}
let value_ty = value
.map(|expr| context.resolve_type(expr, &self.valid_expression_set))
.transpose()?;
let expected_ty = context.return_type.map(|ty| &context.types[ty].inner);
// We can't return pointers, but it seems best not to embed that
// assumption here, so use `TypeInner::equivalent` for comparison.
let okay = match (value_ty, expected_ty) {
(None, None) => true,
(Some(value_inner), Some(expected_inner)) => {
value_inner.equivalent(expected_inner, context.types)
}
(_, _) => false,
};
if !okay {
if value_ty != expected_ty {
log::error!(
"Returning {:?} where {:?} is expected",
value_ty,
expected_ty
);
if let Some(handle) = value {
return Err(FunctionError::InvalidReturnType(value)
.with_span_handle(handle, context.expressions));
} else {
return Err(FunctionError::InvalidReturnType(value)
.with_span_static(span, "invalid return"));
}
return Err(FunctionError::InvalidReturnType(value));
}
finished = true;
}
@ -577,41 +447,30 @@ impl super::Validator {
finished = true;
}
S::Barrier(_) => {
stages &= super::ShaderStages::COMPUTE;
stages &= ShaderStages::COMPUTE;
}
S::Store { pointer, value } => {
let mut current = pointer;
loop {
let _ = context
.resolve_pointer_type(current)
.map_err(|e| e.with_span())?;
match context.expressions[current] {
let _ = context.resolve_pointer_type(current)?;
match *context.get_expression(current)? {
crate::Expression::Access { base, .. }
| crate::Expression::AccessIndex { base, .. } => current = base,
crate::Expression::LocalVariable(_)
| crate::Expression::GlobalVariable(_)
| crate::Expression::FunctionArgument(_) => break,
_ => {
return Err(FunctionError::InvalidStorePointer(current)
.with_span_handle(pointer, context.expressions))
}
_ => return Err(FunctionError::InvalidStorePointer(current)),
}
}
let value_ty = context.resolve_type(value, &self.valid_expression_set)?;
match *value_ty {
Ti::Image { .. } | Ti::Sampler { .. } => {
return Err(FunctionError::InvalidStoreValue(value)
.with_span_handle(value, context.expressions));
return Err(FunctionError::InvalidStoreValue(value));
}
_ => {}
}
let pointer_ty = context
.resolve_pointer_type(pointer)
.map_err(|e| e.with_span())?;
let good = match *pointer_ty {
let good = match *context.resolve_pointer_type(pointer)? {
Ti::Pointer { base, class: _ } => match context.types[base].inner {
Ti::Atomic { kind, width } => *value_ty == Ti::Scalar { kind, width },
ref other => value_ty == other,
@ -631,20 +490,7 @@ impl super::Validator {
_ => false,
};
if !good {
return Err(FunctionError::InvalidStoreTypes { pointer, value }
.with_span()
.with_handle(pointer, context.expressions)
.with_handle(value, context.expressions));
}
if let Some(class) = pointer_ty.pointer_class() {
if !class.access().contains(crate::StorageAccess::STORE) {
return Err(FunctionError::InvalidStorePointer(pointer)
.with_span_static(
context.expressions.get_span(pointer),
"writing to this location is not permitted",
));
}
return Err(FunctionError::InvalidStoreTypes { pointer, value });
}
}
S::ImageStore {
@ -655,15 +501,14 @@ impl super::Validator {
} => {
//Note: this code uses a lot of `FunctionError::InvalidImageStore`,
// and could probably be refactored.
let var = match *context.get_expression(image).map_err(|e| e.with_span())? {
let var = match *context.get_expression(image)? {
crate::Expression::GlobalVariable(var_handle) => {
&context.global_vars[var_handle]
}
_ => {
return Err(FunctionError::InvalidImageStore(
ExpressionError::ExpectedGlobalVariable,
)
.with_span_handle(image, context.expressions))
))
}
};
@ -683,15 +528,13 @@ impl super::Validator {
ExpressionError::InvalidImageCoordinateType(
dim, coordinate,
),
)
.with_span_handle(coordinate, context.expressions));
))
}
};
if arrayed != array_index.is_some() {
return Err(FunctionError::InvalidImageStore(
ExpressionError::InvalidImageArrayIndex,
)
.with_span_handle(coordinate, context.expressions));
));
}
if let Some(expr) = array_index {
match *context.resolve_type(expr, &self.valid_expression_set)? {
@ -702,8 +545,7 @@ impl super::Validator {
_ => {
return Err(FunctionError::InvalidImageStore(
ExpressionError::InvalidImageArrayIndexType(expr),
)
.with_span_handle(expr, context.expressions));
))
}
}
}
@ -718,24 +560,19 @@ impl super::Validator {
_ => {
return Err(FunctionError::InvalidImageStore(
ExpressionError::InvalidImageClass(class),
)
.with_span_handle(image, context.expressions));
))
}
}
}
_ => {
return Err(FunctionError::InvalidImageStore(
ExpressionError::ExpectedImageType(var.ty),
)
.with_span()
.with_handle(var.ty, context.types)
.with_handle(image, context.expressions))
))
}
};
if *context.resolve_type(value, &self.valid_expression_set)? != value_ty {
return Err(FunctionError::InvalidStoreValue(value)
.with_span_handle(value, context.expressions));
return Err(FunctionError::InvalidStoreValue(value));
}
}
S::Call {
@ -744,12 +581,7 @@ impl super::Validator {
result,
} => match self.validate_call(function, arguments, result, context) {
Ok(callee_stages) => stages &= callee_stages,
Err(error) => {
return Err(error.and_then(|error| {
FunctionError::InvalidCall { function, error }
.with_span_static(span, "invalid function call")
}))
}
Err(error) => return Err(FunctionError::InvalidCall { function, error }),
},
S::Atomic {
pointer,
@ -761,34 +593,32 @@ impl super::Validator {
}
}
}
Ok(BlockInfo { stages, finished })
Ok(stages)
}
#[cfg(feature = "validate")]
fn validate_block(
&mut self,
statements: &crate::Block,
statements: &[crate::Statement],
context: &BlockContext,
) -> Result<BlockInfo, WithSpan<FunctionError>> {
) -> Result<ShaderStages, FunctionError> {
let base_expression_count = self.valid_expression_list.len();
let info = self.validate_block_impl(statements, context)?;
let stages = self.validate_block_impl(statements, context)?;
for handle in self.valid_expression_list.drain(base_expression_count..) {
self.valid_expression_set.remove(handle.index());
}
Ok(info)
Ok(stages)
}
#[cfg(feature = "validate")]
fn validate_local_var(
&self,
var: &crate::LocalVariable,
types: &UniqueArena<crate::Type>,
types: &Arena<crate::Type>,
constants: &Arena<crate::Constant>,
) -> Result<(), LocalVariableError> {
log::debug!("var {:?}", var);
if !self.types[var.ty.index()]
.flags
.contains(super::TypeFlags::DATA | super::TypeFlags::SIZED)
.contains(TypeFlags::DATA | TypeFlags::SIZED)
{
return Err(LocalVariableError::InvalidType(var.ty));
}
@ -818,54 +648,27 @@ impl super::Validator {
fun: &crate::Function,
module: &crate::Module,
mod_info: &ModuleInfo,
) -> Result<FunctionInfo, WithSpan<FunctionError>> {
#[cfg(feature = "validate")]
let mut info = mod_info
.process_function(fun, module, self.flags)
.map_err(WithSpan::into_other)?;
) -> Result<FunctionInfo, FunctionError> {
let mut info = mod_info.process_function(fun, module, self.flags)?;
#[cfg(not(feature = "validate"))]
let info = mod_info.process_function(fun, module, self.flags)?;
#[cfg(feature = "validate")]
for (var_handle, var) in fun.local_variables.iter() {
self.validate_local_var(var, &module.types, &module.constants)
.map_err(|error| {
FunctionError::LocalVariable {
handle: var_handle,
name: var.name.clone().unwrap_or_default(),
error,
}
.with_span_handle(var.ty, &module.types)
.with_handle(var_handle, &fun.local_variables)
.map_err(|error| FunctionError::LocalVariable {
handle: var_handle,
name: var.name.clone().unwrap_or_default(),
error,
})?;
}
#[cfg(feature = "validate")]
for (index, argument) in fun.arguments.iter().enumerate() {
if !self.types[argument.ty.index()]
.flags
.contains(super::TypeFlags::ARGUMENT)
.contains(TypeFlags::ARGUMENT)
{
return Err(FunctionError::InvalidArgumentType {
index,
name: argument.name.clone().unwrap_or_default(),
}
.with_span_handle(argument.ty, &module.types));
}
match module.types[argument.ty].inner.pointer_class() {
Some(crate::StorageClass::Private)
| Some(crate::StorageClass::Function)
| Some(crate::StorageClass::WorkGroup)
| None => {}
Some(other) => {
return Err(FunctionError::InvalidArgumentPointerClass {
index,
name: argument.name.clone().unwrap_or_default(),
class: other,
}
.with_span_handle(argument.ty, &module.types))
}
});
}
}
@ -875,8 +678,7 @@ impl super::Validator {
if expr.needs_pre_emit() {
self.valid_expression_set.insert(handle.index());
}
#[cfg(feature = "validate")]
if self.flags.contains(super::ValidationFlags::EXPRESSIONS) {
if self.flags.contains(ValidationFlags::EXPRESSIONS) {
match self.validate_expression(
handle,
expr,
@ -886,22 +688,16 @@ impl super::Validator {
&mod_info.functions,
) {
Ok(stages) => info.available_stages &= stages,
Err(error) => {
return Err(FunctionError::Expression { handle, error }
.with_span_handle(handle, &fun.expressions))
}
Err(error) => return Err(FunctionError::Expression { handle, error }),
}
}
}
#[cfg(feature = "validate")]
if self.flags.contains(super::ValidationFlags::BLOCKS) {
let stages = self
.validate_block(
&fun.body,
&BlockContext::new(fun, module, &info, &mod_info.functions),
)?
.stages;
if self.flags.contains(ValidationFlags::BLOCKS) {
let stages = self.validate_block(
&fun.body,
&BlockContext::new(fun, module, &info, &mod_info.functions),
)?;
info.available_stages &= stages;
}
Ok(info)

107
third_party/rust/naga/src/valid/interface.rs поставляемый
Просмотреть файл

@ -1,13 +1,12 @@
use super::{
analyzer::{FunctionInfo, GlobalUse},
Capabilities, Disalignment, FunctionError, ModuleInfo,
Capabilities, Disalignment, FunctionError, ModuleInfo, ShaderStages, TypeFlags,
ValidationFlags,
};
use crate::arena::{Handle, UniqueArena};
use crate::arena::{Arena, Handle};
use crate::span::{AddSpan as _, MapErrWithSpan as _, SpanProvider as _, WithSpan};
use bit_set::BitSet;
#[cfg(feature = "validate")]
const MAX_WORKGROUP_SIZE: u32 = 0x4000;
#[derive(Clone, Debug, thiserror::Error)]
@ -18,8 +17,8 @@ pub enum GlobalVariableError {
InvalidType,
#[error("Type flags {seen:?} do not meet the required {required:?}")]
MissingTypeFlags {
required: super::TypeFlags,
seen: super::TypeFlags,
required: TypeFlags,
seen: TypeFlags,
},
#[error("Capability {0:?} is not supported")]
UnsupportedCapability(Capabilities),
@ -71,15 +70,14 @@ pub enum EntryPointError {
BindingCollision(Handle<crate::GlobalVariable>),
#[error("Argument {0} varying error")]
Argument(u32, #[source] VaryingError),
#[error(transparent)]
Result(#[from] VaryingError),
#[error("Result varying error")]
Result(#[source] VaryingError),
#[error("Location {location} onterpolation of an integer has to be flat")]
InvalidIntegerInterpolation { location: u32 },
#[error(transparent)]
Function(#[from] FunctionError),
}
#[cfg(feature = "validate")]
fn storage_usage(access: crate::StorageAccess) -> GlobalUse {
let mut storage_usage = GlobalUse::QUERY;
if access.contains(crate::StorageAccess::LOAD) {
@ -95,7 +93,7 @@ struct VaryingContext<'a> {
ty: Handle<crate::Type>,
stage: crate::ShaderStage,
output: bool,
types: &'a UniqueArena<crate::Type>,
types: &'a Arena<crate::Type>,
location_mask: &'a mut BitSet,
built_in_mask: u32,
capabilities: Capabilities,
@ -160,17 +158,6 @@ impl VaryingContext<'_> {
width,
},
),
Bi::ViewIndex => (
match self.stage {
St::Vertex | St::Fragment => !self.output,
St::Compute => false,
},
*ty_inner
== Ti::Scalar {
kind: Sk::Sint,
width,
},
),
Bi::FragDepth => (
self.stage == St::Fragment && self.output,
*ty_inner
@ -258,6 +245,10 @@ impl VaryingContext<'_> {
return Err(VaryingError::BindingCollision { location });
}
// Values passed from the vertex shader to the fragment shader must have their
// interpolation defaulted (i.e. not `None`) by the front end, as appropriate for
// that language. For anything other than floating-point scalars and vectors, the
// interpolation must be `Flat`.
let needs_interpolation = match self.stage {
crate::ShaderStage::Vertex => self.output,
crate::ShaderStage::Fragment => !self.output,
@ -289,12 +280,9 @@ impl VaryingContext<'_> {
Ok(())
}
fn validate(&mut self, binding: Option<&crate::Binding>) -> Result<(), WithSpan<VaryingError>> {
let span_context = self.types.get_span_context(self.ty);
fn validate(&mut self, binding: Option<&crate::Binding>) -> Result<(), VaryingError> {
match binding {
Some(binding) => self
.validate_impl(binding)
.map_err(|e| e.with_span_context(span_context)),
Some(binding) => self.validate_impl(binding),
None => {
match self.types[self.ty].inner {
//TODO: check the member types
@ -305,20 +293,15 @@ impl VaryingContext<'_> {
} => {
for (index, member) in members.iter().enumerate() {
self.ty = member.ty;
let span_context = self.types.get_span_context(self.ty);
match member.binding {
None => {
return Err(VaryingError::MemberMissingBinding(index as u32)
.with_span_context(span_context))
return Err(VaryingError::MemberMissingBinding(index as u32))
}
// TODO: shouldn't this be validate?
Some(ref binding) => self
.validate_impl(binding)
.map_err(|e| e.with_span_context(span_context))?,
Some(ref binding) => self.validate_impl(binding)?,
}
}
}
_ => return Err(VaryingError::MissingBinding.with_span()),
_ => return Err(VaryingError::MissingBinding),
}
Ok(())
}
@ -327,14 +310,11 @@ impl VaryingContext<'_> {
}
impl super::Validator {
#[cfg(feature = "validate")]
pub(super) fn validate_global_var(
&self,
var: &crate::GlobalVariable,
types: &UniqueArena<crate::Type>,
types: &Arena<crate::Type>,
) -> Result<(), GlobalVariableError> {
use super::TypeFlags;
log::debug!("var {:?}", var);
let type_info = &self.types[var.ty.index()];
@ -342,7 +322,7 @@ impl super::Validator {
crate::StorageClass::Function => return Err(GlobalVariableError::InvalidUsage),
crate::StorageClass::Storage { .. } => {
if let Err((ty_handle, disalignment)) = type_info.storage_layout {
if self.flags.contains(super::ValidationFlags::STRUCT_LAYOUTS) {
if self.flags.contains(ValidationFlags::STRUCT_LAYOUTS) {
return Err(GlobalVariableError::Alignment(ty_handle, disalignment));
}
}
@ -353,7 +333,7 @@ impl super::Validator {
}
crate::StorageClass::Uniform => {
if let Err((ty_handle, disalignment)) = type_info.uniform_layout {
if self.flags.contains(super::ValidationFlags::STRUCT_LAYOUTS) {
if self.flags.contains(ValidationFlags::STRUCT_LAYOUTS) {
return Err(GlobalVariableError::Alignment(ty_handle, disalignment));
}
}
@ -408,47 +388,36 @@ impl super::Validator {
ep: &crate::EntryPoint,
module: &crate::Module,
mod_info: &ModuleInfo,
) -> Result<FunctionInfo, WithSpan<EntryPointError>> {
#[cfg(feature = "validate")]
) -> Result<FunctionInfo, EntryPointError> {
if ep.early_depth_test.is_some() && ep.stage != crate::ShaderStage::Fragment {
return Err(EntryPointError::UnexpectedEarlyDepthTest.with_span());
return Err(EntryPointError::UnexpectedEarlyDepthTest);
}
#[cfg(feature = "validate")]
if ep.stage == crate::ShaderStage::Compute {
if ep
.workgroup_size
.iter()
.any(|&s| s == 0 || s > MAX_WORKGROUP_SIZE)
{
return Err(EntryPointError::OutOfRangeWorkgroupSize.with_span());
return Err(EntryPointError::OutOfRangeWorkgroupSize);
}
} else if ep.workgroup_size != [0; 3] {
return Err(EntryPointError::UnexpectedWorkgroupSize.with_span());
return Err(EntryPointError::UnexpectedWorkgroupSize);
}
let info = self
.validate_function(&ep.function, module, mod_info)
.map_err(WithSpan::into_other)?;
let stage_bit = match ep.stage {
crate::ShaderStage::Vertex => ShaderStages::VERTEX,
crate::ShaderStage::Fragment => ShaderStages::FRAGMENT,
crate::ShaderStage::Compute => ShaderStages::COMPUTE,
};
#[cfg(feature = "validate")]
{
use super::ShaderStages;
let info = self.validate_function(&ep.function, module, mod_info)?;
let stage_bit = match ep.stage {
crate::ShaderStage::Vertex => ShaderStages::VERTEX,
crate::ShaderStage::Fragment => ShaderStages::FRAGMENT,
crate::ShaderStage::Compute => ShaderStages::COMPUTE,
};
if !info.available_stages.contains(stage_bit) {
return Err(EntryPointError::ForbiddenStageOperations.with_span());
}
if !info.available_stages.contains(stage_bit) {
return Err(EntryPointError::ForbiddenStageOperations);
}
self.location_mask.clear();
let mut argument_built_ins = 0;
// TODO: add span info to function arguments
for (index, fa) in ep.function.arguments.iter().enumerate() {
let mut ctx = VaryingContext {
ty: fa.ty,
@ -460,7 +429,7 @@ impl super::Validator {
capabilities: self.capabilities,
};
ctx.validate(fa.binding.as_ref())
.map_err_inner(|e| EntryPointError::Argument(index as u32, e).with_span())?;
.map_err(|e| EntryPointError::Argument(index as u32, e))?;
argument_built_ins = ctx.built_in_mask;
}
@ -476,14 +445,12 @@ impl super::Validator {
capabilities: self.capabilities,
};
ctx.validate(fr.binding.as_ref())
.map_err_inner(|e| EntryPointError::Result(e).with_span())?;
.map_err(EntryPointError::Result)?;
}
for bg in self.bind_group_masks.iter_mut() {
bg.clear();
}
#[cfg(feature = "validate")]
for (var_handle, var) in module.global_variables.iter() {
let usage = info[var_handle];
if usage.is_empty() {
@ -511,8 +478,7 @@ impl super::Validator {
allowed_usage,
usage
);
return Err(EntryPointError::InvalidGlobalUsage(var_handle, usage)
.with_span_handle(var_handle, &module.global_variables));
return Err(EntryPointError::InvalidGlobalUsage(var_handle, usage));
}
if let Some(ref bind) = var.binding {
@ -520,8 +486,7 @@ impl super::Validator {
self.bind_group_masks.push(BitSet::new());
}
if !self.bind_group_masks[bind.group as usize].insert(bind.binding as usize) {
return Err(EntryPointError::BindingCollision(var_handle)
.with_span_handle(var_handle, &module.global_variables));
return Err(EntryPointError::BindingCollision(var_handle));
}
}
}

95
third_party/rust/naga/src/valid/mod.rs поставляемый
Просмотреть файл

@ -5,11 +5,8 @@ mod function;
mod interface;
mod r#type;
#[cfg(feature = "validate")]
use crate::arena::{Arena, UniqueArena};
use crate::{
arena::Handle,
arena::{Arena, Handle},
proc::{InvalidBaseType, Layouter},
FastHashSet,
};
@ -19,7 +16,6 @@ use std::ops;
//TODO: analyze the model at the same time as we validate it,
// merge the corresponding matches over expressions and statements.
use crate::span::{AddSpan as _, WithSpan};
pub use analyzer::{ExpressionInfo, FunctionInfo, GlobalUse, Uniformity, UniformityRequirements};
pub use compose::ComposeError;
pub use expression::ExpressionError;
@ -33,19 +29,14 @@ bitflags::bitflags! {
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct ValidationFlags: u8 {
/// Expressions.
#[cfg(feature = "validate")]
const EXPRESSIONS = 0x1;
/// Statements and blocks of them.
#[cfg(feature = "validate")]
const BLOCKS = 0x2;
/// Uniformity of control flow for operations that require it.
#[cfg(feature = "validate")]
const CONTROL_FLOW_UNIFORMITY = 0x4;
/// Host-shareable structure layouts.
#[cfg(feature = "validate")]
const STRUCT_LAYOUTS = 0x8;
/// Constants.
#[cfg(feature = "validate")]
const CONSTANTS = 0x10;
}
}
@ -106,7 +97,6 @@ pub struct Validator {
layouter: Layouter,
location_mask: BitSet,
bind_group_masks: Vec<BitSet>,
#[allow(dead_code)]
select_cases: FastHashSet<i32>,
valid_expression_list: Vec<Handle<crate::Expression>>,
valid_expression_set: BitSet,
@ -168,7 +158,6 @@ pub enum ValidationError {
}
impl crate::TypeInner {
#[cfg(feature = "validate")]
fn is_sized(&self) -> bool {
match *self {
Self::Scalar { .. }
@ -187,7 +176,6 @@ impl crate::TypeInner {
}
/// Return the `ImageDimension` for which `self` is an appropriate coordinate.
#[cfg(feature = "validate")]
fn image_storage_coordinates(&self) -> Option<crate::ImageDimension> {
match *self {
Self::Scalar {
@ -225,12 +213,11 @@ impl Validator {
}
}
#[cfg(feature = "validate")]
fn validate_constant(
&self,
handle: Handle<crate::Constant>,
constants: &Arena<crate::Constant>,
types: &UniqueArena<crate::Type>,
types: &Arena<crate::Type>,
) -> Result<(), ConstantError> {
let con = &constants[handle];
match con.inner {
@ -266,57 +253,38 @@ impl Validator {
}
/// Check the given module to be valid.
pub fn validate(
&mut self,
module: &crate::Module,
) -> Result<ModuleInfo, WithSpan<ValidationError>> {
pub fn validate(&mut self, module: &crate::Module) -> Result<ModuleInfo, ValidationError> {
self.reset_types(module.types.len());
self.layouter
.update(&module.types, &module.constants)
.map_err(|e| {
let InvalidBaseType(handle) = e;
ValidationError::from(e).with_span_handle(handle, &module.types)
})?;
self.layouter.update(&module.types, &module.constants)?;
#[cfg(feature = "validate")]
if self.flags.contains(ValidationFlags::CONSTANTS) {
for (handle, constant) in module.constants.iter() {
self.validate_constant(handle, &module.constants, &module.types)
.map_err(|error| {
ValidationError::Constant {
handle,
name: constant.name.clone().unwrap_or_default(),
error,
}
.with_span_handle(handle, &module.constants)
})?
.map_err(|error| ValidationError::Constant {
handle,
name: constant.name.clone().unwrap_or_default(),
error,
})?;
}
}
for (handle, ty) in module.types.iter() {
let ty_info = self
.validate_type(handle, &module.types, &module.constants)
.map_err(|error| {
ValidationError::Type {
handle,
name: ty.name.clone().unwrap_or_default(),
error,
}
.with_span_handle(handle, &module.types)
.map_err(|error| ValidationError::Type {
handle,
name: ty.name.clone().unwrap_or_default(),
error,
})?;
self.types[handle.index()] = ty_info;
}
#[cfg(feature = "validate")]
for (var_handle, var) in module.global_variables.iter() {
self.validate_global_var(var, &module.types)
.map_err(|error| {
ValidationError::GlobalVariable {
handle: var_handle,
name: var.name.clone().unwrap_or_default(),
error,
}
.with_span_handle(var_handle, &module.global_variables)
.map_err(|error| ValidationError::GlobalVariable {
handle: var_handle,
name: var.name.clone().unwrap_or_default(),
error,
})?;
}
@ -329,14 +297,11 @@ impl Validator {
match self.validate_function(fun, module, &mod_info) {
Ok(info) => mod_info.functions.push(info),
Err(error) => {
return Err(error.and_then(|error| {
ValidationError::Function {
handle,
name: fun.name.clone().unwrap_or_default(),
error,
}
.with_span_handle(handle, &module.functions)
}))
return Err(ValidationError::Function {
handle,
name: fun.name.clone().unwrap_or_default(),
error,
})
}
}
}
@ -348,21 +313,17 @@ impl Validator {
stage: ep.stage,
name: ep.name.clone(),
error: EntryPointError::Conflict,
}
.with_span()); // TODO: keep some EP span information?
});
}
match self.validate_entry_point(ep, module, &mod_info) {
Ok(info) => mod_info.entry_points.push(info),
Err(error) => {
return Err(error.and_then(|inner| {
ValidationError::EntryPoint {
stage: ep.stage,
name: ep.name.clone(),
error: inner,
}
.with_span()
}))
return Err(ValidationError::EntryPoint {
stage: ep.stage,
name: ep.name.clone(),
error,
})
}
}
}

27
third_party/rust/naga/src/valid/type.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use super::Capabilities;
use crate::{
arena::{Arena, Handle, UniqueArena},
arena::{Arena, Handle},
proc::Alignment,
};
@ -89,8 +89,6 @@ pub enum TypeError {
InvalidArrayBaseType(Handle<crate::Type>),
#[error("The constant {0:?} can not be used for an array size")]
InvalidArraySizeConstant(Handle<crate::Constant>),
#[error("The constant {0:?} is specialized, and cannot be used as an array size")]
UnsupportedSpecializedArrayLength(Handle<crate::Constant>),
#[error("Array type {0:?} must have a length of one or more")]
NonPositiveArrayLength(Handle<crate::Constant>),
#[error("Array stride {stride} is smaller than the base element size {base_size}")]
@ -189,7 +187,7 @@ impl super::Validator {
pub(super) fn validate_type(
&self,
handle: Handle<crate::Type>,
types: &UniqueArena<crate::Type>,
types: &Arena<crate::Type>,
constants: &Arena<crate::Constant>,
) -> Result<TypeInfo, TypeError> {
use crate::TypeInner as Ti;
@ -352,19 +350,6 @@ impl super::Validator {
let sized_flag = match size {
crate::ArraySize::Constant(const_handle) => {
let length_is_positive = match constants.try_get(const_handle) {
Some(&crate::Constant {
specialization: Some(_),
..
}) => {
// Many of our back ends don't seem to support
// specializable array lengths. If you want to try to make
// this work, be sure to address all uses of
// `Constant::to_array_length`, which ignores
// specialization.
return Err(TypeError::UnsupportedSpecializedArrayLength(
const_handle,
));
}
Some(&crate::Constant {
inner:
crate::ConstantInner::Scalar {
@ -488,13 +473,9 @@ impl super::Validator {
handle,
);
// The last field may be an unsized array.
// only the last field can be unsized
if !base_info.flags.contains(TypeFlags::SIZED) {
let is_array = match types[member.ty].inner {
crate::TypeInner::Array { .. } => true,
_ => false,
};
if !is_array || i + 1 != members.len() {
if i + 1 != members.len() {
let name = member.name.clone().unwrap_or_default();
return Err(TypeError::InvalidDynamicArray(name, member.ty));
}

1
third_party/rust/petgraph/.cargo-checksum.json поставляемый Normal file

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

129
third_party/rust/petgraph/CONTRIBUTING.rst поставляемый Normal file
Просмотреть файл

@ -0,0 +1,129 @@
Contributing to ``petgraph``
============================
Hi! We'd love to have your contributions! If you want help or mentorship, reach
out to us in a GitHub issue, or ping ``bluss`` in `#rust on irc.mozilla.org`_
and introduce yourself.
.. _`\#rust on irc.mozilla.org`: irc://irc.mozilla.org#rust
* `Building`_
* `Testing`_
* `Pull Requests`_
* `Bug Fixes`_
* `Performance Improvements`_
* `Implementing New Algorithms`_
* `Where We Need Help`_
* `Team`_
Building
--------
::
$ cargo build
Testing
-------
::
$ cargo test --features all
Pull Requests
-------------
All pull requests are reviewed by a team_ member before merging.
Additionally, different kinds of pull requests have different requirements.
Bug Fixes
.........
We love getting bug fixes!
Make sure to include a regression test, so that we can be sure that we never
accidentally re-introduce the bug again.
Performance Improvements
........................
You made an algorithm faster? Awesome.
When submitting performance improvement, include the following:
* A new ``#[bench]`` function that exercises this code path, if one doesn't
already exist
* Before and after ``cargo bench`` scores, optionally formatted using
`cargo-benchcmp`_
.. _`cargo-benchcmp`: https://github.com/BurntSushi/cargo-benchcmp
Implementing New Algorithms
...........................
Implementing new graph algorithms is encouraged!
If you're going to implement a new algorithm, make sure that you do the
following:
* Add a ``quickcheck`` property test for the new algorithm
* Add a ``benchmark`` test for measuring performance of the new algorithm
* Document what the algorithm does and in what situations it should be used
* Document the big-O running time of the algorithm
* Include links to relevant reading materials, such as a paper or Wikipedia
* Make the algorithm work with generic graphs, constraining the generic graph
type parameter with our existing graph traits, like ``Visitable``, or with new
graph traits
Any team_ member can review a pull request implementing a new algorithm, but the
final decision whether or not the algorithm is appropriate for inclusion in the
``petgraph`` crate is left to ``@bluss``.
Additionally, assuming that the new algorithm is merged into ``petgraph``, you
are *strongly* encouraged to join the ``petgraph`` team_! *You* are the best
person to review any future bug fixes, performance improvements, and whatever
other changes that affect this new algorithm.
Where We Need Help
------------------
* Issues labeled `"help wanted"`_ are issues where we could use a little help
from you.
* Issues Labeled `"mentored"`_ are issues that don't really involve any more
investigation, just implementation. We've outlined what needs to be done, and
a team_ member has volunteered to help whoever claims the issue implement it,
and get the implementation merged.
.. _`"help wanted"`:
https://github.com/bluss/petgraph/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22
.. _`"mentored"`:
https://github.com/bluss/petgraph/issues?q=is%3Aopen+is%3Aissue+label%3A%22mentored%22
Team
----
The ``petgraph`` team consists of:
* ``@bluss``
* ``@fitzgen``
We need more team members to help spread out reviewing and maintenance
responsibilities — want to join us? `Drop a comment in this issue!`_
.. _`Drop a comment in this issue!`: https://github.com/bluss/petgraph/issues/TODO

81
third_party/rust/petgraph/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,81 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "petgraph"
version = "0.6.0"
authors = ["bluss", "mitchmindtree"]
description = "Graph data structure library. Provides graph types and graph algorithms."
documentation = "https://docs.rs/petgraph/"
readme = "README.rst"
keywords = ["data-structure", "graph", "unionfind", "graph-algorithms"]
categories = ["data-structures"]
license = "MIT/Apache-2.0"
repository = "https://github.com/petgraph/petgraph"
[package.metadata.docs.rs]
features = ["serde-1", "quickcheck"]
[package.metadata.release]
no-dev-version = true
[profile.bench]
debug = true
[profile.release]
[lib]
name = "petgraph"
bench = false
[dependencies.fixedbitset]
version = "0.4.0"
default-features = false
[dependencies.indexmap]
version = "1.6.2"
[dependencies.quickcheck]
version = "0.8"
optional = true
default-features = false
[dependencies.serde]
version = "1.0"
optional = true
[dependencies.serde_derive]
version = "1.0"
optional = true
[dev-dependencies.bincode]
version = "1.3.3"
[dev-dependencies.defmac]
version = "0.2.1"
[dev-dependencies.itertools]
version = "0.10.1"
default-features = false
[dev-dependencies.odds]
version = "0.4.0"
[dev-dependencies.rand]
version = "0.5.5"
[features]
all = ["unstable", "quickcheck", "matrix_graph", "stable_graph", "graphmap"]
default = ["graphmap", "stable_graph", "matrix_graph"]
generate = []
graphmap = []
matrix_graph = []
serde-1 = ["serde", "serde_derive"]
stable_graph = []
unstable = ["generate"]

201
third_party/rust/petgraph/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/petgraph/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2015
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

38
third_party/rust/petgraph/Makefile поставляемый Normal file
Просмотреть файл

@ -0,0 +1,38 @@
DOCCRATES = petgraph fixedbitset
# deps to delete the generated docs
RMDOCS =
FEATURES = unstable
VERSIONS = $(patsubst %,target/VERS/%,$(DOCCRATES))
docs: mkdocs mksvgs subst $(RMDOCS)
# https://www.gnu.org/software/make/manual/html_node/Automatic-Variables.html
$(VERSIONS): Cargo.toml
mkdir -p $(@D)
cargo pkgid $(@F) | sed -e "s/.*#\(\|.*:\)//" > "$@"
$(DOCCRATES): %: target/VERS/%
# Put in the crate version into the docs
find ./doc/$@ -name "*.html" -exec sed -i -e "s/<title>\(.*\) - Rust/<title>$@ $(shell cat $<) - \1 - Rust/g" {} \;
subst: $(DOCCRATES)
mkdocs: Cargo.toml
cargo doc --features=$(FEATURES)
rm -rf ./doc
cp -r ./target/doc ./doc
- cat ./custom.css >> doc/main.css
$(RMDOCS): mkdocs
rm -r ./doc/$@
sed -i "/searchIndex\['$@'\]/d" doc/search-index.js
mksvgs: mkdocs graph-example.dot
dot -Tsvg < ./graph-example.dot > graph-example.svg
mv graph-example.svg ./doc/petgraph/graph/
.PHONY: docs mkdocs mksvgs subst $(DOCCRATES) $(RMDOCS)

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше